language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | sympy__sympy | sympy/polys/domains/gaussiandomains.py | {
"start": 957,
"end": 5978
} | class ____(DomainElement, Generic[Tdom]):
"""Base class for elements of Gaussian type domains."""
base: Domain[Tdom]
_parent: GaussianDomain[Self, Tdom]
x: Tdom
y: Tdom
__slots__ = ('x', 'y')
def __new__(cls, x: Tdom | int, y: Tdom | int = 0):
conv = cls.base.convert
return cls.new(conv(x), conv(y))
@classmethod
def new(cls, x: Tdom, y: Tdom) -> Self:
"""Create a new GaussianElement of the same domain."""
obj = super().__new__(cls)
obj.x = x
obj.y = y
return obj
def parent(self) -> Domain[Self]:
"""The domain that this is an element of (ZZ_I or QQ_I)"""
return self._parent
def __hash__(self) -> int:
return hash((self.x, self.y))
def __eq__(self, other) -> bool:
if isinstance(other, self.__class__):
return self.x == other.x and self.y == other.y
else:
return NotImplemented
def __lt__(self, other) -> bool:
if not isinstance(other, GaussianElement):
return NotImplemented
return [self.y, self.x] < [other.y, other.x]
def __pos__(self) -> Self:
return self
def __neg__(self) -> Self:
return self.new(-self.x, -self.y)
def __repr__(self) -> str:
return "%s(%s, %s)" % (self._parent.rep, self.x, self.y)
def __str__(self) -> str:
return str(self._parent.to_sympy(self)) # type: ignore
@classmethod
def _get_xy(cls, other: GaussianElement[Tdom] | int) -> tuple[Tdom, Tdom] | None:
if isinstance(other, cls):
return other.x, other.y
else:
try:
other_convert = cls._parent.convert(other)
except CoercionFailed:
return None
return other_convert.x, other_convert.y
def __add__(self, other: Self | int) -> Self:
other_conv = self._get_xy(other)
if other_conv is None:
return NotImplemented
x, y = other_conv
return self.new(self.x + x, self.y + y)
__radd__ = __add__
def __sub__(self, other: Self | int) -> Self:
other_conv = self._get_xy(other)
if other_conv is None:
return NotImplemented
x, y = other_conv
return self.new(self.x - x, self.y - y)
def __rsub__(self, other: Self | int) -> Self:
other_conv = self._get_xy(other)
if other_conv is None:
return NotImplemented
x, y = other_conv
return self.new(x - self.x, y - self.y)
def __mul__(self, other: Self | int) -> Self:
other_conv = self._get_xy(other)
if other_conv is None:
return NotImplemented
x, y = other_conv
return self.new(self.x*x - self.y*y, self.x*y + self.y*x)
__rmul__ = __mul__
def __pow__(self, exp: int) -> Self:
if exp == 0:
return self.new(self.base(1), self.base(0))
if exp < 0:
self, exp = 1/self, -exp # type: ignore
if exp == 1:
return self
pow2 = self
prod = self if exp % 2 else self._parent.one
exp //= 2
while exp:
pow2 *= pow2
if exp % 2:
prod *= pow2
exp //= 2
return prod
def __bool__(self) -> bool:
return bool(self.x) or bool(self.y)
def quadrant(self) -> int:
"""Return quadrant index 0-3.
0 is included in quadrant 0.
"""
if self.y > 0:
return 0 if self.x > 0 else 1
elif self.y < 0:
return 2 if self.x < 0 else 3
else:
return 0 if self.x >= 0 else 2
def __divmod__(self, other: Self | int) -> tuple[Self, Self]:
raise NotImplementedError
def __rdivmod__(self, other: Self | int) -> tuple[Self, Self]:
try:
other_convert = self._parent.convert(other)
except CoercionFailed:
return NotImplemented
else:
return other_convert.__divmod__(self)
def __rtruediv__(self, other: GaussianElement | int) -> GaussianRational:
try:
other = QQ_I.convert(other)
except CoercionFailed:
return NotImplemented
else:
return other.__truediv__(self)
def __floordiv__(self, other: Self | int) -> Self:
qr = self.__divmod__(other)
if qr is NotImplemented:
return NotImplemented
return qr[0]
def __rfloordiv__(self, other: Self | int) -> Self:
qr = self.__rdivmod__(other)
if qr is NotImplemented:
return NotImplemented
return qr[0]
def __mod__(self, other: Self | int) -> Self:
qr = self.__divmod__(other)
if qr is NotImplemented:
return NotImplemented
return qr[1]
def __rmod__(self, other: Self | int) -> Self:
qr = self.__rdivmod__(other)
if qr is NotImplemented:
return NotImplemented
return qr[1]
| GaussianElement |
python | scipy__scipy | scipy/_lib/tests/test_warnings.py | {
"start": 531,
"end": 5006
} | class ____(ast.NodeVisitor):
def __init__(self, filename):
super().__init__()
self.__filename = filename
self.bad_filters = []
self.bad_stacklevels = []
def visit_Call(self, node):
p = ParseCall()
p.visit(node.func)
ast.NodeVisitor.generic_visit(self, node)
if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
# get first argument of the `args` node of the filter call
match node.args[0]:
case ast.Constant() as c:
argtext = c.value
case ast.JoinedStr() as js:
# if we get an f-string, discard the templated pieces, which
# are likely the type or specific message; we're interested
# in the action, which is less likely to use a template
argtext = "".join(
x.value for x in js.values if isinstance(x, ast.Constant)
)
case _:
raise ValueError("unknown ast node type")
# check if filter is set to ignore outside of test code
if argtext == "ignore" and "tests" not in self.__filename.parts:
self.bad_filters.append(
f"{self.__filename}:{node.lineno}")
if p.ls[-1] == 'warn' and (
len(p.ls) == 1 or p.ls[-2] == 'warnings'):
if self.__filename == "_lib/tests/test_warnings.py":
# This file
return
# See if stacklevel exists:
if len(node.args) == 3:
return
args = {kw.arg for kw in node.keywords}
if "stacklevel" not in args:
self.bad_stacklevels.append(
f"{self.__filename}:{node.lineno}")
@pytest.fixture(scope="session")
def warning_calls():
# combined "ignore" and stacklevel error
base = Path(scipy.__file__).parent
bad_filters = []
bad_stacklevels = []
for path in base.rglob("*.py"):
# use tokenize to auto-detect encoding on systems where no
# default encoding is defined (e.g., LANG='C')
with tokenize.open(str(path)) as file:
tree = ast.parse(file.read(), filename=str(path))
finder = FindFuncs(path.relative_to(base))
finder.visit(tree)
bad_filters.extend(finder.bad_filters)
bad_stacklevels.extend(finder.bad_stacklevels)
return bad_filters, bad_stacklevels
@pytest.mark.fail_slow(40)
@pytest.mark.slow
def test_warning_calls_filters(warning_calls):
bad_filters, bad_stacklevels = warning_calls
# We try not to add filters in the code base, because those filters aren't
# thread-safe. We aim to only filter in tests with
# warnings.catch_warnings. However, in some cases it may prove
# necessary to filter out warnings, because we can't (easily) fix the root
# cause for them and we don't want users to see some warnings when they use
# SciPy correctly. So we list exceptions here. Add new entries only if
# there's a good reason.
allowed_filters = (
os.path.join('datasets', '_fetchers.py'),
os.path.join('datasets', '__init__.py'),
os.path.join('optimize', '_optimize.py'),
os.path.join('optimize', '_constraints.py'),
os.path.join('optimize', '_nnls.py'),
os.path.join('signal', '_ltisys.py'),
os.path.join('sparse', '__init__.py'), # np.matrix pending-deprecation
os.path.join('special', '_basic.py'), # gh-21801
os.path.join('stats', '_discrete_distns.py'), # gh-14901
os.path.join('stats', '_continuous_distns.py'),
os.path.join('stats', '_binned_statistic.py'), # gh-19345
os.path.join('stats', '_stats_py.py'), # gh-20743
os.path.join('stats', '_variation.py'), # gh-22827
os.path.join('stats', 'tests', 'test_axis_nan_policy.py'), # gh-20694
os.path.join('_lib', '_util.py'), # gh-19341
os.path.join('sparse', 'linalg', '_dsolve', 'linsolve.py'), # gh-17924
"conftest.py",
)
bad_filters = [item for item in bad_filters if item.split(':')[0] not in
allowed_filters]
if bad_filters:
raise AssertionError(
"Warning ignore filters should not be used outside of tests.\n"
"Found in:\n {}".format(
"\n ".join(bad_filters)))
| FindFuncs |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/annotatedVar5.py | {
"start": 796,
"end": 910
} | class ____(ClassC):
def __init__(self):
# This should generate an error.
self.inst_var1 = 3
| ClassE |
python | numba__numba | numba/core/types/npytypes.py | {
"start": 20076,
"end": 20597
} | class ____(Type):
def __init__(self, coef, domain=None, window=None, n_args=1):
super(PolynomialType, self).__init__(name=f'PolynomialType({coef}, {domain}, {domain}, {n_args})')
self.coef = coef
self.domain = domain
self.window = window
# We use n_args to keep track of the number of arguments in the
# constructor, since the types of domain and window arguments depend on
# that and we need that information when boxing
self.n_args = n_args
| PolynomialType |
python | dagster-io__dagster | python_modules/libraries/dagster-dlt/dagster_dlt/translator.py | {
"start": 754,
"end": 14422
} | class ____:
def get_asset_spec(self, data: DltResourceTranslatorData) -> AssetSpec:
"""Defines the asset spec for a given dlt resource.
This method can be overridden to provide custom asset key for a dlt resource.
Args:
data (DltResourceTranslatorData): The dlt data to pass to the translator,
including the resource and the destination.
Returns:
The :py:class:`dagster.AssetSpec` for the given dlt resource
"""
return AssetSpec(
key=self._resolve_back_compat_method(
"get_asset_key", self._default_asset_key_fn, data.resource
),
automation_condition=self._resolve_back_compat_method(
"get_automation_condition", self._default_automation_condition_fn, data.resource
),
deps=self._resolve_back_compat_method(
"get_deps_asset_keys", self._default_deps_fn, data.resource
),
description=self._resolve_back_compat_method(
"get_description", self._default_description_fn, data.resource
),
group_name=self._resolve_back_compat_method(
"get_group_name", self._default_group_name_fn, data.resource
),
metadata=self._resolve_back_compat_method(
"get_metadata", self._default_metadata_fn, data.resource
),
owners=self._resolve_back_compat_method(
"get_owners", self._default_owners_fn, data.resource
),
tags=self._resolve_back_compat_method("get_tags", self._default_tags_fn, data.resource),
kinds=self._resolve_back_compat_method(
"get_kinds", self._default_kinds_fn, data.resource, data.destination
),
)
def _resolve_back_compat_method(
self,
method_name: str,
default_fn: Union[Callable[[DltResource], Any], Callable[[DltResource, Destination], Any]],
resource: DltResource,
destination: Optional[Destination] = None,
):
method = getattr(type(self), method_name)
base_method = getattr(DagsterDltTranslator, method_name)
args = (resource,)
if method_name == "get_kinds":
args += (destination,)
if method is not base_method: # user defined this
supersession_warning(
subject=method_name,
additional_warn_text=(
f"Instead of overriding DagsterDltTranslator.{method_name}(), "
f"override DagsterDltTranslator.get_asset_spec()."
),
)
return method(self, *args)
else:
return default_fn(*args) # type: ignore
@superseded(
additional_warn_text="Use `DagsterDltTranslator.get_asset_spec(...).key` instead.",
)
@public
def get_asset_key(self, resource: DltResource) -> AssetKey:
"""Defines asset key for a given dlt resource key and dataset name.
This method can be overridden to provide custom asset key for a dlt resource.
Args:
resource (DltResource): dlt resource
Returns:
AssetKey of Dagster asset derived from dlt resource
"""
return self._default_asset_key_fn(resource)
def _default_asset_key_fn(self, resource: DltResource) -> AssetKey:
"""Defines asset key for a given dlt resource key and dataset name.
Args:
resource (DltResource): dlt resource
Returns:
AssetKey of Dagster asset derived from dlt resource
"""
return AssetKey(f"dlt_{resource.source_name}_{resource.name}")
@superseded(
additional_warn_text="Use `DagsterDltTranslator.get_asset_spec(...).auto_materialize_policy` instead.",
)
@public
def get_auto_materialize_policy(self, resource: DltResource) -> Optional[AutoMaterializePolicy]:
"""Defines resource specific auto materialize policy.
This method can be overridden to provide custom auto materialize policy for a dlt resource.
Args:
resource (DltResource): dlt resource
Returns:
Optional[AutoMaterializePolicy]: The auto-materialize policy for a resource
"""
return self._default_auto_materialize_policy_fn(resource)
def _default_auto_materialize_policy_fn(
self, resource: DltResource
) -> Optional[AutoMaterializePolicy]:
"""Defines resource specific auto materialize policy.
Args:
resource (DltResource): dlt resource
Returns:
Optional[AutoMaterializePolicy]: The auto-materialize policy for a resource
"""
return None
@superseded(
additional_warn_text="Use `DagsterDltTranslator.get_asset_spec(...).automation_condition` instead.",
)
@public
def get_automation_condition(self, resource: DltResource) -> Optional[AutomationCondition]:
"""Defines resource specific automation condition.
This method can be overridden to provide custom automation condition for a dlt resource.
Args:
resource (DltResource): dlt resource
Returns:
Optional[AutomationCondition]: The automation condition for a resource
"""
return self._default_automation_condition_fn(resource)
def _default_automation_condition_fn(
self, resource: DltResource
) -> Optional[AutomationCondition]:
"""Defines resource specific automation condition.
Args:
resource (DltResource): dlt resource
Returns:
Optional[AutomationCondition]: The automation condition for a resource
"""
auto_materialize_policy = self._default_auto_materialize_policy_fn(resource)
return (
auto_materialize_policy.to_automation_condition() if auto_materialize_policy else None
)
@superseded(
additional_warn_text=(
"Iterate over `DagsterDltTranslator.get_asset_spec(...).deps` to access `AssetDep.asset_key` instead."
),
)
@public
def get_deps_asset_keys(self, resource: DltResource) -> Iterable[AssetKey]:
"""Defines upstream asset dependencies given a dlt resource.
Defaults to a concatenation of `resource.source_name` and `resource.name`.
Args:
resource (DltResource): dlt resource
Returns:
Iterable[AssetKey]: The Dagster asset keys upstream of `dlt_resource_key`.
"""
return self._default_deps_fn(resource)
def _default_deps_fn(self, resource: DltResource) -> Iterable[AssetKey]:
"""Defines upstream asset dependencies given a dlt resource.
Args:
resource (DltResource): dlt resource
Returns:
Iterable[AssetKey]: The Dagster asset keys upstream of `dlt_resource_key`.
"""
if resource.is_transformer:
pipe = resource._pipe # noqa: SLF001
while pipe.has_parent:
pipe = pipe.parent
return [AssetKey(f"{resource.source_name}_{pipe.name}")]
return [AssetKey(f"{resource.source_name}_{resource.name}")]
@superseded(
additional_warn_text="Use `DagsterDltTranslator.get_asset_spec(...).description` instead.",
)
@public
def get_description(self, resource: DltResource) -> Optional[str]:
"""A method that takes in a dlt resource returns the Dagster description of the resource.
This method can be overridden to provide a custom description for a dlt resource.
Args:
resource (DltResource): dlt resource
Returns:
Optional[str]: The Dagster description for the dlt resource.
"""
return self._default_description_fn(resource)
def _default_description_fn(self, resource: DltResource) -> Optional[str]:
"""A method that takes in a dlt resource returns the Dagster description of the resource.
Args:
resource (DltResource): dlt resource
Returns:
Optional[str]: The Dagster description for the dlt resource.
"""
pipe = resource._pipe # noqa: SLF001
# If the function underlying the resource is a single callable,
# return the docstring of the callable.
if len(pipe.steps) == 1 and callable(pipe.steps[0]):
return pipe.steps[0].__doc__
return None
@superseded(
additional_warn_text="Use `DagsterDltTranslator.get_asset_spec(...).group_name` instead.",
)
@public
def get_group_name(self, resource: DltResource) -> Optional[str]:
"""A method that takes in a dlt resource and returns the Dagster group name of the resource.
This method can be overridden to provide a custom group name for a dlt resource.
Args:
resource (DltResource): dlt resource
Returns:
Optional[str]: A Dagster group name for the dlt resource.
"""
return self._default_group_name_fn(resource)
def _default_group_name_fn(self, resource: DltResource) -> Optional[str]:
"""A method that takes in a dlt resource and returns the Dagster group name of the resource.
Args:
resource (DltResource): dlt resource
Returns:
Optional[str]: A Dagster group name for the dlt resource.
"""
return None
@superseded(
additional_warn_text="Use `DagsterDltTranslator.get_asset_spec(...).metadata` instead.",
)
@public
def get_metadata(self, resource: DltResource) -> Mapping[str, Any]:
"""Defines resource specific metadata.
Args:
resource (DltResource): dlt resource
Returns:
Mapping[str, Any]: The custom metadata entries for this resource.
"""
return self._default_metadata_fn(resource)
def _default_metadata_fn(self, resource: DltResource) -> Mapping[str, Any]:
"""Defines resource specific metadata.
Args:
resource (DltResource): dlt resource
Returns:
Mapping[str, Any]: The custom metadata entries for this resource.
"""
return {}
@superseded(
additional_warn_text="Use `DagsterDltTranslator.get_asset_spec(...).owners` instead.",
)
@public
def get_owners(self, resource: DltResource) -> Optional[Sequence[str]]:
"""A method that takes in a dlt resource and returns the Dagster owners of the resource.
This method can be overridden to provide custom owners for a dlt resource.
Args:
resource (DltResource): dlt resource
Returns:
Optional[Sequence[str]]: A sequence of Dagster owners for the dlt resource.
"""
return self._default_owners_fn(resource)
def _default_owners_fn(self, resource: DltResource) -> Optional[Sequence[str]]:
"""A method that takes in a dlt resource and returns the Dagster owners of the resource.
Args:
resource (DltResource): dlt resource
Returns:
Optional[Sequence[str]]: A sequence of Dagster owners for the dlt resource.
"""
return None
@superseded(
additional_warn_text="Use `DagsterDltTranslator.get_asset_spec(...).tags` instead.",
)
@public
def get_tags(self, resource: DltResource) -> Mapping[str, str]:
"""A method that takes in a dlt resource and returns the Dagster tags of the structure.
This method can be overridden to provide custom tags for a dlt resource.
Args:
resource (DltResource): dlt resource
Returns:
Optional[Mapping[str, str]]: A dictionary representing the Dagster tags for the
dlt resource.
"""
return self._default_tags_fn(resource)
def _default_tags_fn(self, resource: DltResource) -> Mapping[str, str]:
"""A method that takes in a dlt resource and returns the Dagster tags of the structure.
Args:
resource (DltResource): dlt resource
Returns:
Optional[Mapping[str, str]]: A dictionary representing the Dagster tags for the
dlt resource.
"""
return {}
@superseded(
additional_warn_text="Use `DagsterDltTranslator.get_asset_spec(...).kinds` instead.",
)
@public
def get_kinds(self, resource: DltResource, destination: Destination) -> set[str]:
"""A method that takes in a dlt resource and returns the kinds which should be
attached. Defaults to the destination type and "dlt".
This method can be overridden to provide custom kinds for a dlt resource.
Args:
resource (DltResource): dlt resource
destination (Destination): dlt destination
Returns:
Set[str]: The kinds of the asset.
"""
return self._default_kinds_fn(resource, destination)
def _default_kinds_fn(
self, resource: DltResource, destination: Optional[Destination]
) -> set[str]:
"""A method that takes in a dlt resource and returns the kinds which should be
attached. Defaults to the destination type and "dlt".
Args:
resource (DltResource): dlt resource
destination (Optional[Destination]): dlt destination
Returns:
Set[str]: The kinds of the asset.
"""
kinds = {"dlt"}
destination_set = {destination.destination_name} if destination else set()
return kinds.union(destination_set)
| DagsterDltTranslator |
python | django-mptt__django-mptt | tests/myapp/tests.py | {
"start": 17783,
"end": 18350
} | class ____(TreeTestCase):
def setUp(self):
manager = CustomPKName.objects
c1 = manager.create(name="c1")
manager.create(name="c11", parent=c1)
manager.create(name="c12", parent=c1)
c2 = manager.create(name="c2")
manager.create(name="c21", parent=c2)
manager.create(name="c22", parent=c2)
manager.create(name="c3")
def test_get_next_sibling(self):
root = CustomPKName.objects.get(name="c12")
sib = root.get_next_sibling()
self.assertTrue(sib is None)
| CustomPKNameTestCase |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_gridlines04.py | {
"start": 315,
"end": 1442
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_gridlines04.xlsx")
def test_create_file(self):
"""Test XlsxWriter gridlines."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "radar"})
chart.axis_ids = [54977280, 54978816]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_y_axis(
{"major_gridlines": {"visible": 0}, "major_tick_mark": "cross"}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytransitions__transitions | tests/test_parallel.py | {
"start": 10894,
"end": 11272
} | class ____(TestParallel):
def setUp(self):
class GVMachine(HierarchicalGraphMachine):
def __init__(self, *args, **kwargs):
kwargs['graph_engine'] = "graphviz"
super(GVMachine, self).__init__(*args, **kwargs)
super(TestParallelWithGraphviz, self).setUp()
self.machine_cls = GVMachine
| TestParallelWithGraphviz |
python | pikepdf__pikepdf | src/pikepdf/form.py | {
"start": 27311,
"end": 32983
} | class ____(DefaultAppearanceStreamGenerator):
"""Alternate appearance stream generator to address limitations of the default one.
Improved features include:
* Supports multiline text fields, with caveats:
* Word wrap does not take scaling factors (other than font size) into account
* Spacing operators not taken into consideration either
* Quadding is still ignored
* Due to limitations in Firefox's PDF viewer, the font and the line breaks will be
incorrect when viewed in Firefox. PDFs filled by full-fat PDF readers, including
Adobe Acrobat Reader, exhibit the same behavior when viewed in Firefox.
* Supports combed text fields, with most of the same caveats as above
Otherwise, this implementation has most of the same limitations as the default
implementation. Unlike the default implementation, this is implemented in Python
rather than C++, so will also be less performant.
"""
def generate_text(self, field: AcroFormField):
"""Generate the appearance stream for a text field."""
if field.flags & FormFieldFlag.tx_multiline:
_text_appearance_multiline(self.pdf, self.form, field)
elif field.flags & FormFieldFlag.tx_comb:
_text_appearance_combed(self.pdf, self.form, field)
else:
# Fall back to the default implementation if we don't have a better one
super().generate_text(field)
# The following functions are used to generate appearance streams for text inputs. With
# some additional refinement, some of this functionality could be moved to the canvas
# submodule and exposed as part of a public API. Right now, however, it's probably too
# specialized; it couldn't be used to create an arbitrary text box separate from a form
# field.
#
# Generating appearance streams for text fields is not trivial. Section 12.7.4.3 of the
# PDF 2.0 spec (Variable text) lays out how this is to be done. Also refer to the
# following similar implementations for references:
#
# * https://github.com/py-pdf/pypdf/blob/5c3550f66c5da530eb8853da91afe0f942afcbef/pypdf/_writer.py#L857
# * https://github.com/mozilla/pdf.js/blob/2c87c4854a486d5cd0731b947dd622f8abe5e1b5/src/core/annotation.js#L2138
# * https://github.com/fwenzel/pdftk/blob/a3db40d1a43207eaad558aa9591ef81403b51616/java/pdftk/com/lowagie/text/pdf/AcroFields.java#L407
# * https://github.com/qpdf/qpdf/blob/81823f4032caefd1050bccb207d315839c1c48db/libqpdf/QPDFFormFieldObjectHelper.cc#L746
def _text_appearance_multiline(pdf: Pdf, form: AcroForm, field: AcroFormField):
da_info = _DaInfo.decode_for_field(field)
for annot in form.get_annotations_for_field(field):
# There is likely only one annot, but we have to allow for multiple
bbox = annot.rect.to_bbox()
with _text_stream_builder(da_info.da) as cs:
if da_info.text_matrix is None:
# If there is no existing matrix, create located at the upper-right of
# the bbox (with allowance for the height of the text).
top_offset = da_info.font.ascent
if top_offset is None:
# Fallback to full line height
top_offset = da_info.line_spacing
else:
# Scale to text-space
top_offset = da_info.font.convert_width(
top_offset, da_info.font_size
)
cs.set_text_matrix(
Matrix.identity().translated(
bbox.llx, Decimal(bbox.ury) - top_offset
)
)
_layout_multiline_text(cs, field.value_as_string, da_info, bbox)
_apply_appearance_stream(pdf, annot, cs, bbox, da_info)
def _text_appearance_combed(pdf: Pdf, form: AcroForm, field: AcroFormField):
da_info = _DaInfo.decode_for_field(field)
for annot in form.get_annotations_for_field(field):
# There is likely only one annot, but we have to allow for multiple
bbox = annot.rect.to_bbox()
with _text_stream_builder(da_info.da) as cs:
if da_info.text_matrix is None:
# If there is no existing matrix, create located at the lower-right of
# the bbox (with allowance for the descent of the text).
# Fallback to zero
bottom_offset = da_info.font.descent or 0
# Scale to text-space
bottom_offset = da_info.font.convert_width(
bottom_offset, da_info.font_size
)
cs.set_text_matrix(
Matrix.identity().translated(
bbox.llx, Decimal(bbox.lly) - bottom_offset
)
)
_layout_combed_text(
cs,
field.value_as_string,
da_info,
bbox,
field.get_inheritable_field_value("/MaxLen"),
)
_apply_appearance_stream(pdf, annot, cs, bbox, da_info)
def _apply_appearance_stream(pdf, annot, cs, bbox, da_info):
"""Convert content stream to a Form XObject and save in annotation.
The appearance stream is saved in the annotation dictionary (AP) under the normal
(N) key.
"""
fonts_dict = Dictionary()
fonts_dict[da_info.font_name] = da_info.font.register(pdf)
resources = Dictionary(Font=fonts_dict)
xobj = _create_form_xobject(pdf, bbox, cs, resources)
if Name.AP in annot.obj:
annot.obj.AP.N = xobj
else:
annot.obj.AP = Dictionary(N=xobj)
@dataclass
| ExtendedAppearanceStreamGenerator |
python | jupyterlab__jupyterlab | jupyterlab/tests/test_jupyterlab.py | {
"start": 1562,
"end": 4664
} | class ____(TestCase):
def tempdir(self):
td = TemporaryDirectory()
self.tempdirs.append(td)
return td.name
def setUp(self):
# Any TemporaryDirectory objects appended to this list will be cleaned
# up at the end of the test run.
self.tempdirs = []
self.devnull = open(os.devnull, "w") # noqa
@self.addCleanup
def cleanup_tempdirs():
for d in self.tempdirs:
d.cleanup()
self.test_dir = self.tempdir()
self.data_dir = pjoin(self.test_dir, "data")
self.config_dir = pjoin(self.test_dir, "config")
self.pkg_names = {}
# Copy in the mock packages.
for name in ["extension", "incompat", "package", "mimeextension"]:
src = pjoin(here, "mock_packages", name)
def ignore(dname, files):
if "node_modules" in dname:
files = []
if "node_modules" in files:
files.remove("node_modules")
return dname, files
dest = pjoin(self.test_dir, name)
shutil.copytree(src, dest, ignore=ignore)
# Make a node modules folder so npm install is not called.
if not os.path.exists(pjoin(dest, "node_modules")):
os.makedirs(pjoin(dest, "node_modules"))
setattr(self, "mock_" + name, dest)
with open(pjoin(dest, "package.json")) as fid:
data = json.load(fid)
self.pkg_names[name] = data["name"]
self.patches = []
p = patch.dict(
"os.environ",
{
"JUPYTER_CONFIG_DIR": self.config_dir,
"JUPYTER_DATA_DIR": self.data_dir,
"JUPYTERLAB_DIR": pjoin(self.data_dir, "lab"),
},
)
self.patches.append(p)
for mod in [paths]:
if hasattr(mod, "ENV_JUPYTER_PATH"):
p = patch.object(mod, "ENV_JUPYTER_PATH", [self.data_dir])
self.patches.append(p)
if hasattr(mod, "ENV_CONFIG_PATH"):
p = patch.object(mod, "ENV_CONFIG_PATH", [self.config_dir])
self.patches.append(p)
if hasattr(mod, "CONFIG_PATH"):
p = patch.object(mod, "CONFIG_PATH", self.config_dir)
self.patches.append(p)
if hasattr(mod, "BUILD_PATH"):
p = patch.object(mod, "BUILD_PATH", self.data_dir)
self.patches.append(p)
for p in self.patches:
p.start()
self.addCleanup(p.stop)
# verify our patches
self.assertEqual(paths.ENV_CONFIG_PATH, [self.config_dir])
self.assertEqual(paths.ENV_JUPYTER_PATH, [self.data_dir])
self.assertEqual(
Path(commands.get_app_dir()).resolve(), (Path(self.data_dir) / "lab").resolve()
)
self.app_dir = commands.get_app_dir()
# Set pinned extension names
self.pinned_packages = ["jupyterlab-test-extension@1.0", "jupyterlab-test-extension@2.0"]
| AppHandlerTest |
python | huggingface__transformers | tests/models/prophetnet/test_modeling_prophetnet.py | {
"start": 42382,
"end": 49786
} | class ____(unittest.TestCase):
@slow
def test_pretrained_checkpoint_hidden_states(self):
model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased")
model.to(torch_device)
# encoder-decoder outputs
encoder_ids = torch.tensor(
[
[
2871,
102,
2048,
3176,
2780,
1997,
2871,
26727,
2169,
2097,
12673,
1996,
8457,
2006,
2049,
8240,
2859,
2799,
1012,
2023,
6512,
2038,
2174,
13977,
2195,
25962,
1012,
102,
]
]
).to(torch_device)
decoder_prev_ids = torch.tensor([[102, 2129, 2116, 2372, 2024, 2006, 2169, 1997, 2122, 2048, 2780, 1029]]).to(
torch_device
)
output = model(
input_ids=encoder_ids,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=decoder_prev_ids,
)
output_predited_logits = output[0]
expected_shape = torch.Size((1, 12, 30522))
self.assertEqual(output_predited_logits.shape, expected_shape)
expected_slice = torch.tensor(
[[[-7.7729, -8.0343, -8.26001], [-7.74213, -7.8629, -8.6000], [-7.7328, -7.8269, -8.5264]]]
).to(torch_device)
# torch.testing.assert_close(output_predited_logits[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
assert torch.allclose(output_predited_logits[:, :3, :3], expected_slice, atol=1e-4)
# encoder outputs
encoder_outputs = model.prophetnet.encoder(encoder_ids)[0]
expected_encoder_outputs_slice = torch.tensor(
[[[-0.2526, -0.1951, -0.2185], [-0.8923, 0.2992, -0.4623], [-0.4585, 0.0165, -0.6652]]]
).to(torch_device)
expected_shape_encoder = torch.Size((1, 28, 1024))
self.assertEqual(encoder_outputs.shape, expected_shape_encoder)
# torch.testing.assert_close(encoder_outputs[:, :3, :3], expected_encoder_outputs_slice, rtol=1e-4, atol=1e-4)
assert torch.allclose(encoder_outputs[:, :3, :3], expected_encoder_outputs_slice, atol=1e-4)
# decoder outputs
decoder_outputs = model.prophetnet.decoder(decoder_prev_ids, encoder_hidden_states=encoder_outputs)
predicting_streams = decoder_outputs[1].view(1, model.config.ngram, 12, -1)
predicting_streams_logits = model.lm_head(predicting_streams)
next_first_stream_logits = predicting_streams_logits[:, 0]
# torch.testing.assert_close(next_first_stream_logits[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
assert torch.allclose(next_first_stream_logits[:, :3, :3], expected_slice, atol=1e-4)
@slow
def test_cnndm_inference(self):
model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased-cnndm")
model.config.max_length = 512
model.to(torch_device)
tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased-cnndm")
ARTICLE_TO_SUMMARIZE = (
"USTC was founded in Beijing by the Chinese Academy of Sciences (CAS) in September 1958. The Director of"
" CAS, Mr. Guo Moruo was appointed the first president of USTC. USTC's founding mission was to develop a"
" high-level science and technology workforce, as deemed critical for development of China's economy,"
' defense, and science and technology education. The establishment was hailed as "A Major Event in the'
' History of Chinese Education and Science." CAS has supported USTC by combining most of its institutes'
" with the departments of the university. USTC is listed in the top 16 national key universities, becoming"
" the youngest national key university.".lower()
)
input_ids = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=511, return_tensors="pt").input_ids
input_ids = input_ids.to(torch_device)
summary_ids = model.generate(
input_ids, num_beams=4, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True
)
EXPECTED_SUMMARIZE_512 = (
"us ##tc was founded by the chinese academy of sciences ( cas ) in 1958 . [X_SEP] us ##tc is listed in the"
" top 16 national key universities ."
)
generated_titles = [
" ".join(tokenizer.convert_ids_to_tokens(g, skip_special_tokens=True)) for g in summary_ids
]
self.assertListEqual(
[EXPECTED_SUMMARIZE_512],
generated_titles,
)
input_ids = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=99, return_tensors="pt").input_ids
input_ids = input_ids.to(torch_device)
# actually 98 tokens are used. max_length=100 contains bos and eos.
summary_ids = model.generate(
input_ids, num_beams=4, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True
)
EXPECTED_SUMMARIZE_100 = (
r"us ##tc was founded in beijing by the chinese academy of sciences ( cas ) in 1958 . [X_SEP] us ##tc "
"'"
" s founding mission was to develop a high - level science and technology workforce . [X_SEP]"
' establishment hailed as " a major event in the history of chinese education and science "'
)
generated_titles = [
" ".join(tokenizer.convert_ids_to_tokens(g, skip_special_tokens=True)) for g in summary_ids
]
self.assertListEqual(
[EXPECTED_SUMMARIZE_100],
generated_titles,
)
@slow
def test_question_gen_inference(self):
model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased-squad-qg")
model.to(torch_device)
tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased-squad-qg")
INPUTS = [
"Bill Gates [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.",
"1975 [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.",
"April 4, 1975 [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.",
]
input_ids = tokenizer(INPUTS, truncation=True, padding=True, return_tensors="pt").input_ids
input_ids = input_ids.to(torch_device)
gen_output = model.generate(input_ids, num_beams=5, early_stopping=True)
generated_questions = tokenizer.batch_decode(gen_output, skip_special_tokens=True)
EXPECTED_QUESTIONS = [
"along with paul allen, who founded microsoft?",
"what year was microsoft founded?",
"when was microsoft founded?",
]
self.assertListEqual(
EXPECTED_QUESTIONS,
generated_questions,
)
| ProphetNetModelIntegrationTest |
python | numba__numba | numba/misc/appdirs.py | {
"start": 16340,
"end": 22387
} | class ____(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname, appauthor=None, version=None, roaming=False,
multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_config_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
import winreg as _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
if system == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir", "site_data_dir",
"user_config_dir", "site_config_dir",
"user_cache_dir", "user_log_dir")
print("-- app dirs %s --" % __version__)
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
| AppDirs |
python | tensorflow__tensorflow | tensorflow/python/ops/numpy_ops/tests/test_util.py | {
"start": 10430,
"end": 20486
} | class ____(ScalarShape): pass
NUMPY_SCALAR_SHAPE = _NumpyScalar()
PYTHON_SCALAR_SHAPE = _PythonScalar()
def _dims_of_shape(shape):
"""Converts `shape` to a tuple of dimensions."""
if type(shape) in (list, tuple):
return shape
elif isinstance(shape, ScalarShape):
return ()
else:
raise TypeError(type(shape))
def _cast_to_shape(value, shape, dtype):
"""Casts `value` to the correct Python type for `shape` and `dtype`."""
if shape is NUMPY_SCALAR_SHAPE:
# explicitly cast to NumPy scalar in case `value` is a Python scalar.
return onp.dtype(dtype).type(value)
elif shape is PYTHON_SCALAR_SHAPE:
# explicitly cast to Python scalar via https://stackoverflow.com/a/11389998
return numpy_compat.np_asarray(value).item()
elif type(shape) in (list, tuple):
assert onp.shape(value) == tuple(shape)
return value
else:
raise TypeError(type(shape))
def dtype_str(dtype):
return onp.dtype(dtype).name
def format_shape_dtype_string(shape, dtype):
if shape is NUMPY_SCALAR_SHAPE:
return dtype_str(dtype)
elif shape is PYTHON_SCALAR_SHAPE:
return 'py' + dtype_str(dtype)
elif type(shape) in (list, tuple):
shapestr = ','.join(str(dim) for dim in shape)
return '{}[{}]'.format(dtype_str(dtype), shapestr)
elif type(shape) is int:
return '{}[{},]'.format(dtype_str(dtype), shape)
elif isinstance(shape, onp.ndarray):
return '{}[{}]'.format(dtype_str(dtype), shape)
else:
raise TypeError(type(shape))
def _rand_dtype(rand, shape, dtype, scale=1., post=lambda x: x):
"""Produce random values given shape, dtype, scale, and post-processor.
Args:
rand: a function for producing random values of a given shape, e.g. a
bound version of either onp.RandomState.randn or onp.RandomState.rand.
shape: a shape value as a tuple of positive integers.
dtype: a numpy dtype.
scale: optional, a multiplicative scale for the random values (default 1).
post: optional, a callable for post-processing the random values (default
identity).
Returns:
An ndarray of the given shape and dtype using random values based on a call
to rand but scaled, converted to the appropriate dtype, and post-processed.
"""
r = lambda: numpy_compat.np_asarray(scale * rand(*_dims_of_shape(shape)),
dtype)
if onp.issubdtype(dtype, onp.complexfloating):
vals = r() + 1.0j * r()
else:
vals = r()
return _cast_to_shape(numpy_compat.np_asarray(post(vals), dtype), shape,
dtype)
def rand_default(scale=3):
randn = npr.RandomState(0).randn
return partial(_rand_dtype, randn, scale=scale)
def rand_nonzero():
post = lambda x: onp.where(x == 0, onp.array(1, dtype=x.dtype), x)
randn = npr.RandomState(0).randn
return partial(_rand_dtype, randn, scale=3, post=post)
def rand_positive():
post = lambda x: x + 1
rand = npr.RandomState(0).rand
return partial(_rand_dtype, rand, scale=2, post=post)
def rand_small():
randn = npr.RandomState(0).randn
return partial(_rand_dtype, randn, scale=1e-3)
def rand_not_small(offset=10.):
post = lambda x: x + onp.where(x > 0, offset, -offset)
randn = npr.RandomState(0).randn
return partial(_rand_dtype, randn, scale=3., post=post)
def rand_small_positive():
rand = npr.RandomState(0).rand
return partial(_rand_dtype, rand, scale=2e-5)
def rand_uniform(low=0.0, high=1.0):
assert low < high
rand = npr.RandomState(0).rand
post = lambda x: x * (high - low) + low
return partial(_rand_dtype, rand, post=post)
def rand_some_equal():
randn = npr.RandomState(0).randn
rng = npr.RandomState(0)
def post(x):
x_ravel = x.ravel()
if len(x_ravel) == 0:
return x
flips = rng.rand(*onp.shape(x)) < 0.5
return onp.where(flips, x_ravel[0], x)
return partial(_rand_dtype, randn, scale=100., post=post)
def rand_some_inf():
"""Return a random sampler that produces infinities in floating types."""
rng = npr.RandomState(1)
base_rand = rand_default()
"""
TODO: Complex numbers are not correctly tested
If blocks should be switched in order, and relevant tests should be fixed
"""
def rand(shape, dtype):
"""The random sampler function."""
if not onp.issubdtype(dtype, onp.floating):
# only float types have inf
return base_rand(shape, dtype)
if onp.issubdtype(dtype, onp.complexfloating):
base_dtype = onp.real(onp.array(0, dtype=dtype)).dtype
out = (rand(shape, base_dtype) +
onp.array(1j, dtype) * rand(shape, base_dtype))
return _cast_to_shape(out, shape, dtype)
dims = _dims_of_shape(shape)
posinf_flips = rng.rand(*dims) < 0.1
neginf_flips = rng.rand(*dims) < 0.1
vals = base_rand(shape, dtype)
vals = onp.where(posinf_flips, onp.array(onp.inf, dtype=dtype), vals)
vals = onp.where(neginf_flips, onp.array(-onp.inf, dtype=dtype), vals)
return _cast_to_shape(numpy_compat.np_asarray(vals, dtype=dtype), shape,
dtype)
return rand
def rand_some_nan():
"""Return a random sampler that produces nans in floating types."""
rng = npr.RandomState(1)
base_rand = rand_default()
def rand(shape, dtype):
"""The random sampler function."""
if onp.issubdtype(dtype, onp.complexfloating):
base_dtype = onp.real(onp.array(0, dtype=dtype)).dtype
out = (rand(shape, base_dtype) +
onp.array(1j, dtype) * rand(shape, base_dtype))
return _cast_to_shape(out, shape, dtype)
if not onp.issubdtype(dtype, onp.floating):
# only float types have inf
return base_rand(shape, dtype)
dims = _dims_of_shape(shape)
nan_flips = rng.rand(*dims) < 0.1
vals = base_rand(shape, dtype)
vals = onp.where(nan_flips, onp.array(onp.nan, dtype=dtype), vals)
return _cast_to_shape(numpy_compat.np_asarray(vals, dtype=dtype), shape,
dtype)
return rand
def rand_some_inf_and_nan():
"""Return a random sampler that produces infinities in floating types."""
rng = npr.RandomState(1)
base_rand = rand_default()
"""
TODO: Complex numbers are not correctly tested
If blocks should be switched in order, and relevant tests should be fixed
"""
def rand(shape, dtype):
"""The random sampler function."""
if not onp.issubdtype(dtype, onp.floating):
# only float types have inf
return base_rand(shape, dtype)
if onp.issubdtype(dtype, onp.complexfloating):
base_dtype = onp.real(onp.array(0, dtype=dtype)).dtype
out = (rand(shape, base_dtype) +
onp.array(1j, dtype) * rand(shape, base_dtype))
return _cast_to_shape(out, shape, dtype)
dims = _dims_of_shape(shape)
posinf_flips = rng.rand(*dims) < 0.1
neginf_flips = rng.rand(*dims) < 0.1
nan_flips = rng.rand(*dims) < 0.1
vals = base_rand(shape, dtype)
vals = onp.where(posinf_flips, onp.array(onp.inf, dtype=dtype), vals)
vals = onp.where(neginf_flips, onp.array(-onp.inf, dtype=dtype), vals)
vals = onp.where(nan_flips, onp.array(onp.nan, dtype=dtype), vals)
return _cast_to_shape(numpy_compat.np_asarray(vals, dtype=dtype), shape,
dtype)
return rand
# TODO(mattjj): doesn't handle complex types
def rand_some_zero():
"""Return a random sampler that produces some zeros."""
rng = npr.RandomState(1)
base_rand = rand_default()
def rand(shape, dtype):
"""The random sampler function."""
dims = _dims_of_shape(shape)
zeros = rng.rand(*dims) < 0.5
vals = base_rand(shape, dtype)
vals = onp.where(zeros, onp.array(0, dtype=dtype), vals)
return _cast_to_shape(numpy_compat.np_asarray(vals, dtype=dtype), shape,
dtype)
return rand
def rand_int(low, high=None):
randint = npr.RandomState(0).randint
def fn(shape, dtype):
return randint(low, high=high, size=shape, dtype=dtype)
return fn
def rand_unique_int():
randchoice = npr.RandomState(0).choice
def fn(shape, dtype):
return randchoice(onp.arange(onp.prod(shape), dtype=dtype),
size=shape, replace=False)
return fn
def rand_bool():
rng = npr.RandomState(0)
def generator(shape, dtype):
return _cast_to_shape(rng.rand(*_dims_of_shape(shape)) < 0.5, shape, dtype)
return generator
def check_raises(thunk, err_type, msg):
try:
thunk()
assert False
except err_type as e:
assert str(e).startswith(msg), "\n{}\n\n{}\n".format(e, msg)
def check_raises_regexp(thunk, err_type, pattern):
try:
thunk()
assert False
except err_type as e:
assert re.match(pattern, str(e)), "{}\n\n{}\n".format(e, pattern)
def _iter_eqns(jaxpr):
# TODO(necula): why doesn't this search in params?
for eqn in jaxpr.eqns:
yield eqn
for subjaxpr in core.subjaxprs(jaxpr):
yield from _iter_eqns(subjaxpr)
def assert_dot_precision(expected_precision, fun, *args):
jaxpr = api.make_jaxpr(fun)(*args)
precisions = [eqn.params['precision'] for eqn in _iter_eqns(jaxpr.jaxpr)
if eqn.primitive == lax.dot_general_p]
for precision in precisions:
msg = "Unexpected precision: {} != {}".format(expected_precision, precision)
assert precision == expected_precision, msg
_CACHED_INDICES: Dict[int, Sequence[int]] = {}
def cases_from_list(xs):
xs = list(xs)
n = len(xs)
k = min(n, FLAGS.num_generated_cases)
# Random sampling for every parameterized test is expensive. Do it once and
# cache the result.
indices = _CACHED_INDICES.get(n)
if indices is None:
rng = npr.RandomState(42)
_CACHED_INDICES[n] = indices = rng.permutation(n)
return [xs[i] for i in indices[:k]]
def cases_from_gens(*gens):
sizes = [1, 3, 10]
cases_per_size = int(FLAGS.num_generated_cases / len(sizes)) + 1
for size in sizes:
for i in range(cases_per_size):
yield ('_{}_{}'.format(size, i),) + tuple(gen(size) for gen in gens)
def to_np(a):
return nest.map_structure(np_array_ops.asarray, a)
def to_tf_fn(f):
return lambda *args: f(*to_np(args))
| _PythonScalar |
python | matplotlib__matplotlib | lib/matplotlib/__init__.py | {
"start": 7356,
"end": 10929
} | class ____:
__version__ = property(lambda self: _get_version())
__version_info__ = property(
lambda self: _parse_to_version_info(self.__version__))
def _check_versions():
# Quickfix to ensure Microsoft Visual C++ redistributable
# DLLs are loaded before importing kiwisolver
from . import ft2font # noqa: F401
for modname, minver in [
("cycler", "0.10"),
("dateutil", "2.7"),
("kiwisolver", "1.3.1"),
("numpy", "1.25"),
("pyparsing", "2.3.1"),
]:
module = importlib.import_module(modname)
if parse_version(module.__version__) < parse_version(minver):
raise ImportError(f"Matplotlib requires {modname}>={minver}; "
f"you have {module.__version__}")
_check_versions()
# The decorator ensures this always returns the same handler (and it is only
# attached once).
@functools.cache
def _ensure_handler():
"""
The first time this function is called, attach a `StreamHandler` using the
same format as `logging.basicConfig` to the Matplotlib root logger.
Return this handler every time this function is called.
"""
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
_log.addHandler(handler)
return handler
def set_loglevel(level):
"""
Configure Matplotlib's logging levels.
Matplotlib uses the standard library `logging` framework under the root
logger 'matplotlib'. This is a helper function to:
- set Matplotlib's root logger level
- set the root logger handler's level, creating the handler
if it does not exist yet
Typically, one should call ``set_loglevel("INFO")`` or
``set_loglevel("DEBUG")`` to get additional debugging information.
Users or applications that are installing their own logging handlers
may want to directly manipulate ``logging.getLogger('matplotlib')`` rather
than use this function.
Parameters
----------
level : {"NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"}
The log level as defined in `Python logging levels
<https://docs.python.org/3/library/logging.html#logging-levels>`__.
For backwards compatibility, the levels are case-insensitive, but
the capitalized version is preferred in analogy to `logging.Logger.setLevel`.
Notes
-----
The first time this function is called, an additional handler is attached
to Matplotlib's root handler; this handler is reused every time and this
function simply manipulates the logger and handler's level.
"""
_log.setLevel(level.upper())
_ensure_handler().setLevel(level.upper())
def _logged_cached(fmt, func=None):
"""
Decorator that logs a function's return value, and memoizes that value.
After ::
@_logged_cached(fmt)
def func(): ...
the first call to *func* will log its return value at the DEBUG level using
%-format string *fmt*, and memoize it; later calls to *func* will directly
return that value.
"""
if func is None: # Return the actual decorator.
return functools.partial(_logged_cached, fmt)
called = False
ret = None
@functools.wraps(func)
def wrapper(**kwargs):
nonlocal called, ret
if not called:
ret = func(**kwargs)
called = True
_log.debug(fmt, ret)
return ret
return wrapper
_ExecInfo = namedtuple("_ExecInfo", "executable raw_version version")
| __getattr__ |
python | django__django | tests/middleware_exceptions/tests.py | {
"start": 293,
"end": 5816
} | class ____(SimpleTestCase):
def tearDown(self):
mw.log = []
@override_settings(
MIDDLEWARE=["middleware_exceptions.middleware.ProcessViewNoneMiddleware"]
)
def test_process_view_return_none(self):
response = self.client.get("/middleware_exceptions/view/")
self.assertEqual(mw.log, ["processed view normal_view"])
self.assertEqual(response.content, b"OK")
@override_settings(
MIDDLEWARE=["middleware_exceptions.middleware.ProcessViewMiddleware"]
)
def test_process_view_return_response(self):
response = self.client.get("/middleware_exceptions/view/")
self.assertEqual(response.content, b"Processed view normal_view")
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.ProcessViewTemplateResponseMiddleware",
"middleware_exceptions.middleware.LogMiddleware",
]
)
def test_templateresponse_from_process_view_rendered(self):
"""
TemplateResponses returned from process_view() must be rendered before
being passed to any middleware that tries to access response.content,
such as middleware_exceptions.middleware.LogMiddleware.
"""
response = self.client.get("/middleware_exceptions/view/")
self.assertEqual(
response.content,
b"Processed view normal_view\nProcessViewTemplateResponseMiddleware",
)
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.ProcessViewTemplateResponseMiddleware",
"middleware_exceptions.middleware.TemplateResponseMiddleware",
]
)
def test_templateresponse_from_process_view_passed_to_process_template_response(
self,
):
"""
TemplateResponses returned from process_view() should be passed to any
template response middleware.
"""
response = self.client.get("/middleware_exceptions/view/")
expected_lines = [
b"Processed view normal_view",
b"ProcessViewTemplateResponseMiddleware",
b"TemplateResponseMiddleware",
]
self.assertEqual(response.content, b"\n".join(expected_lines))
@override_settings(
MIDDLEWARE=["middleware_exceptions.middleware.TemplateResponseMiddleware"]
)
def test_process_template_response(self):
response = self.client.get("/middleware_exceptions/template_response/")
self.assertEqual(
response.content, b"template_response OK\nTemplateResponseMiddleware"
)
@override_settings(
MIDDLEWARE=["middleware_exceptions.middleware.NoTemplateResponseMiddleware"]
)
def test_process_template_response_returns_none(self):
msg = (
"NoTemplateResponseMiddleware.process_template_response didn't "
"return an HttpResponse object. It returned None instead."
)
with self.assertRaisesMessage(ValueError, msg):
self.client.get("/middleware_exceptions/template_response/")
@override_settings(MIDDLEWARE=["middleware_exceptions.middleware.LogMiddleware"])
def test_view_exception_converted_before_middleware(self):
response = self.client.get("/middleware_exceptions/permission_denied/")
self.assertEqual(mw.log, [(response.status_code, response.content)])
self.assertEqual(response.status_code, 403)
@override_settings(
MIDDLEWARE=["middleware_exceptions.middleware.ProcessExceptionMiddleware"]
)
def test_view_exception_handled_by_process_exception(self):
response = self.client.get("/middleware_exceptions/error/")
self.assertEqual(response.content, b"Exception caught")
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.ProcessExceptionLogMiddleware",
"middleware_exceptions.middleware.ProcessExceptionMiddleware",
]
)
def test_response_from_process_exception_short_circuits_remainder(self):
response = self.client.get("/middleware_exceptions/error/")
self.assertEqual(mw.log, [])
self.assertEqual(response.content, b"Exception caught")
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.ProcessExceptionMiddleware",
"middleware_exceptions.middleware.ProcessExceptionLogMiddleware",
]
)
def test_response_from_process_exception_when_return_response(self):
response = self.client.get("/middleware_exceptions/error/")
self.assertEqual(mw.log, ["process-exception"])
self.assertEqual(response.content, b"Exception caught")
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.LogMiddleware",
"middleware_exceptions.middleware.NotFoundMiddleware",
]
)
def test_exception_in_middleware_converted_before_prior_middleware(self):
response = self.client.get("/middleware_exceptions/view/")
self.assertEqual(mw.log, [(404, response.content)])
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE=["middleware_exceptions.middleware.ProcessExceptionMiddleware"]
)
def test_exception_in_render_passed_to_process_exception(self):
response = self.client.get("/middleware_exceptions/exception_in_render/")
self.assertEqual(response.content, b"Exception caught")
@override_settings(ROOT_URLCONF="middleware_exceptions.urls")
| MiddlewareTests |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/registry.py | {
"start": 1240,
"end": 1393
} | class ____(str, Enum, metaclass=CaseInsensitiveKeys):
SOURCE = "sourceDefinitionId"
DESTINATION = "destinationDefinitionId"
| ConnectorTypePrimaryKey |
python | mamba-org__mamba | docs/source/tools/mermaid.py | {
"start": 988,
"end": 1054
} | class ____(SphinxError):
category = "Mermaid error"
| MermaidError |
python | dask__dask | dask/utils.py | {
"start": 39023,
"end": 55941
} | class ____:
"""A mixin for dynamically implementing operators"""
__slots__ = ()
@classmethod
def _bind_operator(cls, op):
"""bind operator to this class"""
name = op.__name__
if name.endswith("_"):
# for and_ and or_
name = name[:-1]
elif name == "inv":
name = "invert"
meth = f"__{name}__"
if name in ("abs", "invert", "neg", "pos"):
setattr(cls, meth, cls._get_unary_operator(op))
else:
setattr(cls, meth, cls._get_binary_operator(op))
if name in ("eq", "gt", "ge", "lt", "le", "ne", "getitem"):
return
rmeth = f"__r{name}__"
setattr(cls, rmeth, cls._get_binary_operator(op, inv=True))
@classmethod
def _get_unary_operator(cls, op):
"""Must return a method used by unary operator"""
raise NotImplementedError
@classmethod
def _get_binary_operator(cls, op, inv=False):
"""Must return a method used by binary operator"""
raise NotImplementedError
def partial_by_order(*args, **kwargs):
"""
>>> from operator import add
>>> partial_by_order(5, function=add, other=[(1, 10)])
15
"""
function = kwargs.pop("function")
other = kwargs.pop("other")
args2 = list(args)
for i, arg in other:
args2.insert(i, arg)
return function(*args2, **kwargs)
def is_arraylike(x) -> bool:
"""Is this object a numpy array or something similar?
This function tests specifically for an object that already has
array attributes (e.g. np.ndarray, dask.array.Array, cupy.ndarray,
sparse.COO), **NOT** for something that can be coerced into an
array object (e.g. Python lists and tuples). It is meant for dask
developers and developers of downstream libraries.
Note that this function does not correspond with NumPy's
definition of array_like, which includes any object that can be
coerced into an array (see definition in the NumPy glossary):
https://numpy.org/doc/stable/glossary.html
Examples
--------
>>> import numpy as np
>>> is_arraylike(np.ones(5))
True
>>> is_arraylike(np.ones(()))
True
>>> is_arraylike(5)
False
>>> is_arraylike('cat')
False
"""
from dask.base import is_dask_collection
is_duck_array = hasattr(x, "__array_function__") or hasattr(x, "__array_ufunc__")
return bool(
hasattr(x, "shape")
and isinstance(x.shape, tuple)
and hasattr(x, "dtype")
and not any(is_dask_collection(n) for n in x.shape)
# We special case scipy.sparse and cupyx.scipy.sparse arrays as having partial
# support for them is useful in scenarios where we mostly call `map_partitions`
# or `map_blocks` with scikit-learn functions on dask arrays and dask dataframes.
# https://github.com/dask/dask/pull/3738
and (is_duck_array or "scipy.sparse" in typename(type(x)))
)
def is_dataframe_like(df) -> bool:
"""Looks like a Pandas DataFrame"""
if (df.__class__.__module__, df.__class__.__name__) == (
"pandas.core.frame",
"DataFrame",
):
# fast exec for most likely input
return True
typ = df.__class__
return (
all(hasattr(typ, name) for name in ("groupby", "head", "merge", "mean"))
and all(hasattr(df, name) for name in ("dtypes", "columns"))
and not any(hasattr(typ, name) for name in ("name", "dtype"))
)
def is_series_like(s) -> bool:
"""Looks like a Pandas Series"""
typ = s.__class__
return (
all(hasattr(typ, name) for name in ("groupby", "head", "mean"))
and all(hasattr(s, name) for name in ("dtype", "name"))
and "index" not in typ.__name__.lower()
)
def is_index_like(s) -> bool:
"""Looks like a Pandas Index"""
typ = s.__class__
return (
all(hasattr(s, name) for name in ("name", "dtype"))
and "index" in typ.__name__.lower()
)
def is_cupy_type(x) -> bool:
# TODO: avoid explicit reference to CuPy
return "cupy" in str(type(x))
def natural_sort_key(s: str) -> list[str | int]:
"""
Sorting `key` function for performing a natural sort on a collection of
strings
See https://en.wikipedia.org/wiki/Natural_sort_order
Parameters
----------
s : str
A string that is an element of the collection being sorted
Returns
-------
tuple[str or int]
Tuple of the parts of the input string where each part is either a
string or an integer
Examples
--------
>>> a = ['f0', 'f1', 'f2', 'f8', 'f9', 'f10', 'f11', 'f19', 'f20', 'f21']
>>> sorted(a)
['f0', 'f1', 'f10', 'f11', 'f19', 'f2', 'f20', 'f21', 'f8', 'f9']
>>> sorted(a, key=natural_sort_key)
['f0', 'f1', 'f2', 'f8', 'f9', 'f10', 'f11', 'f19', 'f20', 'f21']
"""
return [int(part) if part.isdigit() else part for part in re.split(r"(\d+)", s)]
def parse_bytes(s: float | str) -> int:
"""Parse byte string to numbers
>>> from dask.utils import parse_bytes
>>> parse_bytes('100')
100
>>> parse_bytes('100 MB')
100000000
>>> parse_bytes('100M')
100000000
>>> parse_bytes('5kB')
5000
>>> parse_bytes('5.4 kB')
5400
>>> parse_bytes('1kiB')
1024
>>> parse_bytes('1e6')
1000000
>>> parse_bytes('1e6 kB')
1000000000
>>> parse_bytes('MB')
1000000
>>> parse_bytes(123)
123
>>> parse_bytes('5 foos')
Traceback (most recent call last):
...
ValueError: Could not interpret 'foos' as a byte unit
"""
if isinstance(s, (int, float)):
return int(s)
s = s.replace(" ", "")
if not any(char.isdigit() for char in s):
s = f"1{s}"
for i in range(len(s) - 1, -1, -1):
if not s[i].isalpha():
break
index = i + 1
prefix = s[:index]
suffix = s[index:]
try:
n = float(prefix)
except ValueError as e:
raise ValueError(f"Could not interpret '{prefix}' as a number") from e
try:
multiplier = byte_sizes[suffix.lower()]
except KeyError as e:
raise ValueError(f"Could not interpret '{suffix}' as a byte unit") from e
result = n * multiplier
return int(result)
byte_sizes = {
"kB": 10**3,
"MB": 10**6,
"GB": 10**9,
"TB": 10**12,
"PB": 10**15,
"KiB": 2**10,
"MiB": 2**20,
"GiB": 2**30,
"TiB": 2**40,
"PiB": 2**50,
"B": 1,
"": 1,
}
byte_sizes = {k.lower(): v for k, v in byte_sizes.items()}
byte_sizes.update({k[0]: v for k, v in byte_sizes.items() if k and "i" not in k})
byte_sizes.update({k[:-1]: v for k, v in byte_sizes.items() if k and "i" in k})
def format_time(n: float) -> str:
    """Render a duration in seconds as a short human-readable string.

    Durations of at least one second use two decimal places; longer spans
    switch to two-component forms ("Xm Ys", "Xhr Ym", "Xd Yhr").

    >>> from dask.utils import format_time
    >>> format_time(1)
    '1.00 s'
    >>> format_time(0.001234)
    '1.23 ms'
    >>> format_time(0.00012345)
    '123.45 us'
    >>> format_time(123.456)
    '123.46 s'
    >>> format_time(1234.567)
    '20m 34s'
    >>> format_time(12345.67)
    '3hr 25m'
    >>> format_time(123456.78)
    '34hr 17m'
    >>> format_time(1234567.89)
    '14d 6hr'
    """
    # Thresholds are 2x the next unit so we never print e.g. "1hr 30m"
    # when "90m ..." would be clearer; mirrors the original cutoffs.
    if n > 2 * 24 * 3600:
        days, remainder = divmod(n, 24 * 3600)
        return f"{int(days)}d {int(remainder / 3600)}hr"
    if n > 2 * 3600:
        hours, remainder = divmod(n, 3600)
        return f"{int(hours)}hr {int(remainder / 60)}m"
    if n > 10 * 60:
        minutes, remainder = divmod(n, 60)
        return f"{int(minutes)}m {int(remainder)}s"
    if n >= 1:
        return f"{n:.2f} s"
    if n >= 1e-3:
        return f"{n * 1e3:.2f} ms"
    return f"{n * 1e6:.2f} us"
def format_time_ago(n: datetime) -> str:
    """Describe how long ago the datetime *n* was, e.g. ``'3 hours ago'``.

    The largest unit with a nonzero count wins; sub-minute differences
    collapse to ``'Just now'``.

    Examples
    --------
    >>> from datetime import datetime, timedelta
    >>> now = datetime.now()
    >>> format_time_ago(now)
    'Just now'
    >>> format_time_ago(now - timedelta(minutes=1))
    '1 minute ago'
    >>> format_time_ago(now - timedelta(minutes=2))
    '2 minutes ago'
    >>> format_time_ago(now - timedelta(hours=1))
    '1 hour ago'
    >>> format_time_ago(now - timedelta(hours=6))
    '6 hours ago'
    >>> format_time_ago(now - timedelta(days=1))
    '1 day ago'
    >>> format_time_ago(now - timedelta(days=5))
    '5 days ago'
    >>> format_time_ago(now - timedelta(days=16))
    '2 weeks ago'
    >>> format_time_ago(now - timedelta(days=190))
    '6 months ago'
    >>> format_time_ago(now - timedelta(days=800))
    '2 years ago'
    """
    elapsed = datetime.now() - n
    # Largest unit first; the first one with a whole count >= 1 is used.
    candidates = (
        ("years", elapsed.days / 365),
        ("months", elapsed.days / 30.436875),  # average days per month
        ("weeks", elapsed.days / 7),
        ("days", elapsed.days),
        ("hours", elapsed.seconds / 3600),
        ("minutes", elapsed.seconds % 3600 / 60),
    )
    for unit, amount in candidates:
        count = int(amount)
        if count > 0:
            label = unit[:-1] if count == 1 else unit  # de-pluralize
            return f"{count} {label} ago"
    return "Just now"
def format_bytes(n: int) -> str:
    """Format a byte count as text using binary (IEC) prefixes.

    >>> from dask.utils import format_bytes
    >>> format_bytes(1)
    '1 B'
    >>> format_bytes(1234)
    '1.21 kiB'
    >>> format_bytes(12345678)
    '11.77 MiB'
    >>> format_bytes(1234567890)
    '1.15 GiB'
    >>> format_bytes(1234567890000)
    '1.12 TiB'
    >>> format_bytes(1234567890000000)
    '1.10 PiB'

    For all values < 2**60, the output is always <= 10 characters.
    """
    # Largest prefix first; 0.9 * factor lets values just below a boundary
    # (e.g. 0.95 GiB) use the larger, more readable unit.
    prefixes = (
        ("Pi", 2**50),
        ("Ti", 2**40),
        ("Gi", 2**30),
        ("Mi", 2**20),
        ("ki", 2**10),
    )
    for prefix, factor in prefixes:
        if n >= 0.9 * factor:
            return f"{n / factor:.2f} {prefix}B"
    return f"{n} B"
timedelta_sizes = {
"s": 1,
"ms": 1e-3,
"us": 1e-6,
"ns": 1e-9,
"m": 60,
"h": 3600,
"d": 3600 * 24,
"w": 7 * 3600 * 24,
}
tds2 = {
"second": 1,
"minute": 60,
"hour": 60 * 60,
"day": 60 * 60 * 24,
"week": 7 * 60 * 60 * 24,
"millisecond": 1e-3,
"microsecond": 1e-6,
"nanosecond": 1e-9,
}
tds2.update({f"{k}s": v for k, v in tds2.items()})
timedelta_sizes.update(tds2)
timedelta_sizes.update({k.upper(): v for k, v in timedelta_sizes.items()})
@overload
def parse_timedelta(s: None, default: str | Literal[False] = "seconds") -> None: ...
@overload
def parse_timedelta(
s: str | float | timedelta, default: str | Literal[False] = "seconds"
) -> float: ...
def parse_timedelta(s, default="seconds"):
"""Parse timedelta string to number of seconds
Parameters
----------
s : str, float, timedelta, or None
default: str or False, optional
Unit of measure if s does not specify one. Defaults to seconds.
Set to False to require s to explicitly specify its own unit.
Examples
--------
>>> from datetime import timedelta
>>> from dask.utils import parse_timedelta
>>> parse_timedelta('3s')
3
>>> parse_timedelta('3.5 seconds')
3.5
>>> parse_timedelta('300ms')
0.3
>>> parse_timedelta(timedelta(seconds=3)) # also supports timedeltas
3
"""
if s is None:
return None
if isinstance(s, timedelta):
s = s.total_seconds()
return int(s) if int(s) == s else s
if isinstance(s, Number):
s = str(s)
s = s.replace(" ", "")
if not s[0].isdigit():
s = f"1{s}"
for i in range(len(s) - 1, -1, -1):
if not s[i].isalpha():
break
index = i + 1
prefix = s[:index]
suffix = s[index:] or default
if suffix is False:
raise ValueError(f"Missing time unit: {s}")
if not isinstance(suffix, str):
raise TypeError(f"default must be str or False, got {default!r}")
n = float(prefix)
try:
multiplier = timedelta_sizes[suffix.lower()]
except KeyError:
valid_units = ", ".join(timedelta_sizes.keys())
raise KeyError(
f"Invalid time unit: {suffix}. Valid units are: {valid_units}"
) from None
result = n * multiplier
if int(result) == result:
result = int(result)
return result
def has_keyword(func, keyword):
    """Return True if *func*'s signature has a parameter named *keyword*.

    Falls back to False for callables whose signature cannot be
    introspected (e.g. some builtins).
    """
    try:
        params = inspect.signature(func).parameters
    except Exception:
        return False
    return keyword in params
def ndimlist(seq):
    """Return the nesting depth of ``seq``, following first elements.

    Scalars (and any non-list/tuple) have depth 0; an empty list or
    tuple counts as depth 1.
    """
    depth = 0
    while isinstance(seq, (list, tuple)):
        depth += 1
        if not seq:
            break
        seq = seq[0]
    return depth
def iter_chunks(sizes, max_size):
    """Greedily group ``sizes`` into consecutive chunks summing to <= max_size.

    Parameters
    ----------
    sizes : iterable of numbers
        The sizes to be chunked
    max_size : number
        Maximum total size per chunk.
        It must be greater or equal than each size in sizes
    """
    current: list = []
    total = 0
    for size in sizes:
        assert size <= max_size
        if total + size > max_size:
            # Flush the current chunk; `size` itself always fits in a
            # fresh chunk because of the assertion above.
            assert current
            yield current
            current, total = [], 0
        current.append(size)
        total += size
    if current:
        yield current
hex_pattern = re.compile("[a-f]+")


@functools.lru_cache(100000)
def key_split(s):
    """Return the human-readable prefix of a task key.

    >>> key_split('x')
    'x'
    >>> key_split('x-1')
    'x'
    >>> key_split('x-1-2-3')
    'x'
    >>> key_split(('x-2', 1))
    'x'
    >>> key_split("('x-2', 1)")
    'x'
    >>> key_split("('x', 1)")
    'x'
    >>> key_split('hello-world-1')
    'hello-world'
    >>> key_split(b'hello-world-1')
    'hello-world'
    >>> key_split('ae05086432ca935f6eba409a8ecd4896')
    'data'
    >>> key_split('<module.submodule.myclass object at 0xdaf372')
    'myclass'
    >>> key_split(None)
    'Other'
    >>> key_split('x-abcdefab')  # ignores hex
    'x'
    >>> key_split('_(x)')  # strips unpleasant characters
    'x'
    """
    # Normalize bytes/tuples by recursing, so the LRU cache is shared
    # across equivalent representations.
    if type(s) is bytes:
        return key_split(s.decode())
    if type(s) is tuple:
        return key_split(s[0])
    try:
        words = s.split("-")
        first = words[0]
        if first[0].isalpha():
            result = first
        else:
            # Strip tuple-repr punctuation, e.g. "('x', 1)" -> "x"
            result = first.split(",")[0].strip("_'()\"")
        for word in words[1:]:
            # Keep appending purely alphabetic words, but stop at anything
            # token-like (an 8-char word starting with a-f letters).
            if word.isalpha() and not (
                len(word) == 8 and hex_pattern.match(word) is not None
            ):
                result += f"-{word}"
            else:
                break
        if len(result) == 32 and re.match(r"[a-f0-9]{32}", result):
            return "data"
        if result[0] == "<":
            # Object reprs like "<mod.Cls object at 0x...>" -> class name.
            result = result.strip("<>").split()[0].split(".")[-1]
        # Intern: these prefixes repeat heavily and are compared often.
        return sys.intern(result)
    except Exception:
        return "Other"
def stringify(obj, exclusive: Iterable | None = None):
"""Convert an object to a string
If ``exclusive`` is specified, search through `obj` and convert
values that are in ``exclusive``.
Note that when searching through dictionaries, only values are
converted, not the keys.
Parameters
----------
obj : Any
Object (or values within) to convert to string
exclusive: Iterable, optional
Set of values to search for when converting values to strings
Returns
-------
result : type(obj)
Stringified copy of ``obj`` or ``obj`` itself if it is already a
string or bytes.
Examples
--------
>>> stringify(b'x')
b'x'
>>> stringify('x')
'x'
>>> stringify({('a',0):('a',0), ('a',1): ('a',1)})
"{('a', 0): ('a', 0), ('a', 1): ('a', 1)}"
>>> stringify({('a',0):('a',0), ('a',1): ('a',1)}, exclusive={('a',0)})
{('a', 0): "('a', 0)", ('a', 1): ('a', 1)}
"""
typ = type(obj)
if typ is str or typ is bytes:
return obj
elif exclusive is None:
return str(obj)
if typ is list:
return [stringify(v, exclusive) for v in obj]
if typ is dict:
return {k: stringify(v, exclusive) for k, v in obj.items()}
try:
if obj in exclusive:
return stringify(obj)
except TypeError: # `obj` not hashable
pass
if typ is tuple: # If the tuple itself isn't a key, check its elements
return tuple(stringify(v, exclusive) for v in obj)
return obj
| OperatorMethodMixin |
python | pytorch__pytorch | torch/utils/_config_module.py | {
"start": 12839,
"end": 28415
} | class ____(ModuleType):
# NOTE: This should be kept in sync with _config_typing.pyi.
# The actual configuration settings. E.g., torch._dynamo.config.debug
# would live as "debug" in the key, and torch._inductor.config.triton.cudagraphs
# maps as "triton.cudagraphs". See discussion on the class for meaning of various sub items
_config: dict[str, _ConfigEntry]
_bypass_keys: set[str]
_compile_ignored_keys: set[str]
_is_dirty: bool
_hash_digest: bytes | None
def __init__(self) -> None:
raise NotImplementedError(
f"use {__name__}.install_config_module(sys.modules[__name__])"
)
def __setattr__(self, name: str, value: object) -> None:
if name in self._bypass_keys:
super().__setattr__(name, value)
elif name not in self._config:
raise AttributeError(f"{self.__name__}.{name} does not exist")
elif self._config[name].alias is not None:
self._set_alias_val(self._config[name], value)
else:
self._config[name].user_override = value
self._is_dirty = True
self._config[name].hide = False
def __getattr__(self, name: str) -> Any:
try:
config = self._config[name]
if config.hide:
raise AttributeError(f"{self.__name__}.{name} does not exist")
alias_val = self._get_alias_val(config)
if alias_val is not _UNSET_SENTINEL:
return alias_val
if config.env_value_force is not _UNSET_SENTINEL:
return config.env_value_force
if config.user_override is not _UNSET_SENTINEL:
return config.user_override
if config.env_value_default is not _UNSET_SENTINEL:
return config.env_value_default
if config.justknob is not None:
# JK only supports bools and ints
return justknobs_check(name=config.justknob, default=config.default)
# Note that reference types can still be modified, so we
# copy them to user_overrides in case the user overrides
# them
if isinstance(config.default, (list, set, dict)):
config.user_override = copy.deepcopy(config.default)
return config.user_override
return config.default
except KeyError as e:
# make hasattr() work properly
raise AttributeError(f"{self.__name__}.{name} does not exist") from e
def __delattr__(self, name: str) -> None:
self._is_dirty = True
# must support delete because unittest.mock.patch deletes
# then recreate things
self._config[name].user_override = _UNSET_SENTINEL
self._config[name].hide = True
def _get_alias_module_and_name(
self, entry: _ConfigEntry
) -> tuple[ModuleType, str] | None:
alias = entry.alias
if alias is None:
return None
module_name, constant_name = alias.rsplit(".", 1)
try:
module = importlib.import_module(module_name)
except ImportError as e:
raise AttributeError(f"config alias {alias} does not exist") from e
return module, constant_name
def _get_alias_val(self, entry: _ConfigEntry) -> Any:
data = self._get_alias_module_and_name(entry)
if data is None:
return _UNSET_SENTINEL
module, constant_name = data
constant_value = getattr(module, constant_name)
return constant_value
def _set_alias_val(self, entry: _ConfigEntry, val: Any) -> None:
data = self._get_alias_module_and_name(entry)
if data is None:
raise AssertionError(
"alias data should not be None when setting alias value"
)
module, constant_name = data
setattr(module, constant_name, val)
def _is_default(self, name: str) -> bool:
"""
Returns true if the config is at its default value.
configs overridden by the env are not considered default.
"""
config_val = self._config[name]
# The config is not overridden by the user, and the env_value_default
# is different from the default value (meaning user has set the env to
# change the default value).
not_set_env_default = (
config_val.env_value_default is _UNSET_SENTINEL
or config_val.env_value_default == config_val.default
)
not_set_env_force = (
config_val.env_value_force is _UNSET_SENTINEL
or config_val.env_value_force == config_val.default
)
unset = config_val.user_override is _UNSET_SENTINEL
# Handle reference types specially to avoid spammy warnings
if isinstance(config_val.default, (list, set, dict)):
unset = unset or config_val.user_override == config_val.default
return unset and not_set_env_default and not_set_env_force
def _get_dict(
self,
ignored_keys: list[str] | None = None,
ignored_prefixes: list[str] | None = None,
skip_default: bool = False,
) -> dict[str, Any]:
"""Export a dictionary of current configuration keys and values.
This function is design to provide a single point which handles
accessing config options and exporting them into a dictionary.
This is used by a number of different user facing export methods
which all have slightly different semantics re: how and what to
skip.
If a config is aliased, it skips this config.
Arguments:
ignored_keys are keys that should not be exported.
ignored_prefixes are prefixes that if a key matches should
not be exported
skip_default does two things. One if a key has not been modified
it skips it.
"""
config: dict[str, Any] = {}
for key in self._config:
if ignored_keys and key in ignored_keys:
continue
if ignored_prefixes:
if any(key.startswith(prefix) for prefix in ignored_prefixes):
continue
if skip_default and self._is_default(key):
continue
if self._config[key].alias is not None:
continue
config[key] = copy.deepcopy(getattr(self, key))
return config
def get_type(self, config_name: str) -> type:
return self._config[config_name].value_type
def save_config(self) -> bytes:
"""Convert config to a pickled blob"""
ignored_keys = getattr(self, "_save_config_ignore", [])
return pickle.dumps(
self._get_dict(ignored_keys=ignored_keys),
protocol=2,
)
def save_config_portable(
self, *, ignore_private_configs: bool = True
) -> dict[str, Any]:
"""Convert config to portable format"""
prefixes = []
if ignore_private_configs:
prefixes.append("_")
prefixes.extend(getattr(self, "_cache_config_ignore_prefix", []))
return self._get_dict(ignored_prefixes=prefixes)
def codegen_config(self) -> str:
"""Convert config to Python statements that replicate current config.
This does NOT include config settings that are at default values.
"""
# additional imports required
imports = set()
def get_module_name(func: Callable, add_dot: bool) -> str:
module_name = func.__module__
if module_name == "builtins":
module_name = ""
if add_dot and module_name != "":
module_name += "."
return module_name
def add_import(func: Callable) -> None:
module_name = get_module_name(func, False)
if module_name:
imports.add(module_name)
def list_of_callables_to_string(v: list | set) -> list[str]:
return [f"{get_module_name(item, True)}{item.__name__}" for item in v]
def importable_callable(v: Any) -> bool:
# functools.partial has no attributes below but is a callable
return callable(v) and hasattr(v, "__module__") and hasattr(v, "__name__")
def get_config_line(mod, k, v) -> str: # type: ignore[no-untyped-def]
"""
Return a string version of the config line.
Handle v when v is a callable, or a list/dict of callables. Add import statements for callables if necessary.
We assume that the value of a single config won't be a mix of callables and non-callables.
Example output:
import logging
import _warnings
torch._dynamo.config.reorderable_logging_functions = { _warnings.warn, logging.warn, print }
"""
if importable_callable(v):
add_import(v)
return f"{mod}.{k} = {get_module_name(v, True)}{v.__name__}"
elif isinstance(v, (list, set)) and all(
importable_callable(item) for item in v
):
for item in v:
add_import(item)
v_list = list_of_callables_to_string(v)
if isinstance(v, list):
return f"{mod}.{k} = {v_list}"
else:
return f"{mod}.{k} = {{ {', '.join(v_list)} }}"
else:
return f"{mod}.{k} = {v!r}"
lines = []
mod = self.__name__
for k, v in self._get_dict(
ignored_keys=getattr(self, "_save_config_ignore", []), skip_default=True
).items():
lines.append(get_config_line(mod, k, v))
for import_name in imports:
lines.insert(0, f"import {import_name}")
return "\n".join(lines)
def get_hash(self) -> bytes:
"""Hashes the configs that are not compile_ignored"""
if self._is_dirty or self._hash_digest is None:
dict_to_hash = self._get_dict(ignored_keys=list(self._compile_ignored_keys))
string_to_hash = repr(sorted(dict_to_hash.items()))
self._hash_digest = hashlib.md5(
string_to_hash.encode("utf-8"), usedforsecurity=False
).digest()
self._is_dirty = False
return self._hash_digest
@deprecated(
"`config.to_dict()` has been deprecated. It no longer changes the underlying config."
" use `config.get_config_copy()` instead if you just want a copy of the config, or "
"config.load_config if you need mutable access",
category=FutureWarning,
)
def to_dict(self) -> dict[str, Any]:
return self.get_config_copy()
@deprecated(
"`config.shallow_copy_dict()` has been deprecated. It no longer changes the underlying config."
" use `config.get_config_copy()` instead if you just want a copy of the config, or "
"config.load_config if you need mutable access",
category=FutureWarning,
)
def shallow_copy_dict(self) -> dict[str, Any]:
return self.get_config_copy()
def load_config(self, maybe_pickled_config: bytes | dict[str, Any]) -> None:
"""Restore from a prior call to save_config() or shallow_copy_dict()"""
if not isinstance(maybe_pickled_config, dict):
config = pickle.loads(maybe_pickled_config)
else:
config = maybe_pickled_config
for k, v in config.items():
if k in self._config:
setattr(self, k, v)
else:
from torch._dynamo.utils import warn_once
warn_once(f"key {k} with value {v} is not understood by this config")
def get_config_copy(self) -> dict[str, Any]:
return self._get_dict()
def get_serializable_config_copy(self) -> dict[str, Any]:
return self._get_dict(ignored_keys=getattr(self, "_save_config_ignore", []))
def patch(
self,
arg1: str | dict[str, Any] | None = None,
arg2: Any = None,
**kwargs: dict[str, Any],
) -> "ContextDecorator":
"""
Decorator and/or context manager to make temporary changes to a config.
As a decorator:
@config.patch("name", val)
@config.patch(name1=val1, name2=val2)
@config.patch({"name1": val1, "name2", val2})
def foo(...):
...
As a context manager:
with config.patch("name", val):
...
"""
changes: dict[str, Any]
if arg1 is not None:
if arg2 is not None:
if not isinstance(arg1, str):
raise AssertionError(
"first argument must be a string when passing 2 positional args to patch"
)
# patch("key", True) syntax
changes = {arg1: arg2}
else:
if not isinstance(arg1, dict):
raise AssertionError(
"first argument must be a dict when passing a single positional arg to patch"
)
# patch({"key": True}) syntax
changes = arg1
if kwargs:
raise AssertionError(
"cannot pass both positional and keyword arguments to patch"
)
else:
# patch(key=True) syntax
changes = kwargs
if arg2 is not None:
raise AssertionError(
"second positional argument is only valid when first argument is a key string"
)
if not isinstance(changes, dict):
raise AssertionError(f"expected `dict` got {type(changes)}")
prior: dict[str, Any] = {}
config = self
class ConfigPatch(ContextDecorator):
def __init__(self) -> None:
self.changes = changes
def __enter__(self) -> None:
if prior:
raise AssertionError(
"prior should be empty when entering ConfigPatch"
)
for key in self.changes:
# KeyError on invalid entry
prior[key] = config.__getattr__(key)
for k, v in self.changes.items():
config.__setattr__(k, v)
def __exit__(self, exc_type, exc_val, exc_tb): # type: ignore[no-untyped-def]
for k, v in prior.items():
config.__setattr__(k, v)
prior.clear()
return ConfigPatch()
def _make_closure_patcher(self, **changes: dict[str, Any]) -> Any:
"""
A lower-overhead version of patch() for things on the critical path.
Usage:
# do this off the critical path
change_fn = config.make_closure_patcher(foo=True)
...
revert = change_fn()
try:
...
finally:
revert()
"""
config = self._config
def change() -> Callable[[], None]:
prior = {k: config[k].user_override for k in changes}
for k, v in changes.items():
self._config[k].user_override = v
def revert() -> None:
for k, v in prior.items():
self._config[k].user_override = v
return revert
return change
| ConfigModule |
python | walkccc__LeetCode | solutions/936. Stamping The Sequence/936.py | {
"start": 0,
"end": 1408
} | class ____:
def movesToStamp(self, stamp: str, target: str) -> list[int]:
def stampify(s: int) -> int:
"""
Stamps target[i..i + |stamp|) and returns the number of newly stamped
characters.
e.g. stampify("abc", "ababc", 2) returns 3 because target becomes "ab***".
"""
stampified = len(stamp)
for i, st in enumerate(stamp):
if target[s + i] == '*': # It's already been stamped.
stampified -= 1
elif target[s + i] != st: # We can't stamp on the index i.
return 0
for i in range(s, s + len(stamp)):
target[i] = '*'
return stampified
ans = []
target = list(target)
# stamped[i] := True if we already stamped target by stamping on index i
stamped = [False] * len(target)
stampedCount = 0 # Our goal is to make stampedCount = |target|.
while stampedCount < len(target):
isStamped = False
# Try to stamp target[i..i + |stamp|) for each index.
for i in range(len(target) - len(stamp) + 1):
if stamped[i]:
continue
stampified = stampify(i)
if stampified == 0:
continue
stampedCount += stampified
isStamped = True
stamped[i] = True
ans.append(i)
# After trying to stamp on each index, we can't find a valid stamp.
if not isStamped:
return []
return ans[::-1]
| Solution |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_poly_persistence.py | {
"start": 5763,
"end": 16882
} | class ____(PolymorphTest):
lazy_relationship = None
include_base = None
redefine_colprop = None
with_polymorphic = None
run_inserts = "once"
run_deletes = None
run_setup_mappers = "once"
@classmethod
def setup_mappers(cls):
include_base = cls.include_base
lazy_relationship = cls.lazy_relationship
redefine_colprop = cls.redefine_colprop
with_polymorphic = cls.with_polymorphic
if with_polymorphic == "unions":
if include_base:
person_join = polymorphic_union(
{
"engineer": people.join(engineers),
"manager": people.join(managers),
"person": people.select()
.where(people.c.type == "person")
.subquery(),
},
None,
"pjoin",
)
else:
person_join = polymorphic_union(
{
"engineer": people.join(engineers),
"manager": people.join(managers),
},
None,
"pjoin",
)
manager_join = people.join(managers).outerjoin(boss)
person_with_polymorphic = ["*", person_join]
manager_with_polymorphic = ["*", manager_join]
elif with_polymorphic == "joins":
person_join = (
people.outerjoin(engineers).outerjoin(managers).outerjoin(boss)
)
manager_join = people.join(managers).outerjoin(boss)
person_with_polymorphic = ["*", person_join]
manager_with_polymorphic = ["*", manager_join]
elif with_polymorphic == "auto":
person_with_polymorphic = "*"
manager_with_polymorphic = "*"
else:
person_with_polymorphic = None
manager_with_polymorphic = None
if redefine_colprop:
person_mapper = cls.mapper_registry.map_imperatively(
Person,
people,
with_polymorphic=person_with_polymorphic,
polymorphic_on=people.c.type,
polymorphic_identity="person",
properties={"person_name": people.c.name},
)
else:
person_mapper = cls.mapper_registry.map_imperatively(
Person,
people,
with_polymorphic=person_with_polymorphic,
polymorphic_on=people.c.type,
polymorphic_identity="person",
)
cls.mapper_registry.map_imperatively(
Engineer,
engineers,
inherits=person_mapper,
polymorphic_identity="engineer",
)
cls.mapper_registry.map_imperatively(
Manager,
managers,
inherits=person_mapper,
with_polymorphic=manager_with_polymorphic,
polymorphic_identity="manager",
)
cls.mapper_registry.map_imperatively(
Boss, boss, inherits=Manager, polymorphic_identity="boss"
)
cls.mapper_registry.map_imperatively(
Company,
companies,
properties={
"employees": relationship(
Person,
lazy=lazy_relationship,
cascade="all, delete-orphan",
backref="company",
order_by=people.c.person_id,
)
},
)
@classmethod
def insert_data(cls, connection):
redefine_colprop = cls.redefine_colprop
include_base = cls.include_base
if redefine_colprop:
person_attribute_name = "person_name"
else:
person_attribute_name = "name"
employees = [
Manager(
status="AAB",
manager_name="manager1",
**{person_attribute_name: "pointy haired boss"},
),
Engineer(
status="BBA",
engineer_name="engineer1",
primary_language="java",
**{person_attribute_name: "dilbert"},
),
]
if include_base:
employees.append(Person(**{person_attribute_name: "joesmith"}))
employees += [
Engineer(
status="CGG",
engineer_name="engineer2",
primary_language="python",
**{person_attribute_name: "wally"},
),
Manager(
status="ABA",
manager_name="manager2",
**{person_attribute_name: "jsmith"},
),
]
session = Session(connection)
c = Company(name="company1")
c.employees = employees
session.add(c)
session.commit()
@testing.fixture
def get_dilbert(self):
def run(session):
if self.redefine_colprop:
person_attribute_name = "person_name"
else:
person_attribute_name = "name"
dilbert = (
session.query(Engineer)
.filter_by(**{person_attribute_name: "dilbert"})
.one()
)
return dilbert
return run
def test_lazy_load(self):
lazy_relationship = self.lazy_relationship
with_polymorphic = self.with_polymorphic
if self.redefine_colprop:
person_attribute_name = "person_name"
else:
person_attribute_name = "name"
session = fixture_session()
dilbert = (
session.query(Engineer)
.filter_by(**{person_attribute_name: "dilbert"})
.one()
)
employees = session.query(Person).order_by(Person.person_id).all()
company = session.query(Company).first()
eq_(session.get(Person, dilbert.person_id), dilbert)
session.expunge_all()
eq_(
session.query(Person)
.filter(Person.person_id == dilbert.person_id)
.one(),
dilbert,
)
session.expunge_all()
def go():
cc = session.get(Company, company.company_id)
eq_(cc.employees, employees)
if not lazy_relationship:
if with_polymorphic != "none":
self.assert_sql_count(testing.db, go, 1)
else:
self.assert_sql_count(testing.db, go, 2)
else:
if with_polymorphic != "none":
self.assert_sql_count(testing.db, go, 2)
else:
self.assert_sql_count(testing.db, go, 3)
def test_baseclass_lookup(self, get_dilbert):
session = fixture_session()
dilbert = get_dilbert(session)
if self.redefine_colprop:
person_attribute_name = "person_name"
else:
person_attribute_name = "name"
# test selecting from the query, using the base
# mapped table (people) as the selection criterion.
# in the case of the polymorphic Person query,
# the "people" selectable should be adapted to be "person_join"
eq_(
session.query(Person)
.filter(getattr(Person, person_attribute_name) == "dilbert")
.first(),
dilbert,
)
def test_subclass_lookup(self, get_dilbert):
session = fixture_session()
dilbert = get_dilbert(session)
if self.redefine_colprop:
person_attribute_name = "person_name"
else:
person_attribute_name = "name"
eq_(
session.query(Engineer)
.filter(getattr(Person, person_attribute_name) == "dilbert")
.first(),
dilbert,
)
def test_baseclass_base_alias_filter(self, get_dilbert):
session = fixture_session()
dilbert = get_dilbert(session)
# test selecting from the query, joining against
# an alias of the base "people" table. test that
# the "palias" alias does *not* get sucked up
# into the "person_join" conversion.
palias = people.alias("palias")
dilbert = session.get(Person, dilbert.person_id)
is_(
dilbert,
session.query(Person)
.filter(
(palias.c.name == "dilbert")
& (palias.c.person_id == Person.person_id)
)
.first(),
)
def test_subclass_base_alias_filter(self, get_dilbert):
session = fixture_session()
dilbert = get_dilbert(session)
palias = people.alias("palias")
is_(
dilbert,
session.query(Engineer)
.filter(
(palias.c.name == "dilbert")
& (palias.c.person_id == Person.person_id)
)
.first(),
)
def test_baseclass_sub_table_filter(self, get_dilbert):
session = fixture_session()
dilbert = get_dilbert(session)
# this unusual test is selecting from the plain people/engineers
# table at the same time as the polymorphic entity
is_(
dilbert,
session.query(Person)
.filter(
(Engineer.engineer_name == "engineer1")
& (engineers.c.person_id == people.c.person_id)
& (people.c.person_id == Person.person_id)
)
.first(),
)
def test_subclass_getitem(self, get_dilbert):
session = fixture_session()
dilbert = get_dilbert(session)
is_(
dilbert,
session.query(Engineer).filter(
Engineer.engineer_name == "engineer1"
)[0],
)
def test_primary_table_only_for_requery(self):
session = fixture_session()
if self.redefine_colprop:
person_attribute_name = "person_name"
else:
person_attribute_name = "name"
dilbert = ( # noqa
session.query(Person)
.filter(getattr(Person, person_attribute_name) == "dilbert")
.first()
)
def go():
# assert that only primary table is queried for
# already-present-in-session
(
session.query(Person)
.filter(getattr(Person, person_attribute_name) == "dilbert")
.first()
)
self.assert_sql_count(testing.db, go, 1)
def test_standalone_orphans(self):
if self.redefine_colprop:
person_attribute_name = "person_name"
else:
person_attribute_name = "name"
session = fixture_session()
daboss = Boss(
status="BBB",
manager_name="boss",
golf_swing="fore",
**{person_attribute_name: "daboss"},
)
session.add(daboss)
assert_raises(sa_exc.DBAPIError, session.flush)
| RoundTripTest |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 41506,
"end": 41769
} | class ____(BaseModel):
"""
Connection Collection serializer for responses.
"""
connections: Annotated[list[ConnectionResponse], Field(title="Connections")]
total_entries: Annotated[int, Field(title="Total Entries")]
| ConnectionCollectionResponse |
python | faif__python-patterns | patterns/behavioral/memento.py | {
"start": 446,
"end": 1723
} | class ____:
"""A transaction guard.
This is, in fact, just syntactic sugar around a memento closure.
"""
deep = False
states: List[Callable[[], None]] = []
def __init__(self, deep: bool, *targets: Any) -> None:
self.deep = deep
self.targets = targets
self.commit()
def commit(self) -> None:
self.states = [memento(target, self.deep) for target in self.targets]
def rollback(self) -> None:
for a_state in self.states:
a_state()
def Transactional(method):
"""Adds transactional semantics to methods. Methods decorated with
@Transactional will roll back to entry-state upon exceptions.
:param method: The function to be decorated.
"""
def __init__(self, method: Callable) -> None:
self.method = method
def __get__(self, obj: Any, T: Type) -> Callable:
"""
A decorator that makes a function transactional.
:param method: The function to be decorated.
"""
def transaction(*args, **kwargs):
state = memento(obj)
try:
return self.method(obj, *args, **kwargs)
except Exception as e:
state()
raise e
return transaction
| Transaction |
python | sqlalchemy__sqlalchemy | test/dialect/sqlite/test_compiler.py | {
"start": 1543,
"end": 9868
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
"""Tests SQLite-dialect specific compilation."""
__dialect__ = sqlite.dialect()
def test_extract(self):
t = sql.table("t", sql.column("col1"))
mapping = {
"month": "%m",
"day": "%d",
"year": "%Y",
"second": "%S",
"hour": "%H",
"doy": "%j",
"minute": "%M",
"epoch": "%s",
"dow": "%w",
"week": "%W",
}
for field, subst in mapping.items():
self.assert_compile(
select(extract(field, t.c.col1)),
"SELECT CAST(STRFTIME('%s', t.col1) AS "
"INTEGER) AS anon_1 FROM t" % subst,
)
def test_plain_stringify_returning(self):
t = Table(
"t",
MetaData(),
Column("myid", Integer, primary_key=True),
Column("name", String, server_default="some str"),
Column("description", String, default=func.lower("hi")),
)
stmt = t.insert().values().return_defaults()
eq_ignore_whitespace(
str(stmt.compile(dialect=sqlite.SQLiteDialect())),
"INSERT INTO t (description) VALUES (lower(?)) "
"RETURNING myid, name, description",
)
def test_true_false(self):
    """SQLite has no boolean literals; true/false compile to 1/0."""
    self.assert_compile(sql.true(), "1")
    self.assert_compile(sql.false(), "0")

def test_is_distinct_from(self):
    """IS (NOT) DISTINCT FROM folds to plain IS comparisons on SQLite."""
    expr = sql.column("x").is_distinct_from(None)
    self.assert_compile(expr, "x IS NOT NULL")
    expr = sql.column("x").is_not_distinct_from(False)
    self.assert_compile(expr, "x IS 0")

def test_localtime(self):
    """localtimestamp() renders via DATETIME(..., 'localtime')."""
    self.assert_compile(
        func.localtimestamp(), "DATETIME(CURRENT_TIMESTAMP, 'localtime')"
    )
def test_constraints_with_schemas(self):
    # FOREIGN KEY ... REFERENCES is emitted only when both tables live in
    # the same schema, and the referenced table is then rendered WITHOUT
    # its schema prefix (SQLite cannot reference across attached schemas).
    metadata = MetaData()
    Table(
        "t1",
        metadata,
        Column("id", Integer, primary_key=True),
        schema="master",
    )
    t2 = Table(
        "t2",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("t1_id", Integer, ForeignKey("master.t1.id")),
        schema="master",
    )
    t3 = Table(
        "t3",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("t1_id", Integer, ForeignKey("master.t1.id")),
        schema="alternate",
    )
    t4 = Table(
        "t4",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("t1_id", Integer, ForeignKey("master.t1.id")),
    )
    # schema->schema, generate REFERENCES with no schema name
    self.assert_compile(
        schema.CreateTable(t2),
        "CREATE TABLE master.t2 ("
        "id INTEGER NOT NULL, "
        "t1_id INTEGER, "
        "PRIMARY KEY (id), "
        "FOREIGN KEY(t1_id) REFERENCES t1 (id)"
        ")",
    )
    # schema->different schema, don't generate REFERENCES
    self.assert_compile(
        schema.CreateTable(t3),
        "CREATE TABLE alternate.t3 ("
        "id INTEGER NOT NULL, "
        "t1_id INTEGER, "
        "PRIMARY KEY (id)"
        ")",
    )
    # same for local schema
    self.assert_compile(
        schema.CreateTable(t4),
        "CREATE TABLE t4 ("
        "id INTEGER NOT NULL, "
        "t1_id INTEGER, "
        "PRIMARY KEY (id)"
        ")",
    )
@testing.combinations(
    (
        Boolean(create_constraint=True),
        sql.false(),
        "BOOLEAN DEFAULT 0, CHECK (x IN (0, 1))",
    ),
    (
        String(),
        func.sqlite_version(),
        "VARCHAR DEFAULT (sqlite_version())",
    ),
    (Integer(), func.abs(-5) + 17, "INTEGER DEFAULT (abs(-5) + 17)"),
    (
        # test #12425
        String(),
        func.now(),
        "VARCHAR DEFAULT CURRENT_TIMESTAMP",
    ),
    (
        # test #12425
        String(),
        func.datetime(func.now(), "localtime"),
        "VARCHAR DEFAULT (datetime(CURRENT_TIMESTAMP, 'localtime'))",
    ),
    (
        # test #12425
        String(),
        text("datetime(CURRENT_TIMESTAMP, 'localtime')"),
        "VARCHAR DEFAULT (datetime(CURRENT_TIMESTAMP, 'localtime'))",
    ),
    (
        # default with leading spaces that should not be
        # parenthesized
        String,
        text(" 'some default'"),
        "VARCHAR DEFAULT 'some default'",
    ),
    (String, text("'some default'"), "VARCHAR DEFAULT 'some default'"),
    argnames="datatype,default,expected",
)
def test_column_defaults_ddl(self, datatype, default, expected):
    # Expression server defaults must be parenthesized in SQLite DDL;
    # bare literals and CURRENT_TIMESTAMP must not be (see cases above).
    t = Table(
        "t",
        MetaData(),
        Column(
            "x",
            datatype,
            server_default=default,
        ),
    )
    self.assert_compile(
        CreateTable(t),
        f"CREATE TABLE t (x {expected})",
    )
def test_create_partial_index(self):
    # sqlite_where renders as a WHERE clause on CREATE INDEX; string
    # literals inside it must be quote-escaped ('' doubling).
    m = MetaData()
    tbl = Table("testtbl", m, Column("data", Integer))
    idx = Index(
        "test_idx1",
        tbl.c.data,
        sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10),
    )
    # test quoting and all that
    idx2 = Index(
        "test_idx2",
        tbl.c.data,
        sqlite_where=and_(tbl.c.data > "a", tbl.c.data < "b's"),
    )
    self.assert_compile(
        schema.CreateIndex(idx),
        "CREATE INDEX test_idx1 ON testtbl (data) "
        "WHERE data > 5 AND data < 10",
        dialect=sqlite.dialect(),
    )
    self.assert_compile(
        schema.CreateIndex(idx2),
        "CREATE INDEX test_idx2 ON testtbl (data) "
        "WHERE data > 'a' AND data < 'b''s'",
        dialect=sqlite.dialect(),
    )
def test_no_autoinc_on_composite_pk(self):
    """AUTOINCREMENT on a composite primary key must fail at DDL compile time."""
    meta = MetaData()
    tbl = Table(
        "t",
        meta,
        Column("x", Integer, primary_key=True, autoincrement=True),
        Column("y", Integer, primary_key=True),
    )
    assert_raises_message(
        exc.CompileError,
        "SQLite does not support autoincrement for composite",
        CreateTable(tbl).compile,
        dialect=sqlite.dialect(),
    )
def test_in_tuple(self):
    # Tuple IN defers to a post-compile ("expanding") parameter, which the
    # SQLite dialect expands into a VALUES list at execution time.
    compiled = (
        tuple_(column("q"), column("p"))
        .in_([(1, 2), (3, 4)])
        .compile(dialect=sqlite.dialect())
    )
    eq_(str(compiled), "(q, p) IN (__[POSTCOMPILE_param_1])")
    # Each tuple element becomes its own positional bind, named
    # param_<param>_<tuple index>_<element index>.
    eq_(
        compiled._literal_execute_expanding_parameter(
            "param_1",
            compiled.binds["param_1"],
            compiled.binds["param_1"].value,
        ),
        (
            [
                ("param_1_1_1", 1),
                ("param_1_1_2", 2),
                ("param_1_2_1", 3),
                ("param_1_2_2", 4),
            ],
            "VALUES (?, ?), (?, ?)",
        ),
    )
def test_create_table_without_rowid(self):
    """sqlite_with_rowid=False appends WITHOUT ROWID to CREATE TABLE."""
    meta = MetaData()
    tbl = Table("atable", meta, Column("id", Integer), sqlite_with_rowid=False)
    self.assert_compile(
        schema.CreateTable(tbl),
        "CREATE TABLE atable (id INTEGER) WITHOUT ROWID",
    )

def test_create_table_strict(self):
    """sqlite_strict=True appends STRICT to CREATE TABLE."""
    meta = MetaData()
    tbl = Table("atable", meta, Column("id", Integer), sqlite_strict=True)
    self.assert_compile(
        schema.CreateTable(tbl),
        "CREATE TABLE atable (id INTEGER) STRICT",
    )

def test_create_table_without_rowid_strict(self):
    """Both table options combine as 'WITHOUT ROWID, STRICT'."""
    meta = MetaData()
    tbl = Table(
        "atable",
        meta,
        Column("id", Integer),
        sqlite_with_rowid=False,
        sqlite_strict=True,
    )
    self.assert_compile(
        schema.CreateTable(tbl),
        "CREATE TABLE atable (id INTEGER) WITHOUT ROWID, STRICT",
    )
| SQLTest |
python | pytorch__pytorch | torch/fx/experimental/sym_node.py | {
"start": 21560,
"end": 61732
} | class ____(_DynamicScalar, int):
"""
User API for marking dynamic integers in `torch.compile`.
Intended to be compatible with both compile and eager mode.
Example usage::
fn = torch.compile(f)
x = DynamicInt(4)
fn(x) # compiles x as a dynamic integer input; returns f(4)
"""
def __new__(cls, val):
    # Only plain ints are accepted; the instance carries the concrete value
    # while torch.compile treats it as a dynamic input.
    assert isinstance(val, int)
    obj = super().__new__(cls, int(val))
    return obj

def __repr__(self):
    return f"DynamicInt({self.real})"

def __floordiv__(self, other):  # // was casting to int without these overrides?
    # Re-wrap so floor division stays a DynamicInt rather than decaying to int.
    return DynamicInt(self.real // other)

def __rfloordiv__(self, other):
    return DynamicInt(other // self.real)
# TODO: this probably needs the sizes-strides eval functions
METHOD_TO_OPERATOR = {
"pos": operator.pos,
"abs": operator.abs,
"add": operator.add,
"and": operator.and_,
"bitwise_and": operator.and_,
"ceil": math.ceil,
"eq": operator.eq,
"floor": math.floor,
"trunc": math.trunc,
"int_floordiv": operator.floordiv,
"ge": operator.ge,
"gt": operator.gt,
"is_integer": lambda x: x.is_integer(),
"le": operator.le,
"lshift": operator.lshift,
"lt": operator.lt,
"mod": operator.mod,
"mul": operator.mul,
"ne": operator.ne,
"neg": operator.neg,
"or": operator.or_,
"bitwise_or": operator.or_,
"bitwise_xor": operator.xor,
"float_pow": operator.pow,
"pow_by_natural": operator.pow,
"round": builtins.round,
"rshift": operator.rshift,
"sub": operator.sub,
"sym_float": sym_float,
"sym_ite": sym_ite,
"sym_max": sym_max,
"sym_min": sym_min,
"sym_not": sym_not,
"float_truediv": operator.truediv,
"int_truediv": operator.truediv,
}
unary_magic_methods = {
"abs",
"sym_float",
"sym_int",
"ceil",
"floor",
"neg",
"sym_not",
"pos",
"trunc",
}
# Adding math ops: sqrt, cos, sin, ...
def _get_sym_node_fn(name):
def fn(self):
return getattr(self, f"_sym_{name}")()
return fn
math_op_names = (
"sqrt",
"cos",
"cosh",
"sin",
"sinh",
"tan",
"tanh",
"asin",
"acos",
"atan",
"log2",
)
for name in math_op_names:
sym_name = f"sym_{name}"
priv_sym_name = f"_{sym_name}"
setattr(SymNode, sym_name, _get_sym_node_fn(name))
METHOD_TO_OPERATOR[sym_name] = getattr(torch, priv_sym_name)
unary_magic_methods.add(sym_name)
__all__.append(sym_name)
# Unary methods that are not magic methods
unary_nonmagic_methods = {
"is_integer",
}
unary_methods = unary_magic_methods | unary_nonmagic_methods
# Most methods are only registered on SymInt and SymFloat
# Some methods are only be registered on SymBool
only_bool_magic_methods = {"and", "or", "sym_not", "sym_ite"}
# Methods that implicitly convert SymBool into SymInt
bool_becomes_int_magic_methods = {"add", "sub", "mul"}
# Methods that are also on SymBool, in addition to on SymInt and SymFloat
also_bool_magic_methods = {"eq"}
bool_magic_methods = only_bool_magic_methods | also_bool_magic_methods
# Methods that are only for float
only_float_magic_methods = {"is_integer", "round", "sym_int", "sym_log2"}
magic_methods_on_operator_with_trailing_underscore = {"and", "or"}
# remap necessary because an op name can have a bitwise and boolean implementation
bitwise_ops = {"bitwise_and": "and", "bitwise_or": "or", "bitwise_xor": "xor"}
always_float_magic_methods = {"int_truediv", "float_truediv", "sym_float", "float_pow"}
for name in math_op_names:
sym_name = f"sym_{name}"
always_float_magic_methods.add(sym_name)
always_int_magic_methods = {"ceil", "floor", "trunc", "pow_by_natural"}
always_bool_magic_methods = {
"eq",
"ne",
"gt",
"lt",
"le",
"ge",
"and",
"or",
"sym_not",
"is_non_overlapping_and_dense",
"is_integer",
}
# Methods that have a `__foo__` as well as `__rfoo__`
def _sympy_float_truediv(a, b):
from torch.utils._sympy.functions import FloatTrueDiv
return FloatTrueDiv(a, b)
def _sympy_int_truediv(a, b):
from torch.utils._sympy.functions import IntTrueDiv
return IntTrueDiv(a, b)
def _sympy_floordiv(a, b):
from torch.utils._sympy.functions import FloorDiv
return FloorDiv(a, b)
def _sympy_mod(a, b):
from torch.utils._sympy.functions import Mod, PythonMod
if a.is_nonnegative and b.is_nonnegative:
return Mod(a, b)
else:
return PythonMod(a, b)
def _sympy_pow_by_natural(a, b):
from torch.utils._sympy.functions import PowByNatural
return PowByNatural(a, b)
def _sympy_float_pow(a, b):
from torch.utils._sympy.functions import FloatPow
return FloatPow(a, b)
def _sympy_and(a, b):
import sympy
return sympy.And(a, b)
def _sympy_or(a, b):
import sympy
return sympy.Or(a, b)
def _sympy_lshift(a, b):
from torch.utils._sympy.functions import LShift
return LShift(a, b)
def _sympy_rshift(a, b):
from torch.utils._sympy.functions import RShift
return RShift(a, b)
def _binary_search_insert_arg(ordered_args, new_arg):
    """
    If new_arg is found in ordered_args None is returned, else the new
    ordered_args with new_arg inserted
    """
    if len(ordered_args) == 0:
        return [new_arg]
    from sympy.core.basic import _args_sortkey as sort_key, Basic

    # Fast path when new_arg > ordered_args[-1].
    if sort_key(ordered_args[-1]) < sort_key(new_arg):
        return ordered_args + [new_arg]
    # Fast path when new_arg < ordered_args[0].
    if sort_key(ordered_args[0]) > sort_key(new_arg):
        return [new_arg] + ordered_args
    # General case: binary search using sympy's canonical term ordering.
    low, high = 0, len(ordered_args) - 1
    while low <= high:
        mid = (low + high) // 2
        compare_result = Basic.compare(ordered_args[mid], new_arg)
        if compare_result == 0:
            # Duplicate term found; signal the caller with None.
            return None
        elif compare_result < 0:
            low = mid + 1
        else:
            high = mid - 1
    # NOTE(review): unlike the fast paths above, this path mutates
    # ordered_args in place in addition to returning it.
    ordered_args.insert(low, new_arg)
    return ordered_args
def _optimized_add(
    lhs, rhs, lhs_is_optimized_summation=False, rhs_is_optimized_summation=False
):
    """
    Custom optimization for Add used to optimize incremental binary summations of certain properties. The idea
    is when we know the expression is a summation of unique symbols all we need to know is the correct order of symbols,
    and no other optimizations are needed. We pass evaluate=false, with the correct order of args and save the following.
    1. Avoid running other optimizations when the Add is constructed.
    2. Manually figure out the order of the args for the new expression in log(n) comparisons instead of nLog(n)
    (comparing terms is expensive and shows in the profiles).
    The function returns a tuple of (1) a boolean that indicates whether the output is a summation of unique symbols,
    (2) the result sympy expression.
    """
    import sympy
    from sympy.core.basic import _args_sortkey as sortkey

    def make_optimized(ordered_args):
        # Args are already canonically ordered and unique: skip sympy's
        # flatten/combine pass entirely.
        assert ordered_args is not None
        result = sympy.Add(*ordered_args, evaluate=False)
        return (True, result)

    from torch.utils._sympy.functions import _is_symbols_binary_summation

    lhs_is_optimized_summation |= _is_symbols_binary_summation(lhs)
    rhs_is_optimized_summation |= _is_symbols_binary_summation(rhs)
    if lhs_is_optimized_summation and rhs_is_optimized_summation:
        # (a0+a1..) + (a2+a3..) => (a0+a1+a2+a3)
        if sortkey(lhs._args[-1]) < sortkey(rhs._args[0]):
            return make_optimized(lhs._args + rhs._args)
        # (a2+a3..) + (a0+a1..) => (a0+a1+a2+a3)
        if sortkey(lhs._args[0]) > sortkey(rhs._args[-1]):
            return make_optimized(rhs._args + lhs._args)
        # (a1+a3) + (a0+a2) => (a0+a1+a2+a3)
        if len(lhs._args) <= 2 and len(rhs._args) <= 2:
            new_args = list(lhs._args)
            for a in rhs._args:
                new_args = _binary_search_insert_arg(new_args, a)
                if new_args is None:
                    break
            # None means an element already exists.
            if new_args is not None:
                return make_optimized(new_args)
    # (a0+a2) + a1 => (a0+a1+a2)
    if lhs_is_optimized_summation and rhs.is_symbol:
        new_args = _binary_search_insert_arg(list(lhs._args), rhs)
        # None means an element already exists.
        if new_args is not None:
            return make_optimized(new_args)
    # a1 + (a0+a2)=> (a0+a1+a2)
    if rhs_is_optimized_summation and lhs.is_symbol:
        new_args = _binary_search_insert_arg(list(rhs._args), lhs)
        # None means an element already exists.
        if new_args is not None:
            return make_optimized(new_args)
    # Fallback: let sympy evaluate normally (duplicates, non-symbols, etc.).
    result = sympy.Add(lhs, rhs)
    return (_is_symbols_binary_summation(result), result)
def _bitwise_and(a, b):
from torch.utils._sympy.functions import BitwiseFn_bitwise_and
return BitwiseFn_bitwise_and(a, b)
def _bitwise_or(a, b):
from torch.utils._sympy.functions import BitwiseFn_bitwise_or
return BitwiseFn_bitwise_or(a, b)
def _bitwise_xor(a, b):
from torch.utils._sympy.functions import BitwiseFn_bitwise_xor
return BitwiseFn_bitwise_xor(a, b)
reflectable_magic_methods = {
"add": operator.add,
"sub": operator.sub,
"mul": operator.mul,
"mod": _sympy_mod,
"pow_by_natural": _sympy_pow_by_natural,
"float_pow": _sympy_float_pow,
"and": _sympy_and,
"bitwise_and": _bitwise_and,
"or": _sympy_or,
"bitwise_or": _bitwise_or,
"bitwise_xor": _bitwise_xor,
"float_truediv": _sympy_float_truediv,
"int_truediv": _sympy_int_truediv,
"int_floordiv": _sympy_floordiv,
"lshift": _sympy_lshift,
"rshift": _sympy_rshift,
}
def _floor_ceil_helper(a, fn):
import sympy
if isinstance(a, sympy.Mul):
aa = a.args
if len(aa) == 2 and isinstance(aa[0], sympy.Float) and aa[1].is_integer:
coef = sympy.Integer(aa[0])
if aa[0] == coef: # structural equality test
return coef * aa[1]
if (
isinstance(a, sympy.Float)
and a == sympy.Integer(a)
or isinstance(a, sympy.Integer)
):
return sympy.Integer(a)
return fn(a)
def _sympy_floor(a):
from torch.utils._sympy.functions import FloorToInt
return FloorToInt(a)
# NB: this is Python trunc semantics which returns an int. Do NOT use this to
# represent torch.trunc (which is float to float)
def _sympy_trunc(a):
from torch.utils._sympy.functions import TruncToInt
return TruncToInt(a)
def _sympy_ceil(a):
from torch.utils._sympy.functions import CeilToInt
return CeilToInt(a)
def _sympy_eq(a, b):
import sympy
return sympy.Eq(a, b)
def _sympy_ne(a, b):
import sympy
return sympy.Ne(a, b)
def _sympy_gt(a, b):
import sympy
return sympy.Gt(a, b)
def _sympy_lt(a, b):
import sympy
return sympy.Lt(a, b)
def _sympy_le(a, b):
import sympy
return sympy.Le(a, b)
def _sympy_ge(a, b):
import sympy
return sympy.Ge(a, b)
def _sympy_min(a, b):
from torch.utils._sympy.functions import Min
return Min(a, b)
def _sympy_max(a, b):
from torch.utils._sympy.functions import Max
return Max(a, b)
def _sympy_ite(a, t, f):
import sympy
return sympy.Piecewise((t, a), (f, True))
current_module = sys.modules[__name__]
def _get_sym_math_fn(name):
def fn(a):
import torch.utils._sympy.functions
return getattr(torch.utils._sympy.functions, f"OpaqueUnaryFn_{name}")(a)
return fn
for name in math_op_names:
priv_sympy_name = f"_sympy_{name}"
fn = _get_sym_math_fn(name)
fn.__qualname__ = fn.__name__ = priv_sympy_name
setattr(current_module, priv_sympy_name, fn)
del fn, name, priv_sympy_name # type: ignore[possibly-undefined]
def _sympy_abs(a):
import sympy
return sympy.Abs(a)
def _sympy_round(number, ndigits=None):
from torch.utils._sympy.functions import RoundDecimal, RoundToInt
if ndigits is None:
return RoundToInt(number)
else:
return RoundDecimal(number, ndigits)
def _sympy_sym_float(a):
from torch.utils._sympy.functions import ToFloat
# NB: Cannot use a * 1.0 here, because 0 * 1.0 is 0 which incorrectly
# reports that it is an integer
return ToFloat(a)
def _sympy_is_integer(a):
import sympy
from torch.utils._sympy.functions import ToFloat
return sympy.Eq(ToFloat(sympy.floor(a)), a)
magic_methods = {
**reflectable_magic_methods,
"sym_not": operator.invert,
"pos": operator.pos,
"eq": _sympy_eq,
"ne": _sympy_ne,
"gt": _sympy_gt,
"lt": _sympy_lt,
"le": _sympy_le,
"ge": _sympy_ge,
"floor": _sympy_floor,
"trunc": _sympy_trunc,
"sym_float": _sympy_sym_float,
"ceil": _sympy_ceil,
"neg": operator.neg,
"sym_min": _sympy_min,
"sym_max": _sympy_max,
"sym_ite": _sympy_ite,
"abs": _sympy_abs,
"round": _sympy_round,
"is_integer": _sympy_is_integer,
}
for name in math_op_names:
sym_name = f"sym_{name}"
magic_methods[sym_name] = getattr(current_module, f"_sympy_{name}")
del name, sym_name, math_op_names, current_module # type: ignore[possibly-undefined]
def sympy_is_contiguous(sizes, strides):
    # Default (row-major) contiguity: check dims from innermost to outermost.
    dim = len(sizes)
    return sympy_is_contiguous_generic(sizes, strides, list(range(dim - 1, -1, -1)))


def sympy_is_contiguous_generic(sizes, strides, dim_order):
    """Build a sympy boolean: are (sizes, strides) contiguous when the dims
    are laid out in dim_order (innermost first)?"""
    import sympy

    dim = len(sizes)

    if len(dim_order) != dim:
        return sympy.false

    is_contiguous = sympy.true
    z = sympy.S.One
    # Contiguous if the strides make sense (or the dim is size 1)
    for d in dim_order:
        is_contiguous &= sympy.Eq(sizes[d], sympy.S.One) | sympy.Eq(strides[d], z)
        z *= sizes[d]
    # OR if any size is zero
    for d in range(dim):
        is_contiguous |= sympy.Eq(sizes[d], sympy.S.Zero)
    return is_contiguous


# NB: There is a TODO in C++ to allow omitting the batch dim.  If that
# happens you will need to refactor this


def sympy_is_channels_last_contiguous_2d(sizes, strides):
    # NHWC: dims checked in order C, W, H, N.
    return sympy_is_contiguous_generic(sizes, strides, [1, 3, 2, 0])


def sympy_is_channels_last_contiguous_3d(sizes, strides):
    # NDHWC: dims checked in order C, W, H, D, N.
    return sympy_is_contiguous_generic(sizes, strides, [1, 4, 3, 2, 0])
def sympy_is_channels_last_strides_generic(sizes, strides, dim_order):
    """Symbolic port of the eager channels-last stride check: strides must be
    non-decreasing when dims are visited in dim_order, with NCHW fallbacks
    for the ambiguous size-1 cases documented inline below."""
    import sympy

    from torch.utils._sympy.functions import Max

    dim = len(sizes)

    if dim != len(dim_order):
        return sympy.false

    m = sympy.S.Zero
    r = sympy.true

    # special case for trivial C dimension. default to NCHW
    r &= sympy.Ne(strides[1], 0)

    for d in dim_order:
        r &= sympy.Ne(sizes[d], 0) & (strides[d] >= m)
        # Fallback to NCHW as default layout for ambiguous cases
        # This is the flaw of implicit memory_format from strides.
        # N111 tensor with identical strides for size 1 dimension;
        # Two cases could lead us here:
        # a. N111 contiguous Tensor ([N,1,1,1]@[1,1,1,1])
        # b. N11W contiguous Tensor sliced on the W-dimension.
        # ([N,1,1,1]@[W,W,W,W])
        if d == 0:
            r &= sympy.Ne(m, strides[1])
        # This is necessary to:
        # 1. distinguish the memory_format of N1H1;
        #     [H, 1, 1, 1] channels_last stride
        #     [H, H, 1, 1] contiguous stride
        # 2. permutation of 1C1W:
        #     [1, C, 1, H]@[HC, H, H, 1] transpose(1, 3)
        #     [1, H, 1, C]@[HC, 1, H, H] shouldn't be identified as
        #     channels_last
        m = strides[d] * Max(sizes[d], 1)

    return r
def sympy_is_channels_last_strides_2d(sizes, strides):
return sympy_is_channels_last_strides_generic(sizes, strides, [1, 3, 2, 0])
def sympy_is_channels_last_strides_3d(sizes, strides):
return sympy_is_channels_last_strides_generic(sizes, strides, [1, 4, 3, 2, 0])
def _sympy_is_non_overlapping_and_dense_indicator(sizes, strides):
from torch.utils._sympy.functions import IsNonOverlappingAndDenseIndicator
return IsNonOverlappingAndDenseIndicator(*sizes, *strides)
sizes_strides_methods = {
# TODO: These could also be done with indicators, maybe it is better
# for reasoning to do it that way
"is_contiguous": sympy_is_contiguous,
"is_channels_last_contiguous_2d": sympy_is_channels_last_contiguous_2d,
"is_channels_last_contiguous_3d": sympy_is_channels_last_contiguous_3d,
"is_channels_last_strides_2d": sympy_is_channels_last_strides_2d,
"is_channels_last_strides_3d": sympy_is_channels_last_strides_3d,
"is_non_overlapping_and_dense_indicator": _sympy_is_non_overlapping_and_dense_indicator,
}
def to_node(self, num):
    """Coerce *num* into a SymNode belonging to the same shape env as *self*.

    Returns NotImplemented for unsupported types so Python falls back to the
    other operand's reflected magic method.
    """
    if isinstance(num, SymTypes):
        return num.node
    elif type(num) is bool:
        # exact type checks on purpose: bool is a subclass of int, so an
        # isinstance check would misroute True/False into wrap_int
        return self.wrap_bool(num)
    elif type(num) is int:
        return self.wrap_int(num)
    elif type(num) is float:
        return self.wrap_float(num)
    else:
        # NotImplemented is important so that Python tries the
        # other magic method
        return NotImplemented
def wrap_node(x):
    """Wrap a SymNode back into the user-facing SymInt/SymFloat/SymBool;
    constant nodes unwrap straight to their plain Python value."""
    # TODO: let C++ also take advantage of this
    if isinstance(x, SymNode) and x.constant is not None:
        return x.constant
    if x.is_int():
        return SymInt(x)
    elif x.is_float():
        return SymFloat(x)
    elif x.is_bool():
        return SymBool(x)
    else:
        raise AssertionError(f"unrecognized return type {x}")
def method_to_operator(method):
    """Map a magic-method name (e.g. "add") to its Python-level operator callable."""
    return METHOD_TO_OPERATOR[method]
def _make_node_magic(method, func):
func = lru_cache(256)(func)
if method in magic_methods_on_operator_with_trailing_underscore:
method_attr = f"{method}_"
else:
method_attr = method
def uninteresting_files() -> set[str]:
import torch
mods = [
torch._dynamo.eval_frame,
torch._dynamo.utils,
torch.fx.experimental.sym_node,
torch,
]
import torch._dynamo.guards
return (
{inspect.getfile(m) for m in mods}
| torch._dynamo.guards.uninteresting_files()
| {"<string>"}
)
def capture_provenance(fn):
@functools.wraps(fn)
def wrapper(self, other=None):
if other is None:
result = fn(self)
else:
result = fn(self, other)
if torch._logging._internal.GET_DTRACE_STRUCTURED:
if other is not None:
arguments = [self, other]
else:
arguments = [self]
def get_id(sym_node) -> Optional[int]:
# We don't want to return an ID if the input is a constant
import sympy
if sym_node.constant is not None:
return None
elif id(sym_node) == id(result):
return None
elif isinstance(sym_node.expr, (sympy.Integer, sympy.Float)):
return None
elif sym_node.expr in (sympy.true, sympy.false):
return None
return id(sym_node)
dtrace_structured(
"expression_created",
metadata_fn=lambda: {
"method": method,
"result": str(result),
"result_id": id(result),
"arguments": [str(a) for a in arguments],
"argument_ids": [
get_id(i) for i in arguments if get_id(i) is not None
],
"user_stack": structured.get_user_stack(3),
"stack": structured.get_framework_stack(3),
},
)
return result
return wrapper
@capture_provenance
def binary_magic_impl(self, other):
from torch.fx.experimental.proxy_tensor import (
get_proxy_mode,
handle_sym_dispatch,
)
op = method_to_operator(method)
out_hint = None
if self.hint is not None and other.hint is not None:
out_hint = op(self.hint, other.hint)
if get_proxy_mode():
return to_node(
self, handle_sym_dispatch(op, (wrap_node(self), wrap_node(other)), {})
)
assert isinstance(other, SymNode)
optimized_summation = False
try:
if method == "mod":
from torch.utils._sympy.functions import Mod, PythonMod
# Special handling for mod that requires access to the value
# ranges
shape_env = self.shape_env
if (
self.expr.is_nonnegative
or shape_env.bound_sympy(self.expr).lower >= 0
) and (
other.expr.is_nonnegative
or shape_env.bound_sympy(other.expr).lower >= 0
):
out = Mod(self.expr, other.expr)
else:
out = PythonMod(self.expr, other.expr)
elif method == "add":
# see Note [optimized_summation]
(optimized_summation, out) = _optimized_add(
self.expr,
other.expr,
self._optimized_summation,
other._optimized_summation,
)
else:
# TODO: consider constant prop here
out = func(self.expr, other.expr)
except Exception:
log.warning("failed to eval %s(%s, %s)", method, self.expr, other.expr)
raise
sym_node_log.debug("%s %s %s -> %s", method, self.expr, other.expr, out)
pytype: type
# This is not strictly correct. In Python, a**b may return complex when
# a < 0 and b is a float: (-1)**2.1. Same for sympy.sqrt(-3.14). This
# returns a float while both arguments are ints: 2**(-1). Also, max and
# min do not type promote. To avoid having data-dependent control flow
# here, we just set the type to float if one of the args is a float. In
# case of a type mismatch, we assume that it will be detected during
# evaluation.
if method in always_float_magic_methods:
pytype = float
elif method in always_bool_magic_methods:
pytype = bool
elif self.pytype is float or other.pytype is float:
pytype = float
else:
pytype = self.pytype
if (
pytype is not None
and out_hint is not None
and not isinstance(out_hint, SymTypes)
):
out_hint = pytype(out_hint)
# Create a FX node that corresponds to the operation being applied to
# this node.
fx_node, _ = self.shape_env._create_fx_call_function(
op, (self.fx_node, other.fx_node)
)
result = SymNode(
out,
self.shape_env,
pytype,
out_hint, # type: ignore[arg-type]
fx_node=fx_node,
optimized_summation=optimized_summation, # see Note [optimized_summation]
)
return result
@capture_provenance
def unary_magic_impl(self):
from torch.fx.experimental.proxy_tensor import (
get_proxy_mode,
handle_sym_dispatch,
)
op = method_to_operator(method)
if get_proxy_mode():
return to_node(self, handle_sym_dispatch(op, (wrap_node(self),), {}))
# TODO: consider constant prop here
expr = self.expr
if method == "floor" or method == "ceiling":
expr = self.shape_env._simplify_floor_div(expr)
try:
out = func(expr)
except Exception:
log.warning("failed to eval %s(%s)", method, expr)
raise
sym_node_log.debug("%s %s -> %s", func, expr, out)
out_hint = None
if self.hint is not None:
out_hint = op(self.hint)
pytype: type
if method in always_int_magic_methods:
pytype = int
elif method in always_bool_magic_methods:
pytype = bool
elif method in always_float_magic_methods:
pytype = float
else:
pytype = self.pytype
fx_node, _ = self.shape_env._create_fx_call_function(op, (self.fx_node,))
return SymNode(out, self.shape_env, pytype, out_hint, fx_node=fx_node)
if method in unary_methods:
setattr(SymNode, f"_{method_attr}", unary_magic_impl)
elif method == "sym_ite":
def sym_ite_impl(pred_node, then_node, else_node):
from torch.fx.experimental.proxy_tensor import (
get_proxy_mode,
handle_sym_dispatch,
)
out_hint = then_node.hint if pred_node.hint else else_node.hint
if get_proxy_mode():
return to_node(
pred_node,
handle_sym_dispatch(
sym_ite,
(
wrap_node(pred_node),
wrap_node(then_node),
wrap_node(else_node),
),
{},
),
)
try:
out = func(pred_node.expr, then_node.expr, else_node.expr)
except Exception:
log.warning(
"failed to eval %s(%s, %s, %s)",
method,
pred_node.expr,
then_node.expr,
else_node.expr,
)
raise
fx_node, _ = pred_node.shape_env._create_fx_call_function(
sym_ite, (pred_node.fx_node, then_node.fx_node, else_node.fx_node)
)
return SymNode(
out, pred_node.shape_env, then_node.pytype, out_hint, fx_node=fx_node
)
setattr(SymNode, f"_{method_attr}", sym_ite_impl)
elif method == "round":
def round_impl(self, ndigits=None):
from torch.fx.experimental.proxy_tensor import (
get_proxy_mode,
handle_sym_dispatch,
)
op = builtins.round
if get_proxy_mode():
return to_node(
self, handle_sym_dispatch(op, (wrap_node(self), ndigits), {})
)
expr = self.expr
try:
out = func(expr, ndigits)
except Exception:
log.warning("failed to eval %s(%s, ndigits=%s)", method, expr, ndigits)
raise
if ndigits is None:
pytype = int
else:
pytype = self.pytype
out_hint = None
if self.hint is not None:
out_hint = op(self.hint, ndigits)
# Internally, None is used as sentinel to indicate that a something is not a node on an FX graph. At the
# same time, there is no way to wrap a plain None into an FX node. Thus, there is no way to pass None here
# without triggering some asserts that check whether we are mixing FX nodes with untracked arguments. The
# hack down below works, because all round function down the line all take ndigits=None as default in their
# signature.
# TODO: Remove the args construction below if a different sentinel is used by FX.
# ezyang(May 2024): LOL
args = [self.fx_node]
if ndigits is not None:
args.append(ndigits)
fx_node, _ = self.shape_env._create_fx_call_function(op, tuple(args))
return SymNode(out, self.shape_env, pytype, out_hint, fx_node=fx_node)
setattr(SymNode, f"_{method_attr}", round_impl)
else:
setattr(SymNode, f"_{method_attr}", binary_magic_impl)
def _make_node_sizes_strides(method, func):
# NB: don't LRU cache, lots of arguments
def sizes_strides_impl(self, sizes, strides):
from torch.fx.experimental.proxy_tensor import (
get_proxy_mode,
handle_sym_dispatch,
)
op = getattr(sys.modules[__name__], method)
if get_proxy_mode():
return to_node(
self,
handle_sym_dispatch(
op,
([wrap_node(s) for s in sizes], [wrap_node(s) for s in strides]),
{},
),
)
size_exprs = [s.expr for s in sizes]
stride_exprs = [s.expr for s in strides]
try:
out = func(size_exprs, stride_exprs)
except Exception:
log.warning("failed to eval %s(%s, %s)", method, size_exprs, stride_exprs)
raise
# bool is never expandable
size_hints = []
out_hint = None
for s in sizes:
if s.hint is None:
break
size_hints.append(s.hint)
else:
stride_hints = []
for s in strides:
if s.hint is None:
break
stride_hints.append(s.hint)
else:
out_hint = op(size_hints, stride_hints)
# NB: This is the indicator function, not the actual bool!
pytype: type
if method.endswith("_indicator"):
pytype = int
else:
pytype = bool
return SymNode(out, self.shape_env, pytype, out_hint)
setattr(SymNode, f"_{method}", sizes_strides_impl)
# TODO: This is technically hotpath, but in the ideal end state
# guards on this will resolve at a higher level so you never
# spend time in this code
def sizes_strides_user(sizes, strides):
import sympy
from torch.fx.experimental.symbolic_shapes import (
eval_is_non_overlapping_and_dense,
)
for a in itertools.chain(sizes, strides):
if isinstance(a, SymInt):
return wrap_node(
getattr(a.node, method)(
[to_node(a.node, b) for b in sizes],
[to_node(a.node, b) for b in strides],
)
)
if method == "is_non_overlapping_and_dense_indicator":
return eval_is_non_overlapping_and_dense(sizes, strides)
else:
# TODO: this is an awful implementation
return bool(
func(
[sympy.sympify(a) for a in sizes],
[sympy.sympify(a) for a in strides],
)
)
# Skip for is_non_overlapping_and_dense_indicator
if not hasattr(sys.modules[__name__], method):
setattr(sys.modules[__name__], method, sizes_strides_user)
for method, func in magic_methods.items():
_make_node_magic(method, func)
for method, func in sizes_strides_methods.items():
_make_node_sizes_strides(method, func)
def _make_user_magic(method, user_type):
# User magic takes care of wrapping the other operand into a node,
# so that our internal logic can assume everything is nodes
if method in magic_methods_on_operator_with_trailing_underscore:
method_attr = f"sym_{method}"
else:
method_attr = method
def get_constant(x: Union[SymInt, int, SymFloat, float, SymBool, bool]):
if isinstance(x, (int, float, bool)):
return x
if isinstance(x, SymInt):
return x.node.guard_int("", 0)
if isinstance(x, SymBool):
return x.node.guard_bool("", 0)
raise AssertionError("expect to be called with constant SymBools")
def is_constant(x):
if isinstance(x, (int, float, bool)):
return True
if isinstance(x, (SymInt, SymFloat, SymBool)):
return x.node.is_constant()
return False
# Promotion rules for binary operations. NB: we preserve PYTHON semantics
# - if args are same type, do nothing
# - if one arg is float, promote other arg to float
# - nb: this applies to floordiv, even though output is integral
# (it's still float)
# - pow is funny business
# - if both ints
# - trigger a guard on exponent >= 0
# - if non-negative, output is int
# - otherwise, output is float
# - otherwise, promote other arg to float
# - nb: complex is impossible to handle correctly lol, with
# negative base and integral float need to diverge semantics and
# just always return complex. Neener neener pretend this problem
# doesn't exist
# - equality is pain: Python does the fancy thing where it unpacks the
# mantissa from the float and then compares that against the int.
# Which means it is able to tell that
# 9007199254740993 != 9007199254740992. (rather than if the LHS was
# promoted to float, in which case it would have truncated to the RHS
# and subsequently been equal). We'll model this exactly by having
# special mixed type equality operations. Unfortunately, we need to
# do this for all comparison operations (maybe I'll only implement
# compare)
# - sym_ite mumble mumble really shouldn't allow mixed but whatever
if method in bool_becomes_int_magic_methods:
def promote(x):
"""Implements True+True=2, which works in python but not sympy"""
if isinstance(x, SymBool):
return SymInt(x.node.wrap_int(int(x)))
return x
else:
def promote(x):
return x
def promote2(self, other):
# TODO: Remove eq and other relations from this list.
# CPython has fancy implementations for these to get as much precision
# as possible instead of just promoting to float64 and praying, so we
# need to handle them specially too.
# Also, note that int_truediv doesn't go through this path: both
# arguments are "int" so there isn't any promotion
if method not in [
"add",
"sub",
"mul",
"mod",
"float_pow",
"float_truediv",
"int_floordiv",
"sym_min",
"sym_max",
# TODO: remove these
"eq",
"ne",
"gt",
"lt",
"le",
"ge",
]:
return self, other
f_self = isinstance(self, (float, torch.SymFloat))
f_other = isinstance(other, (float, torch.SymFloat))
if f_self or f_other:
if not f_self:
self = torch.sym_float(self)
if not f_other:
other = torch.sym_float(other)
return self, other
# Before and after performing the operation, check if any operands are constant.
# If so, extract out the constant values first. If `self` itself is a
# constant, then "redispatch" by calling back into the operator. Sometimes
# this means that operations involving SymBool return plain bools.
# Alternatively, we could also rewrap into constant Symbool (i.e. by
# implementing wrap_bool in ConstantSymNodeImpl), but we're not doing that
# today for no particular reason.
def unary_magic_impl(self):
self = promote(self)
if is_constant(self):
return (method_to_operator(method))(get_constant(self))
return wrap_node(getattr(self.node, method_attr)())
def binary_magic_impl(self, other):
if not isinstance(other, (int, float, bool, SymInt, SymFloat, SymBool)):
return NotImplemented
sym_node_log.debug("MAGIC %s %s %s", method, self, other)
self = promote(self)
other = promote(other)
self, other = promote2(self, other)
if is_constant(self):
return (method_to_operator(method))(get_constant(self), other)
if is_constant(other):
other = get_constant(other)
other_node = to_node(self.node, other)
if other_node is NotImplemented:
return NotImplemented
ret = wrap_node(getattr(self.node, method_attr)(other_node))
return get_constant(ret) if is_constant(ret) else ret
def rbinary_magic_impl(self, other):
if not isinstance(other, (int, float, bool, SymInt, SymFloat, SymBool)):
return NotImplemented
self = promote(self)
other = promote(other)
self, other = promote2(self, other)
if is_constant(self):
return (method_to_operator(method))(other, get_constant(self))
if is_constant(other):
other = get_constant(other)
other_node = to_node(self.node, other)
if other_node is NotImplemented:
return NotImplemented
ret = wrap_node(getattr(other_node, method_attr)(self.node))
return get_constant(ret) if is_constant(ret) else ret
def setattrs(user_type, attr, symnode_impl):
"""
Registers the SymNode magic method on SymInt/Float/Bool,
and optionally registers a corresponding wrapped method on DynamicInt.
"""
# SymInt/Float/Bool
setattr(user_type, attr, symnode_impl)
# DynamicInt impl
def dynamic_int_impl(*args):
args = [x.real if isinstance(x, DynamicInt) else x for x in args]
out = getattr(int, attr)(*args)
if isinstance(out, int) and not isinstance(out, bool):
return DynamicInt(out)
return out
if user_type is SymInt:
setattr(DynamicInt, attr, dynamic_int_impl)
if method in unary_magic_methods:
setattrs(user_type, f"__{method}__", unary_magic_impl)
elif method in unary_nonmagic_methods:
orig = getattr(user_type, method)
setattrs(user_type, method, update_wrapper(unary_magic_impl, orig))
elif method == "sym_ite":
def sym_ite_magic_impl(pred, then_val, else_val):
pred_node = pred.node
then_node = to_node(pred_node, then_val)
else_node = to_node(pred_node, else_val)
if then_node is NotImplemented or else_node is NotImplemented:
return NotImplemented
assert (
isinstance(then_node, SymNode)
and isinstance(else_node, SymNode)
and then_node.pytype == else_node.pytype
)
ret = wrap_node(getattr(pred.node, method_attr)(then_node, else_node))
return get_constant(ret) if ret.node.is_constant() else ret
setattrs(user_type, f"__{method}__", sym_ite_magic_impl)
elif method == "round":
def round_magic_impl(self, ndigits=None):
if is_constant(self):
return builtins.round(get_constant(self), ndigits)
return wrap_node(getattr(self.node, method)(ndigits))
setattrs(user_type, f"__{method}__", round_magic_impl)
else:
method_name = method
if method in bitwise_ops:
method_name = bitwise_ops[method]
setattrs(user_type, f"__{method_name}__", binary_magic_impl)
if method in reflectable_magic_methods:
setattrs(user_type, f"__r{method_name}__", rbinary_magic_impl)
for method in magic_methods: # type: ignore[assignment]
if method in only_bool_magic_methods:
_make_user_magic(method, SymBool)
continue
if method in only_float_magic_methods:
_make_user_magic(method, SymFloat)
continue
if method in also_bool_magic_methods or method in bool_becomes_int_magic_methods:
_make_user_magic(method, SymBool)
_make_user_magic(method, SymInt)
if method not in bitwise_ops:
_make_user_magic(method, SymFloat)
del method
del func
| DynamicInt |
python | TheAlgorithms__Python | machine_learning/gradient_boosting_classifier.py | {
"start": 203,
"end": 4275
} | class ____:
def __init__(self, n_estimators: int = 100, learning_rate: float = 0.1) -> None:
"""
Initialize a GradientBoostingClassifier.
Parameters:
- n_estimators (int): The number of weak learners to train.
- learning_rate (float): The learning rate for updating the model.
Attributes:
- n_estimators (int): The number of weak learners.
- learning_rate (float): The learning rate.
- models (list): A list to store the trained weak learners.
"""
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.models: list[tuple[DecisionTreeRegressor, float]] = []
def fit(self, features: np.ndarray, target: np.ndarray) -> None:
"""
Fit the GradientBoostingClassifier to the training data.
Parameters:
- features (np.ndarray): The training features.
- target (np.ndarray): The target values.
Returns:
None
>>> import numpy as np
>>> from sklearn.datasets import load_iris
>>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1)
>>> iris = load_iris()
>>> X, y = iris.data, iris.target
>>> clf.fit(X, y)
>>> # Check if the model is trained
>>> len(clf.models) == 100
True
"""
for _ in range(self.n_estimators):
# Calculate the pseudo-residuals
residuals = -self.gradient(target, self.predict(features))
# Fit a weak learner (e.g., decision tree) to the residuals
model = DecisionTreeRegressor(max_depth=1)
model.fit(features, residuals)
# Update the model by adding the weak learner with a learning rate
self.models.append((model, self.learning_rate))
def predict(self, features: np.ndarray) -> np.ndarray:
"""
Make predictions on input data.
Parameters:
- features (np.ndarray): The input data for making predictions.
Returns:
- np.ndarray: An array of binary predictions (-1 or 1).
>>> import numpy as np
>>> from sklearn.datasets import load_iris
>>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1)
>>> iris = load_iris()
>>> X, y = iris.data, iris.target
>>> clf.fit(X, y)
>>> y_pred = clf.predict(X)
>>> # Check if the predictions have the correct shape
>>> y_pred.shape == y.shape
True
"""
# Initialize predictions with zeros
predictions = np.zeros(features.shape[0])
for model, learning_rate in self.models:
predictions += learning_rate * model.predict(features)
return np.sign(predictions) # Convert to binary predictions (-1 or 1)
def gradient(self, target: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
"""
Calculate the negative gradient (pseudo-residuals) for logistic loss.
Parameters:
- target (np.ndarray): The target values.
- y_pred (np.ndarray): The predicted values.
Returns:
- np.ndarray: An array of pseudo-residuals.
>>> import numpy as np
>>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1)
>>> target = np.array([0, 1, 0, 1])
>>> y_pred = np.array([0.2, 0.8, 0.3, 0.7])
>>> residuals = clf.gradient(target, y_pred)
>>> # Check if residuals have the correct shape
>>> residuals.shape == target.shape
True
"""
return -target / (1 + np.exp(target * y_pred))
if __name__ == "__main__":
iris = load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy:.2f}")
| GradientBoostingClassifier |
python | sphinx-doc__sphinx | tests/roots/test-prolog/prolog_markdown_parser.py | {
"start": 38,
"end": 299
} | class ____(Parser):
supported = ('markdown',)
def parse(self, inputstring, document):
document.rawsource = inputstring
def setup(app):
app.add_source_suffix('.md', 'markdown')
app.add_source_parser(DummyMarkdownParser)
| DummyMarkdownParser |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/core_tests/test_mask_user_code_errors.py | {
"start": 706,
"end": 888
} | class ____(Exception):
def __init__(self):
super().__init__(
"This is an error which has some sensitive information! My password is hunter2"
)
| UserError |
python | django__django | tests/model_forms/tests.py | {
"start": 105053,
"end": 108545
} | class ____(SimpleTestCase):
def test_big_integer_field(self):
bif = BigIntForm({"biggie": "-9223372036854775808"})
self.assertTrue(bif.is_valid())
bif = BigIntForm({"biggie": "-9223372036854775809"})
self.assertFalse(bif.is_valid())
self.assertEqual(
bif.errors,
{
"biggie": [
"Ensure this value is greater than or equal to "
"-9223372036854775808."
]
},
)
bif = BigIntForm({"biggie": "9223372036854775807"})
self.assertTrue(bif.is_valid())
bif = BigIntForm({"biggie": "9223372036854775808"})
self.assertFalse(bif.is_valid())
self.assertEqual(
bif.errors,
{
"biggie": [
"Ensure this value is less than or equal to 9223372036854775807."
]
},
)
def test_url_on_modelform(self):
"Check basic URL field validation on model forms"
class HomepageForm(forms.ModelForm):
class Meta:
model = Homepage
fields = "__all__"
self.assertFalse(HomepageForm({"url": "foo"}).is_valid())
self.assertFalse(HomepageForm({"url": "http://"}).is_valid())
self.assertFalse(HomepageForm({"url": "http://example"}).is_valid())
self.assertFalse(HomepageForm({"url": "http://example."}).is_valid())
self.assertFalse(HomepageForm({"url": "http://com."}).is_valid())
self.assertTrue(HomepageForm({"url": "http://localhost"}).is_valid())
self.assertTrue(HomepageForm({"url": "http://example.com"}).is_valid())
self.assertTrue(HomepageForm({"url": "http://www.example.com"}).is_valid())
self.assertTrue(HomepageForm({"url": "http://www.example.com:8000"}).is_valid())
self.assertTrue(HomepageForm({"url": "http://www.example.com/test"}).is_valid())
self.assertTrue(
HomepageForm({"url": "http://www.example.com:8000/test"}).is_valid()
)
self.assertTrue(HomepageForm({"url": "http://example.com/foo/bar"}).is_valid())
def test_modelform_non_editable_field(self):
"""
When explicitly including a non-editable field in a ModelForm, the
error message should be explicit.
"""
# 'created', non-editable, is excluded by default
self.assertNotIn("created", ArticleForm().fields)
msg = (
"'created' cannot be specified for Article model form as it is a "
"non-editable field"
)
with self.assertRaisesMessage(FieldError, msg):
class InvalidArticleForm(forms.ModelForm):
class Meta:
model = Article
fields = ("headline", "created")
def test_https_prefixing(self):
"""
If the https:// prefix is omitted on form input, the field adds it
again.
"""
class HomepageForm(forms.ModelForm):
class Meta:
model = Homepage
fields = "__all__"
form = HomepageForm({"url": "example.com"})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data["url"], "https://example.com")
form = HomepageForm({"url": "example.com/test"})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data["url"], "https://example.com/test")
| ModelOtherFieldTests |
python | astropy__astropy | astropy/coordinates/tests/test_representation.py | {
"start": 4336,
"end": 19921
} | class ____:
def test_name(self):
assert SphericalRepresentation.name == "spherical"
assert SphericalRepresentation.name in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = SphericalRepresentation()
def test_init_quantity(self):
s3 = SphericalRepresentation(
lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc
)
assert s3.lon == 8.0 * u.hourangle
assert s3.lat == 5.0 * u.deg
assert s3.distance == 10 * u.kpc
assert isinstance(s3.lon, Longitude)
assert isinstance(s3.lat, Latitude)
assert isinstance(s3.distance, Distance)
def test_init_no_mutate_input(self):
lon = -1 * u.hourangle
s = SphericalRepresentation(
lon=lon, lat=-1 * u.deg, distance=1 * u.kpc, copy=True
)
# The longitude component should be wrapped at 24 hours
assert_allclose_quantity(s.lon, 23 * u.hourangle)
# The input should not have been mutated by the constructor
assert_allclose_quantity(lon, -1 * u.hourangle)
def test_init_lonlat(self):
s2 = SphericalRepresentation(
Longitude(8, u.hour), Latitude(5, u.deg), Distance(10, u.kpc)
)
assert s2.lon == 8.0 * u.hourangle
assert s2.lat == 5.0 * u.deg
assert s2.distance == 10.0 * u.kpc
assert isinstance(s2.lon, Longitude)
assert isinstance(s2.lat, Latitude)
assert isinstance(s2.distance, Distance)
# also test that wrap_angle is preserved
s3 = SphericalRepresentation(
Longitude(-90, u.degree, wrap_angle=180 * u.degree),
Latitude(-45, u.degree),
Distance(1.0, u.Rsun),
)
assert s3.lon == -90.0 * u.degree
assert s3.lon.wrap_angle == 180 * u.degree
def test_init_subclass(self):
class Longitude180(Longitude):
_default_wrap_angle = 180 * u.degree
s = SphericalRepresentation(
Longitude180(-90, u.degree), Latitude(-45, u.degree), Distance(1.0, u.Rsun)
)
assert isinstance(s.lon, Longitude180)
assert s.lon == -90.0 * u.degree
assert s.lon.wrap_angle == 180 * u.degree
def test_init_array(self):
s1 = SphericalRepresentation(
lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg, distance=[1, 2] * u.kpc
)
assert_allclose(s1.lon.degree, [120, 135])
assert_allclose(s1.lat.degree, [5, 6])
assert_allclose(s1.distance.kpc, [1, 2])
assert isinstance(s1.lon, Longitude)
assert isinstance(s1.lat, Latitude)
assert isinstance(s1.distance, Distance)
def test_init_array_nocopy(self):
lon = Longitude([8, 9] * u.hourangle)
lat = Latitude([5, 6] * u.deg)
distance = Distance([1, 2] * u.kpc)
s1 = SphericalRepresentation(lon=lon, lat=lat, distance=distance, copy=False)
lon[:] = [1, 2] * u.rad
lat[:] = [3, 4] * u.arcmin
distance[:] = [8, 9] * u.Mpc
assert_allclose_quantity(lon, s1.lon)
assert_allclose_quantity(lat, s1.lat)
assert_allclose_quantity(distance, s1.distance)
def test_init_float32_array(self):
"""Regression test against #2983"""
lon = Longitude(np.float32([1.0, 2.0]), u.degree)
lat = Latitude(np.float32([3.0, 4.0]), u.degree)
s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False)
assert s1.lon.dtype == np.float32
assert s1.lat.dtype == np.float32
assert s1._values["lon"].dtype == np.float32
assert s1._values["lat"].dtype == np.float32
def test_reprobj(self):
s1 = SphericalRepresentation(
lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc
)
s2 = SphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.lon, 8.0 * u.hourangle)
assert_allclose_quantity(s2.lat, 5.0 * u.deg)
assert_allclose_quantity(s2.distance, 10 * u.kpc)
s3 = SphericalRepresentation(s1)
assert representation_equal(s1, s3)
def test_broadcasting(self):
s1 = SphericalRepresentation(
lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg, distance=10 * u.kpc
)
assert_allclose_quantity(s1.lon, [120, 135] * u.degree)
assert_allclose_quantity(s1.lat, [5, 6] * u.degree)
assert_allclose_quantity(s1.distance, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = SphericalRepresentation(
lon=[8, 9, 10] * u.hourangle,
lat=[5, 6] * u.deg,
distance=[1, 2] * u.kpc,
)
assert (
exc.value.args[0]
== "Input parameters lon, lat, and distance cannot be broadcast"
)
def test_broadcasting_and_nocopy(self):
s1 = SphericalRepresentation(
lon=[200] * u.deg, lat=[0] * u.deg, distance=[0] * u.kpc, copy=False
)
# With no copying, we should be able to modify the wrap angle of the longitude component
s1.lon.wrap_angle = 180 * u.deg
s2 = SphericalRepresentation(
lon=[200] * u.deg, lat=0 * u.deg, distance=0 * u.kpc, copy=False
)
# We should be able to modify the wrap angle of the longitude component even if other
# components need to be broadcasted
s2.lon.wrap_angle = 180 * u.deg
def test_readonly(self):
s1 = SphericalRepresentation(
lon=8 * u.hourangle, lat=5 * u.deg, distance=1.0 * u.kpc
)
with pytest.raises(AttributeError):
s1.lon = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.lat = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.distance = 1.0 * u.kpc
def test_getitem_len_iterable(self):
s = SphericalRepresentation(
lon=np.arange(10) * u.deg, lat=-np.arange(10) * u.deg, distance=1 * u.kpc
)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg)
assert_allclose_quantity(s_slc.distance, [1, 1, 1] * u.kpc)
assert len(s) == 10
assert np.iterable(s)
def test_getitem_len_iterable_scalar(self):
s = SphericalRepresentation(lon=1 * u.deg, lat=-2 * u.deg, distance=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
with pytest.raises(TypeError):
len(s)
assert not np.iterable(s)
def test_setitem(self):
s = SphericalRepresentation(
lon=np.arange(5) * u.deg, lat=-np.arange(5) * u.deg, distance=1 * u.kpc
)
s[:2] = SphericalRepresentation(
lon=10.0 * u.deg, lat=2.0 * u.deg, distance=5.0 * u.kpc
)
assert_allclose_quantity(s.lon, [10, 10, 2, 3, 4] * u.deg)
assert_allclose_quantity(s.lat, [2, 2, -2, -3, -4] * u.deg)
assert_allclose_quantity(s.distance, [5, 5, 1, 1, 1] * u.kpc)
def test_negative_distance(self):
"""Only allowed if explicitly passed on."""
with pytest.raises(ValueError, match="allow_negative"):
SphericalRepresentation(10 * u.deg, 20 * u.deg, -10 * u.m)
s1 = SphericalRepresentation(
10 * u.deg, 20 * u.deg, Distance(-10 * u.m, allow_negative=True)
)
assert s1.distance == -10.0 * u.m
def test_nan_distance(self):
"""This is a regression test: calling represent_as() and passing in the
same class as the object shouldn't round-trip through cartesian.
"""
sph = SphericalRepresentation(1 * u.deg, 2 * u.deg, np.nan * u.kpc)
new_sph = sph.represent_as(SphericalRepresentation)
assert_allclose_quantity(new_sph.lon, sph.lon)
assert_allclose_quantity(new_sph.lat, sph.lat)
dif = SphericalCosLatDifferential(
1 * u.mas / u.yr, 2 * u.mas / u.yr, 3 * u.km / u.s
)
sph = sph.with_differentials(dif)
new_sph = sph.represent_as(SphericalRepresentation)
assert_allclose_quantity(new_sph.lon, sph.lon)
assert_allclose_quantity(new_sph.lat, sph.lat)
def test_raise_on_extra_arguments(self):
with pytest.raises(TypeError, match="got multiple values"):
SphericalRepresentation(1 * u.deg, 2 * u.deg, 1.0 * u.kpc, lat=10)
with pytest.raises(TypeError, match="unexpected keyword.*parrot"):
SphericalRepresentation(1 * u.deg, 2 * u.deg, 1.0 * u.kpc, parrot=10)
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
difs = SphericalCosLatDifferential(
4 * u.mas / u.yr, 5 * u.mas / u.yr, 6 * u.km / u.s
)
sph = SphericalRepresentation(
1 * u.deg, 2 * u.deg, 3 * u.kpc, differentials={"s": difs}
)
got = sph.represent_as(
PhysicsSphericalRepresentation, PhysicsSphericalDifferential
)
assert np.may_share_memory(sph.lon, got.phi)
assert np.may_share_memory(sph.distance, got.r)
expected = BaseRepresentation.represent_as(
sph, PhysicsSphericalRepresentation, PhysicsSphericalDifferential
)
# equal up to angular type
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(UnitSphericalRepresentation, UnitSphericalDifferential)
assert np.may_share_memory(sph.lon, got.lon)
assert np.may_share_memory(sph.lat, got.lat)
expected = BaseRepresentation.represent_as(
sph, UnitSphericalRepresentation, UnitSphericalDifferential
)
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(RadialRepresentation, RadialDifferential)
assert np.may_share_memory(sph.distance, got.distance)
expected = BaseRepresentation.represent_as(
sph, RadialRepresentation, RadialDifferential
)
assert representation_equal_up_to_angular_type(got, expected)
def test_transform(self):
"""Test ``.transform()`` on rotation and general matrices."""
# set up representation
ds1 = SphericalDifferential(
d_lon=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,
d_distance=[-5, 6] * u.km / u.s,
)
s1 = SphericalRepresentation(
lon=[1, 2] * u.deg,
lat=[3, 4] * u.deg,
distance=[5, 6] * u.kpc,
differentials=ds1,
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = SphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.lon, s1.lon + 10 * u.deg)
assert_allclose_quantity(s2.lat, s1.lat)
assert_allclose_quantity(s2.distance, s1.distance)
# check differentials. they shouldn't have changed.
assert_allclose_quantity(ds2.d_lon, ds1.d_lon)
assert_allclose_quantity(ds2.d_lat, ds1.d_lat)
assert_allclose_quantity(ds2.d_distance, ds1.d_distance)
assert_allclose_quantity(ds2.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds2.d_distance, dexpected.d_distance)
# now with a non rotation matrix
# transform representation & get comparison (thru CartesianRep)
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
expected = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(SphericalRepresentation, SphericalDifferential)
)
dexpected = expected.differentials["s"]
assert_allclose_quantity(s3.lon, expected.lon)
assert_allclose_quantity(s3.lat, expected.lat)
assert_allclose_quantity(s3.distance, expected.distance)
assert_allclose_quantity(ds3.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds3.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds3.d_distance, dexpected.d_distance)
def test_transform_with_NaN(self):
# all over again, but with a NaN in the distance
ds1 = SphericalDifferential(
d_lon=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,
d_distance=[-5, 6] * u.km / u.s,
)
s1 = SphericalRepresentation(
lon=[1, 2] * u.deg,
lat=[3, 4] * u.deg,
distance=[5, np.nan] * u.kpc,
differentials=ds1,
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = SphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.lon, s1.lon + 10 * u.deg)
assert_allclose_quantity(s2.lat, s1.lat)
assert_allclose_quantity(s2.distance, s1.distance)
assert_allclose_quantity(ds2.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds2.d_distance, dexpected.d_distance)
# the 2nd component is NaN since the 2nd distance is NaN
# TODO! this will change when ``.transform`` skips Cartesian
assert_array_equal(np.isnan(ds2.d_lon), (False, True))
assert_array_equal(np.isnan(ds2.d_lat), (False, True))
assert_array_equal(np.isnan(ds2.d_distance), (False, True))
# now with a non rotation matrix
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
thruC = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(
SphericalRepresentation, differential_class=SphericalDifferential
)
)
dthruC = thruC.differentials["s"]
# s3 should not propagate Nan.
assert_array_equal(np.isnan(s3.lon), (False, False))
assert_array_equal(np.isnan(s3.lat), (False, False))
assert_array_equal(np.isnan(s3.distance), (False, True))
# ds3 does b/c currently aren't any shortcuts on the transform
assert_array_equal(np.isnan(ds3.d_lon), (False, True))
assert_array_equal(np.isnan(ds3.d_lat), (False, True))
assert_array_equal(np.isnan(ds3.d_distance), (False, True))
# through Cartesian should
assert_array_equal(np.isnan(thruC.lon), (False, True))
assert_array_equal(np.isnan(thruC.lat), (False, True))
assert_array_equal(np.isnan(thruC.distance), (False, True))
assert_array_equal(np.isnan(dthruC.d_lon), (False, True))
assert_array_equal(np.isnan(dthruC.d_lat), (False, True))
assert_array_equal(np.isnan(dthruC.d_distance), (False, True))
# test that they are close on the first value
assert_allclose_quantity(s3.lon[0], thruC.lon[0])
assert_allclose_quantity(s3.lat[0], thruC.lat[0])
assert_allclose_quantity(ds3.d_lon[0], dthruC.d_lon[0])
assert_allclose_quantity(ds3.d_lat[0], dthruC.d_lat[0])
| TestSphericalRepresentation |
python | sqlalchemy__sqlalchemy | test/engine/test_execute.py | {
"start": 35753,
"end": 36661
} | class ____(fixtures.TestBase):
def _engine_fixture(self):
buf = StringIO()
def dump(sql, *multiparams, **params):
buf.write(str(sql.compile(dialect=engine.dialect)))
engine = create_mock_engine("postgresql+psycopg2://", executor=dump)
return engine, buf
def test_sequence_not_duped(self):
engine, buf = self._engine_fixture()
metadata = MetaData()
t = Table(
"testtable",
metadata,
Column(
"pk",
Integer,
normalize_sequence(config, Sequence("testtable_pk_seq")),
primary_key=True,
),
)
t.create(engine)
t.drop(engine)
eq_(re.findall(r"CREATE (\w+)", buf.getvalue()), ["SEQUENCE", "TABLE"])
eq_(re.findall(r"DROP (\w+)", buf.getvalue()), ["TABLE", "SEQUENCE"])
| MockStrategyTest |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 1286,
"end": 1446
} | class ____(ApeException, NotImplementedError):
"""
An error raised when an API class does not implement an abstract method.
"""
| APINotImplementedError |
python | django__django | tests/fixtures_regress/models.py | {
"start": 3877,
"end": 3987
} | class ____(models.Manager):
def get_by_natural_key(self, data):
return self.get(data=data)
| NKManager |
python | encode__django-rest-framework | rest_framework/fields.py | {
"start": 8689,
"end": 9202
} | class ____(Exception):
pass
REGEX_TYPE = type(re.compile(''))
NOT_READ_ONLY_WRITE_ONLY = 'May not set both `read_only` and `write_only`'
NOT_READ_ONLY_REQUIRED = 'May not set both `read_only` and `required`'
NOT_REQUIRED_DEFAULT = 'May not set both `required` and `default`'
USE_READONLYFIELD = 'Field(read_only=True) should be ReadOnlyField'
MISSING_ERROR_MESSAGE = (
'ValidationError raised by `{class_name}`, but error key `{key}` does '
'not exist in the `error_messages` dictionary.'
)
| SkipField |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 25069,
"end": 25164
} | class ____(RunInput):
input_type: Literal["flow_run"] = "flow_run"
id: UUID
| FlowRunResult |
python | patrick-kidger__equinox | equinox/nn/_pool.py | {
"start": 12098,
"end": 13679
} | class ____(Pool):
"""Three-dimensional downsample using an average over a sliding window."""
def __init__(
self,
kernel_size: int | Sequence[int],
stride: int | Sequence[int] = 1,
padding: int | Sequence[int] | Sequence[tuple[int, int]] = 0,
use_ceil: bool = False,
):
"""**Arguments:**
- `kernel_size`: The size of the convolutional kernel.
- `stride`: The stride of the convolution.
- `padding`: The amount of padding to apply before and after each
spatial dimension.
- `use_ceil`: If `True`, then `ceil` is used to compute the final output
shape instead of `floor`. For `ceil`, if required, extra padding is added.
Defaults to `False`.
"""
super().__init__(
init=0,
operation=lax.add,
num_spatial_dims=3,
kernel_size=kernel_size,
stride=stride,
padding=padding,
use_ceil=use_ceil,
)
@named_scope("eqx.nn.AvgPool3d")
def __call__(self, x: Array, *, key: PRNGKeyArray | None = None) -> Array:
"""**Arguments:**
- `x`: The input. Should be a JAX array of shape
`(channels, dim_1, dim_2, dim_3)`.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
A JAX array of shape `(channels, new_dim_1, new_dim_2, new_dim_3)`.
"""
return super().__call__(x) / math.prod(self.kernel_size)
| AvgPool3d |
python | keon__algorithms | algorithms/tree/trie/trie.py | {
"start": 167,
"end": 296
} | class ____:
def __init__(self):
self.children = collections.defaultdict(TrieNode)
self.is_word = False
| TrieNode |
python | astropy__astropy | astropy/nddata/covariance.py | {
"start": 2805,
"end": 38197
} | class ____(NDUncertainty):
r"""
A general utility for storing, manipulating, and I/O of covariance matrices.
Covariance matrices are symmetric by definition, :math:`\Sigma_{ij} =
\Sigma_{ji}`. The object therefore only stores the upper triangle of the
matrix using a `scipy.sparse.csr_matrix`. By default, instantiation will
check for symmetry and issue a warning if the matrix is not symmetric. This
check can be skipped using the ``assume_symmetric`` keyword. However, by
virtue of how the data is stored, symmetry is *always imposed* on the
matrix. That is, if a non-symmetric matrix is used to instantiate a
`Covariance` object, the stored data will yield a matrix that is different
from the original input.
Covariance matrices of higher dimensional arrays are always assumed to be
stored following row-major indexing. For example, the covariance value
:math:`\Sigma_{ij}` for an image of size :math:`(N_x,N_y)` is the covariance
between image pixels :math:`I_{x_i,y_i}` and :math:`I_{x_j,y_j}`, where
:math:`i = x_i + N_x y_i` and, similarly, :math:`j = x_j + N_x y_j`.
See :ref:`nddata-covariance` for additional documentation and examples.
Parameters
----------
array : array-like, `~scipy.sparse.csr_matrix`
Covariance matrix to store. If the array is not a
`~scipy.sparse.csr_matrix` instance, it must be convertible to one. To
match the calling sequence for `NDUncertainty`, ``array`` has a default
value of None, but the array *must* be provided for this `Covariance`
object.
data_shape : :obj:`tuple`, optional
The covariance data is for a higher dimensional array with this shape.
For example, if the covariance data is for a 2D image with shape
``(nx,ny)``, set ``data_shape=(nx,ny)``; the shape of the covariance
array must then be ``(nx*ny, nx*ny)``. If None, any higher
dimensionality is ignored.
assume_symmetric : bool, optional
Assume the matrix is symmetric. This means that a check for symmetry is
not performed, and the user is not warned if the matrix is not
symmetric.
unit : unit-like, optional
Unit for the covariance values.
Raises
------
TypeError
Raised if the input array not a `~scipy.sparse.csr_matrix` object and
cannot be converted to one.
ValueError
Raised if ``data_shape`` is provided and the input covariance matrix
``array`` does not have the expected shape or if ``array`` is None.
"""
def __init__(self, array=None, data_shape=None, assume_symmetric=False, unit=None):
if array is None:
raise ValueError("Covariance object cannot be instantiated with None.")
# Ingest the matrix
self._cov = triu(
Covariance._ingest_matrix(array, assume_symmetric=assume_symmetric)
)
# Save the diagonal as a variance array for convenience
self._var = self._cov.diagonal()
# Set the shape and check it; note self._cov must be defined so that
# call to self.shape below is valid.
self._data_shape = data_shape
if self._data_shape is not None and np.prod(self._data_shape) != self.shape[0]:
raise ValueError(
"Product of ``data_shape`` must match the covariance axis length."
)
# Workspace for index mapping from flattened to original data arrays
self._data_index_map = None
super().__init__(array=self._cov, copy=False, unit=unit)
    @staticmethod
    def _ingest_matrix(arr, assume_symmetric=False):
        """
        Helper method to ingest a covariance or correlation matrix.

        This function converts the input to a `~scipy.sparse.csr_matrix` using
        :func:`_get_csr`, and checks that the array is 2D, square, and symmetric.

        Parameters
        ----------
        arr : array-like
            An array that either is a `~scipy.sparse.csr_matrix` or can be converted
            to one.
        assume_symmetric : bool, optional
            Assume the matrix is symmetric. This means that a check for
            symmetry is not performed, and the user is not warned if the matrix
            is not symmetric.

        Returns
        -------
        `~scipy.sparse.csr_matrix`
            Converted or original matrix.

        Raises
        ------
        ValueError
            Raised if the input is not 2-dimensional or not square.
        """
        # Make sure it's a sparse matrix or can be converted to one.
        _arr = _get_csr(arr)
        # Check that it's 2D
        if _arr.ndim != 2:
            raise ValueError("Covariance arrays must be 2-dimensional.")
        # Check that it's square
        if _arr.shape[0] != _arr.shape[1]:
            raise ValueError("Covariance matrices must be square.")
        # Skip the symmetry check, if requested
        if assume_symmetric:
            return _arr
        # Check that it's symmetric: a symmetric matrix minus its transpose
        # has only (numerically) zero stored entries.
        flip_diff = _arr - _arr.T
        if not np.allclose(flip_diff.data, np.zeros_like(flip_diff.data)):
            warnings.warn(
                "Asymmetry detected in covariance/correlation matrix. Matrix will be modified "
                "to be symmetric using its upper triangle.",
                AstropyUserWarning,
            )
            # Symmetrize by mirroring the upper triangle below the diagonal.
            _arr = triu(_arr) + triu(_arr, 1).T
        return _arr
@property
def shape(self):
"""Tuple with the shape of the covariance matrix"""
return self._cov.shape
@property
def nnz(self):
"""
The number of non-zero (NNZ) elements in the full covariance matrix,
*including* both the upper and lower triangles.
"""
return self.stored_nnz * 2 - self._cov.shape[0]
@property
def stored_nnz(self):
"""
The number of non-zero elements stored by the object, which only
counts the non-zero elements in the upper triangle.
"""
return self._cov.nnz
@property
def variance(self):
"""
The diagonal of the covariance matrix.
"""
return self._var
@variance.setter
def variance(self, value):
raise NotImplementedError(
"Directly setting variance values is not allowed for Covariance objects."
)
@property
def uncertainty_type(self):
"""``"cov"``: `Covariance` implements a covariance matrix."""
return "cov"
    @property
    def quantity(self):
        """
        The covariance matrix as a dense `~astropy.units.Quantity` object.
        """
        # Expand to a dense symmetric array; copy=False avoids duplicating
        # the freshly created dense array.
        return Quantity(self.to_dense(), self.unit, copy=False, dtype=self._cov.dtype)
def _data_unit_to_uncertainty_unit(self, value):
"""
Return the uncertainty unit for covariances given the data unit.
"""
return value**2
def __repr__(self):
return f"<{self.__class__.__name__}; shape = {self.shape}>"
    # Skip error propagation for now
    def _propagate_add(self, other_uncert, result_data, correlation):
        # Placeholder: covariance propagation is not yet implemented.
        return None
    def _propagate_subtract(self, other_uncert, result_data, correlation):
        # Placeholder: covariance propagation is not yet implemented.
        return None
    def _propagate_multiply(self, other_uncert, result_data, correlation):
        # Placeholder: covariance propagation is not yet implemented.
        return None
    def _propagate_divide(self, other_uncert, result_data, correlation):
        # Placeholder: covariance propagation is not yet implemented.
        return None
@classmethod
def from_samples(cls, samples, cov_tol=None, rho_tol=None, **kwargs):
r"""
Build a covariance object using discrete samples.
The covariance is generated using `~numpy.cov` for a set of discretely
sampled data for an :math:`N`-dimensional parameter space.
Parameters
----------
samples : `~numpy.ndarray`
Array with samples drawn from an :math:`N`-dimensional parameter
space. The shape of the input array must be :math:`N_{\rm par}\times
N_{\rm samples}`.
cov_tol : :obj:`float`, optional
The absolute value of any *covariance matrix* entry less than this
is assumed to be equivalent to (and set to) 0.
rho_tol : :obj:`float`, optional
The absolute value of any *correlation coefficient* less than this
is assumed to be equivalent to (and set to) 0.
**kwargs : dict, optional
Passed directly to main instantiation method.
Returns
-------
`Covariance`
An :math:`N_{\rm par}\times N_{\rm par}` covariance matrix built
using the provided samples.
Raises
------
ValueError
Raised if the input array is not 2D or if the number of samples (length
of the second axis) is less than 2.
"""
if samples.ndim != 2:
raise ValueError("Input samples for covariance matrix must be a 2D array!")
if samples.shape[1] < 2:
raise ValueError("Fewer than two samples provided!")
return Covariance.from_array(
np.cov(samples), cov_tol=cov_tol, rho_tol=rho_tol, **kwargs
)
    @classmethod
    def from_array(cls, covar, cov_tol=None, rho_tol=None, **kwargs):
        r"""
        Define a covariance object from an array.

        .. note::
            The only difference between this method and the direct instantiation
            method (i.e., ``Covariance(array=covar)``) is that it can be used to
            impose tolerances on the covariance value and/or correlation
            coefficients.

        Parameters
        ----------
        covar : array-like
            Array with the covariance data. The object must be either a
            `~scipy.sparse.csr_matrix` or an object that can be converted to
            one. It must also be 2-dimensional and square.
        cov_tol : :obj:`float`, optional
            The absolute value of any *covariance* matrix entry less than this
            is assumed to be equivalent to (and set to) 0.
        rho_tol : :obj:`float`, optional
            The absolute value of any *correlation coefficient* less than this
            is assumed to be equivalent to (and set to) 0.
        **kwargs : dict, optional
            Passed directly to main instantiation method.

        Returns
        -------
        `Covariance`
            The covariance matrix built using the provided array.
        """
        # Get the assume_symmetric flag, either from kwargs or as the default
        # value; it is consumed here so it is not passed twice to cls below.
        assume_symmetric = kwargs.pop("assume_symmetric", False)
        # Convert the covariance to a correlation matrix. If rho_tol is None,
        # this just serves to symmetrize the matrix if it's not already. Set
        # assume_symmetric to True hereafter
        var, rho = Covariance.to_correlation(covar, assume_symmetric=assume_symmetric)
        if rho_tol is not None:
            # Zero-out small correlation coefficients.
            rho = _impose_sparse_value_threshold(rho, rho_tol)
        # Rebuild the covariance from the (possibly thresholded) correlation.
        _covar = Covariance.revert_correlation(var, rho, assume_symmetric=True)
        if cov_tol is not None:
            # Zero-out small covariance values.
            _covar = _impose_sparse_value_threshold(_covar, cov_tol)
        return cls(array=_covar, assume_symmetric=True, **kwargs)
@classmethod
def from_table(cls, triu_covar):
r"""
Construct the covariance matrix from a table with the non-zero elements
of the upper triangle of the covariance matrix in coordinate format.
This is the inverse operation of :func:`to_table`. The class can read
covariance data written by other programs *as long as they have a
commensurate format*; see :func:`to_table`.
Parameters
----------
triu_covar : `~astropy.table.Table`
The non-zero elements of the upper triangle of the covariance matrix
in coordinate format; see :func:`to_table`.
Returns
-------
`Covariance`
The covariance matrix constructed from the tabulated data.
Raises
------
ValueError
Raised if ``triu_covar.meta`` is ``None``, if the provided variance array
does not have the correct size, or if the data is multidimensional
and the table columns do not have the right shape.
"""
# Read shapes
if "COVSHAPE" not in triu_covar.meta:
raise ValueError("Table meta dictionary *must* contain COVSHAPE")
shape = _parse_shape(triu_covar.meta["COVSHAPE"])
data_shape = (
_parse_shape(triu_covar.meta["COVDSHP"])
if "COVDSHP" in triu_covar.meta
else None
)
# Number of non-zero elements
nnz = len(triu_covar)
# Read coordinate data
# WARNING: If the data is written correctly, it should always be true that i<=j
if data_shape is None:
i = triu_covar["INDXI"].data
j = triu_covar["INDXJ"].data
else:
ndim = triu_covar["INDXI"].shape[1]
if len(data_shape) != ndim:
raise ValueError("Mismatch between COVDSHP keyword and tabulated data.")
i = np.ravel_multi_index(triu_covar["INDXI"].data.T, data_shape)
j = np.ravel_multi_index(triu_covar["INDXJ"].data.T, data_shape)
# Units
unit = triu_covar.meta.get("BUNIT", None)
# Set covariance data
cij = triu_covar["COVARIJ"].data
# NOTE: the astype conversion of cij when instantiating the matrix below
# is because of how scipy.sparse restricts instantiation of sparse
# arrays. It doesn't like big-endian byte order. To reproduce the
# underlying error:
# >>> import numpy as np
# >>> from scipy.sparse._sputils import getdtype
# >>> getdtype(np.dtype('>f8'))
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# File "/Users/westfall/.virtualenvs/astropy/lib/python3.12/site-packages/scipy/sparse/_sputils.py", line 137, in getdtype
# raise ValueError(f"scipy.sparse does not support dtype {newdtype.name}. "
# ValueError: scipy.sparse does not support dtype float64. The only supported types are: bool, int8, uint8, int16, uint16, int32, uint32, int64, uint64, longlong, ulonglong, float32, float64, longdouble, complex64, complex128, clongdouble.
# >>> getdtype(np.dtype('<f8'))
# dtype('float64')
cov = coo_matrix((cij.astype(cij.dtype.type), (i, j)), shape=shape).tocsr()
# Instantiate. Set assume_symmetric to true to avoid the warning from
# the _ingest_matrix method
return cls(array=cov, data_shape=data_shape, unit=unit, assume_symmetric=True)
    @classmethod
    def from_matrix_multiplication(cls, T, covar, **kwargs):
        r"""
        Construct the covariance matrix that results from a matrix
        multiplication.

        Linear operations on a dataset (e.g., binning or smoothing) can be
        written as matrix multiplications of the form

        .. math::
            {\mathbf y} = {\mathbf T}\ {\mathbf x},

        where :math:`{\mathbf T}` is a transfer matrix of size :math:`N_y\times
        N_x`, :math:`{\mathbf x}` is a vector of size :math:`N_x`, and
        :math:`{\mathbf y}` is a vector of length :math:`{N_y}` that results
        from the multiplication. If :math:`{\mathbf \Sigma}_x` is the
        covariance matrix for :math:`{\mathbf x}`, then the covariance matrix
        for :math:`{\mathbf Y}` is

        .. math::
            {\mathbf \Sigma}_y = {\mathbf T}\ {\mathbf \Sigma}_x\
            {\mathbf T}^\top.

        If ``covar`` is provided as a vector of length :math:`N_x`, it is
        assumed that the elements of :math:`{\mathbf X}` are independent and the
        provided vector gives the *variance* in each element; i.e., the provided
        data represent the diagonal of :math:`{\mathbf \Sigma}`.

        Parameters
        ----------
        T : `~scipy.sparse.csr_matrix`, `~numpy.ndarray`
            Transfer matrix. See above.
        covar : `~scipy.sparse.csr_matrix`, `~numpy.ndarray`
            Covariance matrix. See above.
        **kwargs : dict, optional
            Passed directly to main instantiation method.

        Returns
        -------
        `Covariance`
            The covariance matrix resulting from the matrix multiplication.

        Raises
        ------
        ValueError
            Raised if the provided arrays are not two dimensional or if there is
            a shape mismatch.
        """
        if T.ndim != 2:
            raise ValueError("Input transfer matrix must be two-dimensional.")
        nx = T.shape[1]
        if covar.shape != (nx, nx) and covar.shape != (nx,):
            raise ValueError(
                f"Shape of input variance matrix must be either ({nx}, {nx}) or ({nx},)."
            )
        # If it isn't already, convert T to a csr_matrix
        _T = T if isinstance(T, csr_matrix) else csr_matrix(T)
        # Set the covariance matrix in X: a 1D input is treated as the
        # diagonal (independent variances); otherwise ensure a csr_matrix.
        _covar = (
            coo_matrix((covar, (np.arange(nx), np.arange(nx))), shape=(nx, nx)).tocsr()
            if covar.ndim == 1
            else (covar if isinstance(covar, csr_matrix) else csr_matrix(covar))
        )
        # Construct the covariance matrix: T . Sigma_x . T^T
        return cls(_T.dot(_covar.dot(_T.transpose())).tocsr(), **kwargs)
@classmethod
def from_variance(cls, variance, **kwargs):
"""
Construct a diagonal covariance matrix using the provided variance.
Parameters
----------
variance : `~numpy.ndarray`
The variance vector.
**kwargs : dict, optional
Passed directly to main instantiation method.
Returns
-------
`Covariance`
The diagonal covariance matrix.
"""
return cls(csr_matrix(np.diagflat(variance)), **kwargs)
def to_sparse(self, correlation=False):
"""
Return the full covariance matrix as a `~scipy.sparse.csr_matrix`
object.
This method is essentially equivalent to `to_dense` except that it
returns a sparse array.
Parameters
----------
correlation : :obj:`bool`, optional
Return the *correlation* matrix. If False, return the covariance
matrix.
Returns
-------
`~scipy.sparse.csr_matrix`
The sparse matrix with both the upper and lower triangles filled
(with symmetric information).
"""
cov = triu(self._cov) + triu(self._cov, 1).T
if not correlation:
return cov
return Covariance.to_correlation(cov, assume_symmetric=True)[1]
    def apply_new_variance(self, var):
        """
        Using the same correlation coefficients, return a new `Covariance`
        object with the provided variance.

        Parameters
        ----------
        var : array-like
            Variance vector. Must have a length that matches this `Covariance`
            instance; e.g., if this instance is ``cov``, the length of ``var``
            must be ``cov.shape[0]``). Note that, if the covariance is for
            higher dimensional data, this variance array *must* be flattened to
            1D.

        Returns
        -------
        `Covariance`
            A covariance matrix with the same shape and correlation coefficients
            and this object, but with the provided variance.

        Raises
        ------
        ValueError
            Raised if the length of the variance vector is incorrect.
        """
        _var = np.asarray(var)
        if _var.shape != self._var.shape:
            raise ValueError(
                f"Provided variance has incorrect shape. Expected {self._var.shape}, "
                f"found {_var.shape}."
            )
        i, j, cij = find(self._cov)
        # Rescale each stored element by sqrt(v_i'/v_i * v_j'/v_j), which
        # preserves the correlation coefficients while swapping the variances.
        _cov = coo_matrix(
            (cij * np.sqrt(_var[i] / self._var[i] * _var[j] / self._var[j]), (i, j)),
            shape=self.shape,
        ).tocsr()
        # NOTE(review): the new object is built without this instance's unit;
        # confirm that dropping the unit here is intentional.
        return Covariance(
            array=_cov, data_shape=self._data_shape, assume_symmetric=True
        )
def copy(self):
"""
Return a copy of this Covariance object.
Returns
-------
`Covariance`
A copy of the current covariance matrix.
"""
# Create the new Covariance instance with a copy of the data
return Covariance(
array=self._cov.copy(),
data_shape=self._data_shape,
assume_symmetric=True,
unit=self.unit,
)
def to_dense(self, correlation=False):
"""
Return the full covariance matrix as a `numpy.ndarray` object (a "dense"
array).
Parameters
----------
correlation : bool, optional
Flag to return the correlation matrix, instead of the covariance
matrix. Note that setting this to ``True`` does *not* also return the
variance vector.
Returns
-------
`~numpy.ndarray`
Dense array with the full covariance matrix.
"""
return self.to_sparse(correlation=correlation).toarray()
def find(self, correlation=False):
"""
Find the non-zero values in the **full** covariance matrix (not just the
upper triangle).
This is a simple wrapper for `to_sparse` and `~scipy.sparse.find`.
Parameters
----------
correlation : bool, optional
Flag to return the correlation data, instead of the covariance data.
Note that setting this to ``True`` does *not* also return the variance
vector.
Returns
-------
i, j : `numpy.ndarray`
Arrays containing the index coordinates of the non-zero values in
the covariance (or correlation) matrix.
c : `numpy.ndarray`
The non-zero covariance (or correlation) matrix values located at
the provided ``i,j`` coordinates.
"""
return find(self.to_sparse(correlation=correlation))
def covariance_to_data_indices(self, i, j):
r"""
Given indices along the two axes of the covariance matrix, return the
relevant indices in the data array. This is the inverse of
:func:`data_to_covariance_indices`.
Parameters
----------
i : `~numpy.ndarray`
1D array with the index along the first axis of the covariance
matrix. Must be in the range :math:`0...n-1`, where :math:`n` is
the length of the covariance-matrix axes.
j : `~numpy.ndarray`
1D array with the index along the second axis of the covariance
matrix. Must be in the range :math:`0...n-1`, where :math:`n` is
the length of the covariance-matrix axes.
Returns
-------
i_data, i_data : tuple, `numpy.ndarray`
If `data_shape` is not defined, the input arrays are simply returned
(and not copied). Otherwise, the code uses `~numpy.unravel_index`
to calculate the relevant data-array indices; each element in the
two-tuple is itself a tuple of :math:`N_{\rm dim}` arrays, one array
per dimension of the data array.
Raises
------
ValueError
Raised if the provided indices fall outside the range of covariance
matrix.
"""
if self._data_shape is None:
if np.any(
(i < 0) | (i > self.shape[0] - 1) | (j < 0) | (j > self.shape[1] - 1)
):
raise ValueError(
"Some indices not valid for covariance matrix with shape "
f"{self.shape}."
)
return i, j
return np.unravel_index(
np.atleast_1d(i).ravel(), self._data_shape
), np.unravel_index(np.atleast_1d(j).ravel(), self._data_shape)
def data_to_covariance_indices(self, i, j):
r"""
Given indices of elements in the source data array, return the matrix
coordinates with the associated covariance. This is the inverse of
:func:`covariance_to_data_indices`.
Parameters
----------
i : array-like, tuple
A tuple of :math:`N_{\rm dim}` array-like objects providing the
indices of elements in the N-dimensional data array. This can be an
array-like object if ``data_shape`` is undefined, in which case the
values must be in the range :math:`0...n-1`, where :math:`n` is the
length of the data array.
j : array-like, tuple
The same as ``i``, but providing a second set of coordinates at which
to access the covariance.
Returns
-------
i_covar, j_covar : `numpy.ndarray`
Arrays providing the indices in the covariance matrix associated
with the provided data array coordinates. If ``data_shape`` is not
defined, the input arrays are simply returned (and not copied).
Otherwise, the code uses `~numpy.ravel_multi_index` to calculate the
relevant covariance indices.
Raises
------
ValueError
Raised if the provided indices fall outside the range of data array,
or if the length of the ``i`` or ``j`` tuples is not :math:`N_{\rm
dim}`.
"""
if self._data_shape is None:
if np.any(
(i < 0) | (i > self.shape[0] - 1) | (j < 0) | (j > self.shape[1] - 1)
):
raise ValueError(
"Some indices not valid for covariance matrix with shape "
f"{self.shape}."
)
return i, j
if len(i) != len(self.data_shape):
raise ValueError(
"Length of input coordinate list (i) is incorrect; expected "
f"{len(self.data_shape)}, found {len(i)}"
)
if len(j) != len(self.data_shape):
raise ValueError(
"Length of input coordinate list (j) is incorrect; expected "
f"{len(self.data_shape)}, found {len(i)}"
)
return np.ravel_multi_index(i, self.data_shape), np.ravel_multi_index(
j, self.data_shape
)
def coordinate_data(self, reshape=False):
r"""
Construct data arrays with the non-zero covariance components in
coordinate format.
Coordinate format means that the covariance matrix data is provided in
three columns providing :math:`\Sigma_{ij}` and the (0-indexed) matrix
coordinates :math:`i,j`.
This procedure is primarily used when constructing the data arrays for
storage. Matching the class convention, the returned data only includes
the upper triangle.
Parameters
----------
reshape : :obj:`bool`, optional
If ``reshape`` is ``True`` and `data_shape` is defined, the :math:`i,j`
indices are converted to the expected data-array indices; see
:func:`covariance_to_data_indices`. These can be reverted to the
coordinates in the covariance matrix using
:func:`data_to_covariance_indices`.
Returns
-------
i, j : tuple, `numpy.ndarray`
The row and column indices, :math:`i,j`: of the covariance matrix.
If reshaping, these are tuples with the index arrays along each of
the reshaped axes.
cij : `numpy.ndarray`
The covariance, :math:`\Sigma_{ij}`, between array elements at
indices :math:`i` and :math:`j`.
Raises
------
ValueError
Raised if ``reshape`` is True but `data_shape` is undefined.
"""
if reshape and self._data_shape is None:
raise ValueError(
"If reshaping, the shape of the data before flattening to the "
"covariance array (``data_shape``) must be defined."
)
# Get the data (only stores the upper triangle!)
i, j, cij = find(self._cov)
# Return the data.
if reshape:
# Reshape the indices and the variance array.
return (
np.unravel_index(i, self._data_shape),
np.unravel_index(j, self._data_shape),
cij,
)
return i, j, cij
    def to_table(self):
        r"""
        Return the covariance data in a `~astropy.table.Table` using coordinate
        format.

        Coordinate format means that the covariance matrix data is provided in
        three columns providing :math:`\Sigma_{ij}` and the (0-indexed) matrix
        coordinates :math:`i,j`.

        The output table has three columns:

        - ``'INDXI'``: The row index in the covariance matrix.
        - ``'INDXJ'``: The column index in the covariance matrix.
        - ``'COVARIJ'``: The covariance at the relevant :math:`i,j` coordinate.

        The table also contains the following metadata:

        - ``'COVSHAPE'``: The shape of the covariance matrix
        - ``'BUNIT'``: (If ``unit`` is defined) The string representation of the
          covariance units.
        - ``'COVDSHP'``: (If ``data_shape`` is defined) The shape of the
          associated data array.

        If ``data_shape`` is set, the covariance matrix indices are reformatted
        to match the coordinates in the N-dimensional array.

        .. warning::
            Recall that the storage of covariance matrices for higher
            dimensional data always assumes a row-major storage order.

        Objects instantiated by this method can be used to re-instantiate the
        `Covariance` object using `from_table`.

        Returns
        -------
        `~astropy.table.Table`
            Table with the covariance matrix in coordinate format and the
            relevant metadata.
        """
        meta = {}
        meta["COVSHAPE"] = str(self.shape)
        if self.unit is not None:
            meta["BUNIT"] = self.unit.to_string()
        # Only reshape the indices when a data shape was provided.
        reshape = self._data_shape is not None
        i, j, cij = self.coordinate_data(reshape=reshape)
        triu_nnz = cij.size
        if reshape:
            meta["COVDSHP"] = str(self._data_shape)
            # Stack the per-axis index tuples into (nnz, ndim) columns.
            i = np.column_stack(i)
            j = np.column_stack(j)
            coo_shape = (i.shape[1],)
        else:
            coo_shape = None
        return table.Table(
            [
                table.Column(
                    data=i, name="INDXI", dtype=int, length=triu_nnz, shape=coo_shape
                ),
                table.Column(
                    data=j, name="INDXJ", dtype=int, length=triu_nnz, shape=coo_shape
                ),
                table.Column(data=cij, name="COVARIJ", dtype=float, length=triu_nnz),
            ],
            meta=meta,
        )
@property
def data_shape(self):
"""
The expected shape of the data array associated with this covariance array.
"""
return (self.shape[0],) if self._data_shape is None else self._data_shape
@property
def data_index_map(self):
"""
An array mapping the index along each axis of the covariance matrix to
the shape of the associated data array.
"""
if self._data_index_map is None:
self._data_index_map = np.arange(self.shape[0])
if self._data_shape is not None:
self._data_index_map = self._data_index_map.reshape(self._data_shape)
return self._data_index_map
    def match_to_data_slice(self, data_slice):
        """
        Return a new `Covariance` instance that is matched to a slice of its
        parent data array.

        Parameters
        ----------
        data_slice : slice, array-like
            Anything that can be used to slice a `numpy.ndarray`. To generate a
            slice using syntax that mimics accessing numpy array elements, use
            `numpy.s_`; see examples
            :ref:`here<covariance-match-to-data-slice>`.

        Returns
        -------
        `Covariance`
            A new covariance object for the sliced data array.
        """
        # Map the slice onto the covariance-axis indices via the cached index
        # map, then flatten to get the matrix rows/columns to extract.
        remap = self.data_index_map[data_slice]
        index = remap.ravel()
        # NOTE(review): the new object is built without this instance's unit;
        # confirm that dropping the unit here is intentional.
        return Covariance(
            self.to_sparse()[np.ix_(index, index)],
            data_shape=None if len(remap.shape) == 1 else remap.shape,
        )
    @staticmethod
    def to_correlation(cov, assume_symmetric=False):
        r"""
        Convert a covariance matrix into a correlation matrix by dividing each
        element by the variances.

        Specifically, extract ``var`` (:math:`V_i = C_{ii} \equiv \sigma^2_i`)
        and convert ``cov`` from a covariance matrix with elements
        :math:`C_{ij}` to a correlation matrix with :math:`\rho_{ij}` such that

        .. math::
            C_{ij} \equiv \rho_{ij} \sigma_i \sigma_j.

        To revert a variance vector and correlation matrix back to a covariance
        matrix, use :func:`revert_correlation`.

        Parameters
        ----------
        cov : array-like
            Covariance matrix to convert. Must be a `~scipy.sparse.csr_matrix`
            instance or convertible to one.
        assume_symmetric : bool, optional
            Assume the matrix is symmetric. This means that a check for
            symmetry is not performed, and the user is not warned if the matrix
            is not symmetric.

        Returns
        -------
        var : `numpy.ndarray`
            Variance vector
        rho : `~scipy.sparse.csr_matrix`
            Correlation matrix

        Raises
        ------
        ValueError
            Raised if the input array is not 2D and square.
        """
        # Ingest the matrix
        _cov = Covariance._ingest_matrix(cov, assume_symmetric=assume_symmetric)
        # Save the diagonal
        var = _cov.diagonal()
        # Find all the non-zero elements and normalize each by sigma_i*sigma_j.
        # NOTE(review): a zero variance at a non-zero element produces a
        # divide-by-zero here; confirm inputs are expected to have strictly
        # positive diagonals.
        i, j, cij = find(_cov)
        rho = coo_matrix(
            (cij / np.sqrt(var[i] * var[j]), (i, j)), shape=_cov.shape
        ).tocsr()
        return var, rho
@staticmethod
def revert_correlation(var, rho, assume_symmetric=False):
r"""
Revert a variance vector and correlation matrix into a covariance matrix.
This is the reverse operation of `to_correlation`.
Parameters
----------
var : `~numpy.ndarray`
Variance vector. Length must match the diagonal of ``rho``.
rho : `~numpy.ndarray`, `~scipy.sparse.csr_matrix`
Correlation matrix. Diagonal must have the same length as ``var``.
assume_symmetric : bool, optional
Assume the matrix is symmetric. This means that a check for
symmetry is not performed, and the user is not warned if the matrix
is not symmetric.
Returns
-------
`~scipy.sparse.csr_matrix`
Covariance matrix.
"""
i, j, rhoij = find(
Covariance._ingest_matrix(rho, assume_symmetric=assume_symmetric)
)
return coo_matrix(
(rhoij * np.sqrt(var[i] * var[j]), (i, j)), shape=rho.shape
).tocsr()
| Covariance |
python | pypa__pipenv | pipenv/patched/pip/_internal/metadata/importlib/_envs.py | {
"start": 971,
"end": 4376
} | class ____:
"""Finder to locate distributions.
The main purpose of this class is to memoize found distributions' names, so
only one distribution is returned for each package name. At lot of pip code
assumes this (because it is setuptools's behavior), and not doing the same
can potentially cause a distribution in lower precedence path to override a
higher precedence one if the caller is not careful.
Eventually we probably want to make it possible to see lower precedence
installations as well. It's useful feature, after all.
"""
FoundResult = Tuple[importlib.metadata.Distribution, Optional[BasePath]]
def __init__(self) -> None:
self._found_names: Set[NormalizedName] = set()
def _find_impl(self, location: str) -> Iterator[FoundResult]:
"""Find distributions in a location."""
# Skip looking inside a wheel. Since a package inside a wheel is not
# always valid (due to .data directories etc.), its .dist-info entry
# should not be considered an installed distribution.
if _looks_like_wheel(location):
return
# To know exactly where we find a distribution, we have to feed in the
# paths one by one, instead of dumping the list to importlib.metadata.
for dist in importlib.metadata.distributions(path=[location]):
info_location = get_info_location(dist)
try:
name = get_dist_canonical_name(dist)
except BadMetadata as e:
logger.warning("Skipping %s due to %s", info_location, e.reason)
continue
if name in self._found_names:
continue
self._found_names.add(name)
yield dist, info_location
def find(self, location: str) -> Iterator[BaseDistribution]:
"""Find distributions in a location.
The path can be either a directory, or a ZIP archive.
"""
for dist, info_location in self._find_impl(location):
if info_location is None:
installed_location: Optional[BasePath] = None
else:
installed_location = info_location.parent
yield Distribution(dist, info_location, installed_location)
def find_legacy_editables(self, location: str) -> Iterator[BaseDistribution]:
"""Read location in egg-link files and return distributions in there.
The path should be a directory; otherwise this returns nothing. This
follows how setuptools does this for compatibility. The first non-empty
line in the egg-link is read as a path (resolved against the egg-link's
containing directory if relative). Distributions found at that linked
location are returned.
"""
path = pathlib.Path(location)
if not path.is_dir():
return
for child in path.iterdir():
if child.suffix != ".egg-link":
continue
with child.open() as f:
lines = (line.strip() for line in f)
target_rel = next((line for line in lines if line), "")
if not target_rel:
continue
target_location = str(path.joinpath(target_rel))
for dist, info_location in self._find_impl(target_location):
yield Distribution(dist, info_location, path)
| _DistributionFinder |
python | allegroai__clearml | clearml/utilities/locks/utils.py | {
"start": 10664,
"end": 11389
} | class ____(Lock):
def __init__(
self,
filename: str = ".lock",
timeout: int = DEFAULT_TIMEOUT,
check_interval: float = DEFAULT_CHECK_INTERVAL,
fail_when_locked: bool = True,
flags: int = LOCK_METHOD,
) -> None:
Lock.__init__(
self,
filename=filename,
mode="w",
timeout=timeout,
check_interval=check_interval,
fail_when_locked=fail_when_locked,
flags=flags,
)
atexit.register(self.release)
def release(self) -> None:
Lock.release(self)
if os.path.isfile(self.filename): # pragma: no branch
os.unlink(self.filename)
| TemporaryFileLock |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/settings.py | {
"start": 31525,
"end": 32058
} | class ____:
width: int = parser.get_default("width")
height: int = parser.get_default("height")
quality_level: int = parser.get_default("quality_level")
time_scale: float = parser.get_default("time_scale")
target_frame_rate: int = parser.get_default("target_frame_rate")
capture_frame_rate: int = parser.get_default("capture_frame_rate")
no_graphics: bool = parser.get_default("no_graphics")
no_graphics_monitor: bool = parser.get_default("no_graphics_monitor")
@attr.s(auto_attribs=True)
| EngineSettings |
python | ethereum__web3.py | web3/exceptions.py | {
"start": 728,
"end": 905
} | class ____(Web3Exception, AssertionError):
"""
A web3.py exception wrapper for `AssertionError`, for better control over
exception handling.
"""
| Web3AssertionError |
python | tensorflow__tensorflow | tensorflow/python/checkpoint/tensor_callable.py | {
"start": 801,
"end": 1679
} | class ____(saveable_object.SaveSpec):
"""A callable that represents a Tensor that should be saved to checkpoint.
This can be returned from `_serialize_to_tensor` in place of a Tensor. The
callable will be executed on the specified device when the checkpoint is
about to be written.
Any class can use `Callable` for checkpointing, but for SavedModel export,
only resource-type variables* are supported.
* `resource_variable_ops.is_resource_variable(obj)` must return True.
"""
def __init__(self, tensor_callable, dtype, device):
"""Initializes a `Callable` object.
Args:
tensor_callable: A callable that takes no arguments and returns a Tensor.
dtype: Dtype of the tensor returned by the callable.
device: Device of the tensor returned by the callable.
"""
super().__init__(tensor_callable, None, None, dtype, device)
| Callable |
python | numba__numba | numba/core/typeconv/castgraph.py | {
"start": 87,
"end": 956
} | class ____(enum.IntEnum):
"""
A conversion kind from one type to the other. The enum members
are ordered from stricter to looser.
"""
# The two types are identical
exact = 1
# The two types are of the same kind, the destination type has more
# extension or precision than the source type (e.g. float32 -> float64,
# or int32 -> int64)
promote = 2
# The source type can be converted to the destination type without loss
# of information (e.g. int32 -> int64). Note that the conversion may
# still fail explicitly at runtime (e.g. Optional(int32) -> int32)
safe = 3
# The conversion may appear to succeed at runtime while losing information
# or precision (e.g. int32 -> uint32, float64 -> float32, int64 -> int32,
# etc.)
unsafe = 4
# This value is only used internally
nil = 99
| Conversion |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/ast_util.py | {
"start": 909,
"end": 2319
} | class ____(object):
"""NodeTransformer-like visitor that copies an AST."""
def __init__(self, preserve_annos):
super(CleanCopier, self).__init__()
self.preserve_annos = preserve_annos
def copy(self, node):
"""Returns a deep copy of node (excluding some fields, see copy_clean)."""
if isinstance(node, list):
return [self.copy(n) for n in node]
elif isinstance(node, tuple):
return tuple(self.copy(n) for n in node)
elif not isinstance(node, (gast.AST, ast.AST)):
# Assuming everything that's not an AST, list or tuple is a value type
# and may simply be assigned.
return node
assert isinstance(node, (gast.AST, ast.AST))
new_fields = {}
for f in node._fields:
if not f.startswith('__') and hasattr(node, f):
new_fields[f] = self.copy(getattr(node, f))
new_node = type(node)(**new_fields)
if self.preserve_annos:
for k in self.preserve_annos:
anno.copyanno(node, new_node, k)
return new_node
def copy_clean(node, preserve_annos=None):
"""Creates a deep copy of an AST.
The copy will not include fields that are prefixed by '__', with the
exception of user-specified annotations.
Args:
node: ast.AST
preserve_annos: Optional[Set[Hashable]], annotation keys to include in the
copy
Returns:
ast.AST
"""
return CleanCopier(preserve_annos).copy(node)
| CleanCopier |
python | huggingface__transformers | tests/models/glm4v/test_modeling_glm4v.py | {
"start": 1293,
"end": 6149
} | class ____:
def __init__(
self,
parent,
batch_size=3,
seq_length=7,
num_channels=3,
ignore_index=-100,
image_size=112,
video_start_token_id=3,
video_end_token_id=4,
image_start_token_id=5,
image_end_token_id=6,
image_token_id=7,
video_token_id=8,
is_training=True,
text_config={
"vocab_size": 99,
"hidden_size": 16,
"intermediate_size": 22,
"num_hidden_layers": 2,
"num_attention_heads": 2,
"num_key_value_heads": 1,
"output_channels": 64,
"hidden_act": "silu",
"max_position_embeddings": 512,
"rope_parameters": {"type": "default", "mrope_section": [2, 1, 1]},
"rope_theta": 10000,
"tie_word_embeddings": True,
"bos_token_id": 0,
"eos_token_id": 0,
"pad_token_id": 0,
},
vision_config={
"depth": 2,
"hidden_act": "silu",
"hidden_size": 48,
"out_hidden_size": 16,
"intermediate_size": 22,
"patch_size": 14,
"spatial_merge_size": 1,
"temporal_patch_size": 2,
},
):
self.parent = parent
self.ignore_index = ignore_index
self.bos_token_id = text_config["bos_token_id"]
self.eos_token_id = text_config["eos_token_id"]
self.pad_token_id = text_config["pad_token_id"]
self.video_start_token_id = video_start_token_id
self.video_end_token_id = video_end_token_id
self.image_start_token_id = image_start_token_id
self.image_end_token_id = image_end_token_id
self.image_token_id = image_token_id
self.video_token_id = video_token_id
self.text_config = text_config
self.vision_config = vision_config
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.is_training = is_training
self.hidden_size = text_config["hidden_size"]
self.num_hidden_layers = text_config["num_hidden_layers"]
self.num_attention_heads = text_config["num_attention_heads"]
self.vocab_size = text_config["vocab_size"]
self.num_image_tokens = 64
self.seq_length = seq_length + self.num_image_tokens
def get_config(self):
return Glm4vConfig(
text_config=self.text_config,
vision_config=self.vision_config,
image_token_id=self.image_token_id,
video_token_id=self.video_token_id,
video_start_token_id=self.video_start_token_id,
video_end_token_id=self.video_end_token_id,
image_start_token_id=self.image_start_token_id,
image_end_token_id=self.image_end_token_id,
)
def prepare_config_and_inputs(self):
config = self.get_config()
patch_size = config.vision_config.patch_size
temporal_patch_size = config.vision_config.temporal_patch_size
pixel_values = floats_tensor(
[
self.batch_size * (self.image_size**2) // (patch_size**2),
self.num_channels * (patch_size**2) * temporal_patch_size,
]
)
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
input_ids[input_ids == self.video_token_id] = self.pad_token_id
input_ids[input_ids == self.image_token_id] = self.pad_token_id
input_ids[input_ids == self.video_start_token_id] = self.pad_token_id
input_ids[input_ids == self.image_start_token_id] = self.pad_token_id
input_ids[input_ids == self.video_end_token_id] = self.pad_token_id
input_ids[input_ids == self.image_end_token_id] = self.pad_token_id
input_ids[:, 0] = self.image_start_token_id
input_ids[:, 1 : 1 + self.num_image_tokens] = self.image_token_id
input_ids[:, 1 + self.num_image_tokens] = self.image_end_token_id
patch_size = config.vision_config.patch_size
patches_per_side = self.image_size // patch_size
inputs_dict = {
"pixel_values": pixel_values,
"image_grid_thw": torch.tensor(
[[1, patches_per_side, patches_per_side]] * self.batch_size, device=torch_device
),
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
| Glm4vVisionText2TextModelTester |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 455937,
"end": 456174
} | class ____(Response):
"""
Response of tasks.ping endpoint.
"""
_service = "tasks"
_action = "ping"
_version = "2.23"
_schema = {"additionalProperties": False, "definitions": {}, "type": "object"}
| PingResponse |
python | getsentry__sentry | tests/sentry/incidents/endpoints/test_organization_alert_rule_details.py | {
"start": 2851,
"end": 8438
} | class ____(AlertRuleBase):
__test__ = Abstract(__module__, __qualname__)
endpoint = "sentry-api-0-organization-alert-rule-details"
@patch(
"sentry.seer.anomaly_detection.store_data.seer_anomaly_detection_connection_pool.urlopen"
)
def new_alert_rule(self, mock_seer_request, data=None):
mock_seer_request.return_value = HTTPResponse(orjson.dumps({"success": True}), status=200)
if data is None:
data = deepcopy(self.alert_rule_dict)
serializer = AlertRuleSerializer(
context={
"organization": self.organization,
"access": OrganizationGlobalAccess(self.organization, settings.SENTRY_SCOPES),
"user": self.user,
"installations": app_service.installations_for_organization(
organization_id=self.organization.id
),
},
data=data,
)
assert serializer.is_valid(), serializer.errors
alert_rule = serializer.save()
return alert_rule
def get_serialized_alert_rule(self):
# Only call after calling self.alert_rule to create it.
original_endpoint = self.endpoint
original_method = self.method
self.endpoint = "sentry-api-0-organization-alert-rules"
self.method = "get"
with self.feature("organizations:incidents"):
resp = self.get_success_response(self.organization.slug)
assert len(resp.data) >= 1
serialized_alert_rule = resp.data[0]
if serialized_alert_rule["environment"]:
serialized_alert_rule["environment"] = serialized_alert_rule["environment"][0]
else:
serialized_alert_rule.pop("environment", None)
self.endpoint = original_endpoint
self.method = original_method
return serialized_alert_rule
@cached_property
def alert_rule(self):
return self.new_alert_rule(data=deepcopy(self.alert_rule_dict))
@cached_property
def dynamic_alert_rule(self):
return self.new_alert_rule(data=deepcopy(self.dynamic_alert_rule_dict))
@cached_property
def valid_params(self):
email_action_type = AlertRuleTriggerAction.get_registered_factory(
AlertRuleTriggerAction.Type.EMAIL
).slug
return {
"name": "hello",
"time_window": 10,
"query": "level:error",
"threshold_type": 0,
"resolve_threshold": 100,
"alert_threshold": 0,
"aggregate": "count_unique(user)",
"threshold_period": 1,
"projects": [self.project.slug],
"triggers": [
{
"label": "critical",
"alertThreshold": 200,
"actions": [
{
"type": email_action_type,
"targetType": ACTION_TARGET_TYPE_TO_STRING[
AlertRuleTriggerAction.TargetType.TEAM
],
"targetIdentifier": self.team.id,
},
],
},
{
"label": "warning",
"alertThreshold": 150,
"actions": [
{
"type": email_action_type,
"targetType": ACTION_TARGET_TYPE_TO_STRING[
AlertRuleTriggerAction.TargetType.TEAM
],
"targetIdentifier": self.team.id,
},
{
"type": email_action_type,
"targetType": ACTION_TARGET_TYPE_TO_STRING[
AlertRuleTriggerAction.TargetType.USER
],
"targetIdentifier": self.user.id,
},
],
},
],
}
def test_invalid_rule_id(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
with self.feature("organizations:incidents"):
resp = self.get_response(self.organization.slug, 1234)
assert resp.status_code == 404
def test_permissions(self) -> None:
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.create_user())
with self.feature("organizations:incidents"):
resp = self.get_response(self.organization.slug, self.alert_rule.id)
assert resp.status_code == 403
def test_no_feature(self) -> None:
self.create_member(
user=self.user, organization=self.organization, role="owner", teams=[self.team]
)
self.login_as(self.user)
resp = self.get_response(self.organization.slug, self.alert_rule.id)
assert resp.status_code == 404
def test_no_project(self) -> None:
self.create_team(organization=self.organization, members=[self.user])
self.login_as(self.user)
project = self.alert_rule.projects.get()
Project.objects.get(id=project.id).delete()
with self.feature("organizations:incidents"):
resp = self.get_response(self.organization.slug, self.alert_rule.id)
assert resp.status_code == 404
| AlertRuleDetailsBase |
python | getsentry__sentry | tests/sentry/integrations/slack/test_integration.py | {
"start": 18329,
"end": 19197
} | class ____(TestCase):
def setUp(self) -> None:
self.integration = self.create_provider_integration(
provider="slack", name="Slack", metadata={}
)
self.installation = SlackIntegration(self.integration, self.organization.id)
def test_config_data_workspace_app(self) -> None:
assert self.installation.get_config_data()["installationType"] == "workspace_app"
def test_config_data_user_token(self) -> None:
self.integration.metadata["user_access_token"] = "token"
assert self.installation.get_config_data()["installationType"] == "classic_bot"
def test_config_data_born_as_bot(self) -> None:
self.integration.metadata["installation_type"] = "born_as_bot"
assert self.installation.get_config_data()["installationType"] == "born_as_bot"
@control_silo_test
| SlackIntegrationConfigTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 265225,
"end": 265694
} | class ____(sgqlc.types.Input):
"""Ordering options for project v2 view connections"""
__schema__ = github_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(sgqlc.types.non_null(ProjectV2ViewOrderField), graphql_name="field")
"""The field to order the project v2 views by."""
direction = sgqlc.types.Field(sgqlc.types.non_null(OrderDirection), graphql_name="direction")
"""The ordering direction."""
| ProjectV2ViewOrder |
python | pydata__xarray | xarray/tests/test_plot.py | {
"start": 115273,
"end": 130750
} | class ____:
@pytest.fixture(params=[1, 2, 3])
def data_array(self, request) -> DataArray:
"""
Return a simple DataArray
"""
dims = request.param
if dims == 1:
return DataArray(easy_array((10,)))
elif dims == 2:
return DataArray(easy_array((10, 3)))
elif dims == 3:
return DataArray(easy_array((10, 3, 2)))
else:
raise ValueError(f"No DataArray implemented for {dims=}.")
@pytest.fixture(params=[1, 2])
def data_array_logspaced(self, request) -> DataArray:
"""
Return a simple DataArray with logspaced coordinates
"""
dims = request.param
if dims == 1:
return DataArray(
np.arange(7), dims=("x",), coords={"x": np.logspace(-3, 3, 7)}
)
elif dims == 2:
return DataArray(
np.arange(16).reshape(4, 4),
dims=("y", "x"),
coords={"x": np.logspace(-1, 2, 4), "y": np.logspace(-5, -1, 4)},
)
else:
raise ValueError(f"No DataArray implemented for {dims=}.")
@pytest.mark.parametrize("xincrease", [True, False])
def test_xincrease_kwarg(self, data_array, xincrease) -> None:
with figure_context():
data_array.plot(xincrease=xincrease)
assert plt.gca().xaxis_inverted() == (not xincrease)
@pytest.mark.parametrize("yincrease", [True, False])
def test_yincrease_kwarg(self, data_array, yincrease) -> None:
with figure_context():
data_array.plot(yincrease=yincrease)
assert plt.gca().yaxis_inverted() == (not yincrease)
@pytest.mark.parametrize("xscale", ["linear", "logit", "symlog"])
def test_xscale_kwarg(self, data_array, xscale) -> None:
with figure_context():
data_array.plot(xscale=xscale)
assert plt.gca().get_xscale() == xscale
@pytest.mark.parametrize("yscale", ["linear", "logit", "symlog"])
def test_yscale_kwarg(self, data_array, yscale) -> None:
with figure_context():
data_array.plot(yscale=yscale)
assert plt.gca().get_yscale() == yscale
def test_xscale_log_kwarg(self, data_array_logspaced) -> None:
xscale = "log"
with figure_context():
data_array_logspaced.plot(xscale=xscale)
assert plt.gca().get_xscale() == xscale
def test_yscale_log_kwarg(self, data_array_logspaced) -> None:
yscale = "log"
with figure_context():
data_array_logspaced.plot(yscale=yscale)
assert plt.gca().get_yscale() == yscale
def test_xlim_kwarg(self, data_array) -> None:
with figure_context():
expected = (0.0, 1000.0)
data_array.plot(xlim=[0, 1000])
assert plt.gca().get_xlim() == expected
def test_ylim_kwarg(self, data_array) -> None:
with figure_context():
data_array.plot(ylim=[0, 1000])
expected = (0.0, 1000.0)
assert plt.gca().get_ylim() == expected
def test_xticks_kwarg(self, data_array) -> None:
with figure_context():
data_array.plot(xticks=np.arange(5))
expected = np.arange(5).tolist()
assert_array_equal(plt.gca().get_xticks(), expected)
def test_yticks_kwarg(self, data_array) -> None:
with figure_context():
data_array.plot(yticks=np.arange(5))
expected = np.arange(5)
assert_array_equal(plt.gca().get_yticks(), expected)
@requires_matplotlib
@pytest.mark.parametrize("plotfunc", ["pcolormesh", "contourf", "contour"])
def test_plot_transposed_nondim_coord(plotfunc) -> None:
x = np.linspace(0, 10, 101)
h = np.linspace(3, 7, 101)
s = np.linspace(0, 1, 51)
z = s[:, np.newaxis] * h[np.newaxis, :]
da = xr.DataArray(
np.sin(x) * np.cos(z),
dims=["s", "x"],
coords={"x": x, "s": s, "z": (("s", "x"), z), "zt": (("x", "s"), z.T)},
)
with figure_context():
getattr(da.plot, plotfunc)(x="x", y="zt")
with figure_context():
getattr(da.plot, plotfunc)(x="zt", y="x")
@requires_matplotlib
@pytest.mark.parametrize("plotfunc", ["pcolormesh", "imshow"])
def test_plot_transposes_properly(plotfunc) -> None:
# test that we aren't mistakenly transposing when the 2 dimensions have equal sizes.
da = xr.DataArray([np.sin(2 * np.pi / 10 * np.arange(10))] * 10, dims=("y", "x"))
with figure_context():
hdl = getattr(da.plot, plotfunc)(x="x", y="y")
# get_array doesn't work for contour, contourf. It returns the colormap intervals.
# pcolormesh returns 1D array but imshow returns a 2D array so it is necessary
# to ravel() on the LHS
assert_array_equal(hdl.get_array().ravel(), da.to_masked_array().ravel())
@requires_matplotlib
def test_facetgrid_single_contour() -> None:
# regression test for GH3569
x, y = np.meshgrid(np.arange(12), np.arange(12))
z = xr.DataArray(np.hypot(x, y))
z2 = xr.DataArray(np.hypot(x, y) + 1)
ds = xr.concat([z, z2], dim="time")
ds["time"] = [0, 1]
with figure_context():
ds.plot.contour(col="time", levels=[4], colors=["k"])
@requires_matplotlib
def test_get_axis_raises() -> None:
# test get_axis raises an error if trying to do invalid things
# cannot provide both ax and figsize
with pytest.raises(ValueError, match="both `figsize` and `ax`"):
get_axis(figsize=[4, 4], size=None, aspect=None, ax="something") # type: ignore[arg-type]
# cannot provide both ax and size
with pytest.raises(ValueError, match="both `size` and `ax`"):
get_axis(figsize=None, size=200, aspect=4 / 3, ax="something") # type: ignore[arg-type]
# cannot provide both size and figsize
with pytest.raises(ValueError, match="both `figsize` and `size`"):
get_axis(figsize=[4, 4], size=200, aspect=None, ax=None)
# cannot provide aspect and size
with pytest.raises(ValueError, match="`aspect` argument without `size`"):
get_axis(figsize=None, size=None, aspect=4 / 3, ax=None)
# cannot provide axis and subplot_kws
with pytest.raises(ValueError, match="cannot use subplot_kws with existing ax"):
get_axis(figsize=None, size=None, aspect=None, ax=1, something_else=5) # type: ignore[arg-type]
@requires_matplotlib
@pytest.mark.parametrize(
["figsize", "size", "aspect", "ax", "kwargs"],
[
pytest.param((3, 2), None, None, False, {}, id="figsize"),
pytest.param(
(3.5, 2.5), None, None, False, {"label": "test"}, id="figsize_kwargs"
),
pytest.param(None, 5, None, False, {}, id="size"),
pytest.param(None, 5.5, None, False, {"label": "test"}, id="size_kwargs"),
pytest.param(None, 5, 1, False, {}, id="size+aspect"),
pytest.param(None, 5, "auto", False, {}, id="auto_aspect"),
pytest.param(None, 5, "equal", False, {}, id="equal_aspect"),
pytest.param(None, None, None, True, {}, id="ax"),
pytest.param(None, None, None, False, {}, id="default"),
pytest.param(None, None, None, False, {"label": "test"}, id="default_kwargs"),
],
)
def test_get_axis(
figsize: tuple[float, float] | None,
size: float | None,
aspect: float | None,
ax: bool,
kwargs: dict[str, Any],
) -> None:
with figure_context():
inp_ax = plt.axes() if ax else None
out_ax = get_axis(
figsize=figsize, size=size, aspect=aspect, ax=inp_ax, **kwargs
)
assert isinstance(out_ax, mpl.axes.Axes)
@requires_matplotlib
@requires_cartopy
@pytest.mark.parametrize(
["figsize", "size", "aspect"],
[
pytest.param((3, 2), None, None, id="figsize"),
pytest.param(None, 5, None, id="size"),
pytest.param(None, 5, 1, id="size+aspect"),
pytest.param(None, None, None, id="default"),
],
)
def test_get_axis_cartopy(
figsize: tuple[float, float] | None, size: float | None, aspect: float | None
) -> None:
kwargs = {"projection": cartopy.crs.PlateCarree()}
with figure_context():
out_ax = get_axis(figsize=figsize, size=size, aspect=aspect, **kwargs)
assert isinstance(out_ax, cartopy.mpl.geoaxes.GeoAxesSubplot)
@requires_matplotlib
def test_get_axis_current() -> None:
with figure_context():
_, ax = plt.subplots()
out_ax = get_axis()
assert ax is out_ax
@requires_matplotlib
def test_maybe_gca() -> None:
with figure_context():
ax = _maybe_gca(aspect=1)
assert isinstance(ax, mpl.axes.Axes)
assert ax.get_aspect() == 1
with figure_context():
# create figure without axes
plt.figure()
ax = _maybe_gca(aspect=1)
assert isinstance(ax, mpl.axes.Axes)
assert ax.get_aspect() == 1
with figure_context():
existing_axes = plt.axes()
ax = _maybe_gca(aspect=1)
# reuses the existing axes
assert existing_axes == ax
# kwargs are ignored when reusing axes
assert ax.get_aspect() == "auto"
@requires_matplotlib
@pytest.mark.parametrize(
"x, y, z, hue, markersize, row, col, add_legend, add_colorbar",
[
("A", "B", None, None, None, None, None, None, None),
("B", "A", None, "w", None, None, None, True, None),
("A", "B", None, "y", "x", None, None, True, True),
("A", "B", "z", None, None, None, None, None, None),
("B", "A", "z", "w", None, None, None, True, None),
("A", "B", "z", "y", "x", None, None, True, True),
("A", "B", "z", "y", "x", "w", None, True, True),
],
)
def test_datarray_scatter(
x, y, z, hue, markersize, row, col, add_legend, add_colorbar
) -> None:
"""Test datarray scatter. Merge with TestPlot1D eventually."""
ds = xr.tutorial.scatter_example_dataset()
extra_coords = [v for v in [x, hue, markersize] if v is not None]
# Base coords:
coords = dict(ds.coords)
# Add extra coords to the DataArray:
coords.update({v: ds[v] for v in extra_coords})
darray = xr.DataArray(ds[y], coords=coords)
with figure_context():
darray.plot.scatter(
x=x,
z=z,
hue=hue,
markersize=markersize,
add_legend=add_legend,
add_colorbar=add_colorbar,
)
@requires_dask
@requires_matplotlib
@pytest.mark.parametrize(
"plotfunc",
["scatter"],
)
def test_dataarray_not_loading_inplace(plotfunc: str) -> None:
ds = xr.tutorial.scatter_example_dataset()
ds = ds.chunk()
with figure_context():
getattr(ds.A.plot, plotfunc)(x="x")
from dask.array import Array
assert isinstance(ds.A.data, Array)
@requires_matplotlib
def test_assert_valid_xy() -> None:
ds = xr.tutorial.scatter_example_dataset()
darray = ds.A
# x is valid and should not error:
_assert_valid_xy(darray=darray, xy="x", name="x")
# None should be valid as well even though it isn't in the valid list:
_assert_valid_xy(darray=darray, xy=None, name="x")
# A hashable that is not valid should error:
with pytest.raises(ValueError, match="x must be one of"):
_assert_valid_xy(darray=darray, xy="error_now", name="x")
@requires_matplotlib
@pytest.mark.parametrize(
"val", [pytest.param([], id="empty"), pytest.param(0, id="scalar")]
)
@pytest.mark.parametrize(
"method",
[
"__call__",
"line",
"step",
"contour",
"contourf",
"hist",
"imshow",
"pcolormesh",
"scatter",
"surface",
],
)
def test_plot_empty_raises(val: list | float, method: str) -> None:
da = xr.DataArray(val)
with pytest.raises(TypeError, match="No numeric data"):
getattr(da.plot, method)()
@requires_matplotlib
def test_facetgrid_axes_raises_deprecation_warning() -> None:
with pytest.warns(
DeprecationWarning,
match=(
"self.axes is deprecated since 2022.11 in order to align with "
"matplotlibs plt.subplots, use self.axs instead."
),
):
with figure_context():
ds = xr.tutorial.scatter_example_dataset()
g = ds.plot.scatter(x="A", y="B", col="x")
_ = g.axes
@requires_matplotlib
def test_plot1d_default_rcparams() -> None:
import matplotlib as mpl
ds = xr.tutorial.scatter_example_dataset(seed=42)
with figure_context():
# scatter markers should by default have white edgecolor to better
# see overlapping markers:
_fig, ax = plt.subplots(1, 1)
ds.plot.scatter(x="A", y="B", marker="o", ax=ax)
actual: np.ndarray = mpl.colors.to_rgba_array("w")
expected: np.ndarray = ax.collections[0].get_edgecolor() # type: ignore[assignment]
np.testing.assert_allclose(actual, expected)
# Facetgrids should have the default value as well:
fg = ds.plot.scatter(x="A", y="B", col="x", marker="o")
ax = fg.axs.ravel()[0]
actual = mpl.colors.to_rgba_array("w")
expected = ax.collections[0].get_edgecolor() # type: ignore[assignment,unused-ignore]
np.testing.assert_allclose(actual, expected)
# scatter should not emit any warnings when using unfilled markers:
with assert_no_warnings():
_fig, ax = plt.subplots(1, 1)
ds.plot.scatter(x="A", y="B", ax=ax, marker="x")
# Prioritize edgecolor argument over default plot1d values:
_fig, ax = plt.subplots(1, 1)
ds.plot.scatter(x="A", y="B", marker="o", ax=ax, edgecolor="k")
actual = mpl.colors.to_rgba_array("k")
expected = ax.collections[0].get_edgecolor() # type: ignore[assignment]
np.testing.assert_allclose(actual, expected)
@requires_matplotlib
def test_plot1d_filtered_nulls() -> None:
ds = xr.tutorial.scatter_example_dataset(seed=42)
y = ds.y.where(ds.y > 0.2)
expected = y.notnull().sum().item()
with figure_context():
pc = y.plot.scatter()
actual = pc.get_offsets().shape[0]
assert expected == actual
@requires_matplotlib
def test_9155() -> None:
# A test for types from issue #9155
with figure_context():
data = xr.DataArray([1, 2, 3], dims=["x"])
_fig, ax = plt.subplots(ncols=1, nrows=1)
data.plot(ax=ax) # type: ignore[call-arg]
@requires_matplotlib
def test_temp_dataarray() -> None:
from xarray.plot.dataset_plot import _temp_dataarray
x = np.arange(1, 4)
y = np.arange(4, 6)
var1 = np.arange(x.size * y.size).reshape((x.size, y.size))
var2 = np.arange(x.size * y.size).reshape((x.size, y.size))
ds = xr.Dataset(
{
"var1": (["x", "y"], var1),
"var2": (["x", "y"], 2 * var2),
"var3": (["x"], 3 * x),
},
coords={
"x": x,
"y": y,
"model": np.arange(7),
},
)
# No broadcasting:
y_ = "var1"
locals_ = {"x": "var2"}
da = _temp_dataarray(ds, y_, locals_)
assert da.shape == (3, 2)
# Broadcast from 1 to 2dim:
y_ = "var3"
locals_ = {"x": "var1"}
da = _temp_dataarray(ds, y_, locals_)
assert da.shape == (3, 2)
# Ignore non-valid coord kwargs:
y_ = "var3"
locals_ = dict(x="x", extend="var2")
da = _temp_dataarray(ds, y_, locals_)
assert da.shape == (3,)
| TestAxesKwargs |
python | huggingface__transformers | src/transformers/models/longformer/modeling_longformer.py | {
"start": 64019,
"end": 75897
} | class ____(LongformerPreTrainedModel):
"""
This class copied code from [`RobertaModel`] and overwrote standard self-attention with longformer self-attention
to provide the ability to process long sequences following the self-attention approach described in [Longformer:
the Long-Document Transformer](https://huggingface.co/papers/2004.05150) by Iz Beltagy, Matthew E. Peters, and Arman Cohan.
Longformer self-attention combines a local (sliding window) and global attention to extend to long documents
without the O(n^2) increase in memory and compute.
The self-attention module `LongformerSelfAttention` implemented here supports the combination of local and global
attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and dilated
attention are more relevant for autoregressive language modeling than finetuning on downstream tasks. Future
release will add support for autoregressive attention, but the support for dilated attention requires a custom CUDA
kernel to be memory and compute efficient.
"""
def __init__(self, config, add_pooling_layer=True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
if isinstance(config.attention_window, int):
assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value"
assert config.attention_window > 0, "`config.attention_window` has to be positive"
config.attention_window = [config.attention_window] * config.num_hidden_layers # one value per layer
else:
assert len(config.attention_window) == config.num_hidden_layers, (
"`len(config.attention_window)` should equal `config.num_hidden_layers`. "
f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}"
)
self.embeddings = LongformerEmbeddings(config)
self.encoder = LongformerEncoder(config)
self.pooler = LongformerPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _pad_to_window_size(
self,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
token_type_ids: torch.Tensor,
position_ids: torch.Tensor,
inputs_embeds: torch.Tensor,
pad_token_id: int,
):
"""A helper function to pad tokens and mask to work with implementation of Longformer self-attention."""
# padding
attention_window = (
self.config.attention_window
if isinstance(self.config.attention_window, int)
else max(self.config.attention_window)
)
assert attention_window % 2 == 0, f"`attention_window` should be an even value. Given {attention_window}"
input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
batch_size, seq_len = input_shape[:2]
padding_len = (attention_window - seq_len % attention_window) % attention_window
# this path should be recorded in the ONNX export, it is fine with padding_len == 0 as well
if padding_len > 0:
logger.warning_once(
f"Input ids are automatically padded to be a multiple of `config.attention_window`: {attention_window}"
)
if input_ids is not None:
input_ids = nn.functional.pad(input_ids, (0, padding_len), value=pad_token_id)
if position_ids is not None:
# pad with position_id = pad_token_id as in modeling_roberta.RobertaEmbeddings
position_ids = nn.functional.pad(position_ids, (0, padding_len), value=pad_token_id)
if inputs_embeds is not None:
input_ids_padding = inputs_embeds.new_full(
(batch_size, padding_len),
self.config.pad_token_id,
dtype=torch.long,
)
inputs_embeds_padding = self.embeddings(input_ids_padding)
inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2)
attention_mask = nn.functional.pad(
attention_mask, (0, padding_len), value=0
) # no attention on the padding tokens
token_type_ids = nn.functional.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0
return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds
def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor):
# longformer self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn)
# (global_attention_mask + 1) => 1 for local attention, 2 for global attention
# => final attention_mask => 0 for no attention, 1 for local attention 2 for global attention
if attention_mask is not None:
attention_mask = attention_mask * (global_attention_mask + 1)
else:
# simply use `global_attention_mask` as `attention_mask`
# if no `attention_mask` is given
attention_mask = global_attention_mask + 1
return attention_mask
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
global_attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, LongformerBaseModelOutputWithPooling]:
r"""
global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to decide the attention given on each token, local attention or global attention. Tokens with global
attention attends to all other tokens, and all other tokens attend to them. This is important for
task-specific finetuning because it makes the model more flexible at representing the task. For example,
for classification, the <s> token should be given global attention. For QA, all question tokens should also
have global attention. Please refer to the [Longformer paper](https://huggingface.co/papers/2004.05150) for more
details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
Examples:
```python
>>> import torch
>>> from transformers import LongformerModel, AutoTokenizer
>>> model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
>>> tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
>>> SAMPLE_TEXT = " ".join(["Hello world! "] * 1000) # long input document
>>> input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1
>>> attention_mask = torch.ones(
... input_ids.shape, dtype=torch.long, device=input_ids.device
... ) # initialize to local attention
>>> global_attention_mask = torch.zeros(
... input_ids.shape, dtype=torch.long, device=input_ids.device
... ) # initialize to global attention to be deactivated for all tokens
>>> global_attention_mask[
... :,
... [
... 1,
... 4,
... 21,
... ],
... ] = 1 # Set global attention to random tokens for the sake of this example
>>> # Usually, set global attention based on the task. For example,
>>> # classification: the <s> token
>>> # QA: question tokens
>>> # LM: potentially on the beginning of sentences and paragraphs
>>> outputs = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)
>>> sequence_output = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# merge `global_attention_mask` and `attention_mask`
if global_attention_mask is not None:
attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask)
padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = self._pad_to_window_size(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
pad_token_id=self.config.pad_token_id,
)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)[
:, 0, 0, :
]
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
padding_len=padding_len,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return LongformerBaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
global_attentions=encoder_outputs.global_attentions,
)
@auto_docstring
| LongformerModel |
python | ray-project__ray | python/ray/serve/tests/unit/test_deployment_scheduler.py | {
"start": 31266,
"end": 45480
} | class ____:
def test_basic(self):
d_id1 = DeploymentID(name="deployment1")
d_id2 = DeploymentID(name="deployment2")
node_id_1 = NodeID.from_random().hex()
node_id_2 = NodeID.from_random().hex()
cluster_node_info_cache = MockClusterNodeInfoCache()
cluster_node_info_cache.add_node(node_id_1, {"CPU": 3})
cluster_node_info_cache.add_node(node_id_2, {"CPU": 2})
scheduler = default_impl.create_deployment_scheduler(
cluster_node_info_cache,
head_node_id_override="fake-head-node-id",
create_placement_group_fn_override=None,
)
scheduler.on_deployment_created(d_id1, SpreadDeploymentSchedulingPolicy())
scheduler.on_deployment_created(d_id2, SpreadDeploymentSchedulingPolicy())
scheduler.on_deployment_deployed(
d_id1,
ReplicaConfig.create(dummy, ray_actor_options={"num_cpus": 1}),
)
scheduler.on_deployment_deployed(
d_id2,
ReplicaConfig.create(dummy, ray_actor_options={"num_cpus": 3}),
)
on_scheduled_mock = Mock()
on_scheduled_mock2 = Mock()
scheduler.schedule(
upscales={
d_id1: [
ReplicaSchedulingRequest(
replica_id=ReplicaID(unique_id=f"r{i}", deployment_id=d_id1),
actor_def=MockActorClass(),
actor_resources={"CPU": 1},
actor_options={},
actor_init_args=(),
on_scheduled=on_scheduled_mock,
)
for i in range(2)
],
d_id2: [
ReplicaSchedulingRequest(
replica_id=ReplicaID(unique_id="r2", deployment_id=d_id2),
actor_def=MockActorClass(),
actor_resources={"CPU": 3},
actor_options={},
actor_init_args=(),
on_scheduled=on_scheduled_mock2,
)
],
},
downscales={},
)
assert len(on_scheduled_mock.call_args_list) == 2
for call in on_scheduled_mock.call_args_list:
assert call.kwargs == {"placement_group": None}
assert len(call.args) == 1
scheduling_strategy = call.args[0]._options["scheduling_strategy"]
assert isinstance(scheduling_strategy, NodeAffinitySchedulingStrategy)
assert scheduling_strategy.node_id == node_id_2
assert len(on_scheduled_mock2.call_args_list) == 1
call = on_scheduled_mock2.call_args_list[0]
assert call.kwargs == {"placement_group": None}
assert len(call.args) == 1
scheduling_strategy = call.args[0]._options["scheduling_strategy"]
assert isinstance(scheduling_strategy, NodeAffinitySchedulingStrategy)
assert scheduling_strategy.node_id == node_id_1
def test_placement_groups(self):
d_id1 = DeploymentID(name="deployment1")
d_id2 = DeploymentID(name="deployment2")
cluster_node_info_cache = MockClusterNodeInfoCache()
cluster_node_info_cache.add_node("node1", {"CPU": 3})
cluster_node_info_cache.add_node("node2", {"CPU": 2})
scheduler = default_impl.create_deployment_scheduler(
cluster_node_info_cache,
head_node_id_override="fake-head-node-id",
create_placement_group_fn_override=lambda *args, **kwargs: MockPlacementGroup( # noqa
*args, **kwargs
),
)
_ = ray.util.placement_group
scheduler.on_deployment_created(d_id1, SpreadDeploymentSchedulingPolicy())
scheduler.on_deployment_created(d_id2, SpreadDeploymentSchedulingPolicy())
scheduler.on_deployment_deployed(
d_id1,
ReplicaConfig.create(
dummy,
ray_actor_options={"num_cpus": 0},
placement_group_bundles=[{"CPU": 0.5}, {"CPU": 0.5}],
placement_group_strategy="STRICT_PACK",
),
)
scheduler.on_deployment_deployed(
d_id2,
ReplicaConfig.create(
dummy,
ray_actor_options={"num_cpus": 0},
placement_group_bundles=[{"CPU": 0.5}, {"CPU": 2.5}],
placement_group_strategy="STRICT_PACK",
),
)
on_scheduled_mock = Mock()
on_scheduled_mock2 = Mock()
scheduler.schedule(
upscales={
d_id1: [
ReplicaSchedulingRequest(
replica_id=ReplicaID(unique_id=f"r{i}", deployment_id=d_id1),
actor_def=MockActorClass(),
actor_resources={"CPU": 0},
placement_group_bundles=[{"CPU": 0.5}, {"CPU": 0.5}],
placement_group_strategy="STRICT_PACK",
actor_options={"name": "random_replica"},
actor_init_args=(),
on_scheduled=on_scheduled_mock,
)
for i in range(2)
],
d_id2: [
ReplicaSchedulingRequest(
replica_id=ReplicaID(unique_id="r2", deployment_id=d_id2),
actor_def=MockActorClass(),
actor_resources={"CPU": 0},
placement_group_bundles=[{"CPU": 0.5}, {"CPU": 2.5}],
placement_group_strategy="STRICT_PACK",
actor_options={"name": "some_replica"},
actor_init_args=(),
on_scheduled=on_scheduled_mock2,
)
],
},
downscales={},
)
assert len(on_scheduled_mock.call_args_list) == 2
for call in on_scheduled_mock.call_args_list:
assert len(call.args) == 1
scheduling_strategy = call.args[0]._options["scheduling_strategy"]
assert isinstance(scheduling_strategy, PlacementGroupSchedulingStrategy)
assert call.kwargs.get("placement_group")._soft_target_node_id == "node2"
assert len(on_scheduled_mock2.call_args_list) == 1
call = on_scheduled_mock2.call_args_list[0]
assert len(call.args) == 1
scheduling_strategy = call.args[0]._options["scheduling_strategy"]
assert isinstance(scheduling_strategy, PlacementGroupSchedulingStrategy)
assert call.kwargs.get("placement_group")._soft_target_node_id == "node1"
def test_heterogeneous_resources(self):
d_id1 = DeploymentID(name="deployment1")
d_id2 = DeploymentID(name="deployment2")
node_id_1 = NodeID.from_random().hex()
node_id_2 = NodeID.from_random().hex()
cluster_node_info_cache = MockClusterNodeInfoCache()
cluster_node_info_cache.add_node(node_id_1, {"GPU": 4, "CPU": 6})
cluster_node_info_cache.add_node(node_id_2, {"GPU": 10, "CPU": 2})
scheduler = default_impl.create_deployment_scheduler(
cluster_node_info_cache,
head_node_id_override="fake-head-node-id",
create_placement_group_fn_override=None,
)
scheduler.on_deployment_created(d_id1, SpreadDeploymentSchedulingPolicy())
scheduler.on_deployment_created(d_id2, SpreadDeploymentSchedulingPolicy())
scheduler.on_deployment_deployed(
d_id1,
ReplicaConfig.create(
dummy, ray_actor_options={"num_gpus": 2, "num_cpus": 2}
),
)
scheduler.on_deployment_deployed(
d_id2,
ReplicaConfig.create(
dummy, ray_actor_options={"num_gpus": 1, "num_cpus": 1}
),
)
on_scheduled_mock = Mock()
scheduler.schedule(
upscales={
d_id1: [
ReplicaSchedulingRequest(
replica_id=ReplicaID(unique_id="r0", deployment_id=d_id1),
actor_def=MockActorClass(),
actor_resources={"GPU": 2, "CPU": 2},
actor_options={},
actor_init_args=(),
on_scheduled=on_scheduled_mock,
)
],
d_id2: [
ReplicaSchedulingRequest(
replica_id=ReplicaID(unique_id=f"r{i+1}", deployment_id=d_id2),
actor_def=MockActorClass(),
actor_resources={"GPU": 1, "CPU": 1},
actor_options={},
actor_init_args=(),
on_scheduled=on_scheduled_mock,
)
for i in range(2)
],
},
downscales={},
)
# Even though scheduling on node 2 would minimize fragmentation
# of CPU resources, we should prioritize minimizing fragmentation
# of GPU resources first, so all 3 replicas should be scheduled
# to node 1
assert len(on_scheduled_mock.call_args_list) == 3
for call in on_scheduled_mock.call_args_list:
assert len(call.args) == 1
scheduling_strategy = call.args[0]._options["scheduling_strategy"]
assert isinstance(scheduling_strategy, NodeAffinitySchedulingStrategy)
assert scheduling_strategy.node_id == node_id_1
assert call.kwargs == {"placement_group": None}
def test_max_replicas_per_node(self):
"""Test that at most `max_replicas_per_node` number of replicas
are scheduled onto a node even if that node has more resources.
"""
d_id1 = DeploymentID(name="deployment1")
node_id_1 = NodeID.from_random().hex()
node_id_2 = NodeID.from_random().hex()
cluster_node_info_cache = MockClusterNodeInfoCache()
# Should try to schedule on node1 to minimize fragmentation
cluster_node_info_cache.add_node(node_id_1, {"CPU": 20})
cluster_node_info_cache.add_node(node_id_2, {"CPU": 21})
scheduler = default_impl.create_deployment_scheduler(
cluster_node_info_cache,
head_node_id_override="fake-head-node-id",
create_placement_group_fn_override=lambda *args, **kwargs: MockPlacementGroup( # noqa
*args, **kwargs
),
)
scheduler.on_deployment_created(d_id1, SpreadDeploymentSchedulingPolicy())
scheduler.on_deployment_deployed(
d_id1,
ReplicaConfig.create(
dummy, max_replicas_per_node=4, ray_actor_options={"num_cpus": 2}
),
)
state = defaultdict(int)
def on_scheduled(actor_handle, placement_group):
scheduling_strategy = actor_handle._options["scheduling_strategy"]
if isinstance(scheduling_strategy, NodeAffinitySchedulingStrategy):
state[scheduling_strategy.node_id] += 1
elif isinstance(scheduling_strategy, PlacementGroupSchedulingStrategy):
state[placement_group._soft_target_node_id] += 1
scheduler.schedule(
upscales={
d_id1: [
ReplicaSchedulingRequest(
replica_id=ReplicaID(
unique_id=f"replica{i}", deployment_id=d_id1
),
actor_def=MockActorClass(),
actor_resources={"CPU": 2},
max_replicas_per_node=4,
actor_options={"name": "random"},
actor_init_args=(),
on_scheduled=on_scheduled,
)
for i in range(5)
]
},
downscales={},
)
assert state[node_id_1] == 4
assert state[node_id_2] == 1
def test_custom_resources(self):
d_id = DeploymentID(name="deployment1")
node_id_1 = NodeID.from_random().hex()
node_id_2 = NodeID.from_random().hex()
cluster_node_info_cache = MockClusterNodeInfoCache()
cluster_node_info_cache.add_node(node_id_1, {"CPU": 3})
cluster_node_info_cache.add_node(node_id_2, {"CPU": 100, "customA": 1})
scheduler = default_impl.create_deployment_scheduler(
cluster_node_info_cache,
head_node_id_override="fake-head-node-id",
create_placement_group_fn_override=lambda *args, **kwargs: MockPlacementGroup( # noqa
*args, **kwargs
),
)
scheduler.on_deployment_created(d_id, SpreadDeploymentSchedulingPolicy())
scheduler.on_deployment_deployed(
d_id,
ReplicaConfig.create(
dummy, ray_actor_options={"num_cpus": 2, "resources": {"customA": 0.1}}
),
)
# Despite trying to schedule on node that minimizes fragmentation,
# should respect custom resources and schedule onto node2
def on_scheduled(actor_handle, placement_group):
scheduling_strategy = actor_handle._options["scheduling_strategy"]
assert isinstance(scheduling_strategy, NodeAffinitySchedulingStrategy)
assert scheduling_strategy.node_id == node_id_2
scheduler.schedule(
upscales={
d_id: [
ReplicaSchedulingRequest(
replica_id=ReplicaID(unique_id="r0", deployment_id=d_id),
actor_def=MockActorClass(),
actor_resources={"CPU": 2, "customA": 0.1},
actor_options={"name": "random"},
actor_init_args=(),
on_scheduled=on_scheduled,
)
]
},
downscales={},
)
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| TestCompactScheduling |
python | python__mypy | mypyc/annotate.py | {
"start": 9082,
"end": 17993
} | class ____(TraverserVisitor):
"""Generate annotations from mypy AST and inferred types."""
def __init__(self, type_map: dict[Expression, Type], mapper: Mapper) -> None:
self.anns: dict[int, list[Annotation]] = {}
self.ignored_lines: set[int] = set()
self.func_depth = 0
self.type_map = type_map
self.mapper = mapper
def visit_func_def(self, o: FuncDef, /) -> None:
if self.func_depth > 0:
self.annotate(
o,
"A nested function object is allocated each time statement is executed. "
+ "A module-level function would be faster.",
)
self.func_depth += 1
super().visit_func_def(o)
self.func_depth -= 1
def visit_for_stmt(self, o: ForStmt, /) -> None:
self.check_iteration([o.expr], "For loop")
super().visit_for_stmt(o)
def visit_dictionary_comprehension(self, o: DictionaryComprehension, /) -> None:
self.check_iteration(o.sequences, "Comprehension")
super().visit_dictionary_comprehension(o)
def visit_generator_expr(self, o: GeneratorExpr, /) -> None:
self.check_iteration(o.sequences, "Comprehension or generator")
super().visit_generator_expr(o)
def check_iteration(self, expressions: list[Expression], kind: str) -> None:
for expr in expressions:
typ = self.get_type(expr)
if isinstance(typ, AnyType):
self.annotate(expr, f'{kind} uses generic operations (iterable has type "Any").')
elif isinstance(typ, Instance) and typ.type.fullname in (
"typing.Iterable",
"typing.Iterator",
"typing.Sequence",
"typing.MutableSequence",
):
self.annotate(
expr,
f'{kind} uses generic operations (iterable has the abstract type "{typ.type.fullname}").',
)
def visit_class_def(self, o: ClassDef, /) -> None:
super().visit_class_def(o)
if self.func_depth == 0:
# Don't complain about base classes at top level
for base in o.base_type_exprs:
self.ignored_lines.add(base.line)
for s in o.defs.body:
if isinstance(s, AssignmentStmt):
# Don't complain about attribute initializers
self.ignored_lines.add(s.line)
elif isinstance(s, Decorator):
# Don't complain about decorator definitions that generate some
# dynamic operations. This is a bit heavy-handed.
self.ignored_lines.add(s.func.line)
def visit_with_stmt(self, o: WithStmt, /) -> None:
for expr in o.expr:
if isinstance(expr, CallExpr) and isinstance(expr.callee, RefExpr):
node = expr.callee.node
if isinstance(node, Decorator):
if any(
isinstance(d, RefExpr)
and d.node
and d.node.fullname == "contextlib.contextmanager"
for d in node.decorators
):
self.annotate(
expr,
f'"{node.name}" uses @contextmanager, which is slow '
+ "in compiled code. Use a native class with "
+ '"__enter__" and "__exit__" methods instead.',
priority=3,
)
super().visit_with_stmt(o)
def visit_assignment_stmt(self, o: AssignmentStmt, /) -> None:
special_form = False
if self.func_depth == 0:
analyzed: Expression | None = o.rvalue
if isinstance(o.rvalue, (CallExpr, IndexExpr, OpExpr)):
analyzed = o.rvalue.analyzed
if o.is_alias_def or isinstance(
analyzed, (TypeVarExpr, NamedTupleExpr, TypedDictExpr, NewTypeExpr)
):
special_form = True
if special_form:
# TODO: Ignore all lines if multi-line
self.ignored_lines.add(o.line)
super().visit_assignment_stmt(o)
def visit_name_expr(self, o: NameExpr, /) -> None:
if ann := stdlib_hints.get(o.fullname):
self.annotate(o, ann)
def visit_member_expr(self, o: MemberExpr, /) -> None:
super().visit_member_expr(o)
if ann := stdlib_hints.get(o.fullname):
self.annotate(o, ann)
def visit_call_expr(self, o: CallExpr, /) -> None:
super().visit_call_expr(o)
if (
isinstance(o.callee, RefExpr)
and o.callee.fullname == "builtins.isinstance"
and len(o.args) == 2
):
arg = o.args[1]
self.check_isinstance_arg(arg)
elif isinstance(o.callee, RefExpr) and isinstance(o.callee.node, TypeInfo):
info = o.callee.node
class_ir = self.mapper.type_to_ir.get(info)
if (class_ir and not class_ir.is_ext_class) or (
class_ir is None and not info.fullname.startswith("builtins.")
):
self.annotate(
o, f'Creating an instance of non-native class "{info.name}" ' + "is slow.", 2
)
elif class_ir and class_ir.is_augmented:
self.annotate(
o,
f'Class "{info.name}" is only partially native, and '
+ "constructing an instance is slow.",
2,
)
elif isinstance(o.callee, RefExpr) and isinstance(o.callee.node, Decorator):
decorator = o.callee.node
if self.mapper.is_native_ref_expr(o.callee):
self.annotate(
o,
f'Calling a decorated function ("{decorator.name}") is inefficient, even if it\'s native.',
2,
)
def check_isinstance_arg(self, arg: Expression) -> None:
if isinstance(arg, RefExpr):
if isinstance(arg.node, TypeInfo) and arg.node.is_protocol:
self.annotate(
arg, f'Expensive isinstance() check against protocol "{arg.node.name}".'
)
elif isinstance(arg, TupleExpr):
for item in arg.items:
self.check_isinstance_arg(item)
def visit_lambda_expr(self, o: LambdaExpr, /) -> None:
self.annotate(
o,
"A new object is allocated for lambda each time it is evaluated. "
+ "A module-level function would be faster.",
)
super().visit_lambda_expr(o)
def annotate(self, o: Node, ann: str | Annotation, priority: int = 1) -> None:
if isinstance(ann, str):
ann = Annotation(ann, priority=priority)
self.anns.setdefault(o.line, []).append(ann)
def get_type(self, e: Expression) -> ProperType:
t = self.type_map.get(e)
if t:
return get_proper_type(t)
return AnyType(TypeOfAny.unannotated)
def get_str_literal(v: Value) -> str | None:
if isinstance(v, LoadLiteral) and isinstance(v.value, str):
return v.value
return None
def get_max_prio(anns: list[Annotation]) -> list[Annotation]:
max_prio = max(a.priority for a in anns)
return [a for a in anns if a.priority == max_prio]
def generate_html_report(sources: list[AnnotatedSource]) -> str:
html = []
html.append("<html>\n<head>\n")
html.append(f"<style>\n{CSS}\n</style>")
html.append("</head>\n")
html.append("<body>\n")
for src in sources:
html.append(f"<h2><tt>{src.path}</tt></h2>\n")
html.append("<pre>")
src_anns = src.annotations
with open(src.path) as f:
lines = f.readlines()
for i, s in enumerate(lines):
s = escape(s)
line = i + 1
linenum = "%5d" % line
if line in src_anns:
anns = get_max_prio(src_anns[line])
ann_strs = [a.message for a in anns]
hint = " ".join(ann_strs)
s = colorize_line(linenum, s, hint_html=hint)
else:
s = linenum + " " + s
html.append(s)
html.append("</pre>")
html.append("<script>")
html.append(JS)
html.append("</script>")
html.append("</body></html>\n")
return "".join(html)
def colorize_line(linenum: str, s: str, hint_html: str) -> str:
hint_prefix = " " * len(linenum) + " "
line_span = f'<div class="collapsible" style="background-color: #fcc">{linenum} {s}</div>'
hint_div = f'<div class="content">{hint_prefix}<div class="hint">{hint_html}</div></div>'
return f"<span>{line_span}{hint_div}</span>"
| ASTAnnotateVisitor |
python | celery__celery | celery/worker/consumer/tasks.py | {
"start": 353,
"end": 4001
} | class ____(bootsteps.StartStopStep):
"""Bootstep starting the task message consumer."""
requires = (Mingle,)
def __init__(self, c, **kwargs):
c.task_consumer = c.qos = None
super().__init__(c, **kwargs)
def start(self, c):
"""Start task consumer."""
c.update_strategies()
qos_global = self.qos_global(c)
# set initial prefetch count
c.connection.default_channel.basic_qos(
0, c.initial_prefetch_count, qos_global,
)
c.task_consumer = c.app.amqp.TaskConsumer(
c.connection, on_decode_error=c.on_decode_error,
)
def set_prefetch_count(prefetch_count):
return c.task_consumer.qos(
prefetch_count=prefetch_count,
apply_global=qos_global,
)
eta_task_limit = c.app.conf.worker_eta_task_limit
c.qos = QoS(
set_prefetch_count, c.initial_prefetch_count, max_prefetch=eta_task_limit
)
if c.app.conf.worker_disable_prefetch:
# Only apply disable-prefetch for Redis brokers
is_redis_broker = c.connection.transport.driver_type == 'redis'
if not is_redis_broker:
logger.warning(
f"worker_disable_prefetch is only supported for Redis brokers. "
f"Current broker transport: {c.connection.transport.driver_type}. "
f"Ignoring disable_prefetch setting."
)
return
from types import MethodType
from celery.worker import state
channel_qos = c.task_consumer.channel.qos
original_can_consume = channel_qos.can_consume
def can_consume(self):
# Prefer autoscaler's max_concurrency if set; otherwise fall back to pool size
limit = getattr(c.controller, "max_concurrency", None) or c.pool.num_processes
if len(state.reserved_requests) >= limit:
return False
return original_can_consume()
channel_qos.can_consume = MethodType(can_consume, channel_qos)
def stop(self, c):
"""Stop task consumer."""
if c.task_consumer:
debug('Canceling task consumer...')
ignore_errors(c, c.task_consumer.cancel)
def shutdown(self, c):
"""Shutdown task consumer."""
if c.task_consumer:
self.stop(c)
debug('Closing consumer channel...')
ignore_errors(c, c.task_consumer.close)
c.task_consumer = None
def info(self, c):
"""Return task consumer info."""
return {'prefetch_count': c.qos.value if c.qos else 'N/A'}
def qos_global(self, c) -> bool:
"""Determine if global QoS should be applied.
Additional information:
https://www.rabbitmq.com/docs/consumer-prefetch
https://www.rabbitmq.com/docs/quorum-queues#global-qos
"""
# - RabbitMQ 3.3 completely redefines how basic_qos works...
# This will detect if the new qos semantics is in effect,
# and if so make sure the 'apply_global' flag is set on qos updates.
qos_global = not c.connection.qos_semantics_matches_spec
if c.app.conf.worker_detect_quorum_queues:
using_quorum_queues, _ = detect_quorum_queues(
c.app, c.connection.transport.driver_type
)
if using_quorum_queues:
qos_global = False
logger.info("Global QoS is disabled. Prefetch count in now static.")
return qos_global
| Tasks |
python | tensorflow__tensorflow | tensorflow/python/checkpoint/sharding/sharding_policies.py | {
"start": 1526,
"end": 2625
} | class ____(sharding_util.ShardingCallback):
"""Policy that splits tensors into shards based on their device spec task."""
@property
def description(self) -> str:
return "Split tensors into shards based on their device spec task."
def __call__(
self,
shardable_tensors: Sequence[sharding_util.ShardableTensor]
) -> Sequence[sharding_util.Shard]:
"""Callback to split tensors into shards based on their device spec task.
Args:
shardable_tensors: A list of ShardableTensors.
Returns:
List of shard dicts containing tensors.
[ {checkpoint key: {slice_spec: tensor} } ]
"""
tensors_by_task = {}
for shardable_tensor in shardable_tensors:
tensor = shardable_tensor.tensor
checkpoint_key = shardable_tensor.checkpoint_key
slice_spec = shardable_tensor.slice_spec
(tensors_by_task
.setdefault(checkpoint_key, {})[slice_spec]) = tensor
return [tensors_by_task]
_OffsetAndShape = tuple[Sequence[int], Sequence[int]]
@tf_export.tf_export("train.experimental.MaxShardSizePolicy")
| ShardByTaskPolicy |
python | pytorch__pytorch | torch/testing/_internal/common_pruning.py | {
"start": 9728,
"end": 11111
} | class ____(nn.Module):
r"""Model with Conv2d layers, all with bias, some in a Sequential and some following, and then a Pool2d
and a functional Flatten followed by a Linear layer.
Activation functions and Pool2ds in between each layer also.
Used to test pruned Conv2d-Pool2d-Flatten-Linear fusion."""
def __init__(self) -> None:
super().__init__()
self.seq = nn.Sequential(
nn.Conv2d(1, 3, kernel_size=3, padding=1, bias=True),
nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(3, 5, kernel_size=3, padding=1, bias=True),
nn.Tanh(),
nn.AvgPool2d(kernel_size=2, stride=2, padding=1),
)
self.conv2d1 = nn.Conv2d(5, 7, kernel_size=3, padding=1, bias=True)
self.af1 = nn.ReLU()
self.conv2d2 = nn.Conv2d(7, 11, kernel_size=3, padding=1, bias=True)
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(11, 13, bias=True)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.seq(x)
x = self.conv2d1(x)
x = F.max_pool2d(x, kernel_size=2, stride=2, padding=1)
x = self.af1(x)
x = self.conv2d2(x)
x = self.avg_pool(x)
x = torch.flatten(x, 1) # test functional flatten
x = self.fc(x)
return x
| Conv2dPoolFlattenFunctional |
python | aio-libs__aiohttp | aiohttp/web_exceptions.py | {
"start": 8542,
"end": 8613
} | class ____(HTTPClientError):
status_code = 412
| HTTPPreconditionFailed |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column_test.py | {
"start": 83442,
"end": 110431
} | class ____(test.TestCase):
def test_raises_if_empty_feature_columns(self):
with self.assertRaisesRegex(ValueError,
'feature_columns must not be empty'):
get_keras_linear_model_predictions(features={}, feature_columns=[])
def test_should_be_feature_column(self):
with self.assertRaisesRegex(ValueError, 'must be a _FeatureColumn'):
get_keras_linear_model_predictions(
features={'a': [[0]]}, feature_columns='NotSupported')
def test_should_be_dense_or_categorical_column(self):
class NotSupportedColumn(_FeatureColumn):
@property
def name(self):
return 'NotSupportedColumn'
def _transform_feature(self, cache):
pass
@property
def _parse_example_spec(self):
pass
with self.assertRaisesRegex(
ValueError, 'must be either a _DenseColumn or _CategoricalColumn'):
get_keras_linear_model_predictions(
features={'a': [[0]]}, feature_columns=[NotSupportedColumn()])
def test_does_not_support_dict_columns(self):
with self.assertRaisesRegex(
ValueError, 'Expected feature_columns to be iterable, found dict.'):
fc.linear_model(
features={'a': [[0]]}, feature_columns={'a': fc._numeric_column('a')})
def test_raises_if_duplicate_name(self):
with self.assertRaisesRegex(
ValueError, 'Duplicate feature column name found for columns'):
get_keras_linear_model_predictions(
features={'a': [[0]]},
feature_columns=[fc._numeric_column('a'),
fc._numeric_column('a')])
def test_dense_bias(self):
price = fc._numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
predictions = get_keras_linear_model_predictions(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
sess.run(price_var.assign([[10.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[15.], [55.]], self.evaluate(predictions))
def test_sparse_bias(self):
wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
predictions = get_keras_linear_model_predictions(features, [wire_cast])
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
self.assertAllClose([[0.], [0.], [0.], [0.]],
self.evaluate(wire_cast_var))
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions))
def test_dense_and_sparse_bias(self):
wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
price = fc._numeric_column('price')
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor, 'price': [[1.], [5.]]}
predictions = get_keras_linear_model_predictions(features,
[wire_cast, price])
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
sess.run(price_var.assign([[10.]]))
self.assertAllClose([[1015.], [10065.]], self.evaluate(predictions))
def test_dense_and_sparse_column(self):
"""When the column is both dense and sparse, uses sparse tensors."""
class _DenseAndSparseColumn(_DenseColumn, _CategoricalColumn):
@property
def name(self):
return 'dense_and_sparse_column'
@property
def _parse_example_spec(self):
return {self.name: parsing_ops.VarLenFeature(self.dtype)}
def _transform_feature(self, inputs):
return inputs.get(self.name)
@property
def _variable_shape(self):
raise ValueError('Should not use this method.')
def _get_dense_tensor(self,
inputs,
weight_collections=None,
trainable=None):
raise ValueError('Should not use this method.')
@property
def _num_buckets(self):
return 4
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
sp_tensor = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 0], [1, 1]],
values=[2, 0, 3],
dense_shape=[2, 2])
return _CategoricalColumn.IdWeightPair(sp_tensor, None)
dense_and_sparse_column = _DenseAndSparseColumn()
with ops.Graph().as_default():
sp_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {dense_and_sparse_column.name: sp_tensor}
predictions = get_keras_linear_model_predictions(
features, [dense_and_sparse_column])
bias = get_linear_model_bias()
dense_and_sparse_column_var = get_linear_model_column_var(
dense_and_sparse_column)
with _initialized_session() as sess:
sess.run(
dense_and_sparse_column_var.assign([[10.], [100.], [1000.],
[10000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions))
def test_dense_multi_output(self):
price = fc._numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
predictions = get_keras_linear_model_predictions(
features, [price], units=3)
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
self.assertAllClose(np.zeros((1, 3)), self.evaluate(price_var))
sess.run(price_var.assign([[10., 100., 1000.]]))
sess.run(bias.assign([5., 6., 7.]))
self.assertAllClose([[15., 106., 1007.], [55., 506., 5007.]],
self.evaluate(predictions))
def test_sparse_multi_output(self):
wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
predictions = get_keras_linear_model_predictions(
features, [wire_cast], units=3)
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
self.assertAllClose(np.zeros((4, 3)), self.evaluate(wire_cast_var))
sess.run(
wire_cast_var.assign([[10., 11., 12.], [100., 110., 120.],
[1000., 1100.,
1200.], [10000., 11000., 12000.]]))
sess.run(bias.assign([5., 6., 7.]))
self.assertAllClose([[1005., 1106., 1207.], [10015., 11017., 12019.]],
self.evaluate(predictions))
def test_dense_multi_dimension(self):
price = fc._numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
predictions = get_keras_linear_model_predictions(features, [price])
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([[0.], [0.]], self.evaluate(price_var))
sess.run(price_var.assign([[10.], [100.]]))
self.assertAllClose([[210.], [650.]], self.evaluate(predictions))
def test_sparse_multi_rank(self):
wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = array_ops.sparse_placeholder(dtypes.string)
wire_value = sparse_tensor.SparseTensorValue(
values=['omar', 'stringer', 'marlo', 'omar'], # hashed = [2, 0, 3, 2]
indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 1]],
dense_shape=[2, 2, 2])
features = {'wire_cast': wire_tensor}
predictions = get_keras_linear_model_predictions(features, [wire_cast])
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((4, 1)), self.evaluate(wire_cast_var))
self.assertAllClose(
np.zeros((2, 1)),
predictions.eval(feed_dict={wire_tensor: wire_value}))
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
self.assertAllClose(
[[1010.], [11000.]],
predictions.eval(feed_dict={wire_tensor: wire_value}))
def test_sparse_combiner(self):
wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default():
wire_tensor = sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3]
indices=[[0, 0], [1, 0], [1, 1]],
dense_shape=[2, 2])
features = {'wire_cast': wire_tensor}
predictions = get_keras_linear_model_predictions(
features, [wire_cast], sparse_combiner='mean')
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
with _initialized_session() as sess:
sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[1005.], [5010.]], self.evaluate(predictions))
def test_dense_multi_dimension_multi_output(self):
price = fc._numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
predictions = get_keras_linear_model_predictions(
features, [price], units=3)
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
self.assertAllClose(np.zeros((2, 3)), self.evaluate(price_var))
sess.run(price_var.assign([[1., 2., 3.], [10., 100., 1000.]]))
sess.run(bias.assign([2., 3., 4.]))
self.assertAllClose([[23., 205., 2007.], [67., 613., 6019.]],
self.evaluate(predictions))
def test_raises_if_shape_mismatch(self):
price = fc._numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
with self.assertRaisesRegex(
Exception,
r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
get_keras_linear_model_predictions(features, [price])
def test_dense_reshaping(self):
price = fc._numeric_column('price', shape=[1, 2])
with ops.Graph().as_default():
features = {'price': [[[1., 2.]], [[5., 6.]]]}
predictions = get_keras_linear_model_predictions(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
self.assertAllClose([[0.], [0.]], self.evaluate(price_var))
self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
sess.run(price_var.assign([[10.], [100.]]))
self.assertAllClose([[210.], [650.]], self.evaluate(predictions))
def test_dense_multi_column(self):
price1 = fc._numeric_column('price1', shape=2)
price2 = fc._numeric_column('price2')
with ops.Graph().as_default():
features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
predictions = get_keras_linear_model_predictions(features,
[price1, price2])
bias = get_linear_model_bias()
price1_var = get_linear_model_column_var(price1)
price2_var = get_linear_model_column_var(price2)
with _initialized_session() as sess:
self.assertAllClose([0.], self.evaluate(bias))
self.assertAllClose([[0.], [0.]], self.evaluate(price1_var))
self.assertAllClose([[0.]], self.evaluate(price2_var))
self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
sess.run(price1_var.assign([[10.], [100.]]))
sess.run(price2_var.assign([[1000.]]))
sess.run(bias.assign([7.]))
self.assertAllClose([[3217.], [4657.]], self.evaluate(predictions))
def test_fills_cols_to_vars(self):
price1 = fc._numeric_column('price1', shape=2)
price2 = fc._numeric_column('price2')
with ops.Graph().as_default():
features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
cols_to_vars = {}
get_keras_linear_model_predictions(
features, [price1, price2], cols_to_vars=cols_to_vars)
bias = get_linear_model_bias()
price1_var = get_linear_model_column_var(price1)
price2_var = get_linear_model_column_var(price2)
self.assertEqual(cols_to_vars['bias'], [bias])
self.assertEqual(cols_to_vars[price1], [price1_var])
self.assertEqual(cols_to_vars[price2], [price2_var])
def test_fills_cols_to_vars_partitioned_variables(self):
price1 = fc._numeric_column('price1', shape=2)
price2 = fc._numeric_column('price2', shape=3)
with ops.Graph().as_default():
features = {
'price1': [[1., 2.], [6., 7.]],
'price2': [[3., 4., 5.], [8., 9., 10.]]
}
cols_to_vars = {}
with variable_scope.variable_scope(
'linear',
partitioner=partitioned_variables.fixed_size_partitioner(2, axis=0)):
get_keras_linear_model_predictions(
features, [price1, price2], cols_to_vars=cols_to_vars)
with _initialized_session():
self.assertEqual([0.], cols_to_vars['bias'][0].eval())
# Partitioning shards the [2, 1] price1 var into 2 [1, 1] Variables.
self.assertAllEqual([[0.]], cols_to_vars[price1][0])
self.assertAllEqual([[0.]], cols_to_vars[price1][1])
# Partitioning shards the [3, 1] price2 var into a [2, 1] Variable and
# a [1, 1] Variable.
self.assertAllEqual([[0.], [0.]], cols_to_vars[price2][0])
self.assertAllEqual([[0.]], cols_to_vars[price2][1])
def test_dense_collection(self):
price = fc._numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
get_keras_linear_model_predictions(
features, [price], weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
self.assertIn(bias, my_vars)
self.assertIn(price_var, my_vars)
def test_sparse_collection(self):
wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
get_keras_linear_model_predictions(
features, [wire_cast], weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
self.assertIn(bias, my_vars)
self.assertIn(wire_cast_var, my_vars)
def test_dense_trainable_default(self):
price = fc._numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
get_keras_linear_model_predictions(features, [price])
bias = get_linear_model_bias()
price_var = get_linear_model_column_var(price)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertIn(bias, trainable_vars)
self.assertIn(price_var, trainable_vars)
def test_sparse_trainable_default(self):
wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
get_keras_linear_model_predictions(features, [wire_cast])
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
bias = get_linear_model_bias()
wire_cast_var = get_linear_model_column_var(wire_cast)
self.assertIn(bias, trainable_vars)
self.assertIn(wire_cast_var, trainable_vars)
def test_dense_trainable_false(self):
price = fc._numeric_column('price')
with ops.Graph().as_default() as g:
features = {'price': [[1.], [5.]]}
get_keras_linear_model_predictions(features, [price], trainable=False)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual([], trainable_vars)
def test_sparse_trainable_false(self):
wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
wire_tensor = sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
features = {'wire_cast': wire_tensor}
get_keras_linear_model_predictions(features, [wire_cast], trainable=False)
trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
self.assertEqual([], trainable_vars)
def test_column_order(self):
price_a = fc._numeric_column('price_a')
price_b = fc._numeric_column('price_b')
wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
with ops.Graph().as_default() as g:
features = {
'price_a': [[1.]],
'price_b': [[3.]],
'wire_cast':
sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
}
get_keras_linear_model_predictions(
features, [price_a, wire_cast, price_b],
weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
self.assertIn('price_a', my_vars[0].name)
self.assertIn('price_b', my_vars[1].name)
self.assertIn('wire_cast', my_vars[2].name)
with ops.Graph().as_default() as g:
features = {
'price_a': [[1.]],
'price_b': [[3.]],
'wire_cast':
sparse_tensor.SparseTensor(
values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
}
get_keras_linear_model_predictions(
features, [wire_cast, price_b, price_a],
weight_collections=['my-vars'])
my_vars = g.get_collection('my-vars')
self.assertIn('price_a', my_vars[0].name)
self.assertIn('price_b', my_vars[1].name)
self.assertIn('wire_cast', my_vars[2].name)
def test_static_batch_size_mismatch(self):
price1 = fc._numeric_column('price1')
price2 = fc._numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1.], [5.], [7.]], # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
with self.assertRaisesRegex(
ValueError,
r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
get_keras_linear_model_predictions(features, [price1, price2])
def test_subset_of_static_batch_size_mismatch(self):
price1 = fc._numeric_column('price1')
price2 = fc._numeric_column('price2')
price3 = fc._numeric_column('price3')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]], # batchsize = 2
'price3': [[3.], [4.], [5.]] # batchsize = 3
}
with self.assertRaisesRegex(
ValueError,
r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
get_keras_linear_model_predictions(features, [price1, price2, price3])
def test_runtime_batch_size_mismatch(self):
price1 = fc._numeric_column('price1')
price2 = fc._numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
predictions = get_keras_linear_model_predictions(features,
[price1, price2])
with _initialized_session() as sess:
with self.assertRaisesRegex(errors.OpError,
'must have the same size and shape'):
sess.run(
predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]})
def test_runtime_batch_size_matches(self):
price1 = fc._numeric_column('price1')
price2 = fc._numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
}
predictions = get_keras_linear_model_predictions(features,
[price1, price2])
with _initialized_session() as sess:
sess.run(
predictions,
feed_dict={
features['price1']: [[1.], [5.]],
features['price2']: [[1.], [5.]],
})
@test_util.run_deprecated_v1
# FeatureColumnV1 uses variable scope etc. which is TF1.
def test_with_1d_sparse_tensor(self):
price = fc._numeric_column('price')
price_buckets = fc._bucketized_column(
price, boundaries=[
0.,
10.,
100.,
])
body_style = fc._categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
# Provides 1-dim tensor and dense tensor.
features = {
'price':
constant_op.constant([
-1.,
12.,
]),
'body-style':
sparse_tensor.SparseTensor(
indices=((0,), (1,)),
values=('sedan', 'hardtop'),
dense_shape=(2,)),
}
self.assertEqual(1, features['price'].shape.ndims)
self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
net = get_keras_linear_model_predictions(features,
[price_buckets, body_style])
with _initialized_session() as sess:
bias = get_linear_model_bias()
price_buckets_var = get_linear_model_column_var(price_buckets)
body_style_var = get_linear_model_column_var(body_style)
sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
self.evaluate(net))
@test_util.run_deprecated_v1
# Placeholders are TF1. Replacing with tf.function not feasible because of V1
# variable creation.
def test_with_1d_unknown_shape_sparse_tensor(self):
price = fc._numeric_column('price')
price_buckets = fc._bucketized_column(
price, boundaries=[
0.,
10.,
100.,
])
body_style = fc._categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
country = fc._categorical_column_with_vocabulary_list(
'country', vocabulary_list=['US', 'JP', 'CA'])
# Provides 1-dim tensor and dense tensor.
features = {
'price': array_ops.placeholder(dtypes.float32),
'body-style': array_ops.sparse_placeholder(dtypes.string),
'country': array_ops.placeholder(dtypes.string),
}
self.assertIsNone(features['price'].shape.ndims)
self.assertIsNone(features['body-style'].get_shape().ndims)
price_data = np.array([-1., 12.])
body_style_data = sparse_tensor.SparseTensorValue(
indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
country_data = np.array(['US', 'CA'])
net = get_keras_linear_model_predictions(
features, [price_buckets, body_style, country])
bias = get_linear_model_bias()
price_buckets_var = get_linear_model_column_var(price_buckets)
body_style_var = get_linear_model_column_var(body_style)
with _initialized_session() as sess:
sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
sess.run(bias.assign([5.]))
self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
sess.run(
net,
feed_dict={
features['price']: price_data,
features['body-style']: body_style_data,
features['country']: country_data
}))
@test_util.run_deprecated_v1
# Placeholders are TF1. Replacing with tf.function not feasible because of V1
# variable creation.
def test_with_rank_0_feature(self):
price = fc._numeric_column('price')
features = {
'price': constant_op.constant(0),
}
self.assertEqual(0, features['price'].shape.ndims)
# Static rank 0 should fail
with self.assertRaisesRegex(ValueError, 'Feature .* cannot have rank 0'):
get_keras_linear_model_predictions(features, [price])
# Dynamic rank 0 should fail
features = {
'price': array_ops.placeholder(dtypes.float32),
}
net = get_keras_linear_model_predictions(features, [price])
self.assertEqual(1, net.shape[1])
with _initialized_session() as sess:
with self.assertRaisesOpError('Feature .* cannot have rank 0'):
sess.run(net, feed_dict={features['price']: np.array(1)})
| _LinearModelTest |
python | tensorflow__tensorflow | tensorflow/python/distribute/distribute_lib.py | {
"start": 28138,
"end": 31822
} | class ____(object):
"""Context manager setting the current `tf.distribute.Strategy`.
Also: overrides the variable creator and optionally the current device.
"""
def __init__(
self,
strategy,
var_creator_scope,
var_scope=None,
resource_creator_scope=None,
default_device_scope=None,
):
self._context = _CrossReplicaThreadMode( # pylint: disable=protected-access
strategy)
self._var_creator_scope = var_creator_scope
self._var_scope = var_scope
self._resource_creator_scope = resource_creator_scope
if default_device_scope:
self._device_scope = default_device_scope
else:
self._device_scope = None
self._same_scope_again_count = 0
def __enter__(self):
# Allow this scope to be entered if this strategy is already in scope.
if has_strategy():
_require_cross_replica_or_default_context_extended(
self._context.strategy.extended)
self._same_scope_again_count += 1
else:
_push_per_thread_mode(self._context)
if self._var_scope:
self._var_scope.__enter__()
self._var_creator_scope.__enter__()
if self._resource_creator_scope:
nest.map_structure(lambda scope: scope.__enter__(),
self._resource_creator_scope)
if self._device_scope:
self._device_scope.__enter__()
return self._context.strategy
def __exit__(self, exception_type, exception_value, traceback):
if hasattr(self._context.strategy.extended, "_lazy_variable_tracker"):
self._context.strategy.extended._lazy_variable_tracker.initialize_all()
if self._same_scope_again_count > 0:
self._same_scope_again_count -= 1
return
if self._device_scope:
try:
self._device_scope.__exit__(exception_type, exception_value, traceback)
except RuntimeError as e:
six.raise_from(
RuntimeError("Device scope nesting error: move call to "
"tf.distribute.set_strategy() out of `with` scope."),
e)
try:
self._var_creator_scope.__exit__(
exception_type, exception_value, traceback)
except RuntimeError as e:
six.raise_from(
RuntimeError("Variable creator scope nesting error: move call to "
"tf.distribute.set_strategy() out of `with` scope."),
e)
if self._resource_creator_scope:
try:
if isinstance(self._resource_creator_scope, list):
reversed_resource_creator_scope = self._resource_creator_scope[::-1]
nest.map_structure(
lambda scope: scope.__exit__(exception_type, exception_value, # pylint:disable=g-long-lambda
traceback),
reversed_resource_creator_scope)
else:
self._resource_creator_scope.__exit__(exception_type, exception_value,
traceback)
except RuntimeError as e:
six.raise_from(
RuntimeError("Resource creator scope nesting error: move call "
"to tf.distribute.set_strategy() out of `with` "
"scope."), e)
if self._var_scope:
try:
self._var_scope.__exit__(exception_type, exception_value, traceback)
except RuntimeError as e:
six.raise_from(
RuntimeError("Variable scope nesting error: move call to "
"tf.distribute.set_strategy() out of `with` scope."),
e)
_pop_per_thread_mode()
# TODO(yuefengz): add more replication modes.
@tf_export("distribute.InputReplicationMode")
| _CurrentDistributionContext |
python | streamlit__streamlit | lib/tests/streamlit/elements/vega_charts_test.py | {
"start": 82740,
"end": 89430
} | class ____(DeltaGeneratorTestCase):
"""Test width and height parameter functionality for modernized chart commands."""
CHART_COMMANDS: ClassVar[list[tuple[Callable, str]]] = [
(st.line_chart, "line_chart"),
(st.scatter_chart, "scatter_chart"),
(st.bar_chart, "bar_chart"),
(st.area_chart, "area_chart"),
]
@parameterized.expand(
[
# width, height, expected_width_spec, expected_width_value, expected_height_spec, expected_height_value
(
"stretch",
"content",
"use_stretch",
True,
"use_content",
True,
), # defaults
("content", "content", "use_content", True, "use_content", True),
("stretch", "stretch", "use_stretch", True, "use_stretch", True),
(500, "content", "pixel_width", 500, "use_content", True),
("stretch", 400, "use_stretch", True, "pixel_height", 400),
(600, 400, "pixel_width", 600, "pixel_height", 400),
]
)
def test_chart_width_height_combinations(
self,
width: str | int,
height: str | int,
expected_width_spec: str,
expected_width_value: bool | int,
expected_height_spec: str,
expected_height_value: bool | int,
):
"""Test chart commands with various width and height combinations."""
df = pd.DataFrame([[20, 30, 50]], columns=["a", "b", "c"])
for chart_command, chart_name in self.CHART_COMMANDS:
with self.subTest(chart=chart_name):
chart_command(df, x="a", y="b", width=width, height=height)
el = self.get_delta_from_queue().new_element
assert el.width_config.WhichOneof("width_spec") == expected_width_spec
assert (
getattr(el.width_config, expected_width_spec)
== expected_width_value
)
assert (
el.height_config.WhichOneof("height_spec") == expected_height_spec
)
assert (
getattr(el.height_config, expected_height_spec)
== expected_height_value
)
@parameterized.expand(
[
# use_container_width, width, expected_width_spec, expected_width_value
(
True,
None,
"use_stretch",
True,
), # use_container_width=True -> width="stretch"
(
False,
None,
"use_content",
True,
), # use_container_width=False -> width="content"
(
True,
500,
"use_stretch",
True,
), # use_container_width=True overrides integer width
(
True,
"content",
"use_stretch",
True,
), # use_container_width=True overrides string width
(
False,
"content",
"use_content",
True,
), # use_container_width=False, width="content"
(
False,
500,
"pixel_width",
500,
), # use_container_width=False, integer width -> respect integer
(
False,
300,
"pixel_width",
300,
), # use_container_width=False, different integer -> respect integer
]
)
@patch("streamlit.elements.vega_charts.show_deprecation_warning")
def test_chart_use_container_width_deprecation(
self,
use_container_width: bool,
width: int | str | None,
expected_width_spec: str,
expected_width_value: bool | int,
mock_warning: Mock,
):
"""Test that use_container_width shows deprecation warning and is correctly translated to
the new width parameter."""
df = pd.DataFrame([[20, 30, 50]], columns=["a", "b", "c"])
kwargs = {"use_container_width": use_container_width}
if width is not None:
kwargs["width"] = width
for chart_command, chart_name in self.CHART_COMMANDS:
with self.subTest(chart=chart_name):
chart_command(df, x="a", y="b", **kwargs)
mock_warning.assert_called()
el = self.get_delta_from_queue().new_element
assert el.width_config.WhichOneof("width_spec") == expected_width_spec
assert (
getattr(el.width_config, expected_width_spec)
== expected_width_value
)
# Verify the warning was called for each chart command
assert mock_warning.call_count == len(self.CHART_COMMANDS)
@parameterized.expand(
[
# param_name, invalid_value
("width", "invalid_width"),
("height", "invalid_height"),
("width", 0), # width must be positive
("height", 0), # height must be positive
("width", -100), # negative width
("height", -100), # negative height
]
)
def test_chart_validation_errors(self, param_name: str, invalid_value: str | int):
"""Test that invalid width/height values raise validation errors."""
df = pd.DataFrame([[20, 30, 50]], columns=["a", "b", "c"])
kwargs = {param_name: invalid_value}
for chart_command, chart_name in self.CHART_COMMANDS:
with self.subTest(chart=chart_name):
with pytest.raises(StreamlitAPIException):
chart_command(df, x="a", y="b", **kwargs)
def test_chart_default_width_height(self):
"""Test that default width is 'stretch' and default height is 'content'."""
df = pd.DataFrame([[20, 30, 50]], columns=["a", "b", "c"])
for chart_command, chart_name in self.CHART_COMMANDS:
with self.subTest(chart=chart_name):
chart_command(df, x="a", y="b") # No width/height specified
el = self.get_delta_from_queue().new_element
# Should default to stretch width and content height
assert el.width_config.WhichOneof("width_spec") == "use_stretch"
assert el.width_config.use_stretch is True
assert el.height_config.WhichOneof("height_spec") == "use_content"
assert el.height_config.use_content is True
| ChartWidthHeightTest |
python | sphinx-doc__sphinx | sphinx/ext/coverage.py | {
"start": 5399,
"end": 22563
} | class ____(Builder):
"""Evaluates coverage of code in the documentation."""
name = 'coverage'
epilog = __(
'Testing of coverage in the sources finished, look at the '
'results in %(outdir)s{sep}python.txt.'
).format(sep=os.path.sep)
def init(self) -> None:
self.c_sourcefiles: list[str] = []
for pattern in self.config.coverage_c_path:
pattern = self.srcdir / pattern
self.c_sourcefiles.extend(glob.glob(str(pattern))) # NoQA: PTH207
self.c_regexes: list[tuple[str, re.Pattern[str]]] = []
for name, exp in self.config.coverage_c_regexes.items():
try:
self.c_regexes.append((name, re.compile(exp)))
except Exception:
logger.warning(__('invalid regex %r in coverage_c_regexes'), exp)
self.c_ignorexps: dict[str, list[re.Pattern[str]]] = {
name: compile_regex_list('coverage_ignore_c_items', exps)
for name, exps in self.config.coverage_ignore_c_items.items()
}
self.mod_ignorexps = compile_regex_list(
'coverage_ignore_modules', self.config.coverage_ignore_modules
)
self.cls_ignorexps = compile_regex_list(
'coverage_ignore_classes', self.config.coverage_ignore_classes
)
self.fun_ignorexps = compile_regex_list(
'coverage_ignore_functions', self.config.coverage_ignore_functions
)
self.py_ignorexps = compile_regex_list(
'coverage_ignore_pyobjects', self.config.coverage_ignore_pyobjects
)
def get_outdated_docs(self) -> str:
return 'coverage overview'
def write_documents(self, _docnames: Set[str]) -> None:
self.py_undoc: dict[str, dict[str, Any]] = {}
self.py_undocumented: dict[str, Set[str]] = {}
self.py_documented: dict[str, Set[str]] = {}
self.build_py_coverage()
self.write_py_coverage()
self.c_undoc: dict[str, Set[tuple[str, str]]] = {}
self.build_c_coverage()
self.write_c_coverage()
def build_c_coverage(self) -> None:
c_objects = {}
for obj in self.env.domains.c_domain.get_objects():
c_objects[obj[2]] = obj[1]
for filename in self.c_sourcefiles:
undoc: set[tuple[str, str]] = set()
with open(filename, encoding='utf-8') as f:
for line in f:
for key, regex in self.c_regexes:
match = regex.match(line)
if match:
name = match.groups()[0]
if key not in c_objects:
undoc.add((key, name))
continue
if name not in c_objects[key]:
for exp in self.c_ignorexps.get(key, ()):
if exp.match(name):
break
else:
undoc.add((key, name))
continue
if undoc:
self.c_undoc[filename] = undoc
def write_c_coverage(self) -> None:
output_file = self.outdir / 'c.txt'
with open(output_file, 'w', encoding='utf-8') as op:
if self.config.coverage_write_headline:
write_header(op, 'Undocumented C API elements', '=')
op.write('\n')
for filename, undoc in self.c_undoc.items():
write_header(op, filename)
for typ, name in sorted(undoc):
op.write(f' * {name:<50} [{typ:>9}]\n')
if self.config.coverage_show_missing_items:
if self.config.verbosity < 0:
logger.warning(
__('undocumented c api: %s [%s] in file %s'),
name,
typ,
filename,
)
else:
logger.info(
red('undocumented ') # NoQA: G003
+ f'c api {f"{name} [{typ:>9}]":<30}'
+ red(' - in file ')
+ filename
)
op.write('\n')
def ignore_pyobj(self, full_name: str) -> bool:
return any(exp.search(full_name) for exp in self.py_ignorexps)
def build_py_coverage(self) -> None:
seen_objects = frozenset(self.env.domaindata['py']['objects'])
seen_modules = frozenset(self.env.domaindata['py']['modules'])
skip_undoc = self.config.coverage_skip_undoc_in_source
modules = _determine_py_coverage_modules(
self.config.coverage_modules,
seen_modules,
self.mod_ignorexps,
self.py_undoc,
)
for mod_name in modules:
ignore = False
for exp in self.mod_ignorexps:
if exp.match(mod_name):
ignore = True
break
if ignore or self.ignore_pyobj(mod_name):
continue
try:
mod = import_module(mod_name)
except ImportError as err:
logger.warning(__('module %s could not be imported: %s'), mod_name, err)
self.py_undoc[mod_name] = {'error': err}
continue
documented_objects: set[str] = set()
undocumented_objects: set[str] = set()
funcs = []
classes: dict[str, list[str]] = {}
for name, obj in inspect.getmembers(mod):
# diverse module attributes are ignored:
if name[0] == '_':
# begins in an underscore
continue
if not hasattr(obj, '__module__'):
# cannot be attributed to a module
continue
if obj.__module__ != mod_name:
# is not defined in this module
continue
full_name = f'{mod_name}.{name}'
if self.ignore_pyobj(full_name):
continue
if inspect.isfunction(obj):
if full_name not in seen_objects:
for exp in self.fun_ignorexps:
if exp.match(name):
break
else:
if skip_undoc and not obj.__doc__:
continue
funcs.append(name)
undocumented_objects.add(full_name)
else:
documented_objects.add(full_name)
elif inspect.isclass(obj):
for exp in self.cls_ignorexps:
if exp.match(name):
break
else:
if full_name not in seen_objects:
if skip_undoc and not obj.__doc__:
continue
# not documented at all
classes[name] = []
continue
attrs: list[str] = []
for attr_name in dir(obj):
if attr_name not in obj.__dict__:
continue
try:
attr = safe_getattr(obj, attr_name)
except AttributeError:
continue
if not (inspect.ismethod(attr) or inspect.isfunction(attr)):
continue
if attr_name[0] == '_':
# starts with an underscore, ignore it
continue
if skip_undoc and not attr.__doc__:
# skip methods without docstring if wished
continue
full_attr_name = f'{full_name}.{attr_name}'
if self.ignore_pyobj(full_attr_name):
continue
if full_attr_name not in seen_objects:
attrs.append(attr_name)
undocumented_objects.add(full_attr_name)
else:
documented_objects.add(full_attr_name)
if attrs:
# some attributes are undocumented
classes[name] = attrs
self.py_undoc[mod_name] = {'funcs': funcs, 'classes': classes}
self.py_undocumented[mod_name] = undocumented_objects
self.py_documented[mod_name] = documented_objects
def _write_py_statistics(self, op: TextIO) -> None:
"""Outputs the table of ``op``."""
all_modules = frozenset(self.py_documented.keys() | self.py_undocumented.keys())
all_objects: Set[str] = set()
all_documented_objects: Set[str] = set()
for module in all_modules:
all_objects |= self.py_documented[module] | self.py_undocumented[module]
all_documented_objects |= self.py_documented[module]
# prepare tabular
table = [['Module', 'Coverage', 'Undocumented']]
for module in sorted(all_modules):
module_objects = self.py_documented[module] | self.py_undocumented[module]
if len(module_objects):
value = 100.0 * len(self.py_documented[module]) / len(module_objects)
else:
value = 100.0
table.append([
module,
f'{value:.2f}%',
str(len(self.py_undocumented[module])),
])
if all_objects:
table.append([
'TOTAL',
f'{100 * len(all_documented_objects) / len(all_objects):.2f}%',
f'{len(all_objects) - len(all_documented_objects)}',
])
else:
table.append(['TOTAL', '100', '0'])
op.writelines(f'{line}\n' for line in _write_table(table))
def write_py_coverage(self) -> None:
output_file = self.outdir / 'python.txt'
failed = []
with open(output_file, 'w', encoding='utf-8') as op:
if self.config.coverage_write_headline:
write_header(op, 'Undocumented Python objects', '=')
if self.config.coverage_statistics_to_stdout:
self._write_py_statistics(sys.stdout)
if self.config.coverage_statistics_to_report:
write_header(op, 'Statistics')
self._write_py_statistics(op)
op.write('\n')
keys = sorted(self.py_undoc.keys())
for name in keys:
undoc = self.py_undoc[name]
if 'error' in undoc:
failed.append((name, undoc['error']))
else:
if not undoc['classes'] and not undoc['funcs']:
continue
write_header(op, name)
if undoc['funcs']:
op.write('Functions:\n')
op.writelines(f' * {x}\n' for x in undoc['funcs'])
if self.config.coverage_show_missing_items:
if self.config.verbosity < 0:
for func in undoc['funcs']:
logger.warning(
__('undocumented python function: %s :: %s'),
name,
func,
)
else:
for func in undoc['funcs']:
logger.info(
red('undocumented ') # NoQA: G003
+ f'py function {func:<30}'
+ red(' - in module ')
+ name
)
op.write('\n')
if undoc['classes']:
op.write('Classes:\n')
for class_name, methods in sorted(undoc['classes'].items()):
if not methods:
op.write(f' * {class_name}\n')
if self.config.coverage_show_missing_items:
if self.config.verbosity < 0:
logger.warning(
__('undocumented python class: %s :: %s'),
name,
class_name,
)
else:
logger.info(
red('undocumented ') # NoQA: G003
+ f'py class {class_name:<30}'
+ red(' - in module ')
+ name
)
else:
op.write(f' * {class_name} -- missing methods:\n\n')
op.writelines(f' - {x}\n' for x in methods)
if self.config.coverage_show_missing_items:
if self.config.verbosity < 0:
for meth in methods:
logger.warning(
__(
'undocumented python method:'
' %s :: %s :: %s'
),
name,
class_name,
meth,
)
else:
for meth in methods:
logger.info(
red('undocumented ') # NoQA: G003
+ f'py method {f"{class_name}.{meth}":<30}'
+ red(' - in module ')
+ name
)
op.write('\n')
if failed:
write_header(op, 'Modules that failed to import')
op.writelines(f' * {name} -- {err}\n' for name, err in failed)
def finish(self) -> None:
# dump the coverage data to a pickle file too
picklepath = self.outdir / 'undoc.pickle'
with open(picklepath, 'wb') as dumpfile:
pickle.dump(
(self.py_undoc, self.c_undoc, self.py_undocumented, self.py_documented),
dumpfile,
)
def setup(app: Sphinx) -> ExtensionMetadata:
app.add_builder(CoverageBuilder)
app.add_config_value('coverage_modules', (), '', types=frozenset({list, tuple}))
app.add_config_value(
'coverage_ignore_modules', [], '', types=frozenset({list, tuple})
)
app.add_config_value(
'coverage_ignore_functions', [], '', types=frozenset({list, tuple})
)
app.add_config_value(
'coverage_ignore_classes', [], '', types=frozenset({list, tuple})
)
app.add_config_value(
'coverage_ignore_pyobjects', [], '', types=frozenset({list, tuple})
)
app.add_config_value('coverage_c_path', [], '', types=frozenset({list, tuple}))
app.add_config_value('coverage_c_regexes', {}, '', types=frozenset({dict}))
app.add_config_value('coverage_ignore_c_items', {}, '', types=frozenset({dict}))
app.add_config_value('coverage_write_headline', True, '', types=frozenset({bool}))
app.add_config_value(
'coverage_statistics_to_report', True, '', types=frozenset({bool})
)
app.add_config_value(
'coverage_statistics_to_stdout', True, '', types=frozenset({bool})
)
app.add_config_value(
'coverage_skip_undoc_in_source', False, '', types=frozenset({bool})
)
app.add_config_value(
'coverage_show_missing_items', False, '', types=frozenset({bool})
)
return {
'version': sphinx.__display_version__,
'parallel_read_safe': True,
}
| CoverageBuilder |
python | getsentry__sentry | src/sentry/issues/grouptype.py | {
"start": 15571,
"end": 15961
} | class ____(GroupType):
type_id = 1012
slug = "performance_uncompressed_assets"
description = "Uncompressed Asset"
category = GroupCategory.PERFORMANCE.value
category_v2 = GroupCategory.FRONTEND.value
noise_config = NoiseConfig(ignore_limit=100)
default_priority = PriorityLevel.LOW
released = True
@dataclass(frozen=True)
| PerformanceUncompressedAssetsGroupType |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_display_units08.py | {
"start": 315,
"end": 1235
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_display_units08.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [56159232, 61364096]
data = [
[10000000, 20000000, 30000000, 20000000, 10000000],
]
worksheet.write_column(0, 0, data[0])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.set_y_axis(
{"display_units": "hundred_millions", "display_units_visible": 0}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | dask__dask | dask/dataframe/dask_expr/_groupby.py | {
"start": 16634,
"end": 16692
} | class ____(SingleAggregation):
groupby_chunk = M.min
| Min |
python | scipy__scipy | scipy/optimize/tests/test_minimize_constrained.py | {
"start": 6893,
"end": 7490
} | class ____(Rosenbrock):
"""Rosenbrock subject to inequality constraints.
The following optimization problem:
minimize sum(100.0*(x[1] - x[0]**2)**2.0 + (1 - x[0])**2)
subject to: x[0] + 2 x[1] <= 1
Taken from matlab ``fmincon`` documentation.
"""
def __init__(self, random_state=0):
Rosenbrock.__init__(self, 2, random_state)
self.x0 = [-1, -0.5]
self.x_opt = [0.5022, 0.2489]
self.bounds = None
@property
def constr(self):
A = [[1, 2]]
b = 1
return LinearConstraint(A, -np.inf, b)
| IneqRosenbrock |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/router/llm_router.py | {
"start": 5461,
"end": 6806
} | class ____(BaseOutputParser[dict[str, str]]):
"""Parser for output of router chain in the multi-prompt chain."""
default_destination: str = "DEFAULT"
next_inputs_type: type = str
next_inputs_inner_key: str = "input"
@override
def parse(self, text: str) -> dict[str, Any]:
try:
expected_keys = ["destination", "next_inputs"]
parsed = parse_and_check_json_markdown(text, expected_keys)
if not isinstance(parsed["destination"], str):
msg = "Expected 'destination' to be a string."
raise TypeError(msg)
if not isinstance(parsed["next_inputs"], self.next_inputs_type):
msg = f"Expected 'next_inputs' to be {self.next_inputs_type}."
raise TypeError(msg)
parsed["next_inputs"] = {self.next_inputs_inner_key: parsed["next_inputs"]}
if (
parsed["destination"].strip().lower()
== self.default_destination.lower()
):
parsed["destination"] = None
else:
parsed["destination"] = parsed["destination"].strip()
except Exception as e:
msg = f"Parsing text\n{text}\n raised following error:\n{e}"
raise OutputParserException(msg) from e
return parsed
| RouterOutputParser |
python | django__django | tests/managers_regress/models.py | {
"start": 464,
"end": 580
} | class ____(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(value=42)
| Value42 |
python | urllib3__urllib3 | test/test_collections.py | {
"start": 3032,
"end": 3479
} | class ____:
def __init__(self, **kwargs: str) -> None:
self._data = {}
self._data.update(kwargs)
def keys(self) -> typing.Iterator[str]:
return iter(self._data)
def __getitem__(self, key: str) -> str:
return self._data[key]
@pytest.fixture()
def d() -> HTTPHeaderDict:
header_dict = HTTPHeaderDict(Cookie="foo")
header_dict.add("cookie", "bar")
return header_dict
| NonMappingHeaderContainer |
python | google__pytype | pytype/overlays/typing_extensions_overlay.py | {
"start": 108,
"end": 801
} | class ____(typing_overlay.Redirect):
"""A custom overlay for the 'typing_extensions' module."""
def __init__(self, ctx):
aliases = {"runtime": "typing.runtime_checkable"}
super().__init__("typing_extensions", aliases, ctx)
def _convert_member(self, name, member, subst=None):
var = super()._convert_member(name, member, subst)
for val in var.data:
# typing_extensions backports typing features to older versions.
# Pretending that the backports are in typing is easier than remembering
# to check for both typing.X and typing_extensions.X every time we match
# on an abstract value.
val.module = "typing"
return var
| TypingExtensionsOverlay |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-genius/llama_index/readers/genius/base.py | {
"start": 157,
"end": 5406
} | class ____(BaseReader):
"""GeniusReader for various operations with lyricsgenius."""
def __init__(self, access_token: str):
"""Initialize the GeniusReader with an access token."""
try:
import lyricsgenius
except ImportError:
raise ImportError(
"Please install lyricsgenius via 'pip install lyricsgenius'"
)
self.genius = lyricsgenius.Genius(access_token)
def load_artist_songs(
self, artist_name: str, max_songs: Optional[int] = None
) -> List[Document]:
"""Load all or a specified number of songs by an artist."""
artist = self.genius.search_artist(artist_name, max_songs=max_songs)
return [Document(text=song.lyrics) for song in artist.songs] if artist else []
def load_all_artist_songs(self, artist_name: str) -> List[Document]:
artist = self.genius.search_artist(artist_name)
artist.save_lyrics()
return [Document(text=song.lyrics) for song in artist.songs]
def load_artist_songs_with_filters(
self,
artist_name: str,
most_popular: bool = True,
max_songs: Optional[int] = None,
max_pages: int = 50,
) -> Document:
"""
Load the most or least popular song of an artist.
Args:
artist_name (str): The artist's name.
most_popular (bool): True for most popular, False for least popular song.
max_songs (Optional[int]): Maximum number of songs to consider for popularity.
max_pages (int): Maximum number of pages to fetch.
Returns:
Document: A document containing lyrics of the most/least popular song.
"""
artist = self.genius.search_artist(artist_name, max_songs=1)
if not artist:
return None
songs_fetched = 0
page = 1
songs = []
while (
page
and page <= max_pages
and (max_songs is None or songs_fetched < max_songs)
):
request = self.genius.artist_songs(
artist.id, sort="popularity", per_page=50, page=page
)
songs.extend(request["songs"])
songs_fetched += len(request["songs"])
page = (
request["next_page"]
if (max_songs is None or songs_fetched < max_songs)
else None
)
target_song = songs[0] if most_popular else songs[-1]
song_details = self.genius.search_song(target_song["title"], artist.name)
return Document(text=song_details.lyrics) if song_details else None
def load_song_by_url_or_id(
self, song_url: Optional[str] = None, song_id: Optional[int] = None
) -> List[Document]:
"""Load song by URL or ID."""
if song_url:
song = self.genius.song(url=song_url)
elif song_id:
song = self.genius.song(song_id)
else:
return []
return [Document(text=song.lyrics)] if song else []
def search_songs_by_lyrics(self, lyrics: str) -> List[Document]:
"""
Search for songs by a snippet of lyrics.
Args:
lyrics (str): The lyric snippet you're looking for.
Returns:
List[Document]: A list of documents containing songs with those lyrics.
"""
search_results = self.genius.search_songs(lyrics)
songs = search_results["hits"] if search_results else []
results = []
for hit in songs:
song_url = hit["result"]["url"]
song_lyrics = self.genius.lyrics(song_url=song_url)
results.append(Document(text=song_lyrics))
return results
def load_songs_by_tag(
self, tag: str, max_songs: Optional[int] = None, max_pages: int = 50
) -> List[Document]:
"""
Load songs by a specific tag.
Args:
tag (str): The tag or genre to load songs for.
max_songs (Optional[int]): Maximum number of songs to fetch. If None, no specific limit.
max_pages (int): Maximum number of pages to fetch.
Returns:
List[Document]: A list of documents containing song lyrics.
"""
lyrics = []
total_songs_fetched = 0
page = 1
while (
page
and page <= max_pages
and (max_songs is None or total_songs_fetched < max_songs)
):
res = self.genius.tag(tag, page=page)
for hit in res["hits"]:
if max_songs is None or total_songs_fetched < max_songs:
song_lyrics = self.genius.lyrics(song_url=hit["url"])
lyrics.append(Document(text=song_lyrics))
total_songs_fetched += 1
else:
break
page = (
res["next_page"]
if max_songs is None or total_songs_fetched < max_songs
else None
)
return lyrics
if __name__ == "__main__":
access_token = ""
reader = GeniusReader(access_token)
# Example usage
print(reader.load_artist_songs("Chance the Rapper", max_songs=1))
| GeniusReader |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/ui/calendar.py | {
"start": 957,
"end": 1276
} | class ____(BaseModel):
"""Represents a summary of DAG runs for a specific calendar time range."""
date: datetime
state: Literal[
DagRunState.QUEUED,
DagRunState.RUNNING,
DagRunState.SUCCESS,
DagRunState.FAILED,
"planned",
]
count: int
| CalendarTimeRangeResponse |
python | ray-project__ray | python/ray/train/lightning/_lightning_utils.py | {
"start": 8081,
"end": 10611
} | class ____(pl.callbacks.Callback):
"""A simple callback that reports checkpoints to Ray on train epoch end.
This callback is a subclass of `lightning.pytorch.callbacks.Callback
<https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.Callback.html#lightning.pytorch.callbacks.Callback>`_.
It fetches the latest `trainer.callback_metrics` and reports together with
the checkpoint on each training epoch end.
Checkpoints will be saved in the following structure::
checkpoint_00000*/ Ray Train Checkpoint
└─ checkpoint.ckpt PyTorch Lightning Checkpoint
For customized reporting and checkpointing logic, implement your own
`lightning.pytorch.callbacks.Callback` following this user
guide: :ref:`Saving and Loading Checkpoints <train-dl-saving-checkpoints>`.
"""
CHECKPOINT_NAME = "checkpoint.ckpt"
def __init__(self) -> None:
super().__init__()
job_id = ray.get_runtime_context().get_job_id()
experiment_name = ray.train.get_context().get_experiment_name()
self.local_rank = ray.train.get_context().get_local_rank()
self.tmpdir_prefix = Path(
tempfile.gettempdir(),
f"lightning_checkpoints-job_id={job_id}-name={experiment_name}",
).as_posix()
if os.path.isdir(self.tmpdir_prefix) and self.local_rank == 0:
shutil.rmtree(self.tmpdir_prefix)
record_extra_usage_tag(TagKey.TRAIN_LIGHTNING_RAYTRAINREPORTCALLBACK, "1")
def on_train_epoch_end(self, trainer, pl_module) -> None:
# Creates a checkpoint dir with fixed name
tmpdir = Path(self.tmpdir_prefix, str(trainer.current_epoch)).as_posix()
os.makedirs(tmpdir, exist_ok=True)
# Fetch metrics
metrics = trainer.callback_metrics
metrics = {k: v.item() for k, v in metrics.items()}
# (Optional) Add customized metrics
metrics["epoch"] = trainer.current_epoch
metrics["step"] = trainer.global_step
# Save checkpoint to local
ckpt_path = Path(tmpdir, self.CHECKPOINT_NAME).as_posix()
trainer.save_checkpoint(ckpt_path, weights_only=False)
# Report to train session
checkpoint = Checkpoint.from_directory(tmpdir)
ray.train.report(metrics=metrics, checkpoint=checkpoint)
# Add a barrier to ensure all workers finished reporting here
trainer.strategy.barrier()
if self.local_rank == 0:
shutil.rmtree(tmpdir)
| RayTrainReportCallback |
python | redis__redis-py | redis/commands/search/suggestion.py | {
"start": 60,
"end": 466
} | class ____:
"""
Represents a single suggestion being sent or returned from the
autocomplete server
"""
def __init__(
self, string: str, score: float = 1.0, payload: Optional[str] = None
) -> None:
self.string = to_string(string)
self.payload = to_string(payload)
self.score = score
def __repr__(self) -> str:
return self.string
| Suggestion |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 62999,
"end": 65419
} | class ____:
def test_format_timeframe(self):
# Now
assert self.locale._format_timeframe("now", 0) == "ახლა"
# Second(s)
assert self.locale._format_timeframe("second", -1) == "წამის"
assert self.locale._format_timeframe("second", 1) == "წამის"
assert self.locale._format_timeframe("seconds", -3) == "3 წამის"
assert self.locale._format_timeframe("seconds", 3) == "3 წამის"
# Minute(s)
assert self.locale._format_timeframe("minute", -1) == "წუთის"
assert self.locale._format_timeframe("minute", 1) == "წუთის"
assert self.locale._format_timeframe("minutes", -4) == "4 წუთის"
assert self.locale._format_timeframe("minutes", 4) == "4 წუთის"
# Hour(s)
assert self.locale._format_timeframe("hour", -1) == "საათის"
assert self.locale._format_timeframe("hour", 1) == "საათის"
assert self.locale._format_timeframe("hours", -23) == "23 საათის"
assert self.locale._format_timeframe("hours", 23) == "23 საათის"
# Day(s)
assert self.locale._format_timeframe("day", -1) == "დღის"
assert self.locale._format_timeframe("day", 1) == "დღის"
assert self.locale._format_timeframe("days", -12) == "12 დღის"
assert self.locale._format_timeframe("days", 12) == "12 დღის"
# Day(s)
assert self.locale._format_timeframe("week", -1) == "კვირის"
assert self.locale._format_timeframe("week", 1) == "კვირის"
assert self.locale._format_timeframe("weeks", -12) == "12 კვირის"
assert self.locale._format_timeframe("weeks", 12) == "12 კვირის"
# Month(s)
assert self.locale._format_timeframe("month", -1) == "თვის"
assert self.locale._format_timeframe("month", 1) == "თვის"
assert self.locale._format_timeframe("months", -2) == "2 თვის"
assert self.locale._format_timeframe("months", 2) == "2 თვის"
# Year(s)
assert self.locale._format_timeframe("year", -1) == "წლის"
assert self.locale._format_timeframe("year", 1) == "წლის"
assert self.locale._format_timeframe("years", -2) == "2 წლის"
assert self.locale._format_timeframe("years", 2) == "2 წლის"
def test_weekday(self):
dt = arrow.Arrow(2015, 4, 11, 17, 30, 00)
assert self.locale.day_name(dt.isoweekday()) == "შაბათი"
@pytest.mark.usefixtures("lang_locale")
| TestGeorgianLocale |
python | jackfrued__Python-100-Days | Day31-35/code/example12.py | {
"start": 901,
"end": 1349
} | class ____():
"""创建员工的工厂(工厂模式 - 通过工厂实现对象使用者和对象之间的解耦合)"""
@staticmethod
def create(emp_type, *args, **kwargs):
"""创建员工"""
emp_type = emp_type.upper()
emp = None
if emp_type == 'M':
emp = Manager(*args, **kwargs)
elif emp_type == 'P':
emp = Programmer(*args, **kwargs)
elif emp_type == 'S':
emp = Salesman(*args, **kwargs)
return emp
| EmployeeFactory |
python | Textualize__textual | src/textual/css/query.py | {
"start": 975,
"end": 1054
} | class ____(Exception):
"""Base class for a query related error."""
| QueryError |
python | readthedocs__readthedocs.org | readthedocs/core/views/__init__.py | {
"start": 1430,
"end": 2641
} | class ____(View):
"""
Conditionally redirect to website home page or to dashboard.
User hitting readthedocs.org / readthedocs.com is redirected to this view (at /welcome).
This view will redirect the user based on auth/unauthed:
1. when user is logged in, redirect to dashboard
2. when user is logged off, redirect to https://about.readthedocs.com/
User hitting app.readthedocs.org / app.readthedocs.com
1. when user is logged in, redirect to dashboard
2. when user is logged off, redirect to login page
"""
def get(self, request, *args, **kwargs):
# Redirect to user dashboard for logged in users
if request.user.is_authenticated:
return redirect(reverse("projects_dashboard"))
# Redirect to ``about.`` in production
query_string = f"?ref={settings.PRODUCTION_DOMAIN}"
if request.META["QUERY_STRING"]:
# Small hack to not append `&` to URLs without a query_string
query_string += "&" + request.META["QUERY_STRING"]
# Do a 302 here so that it varies on logged in status
return redirect(f"https://about.readthedocs.com/{query_string}", permanent=False)
| WelcomeView |
python | huggingface__transformers | src/transformers/models/sam3/modeling_sam3.py | {
"start": 44285,
"end": 51780
} | class ____(nn.Module):
"""
Encoder for geometric prompts (boxes).
Boxes are encoded using three approaches:
- Direct projection: linear projection from coordinate space to hidden_size
- Pooling: pool features from the backbone at the specified location (ROI align for boxes)
- Position encoding: use position encoding of the box center
These encodings are combined additively and further processed with transformer layers.
"""
def __init__(self, config: Sam3GeometryEncoderConfig):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.roi_size = config.roi_size
self.position_encoding = Sam3SinePositionEmbedding(num_pos_feats=config.hidden_size // 2, normalize=True)
self.label_embed = nn.Embedding(2, self.hidden_size)
self.cls_embed = nn.Embedding(1, self.hidden_size)
# Box encoding layers
self.boxes_direct_project = nn.Linear(4, self.hidden_size)
self.boxes_pool_project = nn.Conv2d(self.hidden_size, self.hidden_size, self.roi_size)
self.boxes_pos_enc_project = nn.Linear(self.hidden_size + 2, self.hidden_size)
# Image feature normalization
self.vision_layer_norm = nn.LayerNorm(self.hidden_size)
# Prompt projection and normalization
self.final_proj = nn.Linear(self.hidden_size, self.hidden_size)
self.prompt_layer_norm = nn.LayerNorm(self.hidden_size)
# Transformer layers
self.layers = nn.ModuleList([Sam3GeometryEncoderLayer(config) for _ in range(config.num_layers)])
self.output_layer_norm = nn.LayerNorm(self.hidden_size)
def _encode_box_coordinates(
self, center_x: torch.Tensor, center_y: torch.Tensor, width: torch.Tensor, height: torch.Tensor
) -> torch.Tensor:
"""
Encode box coordinates by combining position-encoded centers with raw width/height.
Args:
center_x: 1D tensor of box center x coordinates
center_y: 1D tensor of box center y coordinates
width: 1D tensor of box widths
height: 1D tensor of box heights
Returns:
Encoded box coordinates [N, embedding_dim]
"""
pos_x, pos_y = self.position_encoding.encode_1d_positions(center_x, center_y)
pos = torch.cat((pos_y, pos_x, height[:, None], width[:, None]), dim=1)
return pos
def _encode_boxes(self, boxes, boxes_mask, boxes_labels, vision_features):
"""Encode box prompts. Mask convention: True=valid, False=padding."""
batch_size, num_boxes = boxes.shape[:2]
height, width = vision_features.shape[-2:]
boxes_embed = self.boxes_direct_project(boxes)
# Pool features using ROI align
# Convert boxes from CxCyWH to xyxy format and denormalize
boxes_xyxy = box_cxcywh_to_xyxy(boxes)
scale = torch.tensor([width, height, width, height], dtype=boxes_xyxy.dtype, device=boxes_xyxy.device)
scale = scale.view(1, 1, 4)
boxes_xyxy = boxes_xyxy * scale
# ROI align expects list of boxes per batch element,
# convert from bfloat16 to float16 as roi_align only supports float16 and float32
dtype = torch.float16 if vision_features.dtype == torch.bfloat16 else vision_features.dtype
sampled_features = torchvision.ops.roi_align(
vision_features.to(dtype), boxes_xyxy.to(dtype).unbind(0), self.roi_size
).to(vision_features.dtype)
pooled_projection = self.boxes_pool_project(sampled_features)
pooled_projection = pooled_projection.view(batch_size, num_boxes, self.hidden_size)
boxes_embed = boxes_embed + pooled_projection
# Add position encoding
center_x, center_y, box_width, box_height = boxes.unbind(-1)
pos_enc = self._encode_box_coordinates(
center_x.flatten(), center_y.flatten(), box_width.flatten(), box_height.flatten()
)
pos_enc = pos_enc.view(batch_size, num_boxes, pos_enc.shape[-1])
pos_projection = self.boxes_pos_enc_project(pos_enc)
boxes_embed = boxes_embed + pos_projection
# Add label embeddings (positive/negative)
label_embed = self.label_embed(boxes_labels.long())
return label_embed + boxes_embed, boxes_mask
def forward(
self,
box_embeddings: torch.Tensor,
box_mask: torch.Tensor,
box_labels: torch.Tensor,
img_feats: tuple[torch.Tensor, ...],
img_pos_embeds: Optional[tuple[torch.Tensor, ...]] = None,
):
"""
Forward pass for encoding geometric prompts.
Args:
box_embeddings: Box coordinates in CxCyWH format [batch_size, num_boxes, 4]
box_mask: Attention mask for boxes [batch_size, num_boxes]
box_labels: Labels for boxes (positive/negative) [batch_size, num_boxes]
img_feats: Image features from vision encoder
img_pos_embeds: Optional position embeddings for image features
Returns:
Sam3GeometryEncoderOutput containing encoded geometry features and attention mask.
"""
batch_size = box_embeddings.shape[0]
# Prepare vision features for cross-attention: flatten spatial dimensions
vision_feats = img_feats[-1] # [B, C, H, W]
vision_pos_embeds = img_pos_embeds[-1] if img_pos_embeds is not None else torch.zeros_like(vision_feats)
vision_feats_flat = vision_feats.flatten(2).transpose(1, 2) # [B, H*W, C]
vision_pos_embeds_flat = vision_pos_embeds.flatten(2).transpose(1, 2) # [B, H*W, C]
# Normalize image features for pooling operations
img_feats_last = img_feats[-1] # [B, C, H, W]
img_feats_last = img_feats_last.permute(0, 2, 3, 1) # [B, H, W, C]
normalized_img_feats = self.vision_layer_norm(img_feats_last)
normalized_img_feats = normalized_img_feats.permute(0, 3, 1, 2) # [B, C, H, W]
prompt_embeds, prompt_mask = self._encode_boxes(box_embeddings, box_mask, box_labels, normalized_img_feats)
# Add CLS token (always valid)
cls_embed = self.cls_embed.weight.view(1, self.hidden_size).unsqueeze(0).expand(batch_size, -1, -1)
cls_mask = torch.ones(batch_size, 1, dtype=prompt_mask.dtype, device=prompt_mask.device)
prompt_embeds, prompt_mask = concat_padded_sequences(prompt_embeds, prompt_mask, cls_embed, cls_mask)
prompt_embeds = self.prompt_layer_norm(self.final_proj(prompt_embeds))
# Create bidirectional attention mask for transformer layers
prompt_attention_mask = None
if prompt_mask is not None:
prompt_attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=prompt_embeds,
attention_mask=prompt_mask,
)
# Apply transformer layers with cross-attention to vision features
for layer in self.layers:
prompt_embeds = layer(
prompt_feats=prompt_embeds,
vision_feats=vision_feats_flat,
vision_pos_encoding=vision_pos_embeds_flat,
prompt_mask=prompt_attention_mask,
)
# Final output normalization
prompt_embeds = self.output_layer_norm(prompt_embeds)
return Sam3GeometryEncoderOutput(
last_hidden_state=prompt_embeds,
attention_mask=prompt_mask,
)
| Sam3GeometryEncoder |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchValue1.py | {
"start": 668,
"end": 713
} | class ____(Enum):
V1 = 0
V2 = 1
| MyEnum1 |
python | dask__distributed | distributed/diagnostics/plugin.py | {
"start": 8103,
"end": 11319
} | class ____:
"""Interface to extend the Worker
A worker plugin enables custom code to run at different stages of the Workers'
lifecycle.
A plugin enables custom code to run at each of step of a Workers's life. Whenever such
an event happens, the corresponding method on this class will be called. Note that the
user code always runs within the Worker's main thread.
To implement a plugin:
1. inherit from this class
2. override some of its methods
3. register the plugin using :meth:`Client.register_plugin<distributed.Client.register_plugin>`.
The ``idempotent`` attribute is used to control whether or not the plugin should
be ignored upon registration if a worker plugin with the same name already exists.
If ``True``, the plugin is ignored, otherwise the existing plugin is replaced.
Defaults to ``False``.
Examples
--------
>>> class ErrorLogger(WorkerPlugin):
... def __init__(self, logger):
... self.logger = logger
...
... def setup(self, worker):
... self.worker = worker
...
... def transition(self, key, start, finish, *args, **kwargs):
... if finish == 'error':
... ts = self.worker.tasks[key]
... exc_info = (type(ts.exception), ts.exception, ts.traceback)
... self.logger.error(
... "Error during computation of '%s'.", key,
... exc_info=exc_info
... )
>>> import logging
>>> plugin = ErrorLogger(logging)
>>> client.register_plugin(plugin) # doctest: +SKIP
"""
idempotent: bool = False
def setup(self, worker: Worker) -> None | Awaitable[None]:
"""
Run when the plugin is attached to a worker. This happens when the plugin is registered
and attached to existing workers, or when a worker is created after the plugin has been
registered.
"""
def teardown(self, worker: Worker) -> None | Awaitable[None]:
"""Run when the worker to which the plugin is attached is closed, or
when the plugin is removed."""
def transition(
self,
key: Key,
start: WorkerTaskStateState,
finish: WorkerTaskStateState,
**kwargs: Any,
) -> None:
"""
Throughout the lifecycle of a task (see :doc:`Worker State
<worker-state>`), Workers are instructed by the scheduler to compute
certain tasks, resulting in transitions in the state of each task. The
Worker owning the task is then notified of this state transition.
Whenever a task changes its state, this method will be called.
.. warning::
This is an advanced feature and the transition mechanism and details
of task states are subject to change without deprecation cycle.
Parameters
----------
key :
start :
Start state of the transition.
One of waiting, ready, executing, long-running, memory, error.
finish :
Final state of the transition.
kwargs :
More options passed when transitioning
"""
| WorkerPlugin |
python | pymupdf__PyMuPDF | src/__init__.py | {
"start": 519821,
"end": 525361
} | class ____:
def __abs__(self):
return math.sqrt(self.x * self.x + self.y * self.y)
def __add__(self, p):
if hasattr(p, "__float__"):
return Point(self.x + p, self.y + p)
if len(p) != 2:
raise ValueError("Point: bad seq len")
return Point(self.x + p[0], self.y + p[1])
def __bool__(self):
return not (max(self) == min(self) == 0)
def __eq__(self, p):
if not hasattr(p, "__len__"):
return False
return len(p) == 2 and not (self - p)
def __getitem__(self, i):
return (self.x, self.y)[i]
def __hash__(self):
return hash(tuple(self))
def __init__(self, *args, x=None, y=None):
'''
Point() - all zeros
Point(x, y)
Point(Point) - new copy
Point(sequence) - from 'sequence'
Explicit keyword args x, y override earlier settings if not None.
'''
if not args:
self.x = 0.0
self.y = 0.0
elif len(args) > 2:
raise ValueError("Point: bad seq len")
elif len(args) == 2:
self.x = float(args[0])
self.y = float(args[1])
elif len(args) == 1:
l = args[0]
if isinstance(l, (mupdf.FzPoint, mupdf.fz_point)):
self.x = l.x
self.y = l.y
else:
if not hasattr(l, "__getitem__"):
raise ValueError("Point: bad args")
if len(l) != 2:
raise ValueError("Point: bad seq len")
self.x = float(l[0])
self.y = float(l[1])
else:
raise ValueError("Point: bad seq len")
if x is not None: self.x = x
if y is not None: self.y = y
def __len__(self):
return 2
def __mul__(self, m):
if hasattr(m, "__float__"):
return Point(self.x * m, self.y * m)
if hasattr(m, "__getitem__") and len(m) == 2:
# dot product
return self.x * m[0] + self.y * m[1]
p = Point(self)
return p.transform(m)
def __neg__(self):
return Point(-self.x, -self.y)
def __nonzero__(self):
return not (max(self) == min(self) == 0)
def __pos__(self):
return Point(self)
def __repr__(self):
return "Point" + str(tuple(self))
def __setitem__(self, i, v):
v = float(v)
if i == 0: self.x = v
elif i == 1: self.y = v
else:
raise IndexError("index out of range")
return None
def __sub__(self, p):
if hasattr(p, "__float__"):
return Point(self.x - p, self.y - p)
if len(p) != 2:
raise ValueError("Point: bad seq len")
return Point(self.x - p[0], self.y - p[1])
def __truediv__(self, m):
if hasattr(m, "__float__"):
return Point(self.x * 1./m, self.y * 1./m)
m1 = util_invert_matrix(m)[1]
if not m1:
raise ZeroDivisionError("matrix not invertible")
p = Point(self)
return p.transform(m1)
@property
def abs_unit(self):
"""Unit vector with positive coordinates."""
s = self.x * self.x + self.y * self.y
if s < EPSILON:
return Point(0,0)
s = math.sqrt(s)
return Point(abs(self.x) / s, abs(self.y) / s)
def distance_to(self, *args):
"""Return distance to rectangle or another point."""
if not len(args) > 0:
raise ValueError("at least one parameter must be given")
x = args[0]
if len(x) == 2:
x = Point(x)
elif len(x) == 4:
x = Rect(x)
else:
raise ValueError("arg1 must be point-like or rect-like")
if len(args) > 1:
unit = args[1]
else:
unit = "px"
u = {"px": (1.,1.), "in": (1.,72.), "cm": (2.54, 72.),
"mm": (25.4, 72.)}
f = u[unit][0] / u[unit][1]
if type(x) is Point:
return abs(self - x) * f
# from here on, x is a rectangle
# as a safeguard, make a finite copy of it
r = Rect(x.top_left, x.top_left)
r = r | x.bottom_right
if self in r:
return 0.0
if self.x > r.x1:
if self.y >= r.y1:
return self.distance_to(r.bottom_right, unit)
elif self.y <= r.y0:
return self.distance_to(r.top_right, unit)
else:
return (self.x - r.x1) * f
elif r.x0 <= self.x <= r.x1:
if self.y >= r.y1:
return (self.y - r.y1) * f
else:
return (r.y0 - self.y) * f
else:
if self.y >= r.y1:
return self.distance_to(r.bottom_left, unit)
elif self.y <= r.y0:
return self.distance_to(r.top_left, unit)
else:
return (r.x0 - self.x) * f
def transform(self, m):
"""Replace point by its transformation with matrix-like m."""
if len(m) != 6:
raise ValueError("Matrix: bad seq len")
self.x, self.y = util_transform_point(self, m)
return self
@property
def unit(self):
"""Unit vector of the point."""
s = self.x * self.x + self.y * self.y
if s < EPSILON:
return Point(0,0)
s = math.sqrt(s)
return Point(self.x / s, self.y / s)
__div__ = __truediv__
norm = __abs__
| Point |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-hardcoded-records/source_hardcoded_records/streams.py | {
"start": 3601,
"end": 3905
} | class ____(HardcodedStream):
sample_record = {
"field1": "valuevaluevaluevaluevalue1",
"field2": "valuevaluevaluevaluevalue1",
"field3": "valuevaluevaluevaluevalue1",
"field4": "valuevaluevaluevaluevalue1",
"field5": "valuevaluevaluevaluevalue1",
}
| DummyFields |
python | huggingface__transformers | src/transformers/models/unispeech_sat/modeling_unispeech_sat.py | {
"start": 55137,
"end": 60086
} | class ____(UniSpeechSatPreTrainedModel):
def __init__(self, config):
super().__init__(config)
if hasattr(config, "add_adapter") and config.add_adapter:
raise ValueError(
"Sequence classification does not support the use of UniSpeechSat adapters (config.add_adapter=True)"
)
self.unispeech_sat = UniSpeechSatModel(config)
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
self.unispeech_sat.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.unispeech_sat.parameters():
param.requires_grad = False
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[tuple, SequenceClassifierOutput]:
r"""
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library
(`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
into a tensor of type `torch.FloatTensor`. See [`UniSpeechSatProcessor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.unispeech_sat(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = outputs[0]
hidden_states = self.projector(hidden_states)
if attention_mask is None:
pooled_output = hidden_states.mean(dim=1)
else:
padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
hidden_states[~expand_padding_mask] = 0.0
pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| UniSpeechSatForSequenceClassification |
python | pola-rs__polars | py-polars/src/polars/datatypes/classes.py | {
"start": 13249,
"end": 14432
} | class ____(TemporalType):
"""
Data type representing the time of day.
Notes
-----
The underlying representation of this type is a 64-bit signed integer.
The integer indicates the number of nanoseconds since midnight.
"""
@classmethod
def max(cls) -> pl.Expr:
"""
Return a literal expression representing the maximum value of this data type.
Examples
--------
>>> pl.select(pl.Time.max() == 86_399_999_999_999)
shape: (1, 1)
┌─────────┐
│ literal │
│ --- │
│ bool │
╞═════════╡
│ true │
└─────────┘
"""
return pl.Expr._from_pyexpr(plr._get_dtype_max(cls))
@classmethod
def min(cls) -> pl.Expr:
"""
Return a literal expression representing the minimum value of this data type.
Examples
--------
>>> pl.select(pl.Time.min() == 0)
shape: (1, 1)
┌─────────┐
│ literal │
│ --- │
│ bool │
╞═════════╡
│ true │
└─────────┘
"""
return pl.Expr._from_pyexpr(plr._get_dtype_min(cls))
| Time |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pytest_style/PT023.py | {
"start": 874,
"end": 990
} | class ____:
@pytest.mark.foo()
class TestNestedClass:
def test_something():
pass
| TestClass |
python | spyder-ide__spyder | spyder/api/utils.py | {
"start": 1399,
"end": 1668
} | class ____(PrefixNode):
"""Utility class to store and iterate over prefixed string tuples."""
def __iter__(self):
for key in self.children:
child = self.children[key]
for prefix in child:
yield prefix
| PrefixedTuple |
python | streamlit__streamlit | lib/tests/streamlit/runtime/scriptrunner_utils/script_run_context_test.py | {
"start": 2386,
"end": 9887
} | class ____(unittest.TestCase):
def setUp(self):
try:
# clear context variable as it otherwise would be carried over between tests
delattr(threading.current_thread(), SCRIPT_RUN_CONTEXT_ATTR_NAME)
except AttributeError:
pass
def test_allow_set_page_config_once(self):
"""st.set_page_config can be called once"""
def fake_enqueue(msg):
return None
ctx = _create_script_run_context(fake_enqueue)
msg = ForwardMsg()
msg.page_config_changed.title = "foo"
ctx.enqueue(msg)
def test_allow_set_page_config_twice(self):
"""st.set_page_config can be called twice"""
def fake_enqueue(msg):
return None
ctx = _create_script_run_context(fake_enqueue)
msg = ForwardMsg()
msg.page_config_changed.title = "foo"
ctx.enqueue(msg)
same_msg = ForwardMsg()
same_msg.page_config_changed.title = "bar"
ctx.enqueue(same_msg)
def test_active_script_hash(self):
"""ensures active script hash is set correctly when enqueueing messages"""
fake_path = "my/custom/script/path"
pg_mgr = PagesManager(fake_path)
def fake_enqueue(msg):
return None
ctx = _create_script_run_context(fake_enqueue, pages_manager=pg_mgr)
ctx.reset(page_script_hash="main_script_hash")
ctx.on_script_start()
msg = ForwardMsg()
msg.delta.new_element.markdown.body = "foo"
ctx.enqueue(msg)
assert msg.metadata.active_script_hash == ctx.active_script_hash
ctx.set_mpa_v2_page("new_hash")
with ctx.run_with_active_hash("new_hash"):
new_msg = ForwardMsg()
new_msg.delta.new_element.markdown.body = "bar"
ctx.enqueue(new_msg)
assert new_msg.metadata.active_script_hash == "new_hash"
@parameterized.expand(
[
(True, True, True), # Both APIs used
(True, False, False), # Only experimental API used
(False, True, False), # Only final API used
(False, False, False), # Neither API used
]
)
def test_both_query_params_used(
self, experimental_used, production_used, should_raise
):
def fake_enqueue(msg):
return None
ctx = _create_script_run_context(fake_enqueue)
ctx._experimental_query_params_used = experimental_used
ctx._production_query_params_used = production_used
if should_raise:
with pytest.raises(StreamlitAPIException):
ctx.ensure_single_query_api_used()
else:
ctx.ensure_single_query_api_used()
def test_mark_experimental_query_params_used_sets_true(self):
def fake_enqueue(msg):
return None
ctx = _create_script_run_context(fake_enqueue)
ctx.mark_experimental_query_params_used()
assert ctx._experimental_query_params_used is True
def test_mark_production_query_params_used_sets_true(self):
def fake_enqueue(msg):
return None
ctx = _create_script_run_context(fake_enqueue)
ctx.mark_production_query_params_used()
assert ctx._production_query_params_used is True
def test_enqueue_message_raise_if_ctx_is_none(self):
msg = ForwardMsg()
msg.delta.new_element.markdown.body = "foo"
with pytest.raises(NoSessionContext):
enqueue_message(msg)
def test_enqueue_message(self):
fake_enqueue_result: dict[str, ForwardMsg] = {}
def fake_enqueue(msg: ForwardMsg):
fake_enqueue_result["msg"] = msg
ctx = _create_script_run_context(fake_enqueue)
add_script_run_ctx(ctx=ctx)
msg = ForwardMsg()
msg.delta.new_element.markdown.body = "foo"
enqueue_message(msg)
assert fake_enqueue_result is not None
assert (
fake_enqueue_result["msg"].delta.new_element.markdown.body
== msg.delta.new_element.markdown.body
)
def test_enqueue_message_sets_cacheable_flag(self):
"""Test that the metadata.cacheable flag is set correctly on outgoing ForwardMsgs."""
fake_enqueue_result: dict[str, ForwardMsg] = {}
def fake_enqueue(msg: ForwardMsg):
fake_enqueue_result["msg"] = msg
ctx = _create_script_run_context(fake_enqueue)
add_script_run_ctx(ctx=ctx)
with patch_config_options({"global.minCachedMessageSize": 0}):
cacheable_msg = create_dataframe_msg([1, 2, 3])
enqueue_message(cacheable_msg)
assert fake_enqueue_result is not None
assert fake_enqueue_result["msg"].metadata.cacheable
with patch_config_options({"global.minCachedMessageSize": 1000}):
cacheable_msg = create_dataframe_msg([4, 5, 6])
enqueue_message(cacheable_msg)
assert fake_enqueue_result is not None
assert not fake_enqueue_result["msg"].metadata.cacheable
def test_enqueue_reference_message_if_cached(self):
"""Test that a reference message is enqueued if the original message is cached."""
fake_enqueue_result: dict[str, ForwardMsg] = {}
def fake_enqueue(msg: ForwardMsg):
fake_enqueue_result["msg"] = msg
with patch_config_options({"global.minCachedMessageSize": 0}):
cacheable_msg = create_dataframe_msg([1, 2, 3])
populate_hash_if_needed(cacheable_msg)
assert bool(cacheable_msg.hash)
ctx = _create_script_run_context(
fake_enqueue, cached_message_hashes={cacheable_msg.hash}
)
add_script_run_ctx(ctx=ctx)
enqueue_message(cacheable_msg)
assert fake_enqueue_result is not None
assert fake_enqueue_result["msg"].WhichOneof("type") == "ref_hash"
def test_enqueue_message_with_fragment_id(self):
fake_enqueue_result = {}
def fake_enqueue(msg: ForwardMsg):
fake_enqueue_result["msg"] = msg
ctx = _create_script_run_context(
fake_enqueue, current_fragment_id="my_fragment_id"
)
add_script_run_ctx(ctx=ctx)
msg = ForwardMsg()
msg.delta.new_element.markdown.body = "foo"
enqueue_message(msg)
assert fake_enqueue_result is not None
assert (
fake_enqueue_result["msg"].delta.new_element.markdown.body
== msg.delta.new_element.markdown.body
)
assert fake_enqueue_result["msg"].delta.fragment_id == "my_fragment_id"
def test_run_with_active_hash(self):
"""Ensure the active script is set correctly"""
pages_manager = PagesManager("")
ctx = _create_script_run_context(
lambda _msg: None,
current_fragment_id="my_fragment_id",
pages_manager=pages_manager,
)
ctx.reset(page_script_hash=pages_manager.main_script_hash)
assert ctx.active_script_hash == pages_manager.main_script_hash
pages_manager.set_pages({})
ctx.set_mpa_v2_page("new_hash")
assert ctx.active_script_hash == pages_manager.main_script_hash
with ctx.run_with_active_hash("new_hash"):
assert ctx.active_script_hash == "new_hash"
assert ctx.active_script_hash == pages_manager.main_script_hash
| ScriptRunContextTest |
python | tornadoweb__tornado | tornado/web.py | {
"start": 125171,
"end": 126074
} | class ____(RequestHandler):
"""A `RequestHandler` that wraps another HTTP server callback.
The fallback is a callable object that accepts an
`~.httputil.HTTPServerRequest`, such as an `Application` or
`tornado.wsgi.WSGIContainer`. This is most useful to use both
Tornado ``RequestHandlers`` and WSGI in the same server. Typical
usage::
wsgi_app = tornado.wsgi.WSGIContainer(
django.core.handlers.wsgi.WSGIHandler())
application = tornado.web.Application([
(r"/foo", FooHandler),
(r".*", FallbackHandler, dict(fallback=wsgi_app)),
])
"""
def initialize(
self, fallback: Callable[[httputil.HTTPServerRequest], None]
) -> None:
self.fallback = fallback
def prepare(self) -> None:
self.fallback(self.request)
self._finished = True
self.on_finish()
| FallbackHandler |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1589306,
"end": 1589465
} | class ____(sgqlc.types.Union):
"""Types that can be inside a Milestone."""
__schema__ = github_schema
__types__ = (Issue, PullRequest)
| MilestoneItem |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/core_tests/instance_tests/test_instance.py | {
"start": 34147,
"end": 38535
} | class ____(RunLauncher, ConfigurableClass):
def launch_run(self, context: LaunchRunContext) -> None:
pass
def terminate(self, run_id): # pyright: ignore[reportIncompatibleMethodOverride]
pass
def test_configurable_class_missing_methods():
with pytest.raises(
NotImplementedError,
match="InvalidRunLauncher must implement the config_type classmethod",
):
with dg.instance_for_test(
overrides={
"run_launcher": {
"module": "dagster_tests.core_tests.instance_tests.test_instance",
"class": "InvalidRunLauncher",
}
}
) as instance:
print(instance.run_launcher) # noqa: T201
@patch("dagster._core.storage.partition_status_cache.get_and_update_asset_status_cache_value")
def test_get_status_by_partition(mock_get_and_update):
mock_cached_value = MagicMock(spec=AssetStatusCacheValue)
mock_cached_value.deserialize_materialized_partition_subsets.return_value = [
"2023-06-01",
"2023-06-02",
]
mock_cached_value.deserialize_failed_partition_subsets.return_value = ["2023-06-15"]
mock_cached_value.deserialize_in_progress_partition_subsets.return_value = ["2023-07-01"]
mock_get_and_update.return_value = mock_cached_value
with dg.instance_for_test() as instance:
partition_status = instance.get_status_by_partition(
dg.AssetKey("test-asset"),
["2023-07-01"],
dg.DailyPartitionsDefinition(start_date="2023-06-01"),
)
assert partition_status == {"2023-07-01": AssetPartitionStatus.IN_PROGRESS}
def test_report_runless_asset_event() -> None:
with dg.instance_for_test() as instance:
my_asset_key = dg.AssetKey("my_asset")
instance.report_runless_asset_event(dg.AssetMaterialization(my_asset_key))
mats = instance.get_latest_materialization_events([my_asset_key])
assert mats[my_asset_key]
instance.report_runless_asset_event(dg.AssetObservation(my_asset_key))
records = instance.fetch_observations(my_asset_key, limit=1).records
assert len(records) == 1
my_check = "my_check"
instance.report_runless_asset_event(
dg.AssetCheckEvaluation(
asset_key=my_asset_key,
check_name=my_check,
passed=True,
metadata={},
)
)
records = instance.event_log_storage.get_asset_check_execution_history(
check_key=dg.AssetCheckKey(asset_key=my_asset_key, name=my_check),
limit=1,
)
assert len(records) == 1
assert records[0].status == AssetCheckExecutionRecordStatus.SUCCEEDED
instance.report_runless_asset_event(
dg.AssetCheckEvaluation(
asset_key=my_asset_key,
check_name=my_check,
passed=False,
metadata={},
)
)
records = instance.event_log_storage.get_asset_check_execution_history(
check_key=dg.AssetCheckKey(asset_key=my_asset_key, name=my_check),
limit=1,
)
assert len(records) == 1
assert records[0].status == AssetCheckExecutionRecordStatus.FAILED
def test_report_runless_asset_event_freshness_state_change() -> None:
"""Test that report_runless_asset_event accepts FreshnessStateChange events."""
from dagster._core.definitions.freshness import FreshnessState, FreshnessStateChange
with dg.instance_for_test() as instance:
my_asset_key = dg.AssetKey("my_asset")
freshness_change = FreshnessStateChange(
key=my_asset_key,
new_state=FreshnessState.FAIL,
previous_state=FreshnessState.UNKNOWN,
state_change_timestamp=1234567890.0,
)
# This should not raise an exception - this is the main test
# Previously this would have raised DagsterInvariantViolationError
instance.report_runless_asset_event(freshness_change)
def test_invalid_run_id():
with dg.instance_for_test() as instance:
with pytest.raises(
CheckError,
match="run_id must be a valid UUID. Got invalid_run_id",
):
create_run_for_test(instance, job_name="foo_job", run_id="invalid_run_id")
| InvalidRunLauncher |
python | ray-project__ray | python/ray/dag/function_node.py | {
"start": 196,
"end": 1657
} | class ____(DAGNode):
"""Represents a bound task node in a Ray task DAG."""
def __init__(
self,
func_body,
func_args,
func_kwargs,
func_options,
other_args_to_resolve=None,
):
self._body = func_body
super().__init__(
func_args,
func_kwargs,
func_options,
other_args_to_resolve=other_args_to_resolve,
)
def _copy_impl(
self,
new_args: List[Any],
new_kwargs: Dict[str, Any],
new_options: Dict[str, Any],
new_other_args_to_resolve: Dict[str, Any],
):
return FunctionNode(
self._body,
new_args,
new_kwargs,
new_options,
other_args_to_resolve=new_other_args_to_resolve,
)
def _execute_impl(self, *args, **kwargs):
"""Executor of FunctionNode by ray.remote().
Args and kwargs are to match base class signature, but not in the
implementation. All args and kwargs should be resolved and replaced
with value in bound_args and bound_kwargs via bottom-up recursion when
current node is executed.
"""
return (
ray.remote(self._body)
.options(**self._bound_options)
.remote(*self._bound_args, **self._bound_kwargs)
)
def __str__(self) -> str:
return get_dag_node_str(self, str(self._body))
| FunctionNode |
python | huggingface__transformers | src/transformers/models/big_bird/modeling_big_bird.py | {
"start": 53178,
"end": 56643
} | class ____(nn.Module):
def __init__(self, config, seed=None):
super().__init__()
self.attention_type = config.attention_type
self.config = config
self.seed = seed
if self.config.attention_type == "original_full":
self.self = BigBirdSelfAttention(config, layer_idx=seed)
elif self.config.attention_type == "block_sparse":
self.self = BigBirdBlockSparseAttention(config, seed)
else:
raise ValueError(
f"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}"
)
self.output = BigBirdSelfOutput(config)
def set_attention_type(self, value: str, layer_idx=None):
if value not in ["original_full", "block_sparse"]:
raise ValueError(
f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
)
# attention type is already correctly set
if value == self.attention_type:
return
self.attention_type = value
if value == "original_full":
# copy all weights to new full attention class
attn_weights = BigBirdSelfAttention(self.config, layer_idx=layer_idx)
else:
# copy all weights to new sparse attention class
attn_weights = BigBirdBlockSparseAttention(self.config, self.seed)
attn_weights.query = self.self.query
attn_weights.value = self.self.value
attn_weights.key = self.self.key
self.self = attn_weights
if not self.training:
self.self.eval()
def forward(
self,
hidden_states,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
output_attentions=False,
# block_sparse config
band_mask=None,
from_mask=None,
to_mask=None,
from_blocked_mask=None,
to_blocked_mask=None,
cache_position=None,
):
# fp16 compatibility
if band_mask is not None:
band_mask = band_mask.to(hidden_states.dtype)
if from_mask is not None:
from_mask = from_mask.to(hidden_states.dtype)
if to_mask is not None:
to_mask = to_mask.to(hidden_states.dtype)
if self.attention_type == "original_full":
self_outputs = self.self(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
else:
if encoder_hidden_states is not None:
raise ValueError("BigBird cannot be used as a decoder when config.attention_type != 'original_full'")
self_outputs = self.self(
hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BigBird
| BigBirdAttention |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 341498,
"end": 341825
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("ExternalIdentity", graphql_name="node")
| ExternalIdentityEdge |
python | pytorch__pytorch | test/distributed/test_c10d_gloo.py | {
"start": 7759,
"end": 60484
} | class ____(MultiProcessTestCase):
lazy_init = False
def _create_process_group_gloo(self, store, rank, world_size, opts):
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, opts)
dist.barrier(group=pg)
return pg
def setUp(self):
super().setUp()
self._spawn_processes()
def opts(self, threads=2, group_name="0"):
opts = c10d.ProcessGroupGloo._Options()
opts._timeout = 50.0
opts._devices = [create_device(interface=LOOPBACK, lazy_init=self.lazy_init)]
opts._threads = threads
opts.group_name = group_name
return opts
@requires_gloo()
def test_multi_device_constructor(self):
store = c10d.FileStore(self.file_name, self.world_size)
opts = c10d.ProcessGroupGloo._Options()
opts._timeout = 5.0
opts._devices = [
create_device(interface=LOOPBACK, lazy_init=self.lazy_init),
create_device(interface=LOOPBACK, lazy_init=self.lazy_init),
]
pg = self._create_process_group_gloo(store, self.rank, self.world_size, opts)
# Execute 2x the number of operations to ensure we use every device.
for fut in [pg.allreduce(torch.ones(i + 1)).get_future() for i in range(4)]:
fut.wait()
@requires_gloo()
def test_empty_tensors(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
xs = [torch.FloatTensor([])]
fut = pg.broadcast(xs).get_future()
fut.wait()
output = fut.value()
self.assertEqual(0, output[0].numel())
self.assertEqual(xs[0], output[0])
@requires_gloo()
def test_broadcast_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.BroadcastOptions()
opts.rootRank = -1
opts.rootTensor = 0
pg.broadcast([t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.world_size
opts.rootTensor = 0
pg.broadcast([t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid root tensor"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = -1
pg.broadcast([t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid root tensor"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 1
pg.broadcast([t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid root tensor"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.broadcast([], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor type"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.broadcast([t1, t2], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor size"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.broadcast([t1, t3], opts)
def _test_broadcast_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
def broadcast(xs, rootRank, rootTensor):
opts = c10d.BroadcastOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
fut = pg.broadcast(xs, opts).get_future()
fut.wait()
return fut.value()
# Every rank is root once
for i in range(self.world_size):
# Run with 1 input tensor
x = fn(torch.tensor([self.rank]))
output = broadcast([x], i, 0)
self.assertEqual(torch.tensor([i]), output[0])
# Run with 2 input tensors
num = 2
for j in range(num):
xs = [
fn(torch.tensor([self.rank * num + 0.0])),
fn(torch.tensor([self.rank * num + 1.0])),
]
output = broadcast(xs, i, j)
self.assertEqual(
torch.tensor([i * num + j], dtype=torch.float32), output[0]
)
self.assertEqual(
torch.tensor([i * num + j], dtype=torch.float32), output[1]
)
# Run with 1 input tensor of cfloat dtype
x = fn(torch.tensor([complex(self.rank, self.rank)], dtype=torch.cfloat))
output = broadcast([x], i, 0)
self.assertEqual(
torch.tensor([complex(i, i)], dtype=torch.cfloat), output[0]
)
# Test overloaded convenience function
x = torch.tensor([self.rank + 1.0])
fut = pg.broadcast(x, root=0).get_future()
fut.wait()
result = fut.value()
self.assertEqual(torch.tensor([1.0]), result[0])
@requires_gloo()
def test_broadcast_basics(self):
self._test_broadcast_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_broadcast_basics_cuda(self):
self._test_broadcast_basics(lambda t: t.clone().cuda())
def _test_broadcast_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
work_handles = [
pg.broadcast(inputs[i], root=(i % self.world_size))
for i in range(len(inputs))
]
for i, work_handle in enumerate(work_handles):
work_handle.wait()
self.assertEqual(
torch.tensor([(i * self.world_size) + (i % self.world_size)]),
inputs[i],
msg=(f"Mismatch in iteration {i:d}"),
)
@requires_gloo()
def test_broadcast_stress(self):
inputs = [torch.tensor([i * self.world_size + self.rank]) for i in range(1000)]
self._test_broadcast_stress(inputs)
@skip_if_lt_x_gpu(2)
@requires_gloo()
@skipIfRocm
def test_broadcast_stress_cuda(self):
inputs = [
torch.tensor([i * self.world_size + self.rank]).cuda() for i in range(1000)
]
self._test_broadcast_stress(inputs)
@requires_gloo()
def test_allreduce_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "requires non-empty tensor list"):
opts = c10d.AllreduceOptions()
pg.allreduce([], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor type"):
opts = c10d.AllreduceOptions()
pg.allreduce([t1, t2], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor size"):
opts = c10d.AllreduceOptions()
pg.allreduce([t1, t3], opts)
@requires_gloo()
def test_allreduce_op_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
opts = c10d.AllreduceOptions()
opts.timeout = timedelta(milliseconds=1)
if self.rank == 0:
t1 = torch.zeros([1], dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "Timed out waiting 1ms"):
pg.allreduce([t1], opts).wait()
@requires_gloo()
def test_allreduce_overall_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
pg.set_timeout(timedelta(milliseconds=1))
if self.rank == 0:
t1 = torch.zeros([1], dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "Timed out waiting 1ms"):
pg.allreduce([t1]).wait()
def _test_allreduce_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
# Single input tests
tests = simple_reduce_tests(self.rank, self.world_size)
for op, input, expected in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensor = fn(input)
fut = pg.allreduce([tensor], opts).get_future()
fut.wait()
result = fut.value()
self.assertEqual(expected, result[0])
# Multi input tests
tests = simple_multi_input_reduce_tests(self.rank, self.world_size)
for op, inputs, output in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensors = [fn(input) for input in inputs]
fut = pg.allreduce(tensors, opts).get_future()
fut.wait()
result = fut.value()
for tensor in result:
self.assertEqual(output, tensor)
# Test overloaded convenience function (defaults to using sum)
x = fn(torch.tensor([self.rank + 1.0]))
fut = pg.allreduce(x).get_future()
fut.wait()
result = fut.value()
self.assertEqual(
torch.tensor([float(self.world_size * (self.world_size + 1) / 2)]),
result[0],
)
# Test fp16 numerical correctness for all-reduce SUM.
torch.manual_seed(self.rank)
# TODO: when create larger sizes of tensors, numerical instability will be observed.
# We need to investigate the root cause and ensure it is fixed.
tensor = (
(torch.rand(200, 1, dtype=torch.float32) * 2 - 1) * 65504 / self.world_size
)
opts = c10d.AllreduceOptions()
tensor = tensor.to(torch.float16)
output = [[torch.zeros_like(tensor) for _ in range(self.world_size)]]
# allgather all local tensors first and then sum up.
fut = pg.allgather(output, [tensor]).get_future()
fut.wait()
ag_result = fut.value()
total = torch.stack(ag_result, dim=0).sum(dim=0)
# result from fp16 all-reduce.
fut = pg.allreduce([tensor], opts).get_future()
fut.wait()
result_fp16 = fut.value()
# float16 has only ~11 bits of mantissa, and is sensitive to accumulation
# order and rounding errors so we use a larger tolerance.
self.assertEqual(total, result_fp16[0], rtol=1e-2, atol=1e-3)
@requires_gloo()
def test_allreduce_basics(self):
self._test_allreduce_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_allreduce_basics_cuda(self):
self._test_allreduce_basics(lambda t: t.clone().cuda())
def _test_allreduce_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
future_handles = [
pg.allreduce(inputs[i]).get_future() for i in range(len(inputs))
]
for i, future_handle in enumerate(future_handles):
future_handle.wait()
self.assertEqual(
torch.tensor(
[
(i * self.world_size)
+ (self.world_size * (self.world_size - 1) // 2)
]
),
future_handle.value()[0],
msg=(f"Mismatch in iteration {i:d}"),
)
@requires_gloo()
def test_allreduce_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_allreduce_stress(inputs)
@skip_if_lt_x_gpu(2)
@requires_gloo()
@skipIfRocm
def test_allreduce_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_allreduce_stress(inputs)
@requires_gloo()
def test_allreduce_coalesced_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros(1, dtype=torch.float32)
t2 = torch.zeros(1, dtype=torch.float64)
t3 = torch.sparse_coo_tensor([[0]], [1], size=(1,))
with self.assertRaisesRegex(RuntimeError, "requires non-empty tensor list"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([], opts)
with self.assertRaisesRegex(
RuntimeError, "tensors must all have the same type"
):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t1, t2], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor layout at index"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t1, t3], opts)
with self.assertRaisesRegex(RuntimeError, "unsupported layout"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t3, t3.clone()], opts)
@skip_if_lt_x_gpu(1)
@requires_gloo()
def test_allreduce_coalesced_checks_cuda(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros(1, dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "unsupported device type"):
opts = c10d.AllreduceCoalescedOptions()
pg.allreduce_coalesced([t1.cuda(), t1.cuda()], opts)
def _test_allreduce_coalesced_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
test_cases = simple_coalesced_reduce_tests(self.rank, self.world_size)
for op, inputs, outputs in test_cases:
opts = c10d.AllreduceCoalescedOptions()
opts.reduceOp = op
tensors = [fn(x) for x in inputs]
fut = pg.allreduce_coalesced(tensors, opts).get_future()
fut.wait()
result = fut.value()
for result_tensor, expected in zip(result, outputs):
self.assertEqual(result_tensor, expected)
@requires_gloo()
def test_allreduce_coalesced_basics(self):
self._test_allreduce_coalesced_basics(lambda t: t.clone())
def _expected_output(self, i):
ws = self.world_size
return 2 * [torch.tensor([(i * ws) + (ws * (ws - 1) // 2)])]
def _test_allreduce_coalesced_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
future_handles = [
pg.allreduce_coalesced(input).get_future() for input in inputs
]
for i, future_handle in enumerate(future_handles):
future_handle.wait()
result = future_handle.value()
self.assertEqual(
self._expected_output(i),
result,
msg=f"Mismatch in iteration {i}",
)
@requires_gloo()
def test_allreduce_coalesced_stress(self):
inputs = [2 * [torch.tensor([i + self.rank])] for i in range(1000)]
self._test_allreduce_coalesced_stress(inputs)
@requires_gloo()
def test_allreduce_coalesced_async(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="gloo", rank=self.rank, world_size=self.world_size, store=store
)
xs = [2 * [torch.tensor([i + self.rank])] for i in range(2)]
futs = [c10d.all_reduce_coalesced(x, async_op=True) for x in xs]
torch.futures.wait_all(futs)
for i, fut in enumerate(futs):
self.assertEqual(
self._expected_output(i),
fut.wait(),
msg=f"Mismatch in iteration {i}",
)
@requires_gloo()
def test_sparse_allreduce_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros([1])
t2 = torch.sparse_coo_tensor([[0]], [1], size=(2,))
t3 = torch.sparse_coo_tensor([[0]], [1], size=(4,))
with self.assertRaisesRegex(RuntimeError, "requires non-empty tensor list"):
opts = c10d.AllreduceOptions()
pg.allreduce([], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor layout"):
opts = c10d.AllreduceOptions()
pg.allreduce([t1, t2], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor size"):
opts = c10d.AllreduceOptions()
pg.allreduce([t2, t3], opts)
# Sparse allreduce only works with c10d.ReduceOp.SUM.
for op in [c10d.ReduceOp.PRODUCT, c10d.ReduceOp.MIN, c10d.ReduceOp.MAX]:
with self.assertRaisesRegex(
RuntimeError, "unsupported reduction operation"
):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
pg.allreduce([t3], opts)
def _test_sparse_allreduce_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
for num_inputs_per_rank in [1, 2]:
tests = simple_sparse_reduce_tests(
self.rank, self.world_size, num_inputs=num_inputs_per_rank
)
for inputs, outputs in tests:
tensors = [fn(input) for input in inputs]
fut = pg.allreduce(tensors).get_future()
fut.wait()
result = fut.value()
self.assertEqual(tensors, outputs)
self.assertEqual(result, outputs)
@requires_gloo()
def test_sparse_allreduce_basics(self):
self._test_sparse_allreduce_basics(lambda t: t)
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_sparse_allreduce_basics_cuda(self):
self._test_sparse_allreduce_basics(lambda t: t.clone().cuda())
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_sparse_allreduce_cuda_dispatched(self):
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend="gloo", store=store, rank=self.rank, world_size=self.world_size
)
tests = simple_sparse_reduce_tests(self.rank, self.world_size, num_inputs=1)
for inputs, outputs in tests:
tensors = inputs[-1].clone().cuda()
work = dist.all_reduce(tensors, async_op=True)
work.wait()
self.assertEqual([tensors], outputs)
@requires_gloo()
def test_allgather_into_tensor_coalesced(self):
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend="gloo",
store=store,
rank=self.rank,
world_size=self.world_size,
)
torch.manual_seed(42)
in_shapes = [(5, 5), (10, 10), (15, 15)]
out_shapes = [(s[0] * self.world_size,) + s[1:] for s in in_shapes]
outputs = [torch.empty(s) for s in out_shapes]
inputs = [torch.rand(s) for s in in_shapes]
work = dist.group.WORLD.allgather_into_tensor_coalesced(outputs, inputs)
work.wait()
for output, input in zip(outputs, inputs):
expect = torch.cat([input] * self.world_size)
self.assertTrue(torch.allclose(output, expect))
@requires_gloo()
def test_reduce_scatter(self):
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend="gloo",
store=store,
rank=self.rank,
world_size=self.world_size,
)
torch.manual_seed(42)
# variable size per rank
inputs = [torch.rand(i) for i in range(self.world_size)]
output = torch.empty(self.rank)
work = dist.reduce_scatter(output, inputs, async_op=True)
work.wait()
expect = inputs[self.rank] * self.world_size
self.assertTrue(torch.allclose(output, expect))
@requires_gloo()
def test_reduce_scatter_tensor(self):
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend="gloo",
store=store,
rank=self.rank,
world_size=self.world_size,
)
torch.manual_seed(42)
out_shape = (20, 20)
in_shape = (out_shape[0] * self.world_size,) + out_shape[1:]
output = torch.empty(out_shape)
input = torch.rand(in_shape)
work = dist.reduce_scatter_tensor(output, input, async_op=True)
work.wait()
expect = (
input.view(self.world_size, *out_shape).chunk(self.world_size)[self.rank]
* self.world_size
)
self.assertTrue(torch.allclose(output, expect))
@requires_gloo()
def test_reduce_scatter_tensor_coalesced(self):
store = c10d.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend="gloo",
store=store,
rank=self.rank,
world_size=self.world_size,
)
torch.manual_seed(42)
out_shapes = [(5, 5), (10, 10), (15, 15)]
in_shapes = [(s[0] * self.world_size,) + s[1:] for s in out_shapes]
outputs = [torch.empty(s) for s in out_shapes]
inputs = [torch.rand(s) for s in in_shapes]
work = dist.group.WORLD.reduce_scatter_tensor_coalesced(outputs, inputs)
work.wait()
for output, input in zip(outputs, inputs):
expect = (
input.view(self.world_size, *output.shape).chunk(self.world_size)[
self.rank
]
* self.world_size
)
self.assertTrue(torch.allclose(output, expect))
@requires_gloo()
def test_scatter_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.ScatterOptions()
opts.rootRank = -1
pg.scatter([t1], [], opts)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.ScatterOptions()
opts.rootRank = self.world_size
pg.scatter([t1], [], opts)
with self.assertRaisesRegex(
RuntimeError, "requires a single-element output tensor list"
):
opts = c10d.ScatterOptions()
opts.rootRank = 0
pg.scatter([], [], opts)
with self.assertRaisesRegex(
RuntimeError, "requires a single-element output tensor list"
):
opts = c10d.ScatterOptions()
opts.rootRank = 0
pg.scatter([t1, t1], [], opts)
with self.assertRaisesRegex(
RuntimeError, "requires a single-element input list"
):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [], opts)
with self.assertRaisesRegex(
RuntimeError, "requires a single-element input list"
):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * self.world_size, [t1] * self.world_size], opts)
desired_list_size = self.world_size
incorrect_list_size = self.world_size - 1
err_str = "Incorrect input list size {}. Input list size should be {}"
with self.assertRaisesRegex(
RuntimeError, err_str.format(incorrect_list_size, desired_list_size)
):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * incorrect_list_size], opts)
incorrect_list_size = self.world_size + 1
with self.assertRaisesRegex(
RuntimeError, err_str.format(incorrect_list_size, desired_list_size)
):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * incorrect_list_size], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor type"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t2] * self.world_size], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor size"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t3] * self.world_size], opts)
with self.assertRaisesRegex(RuntimeError, "requires empty input on non-root"):
opts = c10d.ScatterOptions()
opts.rootRank = (self.rank + 1) % self.world_size
pg.scatter([t1], [[t1] * self.world_size], opts)
def _test_scatter_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
# Preallocate tensors for input/output
input = [fn(torch.tensor([self.rank])) for _ in range(self.world_size)]
outputs = [fn(torch.tensor([-1])) for _ in range(self.world_size)]
# Take turns being the scatter root and accumulate work items
futures = []
for i in range(self.world_size):
opts = c10d.ScatterOptions()
opts.rootRank = i
if i == self.rank:
futures.append(pg.scatter([outputs[i]], [input], opts).get_future())
else:
futures.append(pg.scatter([outputs[i]], [], opts).get_future())
# Wait for work to complete
for i in range(self.world_size):
futures[i].wait()
result = futures[i].value()
self.assertEqual(torch.tensor([i]), result[0])
@requires_gloo()
def test_scatter_basics(self):
self._test_scatter_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_scatter_basics_cuda(self):
self._test_scatter_basics(lambda t: t.clone().cuda())
def _test_scatter_stress(self, inputs, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
outputs = [
[fn(torch.tensor([-1])) for _ in range(self.world_size)]
for _ in range(len(inputs))
]
future_handles = []
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.ScatterOptions()
opts.rootRank = root
if root == self.rank:
fut = pg.scatter(
[outputs[i][root]], [[fn(e) for e in inputs[i]]], opts
).get_future()
else:
fut = pg.scatter([outputs[i][root]], [], opts).get_future()
future_handles.append(fut)
for i, future_handle in enumerate(future_handles):
future_handle.wait()
iter = i // self.world_size
root = i % self.world_size
result = future_handle.value()
self.assertEqual(
torch.tensor([iter + root]),
result[0],
msg=(f"Mismatch in iteration {iter:d} for rank {root:d}"),
)
@requires_gloo()
def test_set_gloo_pg_timeout(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
pg.allreduce(torch.rand(10))
self.assertEqual(pg.options._timeout, timedelta(seconds=50))
pg._set_default_timeout(timedelta(seconds=23))
self.assertEqual(pg.options._timeout, timedelta(seconds=23))
@requires_gloo()
def test_scatter_stress(self):
inputs = [
[torch.tensor([i + self.rank]) for _ in range(self.world_size)]
for i in range(1000)
]
self._test_scatter_stress(inputs, lambda t: t.clone())
@skip_but_pass_in_sandcastle(
"Test is flaky, see https://github.com/pytorch/pytorch/issues/15963"
)
@skip_if_lt_x_gpu(2)
@requires_gloo()
@skipIfRocm
def test_scatter_stress_cuda(self):
inputs = [
[torch.tensor([i + self.rank]) for _ in range(self.world_size)]
for i in range(1000)
]
self._test_scatter_stress(inputs, lambda t: t.clone().cuda())
@requires_gloo()
def test_gather_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.GatherOptions()
opts.rootRank = -1
pg.gather([], [t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.GatherOptions()
opts.rootRank = self.world_size
pg.gather([], [t1], opts)
with self.assertRaisesRegex(
RuntimeError, "requires a single-element input tensor list"
):
opts = c10d.GatherOptions()
opts.rootRank = 0
pg.gather([], [], opts)
with self.assertRaisesRegex(
RuntimeError, "requires a single-element input tensor list"
):
opts = c10d.GatherOptions()
opts.rootRank = 0
pg.gather([], [t1, t1], opts)
with self.assertRaisesRegex(
RuntimeError, "requires a single-element output list"
):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([], [t1], opts)
with self.assertRaisesRegex(
RuntimeError, "requires a single-element output list"
):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t1] * self.world_size, [t1] * self.world_size], [t1], opts)
desired_list_size = self.world_size
incorrect_list_size = self.world_size - 1
err_str = "Incorrect output list size {}. Output list size should be {}"
with self.assertRaisesRegex(
RuntimeError, err_str.format(incorrect_list_size, desired_list_size)
):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t1] * incorrect_list_size], [t1], opts)
incorrect_list_size = self.world_size + 1
with self.assertRaisesRegex(
RuntimeError, err_str.format(incorrect_list_size, desired_list_size)
):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t1] * incorrect_list_size], [t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor type"):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t2] * self.world_size], [t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid tensor size"):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t3] * self.world_size], [t1], opts)
with self.assertRaisesRegex(RuntimeError, "requires empty output on non-root"):
opts = c10d.GatherOptions()
opts.rootRank = (self.rank + 1) % self.world_size
pg.gather([[t1] * self.world_size], [t1], opts)
def _test_gather_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
# Preallocate tensors for input/output
input = [fn(torch.tensor([self.rank]))]
outputs = [fn(torch.tensor([-1])) for _ in range(self.world_size)]
# Take turns being the gather root and accumulate work items
futures = []
for i in range(self.world_size):
opts = c10d.GatherOptions()
opts.rootRank = i
if i == self.rank:
futures.append(pg.gather([outputs], input, opts).get_future())
else:
futures.append(pg.gather([], input, opts).get_future())
# Wait for work to complete
expected = [fn(torch.tensor([rank])) for rank in range(self.world_size)]
for i in range(self.world_size):
futures[i].wait()
result = futures[i].value()
if i == self.rank:
self.assertEqual(expected, result)
@requires_gloo()
def test_gather_basics(self):
self._test_gather_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_gather_basics_cuda(self):
self._test_gather_basics(lambda t: t.clone().cuda())
@requires_gloo()
def test_gather_noncontiguous_input(self):
# Take a column of 2D tensor, such that memory is not dense
self._test_gather_basics(lambda t: t.expand(2, 2).tril().contiguous()[:, 0])
def _test_gather_stress(self, inputs, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
future_handles = []
outputs = [
[[fn(torch.tensor([-1])) for _ in range(self.world_size)]]
for _ in range(len(inputs))
]
expected_outputs = [
[[torch.tensor([i + j]) for j in range(self.world_size)]]
for i in range(len(inputs))
]
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.GatherOptions()
opts.rootRank = root
if root == self.rank:
fut = pg.gather(outputs[i], [fn(inputs[i])], opts).get_future()
else:
fut = pg.gather([], [fn(inputs[i])], opts).get_future()
future_handles.append(fut)
for i, future_handle in enumerate(future_handles):
future_handle.wait()
iter = i // self.world_size
root = i % self.world_size
if root == self.rank:
result = future_handle.value()
self.assertEqual(
expected_outputs[iter],
[result],
msg=(f"Mismatch in iteration {iter:d} for root {root:d}"),
)
@requires_gloo()
def test_gather_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_gather_stress(inputs, lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@skipIfRocm
@requires_gloo()
def test_gather_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_gather_stress(inputs, lambda t: t.clone().cuda())
@requires_gloo()
def test_allgather_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(
RuntimeError, "requires non-empty input tensor list"
):
pg.allgather([], [])
with self.assertRaisesRegex(
RuntimeError, "requires input/output tensor lists to have the same length"
):
pg.allgather([], [t1])
with self.assertRaisesRegex(
RuntimeError, "requires input/output tensor lists to have the same length"
):
pg.allgather([[t1] * self.world_size, [t1] * self.world_size], [t1])
with self.assertRaisesRegex(RuntimeError, "invalid output tensor list"):
pg.allgather([[t1] * (self.world_size - 1)], [t1])
with self.assertRaisesRegex(RuntimeError, "invalid output tensor list"):
pg.allgather([[t1] * (self.world_size + 1)], [t1])
with self.assertRaisesRegex(RuntimeError, "invalid tensor type"):
pg.allgather(
[[t1, t1] * (self.world_size), [t1, t1] * (self.world_size)], [t1, t2]
)
with self.assertRaisesRegex(RuntimeError, "invalid tensor size"):
pg.allgather(
[[t1, t1] * (self.world_size), [t1, t1] * (self.world_size)], [t1, t3]
)
with self.assertRaisesRegex(RuntimeError, "invalid tensor type"):
pg.allgather([([t1, t2] * (self.world_size))[: self.world_size]], [t1])
with self.assertRaisesRegex(RuntimeError, "invalid tensor size"):
pg.allgather([([t1, t3] * (self.world_size))[: self.world_size]], [t1])
def _test_allgather_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
# Run with N input tensor per rank
for n in [1, 2, 3]:
input = [fn(torch.tensor([n * self.rank + i])) for i in range(n)]
output = [
[fn(torch.tensor([-1])) for _ in range(n * self.world_size)]
for _ in range(n)
]
expected_output = [
[fn(torch.tensor([i])) for i in range(n * self.world_size)]
for _ in range(n)
]
fut = pg.allgather(output, input).get_future()
fut.wait()
result = fut.value()
if n == 1:
result = [result]
self.assertEqual(expected_output, result)
@requires_gloo()
def test_allgather_basics(self):
self._test_allgather_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_allgather_basics_cuda(self):
self._test_allgather_basics(lambda t: t.clone().cuda())
@requires_gloo()
def test_allgather_noncontiguous_input(self):
# Take a column of 2D tensor, such that memory is not dense
self._test_allgather_basics(lambda t: t.expand(2, 2).tril().contiguous()[:, 0])
@requires_gloo()
def test_allgather_inference_mode(self):
with torch.inference_mode():
self._test_allgather_basics(lambda t: t.clone())
def _test_allgather_stress(self, inputs, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
future_handles = []
outputs = [
[[fn(torch.tensor([-1])) for _ in range(self.world_size)]]
for _ in range(len(inputs))
]
expected_outputs = [
[[torch.tensor([i + j]) for j in range(self.world_size)]]
for i in range(len(inputs))
]
input_holder = {}
for i in range(len(inputs)):
# Note that this works around the data race discussed in
# https://github.com/pytorch/pytorch/issues/75529, but we should
# actually be able to pass the list directly into allgather when
# that race is fixed.
input_holder[i] = [fn(inputs[i])]
fut = pg.allgather(outputs[i], input_holder[i]).get_future()
future_handles.append(fut)
for i, future_handle in enumerate(future_handles):
future_handle.wait()
result = future_handle.value()
self.assertEqual(
expected_outputs[i],
[result],
msg=(f"Mismatch in iteration {i:d}"),
)
@requires_gloo()
def test_allgather_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_allgather_stress(inputs, lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@requires_gloo()
@skipIfRocm
def test_allgather_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_allgather_stress(inputs, lambda t: t.clone().cuda())
@requires_gloo()
def test_allgather_coalesced_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
dummy_input = [torch.zeros([1], dtype=torch.float32)]
dummy_output_lists = [
[torch.zeros([1], dtype=torch.float32)] for _ in range(self.world_size)
]
# One of output tensors does not match input list.
dummy_output_lists[0] = [torch.zeros([0], dtype=torch.float32)]
with self.assertRaisesRegex(
RuntimeError, "invalid size of output tensor at index 0"
):
c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
# One of output tensors does not match input list.
dummy_output_lists[0] = [torch.zeros([1], dtype=torch.float64)]
with self.assertRaisesRegex(RuntimeError, "invalid tensor type at index 0"):
c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
# Output lists have too many elements
dummy_output_lists = [
[torch.zeros([1], dtype=torch.float32)] for _ in range(self.world_size + 1)
]
with self.assertRaisesRegex(
RuntimeError, "output lists should be equal to world size"
):
c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
# Output is not a list of lists.
dummy_output_lists = [torch.zeros([0], dtype=torch.float32)]
with self.assertRaisesRegex(
TypeError, "Invalid function argument.*output_tensor_lists"
):
c10d.all_gather_coalesced(dummy_output_lists, dummy_input, pg)
@requires_gloo()
def test_allgather_coalesced_async(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="gloo", rank=self.rank, world_size=self.world_size, store=store
)
xxs = [2 * [torch.tensor([i + self.rank])] for i in range(2)]
yys = [
[[torch.zeros_like(x) for x in xx] for _ in range(self.world_size)]
for xx in xxs
]
futs = [
c10d.all_gather_coalesced(yy, xx, async_op=True) for xx, yy in zip(xxs, yys)
]
# expected outputs
zzs = [
[2 * [torch.tensor([i + r])] for r in range(self.world_size)]
for i in range(2)
]
torch.futures.wait_all(futs)
for yy, zz in zip(yys, zzs):
# one iteration
for y_out, z_out in zip(yy, zz):
# one output tensor list
for y, z in zip(y_out, z_out):
# one tensor in output tensor list
self.assertEqual(y, z)
# Added to address https://github.com/pytorch/pytorch/issues/65231
# In the failed tests, all assertEqual are passed on all processes.
# However, one of the processes didn't call ProcessGroupGloo
# destructor before exiting program. This is not surprising as the only
# guarantee that Python makes is that garbage collection MAY happen
# before the program exits. If GC didn't happen, the two threads in
# ProcessGroup might be destructed before joined.
# FIXME: it's still unclear why only this test require explicit
# destroy_process_group()
c10d.destroy_process_group()
@requires_gloo()
def test_reduce_checks(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t1 = torch.zeros([1], dtype=torch.float32)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.ReduceOptions()
opts.rootRank = -1
opts.rootTensor = 0
pg.reduce([t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid root rank"):
opts = c10d.ReduceOptions()
opts.rootRank = self.world_size
opts.rootTensor = 0
pg.reduce([t1], opts)
with self.assertRaisesRegex(RuntimeError, "invalid root tensor"):
opts = c10d.ReduceOptions()
opts.rootRank = self.rank
opts.rootTensor = 1
pg.reduce([t1], opts)
with self.assertRaisesRegex(
RuntimeError, "requires a single-element tensor list"
):
opts = c10d.ReduceOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.reduce([t1, t1], opts)
def _test_reduce_basics(self, fn):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
for op, input, output in simple_reduce_tests(self.rank, self.world_size):
for root in range(self.world_size):
opts = c10d.ReduceOptions()
opts.reduceOp = op
opts.rootRank = root
tmp = fn(input)
fut = pg.reduce([tmp], opts).get_future()
fut.wait()
result = fut.value()
if root == self.rank:
self.assertEqual(output, result[0])
@requires_gloo()
def test_reduce_basics(self):
self._test_reduce_basics(lambda t: t.clone())
@skip_if_lt_x_gpu(2)
@requires_gloo()
def test_reduce_basics_cuda(self):
self._test_reduce_basics(lambda t: t.clone().cuda())
def _test_reduce_stress(self, inputs):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts(threads=8)
)
future_handles = []
outputs = []
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.ReduceOptions()
opts.rootRank = root
tmp = inputs[i].clone()
outputs.append(tmp)
fut = pg.reduce([tmp], opts).get_future()
future_handles.append(fut)
for i, future_handle in enumerate(future_handles):
future_handle.wait()
result = future_handle.value()
iter = i // self.world_size
root = i % self.world_size
if root == self.rank:
self.assertEqual(
torch.tensor(
[
(iter * self.world_size)
+ (self.world_size * (self.world_size - 1) // 2)
]
),
result[0],
msg=(f"Mismatch in iteration {iter:d} with root rank {root:d}"),
)
@requires_gloo()
def test_reduce_stress(self):
inputs = [torch.tensor([i + self.rank]) for i in range(1000)]
self._test_reduce_stress(inputs)
@skip_if_lt_x_gpu(2)
@requires_gloo()
@skipIfRocm
def test_reduce_stress_cuda(self):
inputs = [torch.tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_reduce_stress(inputs)
@requires_gloo()
def test_send_recv_all_to_all(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
# Preallocate tensors for input/output
inputs = [torch.tensor([self.rank]) for _ in range(self.world_size)]
outputs = [torch.tensor([-1]) for _ in range(self.world_size)]
# Issue sends
send_work = []
for i in range(self.world_size):
if i == self.rank:
continue
send_work.append(pg.send([inputs[i]], i, 0))
# Issue recvs
recv_work = []
for i in range(self.world_size):
if i == self.rank:
continue
recv_work.append(pg.recv([outputs[i]], i, 0))
# Wait for sends to complete
for work in send_work:
work.wait()
self.assertTrue(work.is_completed())
# Wait for recvs to complete
for work in recv_work:
work.wait()
self.assertTrue(work.is_completed())
# Test that every output other than our own contains the respective rank
for i in range(self.world_size):
if i == self.rank:
continue
self.assertEqual(torch.tensor([i]), outputs[i])
@requires_gloo()
def test_barrier_implies_wait(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
# Kick off allreduce operations
size = (100, 100)
num = 16
tensors = [torch.full(size, float(i)) for i in range(num)]
for tensor in tensors:
# Note: leak the returned work handle
pg.allreduce(tensor)
# Barrier should ensure all previous work has completed
pg.barrier().get_future().wait()
for i, tensor in enumerate(tensors):
self.assertEqual(torch.full(size, float(i * self.world_size)), tensor)
@skip_if_lt_x_gpu(2)
@requires_gloo()
@skipIfRocm
def test_block_current_stream_cuda(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
t = torch.zeros(10, device="cuda")
work = pg.allreduce(t)
work.block_current_stream()
torch.cuda.current_stream().synchronize()
work.wait()
@requires_gloo()
def test_send_recv_complex(self):
store = c10d.FileStore(self.file_name, self.world_size)
pg = self._create_process_group_gloo(
store, self.rank, self.world_size, self.opts()
)
# Generate the same random tensor
torch.manual_seed(0)
send_tensor = torch.rand(10, 10, dtype=torch.cfloat)
if self.rank == 0:
pg.send([send_tensor], 1, 0).wait()
if self.rank == 1:
recv_tensor = torch.rand(10, 10, dtype=torch.cfloat)
pg.recv([recv_tensor], 0, 0).wait()
self.assertEqual(send_tensor, recv_tensor)
| ProcessGroupGlooTest |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/aot_autograd_result.py | {
"start": 9367,
"end": 10228
} | class ____(
GenericCompiledBackward[TOutputCode],
BundledOutputCodeLoadable[TOutputCode],
Generic[TOutputCode],
):
"""
Generic backward function for bundled compilation.
Works with any OutputCode type (CompiledFxGraph, RegionalOutputCode, etc.)
"""
def post_compile(
self, result: TOutputCode, fx_config: _CompileFxKwargs
) -> TOutputCode:
compiled_bw = super().post_compile(result, fx_config)
# See note [Wrapping bw_compiler in disable]
# This is done by _wrapped_bw_compiler in torch/_dynamo/backends/common.py
# But since on cache hit we do not call the bw_compiler, we need to reapply the disable
return torch._dynamo.disable( # type: ignore[return-value]
compiled_bw, reason="do not trace generated backwards pass"
)
@dataclass
| BundledCompiledBackward |
python | pytorch__pytorch | torchgen/model.py | {
"start": 1500,
"end": 1683
} | class ____:
file: str
line: int
def __str__(self) -> str:
return f"{self.file}:{self.line}"
# Valid values of the 'variants' field in native_functions.yaml
| Location |
python | pdm-project__pdm | src/pdm/formats/base.py | {
"start": 1321,
"end": 3402
} | class ____(metaclass=_MetaConverterMeta):
"""Convert a metadata dictionary to PDM's format"""
_converters: dict[str, Callable]
def __init__(self, source: dict, ui: termui.UI | None = None) -> None:
self.source = source
self.settings: dict[str, Any] = {}
self._data: dict[str, Any] = {}
self._ui = ui
def convert(self) -> tuple[Mapping[str, Any], Mapping[str, Any]]:
source = self.source
errors: list[str] = []
for key, func in self._converters.items():
if func._convert_from and func._convert_from not in source: # type: ignore[attr-defined]
continue
value = source if func._convert_from is None else source[func._convert_from] # type: ignore[attr-defined]
try:
self._data[key] = func(self, value)
except Unset:
pass
except Exception as e:
errors.append(f"{key}: {e}")
# Delete all used fields
for func in self._converters.values():
if func._convert_from is None: # type: ignore[attr-defined]
continue
try:
del source[func._convert_from] # type: ignore[attr-defined]
except KeyError:
pass
# Add remaining items to the data
self._data.update(source)
if errors:
raise MetaConvertError(errors, data=self._data, settings=self.settings)
return self._data, self.settings
def make_inline_table(data: Mapping) -> dict:
"""Create an inline table from the given data."""
table = cast(dict, tomlkit.inline_table())
table.update(data)
return table
def make_array(data: list, multiline: bool = False) -> list:
array = cast(list, tomlkit.array().multiline(multiline))
if not data:
return array
array.extend(data)
return array
def array_of_inline_tables(value: list[Mapping], multiline: bool = True) -> list[dict]:
return make_array([make_inline_table(item) for item in value], multiline)
| MetaConverter |
python | neetcode-gh__leetcode | python/0021-merge-two-sorted-lists.py | {
"start": 164,
"end": 624
} | class ____:
def mergeTwoLists(self, list1: ListNode, list2: ListNode) -> ListNode:
dummy = node = ListNode()
while list1 and list2:
if list1.val < list2.val:
node.next = list1
list1 = list1.next
else:
node.next = list2
list2 = list2.next
node = node.next
node.next = list1 or list2
return dummy.next
# Recursive
| Solution |
python | pydantic__pydantic | pydantic-core/tests/serializers/test_model.py | {
"start": 12438,
"end": 47352
} | class ____:
__slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
    def __init__(self, **kwargs):
        """Set every keyword argument as an instance attribute.

        setattr is used (rather than updating __dict__ directly) so the
        __slots__-routed pydantic attributes like __pydantic_fields_set__
        are assigned correctly too.
        """
        for key, value in kwargs.items():
            setattr(self, key, value)
def test_exclude_unset():
s = SchemaSerializer(
core_schema.model_schema(
FieldsSetModel,
core_schema.model_fields_schema(
{
'foo': core_schema.model_field(core_schema.int_schema()),
'bar': core_schema.model_field(core_schema.int_schema()),
'spam': core_schema.model_field(core_schema.int_schema()),
},
extra_behavior='ignore', # this is the default
),
)
)
m = FieldsSetModel(foo=1, bar=2, spam=3, __pydantic_fields_set__={'bar', 'spam'})
assert s.to_python(m) == {'foo': 1, 'bar': 2, 'spam': 3}
assert s.to_python(m, exclude_unset=True) == {'bar': 2, 'spam': 3}
assert s.to_python(m, exclude=None, exclude_unset=True) == {'bar': 2, 'spam': 3}
assert s.to_python(m, exclude={'bar'}, exclude_unset=True) == {'spam': 3}
assert s.to_python(m, exclude={'bar': ...}, exclude_unset=True) == {'spam': 3}
assert s.to_python(m, exclude={'bar': {}}, exclude_unset=True) == {'bar': 2, 'spam': 3}
assert s.to_json(m, exclude=None, exclude_unset=True) == b'{"bar":2,"spam":3}'
assert s.to_json(m, exclude={'bar'}, exclude_unset=True) == b'{"spam":3}'
assert s.to_json(m, exclude={'bar': ...}, exclude_unset=True) == b'{"spam":3}'
assert s.to_json(m, exclude={'bar': {}}, exclude_unset=True) == b'{"bar":2,"spam":3}'
m2 = FieldsSetModel(foo=1, bar=2, spam=3, __pydantic_fields_set__={'bar', 'spam', 'missing'})
assert s.to_python(m2) == {'foo': 1, 'bar': 2, 'spam': 3}
assert s.to_python(m2, exclude_unset=True) == {'bar': 2, 'spam': 3}
@pytest.mark.parametrize(
'exclude,expected',
[
pytest.param(
{'subs': {'__all__': {'subsubs': {'__all__': {'i'}}}}},
{'subs': [{'k': 1, 'subsubs': [{'j': 1}, {'j': 2}]}, {'k': 2, 'subsubs': [{'j': 3}]}]},
id='Normal nested __all__',
),
pytest.param(
{'subs': {'__all__': {'subsubs': {'__all__': {'i'}}}, 0: {'subsubs': {'__all__': {'j'}}}}},
{'subs': [{'k': 1, 'subsubs': [{}, {}]}, {'k': 2, 'subsubs': [{'j': 3}]}]},
id='Merge sub dicts 1',
),
pytest.param(
{'subs': {'__all__': {'subsubs': ...}, 0: {'subsubs': {'__all__': {'j'}}}}},
{'subs': [{'k': 1, 'subsubs': [{'i': 1}, {'i': 2}]}, {'k': 2}]},
# {'subs': [{'k': 1 }, {'k': 2}]}
id='Merge sub sets 2',
),
pytest.param(
{'subs': {'__all__': {'subsubs': {'__all__': {'j'}}}, 0: {'subsubs': ...}}},
{'subs': [{'k': 1}, {'k': 2, 'subsubs': [{'i': 3}]}]},
id='Merge sub sets 3',
),
pytest.param(
{'subs': {'__all__': {'subsubs': {0}}, 0: {'subsubs': {1}}}},
{'subs': [{'k': 1, 'subsubs': []}, {'k': 2, 'subsubs': []}]},
id='Merge sub sets 1',
),
pytest.param(
{'subs': {'__all__': {'subsubs': {0: {'i'}}}, 0: {'subsubs': {1}}}},
{'subs': [{'k': 1, 'subsubs': [{'j': 1}]}, {'k': 2, 'subsubs': [{'j': 3}]}]},
id='Merge sub dict-set',
),
pytest.param({'subs': {'__all__': {'subsubs'}, 0: {'k'}}}, {'subs': [{}, {'k': 2}]}, id='Different keys 1'),
pytest.param(
{'subs': {'__all__': {'subsubs': ...}, 0: {'k'}}}, {'subs': [{}, {'k': 2}]}, id='Different keys 2'
),
pytest.param(
{'subs': {'__all__': {'subsubs'}, 0: {'k': ...}}}, {'subs': [{}, {'k': 2}]}, id='Different keys 3'
),
pytest.param(
{'subs': {'__all__': {'subsubs': {'__all__': {'i'}, 0: {'j'}}}}},
{'subs': [{'k': 1, 'subsubs': [{}, {'j': 2}]}, {'k': 2, 'subsubs': [{}]}]},
id='Nested different keys 1',
),
pytest.param(
{'subs': {'__all__': {'subsubs': {'__all__': {'i': ...}, 0: {'j'}}}}},
{'subs': [{'k': 1, 'subsubs': [{}, {'j': 2}]}, {'k': 2, 'subsubs': [{}]}]},
id='Nested different keys 2',
),
pytest.param(
{'subs': {'__all__': {'subsubs': {'__all__': {'i'}, 0: {'j': ...}}}}},
{'subs': [{'k': 1, 'subsubs': [{}, {'j': 2}]}, {'k': 2, 'subsubs': [{}]}]},
id='Nested different keys 3',
),
pytest.param(
{'subs': {'__all__': {'subsubs'}, 0: {'subsubs': {'__all__': {'j'}}}}},
{'subs': [{'k': 1, 'subsubs': [{'i': 1}, {'i': 2}]}, {'k': 2}]},
id='Ignore __all__ for index with defined exclude 1',
),
pytest.param(
{'subs': {'__all__': {'subsubs': {'__all__': {'j'}}}, 0: ...}},
{'subs': [{'k': 2, 'subsubs': [{'i': 3}]}]},
id='Ignore __all__ for index with defined exclude 2',
),
pytest.param(
{'subs': {'__all__': ..., 0: {'subsubs'}}},
{'subs': [{'k': 1}]},
id='Ignore __all__ for index with defined exclude 3',
),
],
)
def test_advanced_exclude_nested_lists(exclude, expected):
"""
Taken from pydantic and modified to generate the schema directly.
"""
# class SubSubModel(BaseModel):
# i: int
# j: int
sub_sub_model_schema = core_schema.model_schema(
type('SubSubModel', (), {}),
core_schema.model_fields_schema(
dict(
i=core_schema.model_field(core_schema.int_schema()), j=core_schema.model_field(core_schema.int_schema())
)
),
)
# class SubModel(BaseModel):
# k: int
# subsubs: list[SubSubModel]
sub_model_schema = core_schema.model_schema(
type('SubModel', (), {}),
core_schema.model_fields_schema(
dict(
k=core_schema.model_field(core_schema.int_schema()),
subsubs=core_schema.model_field(core_schema.list_schema(sub_sub_model_schema)),
)
),
)
# class Model(BaseModel):
# subs: list[SubModel]
model_schema = core_schema.model_schema(
BasicModel,
core_schema.model_fields_schema(dict(subs=core_schema.model_field(core_schema.list_schema(sub_model_schema)))),
)
v = SchemaValidator(model_schema)
data = v.validate_python(
dict(subs=[dict(k=1, subsubs=[dict(i=1, j=1), dict(i=2, j=2)]), dict(k=2, subsubs=[dict(i=3, j=3)])])
)
s = SchemaSerializer(model_schema)
assert s.to_python(data, exclude=exclude) == expected
def test_function_plain_field_serializer_to_python():
@dataclasses.dataclass
class Model:
x: int
def ser_x(self, v: Any, _) -> str:
assert self.x == 1_000
return f'{v:_}'
s = SchemaSerializer(
core_schema.model_schema(
Model,
core_schema.model_fields_schema(
{
'x': core_schema.model_field(
core_schema.int_schema(
serialization=core_schema.plain_serializer_function_ser_schema(
Model.ser_x, is_field_serializer=True, info_arg=True
)
)
)
}
),
)
)
assert s.to_python(Model(x=1000)) == {'x': '1_000'}
def test_field_serializer_cached_property():
@dataclasses.dataclass
class Model:
x: int
y: int
@cached_property
def x_formatted(self) -> str:
return f'{self.x:_}'
# This is a @computed_field
@cached_property
def y_formatted(self) -> str:
return f'{self.y:_}'
def ser_x(self, v: Any, _) -> str:
assert self.x == 1_000 == v
return self.x_formatted
def ser_y(self, v: Any, _) -> str:
assert self.y == 2_000 == v
return self.y_formatted
s = SchemaSerializer(
core_schema.model_schema(
Model,
core_schema.model_fields_schema(
{
'x': core_schema.model_field(
core_schema.int_schema(
serialization=core_schema.plain_serializer_function_ser_schema(
Model.ser_x, is_field_serializer=True, info_arg=True
)
)
),
'y': core_schema.model_field(
core_schema.int_schema(
serialization=core_schema.plain_serializer_function_ser_schema(
Model.ser_y, is_field_serializer=True, info_arg=True
)
)
),
},
computed_fields=[core_schema.computed_field('y_formatted', core_schema.str_schema())],
),
)
)
assert s.to_python(Model(x=1000, y=2000)) == {'x': '1_000', 'y': '2_000', 'y_formatted': '2_000'}
assert s.to_json(Model(x=1000, y=2000)) == b'{"x":"1_000","y":"2_000","y_formatted":"2_000"}'
def test_function_wrap_field_serializer_to_python():
@dataclasses.dataclass
class Model:
x: int
def ser_x(self, v: Any, serializer: core_schema.SerializerFunctionWrapHandler, _) -> str:
x = serializer(v)
assert self.x == 1_000
return f'{x:_}'
s = SchemaSerializer(
core_schema.model_schema(
Model,
core_schema.model_fields_schema(
{
'x': core_schema.model_field(
core_schema.int_schema(
serialization=core_schema.wrap_serializer_function_ser_schema(
Model.ser_x, is_field_serializer=True, info_arg=True, schema=core_schema.any_schema()
)
)
)
}
),
)
)
assert s.to_python(Model(x=1000)) == {'x': '1_000'}
def test_function_plain_field_serializer_to_json():
@dataclasses.dataclass
class Model:
x: int
def ser_x(self, v: Any, _) -> str:
assert self.x == 1_000
return f'{v:_}'
s = SchemaSerializer(
core_schema.model_schema(
Model,
core_schema.model_fields_schema(
{
'x': core_schema.model_field(
core_schema.int_schema(
serialization=core_schema.plain_serializer_function_ser_schema(
Model.ser_x, is_field_serializer=True, info_arg=True
)
)
)
}
),
)
)
assert json.loads(s.to_json(Model(x=1000))) == {'x': '1_000'}
def test_function_plain_field_serializer_with_computed_field():
@dataclasses.dataclass
class Model:
x: int
@property
def computed_field_x(self) -> int:
return self.x + 200
def ser_func(self, v: Any, info: core_schema.FieldSerializationInfo) -> str:
return info.field_name + '_' + str(v * 2)
field_str_with_field_serializer = core_schema.str_schema(
serialization=core_schema.plain_serializer_function_ser_schema(
Model.ser_func,
is_field_serializer=True,
info_arg=True,
return_schema=core_schema.any_schema(),
)
)
s = SchemaSerializer(
core_schema.model_schema(
Model,
core_schema.model_fields_schema(
{'x': core_schema.model_field(field_str_with_field_serializer)},
computed_fields=[
core_schema.computed_field('computed_field_x', field_str_with_field_serializer),
],
),
)
)
assert json.loads(s.to_json(Model(x=1000))) == {'x': 'x_2000', 'computed_field_x': 'computed_field_x_2400'}
assert s.to_python(Model(x=2000)) == {'x': 'x_4000', 'computed_field_x': 'computed_field_x_4400'}
def test_function_wrap_field_serializer_to_json():
@dataclasses.dataclass
class Model:
x: int
def ser_x(self, v: Any, serializer: core_schema.SerializerFunctionWrapHandler, _) -> str:
assert self.x == 1_000
x = serializer(v)
return f'{x:_}'
s = SchemaSerializer(
core_schema.model_schema(
Model,
core_schema.model_fields_schema(
{
'x': core_schema.model_field(
core_schema.int_schema(
serialization=core_schema.wrap_serializer_function_ser_schema(
Model.ser_x, is_field_serializer=True, info_arg=True, schema=core_schema.any_schema()
)
)
)
}
),
)
)
assert json.loads(s.to_json(Model(x=1000))) == {'x': '1_000'}
def test_property():
@dataclasses.dataclass
class Model:
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
@property
def area(self) -> bytes:
a = self.width * self.height
return b'%d' % a
s = SchemaSerializer(
core_schema.model_schema(
Model,
core_schema.model_fields_schema(
{
'width': core_schema.model_field(core_schema.int_schema()),
'height': core_schema.model_field(core_schema.int_schema()),
},
computed_fields=[core_schema.computed_field('area', core_schema.bytes_schema())],
),
)
)
assert s.to_python(Model(width=3, height=4)) == {'width': 3, 'height': 4, 'area': b'12'}
assert s.to_python(Model(width=3, height=4), mode='json') == {'width': 3, 'height': 4, 'area': '12'}
assert s.to_json(Model(width=3, height=4)) == b'{"width":3,"height":4,"area":"12"}'
assert s.to_python(Model(width=3, height=4), round_trip=True) == {'width': 3, 'height': 4}
assert s.to_json(Model(width=3, height=4), round_trip=True) == b'{"width":3,"height":4}'
assert s.to_python(Model(width=3, height=4), exclude_computed_fields=True) == {'width': 3, 'height': 4}
assert s.to_json(Model(width=3, height=4), exclude_computed_fields=True) == b'{"width":3,"height":4}'
def test_property_alias():
@dataclasses.dataclass
class Model:
width: int
height: int
@property
def area(self) -> int:
return self.width * self.height
@property
def volume(self) -> int:
return self.area * self.height
s = SchemaSerializer(
core_schema.model_schema(
Model,
core_schema.model_fields_schema(
{
'width': core_schema.model_field(core_schema.int_schema()),
'height': core_schema.model_field(core_schema.int_schema()),
},
computed_fields=[
core_schema.computed_field('area', core_schema.int_schema(), alias='Area'),
core_schema.computed_field('volume', core_schema.int_schema()),
],
),
)
)
assert s.to_python(Model(3, 4), by_alias=True) == {'width': 3, 'height': 4, 'Area': 12, 'volume': 48}
assert s.to_python(Model(3, 4), mode='json', by_alias=True) == {'width': 3, 'height': 4, 'Area': 12, 'volume': 48}
assert s.to_json(Model(3, 4), by_alias=True) == b'{"width":3,"height":4,"Area":12,"volume":48}'
def test_computed_field_without_fields() -> None:
"""https://github.com/pydantic/pydantic/issues/5551"""
# Original test introduced in https://github.com/pydantic/pydantic-core/pull/550
class A:
@property
def b(self) -> str:
return 'b'
schema = core_schema.model_schema(
cls=A,
config={},
schema=core_schema.model_fields_schema(
fields={},
computed_fields=[
core_schema.computed_field('b', return_schema=core_schema.any_schema()),
],
),
)
a = A()
serializer = SchemaSerializer(schema)
assert serializer.to_json(a) == b'{"b":"b"}'
def test_computed_field_exclude_none():
@dataclasses.dataclass
class Model:
width: int
height: int
@property
def area(self) -> int:
return self.width * self.height
@property
def volume(self) -> None:
return None
s = SchemaSerializer(
core_schema.model_schema(
Model,
core_schema.model_fields_schema(
{
'width': core_schema.model_field(core_schema.int_schema()),
'height': core_schema.model_field(core_schema.int_schema()),
},
computed_fields=[
core_schema.computed_field('area', core_schema.int_schema(), alias='Area'),
core_schema.computed_field('volume', core_schema.int_schema()),
],
),
)
)
assert s.to_python(Model(3, 4), exclude_none=False, by_alias=True) == {
'width': 3,
'height': 4,
'Area': 12,
'volume': None,
}
assert s.to_python(Model(3, 4), exclude_none=True, by_alias=True) == {'width': 3, 'height': 4, 'Area': 12}
assert s.to_python(Model(3, 4), mode='json', exclude_none=False, by_alias=True) == {
'width': 3,
'height': 4,
'Area': 12,
'volume': None,
}
assert s.to_python(Model(3, 4), mode='json', exclude_none=True, by_alias=True) == {
'width': 3,
'height': 4,
'Area': 12,
}
assert (
s.to_json(Model(3, 4), exclude_none=False, by_alias=True) == b'{"width":3,"height":4,"Area":12,"volume":null}'
)
assert s.to_json(Model(3, 4), exclude_none=True, by_alias=True) == b'{"width":3,"height":4,"Area":12}'
def test_computed_field_exclude_none_different_order():
# verify that order of computed fields doesn't matter
# issue originally reported via: https://github.com/pydantic/pydantic/issues/8691
@dataclasses.dataclass
class Model:
width: int
height: int
@property
def volume(self) -> None:
return None
@property
def area(self) -> int:
return self.width * self.height
s = SchemaSerializer(
core_schema.model_schema(
Model,
core_schema.model_fields_schema(
{
'width': core_schema.model_field(core_schema.int_schema()),
'height': core_schema.model_field(core_schema.int_schema()),
},
computed_fields=[
core_schema.computed_field('volume', core_schema.int_schema()),
core_schema.computed_field('area', core_schema.int_schema(), alias='Area'),
],
),
)
)
assert s.to_python(Model(3, 4), by_alias=True, exclude_none=False) == {
'width': 3,
'height': 4,
'Area': 12,
'volume': None,
}
assert s.to_python(Model(3, 4), by_alias=True, exclude_none=True) == {'width': 3, 'height': 4, 'Area': 12}
assert s.to_python(Model(3, 4), by_alias=True, mode='json', exclude_none=False) == {
'width': 3,
'height': 4,
'Area': 12,
'volume': None,
}
assert s.to_python(Model(3, 4), mode='json', by_alias=True, exclude_none=True) == {
'width': 3,
'height': 4,
'Area': 12,
}
assert (
s.to_json(Model(3, 4), exclude_none=False, by_alias=True) == b'{"width":3,"height":4,"volume":null,"Area":12}'
)
assert s.to_json(Model(3, 4), exclude_none=True, by_alias=True) == b'{"width":3,"height":4,"Area":12}'
def test_cached_property_alias():
@dataclasses.dataclass
class Model:
width: int
height: int
@cached_property
def area(self) -> int:
return self.width * self.height
s = SchemaSerializer(
core_schema.model_schema(
Model,
core_schema.model_fields_schema(
{
'width': core_schema.model_field(core_schema.int_schema()),
'height': core_schema.model_field(core_schema.int_schema()),
},
computed_fields=[core_schema.computed_field('area', core_schema.int_schema())],
),
)
)
assert s.to_python(Model(3, 4)) == {'width': 3, 'height': 4, 'area': 12}
assert s.to_python(Model(3, 4), mode='json') == {'width': 3, 'height': 4, 'area': 12}
assert s.to_json(Model(3, 4)) == b'{"width":3,"height":4,"area":12}'
def test_property_attribute_error():
    """A declared computed field with no backing attribute raises AttributeError.

    `area` is registered as a computed field but `Model` never defines it, so
    to_python surfaces the AttributeError directly while to_json wraps it in a
    PydanticSerializationError.
    """
    @dataclasses.dataclass
    class Model:
        width: int
    s = SchemaSerializer(
        core_schema.model_schema(
            Model,
            core_schema.model_fields_schema(
                {'width': core_schema.model_field(core_schema.int_schema())},
                computed_fields=[core_schema.computed_field('area', core_schema.bytes_schema())],
            ),
        )
    )
    with pytest.raises(AttributeError, match="^'Model' object has no attribute 'area'$"):
        s.to_python(Model(3))
    with pytest.raises(AttributeError, match="^'Model' object has no attribute 'area'$"):
        s.to_python(Model(3), mode='json')
    e = "^Error serializing to JSON: AttributeError: 'Model' object has no attribute 'area'$"
    with pytest.raises(PydanticSerializationError, match=e):
        s.to_json(Model(3))
def test_property_other_error():
@dataclasses.dataclass
class Model:
width: int
@property
def area(self) -> int:
raise ValueError('xxx')
s = SchemaSerializer(
core_schema.model_schema(
Model,
core_schema.model_fields_schema(
{'width': core_schema.model_field(core_schema.int_schema())},
computed_fields=[core_schema.computed_field('area', core_schema.bytes_schema())],
),
)
)
with pytest.raises(ValueError, match='^xxx$'):
s.to_python(Model(3))
with pytest.raises(ValueError, match='^xxx$'):
s.to_python(Model(3), mode='json')
e = '^Error serializing to JSON: ValueError: xxx$'
with pytest.raises(PydanticSerializationError, match=e):
s.to_json(Model(3))
def test_property_include_exclude():
@dataclasses.dataclass
class Model:
a: int
@property
def b(self):
return [1, 2, b'3']
s = SchemaSerializer(
core_schema.model_schema(
Model,
core_schema.model_fields_schema(
{'a': core_schema.model_field(core_schema.int_schema())},
computed_fields=[core_schema.computed_field('b', core_schema.list_schema())],
),
)
)
assert s.to_python(Model(1)) == {'a': 1, 'b': [1, 2, b'3']}
assert s.to_python(Model(1), exclude={'b'}) == {'a': 1}
assert s.to_python(Model(1), include={'a'}) == {'a': 1}
assert s.to_python(Model(1), exclude={'b': [0]}) == {'a': 1, 'b': [2, b'3']}
assert s.to_python(Model(1), mode='json') == {'a': 1, 'b': [1, 2, '3']}
assert s.to_python(Model(1), mode='json', exclude={'b'}) == {'a': 1}
assert s.to_python(Model(1), mode='json', include={'a'}) == {'a': 1}
assert s.to_python(Model(1), mode='json', exclude={'b': [0]}) == {'a': 1, 'b': [2, '3']}
assert s.to_json(Model(1)) == b'{"a":1,"b":[1,2,"3"]}'
assert s.to_json(Model(1), exclude={'b'}) == b'{"a":1}'
assert s.to_json(Model(1), include={'a'}) == b'{"a":1}'
assert s.to_json(Model(1), exclude={'b': [0]}) == b'{"a":1,"b":[2,"3"]}'
def test_property_setter():
class Square:
side: float
def __init__(self, **kwargs):
self.__dict__ = kwargs
@property
def area(self) -> float:
return self.side**2
@area.setter
def area(self, area: float) -> None:
self.side = area**0.5
@area.deleter
def area(self) -> None:
self.side = 0.0
@cached_property
def random_n(self) -> int:
return randint(0, 1_000)
s = SchemaSerializer(
core_schema.model_schema(
Square,
core_schema.model_fields_schema(
{'side': core_schema.model_field(core_schema.float_schema())},
computed_fields=[
core_schema.computed_field('area', core_schema.float_schema()),
core_schema.computed_field('random_n', core_schema.int_schema(), alias='The random number'),
],
),
)
)
sq = Square(side=10.0)
the_random_n = sq.random_n
assert s.to_python(sq, by_alias=True) == {'side': 10.0, 'area': 100.0, 'The random number': the_random_n}
assert s.to_json(sq, by_alias=True) == b'{"side":10.0,"area":100.0,"The random number":%d}' % the_random_n
sq.area = 49.0
assert s.to_python(sq, by_alias=False) == {'side': 7, 'area': 49, 'random_n': the_random_n}
assert s.to_json(sq, by_alias=False) == b'{"side":7.0,"area":49.0,"random_n":%d}' % the_random_n
del sq.area
assert s.to_python(sq, by_alias=False) == {'side': 0, 'area': 0, 'random_n': the_random_n}
assert s.to_python(sq, exclude={'random_n'}) == {'side': 0, 'area': 0}
def test_extra():
class MyModel:
# this is not required, but it avoids `__pydantic_fields_set__` being included in `__dict__`
__slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
field_a: str
field_b: int
schema = core_schema.model_schema(
MyModel,
core_schema.model_fields_schema(
{
'field_a': core_schema.model_field(core_schema.bytes_schema()),
'field_b': core_schema.model_field(core_schema.int_schema()),
},
extra_behavior='allow',
),
extra_behavior='allow',
)
v = SchemaValidator(schema)
m = v.validate_python({'field_a': b'test', 'field_b': 12, 'field_c': 'extra'})
assert isinstance(m, MyModel)
assert m.__dict__ == {'field_a': b'test', 'field_b': 12}
assert m.__pydantic_extra__ == {'field_c': 'extra'}
assert m.__pydantic_fields_set__ == {'field_a', 'field_b', 'field_c'}
s = SchemaSerializer(schema)
assert 'mode:ModelExtra' in plain_repr(s)
assert 'has_extra:true' in plain_repr(s)
assert s.to_python(m) == {'field_a': b'test', 'field_b': 12, 'field_c': 'extra'}
assert s.to_python(m, mode='json') == {'field_a': 'test', 'field_b': 12, 'field_c': 'extra'}
assert s.to_json(m) == b'{"field_a":"test","field_b":12,"field_c":"extra"}'
# test filtering
m = v.validate_python({'field_a': b'test', 'field_b': 12, 'field_c': None, 'field_d': [1, 2, 3]})
assert isinstance(m, MyModel)
assert m.__dict__ == {'field_a': b'test', 'field_b': 12}
assert m.__pydantic_extra__ == {'field_c': None, 'field_d': [1, 2, 3]}
assert m.__pydantic_fields_set__ == {'field_a', 'field_b', 'field_c', 'field_d'}
assert s.to_python(m) == {'field_a': b'test', 'field_b': 12, 'field_c': None, 'field_d': [1, 2, 3]}
assert s.to_json(m) == b'{"field_a":"test","field_b":12,"field_c":null,"field_d":[1,2,3]}'
assert s.to_python(m, exclude_none=True) == {'field_a': b'test', 'field_b': 12, 'field_d': [1, 2, 3]}
assert s.to_json(m, exclude_none=True) == b'{"field_a":"test","field_b":12,"field_d":[1,2,3]}'
assert s.to_python(m, exclude={'field_c'}) == {'field_a': b'test', 'field_b': 12, 'field_d': [1, 2, 3]}
assert s.to_json(m, exclude={'field_c'}) == b'{"field_a":"test","field_b":12,"field_d":[1,2,3]}'
assert s.to_python(m, exclude={'field_d': [0]}) == {
'field_a': b'test',
'field_b': 12,
'field_c': None,
'field_d': [2, 3],
}
assert s.to_json(m, exclude={'field_d': [0]}) == b'{"field_a":"test","field_b":12,"field_c":null,"field_d":[2,3]}'
def test_extra_config():
    """extra_fields_behavior='allow' given via config enables ModelExtra mode."""
    class MyModel:
        # this is not required, but it avoids `__pydantic_fields_set__` being included in `__dict__`
        __slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
        field_a: str
        field_b: int
    schema = core_schema.model_schema(
        MyModel,
        core_schema.model_fields_schema(
            {
                'field_a': core_schema.model_field(core_schema.bytes_schema()),
                'field_b': core_schema.model_field(core_schema.int_schema()),
            }
        ),
        config=core_schema.CoreConfig(extra_fields_behavior='allow'),
    )
    s = SchemaSerializer(schema)
    # The serializer's repr reflects that extra fields will be serialized.
    assert 'mode:ModelExtra' in plain_repr(s)
    assert 'has_extra:true' in plain_repr(s)
def test_extra_config_nested_model():
class OuterModel:
pass
class InnerModel:
pass
schema = core_schema.model_schema(
OuterModel,
core_schema.model_fields_schema(
{
'sub_model': core_schema.model_field(
core_schema.model_schema(
InnerModel,
core_schema.model_fields_schema({'int': core_schema.model_field(core_schema.int_schema())}),
config=core_schema.CoreConfig(extra_fields_behavior='allow'),
)
)
}
),
config={},
)
s = SchemaSerializer(schema)
# debug(s)
s_repr = plain_repr(s)
assert 'has_extra:true,root_model:false,name:"InnerModel"' in s_repr
assert 'has_extra:false,root_model:false,name:"OuterModel"' in s_repr
def test_extra_custom_serializer():
class Model:
__slots__ = ('__pydantic_extra__', '__dict__')
__pydantic_extra__: dict[str, Any]
schema = core_schema.model_schema(
Model,
core_schema.model_fields_schema(
{},
extra_behavior='allow',
extras_schema=core_schema.any_schema(
serialization=core_schema.plain_serializer_function_ser_schema(lambda v: v + ' bam!')
),
),
extra_behavior='allow',
)
s = SchemaSerializer(schema)
m = Model()
m.__pydantic_extra__ = {'extra': 'extra'}
assert s.to_python(m) == {'extra': 'extra bam!'}
def test_no_warn_on_exclude() -> None:
    """Excluding an existing field must not emit any warning.

    warnings.simplefilter('error') escalates every warning to an exception,
    so this test fails if serialization warns while applying `exclude`.
    """
    with warnings.catch_warnings():
        warnings.simplefilter('error')
        s = SchemaSerializer(
            core_schema.model_schema(
                BasicModel,
                core_schema.model_fields_schema(
                    {
                        'a': core_schema.model_field(core_schema.int_schema()),
                        'b': core_schema.model_field(core_schema.int_schema()),
                    }
                ),
            )
        )
        value = BasicModel(a=0, b=1)
        assert s.to_python(value, exclude={'b'}) == {'a': 0}
        assert s.to_python(value, mode='json', exclude={'b'}) == {'a': 0}
def test_warn_on_missing_field() -> None:
class AModel(BasicModel): ...
class BModel(BasicModel): ...
s = SchemaSerializer(
core_schema.model_schema(
BasicModel,
core_schema.model_fields_schema(
{
'root': core_schema.model_field(
core_schema.tagged_union_schema(
choices={
'a': core_schema.model_schema(
AModel,
core_schema.model_fields_schema(
{
'type': core_schema.model_field(core_schema.literal_schema(['a'])),
'a': core_schema.model_field(core_schema.int_schema()),
}
),
),
'b': core_schema.model_schema(
BModel,
core_schema.model_fields_schema(
{
'type': core_schema.model_field(core_schema.literal_schema(['b'])),
'b': core_schema.model_field(core_schema.int_schema()),
}
),
),
},
discriminator='type',
)
),
}
),
)
)
with pytest.warns(
UserWarning, match='Expected 2 fields but got 1: Expected `AModel` - serialized value may not be as expected .+'
):
value = BasicModel(root=AModel(type='a'))
s.to_python(value)
@pytest.mark.parametrize(
'config,runtime,expected',
[
(True, True, {'my_alias': 1}),
(True, False, {'my_field': 1}),
(True, None, {'my_alias': 1}),
(False, True, {'my_alias': 1}),
(False, False, {'my_field': 1}),
(False, None, {'my_field': 1}),
(None, True, {'my_alias': 1}),
(None, False, {'my_field': 1}),
(None, None, {'my_field': 1}),
],
)
def test_by_alias_and_name_config_interaction(config, runtime, expected) -> None:
"""This test reflects the priority that applies for config vs runtime serialization alias configuration.
If the runtime value (by_alias) is set, that value is used.
If the runtime value is unset, the config value (serialize_by_alias) is used.
If neither are set, the default, False, is used.
"""
class Model:
def __init__(self, my_field: int) -> None:
self.my_field = my_field
schema = core_schema.model_schema(
Model,
core_schema.model_fields_schema(
{
'my_field': core_schema.model_field(core_schema.int_schema(), serialization_alias='my_alias'),
}
),
config=core_schema.CoreConfig(serialize_by_alias=config or False),
)
s = SchemaSerializer(schema)
assert s.to_python(Model(1), by_alias=runtime) == expected
| FieldsSetModel |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.