| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6 to 201) | class_span (dict) | source (stringlengths 21 to 2.38M) | target (stringlengths 1 to 96) |
|---|---|---|---|---|---|
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_cond_format11.py | {
"start": 375,
"end": 3051
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.write("A1", 10)
worksheet.write("A2", 20)
worksheet.write("A3", 30)
worksheet.write("A4", 40)
worksheet.conditional_format(
"A1:A4",
{
"type": "date",
"criteria": "between",
"minimum": datetime.strptime("2011-01-01", "%Y-%m-%d"),
"maximum": datetime.strptime("2011-12-31", "%Y-%m-%d"),
"format": None,
},
)
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1:A4"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:1">
<c r="A1">
<v>10</v>
</c>
</row>
<row r="2" spans="1:1">
<c r="A2">
<v>20</v>
</c>
</row>
<row r="3" spans="1:1">
<c r="A3">
<v>30</v>
</c>
</row>
<row r="4" spans="1:1">
<c r="A4">
<v>40</v>
</c>
</row>
</sheetData>
<conditionalFormatting sqref="A1:A4">
<cfRule type="cellIs" priority="1" operator="between">
<formula>40544</formula>
<formula>40908</formula>
</cfRule>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
python | django-extensions__django-extensions | django_extensions/management/commands/compile_pyc.py | {
"start": 262,
"end": 1322
} | class ____(BaseCommand):
help = "Compile python bytecode files for the project."
requires_system_checks: List[str] = []
def add_arguments(self, parser):
parser.add_argument(
"--path",
"-p",
action="store",
dest="path",
help="Specify path to recurse into",
)
@signalcommand
def handle(self, *args, **options):
project_root = options["path"]
if not project_root:
project_root = getattr(settings, "BASE_DIR", None)
verbosity = options["verbosity"]
if not project_root:
raise CommandError(
"No --path specified and settings.py does not contain BASE_DIR"
)
for root, dirs, filenames in os.walk(project_root):
for filename in fnmatch.filter(filenames, "*.py"):
full_path = _j(root, filename)
if verbosity > 1:
self.stdout.write("Compiling %s...\n" % full_path)
py_compile.compile(full_path)
| Command |
python | huggingface__transformers | src/transformers/models/qwen3_next/modular_qwen3_next.py | {
"start": 27549,
"end": 27615
} | class ____(Qwen2MoeSparseMoeBlock):
pass
| Qwen3NextSparseMoeBlock |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_column_values_to_be_increasing.py | {
"start": 1091,
"end": 7569
} | class ____(ColumnMapExpectation):
"""Expect the column values to be increasing.
By default, this expectation only works for numeric or datetime data.
If 'strictly=True', then this expectation is only satisfied if each consecutive value \
is strictly increasing--equal values are treated as failures.
ExpectColumnValuesToBeIncreasing is a \
Column Map Expectation.
Args:
column (str): \
The column name.
Keyword Args:
strictly (Boolean or None): \
If True, values must be strictly greater than previous values
mostly (None or a float between 0 and 1): \
Successful if at least mostly fraction of values match the expectation. \
For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly).
Other Parameters:
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
See Also:
[ExpectColumnValuesToBeDecreasing](https://greatexpectations.io/expectations/expect_column_values_to_be_decreasing)
""" # noqa: E501 # FIXME CoP
strictly: Union[bool, SuiteParameterDict, None] = None
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "production",
"tags": ["core expectation", "column map expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
map_metric = "column_values.increasing"
success_keys = ("strictly", "mostly")
args_keys = ("column",)
@classmethod
def _prescriptive_template(
cls,
renderer_configuration: RendererConfiguration,
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("column", RendererValueType.STRING),
("strictly", RendererValueType.BOOLEAN),
("mostly", RendererValueType.NUMBER),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
if params.strictly:
template_str = "values must be strictly greater than previous values"
else:
template_str = "values must be greater than or equal to previous values"
if params.mostly and params.mostly.value < 1.0:
renderer_configuration = cls._add_mostly_pct_param(
renderer_configuration=renderer_configuration
)
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if renderer_configuration.include_column_name:
template_str = f"$column {template_str}"
renderer_configuration.template_str = template_str
return renderer_configuration
@classmethod
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
[
"column",
"strictly",
"mostly",
"row_condition",
"condition_parser",
],
)
if params.get("strictly"):
template_str = "values must be strictly greater than previous values"
else:
template_str = "values must be greater than or equal to previous values"
if params["mostly"] is not None:
if isinstance(params["mostly"], (int, float)) and params["mostly"] < 1.0:
params["mostly_pct"] = num_to_str(params["mostly"] * 100, no_scientific=True)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".") # noqa: E501 # FIXME CoP
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if include_column_name:
template_str = f"$column {template_str}"
if params["row_condition"] is not None:
conditional_template_str = parse_row_condition_string(params["row_condition"])
template_str, styling = _style_row_condition(
conditional_template_str,
template_str,
params,
styling,
)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
| ExpectColumnValuesToBeIncreasing |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pep8_naming/N805.py | {
"start": 1422,
"end": 1728
} | class ____:
def self_as_argument(this, self):
pass
def self_as_pos_only_argument(this, self, /):
pass
def self_as_kw_only_argument(this, *, self):
pass
def self_as_varags(this, *self):
pass
def self_as_kwargs(this, **self):
pass
| SelfInArgsClass |
python | google__pytype | pytype/abstract/mixin_test.py | {
"start": 112,
"end": 754
} | class ____(unittest.TestCase):
def test_mixin_super(self):
"""Test the imitation 'super' method on MixinMeta."""
# pylint: disable=g-wrong-blank-lines,undefined-variable
class A:
def f(self, x):
return x
class MyMixin(metaclass=mixin.MixinMeta):
overloads = ("f",)
def f(self, x):
if x == 0:
return "hello"
return MyMixin.super(self.f)(x)
class B(A, MyMixin):
pass
# pylint: enable=g-wrong-blank-lines,undefined-variable
b = B()
v_mixin = b.f(0)
v_a = b.f(1)
self.assertEqual(v_mixin, "hello")
self.assertEqual(v_a, 1)
| MixinMetaTest |
python | django__django | tests/many_to_one/models.py | {
"start": 1379,
"end": 1443
} | class ____(models.Model):
second = models.IntegerField()
| First |
python | sqlalchemy__sqlalchemy | test/sql/test_operators.py | {
"start": 161028,
"end": 161123
} | class ____(enum.Enum):
ONE = enum.auto()
TWO = enum.auto()
THREE = enum.auto()
| MyEnum |
python | getsentry__sentry | src/sentry/sentry_metrics/querying/data/preparation/base.py | {
"start": 345,
"end": 1134
} | class ____:
"""
Represents an intermediate query that can be run through a series of immutable transformations.
Attributes:
metrics_query: The query that needs to be run.
order: The order of the groups that are returned.
limit: The maximum number of groups to return.
unit_family: The UnitFamily of the query.
unit: The unit of the query.
scaling_factor: The scaling factor that was applied on the query to normalize it to unit.
"""
metrics_query: MetricsQuery
order: QueryOrder | None = None
limit: int | None = None
unit_family: UnitFamily | None = None
unit: MeasurementUnit | None = None
scaling_factor: float | None = None
mappers: list[Mapper] = field(default_factory=list)
| IntermediateQuery |
python | huggingface__transformers | tests/utils/import_structures/import_structure_raw_register.py | {
"start": 799,
"end": 937
} | class ____:
def __init__(self):
pass
@requires(backends=("torch",))
def a1():
pass
@requires(
backends=("torch",)
)
| A1 |
python | dagster-io__dagster | python_modules/libraries/dagster-fivetran/dagster_fivetran/translator.py | {
"start": 1626,
"end": 4539
} | class ____:
"""Represents a Fivetran connector, based on data as returned from the API."""
id: str
name: str
service: str
group_id: str
setup_state: str
sync_state: str
paused: bool
succeeded_at: Optional[str]
failed_at: Optional[str]
@property
def url(self) -> str:
return f"https://fivetran.com/dashboard/connectors/{self.id}"
@property
def destination_id(self) -> str:
return self.group_id
@property
def is_connected(self) -> bool:
return self.setup_state == FivetranConnectorSetupStateType.CONNECTED.value
@property
def is_paused(self) -> bool:
return self.paused
@property
def last_sync_completed_at(self) -> datetime:
"""Gets the datetime of the last completed sync of the Fivetran connector.
Returns:
datetime.datetime:
The datetime of the last completed sync of the Fivetran connector.
"""
succeeded_at = parser.parse(self.succeeded_at or MIN_TIME_STR)
failed_at = parser.parse(self.failed_at or MIN_TIME_STR)
return max(succeeded_at, failed_at) # pyright: ignore[reportReturnType]
@property
def is_last_sync_successful(self) -> bool:
"""Gets a boolean representing whether the last completed sync of the Fivetran connector was successful or not.
Returns:
bool:
Whether the last completed sync of the Fivetran connector was successful or not.
"""
succeeded_at = parser.parse(self.succeeded_at or MIN_TIME_STR)
failed_at = parser.parse(self.failed_at or MIN_TIME_STR)
return succeeded_at > failed_at # pyright: ignore[reportOperatorIssue]
def validate_syncable(self) -> bool:
"""Confirms that the connector can be sync. Will raise a Failure in the event that
the connector is either paused or not fully set up.
"""
if self.is_paused:
raise Failure(f"Connector '{self.id}' cannot be synced as it is currently paused.")
if not self.is_connected:
raise Failure(f"Connector '{self.id}' cannot be synced as it has not been setup")
return True
@classmethod
def from_connector_details(
cls,
connector_details: Mapping[str, Any],
) -> "FivetranConnector":
return cls(
id=connector_details["id"],
name=connector_details["schema"],
service=connector_details["service"],
group_id=connector_details["group_id"],
setup_state=connector_details["status"]["setup_state"],
sync_state=connector_details["status"]["sync_state"],
paused=connector_details["paused"],
succeeded_at=connector_details.get("succeeded_at"),
failed_at=connector_details.get("failed_at"),
)
@whitelist_for_serdes
@record
| FivetranConnector |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 80853,
"end": 81042
} | class ____(BaseModel):
"""
Something wrong happened with optimizers
"""
error: str = Field(..., description="Something wrong happened with optimizers")
| OptimizersStatusOneOf1 |
python | plotly__plotly.py | plotly/graph_objs/contourcarpet/contours/_labelfont.py | {
"start": 233,
"end": 10091
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "contourcarpet.contours"
_path_str = "contourcarpet.contours.labelfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Labelfont object
Sets the font used for labeling the contour levels. The default
color comes from the lines, if shown. The default family and
size come from `layout.font`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.contourcarpet.
contours.Labelfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Labelfont
"""
super().__init__("labelfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.contourcarpet.contours.Labelfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.contourcarpet.contours.Labelfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Labelfont |
python | pola-rs__polars | py-polars/src/polars/selectors.py | {
"start": 8307,
"end": 85616
} | class ____(Expr):
"""Base column selector expression/proxy."""
# NOTE: This `= None` is needed to generate the docs with sphinx_accessor.
_pyselector: PySelector = None # type: ignore[assignment]
@classmethod
def _from_pyselector(cls, pyselector: PySelector) -> Selector:
slf = cls()
slf._pyselector = pyselector
slf._pyexpr = PyExpr.new_selector(pyselector)
return slf
def __getstate__(self) -> bytes:
return self._pyexpr.__getstate__()
def __setstate__(self, state: bytes) -> None:
self._pyexpr = F.lit(0)._pyexpr # Initialize with a dummy
self._pyexpr.__setstate__(state)
self._pyselector = self.meta.as_selector()._pyselector
def __repr__(self) -> str:
return str(Expr._from_pyexpr(self._pyexpr))
def __hash__(self) -> int:
# note: this is a suitable hash for selectors (but NOT expressions in general),
# as the repr is guaranteed to be unique across all selector/param permutations
return self._pyselector.hash()
@classmethod
def _by_dtype(
cls, dtypes: builtins.list[PythonDataType | PolarsDataType]
) -> Selector:
selectors = []
concrete_dtypes = []
for dt in dtypes:
if is_polars_dtype(dt):
if dt is pldt.Datetime:
selectors += [datetime()]
elif isinstance(dt, pldt.Datetime) and dt.time_zone == "*":
selectors += [datetime(time_unit=dt.time_unit, time_zone="*")]
elif dt is pldt.Duration:
selectors += [duration()]
elif dt is pldt.Categorical:
selectors += [categorical()]
elif dt is pldt.Enum:
selectors += [enum()]
elif dt is pldt.List:
selectors += [list()]
elif dt is pldt.Array:
selectors += [array()]
elif dt is pldt.Struct:
selectors += [struct()]
elif dt is pldt.Decimal:
selectors += [decimal()]
else:
concrete_dtypes += [dt]
elif isinstance(dt, type):
if dt is int:
selectors += [integer()]
elif dt is builtins.float:
selectors += [float()]
elif dt is bool:
selectors += [boolean()]
elif dt is str:
concrete_dtypes += [pldt.String()]
elif dt is bytes:
concrete_dtypes += [pldt.Binary()]
elif dt is object:
selectors += [object()]
elif dt is NoneType:
concrete_dtypes += [pldt.Null()]
elif dt is pydatetime.time:
concrete_dtypes += [pldt.Time()]
elif dt is pydatetime.datetime:
selectors += [datetime()]
elif dt is pydatetime.timedelta:
selectors += [duration()]
elif dt is pydatetime.date:
selectors += [date()]
elif dt is PyDecimal:
selectors += [decimal()]
elif dt is builtins.list or dt is tuple:
selectors += [list()]
else:
input_type = (
input
if type(input) is type
else f"of type {type(input).__name__!r}"
)
input_detail = "" if type(input) is type else f" (given: {input!r})"
msg = f"cannot parse input {input_type} into Polars selector{input_detail}"
raise TypeError(msg) from None
else:
input_type = (
input
if type(input) is type
else f"of type {type(input).__name__!r}"
)
input_detail = "" if type(input) is type else f" (given: {input!r})"
msg = f"cannot parse input {input_type} into Polars selector{input_detail}"
raise TypeError(msg) from None
dtype_selector = cls._from_pyselector(PySelector.by_dtype(concrete_dtypes))
if len(selectors) == 0:
return dtype_selector
selector = selectors[0]
for s in selectors[1:]:
selector = selector | s
if len(concrete_dtypes) == 0:
return selector
else:
return dtype_selector | selector
@classmethod
def _by_name(cls, names: builtins.list[str], *, strict: bool) -> Selector:
return cls._from_pyselector(PySelector.by_name(names, strict))
def __invert__(cls) -> Selector:
"""Invert the selector."""
return all() - cls
def __add__(self, other: Any) -> Expr:
if is_selector(other):
return self.as_expr().__add__(other.as_expr())
else:
return self.as_expr().__add__(other)
def __radd__(self, other: Any) -> Expr:
if is_selector(other):
msg = "unsupported operand type(s) for op: ('Selector' + 'Selector')"
raise TypeError(msg)
else:
return self.as_expr().__radd__(other)
@overload
def __and__(self, other: Selector) -> Selector: ...
@overload
def __and__(self, other: Any) -> Expr: ...
def __and__(self, other: Any) -> Selector | Expr:
if is_column(other): # @2.0: remove
colname = other.meta.output_name()
other = by_name(colname)
if is_selector(other):
return Selector._from_pyselector(
PySelector.intersect(self._pyselector, other._pyselector)
)
else:
return self.as_expr().__and__(other)
def __rand__(self, other: Any) -> Expr:
return self.as_expr().__rand__(other)
@overload
def __or__(self, other: Selector) -> Selector: ...
@overload
def __or__(self, other: Any) -> Expr: ...
def __or__(self, other: Any) -> Selector | Expr:
if is_column(other): # @2.0: remove
other = by_name(other.meta.output_name())
if is_selector(other):
return Selector._from_pyselector(
PySelector.union(self._pyselector, other._pyselector)
)
else:
return self.as_expr().__or__(other)
def __ror__(self, other: Any) -> Expr:
if is_column(other):
other = by_name(other.meta.output_name())
return self.as_expr().__ror__(other)
@overload
def __sub__(self, other: Selector) -> Selector: ...
@overload
def __sub__(self, other: Any) -> Expr: ...
def __sub__(self, other: Any) -> Selector | Expr:
if is_selector(other):
return Selector._from_pyselector(
PySelector.difference(self._pyselector, other._pyselector)
)
else:
return self.as_expr().__sub__(other)
def __rsub__(self, other: Any) -> NoReturn:
msg = "unsupported operand type(s) for op: ('Expr' - 'Selector')"
raise TypeError(msg)
@overload
def __xor__(self, other: Selector) -> Selector: ...
@overload
def __xor__(self, other: Any) -> Expr: ...
def __xor__(self, other: Any) -> Selector | Expr:
if is_column(other): # @2.0: remove
other = by_name(other.meta.output_name())
if is_selector(other):
return Selector._from_pyselector(
PySelector.exclusive_or(self._pyselector, other._pyselector)
)
else:
return self.as_expr().__xor__(other)
def __rxor__(self, other: Any) -> Expr:
if is_column(other): # @2.0: remove
other = by_name(other.meta.output_name())
return self.as_expr().__rxor__(other)
def exclude(
self,
columns: str | PolarsDataType | Collection[str] | Collection[PolarsDataType],
*more_columns: str | PolarsDataType,
) -> Selector:
"""
Exclude columns from a multi-column expression.
Only works after a wildcard or regex column selection, and you cannot provide
both string column names *and* dtypes (you may prefer to use selectors instead).
Parameters
----------
columns
The name or datatype of the column(s) to exclude. Accepts regular expression
input. Regular expressions should start with `^` and end with `$`.
*more_columns
Additional names or datatypes of columns to exclude, specified as positional
arguments.
"""
exclude_cols: builtins.list[str] = []
exclude_dtypes: builtins.list[PolarsDataType] = []
for item in (
*(
columns
if isinstance(columns, Collection) and not isinstance(columns, str)
else [columns]
),
*more_columns,
):
if isinstance(item, str):
exclude_cols.append(item)
elif is_polars_dtype(item):
exclude_dtypes.append(item)
else:
msg = (
"invalid input for `exclude`"
f"\n\nExpected one or more `str` or `DataType`; found {item!r} instead."
)
raise TypeError(msg)
if exclude_cols and exclude_dtypes:
msg = "cannot exclude by both column name and dtype; use a selector instead"
raise TypeError(msg)
elif exclude_dtypes:
return self - by_dtype(exclude_dtypes)
else:
return self - by_name(exclude_cols, require_all=False)
def as_expr(self) -> Expr:
"""
Materialize the `selector` as a normal expression.
This ensures that the operators `|`, `&`, `~` and `-`
are applied on the data and not on the selector sets.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "colx": ["aa", "bb", "cc"],
... "coly": [True, False, True],
... "colz": [1, 2, 3],
... }
... )
Inverting the boolean selector will choose the non-boolean columns:
>>> df.select(~cs.boolean())
shape: (3, 2)
┌──────┬──────┐
│ colx ┆ colz │
│ --- ┆ --- │
│ str ┆ i64 │
╞══════╪══════╡
│ aa ┆ 1 │
│ bb ┆ 2 │
│ cc ┆ 3 │
└──────┴──────┘
To invert the *values* in the selected boolean columns, we need to
materialize the selector as a standard expression instead:
>>> df.select(~cs.boolean().as_expr())
shape: (3, 1)
┌───────┐
│ coly │
│ --- │
│ bool │
╞═══════╡
│ false │
│ true │
│ false │
└───────┘
"""
return Expr._from_pyexpr(self._pyexpr)
def _re_string(string: str | Collection[str], *, escape: bool = True) -> str:
"""Return escaped regex, potentially representing multiple string fragments."""
if isinstance(string, str):
rx = re_escape(string) if escape else string
else:
strings: builtins.list[str] = []
for st in string:
if isinstance(st, Collection) and not isinstance(st, str): # type: ignore[redundant-expr]
strings.extend(st)
else:
strings.append(st)
rx = "|".join((re_escape(x) if escape else x) for x in strings)
return f"({rx})"
def empty() -> Selector:
"""
Select no columns.
This is useful for composition with other selectors.
See Also
--------
all : Select all columns in the current scope.
Examples
--------
>>> import polars.selectors as cs
>>> pl.DataFrame({"a": 1, "b": 2}).select(cs.empty())
shape: (0, 0)
┌┐
╞╡
└┘
"""
return Selector._from_pyselector(PySelector.empty())
def all() -> Selector:
"""
Select all columns.
See Also
--------
first : Select the first column in the current scope.
last : Select the last column in the current scope.
Examples
--------
>>> from datetime import date
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "dt": [date(1999, 12, 31), date(2024, 1, 1)],
... "value": [1_234_500, 5_000_555],
... },
... schema_overrides={"value": pl.Int32},
... )
Select all columns, casting them to string:
>>> df.select(cs.all().cast(pl.String))
shape: (2, 2)
┌────────────┬─────────┐
│ dt ┆ value │
│ --- ┆ --- │
│ str ┆ str │
╞════════════╪═════════╡
│ 1999-12-31 ┆ 1234500 │
│ 2024-01-01 ┆ 5000555 │
└────────────┴─────────┘
Select all columns *except* for those matching the given dtypes:
>>> df.select(cs.all() - cs.numeric())
shape: (2, 1)
┌────────────┐
│ dt │
│ --- │
│ date │
╞════════════╡
│ 1999-12-31 │
│ 2024-01-01 │
└────────────┘
"""
return Selector._from_pyselector(PySelector.all())
def alpha(ascii_only: bool = False, *, ignore_spaces: bool = False) -> Selector: # noqa: FBT001
r"""
Select all columns with alphabetic names (eg: only letters).
Parameters
----------
ascii_only
Indicate whether to consider only ASCII alphabetic characters, or the full
Unicode range of valid letters (accented, idiographic, etc).
ignore_spaces
Indicate whether to ignore the presence of spaces in column names; if so,
only the other (non-space) characters are considered.
Notes
-----
Matching column names cannot contain *any* non-alphabetic characters. Note
that the definition of "alphabetic" consists of all valid Unicode alphabetic
characters (`\p{Alphabetic}`) by default; this can be changed by setting
`ascii_only=True`.
Examples
--------
>>> import polars as pl
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "no1": [100, 200, 300],
... "café": ["espresso", "latte", "mocha"],
... "t or f": [True, False, None],
... "hmm": ["aaa", "bbb", "ccc"],
... "都市": ["東京", "大阪", "京都"],
... }
... )
Select columns with alphabetic names; note that accented
characters and kanji are recognised as alphabetic here:
>>> df.select(cs.alpha())
shape: (3, 3)
┌──────────┬─────┬──────┐
│ café ┆ hmm ┆ 都市 │
│ --- ┆ --- ┆ --- │
│ str ┆ str ┆ str │
╞══════════╪═════╪══════╡
│ espresso ┆ aaa ┆ 東京 │
│ latte ┆ bbb ┆ 大阪 │
│ mocha ┆ ccc ┆ 京都 │
└──────────┴─────┴──────┘
Constrain the definition of "alphabetic" to ASCII characters only:
>>> df.select(cs.alpha(ascii_only=True))
shape: (3, 1)
┌─────┐
│ hmm │
│ --- │
│ str │
╞═════╡
│ aaa │
│ bbb │
│ ccc │
└─────┘
>>> df.select(cs.alpha(ascii_only=True, ignore_spaces=True))
shape: (3, 2)
┌────────┬─────┐
│ t or f ┆ hmm │
│ --- ┆ --- │
│ bool ┆ str │
╞════════╪═════╡
│ true ┆ aaa │
│ false ┆ bbb │
│ null ┆ ccc │
└────────┴─────┘
Select all columns *except* for those with alphabetic names:
>>> df.select(~cs.alpha())
shape: (3, 2)
┌─────┬────────┐
│ no1 ┆ t or f │
│ --- ┆ --- │
│ i64 ┆ bool │
╞═════╪════════╡
│ 100 ┆ true │
│ 200 ┆ false │
│ 300 ┆ null │
└─────┴────────┘
>>> df.select(~cs.alpha(ignore_spaces=True))
shape: (3, 1)
┌─────┐
│ no1 │
│ --- │
│ i64 │
╞═════╡
│ 100 │
│ 200 │
│ 300 │
└─────┘
"""
# note that we need to supply a pattern compatible with the *rust* regex crate
re_alpha = r"a-zA-Z" if ascii_only else r"\p{Alphabetic}"
re_space = " " if ignore_spaces else ""
return Selector._from_pyselector(PySelector.matches(f"^[{re_alpha}{re_space}]+$"))
def alphanumeric(
ascii_only: bool = False, # noqa: FBT001
*,
ignore_spaces: bool = False,
) -> Selector:
r"""
Select all columns with alphanumeric names (eg: only letters and the digits 0-9).
Parameters
----------
ascii_only
Indicate whether to consider only ASCII alphabetic characters, or the full
Unicode range of valid letters (accented, idiographic, etc).
ignore_spaces
Indicate whether to ignore the presence of spaces in column names; if so,
only the other (non-space) characters are considered.
Notes
-----
Matching column names cannot contain *any* non-alphabetic or integer characters.
Note that the definition of "alphabetic" consists of all valid Unicode alphabetic
characters (`\p{Alphabetic}`) and digit characters (`\d`) by default; this
can be changed by setting `ascii_only=True`.
Examples
--------
>>> import polars as pl
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "1st_col": [100, 200, 300],
... "flagged": [True, False, True],
... "00prefix": ["01:aa", "02:bb", "03:cc"],
... "last col": ["x", "y", "z"],
... }
... )
Select columns with alphanumeric names:
>>> df.select(cs.alphanumeric())
shape: (3, 2)
┌─────────┬──────────┐
│ flagged ┆ 00prefix │
│ --- ┆ --- │
│ bool ┆ str │
╞═════════╪══════════╡
│ true ┆ 01:aa │
│ false ┆ 02:bb │
│ true ┆ 03:cc │
└─────────┴──────────┘
>>> df.select(cs.alphanumeric(ignore_spaces=True))
shape: (3, 3)
┌─────────┬──────────┬──────────┐
│ flagged ┆ 00prefix ┆ last col │
│ --- ┆ --- ┆ --- │
│ bool ┆ str ┆ str │
╞═════════╪══════════╪══════════╡
│ true ┆ 01:aa ┆ x │
│ false ┆ 02:bb ┆ y │
│ true ┆ 03:cc ┆ z │
└─────────┴──────────┴──────────┘
Select all columns *except* for those with alphanumeric names:
>>> df.select(~cs.alphanumeric())
shape: (3, 2)
┌─────────┬──────────┐
│ 1st_col ┆ last col │
│ --- ┆ --- │
│ i64 ┆ str │
╞═════════╪══════════╡
│ 100 ┆ x │
│ 200 ┆ y │
│ 300 ┆ z │
└─────────┴──────────┘
>>> df.select(~cs.alphanumeric(ignore_spaces=True))
shape: (3, 1)
┌─────────┐
│ 1st_col │
│ --- │
│ i64 │
╞═════════╡
│ 100 │
│ 200 │
│ 300 │
└─────────┘
"""
# note that we need to supply patterns compatible with the *rust* regex crate
re_alpha = r"a-zA-Z" if ascii_only else r"\p{Alphabetic}"
re_digit = "0-9" if ascii_only else r"\d"
re_space = " " if ignore_spaces else ""
return Selector._from_pyselector(
PySelector.matches(f"^[{re_alpha}{re_digit}{re_space}]+$")
)
def binary() -> Selector:
"""
Select all binary columns.
See Also
--------
by_dtype : Select all columns matching the given dtype(s).
string : Select all string columns (optionally including categoricals).
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame({"a": [b"hello"], "b": ["world"], "c": [b"!"], "d": [":)"]})
>>> df
shape: (1, 4)
┌──────────┬───────┬────────┬─────┐
│ a ┆ b ┆ c ┆ d │
│ --- ┆ --- ┆ --- ┆ --- │
│ binary ┆ str ┆ binary ┆ str │
╞══════════╪═══════╪════════╪═════╡
│ b"hello" ┆ world ┆ b"!" ┆ :) │
└──────────┴───────┴────────┴─────┘
Select binary columns and export as a dict:
>>> df.select(cs.binary()).to_dict(as_series=False)
{'a': [b'hello'], 'c': [b'!']}
Select all columns *except* for those that are binary:
>>> df.select(~cs.binary()).to_dict(as_series=False)
{'b': ['world'], 'd': [':)']}
"""
return by_dtype([Binary])
def boolean() -> Selector:
"""
Select all boolean columns.
See Also
--------
by_dtype : Select all columns matching the given dtype(s).
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame({"n": range(1, 5)}).with_columns(n_even=pl.col("n") % 2 == 0)
>>> df
shape: (4, 2)
┌─────┬────────┐
│ n ┆ n_even │
│ --- ┆ --- │
│ i64 ┆ bool │
╞═════╪════════╡
│ 1 ┆ false │
│ 2 ┆ true │
│ 3 ┆ false │
│ 4 ┆ true │
└─────┴────────┘
Select and invert boolean columns:
>>> df.with_columns(is_odd=cs.boolean().not_())
shape: (4, 3)
┌─────┬────────┬────────┐
│ n ┆ n_even ┆ is_odd │
│ --- ┆ --- ┆ --- │
│ i64 ┆ bool ┆ bool │
╞═════╪════════╪════════╡
│ 1 ┆ false ┆ true │
│ 2 ┆ true ┆ false │
│ 3 ┆ false ┆ true │
│ 4 ┆ true ┆ false │
└─────┴────────┴────────┘
Select all columns *except* for those that are boolean:
>>> df.select(~cs.boolean())
shape: (4, 1)
┌─────┐
│ n │
│ --- │
│ i64 │
╞═════╡
│ 1 │
│ 2 │
│ 3 │
│ 4 │
└─────┘
"""
return by_dtype([Boolean])
def by_dtype(
*dtypes: (
PolarsDataType
| PythonDataType
| Iterable[PolarsDataType]
| Iterable[PythonDataType]
),
) -> Selector:
"""
Select all columns matching the given dtypes.
See Also
--------
by_name : Select all columns matching the given names.
by_index : Select all columns matching the given indices.
Examples
--------
>>> from datetime import date
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "dt": [date(1999, 12, 31), date(2024, 1, 1), date(2010, 7, 5)],
... "value": [1_234_500, 5_000_555, -4_500_000],
... "other": ["foo", "bar", "foo"],
... }
... )
Select all columns with date or string dtypes:
>>> df.select(cs.by_dtype(pl.Date, pl.String))
shape: (3, 2)
┌────────────┬───────┐
│ dt ┆ other │
│ --- ┆ --- │
│ date ┆ str │
╞════════════╪═══════╡
│ 1999-12-31 ┆ foo │
│ 2024-01-01 ┆ bar │
│ 2010-07-05 ┆ foo │
└────────────┴───────┘
Select all columns that are not of date or string dtype:
>>> df.select(~cs.by_dtype(pl.Date, pl.String))
shape: (3, 1)
┌──────────┐
│ value │
│ --- │
│ i64 │
╞══════════╡
│ 1234500 │
│ 5000555 │
│ -4500000 │
└──────────┘
Group by string columns and sum the numeric columns:
>>> df.group_by(cs.string()).agg(cs.numeric().sum()).sort(by="other")
shape: (2, 2)
┌───────┬──────────┐
│ other ┆ value │
│ --- ┆ --- │
│ str ┆ i64 │
╞═══════╪══════════╡
│ bar ┆ 5000555 │
│ foo ┆ -3265500 │
└───────┴──────────┘
"""
all_dtypes: builtins.list[PolarsDataType | PythonDataType] = []
for tp in dtypes:
if is_polars_dtype(tp) or isinstance(tp, type):
all_dtypes.append(tp)
elif isinstance(tp, Collection):
for t in tp:
if not (is_polars_dtype(t) or isinstance(t, type)):
msg = f"invalid dtype: {t!r}"
raise TypeError(msg)
all_dtypes.append(t)
else:
msg = f"invalid dtype: {tp!r}"
raise TypeError(msg)
return Selector._by_dtype(all_dtypes)
def by_index(
*indices: int | range | Sequence[int | range], require_all: bool = True
) -> Selector:
"""
Select all columns matching the given indices (or range objects).
Parameters
----------
*indices
One or more column indices (or range objects).
Negative indexing is supported.
require_all
By default, all specified indices must be valid; if any index is out of bounds,
an error is raised. If set to `False`, out-of-bounds indices are ignored
Notes
-----
Matching columns are returned in the order in which their indexes
appear in the selector, not the underlying schema order.
See Also
--------
by_dtype : Select all columns matching the given dtypes.
by_name : Select all columns matching the given names.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "key": ["abc"],
... **{f"c{i:02}": [0.5 * i] for i in range(100)},
... },
... )
>>> print(df)
shape: (1, 101)
┌─────┬─────┬─────┬─────┬───┬──────┬──────┬──────┬──────┐
│ key ┆ c00 ┆ c01 ┆ c02 ┆ … ┆ c96 ┆ c97 ┆ c98 ┆ c99 │
│ --- ┆ --- ┆ --- ┆ --- ┆ ┆ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ f64 ┆ f64 ┆ f64 ┆ ┆ f64 ┆ f64 ┆ f64 ┆ f64 │
╞═════╪═════╪═════╪═════╪═══╪══════╪══════╪══════╪══════╡
│ abc ┆ 0.0 ┆ 0.5 ┆ 1.0 ┆ … ┆ 48.0 ┆ 48.5 ┆ 49.0 ┆ 49.5 │
└─────┴─────┴─────┴─────┴───┴──────┴──────┴──────┴──────┘
Select columns by index ("key" column and the two first/last columns):
>>> df.select(cs.by_index(0, 1, 2, -2, -1))
shape: (1, 5)
┌─────┬─────┬─────┬──────┬──────┐
│ key ┆ c00 ┆ c01 ┆ c98 ┆ c99 │
│ --- ┆ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ f64 ┆ f64 ┆ f64 ┆ f64 │
╞═════╪═════╪═════╪══════╪══════╡
│ abc ┆ 0.0 ┆ 0.5 ┆ 49.0 ┆ 49.5 │
└─────┴─────┴─────┴──────┴──────┘
Select the "key" column and use a `range` object to select various columns.
Note that you can freely mix and match integer indices and `range` objects:
>>> df.select(cs.by_index(0, range(1, 101, 20)))
shape: (1, 6)
┌─────┬─────┬──────┬──────┬──────┬──────┐
│ key ┆ c00 ┆ c20 ┆ c40 ┆ c60 ┆ c80 │
│ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ f64 ┆ f64 ┆ f64 ┆ f64 ┆ f64 │
╞═════╪═════╪══════╪══════╪══════╪══════╡
│ abc ┆ 0.0 ┆ 10.0 ┆ 20.0 ┆ 30.0 ┆ 40.0 │
└─────┴─────┴──────┴──────┴──────┴──────┘
>>> df.select(cs.by_index(0, range(101, 0, -25), require_all=False))
shape: (1, 5)
┌─────┬──────┬──────┬──────┬─────┐
│ key ┆ c75 ┆ c50 ┆ c25 ┆ c00 │
│ --- ┆ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ f64 ┆ f64 ┆ f64 ┆ f64 │
╞═════╪══════╪══════╪══════╪═════╡
│ abc ┆ 37.5 ┆ 25.0 ┆ 12.5 ┆ 0.0 │
└─────┴──────┴──────┴──────┴─────┘
Select all columns *except* for the even-indexed ones:
>>> df.select(~cs.by_index(range(1, 100, 2)))
shape: (1, 51)
┌─────┬─────┬─────┬─────┬───┬──────┬──────┬──────┬──────┐
│ key ┆ c01 ┆ c03 ┆ c05 ┆ … ┆ c93 ┆ c95 ┆ c97 ┆ c99 │
│ --- ┆ --- ┆ --- ┆ --- ┆ ┆ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ f64 ┆ f64 ┆ f64 ┆ ┆ f64 ┆ f64 ┆ f64 ┆ f64 │
╞═════╪═════╪═════╪═════╪═══╪══════╪══════╪══════╪══════╡
│ abc ┆ 0.5 ┆ 1.5 ┆ 2.5 ┆ … ┆ 46.5 ┆ 47.5 ┆ 48.5 ┆ 49.5 │
└─────┴─────┴─────┴─────┴───┴──────┴──────┴──────┴──────┘
"""
all_indices: builtins.list[int] = []
for idx in indices:
if isinstance(idx, (range, Sequence)):
all_indices.extend(idx) # type: ignore[arg-type]
elif isinstance(idx, int):
all_indices.append(idx)
else:
msg = f"invalid index value: {idx!r}"
raise TypeError(msg)
return Selector._from_pyselector(PySelector.by_index(all_indices, require_all))
def by_name(*names: str | Collection[str], require_all: bool = True) -> Selector:
"""
Select all columns matching the given names.
.. versionadded:: 0.20.27
The `require_all` parameter was added.
Parameters
----------
*names
One or more names of columns to select.
require_all
Whether to match *all* names (the default) or *any* of the names.
Notes
-----
Matching columns are returned in the order in which they are declared in
the selector, not the underlying schema order.
See Also
--------
by_dtype : Select all columns matching the given dtypes.
by_index : Select all columns matching the given indices.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": ["x", "y"],
... "bar": [123, 456],
... "baz": [2.0, 5.5],
... "zap": [False, True],
... }
... )
Select columns by name:
>>> df.select(cs.by_name("foo", "bar"))
shape: (2, 2)
┌─────┬─────┐
│ foo ┆ bar │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ x ┆ 123 │
│ y ┆ 456 │
└─────┴─────┘
Match *any* of the given columns by name:
>>> df.select(cs.by_name("baz", "moose", "foo", "bear", require_all=False))
shape: (2, 2)
┌─────┬─────┐
│ baz ┆ foo │
│ --- ┆ --- │
│ f64 ┆ str │
╞═════╪═════╡
│ 2.0 ┆ x │
│ 5.5 ┆ y │
└─────┴─────┘
Match all columns *except* for those given:
>>> df.select(~cs.by_name("foo", "bar"))
shape: (2, 2)
┌─────┬───────┐
│ baz ┆ zap │
│ --- ┆ --- │
│ f64 ┆ bool │
╞═════╪═══════╡
│ 2.0 ┆ false │
│ 5.5 ┆ true │
└─────┴───────┘
"""
all_names = []
for nm in names:
if isinstance(nm, str):
all_names.append(nm)
elif isinstance(nm, Collection):
for n in nm:
if not isinstance(n, str):
msg = f"invalid name: {n!r}"
raise TypeError(msg)
all_names.append(n)
else:
msg = f"invalid name: {nm!r}"
raise TypeError(msg)
return Selector._by_name(all_names, strict=require_all)
@unstable()
def enum() -> Selector:
"""
Select all enum columns.
.. warning::
This functionality is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
See Also
--------
by_dtype : Select all columns matching the given dtype(s).
categorical : Select all categorical columns.
string : Select all string columns (optionally including categoricals).
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": ["xx", "yy"],
... "bar": [123, 456],
... "baz": [2.0, 5.5],
... },
... schema_overrides={"foo": pl.Enum(["xx", "yy"])},
... )
Select all enum columns:
>>> df.select(cs.enum())
shape: (2, 1)
┌──────┐
│ foo │
│ --- │
│ enum │
╞══════╡
│ xx │
│ yy │
└──────┘
Select all columns *except* for those that are enum:
>>> df.select(~cs.enum())
shape: (2, 2)
┌─────┬─────┐
│ bar ┆ baz │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪═════╡
│ 123 ┆ 2.0 │
│ 456 ┆ 5.5 │
└─────┴─────┘
"""
return Selector._from_pyselector(PySelector.enum_())
@unstable()
def list(inner: None | Selector = None) -> Selector:
"""
Select all list columns.
.. warning::
This functionality is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
See Also
--------
by_dtype : Select all columns matching the given dtype(s).
array : Select all array columns.
nested : Select all nested columns.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": [["xx", "yy"], ["x"]],
... "bar": [123, 456],
... "baz": [2.0, 5.5],
... },
... )
Select all list columns:
>>> df.select(cs.list())
shape: (2, 1)
┌──────────────┐
│ foo │
│ --- │
│ list[str] │
╞══════════════╡
│ ["xx", "yy"] │
│ ["x"] │
└──────────────┘
Select all columns *except* for those that are list:
>>> df.select(~cs.list())
shape: (2, 2)
┌─────┬─────┐
│ bar ┆ baz │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪═════╡
│ 123 ┆ 2.0 │
│ 456 ┆ 5.5 │
└─────┴─────┘
Select all list columns with a certain matching inner type:
>>> df.select(cs.list(cs.string()))
shape: (2, 1)
┌──────────────┐
│ foo │
│ --- │
│ list[str] │
╞══════════════╡
│ ["xx", "yy"] │
│ ["x"] │
└──────────────┘
>>> df.select(cs.list(cs.integer()))
shape: (0, 0)
┌┐
╞╡
└┘
"""
inner_s = inner._pyselector if inner is not None else None
return Selector._from_pyselector(PySelector.list(inner_s))
@unstable()
def array(inner: Selector | None = None, *, width: int | None = None) -> Selector:
"""
Select all array columns.
.. warning::
This functionality is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
See Also
--------
by_dtype : Select all columns matching the given dtype(s).
list : Select all list columns.
nested : Select all nested columns.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": [["xx", "yy"], ["x", "y"]],
... "bar": [123, 456],
... "baz": [2.0, 5.5],
... },
... schema_overrides={"foo": pl.Array(pl.String, 2)},
... )
Select all array columns:
>>> df.select(cs.array())
shape: (2, 1)
┌───────────────┐
│ foo │
│ --- │
│ array[str, 2] │
╞═══════════════╡
│ ["xx", "yy"] │
│ ["x", "y"] │
└───────────────┘
Select all columns *except* for those that are array:
>>> df.select(~cs.array())
shape: (2, 2)
┌─────┬─────┐
│ bar ┆ baz │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪═════╡
│ 123 ┆ 2.0 │
│ 456 ┆ 5.5 │
└─────┴─────┘
Select all array columns with a certain matching inner type:
>>> df.select(cs.array(cs.string()))
shape: (2, 1)
┌───────────────┐
│ foo │
│ --- │
│ array[str, 2] │
╞═══════════════╡
│ ["xx", "yy"] │
│ ["x", "y"] │
└───────────────┘
>>> df.select(cs.array(cs.integer()))
shape: (0, 0)
┌┐
╞╡
└┘
>>> df.select(cs.array(width=2))
shape: (2, 1)
┌───────────────┐
│ foo │
│ --- │
│ array[str, 2] │
╞═══════════════╡
│ ["xx", "yy"] │
│ ["x", "y"] │
└───────────────┘
>>> df.select(cs.array(width=3))
shape: (0, 0)
┌┐
╞╡
└┘
"""
inner_s = inner._pyselector if inner is not None else None
return Selector._from_pyselector(PySelector.array(inner_s, width))
@unstable()
def struct() -> Selector:
"""
Select all struct columns.
.. warning::
This functionality is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
See Also
--------
by_dtype : Select all columns matching the given dtype(s).
list : Select all list columns.
array : Select all array columns.
nested : Select all nested columns.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": [{"a": "xx", "b": "z"}, {"a": "x", "b": "y"}],
... "bar": [123, 456],
... "baz": [2.0, 5.5],
... },
... )
Select all struct columns:
>>> df.select(cs.struct())
shape: (2, 1)
┌────────────┐
│ foo │
│ --- │
│ struct[2] │
╞════════════╡
│ {"xx","z"} │
│ {"x","y"} │
└────────────┘
Select all columns *except* for those that are struct:
>>> df.select(~cs.struct())
shape: (2, 2)
┌─────┬─────┐
│ bar ┆ baz │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪═════╡
│ 123 ┆ 2.0 │
│ 456 ┆ 5.5 │
└─────┴─────┘
"""
return Selector._from_pyselector(PySelector.struct_())
@unstable()
def nested() -> Selector:
"""
Select all nested columns.
A nested column is a list, array or struct.
.. warning::
This functionality is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
See Also
--------
by_dtype : Select all columns matching the given dtype(s).
list : Select all list columns.
array : Select all array columns.
struct : Select all struct columns.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": [{"a": "xx", "b": "z"}, {"a": "x", "b": "y"}],
... "bar": [123, 456],
... "baz": [2.0, 5.5],
... "wow": [[1, 2], [3]],
... },
... )
Select all nested columns:
>>> df.select(cs.nested())
shape: (2, 2)
┌────────────┬───────────┐
│ foo ┆ wow │
│ --- ┆ --- │
│ struct[2] ┆ list[i64] │
╞════════════╪═══════════╡
│ {"xx","z"} ┆ [1, 2] │
│ {"x","y"} ┆ [3] │
└────────────┴───────────┘
Select all columns *except* for those that are nested:
>>> df.select(~cs.nested())
shape: (2, 2)
┌─────┬─────┐
│ bar ┆ baz │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪═════╡
│ 123 ┆ 2.0 │
│ 456 ┆ 5.5 │
└─────┴─────┘
"""
return Selector._from_pyselector(PySelector.nested())
def categorical() -> Selector:
"""
Select all categorical columns.
See Also
--------
by_dtype : Select all columns matching the given dtype(s).
string : Select all string columns (optionally including categoricals).
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": ["xx", "yy"],
... "bar": [123, 456],
... "baz": [2.0, 5.5],
... },
... schema_overrides={"foo": pl.Categorical},
... )
Select all categorical columns:
>>> df.select(cs.categorical())
shape: (2, 1)
┌─────┐
│ foo │
│ --- │
│ cat │
╞═════╡
│ xx │
│ yy │
└─────┘
Select all columns *except* for those that are categorical:
>>> df.select(~cs.categorical())
shape: (2, 2)
┌─────┬─────┐
│ bar ┆ baz │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪═════╡
│ 123 ┆ 2.0 │
│ 456 ┆ 5.5 │
└─────┴─────┘
"""
return Selector._from_pyselector(PySelector.categorical())
def contains(*substring: str) -> Selector:
"""
Select columns whose names contain the given literal substring(s).
Parameters
----------
substring
Substring(s) that matching column names should contain.
See Also
--------
matches : Select all columns that match the given regex pattern.
ends_with : Select columns that end with the given substring(s).
starts_with : Select columns that start with the given substring(s).
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": ["x", "y"],
... "bar": [123, 456],
... "baz": [2.0, 5.5],
... "zap": [False, True],
... }
... )
Select columns that contain the substring 'ba':
>>> df.select(cs.contains("ba"))
shape: (2, 2)
┌─────┬─────┐
│ bar ┆ baz │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪═════╡
│ 123 ┆ 2.0 │
│ 456 ┆ 5.5 │
└─────┴─────┘
Select columns that contain the substring 'ba' or the letter 'z':
>>> df.select(cs.contains("ba", "z"))
shape: (2, 3)
┌─────┬─────┬───────┐
│ bar ┆ baz ┆ zap │
│ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ bool │
╞═════╪═════╪═══════╡
│ 123 ┆ 2.0 ┆ false │
│ 456 ┆ 5.5 ┆ true │
└─────┴─────┴───────┘
Select all columns *except* for those that contain the substring 'ba':
>>> df.select(~cs.contains("ba"))
shape: (2, 2)
┌─────┬───────┐
│ foo ┆ zap │
│ --- ┆ --- │
│ str ┆ bool │
╞═════╪═══════╡
│ x ┆ false │
│ y ┆ true │
└─────┴───────┘
"""
escaped_substring = _re_string(substring)
raw_params = f"^.*{escaped_substring}.*$"
return Selector._from_pyselector(PySelector.matches(raw_params))
def date() -> Selector:
"""
Select all date columns.
See Also
--------
datetime : Select all datetime columns, optionally filtering by time unit/zone.
duration : Select all duration columns, optionally filtering by time unit.
temporal : Select all temporal columns.
time : Select all time columns.
Examples
--------
>>> from datetime import date, datetime, time
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "dtm": [datetime(2001, 5, 7, 10, 25), datetime(2031, 12, 31, 0, 30)],
... "dt": [date(1999, 12, 31), date(2024, 8, 9)],
... "tm": [time(0, 0, 0), time(23, 59, 59)],
... },
... )
Select all date columns:
>>> df.select(cs.date())
shape: (2, 1)
┌────────────┐
│ dt │
│ --- │
│ date │
╞════════════╡
│ 1999-12-31 │
│ 2024-08-09 │
└────────────┘
Select all columns *except* for those that are dates:
>>> df.select(~cs.date())
shape: (2, 2)
┌─────────────────────┬──────────┐
│ dtm ┆ tm │
│ --- ┆ --- │
│ datetime[μs] ┆ time │
╞═════════════════════╪══════════╡
│ 2001-05-07 10:25:00 ┆ 00:00:00 │
│ 2031-12-31 00:30:00 ┆ 23:59:59 │
└─────────────────────┴──────────┘
"""
return by_dtype([Date])
def datetime(
time_unit: TimeUnit | Collection[TimeUnit] | None = None,
time_zone: (
str | pydatetime.timezone | Collection[str | pydatetime.timezone | None] | None
) = (
"*",
None,
),
) -> Selector:
"""
Select all datetime columns, optionally filtering by time unit/zone.
Parameters
----------
time_unit
One (or more) of the allowed timeunit precision strings, "ms", "us", and "ns".
Omit to select columns with any valid timeunit.
time_zone
* One or more timezone strings, as defined in zoneinfo (to see valid options
run `import zoneinfo; zoneinfo.available_timezones()` for a full list).
* Set `None` to select Datetime columns that do not have a timezone.
* Set "*" to select Datetime columns that have *any* timezone.
See Also
--------
date : Select all date columns.
duration : Select all duration columns, optionally filtering by time unit.
temporal : Select all temporal columns.
time : Select all time columns.
Examples
--------
>>> from datetime import datetime, date, timezone
>>> import polars.selectors as cs
>>> from zoneinfo import ZoneInfo
>>> tokyo_tz = ZoneInfo("Asia/Tokyo")
>>> utc_tz = timezone.utc
>>> df = pl.DataFrame(
... {
... "tstamp_tokyo": [
... datetime(1999, 7, 21, 5, 20, 16, 987654, tzinfo=tokyo_tz),
... datetime(2000, 5, 16, 6, 21, 21, 123465, tzinfo=tokyo_tz),
... ],
... "tstamp_utc": [
... datetime(2023, 4, 10, 12, 14, 16, 999000, tzinfo=utc_tz),
... datetime(2025, 8, 25, 14, 18, 22, 666000, tzinfo=utc_tz),
... ],
... "tstamp": [
... datetime(2000, 11, 20, 18, 12, 16, 600000),
... datetime(2020, 10, 30, 10, 20, 25, 123000),
... ],
... "dt": [date(1999, 12, 31), date(2010, 7, 5)],
... },
... schema_overrides={
... "tstamp_tokyo": pl.Datetime("ns", "Asia/Tokyo"),
... "tstamp_utc": pl.Datetime("us", "UTC"),
... },
... )
Select all datetime columns:
>>> df.select(cs.datetime())
shape: (2, 3)
┌────────────────────────────────┬─────────────────────────────┬─────────────────────────┐
│ tstamp_tokyo ┆ tstamp_utc ┆ tstamp │
│ --- ┆ --- ┆ --- │
│ datetime[ns, Asia/Tokyo] ┆ datetime[μs, UTC] ┆ datetime[μs] │
╞════════════════════════════════╪═════════════════════════════╪═════════════════════════╡
│ 1999-07-21 05:20:16.987654 JST ┆ 2023-04-10 12:14:16.999 UTC ┆ 2000-11-20 18:12:16.600 │
│ 2000-05-16 06:21:21.123465 JST ┆ 2025-08-25 14:18:22.666 UTC ┆ 2020-10-30 10:20:25.123 │
└────────────────────────────────┴─────────────────────────────┴─────────────────────────┘
Select all datetime columns that have 'us' precision:
>>> df.select(cs.datetime("us"))
shape: (2, 2)
┌─────────────────────────────┬─────────────────────────┐
│ tstamp_utc ┆ tstamp │
│ --- ┆ --- │
│ datetime[μs, UTC] ┆ datetime[μs] │
╞═════════════════════════════╪═════════════════════════╡
│ 2023-04-10 12:14:16.999 UTC ┆ 2000-11-20 18:12:16.600 │
│ 2025-08-25 14:18:22.666 UTC ┆ 2020-10-30 10:20:25.123 │
└─────────────────────────────┴─────────────────────────┘
Select all datetime columns that have *any* timezone:
>>> df.select(cs.datetime(time_zone="*"))
shape: (2, 2)
┌────────────────────────────────┬─────────────────────────────┐
│ tstamp_tokyo ┆ tstamp_utc │
│ --- ┆ --- │
│ datetime[ns, Asia/Tokyo] ┆ datetime[μs, UTC] │
╞════════════════════════════════╪═════════════════════════════╡
│ 1999-07-21 05:20:16.987654 JST ┆ 2023-04-10 12:14:16.999 UTC │
│ 2000-05-16 06:21:21.123465 JST ┆ 2025-08-25 14:18:22.666 UTC │
└────────────────────────────────┴─────────────────────────────┘
Select all datetime columns that have a *specific* timezone:
>>> df.select(cs.datetime(time_zone="UTC"))
shape: (2, 1)
┌─────────────────────────────┐
│ tstamp_utc │
│ --- │
│ datetime[μs, UTC] │
╞═════════════════════════════╡
│ 2023-04-10 12:14:16.999 UTC │
│ 2025-08-25 14:18:22.666 UTC │
└─────────────────────────────┘
Select all datetime columns that have NO timezone:
>>> df.select(cs.datetime(time_zone=None))
shape: (2, 1)
┌─────────────────────────┐
│ tstamp │
│ --- │
│ datetime[μs] │
╞═════════════════════════╡
│ 2000-11-20 18:12:16.600 │
│ 2020-10-30 10:20:25.123 │
└─────────────────────────┘
Select all columns *except* for datetime columns:
>>> df.select(~cs.datetime())
shape: (2, 1)
┌────────────┐
│ dt │
│ --- │
│ date │
╞════════════╡
│ 1999-12-31 │
│ 2010-07-05 │
└────────────┘
""" # noqa: W505
if time_unit is None:
time_unit_lst = ["ms", "us", "ns"]
else:
time_unit_lst = (
[time_unit] if isinstance(time_unit, str) else builtins.list(time_unit)
)
time_zone_lst: builtins.list[str | pydatetime.timezone | None]
if time_zone is None:
time_zone_lst = [None]
elif time_zone:
time_zone_lst = (
[time_zone]
if isinstance(time_zone, (str, pydatetime.timezone))
else builtins.list(time_zone)
)
return Selector._from_pyselector(PySelector.datetime(time_unit_lst, time_zone_lst))
def decimal() -> Selector:
"""
Select all decimal columns.
See Also
--------
float : Select all float columns.
integer : Select all integer columns.
numeric : Select all numeric columns.
Examples
--------
>>> from decimal import Decimal as D
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": ["x", "y"],
... "bar": [D(123), D(456)],
... "baz": [D("2.0005"), D("-50.5555")],
... },
... schema_overrides={"baz": pl.Decimal(scale=5, precision=10)},
... )
Select all decimal columns:
>>> df.select(cs.decimal())
shape: (2, 2)
┌───────────────┬───────────────┐
│ bar ┆ baz │
│ --- ┆ --- │
│ decimal[38,0] ┆ decimal[10,5] │
╞═══════════════╪═══════════════╡
│ 123 ┆ 2.00050 │
│ 456 ┆ -50.55550 │
└───────────────┴───────────────┘
Select all columns *except* the decimal ones:
>>> df.select(~cs.decimal())
shape: (2, 1)
┌─────┐
│ foo │
│ --- │
│ str │
╞═════╡
│ x │
│ y │
└─────┘
"""
# TODO: allow explicit selection by scale/precision?
return Selector._from_pyselector(PySelector.decimal())
def digit(ascii_only: bool = False) -> Selector: # noqa: FBT001
r"""
Select all columns having names consisting only of digits.
Notes
-----
Matching column names cannot contain *any* non-digit characters. Note that the
definition of "digit" consists of all valid Unicode digit characters (`\d`)
by default; this can be changed by setting `ascii_only=True`.
Examples
--------
>>> import polars as pl
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "key": ["aaa", "bbb", "aaa", "bbb", "bbb"],
... "year": [2001, 2001, 2025, 2025, 2001],
... "value": [-25, 100, 75, -15, -5],
... }
... ).pivot(
... values="value",
... index="key",
... on="year",
... aggregate_function="sum",
... )
>>> print(df)
shape: (2, 3)
┌─────┬──────┬──────┐
│ key ┆ 2001 ┆ 2025 │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ i64 │
╞═════╪══════╪══════╡
│ aaa ┆ -25 ┆ 75 │
│ bbb ┆ 95 ┆ -15 │
└─────┴──────┴──────┘
Select columns with digit names:
>>> df.select(cs.digit())
shape: (2, 2)
┌──────┬──────┐
│ 2001 ┆ 2025 │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞══════╪══════╡
│ -25 ┆ 75 │
│ 95 ┆ -15 │
└──────┴──────┘
Select all columns *except* for those with digit names:
>>> df.select(~cs.digit())
shape: (2, 1)
┌─────┐
│ key │
│ --- │
│ str │
╞═════╡
│ aaa │
│ bbb │
└─────┘
Demonstrate use of `ascii_only` flag (by default all valid unicode digits
are considered, but this can be constrained to ascii 0-9):
>>> df = pl.DataFrame({"१९९९": [1999], "२०७७": [2077], "3000": [3000]})
>>> df.select(cs.digit())
shape: (1, 3)
┌──────┬──────┬──────┐
│ १९९९ ┆ २०७७ ┆ 3000 │
│ --- ┆ --- ┆ --- │
│ i64 ┆ i64 ┆ i64 │
╞══════╪══════╪══════╡
│ 1999 ┆ 2077 ┆ 3000 │
└──────┴──────┴──────┘
>>> df.select(cs.digit(ascii_only=True))
shape: (1, 1)
┌──────┐
│ 3000 │
│ --- │
│ i64 │
╞══════╡
│ 3000 │
└──────┘
"""
re_digit = r"[0-9]" if ascii_only else r"\d"
return Selector._from_pyselector(PySelector.matches(rf"^{re_digit}+$"))
def duration(
time_unit: TimeUnit | Collection[TimeUnit] | None = None,
) -> Selector:
"""
Select all duration columns, optionally filtering by time unit.
Parameters
----------
time_unit
One (or more) of the allowed timeunit precision strings, "ms", "us", and "ns".
Omit to select columns with any valid timeunit.
See Also
--------
date : Select all date columns.
datetime : Select all datetime columns, optionally filtering by time unit/zone.
temporal : Select all temporal columns.
time : Select all time columns.
Examples
--------
>>> from datetime import date, timedelta
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "dt": [date(2022, 1, 31), date(2025, 7, 5)],
... "td1": [
... timedelta(days=1, milliseconds=123456),
... timedelta(days=1, hours=23, microseconds=987000),
... ],
... "td2": [
... timedelta(days=7, microseconds=456789),
... timedelta(days=14, minutes=999, seconds=59),
... ],
... "td3": [
... timedelta(weeks=4, days=-10, microseconds=999999),
... timedelta(weeks=3, milliseconds=123456, microseconds=1),
... ],
... },
... schema_overrides={
... "td1": pl.Duration("ms"),
... "td2": pl.Duration("us"),
... "td3": pl.Duration("ns"),
... },
... )
Select all duration columns:
>>> df.select(cs.duration())
shape: (2, 3)
┌────────────────┬─────────────────┬────────────────────┐
│ td1 ┆ td2 ┆ td3 │
│ --- ┆ --- ┆ --- │
│ duration[ms] ┆ duration[μs] ┆ duration[ns] │
╞════════════════╪═════════════════╪════════════════════╡
│ 1d 2m 3s 456ms ┆ 7d 456789µs ┆ 18d 999999µs │
│ 1d 23h 987ms ┆ 14d 16h 39m 59s ┆ 21d 2m 3s 456001µs │
└────────────────┴─────────────────┴────────────────────┘
Select all duration columns that have 'ms' precision:
>>> df.select(cs.duration("ms"))
shape: (2, 1)
┌────────────────┐
│ td1 │
│ --- │
│ duration[ms] │
╞════════════════╡
│ 1d 2m 3s 456ms │
│ 1d 23h 987ms │
└────────────────┘
Select all duration columns that have 'ms' OR 'ns' precision:
>>> df.select(cs.duration(["ms", "ns"]))
shape: (2, 2)
┌────────────────┬────────────────────┐
│ td1 ┆ td3 │
│ --- ┆ --- │
│ duration[ms] ┆ duration[ns] │
╞════════════════╪════════════════════╡
│ 1d 2m 3s 456ms ┆ 18d 999999µs │
│ 1d 23h 987ms ┆ 21d 2m 3s 456001µs │
└────────────────┴────────────────────┘
Select all columns *except* for duration columns:
>>> df.select(~cs.duration())
shape: (2, 1)
┌────────────┐
│ dt │
│ --- │
│ date │
╞════════════╡
│ 2022-01-31 │
│ 2025-07-05 │
└────────────┘
"""
if time_unit is None:
time_unit = ["ms", "us", "ns"]
else:
time_unit = (
[time_unit] if isinstance(time_unit, str) else builtins.list(time_unit)
)
return Selector._from_pyselector(PySelector.duration(time_unit))
def ends_with(*suffix: str) -> Selector:
"""
Select columns that end with the given substring(s).
See Also
--------
contains : Select columns that contain the given literal substring(s).
matches : Select all columns that match the given regex pattern.
starts_with : Select columns that start with the given substring(s).
Parameters
----------
suffix
Substring(s) that matching column names should end with.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": ["x", "y"],
... "bar": [123, 456],
... "baz": [2.0, 5.5],
... "zap": [False, True],
... }
... )
Select columns that end with the substring 'z':
>>> df.select(cs.ends_with("z"))
shape: (2, 1)
┌─────┐
│ baz │
│ --- │
│ f64 │
╞═════╡
│ 2.0 │
│ 5.5 │
└─────┘
Select columns that end with *either* the letter 'z' or 'r':
>>> df.select(cs.ends_with("z", "r"))
shape: (2, 2)
┌─────┬─────┐
│ bar ┆ baz │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪═════╡
│ 123 ┆ 2.0 │
│ 456 ┆ 5.5 │
└─────┴─────┘
Select all columns *except* for those that end with the substring 'z':
>>> df.select(~cs.ends_with("z"))
shape: (2, 3)
┌─────┬─────┬───────┐
│ foo ┆ bar ┆ zap │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ bool │
╞═════╪═════╪═══════╡
│ x ┆ 123 ┆ false │
│ y ┆ 456 ┆ true │
└─────┴─────┴───────┘
"""
escaped_suffix = _re_string(suffix)
raw_params = f"^.*{escaped_suffix}$"
return Selector._from_pyselector(PySelector.matches(raw_params))
def exclude(
columns: (
str
| PolarsDataType
| Selector
| Expr
| Collection[str | PolarsDataType | Selector | Expr]
),
*more_columns: str | PolarsDataType | Selector | Expr,
) -> Selector:
"""
Select all columns except those matching the given columns, datatypes, or selectors.
Parameters
----------
columns
One or more columns (col or name), datatypes, or selectors representing
the columns to exclude.
*more_columns
Additional columns, datatypes, or selectors to exclude, specified as positional
arguments.
Notes
-----
If excluding a single selector it is simpler to write as `~selector` instead.
Examples
--------
Exclude by column name(s):
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "aa": [1, 2, 3],
... "ba": ["a", "b", None],
... "cc": [None, 2.5, 1.5],
... }
... )
>>> df.select(cs.exclude("ba", "xx"))
shape: (3, 2)
┌─────┬──────┐
│ aa ┆ cc │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪══════╡
│ 1 ┆ null │
│ 2 ┆ 2.5 │
│ 3 ┆ 1.5 │
└─────┴──────┘
Exclude using a column name, a selector, and a dtype:
>>> df.select(cs.exclude("aa", cs.string(), pl.UInt32))
shape: (3, 1)
┌──────┐
│ cc │
│ --- │
│ f64 │
╞══════╡
│ null │
│ 2.5 │
│ 1.5 │
└──────┘
"""
return ~_combine_as_selector(columns, *more_columns)
def first(*, strict: bool = True) -> Selector:
"""
Select the first column in the current scope.
See Also
--------
all : Select all columns.
last : Select the last column in the current scope.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": ["x", "y"],
... "bar": [123, 456],
... "baz": [2.0, 5.5],
... "zap": [0, 1],
... }
... )
Select the first column:
>>> df.select(cs.first())
shape: (2, 1)
┌─────┐
│ foo │
│ --- │
│ str │
╞═════╡
│ x │
│ y │
└─────┘
Select everything *except* for the first column:
>>> df.select(~cs.first())
shape: (2, 3)
┌─────┬─────┬─────┐
│ bar ┆ baz ┆ zap │
│ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ i64 │
╞═════╪═════╪═════╡
│ 123 ┆ 2.0 ┆ 0 │
│ 456 ┆ 5.5 ┆ 1 │
└─────┴─────┴─────┘
"""
return Selector._from_pyselector(PySelector.first(strict))
def float() -> Selector:
"""
Select all float columns.
See Also
--------
integer : Select all integer columns.
numeric : Select all numeric columns.
signed_integer : Select all signed integer columns.
unsigned_integer : Select all unsigned integer columns.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": ["x", "y"],
... "bar": [123, 456],
... "baz": [2.0, 5.5],
... "zap": [0.0, 1.0],
... },
... schema_overrides={"baz": pl.Float32, "zap": pl.Float64},
... )
Select all float columns:
>>> df.select(cs.float())
shape: (2, 2)
┌─────┬─────┐
│ baz ┆ zap │
│ --- ┆ --- │
│ f32 ┆ f64 │
╞═════╪═════╡
│ 2.0 ┆ 0.0 │
│ 5.5 ┆ 1.0 │
└─────┴─────┘
Select all columns *except* for those that are float:
>>> df.select(~cs.float())
shape: (2, 2)
┌─────┬─────┐
│ foo ┆ bar │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ x ┆ 123 │
│ y ┆ 456 │
└─────┴─────┘
"""
return Selector._from_pyselector(PySelector.float())
def integer() -> Selector:
"""
Select all integer columns.
See Also
--------
by_dtype : Select columns by dtype.
float : Select all float columns.
numeric : Select all numeric columns.
signed_integer : Select all signed integer columns.
unsigned_integer : Select all unsigned integer columns.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": ["x", "y"],
... "bar": [123, 456],
... "baz": [2.0, 5.5],
... "zap": [0, 1],
... }
... )
Select all integer columns:
>>> df.select(cs.integer())
shape: (2, 2)
┌─────┬─────┐
│ bar ┆ zap │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 123 ┆ 0 │
│ 456 ┆ 1 │
└─────┴─────┘
Select all columns *except* for those that are integer:
>>> df.select(~cs.integer())
shape: (2, 2)
┌─────┬─────┐
│ foo ┆ baz │
│ --- ┆ --- │
│ str ┆ f64 │
╞═════╪═════╡
│ x ┆ 2.0 │
│ y ┆ 5.5 │
└─────┴─────┘
"""
return Selector._from_pyselector(PySelector.integer())
def signed_integer() -> Selector:
"""
Select all signed integer columns.
See Also
--------
by_dtype : Select columns by dtype.
float : Select all float columns.
integer : Select all integer columns.
numeric : Select all numeric columns.
unsigned_integer : Select all unsigned integer columns.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": [-123, -456],
... "bar": [3456, 6789],
... "baz": [7654, 4321],
... "zap": ["ab", "cd"],
... },
... schema_overrides={"bar": pl.UInt32, "baz": pl.UInt64},
... )
Select all signed integer columns:
>>> df.select(cs.signed_integer())
shape: (2, 1)
┌──────┐
│ foo │
│ --- │
│ i64 │
╞══════╡
│ -123 │
│ -456 │
└──────┘
>>> df.select(~cs.signed_integer())
shape: (2, 3)
┌──────┬──────┬─────┐
│ bar ┆ baz ┆ zap │
│ --- ┆ --- ┆ --- │
│ u32 ┆ u64 ┆ str │
╞══════╪══════╪═════╡
│ 3456 ┆ 7654 ┆ ab │
│ 6789 ┆ 4321 ┆ cd │
└──────┴──────┴─────┘
Select all integer columns (both signed and unsigned):
>>> df.select(cs.integer())
shape: (2, 3)
┌──────┬──────┬──────┐
│ foo ┆ bar ┆ baz │
│ --- ┆ --- ┆ --- │
│ i64 ┆ u32 ┆ u64 │
╞══════╪══════╪══════╡
│ -123 ┆ 3456 ┆ 7654 │
│ -456 ┆ 6789 ┆ 4321 │
└──────┴──────┴──────┘
"""
return Selector._from_pyselector(PySelector.signed_integer())
def unsigned_integer() -> Selector:
"""
Select all unsigned integer columns.
See Also
--------
by_dtype : Select columns by dtype.
float : Select all float columns.
integer : Select all integer columns.
numeric : Select all numeric columns.
signed_integer : Select all signed integer columns.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": [-123, -456],
... "bar": [3456, 6789],
... "baz": [7654, 4321],
... "zap": ["ab", "cd"],
... },
... schema_overrides={"bar": pl.UInt32, "baz": pl.UInt64},
... )
Select all unsigned integer columns:
>>> df.select(cs.unsigned_integer())
shape: (2, 2)
┌──────┬──────┐
│ bar ┆ baz │
│ --- ┆ --- │
│ u32 ┆ u64 │
╞══════╪══════╡
│ 3456 ┆ 7654 │
│ 6789 ┆ 4321 │
└──────┴──────┘
Select all columns *except* for those that are unsigned integers:
>>> df.select(~cs.unsigned_integer())
shape: (2, 2)
┌──────┬─────┐
│ foo ┆ zap │
│ --- ┆ --- │
│ i64 ┆ str │
╞══════╪═════╡
│ -123 ┆ ab │
│ -456 ┆ cd │
└──────┴─────┘
Select all integer columns (both signed and unsigned):
>>> df.select(cs.integer())
shape: (2, 3)
┌──────┬──────┬──────┐
│ foo ┆ bar ┆ baz │
│ --- ┆ --- ┆ --- │
│ i64 ┆ u32 ┆ u64 │
╞══════╪══════╪══════╡
│ -123 ┆ 3456 ┆ 7654 │
│ -456 ┆ 6789 ┆ 4321 │
└──────┴──────┴──────┘
"""
return Selector._from_pyselector(PySelector.unsigned_integer())
def last(*, strict: bool = True) -> Selector:
"""
Select the last column in the current scope.
See Also
--------
all : Select all columns.
first : Select the first column in the current scope.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": ["x", "y"],
... "bar": [123, 456],
... "baz": [2.0, 5.5],
... "zap": [0, 1],
... }
... )
Select the last column:
>>> df.select(cs.last())
shape: (2, 1)
┌─────┐
│ zap │
│ --- │
│ i64 │
╞═════╡
│ 0 │
│ 1 │
└─────┘
Select everything *except* for the last column:
>>> df.select(~cs.last())
shape: (2, 3)
┌─────┬─────┬─────┐
│ foo ┆ bar ┆ baz │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 │
╞═════╪═════╪═════╡
│ x ┆ 123 ┆ 2.0 │
│ y ┆ 456 ┆ 5.5 │
└─────┴─────┴─────┘
"""
return Selector._from_pyselector(PySelector.last(strict))
def matches(pattern: str) -> Selector:
"""
Select all columns that match the given regex pattern.
See Also
--------
contains : Select all columns that contain the given substring.
ends_with : Select all columns that end with the given substring(s).
starts_with : Select all columns that start with the given substring(s).
Parameters
----------
pattern
A valid regular expression pattern, compatible with the `regex crate
<https://docs.rs/regex/latest/regex/>`_.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": ["x", "y"],
... "bar": [123, 456],
... "baz": [2.0, 5.5],
... "zap": [0, 1],
... }
... )
Match column names containing an 'a', preceded by a character that is not 'z':
>>> df.select(cs.matches("[^z]a"))
shape: (2, 2)
┌─────┬─────┐
│ bar ┆ baz │
│ --- ┆ --- │
│ i64 ┆ f64 │
╞═════╪═════╡
│ 123 ┆ 2.0 │
│ 456 ┆ 5.5 │
└─────┴─────┘
Do not match column names ending in 'R' or 'z' (case-insensitively):
>>> df.select(~cs.matches(r"(?i)R|z$"))
shape: (2, 2)
┌─────┬─────┐
│ foo ┆ zap │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ x ┆ 0 │
│ y ┆ 1 │
└─────┴─────┘
"""
if pattern == ".*":
return all()
else:
if pattern.startswith(".*"):
pattern = pattern[2:]
elif pattern.endswith(".*"):
pattern = pattern[:-2]
pfx = "^.*" if not pattern.startswith("^") else ""
sfx = ".*$" if not pattern.endswith("$") else ""
raw_params = f"{pfx}{pattern}{sfx}"
return Selector._from_pyselector(PySelector.matches(raw_params))
def numeric() -> Selector:
"""
Select all numeric columns.
See Also
--------
by_dtype : Select columns by dtype.
float : Select all float columns.
integer : Select all integer columns.
signed_integer : Select all signed integer columns.
unsigned_integer : Select all unsigned integer columns.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": ["x", "y"],
... "bar": [123, 456],
... "baz": [2.0, 5.5],
... "zap": [0, 0],
... },
... schema_overrides={"bar": pl.Int16, "baz": pl.Float32, "zap": pl.UInt8},
... )
Match all numeric columns:
>>> df.select(cs.numeric())
shape: (2, 3)
┌─────┬─────┬─────┐
│ bar ┆ baz ┆ zap │
│ --- ┆ --- ┆ --- │
│ i16 ┆ f32 ┆ u8 │
╞═════╪═════╪═════╡
│ 123 ┆ 2.0 ┆ 0 │
│ 456 ┆ 5.5 ┆ 0 │
└─────┴─────┴─────┘
Match all columns *except* for those that are numeric:
>>> df.select(~cs.numeric())
shape: (2, 1)
┌─────┐
│ foo │
│ --- │
│ str │
╞═════╡
│ x │
│ y │
└─────┘
"""
return Selector._from_pyselector(PySelector.numeric())
def object() -> Selector:
"""
Select all object columns.
See Also
--------
by_dtype : Select columns by dtype.
Examples
--------
>>> import polars.selectors as cs
>>> from uuid import uuid4
>>> with pl.Config(fmt_str_lengths=36):
... df = pl.DataFrame(
... {
... "idx": [0, 1],
... "uuid_obj": [uuid4(), uuid4()],
... "uuid_str": [str(uuid4()), str(uuid4())],
... },
... schema_overrides={"idx": pl.Int32},
... )
... print(df) # doctest: +IGNORE_RESULT
shape: (2, 3)
┌─────┬──────────────────────────────────────┬──────────────────────────────────────┐
│ idx ┆ uuid_obj ┆ uuid_str │
│ --- ┆ --- ┆ --- │
│ i32 ┆ object ┆ str │
╞═════╪══════════════════════════════════════╪══════════════════════════════════════╡
│ 0 ┆ 6be063cf-c9c6-43be-878e-e446cfd42981 ┆ acab9fea-c05d-4b91-b639-418004a63f33 │
│ 1 ┆ 7849d8f9-2cac-48e7-96d3-63cf81c14869 ┆ 28c65415-8b7d-4857-a4ce-300dca14b12b │
└─────┴──────────────────────────────────────┴──────────────────────────────────────┘
Select object columns and export as a dict:
>>> df.select(cs.object()).to_dict(as_series=False) # doctest: +IGNORE_RESULT
{
"uuid_obj": [
UUID("6be063cf-c9c6-43be-878e-e446cfd42981"),
UUID("7849d8f9-2cac-48e7-96d3-63cf81c14869"),
]
}
Select all columns *except* for those that are object and export as dict:
>>> df.select(~cs.object()) # doctest: +IGNORE_RESULT
{
"idx": [0, 1],
"uuid_str": [
"acab9fea-c05d-4b91-b639-418004a63f33",
"28c65415-8b7d-4857-a4ce-300dca14b12b",
],
}
""" # noqa: W505
return Selector._from_pyselector(PySelector.object())
def starts_with(*prefix: str) -> Selector:
"""
Select columns that start with the given substring(s).
Parameters
----------
prefix
Substring(s) that matching column names should start with.
See Also
--------
contains : Select all columns that contain the given substring.
ends_with : Select all columns that end with the given substring(s).
matches : Select all columns that match the given regex pattern.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "foo": [1.0, 2.0],
... "bar": [3.0, 4.0],
... "baz": [5, 6],
... "zap": [7, 8],
... }
... )
Match columns starting with a 'b':
>>> df.select(cs.starts_with("b"))
shape: (2, 2)
┌─────┬─────┐
│ bar ┆ baz │
│ --- ┆ --- │
│ f64 ┆ i64 │
╞═════╪═════╡
│ 3.0 ┆ 5 │
│ 4.0 ┆ 6 │
└─────┴─────┘
Match columns starting with *either* the letter 'b' or 'z':
>>> df.select(cs.starts_with("b", "z"))
shape: (2, 3)
┌─────┬─────┬─────┐
│ bar ┆ baz ┆ zap │
│ --- ┆ --- ┆ --- │
│ f64 ┆ i64 ┆ i64 │
╞═════╪═════╪═════╡
│ 3.0 ┆ 5 ┆ 7 │
│ 4.0 ┆ 6 ┆ 8 │
└─────┴─────┴─────┘
Match all columns *except* for those starting with 'b':
>>> df.select(~cs.starts_with("b"))
shape: (2, 2)
┌─────┬─────┐
│ foo ┆ zap │
│ --- ┆ --- │
│ f64 ┆ i64 │
╞═════╪═════╡
│ 1.0 ┆ 7 │
│ 2.0 ┆ 8 │
└─────┴─────┘
"""
escaped_prefix = _re_string(prefix)
raw_params = f"^{escaped_prefix}.*$"
return Selector._from_pyselector(PySelector.matches(raw_params))
def string(*, include_categorical: bool = False) -> Selector:
"""
Select all String (and, optionally, Categorical) columns.
See Also
--------
binary : Select all binary columns.
by_dtype : Select all columns matching the given dtype(s).
categorical : Select all categorical columns.
Examples
--------
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "w": ["xx", "yy", "xx", "yy", "xx"],
... "x": [1, 2, 1, 4, -2],
... "y": [3.0, 4.5, 1.0, 2.5, -2.0],
... "z": ["a", "b", "a", "b", "b"],
... },
... ).with_columns(
... z=pl.col("z").cast(pl.Categorical()),
... )
Group by all string columns, sum the numeric columns, then sort by the string cols:
>>> df.group_by(cs.string()).agg(cs.numeric().sum()).sort(by=cs.string())
shape: (2, 3)
┌─────┬─────┬─────┐
│ w ┆ x ┆ y │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 │
╞═════╪═════╪═════╡
│ xx ┆ 0 ┆ 2.0 │
│ yy ┆ 6 ┆ 7.0 │
└─────┴─────┴─────┘
Group by all string *and* categorical columns:
>>> df.group_by(cs.string(include_categorical=True)).agg(cs.numeric().sum()).sort(
... by=cs.string(include_categorical=True)
... )
shape: (3, 4)
┌─────┬─────┬─────┬──────┐
│ w ┆ z ┆ x ┆ y │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ cat ┆ i64 ┆ f64 │
╞═════╪═════╪═════╪══════╡
│ xx ┆ a ┆ 2 ┆ 4.0 │
│ xx ┆ b ┆ -2 ┆ -2.0 │
│ yy ┆ b ┆ 6 ┆ 7.0 │
└─────┴─────┴─────┴──────┘
"""
string_dtypes: builtins.list[PolarsDataType] = [String]
if include_categorical:
string_dtypes.append(Categorical)
return by_dtype(string_dtypes)
def temporal() -> Selector:
"""
Select all temporal columns.
See Also
--------
by_dtype : Select all columns matching the given dtype(s).
date : Select all date columns.
datetime : Select all datetime columns, optionally filtering by time unit/zone.
duration : Select all duration columns, optionally filtering by time unit.
time : Select all time columns.
Examples
--------
>>> from datetime import date, time
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "dt": [date(2021, 1, 1), date(2021, 1, 2)],
... "tm": [time(12, 0, 0), time(20, 30, 45)],
... "value": [1.2345, 2.3456],
... }
... )
Match all temporal columns:
>>> df.select(cs.temporal())
shape: (2, 2)
┌────────────┬──────────┐
│ dt ┆ tm │
│ --- ┆ --- │
│ date ┆ time │
╞════════════╪══════════╡
│ 2021-01-01 ┆ 12:00:00 │
│ 2021-01-02 ┆ 20:30:45 │
└────────────┴──────────┘
Match all temporal columns *except* for time columns:
>>> df.select(cs.temporal() - cs.time())
shape: (2, 1)
┌────────────┐
│ dt │
│ --- │
│ date │
╞════════════╡
│ 2021-01-01 │
│ 2021-01-02 │
└────────────┘
Match all columns *except* for temporal columns:
>>> df.select(~cs.temporal())
shape: (2, 1)
┌────────┐
│ value │
│ --- │
│ f64 │
╞════════╡
│ 1.2345 │
│ 2.3456 │
└────────┘
"""
return Selector._from_pyselector(PySelector.temporal())
def time() -> Selector:
"""
Select all time columns.
See Also
--------
date : Select all date columns.
datetime : Select all datetime columns, optionally filtering by time unit/zone.
duration : Select all duration columns, optionally filtering by time unit.
temporal : Select all temporal columns.
Examples
--------
>>> from datetime import date, datetime, time
>>> import polars.selectors as cs
>>> df = pl.DataFrame(
... {
... "dtm": [datetime(2001, 5, 7, 10, 25), datetime(2031, 12, 31, 0, 30)],
... "dt": [date(1999, 12, 31), date(2024, 8, 9)],
... "tm": [time(0, 0, 0), time(23, 59, 59)],
... },
... )
Select all time columns:
>>> df.select(cs.time())
shape: (2, 1)
┌──────────┐
│ tm │
│ --- │
│ time │
╞══════════╡
│ 00:00:00 │
│ 23:59:59 │
└──────────┘
Select all columns *except* for those that are times:
>>> df.select(~cs.time())
shape: (2, 2)
┌─────────────────────┬────────────┐
│ dtm ┆ dt │
│ --- ┆ --- │
│ datetime[μs] ┆ date │
╞═════════════════════╪════════════╡
│ 2001-05-07 10:25:00 ┆ 1999-12-31 │
│ 2031-12-31 00:30:00 ┆ 2024-08-09 │
└─────────────────────┴────────────┘
"""
return by_dtype([Time])
| Selector
python | PyCQA__pylint | tests/functional/s/slots_checks.py | {
"start": 1598,
"end": 1669
} | class ____:
__slots__ = [1 + 2 + 3] # [invalid-slots-object]
| TenthBad |
python | python-openxml__python-docx | src/docx/image/png.py | {
"start": 180,
"end": 962
} | class ____(BaseImageHeader):
"""Image header parser for PNG images."""
@property
def content_type(self):
"""MIME content type for this image, unconditionally `image/png` for PNG
images."""
return MIME_TYPE.PNG
@property
def default_ext(self):
"""Default filename extension, always 'png' for PNG images."""
return "png"
@classmethod
def from_stream(cls, stream):
"""Return a |Png| instance having header properties parsed from image in
`stream`."""
parser = _PngParser.parse(stream)
px_width = parser.px_width
px_height = parser.px_height
horz_dpi = parser.horz_dpi
vert_dpi = parser.vert_dpi
return cls(px_width, px_height, horz_dpi, vert_dpi)
| Png |
python | h5py__h5py | h5py/tests/test_attrs.py | {
"start": 4087,
"end": 4612
} | class ____(BaseAttrs):
"""
Feature: Deletion of attributes using __delitem__
"""
def test_delete(self):
""" Deletion via "del" """
name = make_name()
self.f.attrs[name] = 4.0
self.assertIn(name, self.f.attrs)
del self.f.attrs[name]
self.assertNotIn(name, self.f.attrs)
def test_delete_exc(self):
""" Attempt to delete missing item raises KeyError """
with self.assertRaises(KeyError):
del self.f.attrs['notexist']
| TestDelete |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/hg_test/package.py | {
"start": 217,
"end": 397
} | class ____(Package):
"""Test package that does fetching with mercurial."""
homepage = "http://www.hg-fetch-example.com"
version("hg", hg="to-be-filled-in-by-test")
| HgTest |
python | joke2k__faker | tests/providers/test_person.py | {
"start": 12305,
"end": 13019
} | class ____(unittest.TestCase):
"""Tests person in the de_LI locale"""
def setUp(self):
self.fake = Faker("de_LI")
Faker.seed(0)
def test_first_name(self):
first_name = self.fake.first_name()
assert isinstance(first_name, str)
assert first_name in DeLiProvider.first_names
def test_first_name_female(self):
name_female = self.fake.first_name_female()
assert isinstance(name_female, str)
assert name_female in DeLiProvider.first_names_female
def test_first_name_male(self):
name_male = self.fake.first_name_male()
assert isinstance(name_male, str)
assert name_male in DeLiProvider.first_names_male
| TestDeLi |
python | gevent__gevent | src/gevent/socket.py | {
"start": 1030,
"end": 6503
} | class ____(Exception):
errno = None
def getfqdn(*args):
# pylint:disable=unused-argument
raise NotImplementedError()
copy_globals(_source, globals(),
dunder_names_to_keep=('__implements__', '__dns__', '__all__',
'__extensions__', '__imports__', '__socket__'),
cleanup_globs=False)
# The _socket2 and _socket3 don't import things defined in
# __extensions__, to help avoid confusing reference cycles in the
# documentation and to prevent importing from the wrong place, but we
# *do* need to expose them here. (NOTE: This may lead to some sphinx
# warnings like:
# WARNING: missing attribute mentioned in :members: or __all__:
# module gevent._socket2, attribute cancel_wait
# These can be ignored.)
from gevent import _socketcommon
copy_globals(_socketcommon, globals(),
only_names=_socketcommon.__extensions__)
try:
_GLOBAL_DEFAULT_TIMEOUT = __socket__._GLOBAL_DEFAULT_TIMEOUT
except AttributeError:
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *,
all_errors=False):
"""
create_connection(address, timeout=None, source_address=None, *, all_errors=False) -> socket
Connect to *address* and return the :class:`gevent.socket.socket`
object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by
:func:`getdefaulttimeout` is used. If *source_address* is set it
must be a tuple of (host, port) for the socket to bind as a source
address before making the connection. A host of '' or port 0 tells
the OS to use the default.
.. versionchanged:: 20.6.0
If the host part of the address includes an IPv6 scope ID,
it will be used instead of ignored, if the platform supplies
:func:`socket.inet_pton`.
.. versionchanged:: 22.08.0
Add the *all_errors* argument. This only has meaning on Python 3.11+;
it is a programming error to pass it on earlier versions.
.. versionchanged:: 23.7.0
You can pass a value for ``all_errors`` on any version of Python.
It is forced to false for any version before 3.11 inside the function.
"""
# Sigh. This function is a near-copy of the CPython implementation.
# Even though we simplified some things, it's still a little complex to
# cope with error handling, which got even more complicated in 3.11.
# pylint:disable=too-many-locals,too-many-branches
if not PY311:
all_errors = False
host, port = address
exceptions = []
# getaddrinfo is documented as returning a list, but our interface
# is pluggable, so be sure it does.
addrs = list(getaddrinfo(host, port, 0, SOCK_STREAM))
if not addrs:
raise error("getaddrinfo returns an empty list")
for res in addrs:
af, socktype, proto, _canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
except error as exc:
if not all_errors:
exceptions = [exc] # raise only the last error
else:
exceptions.append(exc)
del exc # cycle
if sock is not None:
sock.close()
sock = None
if res is addrs[-1]:
if not all_errors:
del exceptions[:]
raise
try:
# pylint isn't smart enough to see that we only use this
# on supported versions.
# pylint:disable=using-exception-groups-in-unsupported-version
raise ExceptionGroup("create_connection failed", exceptions)
finally:
# Break explicitly a reference cycle
del exceptions[:]
# without exc_clear(), if connect() fails once, the socket
# is referenced by the frame in exc_info and the next
# bind() fails (see test__socket.TestCreateConnection)
# that does not happen with regular sockets though,
# because _socket.socket.connect() is a built-in. this is
# similar to "getnameinfo loses a reference" failure in
# test_socket.py
exc_clear()
except BaseException:
# Things like GreenletExit, Timeout and KeyboardInterrupt.
# These get raised immediately, being sure to
# close the socket
if sock is not None:
sock.close()
sock = None
raise
else:
# break reference cycles
del exceptions[:]
try:
return sock
finally:
sock = None
# This is promised to be in the __all__ of the _source, but, for circularity reasons,
# we implement it in this module. Mostly for documentation purposes, put it
# in the _source too.
_source.create_connection = create_connection
| error |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/contrib/regular_languages/lexer.py | {
"start": 490,
"end": 3415
} | class ____(Lexer):
"""
Lexer which can be used for highlighting of fragments according to variables in the grammar.
(It does not actual lexing of the string, but it exposes an API, compatible
with the Pygments lexer class.)
:param compiled_grammar: Grammar as returned by the `compile()` function.
:param lexers: Dictionary mapping variable names of the regular grammar to
the lexers that should be used for this part. (This can
call other lexers recursively.) If you wish a part of the
grammar to just get one fragment, use a
`prompt_toolkit.lexers.SimpleLexer`.
"""
def __init__(
self,
compiled_grammar: _CompiledGrammar,
default_style: str = "",
lexers: dict[str, Lexer] | None = None,
) -> None:
self.compiled_grammar = compiled_grammar
self.default_style = default_style
self.lexers = lexers or {}
def _get_text_fragments(self, text: str) -> StyleAndTextTuples:
m = self.compiled_grammar.match_prefix(text)
if m:
characters: StyleAndTextTuples = [(self.default_style, c) for c in text]
for v in m.variables():
# If we have a `Lexer` instance for this part of the input.
# Tokenize recursively and apply tokens.
lexer = self.lexers.get(v.varname)
if lexer:
document = Document(text[v.start : v.stop])
lexer_tokens_for_line = lexer.lex_document(document)
text_fragments: StyleAndTextTuples = []
for i in range(len(document.lines)):
text_fragments.extend(lexer_tokens_for_line(i))
text_fragments.append(("", "\n"))
if text_fragments:
text_fragments.pop()
i = v.start
for t, s, *_ in text_fragments:
for c in s:
if characters[i][0] == self.default_style:
characters[i] = (t, characters[i][1])
i += 1
# Highlight trailing input.
trailing_input = m.trailing_input()
if trailing_input:
for i in range(trailing_input.start, trailing_input.stop):
characters[i] = ("class:trailing-input", characters[i][1])
return characters
else:
return [("", text)]
def lex_document(self, document: Document) -> Callable[[int], StyleAndTextTuples]:
lines = list(split_lines(self._get_text_fragments(document.text)))
def get_line(lineno: int) -> StyleAndTextTuples:
try:
return lines[lineno]
except IndexError:
return []
return get_line
| GrammarLexer |
python | dagster-io__dagster | examples/development_to_production/development_to_production/resources.py | {
"start": 1293,
"end": 2048
} | class ____(HNClient):
"""Hacker News Client that returns fake data."""
def __init__(self):
self.data = {
1: {
"id": 1,
"type": "comment",
"title": "the first comment",
"by": "user1",
},
2: {
"id": 2,
"type": "story",
"title": "an awesome story",
"by": "user2",
},
}
def fetch_item_by_id(self, item_id: int) -> Optional[dict[str, Any]]:
return self.data.get(item_id)
def fetch_max_item_id(self) -> int:
return 2
@property
def item_field_names(self) -> Sequence[str]:
return ["id", "type", "title", "by"]
| StubHNClient |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 36514,
"end": 36928
} | class ____:
xlAllFaces = 7 # from enum XlChartPicturePlacement
xlEnd = 2 # from enum XlChartPicturePlacement
xlEndSides = 3 # from enum XlChartPicturePlacement
xlFront = 4 # from enum XlChartPicturePlacement
xlFrontEnd = 6 # from enum XlChartPicturePlacement
xlFrontSides = 5 # from enum XlChartPicturePlacement
xlSides = 1 # from enum XlChartPicturePlacement
| ChartPicturePlacement |
python | openai__openai-python | src/openai/types/beta/realtime/realtime_client_event_param.py | {
"start": 1153,
"end": 1887
} | class ____(TypedDict, total=False):
type: Required[Literal["output_audio_buffer.clear"]]
"""The event type, must be `output_audio_buffer.clear`."""
event_id: str
"""The unique ID of the client event used for error handling."""
RealtimeClientEventParam: TypeAlias = Union[
ConversationItemCreateEventParam,
ConversationItemDeleteEventParam,
ConversationItemRetrieveEventParam,
ConversationItemTruncateEventParam,
InputAudioBufferAppendEventParam,
InputAudioBufferClearEventParam,
OutputAudioBufferClear,
InputAudioBufferCommitEventParam,
ResponseCancelEventParam,
ResponseCreateEventParam,
SessionUpdateEventParam,
TranscriptionSessionUpdateParam,
]
| OutputAudioBufferClear |
python | python-pillow__Pillow | src/PIL/TiffImagePlugin.py | {
"start": 12671,
"end": 18708
} | class ____(Rational):
"""Implements a rational class where 0/0 is a legal value to match
the in the wild use of exif rationals.
e.g., DigitalZoomRatio - 0.00/0.00 indicates that no digital zoom was used
"""
""" If the denominator is 0, store this as a float('nan'), otherwise store
as a fractions.Fraction(). Delegate as appropriate
"""
__slots__ = ("_numerator", "_denominator", "_val")
def __init__(
self, value: float | Fraction | IFDRational, denominator: int = 1
) -> None:
"""
:param value: either an integer numerator, a
float/rational/other number, or an IFDRational
:param denominator: Optional integer denominator
"""
self._val: Fraction | float
if isinstance(value, IFDRational):
self._numerator = value.numerator
self._denominator = value.denominator
self._val = value._val
return
if isinstance(value, Fraction):
self._numerator = value.numerator
self._denominator = value.denominator
else:
if TYPE_CHECKING:
self._numerator = cast(IntegralLike, value)
else:
self._numerator = value
self._denominator = denominator
if denominator == 0:
self._val = float("nan")
elif denominator == 1:
self._val = Fraction(value)
elif int(value) == value:
self._val = Fraction(int(value), denominator)
else:
self._val = Fraction(value / denominator)
@property
def numerator(self) -> IntegralLike:
return self._numerator
@property
def denominator(self) -> int:
return self._denominator
def limit_rational(self, max_denominator: int) -> tuple[IntegralLike, int]:
"""
:param max_denominator: Integer, the maximum denominator value
:returns: Tuple of (numerator, denominator)
"""
if self.denominator == 0:
return self.numerator, self.denominator
assert isinstance(self._val, Fraction)
f = self._val.limit_denominator(max_denominator)
return f.numerator, f.denominator
def __repr__(self) -> str:
return str(float(self._val))
def __hash__(self) -> int: # type: ignore[override]
return self._val.__hash__()
def __eq__(self, other: object) -> bool:
val = self._val
if isinstance(other, IFDRational):
other = other._val
if isinstance(other, float):
val = float(val)
return val == other
def __getstate__(self) -> list[float | Fraction | IntegralLike]:
return [self._val, self._numerator, self._denominator]
def __setstate__(self, state: list[float | Fraction | IntegralLike]) -> None:
IFDRational.__init__(self, 0)
_val, _numerator, _denominator = state
assert isinstance(_val, (float, Fraction))
self._val = _val
if TYPE_CHECKING:
self._numerator = cast(IntegralLike, _numerator)
else:
self._numerator = _numerator
assert isinstance(_denominator, int)
self._denominator = _denominator
""" a = ['add','radd', 'sub', 'rsub', 'mul', 'rmul',
'truediv', 'rtruediv', 'floordiv', 'rfloordiv',
'mod','rmod', 'pow','rpow', 'pos', 'neg',
'abs', 'trunc', 'lt', 'gt', 'le', 'ge', 'bool',
'ceil', 'floor', 'round']
print("\n".join("__%s__ = _delegate('__%s__')" % (s,s) for s in a))
"""
__add__ = _delegate("__add__")
__radd__ = _delegate("__radd__")
__sub__ = _delegate("__sub__")
__rsub__ = _delegate("__rsub__")
__mul__ = _delegate("__mul__")
__rmul__ = _delegate("__rmul__")
__truediv__ = _delegate("__truediv__")
__rtruediv__ = _delegate("__rtruediv__")
__floordiv__ = _delegate("__floordiv__")
__rfloordiv__ = _delegate("__rfloordiv__")
__mod__ = _delegate("__mod__")
__rmod__ = _delegate("__rmod__")
__pow__ = _delegate("__pow__")
__rpow__ = _delegate("__rpow__")
__pos__ = _delegate("__pos__")
__neg__ = _delegate("__neg__")
__abs__ = _delegate("__abs__")
__trunc__ = _delegate("__trunc__")
__lt__ = _delegate("__lt__")
__gt__ = _delegate("__gt__")
__le__ = _delegate("__le__")
__ge__ = _delegate("__ge__")
__bool__ = _delegate("__bool__")
__ceil__ = _delegate("__ceil__")
__floor__ = _delegate("__floor__")
__round__ = _delegate("__round__")
# Python >= 3.11
if hasattr(Fraction, "__int__"):
__int__ = _delegate("__int__")
_LoaderFunc = Callable[["ImageFileDirectory_v2", bytes, bool], Any]
def _register_loader(idx: int, size: int) -> Callable[[_LoaderFunc], _LoaderFunc]:
def decorator(func: _LoaderFunc) -> _LoaderFunc:
from .TiffTags import TYPES
if func.__name__.startswith("load_"):
TYPES[idx] = func.__name__[5:].replace("_", " ")
_load_dispatch[idx] = size, func # noqa: F821
return func
return decorator
def _register_writer(idx: int) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
_write_dispatch[idx] = func # noqa: F821
return func
return decorator
def _register_basic(idx_fmt_name: tuple[int, str, str]) -> None:
from .TiffTags import TYPES
idx, fmt, name = idx_fmt_name
TYPES[idx] = name
size = struct.calcsize(f"={fmt}")
def basic_handler(
self: ImageFileDirectory_v2, data: bytes, legacy_api: bool = True
) -> tuple[Any, ...]:
return self._unpack(f"{len(data) // size}{fmt}", data)
_load_dispatch[idx] = size, basic_handler # noqa: F821
_write_dispatch[idx] = lambda self, *values: ( # noqa: F821
b"".join(self._pack(fmt, value) for value in values)
)
if TYPE_CHECKING:
_IFDv2Base = MutableMapping[int, Any]
else:
_IFDv2Base = MutableMapping
| IFDRational |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_userdict.py | {
"start": 671,
"end": 1750
} | class ____(importlib.abc.MetaPathFinder):
def find_spec(self, fullname, path, target=None):
# Check if the import is the problematic one
if fullname in redirect_imports:
try:
# Attempt to import the standalone module
name = fullname.removeprefix("test.")
r = importlib.import_module(name)
# Redirect the module in sys.modules
sys.modules[fullname] = r
# Return a module spec from the found module
return importlib.util.find_spec(name)
except ImportError:
return None
return None
# Add the custom finder to sys.meta_path
sys.meta_path.insert(0, RedirectImportFinder())
# ======= END DYNAMO PATCH =======
# Check every path through every method of UserDict
from test import mapping_tests, support
import unittest
import collections
d0 = {}
d1 = {"one": 1}
d2 = {"one": 1, "two": 2}
d3 = {"one": 1, "two": 3, "three": 5}
d4 = {"one": None, "two": None}
d5 = {"one": 1, "two": 1}
| RedirectImportFinder |
python | ray-project__ray | python/ray/serve/config.py | {
"start": 13224,
"end": 16355
} | class ____(BaseModel):
# Cloudpickled policy definition.
_serialized_policy_def: bytes = PrivateAttr(default=b"")
policy_function: Union[str, Callable] = Field(
default=DEFAULT_AUTOSCALING_POLICY_NAME,
description="Policy function can be a string import path or a function callable. "
"If it's a string import path, it must be of the form `path.to.module:function_name`. ",
)
def __init__(self, **kwargs):
serialized_policy_def = kwargs.pop("_serialized_policy_def", None)
super().__init__(**kwargs)
if serialized_policy_def:
self._serialized_policy_def = serialized_policy_def
else:
self.serialize_policy()
def set_serialized_policy_def(self, serialized_policy_def: bytes) -> None:
self._serialized_policy_def = serialized_policy_def
@classmethod
def from_serialized_policy_def(
cls, policy_config: dict, serialized_policy_def: bytes
) -> "AutoscalingPolicy":
config = policy_config.copy()
config["_serialized_policy_def"] = serialized_policy_def
return cls(**config)
def get_serialized_policy_def(self) -> Optional[bytes]:
return self._serialized_policy_def
def serialize_policy(self) -> None:
"""Serialize policy with cloudpickle.
Import the policy if it's passed in as a string import path. Then cloudpickle
the policy and set `serialized_policy_def` if it's empty.
"""
policy_path = self.policy_function
if isinstance(policy_path, Callable):
policy_path = f"{policy_path.__module__}.{policy_path.__name__}"
if not self._serialized_policy_def:
policy_module, policy_function = import_module_and_attr(policy_path)
cloudpickle.register_pickle_by_value(policy_module)
self.set_serialized_policy_def(cloudpickle.dumps(policy_function))
cloudpickle.unregister_pickle_by_value(policy_module)
self.policy_function = policy_path
def is_default_policy_function(self) -> bool:
return self.policy_function == DEFAULT_AUTOSCALING_POLICY_NAME
def get_policy(self) -> Callable:
"""Deserialize policy from cloudpickled bytes."""
try:
return cloudpickle.loads(self._serialized_policy_def)
except (ModuleNotFoundError, ImportError) as e:
raise ImportError(
f"Failed to deserialize custom autoscaling policy: {e}\n\n"
"This typically happens when the policy depends on external modules "
"that aren't available in the current environment. To fix this:\n"
" - Ensure all dependencies are installed in your Docker image or environment\n"
" - Package your policy as a Python package and install it\n"
" - Place the policy module in PYTHONPATH\n\n"
"For more details, see: https://docs.ray.io/en/latest/serve/advanced-guides/"
"advanced-autoscaling.html#gotchas-and-limitations"
) from e
@PublicAPI(stability="stable")
| AutoscalingPolicy |
python | scipy__scipy | scipy/stats/tests/test_mgc.py | {
"start": 244,
"end": 2340
} | class ____:
""" Tests errors and warnings derived from MGC.
"""
def test_error_notndarray(self):
# raises error if x or y is not a ndarray
x = np.arange(20)
y = [5] * 20
assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
assert_raises(ValueError, stats.multiscale_graphcorr, y, x)
def test_error_shape(self):
# raises error if number of samples different (n)
x = np.arange(100).reshape(25, 4)
y = x.reshape(10, 10)
assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
def test_error_lowsamples(self):
# raises error if samples are low (< 3)
x = np.arange(3)
y = np.arange(3)
assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
def test_error_nans(self):
# raises error if inputs contain NaNs
x = np.arange(20, dtype=float)
x[0] = np.nan
assert_raises(ValueError, stats.multiscale_graphcorr, x, x)
y = np.arange(20)
assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
def test_error_wrongdisttype(self):
# raises error if metric is not a function
x = np.arange(20)
compute_distance = 0
assert_raises(ValueError, stats.multiscale_graphcorr, x, x,
compute_distance=compute_distance)
@pytest.mark.parametrize("reps", [
-1, # reps is negative
'1', # reps is not integer
])
def test_error_reps(self, reps):
# raises error if reps is negative
x = np.arange(20)
assert_raises(ValueError, stats.multiscale_graphcorr, x, x, reps=reps)
def test_warns_reps(self):
# raises warning when reps is less than 1000
x = np.arange(20)
reps = 100
assert_warns(RuntimeWarning, stats.multiscale_graphcorr, x, x, reps=reps)
def test_error_infty(self):
# raises error if input contains infinities
x = np.arange(20)
y = np.ones(20) * np.inf
assert_raises(ValueError, stats.multiscale_graphcorr, x, y)
| TestMGCErrorWarnings |
python | instagram__MonkeyType | tests/test_stubs.py | {
"start": 34308,
"end": 38460
} | class ____:
def test_update_arg(self):
"""Update arg annotations from types"""
sig = Signature.from_callable(UpdateSignatureHelper.has_annos)
sig = update_signature_args(sig, {'b': int}, False)
params = [
Parameter('a', Parameter.POSITIONAL_OR_KEYWORD, annotation=int),
Parameter('b', Parameter.POSITIONAL_OR_KEYWORD, annotation=int),
]
assert sig == Signature(parameters=params, return_annotation=int)
def test_update_arg_with_anno(self):
"""Leave existing arg annotations alone"""
sig = Signature.from_callable(UpdateSignatureHelper.has_annos)
sig = update_signature_args(sig, {'a': str}, False)
expected = Signature(
parameters=[
Parameter('a', Parameter.POSITIONAL_OR_KEYWORD, annotation=int),
Parameter('b', Parameter.POSITIONAL_OR_KEYWORD)
],
return_annotation=int
)
assert sig == expected
def test_update_self(self):
"""Don't annotate first arg of instance methods"""
sig = Signature.from_callable(UpdateSignatureHelper.an_instance_method)
sig = update_signature_args(sig, {'self': UpdateSignatureHelper}, True)
expected = Signature(parameters=[Parameter('self', Parameter.POSITIONAL_OR_KEYWORD)])
assert sig == expected
def test_update_class(self):
"""Don't annotate the first arg of classmethods"""
sig = Signature.from_callable(UpdateSignatureHelper.a_class_method.__func__)
sig = update_signature_args(sig, {'cls': Type[UpdateSignatureHelper]}, True)
expected = Signature(parameters=[Parameter('cls', Parameter.POSITIONAL_OR_KEYWORD)])
assert sig == expected
def test_update_arg_ignore_existing_anno(self):
"""Update stubs only bases on traces."""
sig = Signature.from_callable(UpdateSignatureHelper.has_annos)
sig = update_signature_args(
sig, {'a': str, 'b': bool}, has_self=False, existing_annotation_strategy=ExistingAnnotationStrategy.IGNORE)
params = [
Parameter('a', Parameter.POSITIONAL_OR_KEYWORD, annotation=str),
Parameter('b', Parameter.POSITIONAL_OR_KEYWORD, annotation=bool),
]
assert sig == Signature(parameters=params, return_annotation=int)
def test_update_self_ignore_existing_anno(self):
"""Don't annotate first arg of instance methods if asked to ignore"""
sig = Signature.from_callable(UpdateSignatureHelper.an_instance_method)
sig = update_signature_args(sig, {'self': UpdateSignatureHelper}, has_self=True,
existing_annotation_strategy=ExistingAnnotationStrategy.IGNORE)
expected = Signature(parameters=[Parameter('self', Parameter.POSITIONAL_OR_KEYWORD)])
assert sig == expected
def test_update_arg_ignore_existing_anno_None(self):
"""Update arg annotations from types"""
sig = Signature.from_callable(UpdateSignatureHelper.has_annos)
sig = update_signature_args(
sig, {'a': None, 'b': int}, has_self=False, existing_annotation_strategy=ExistingAnnotationStrategy.IGNORE)
params = [
Parameter('a', Parameter.POSITIONAL_OR_KEYWORD, annotation=inspect.Parameter.empty),
Parameter('b', Parameter.POSITIONAL_OR_KEYWORD, annotation=int),
]
assert sig == Signature(parameters=params, return_annotation=int)
def test_update_arg_avoid_incompatible_anno(self):
"""Can generate stub with no annotations where they already exist in the source."""
sig = Signature.from_callable(UpdateSignatureHelper.has_annos)
sig = update_signature_args(
sig, {'a': int, 'b': int}, has_self=False, existing_annotation_strategy=ExistingAnnotationStrategy.OMIT)
params = [
Parameter('a', Parameter.POSITIONAL_OR_KEYWORD, annotation=inspect.Parameter.empty),
Parameter('b', Parameter.POSITIONAL_OR_KEYWORD, annotation=int)
]
assert sig == Signature(parameters=params, return_annotation=int)
| TestUpdateSignatureArgs |
python | huggingface__transformers | src/transformers/models/switch_transformers/modular_switch_transformers.py | {
"start": 7806,
"end": 7865
} | class ____(T5LayerNorm):
pass
| SwitchTransformersLayerNorm |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/merge.py | {
"start": 13171,
"end": 14024
} | class ____(_Merge):
"""Layer that computes the minimum (element-wise) a list of inputs.
It takes as input a list of tensors, all of the same shape, and returns
a single tensor (also of the same shape).
>>> tf.keras.layers.Minimum()([np.arange(5).reshape(5, 1),
... np.arange(5, 10).reshape(5, 1)])
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[0],
[1],
[2],
[3],
[4]])>
>>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))
>>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))
>>> minned = tf.keras.layers.Minimum()([x1, x2])
>>> minned.shape
TensorShape([5, 8])
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = math_ops.minimum(output, inputs[i])
return output
| Minimum |
python | keras-team__keras | guides/functional_api.py | {
"start": 27073,
"end": 28215
} | class ____(layers.Layer):
def __init__(self):
super().__init__()
self.units = units
self.projection_1 = layers.Dense(units=units, activation="tanh")
self.projection_2 = layers.Dense(units=units, activation="tanh")
self.classifier = layers.Dense(1)
def call(self, inputs):
outputs = []
state = ops.zeros(shape=(inputs.shape[0], self.units))
for t in range(inputs.shape[1]):
x = inputs[:, t, :]
h = self.projection_1(x)
y = h + self.projection_2(state)
state = y
outputs.append(y)
features = ops.stack(outputs, axis=1)
return self.classifier(features)
# Note that you specify a static batch size for the inputs with the `batch_shape`
# arg, because the inner computation of `CustomRNN` requires a static batch size
# (when you create the `state` zeros tensor).
inputs = keras.Input(batch_shape=(batch_size, timesteps, input_dim))
x = layers.Conv1D(32, 3)(inputs)
outputs = CustomRNN()(x)
model = keras.Model(inputs, outputs)
rnn_model = CustomRNN()
_ = rnn_model(ops.zeros((1, 10, 5)))
| CustomRNN |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/code_pointer.py | {
"start": 8844,
"end": 9622
} | class ____(CodePointer, LegacyNamedTupleMixin):
module: str
attribute: str
working_directory: Optional[str] = None
def load_target(self) -> object:
module = load_python_module(self.module, self.working_directory)
return _load_target_from_module(
module, self.attribute, f"in module {self.module}. dir: {dir(module)}"
)
def describe(self) -> str:
return f"from {self.module} import {self.attribute}"
def get_python_file_from_target(target: object) -> Optional[str]:
module = inspect.getmodule(target)
python_file = getattr(module, "__file__", None)
if not python_file:
return None
return os.path.abspath(python_file)
@whitelist_for_serdes
@record_custom(checked=False)
| PackageCodePointer |
python | walkccc__LeetCode | solutions/3450. Maximum Students on a Single Bench/3450.py | {
"start": 0,
"end": 281
} | class ____:
def maxStudentsOnBench(self, students: list[list[int]]) -> int:
benchToStudents = collections.defaultdict(set)
for studentId, benchId in students:
benchToStudents[benchId].add(studentId)
return max(map(len, benchToStudents.values()), default=0)
| Solution |
python | wandb__wandb | hatch_build.py | {
"start": 820,
"end": 7516
} | class ____(BuildHookInterface):
@override
def initialize(self, version: str, build_data: Dict[str, Any]) -> None:
if self.target_name == "wheel":
self._prepare_wheel(build_data)
def _prepare_wheel(self, build_data: Dict[str, Any]) -> None:
build_data["tag"] = f"py3-none-{self._get_platform_tag()}"
artifacts: list[str] = build_data["artifacts"]
artifacts.extend(self._build_wandb_core())
if self._include_gpu_stats():
artifacts.extend(self._build_gpu_stats())
def _get_platform_tag(self) -> str:
"""Returns the platform tag for the current platform."""
# Replace dots, spaces and dashes with underscores following
# https://packaging.python.org/en/latest/specifications/platform-compatibility-tags/#platform-tag
platform_tag = re.sub("[-. ]", "_", sysconfig.get_platform())
# On macOS versions >=11, pip expects the minor version to be 0:
# https://github.com/pypa/packaging/issues/435
#
# You can see the list of tags that pip would support on your machine
# using `pip debug --verbose`. On my macOS, get_platform() returns
# 14.1, but `pip debug --verbose` reports only these py3 tags with 14:
#
# * py3-none-macosx_14_0_arm64
# * py3-none-macosx_14_0_universal2
#
# We do this remapping here because otherwise, it's possible for `pip wheel`
# to successfully produce a wheel that you then cannot `pip install` on the
# same machine.
macos_match = re.fullmatch(r"macosx_(\d+_\d+)_(\w+)", platform_tag)
if macos_match:
major, _ = macos_match.group(1).split("_")
if int(major) >= 11:
arch = macos_match.group(2)
platform_tag = f"macosx_{major}_0_{arch}"
return platform_tag
def _include_gpu_stats(self) -> bool:
"""Returns whether we should produce a wheel with gpu_stats."""
return not _get_env_bool(_WANDB_BUILD_SKIP_GPU_STATS, default=False)
def _get_and_require_cargo_binary(self) -> pathlib.Path:
cargo = shutil.which("cargo")
if not cargo:
self.app.abort(
"Did not find the 'cargo' binary. You need Rust to build wandb"
" from source. See https://www.rust-lang.org/tools/install.",
)
raise AssertionError("unreachable")
return pathlib.Path(cargo)
def _build_gpu_stats(self) -> List[str]:
output = pathlib.Path("wandb", "bin", "gpu_stats")
if self._target_platform().goos == "windows":
output = output.with_suffix(".exe")
self.app.display_waiting("Building gpu_stats Rust binary...")
hatch_gpu_stats.build_gpu_stats(
cargo_binary=self._get_and_require_cargo_binary(),
output_path=output,
)
return [output.as_posix()]
def _git_commit_sha(self) -> str:
import subprocess
src_dir = pathlib.Path(__file__).parent
try:
return (
subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=src_dir)
.decode("utf-8")
.strip()
)
except Exception:
return ""
def _build_wandb_core(self) -> List[str]:
output = pathlib.Path("wandb", "bin", "wandb-core")
with_coverage = _get_env_bool(_WANDB_BUILD_COVERAGE, default=False)
with_race_detection = _get_env_bool(_WANDB_BUILD_GORACEDETECT, default=False)
with_cgo = _get_env_bool(_WANDB_ENABLE_CGO, default=False)
plat = self._target_platform()
self.app.display_waiting("Building wandb-core Go binary...")
hatch_core.build_wandb_core(
go_binary=self._get_and_require_go_binary(),
output_path=output,
with_code_coverage=with_coverage,
with_race_detection=with_race_detection,
with_cgo=with_cgo,
wandb_commit_sha=os.getenv(_WANDB_RELEASE_COMMIT) or self._git_commit_sha(),
target_system=plat.goos,
target_arch=plat.goarch,
)
# NOTE: as_posix() is used intentionally. Hatch expects forward slashes
# even on Windows.
return [output.as_posix()]
def _get_and_require_go_binary(self) -> pathlib.Path:
go = shutil.which("go")
if not go:
self.app.abort(
"Did not find the 'go' binary. You need Go to build wandb"
" from source. See https://go.dev/doc/install.",
)
raise AssertionError("unreachable")
return pathlib.Path(go)
def _target_platform(self) -> "TargetPlatform":
"""Returns the platform we're building for (for cross-compilation)."""
# Checking sysconfig.get_platform() is the "standard" way of getting the
# target platform in Python cross-compilation. Build tools like
# cibuildwheel control its output by setting the undocumented
# _PYTHON_HOST_PLATFORM environment variable which is also a good way
# of manually testing this function.
plat = sysconfig.get_platform()
match = re.match(
r"(win|linux|macosx-.+)-(aarch64|arm64|x86_64|amd64)",
plat,
)
if match:
if match.group(1).startswith("macosx"):
goos = "darwin"
elif match.group(1) == "win":
goos = "windows"
else:
goos = match.group(1)
goarch = _to_goarch(match.group(2))
return TargetPlatform(
goos=goos,
goarch=goarch,
)
self.app.display_warning(
f"Failed to parse sysconfig.get_platform() ({plat}); disabling"
" cross-compilation.",
)
os = platform.system().lower()
if os in ("windows", "darwin", "linux"):
goos = os
else:
goos = ""
goarch = _to_goarch(platform.machine().lower())
return TargetPlatform(
goos=goos,
goarch=goarch,
)
def _get_env_bool(name: str, default: bool) -> bool:
"""Returns the value of a boolean environment variable."""
value = os.getenv(name)
if value is None:
return default
elif value.lower() in ("1", "true"):
return True
elif value.lower() in ("0", "false"):
return False
else:
raise ValueError(
f"Environment variable '{name}' has invalid value '{value}'"
" expected one of {1,true,0,false}."
)
@dataclasses.dataclass(frozen=True)
| CustomBuildHook |
python | pytorch__pytorch | torch/fx/_graph_pickler.py | {
"start": 18627,
"end": 19828
} | class ____(_OpPickleData):
"""
Supports pickling a set of standard/common functions
These must be prefixed with the full namespace in order to properly
be pickled (i.e `einops.rearrange` and not `from einops import rearrange`)
"""
# Static variable listing supported root names
SUPPORTED_ROOTS = ("builtins.", "math.", "torch.", "operator.", "einops.")
def __init__(self, root: str, name: str) -> None:
self.root = root
self.name = name
def unpickle(self, unpickle_state: _UnpickleState) -> object:
if self.root == "builtins":
return __builtins__.get(self.name) # type: ignore[attr-defined]
elif self.root == "math":
import math
return self._getattr_by_name(math, self.name)
elif self.root == "torch":
return self._getattr_by_name(torch, self.name)
elif self.root == "operator":
import operator
return self._getattr_by_name(operator, self.name)
elif self.root == "einops":
import einops
return self._getattr_by_name(einops, self.name)
else:
raise NotImplementedError
| _OpFunctionPickleData |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 6474,
"end": 6896
} | class ____(PrefectBaseModel):
"""Filter by `FlowRun.expected_start_time`."""
before_: Optional[DateTime] = Field(
default=None,
description="Only include flow runs scheduled to start at or before this time",
)
after_: Optional[DateTime] = Field(
default=None,
description="Only include flow runs scheduled to start at or after this time",
)
| FlowRunFilterExpectedStartTime |
python | sanic-org__sanic | guide/webapp/display/search/search.py | {
"start": 189,
"end": 2145
} | class ____:
STOP_WORDS: ClassVar[set[str]] = set(
"a about above after again against all am an and any are aren't as at be because been before being below between both but by can't cannot could couldn't did didn't do does doesn't doing don't down during each few for from further had hadn't has hasn't have haven't having he he'd he'll he's her here here's hers herself him himself his how how's i i'd i'll i'm i've if in into is isn't it it's its itself let's me more most mustn't my myself no nor not of off on once only or other ought our ours ourselves out over own same shan't she she'd she'll she's should shouldn't so some such than that that's the their theirs them themselves then there there's these they they'd they'll they're they've this those through to too under until up very was wasn't we we'd we'll we're we've were weren't what what's when when's where where's which while who who's whom why why's with won't would wouldn't you you'd you'll you're you've your yours yourself yourselves".split() # noqa: E501
)
PREFIXES = set("auto be fore over re un under".split())
SUFFIXES = set(
"able al ance ant ate ed en er ful hood ing ion ish ity ive ize less ly ment ness ous ship sion tion y".split() # noqa: E501
)
VOWELS = set("aeiou")
PLURALIZATION = set("s es ies".split())
def stem(self, word: str) -> str:
if word in self.STOP_WORDS:
return word
if word in self.PREFIXES:
return word
for suffix in self.SUFFIXES | self.PLURALIZATION:
if word.endswith(suffix):
return self._stem(word[: -len(suffix)])
return word
def _stem(self, word: str) -> str:
if word.endswith("e"):
return word[:-1]
if word.endswith("y") and word[-2] not in self.VOWELS:
return word[:-1]
return word
def __call__(self, word: str) -> str:
return self.stem(word)
| Stemmer |
python | redis__redis-py | redis/typing.py | {
"start": 1779,
"end": 1881
} | class ____(Protocol):
def execute_command(self, *args, **options) -> ResponseT: ...
| CommandsProtocol |
python | weaviate__weaviate-python-client | weaviate/collections/classes/internal.py | {
"start": 5658,
"end": 6620
} | class ____(Generic[P, R]):
"""The return type of a query within the `generate` namespace of a collection."""
__generated: Optional[str]
objects: List[GenerativeObject[P, R]]
generative: Optional[GenerativeGrouped]
# init required because of nuances of dataclass when defining @property generated and private var __generated
def __init__(
self,
generated: Optional[str],
objects: List[GenerativeObject[P, R]],
generative: Optional[GenerativeGrouped],
) -> None:
self.__generated = generated
self.objects = objects
self.generative = generative
@property
@deprecated(
"The generated field is deprecated. Use generative.text instead.", category=None
) # todo: turn into a runtime warning in the future
def generated(self) -> Optional[str]:
"""The grouped generated text of the objects."""
return self.__generated
@dataclass
| GenerativeReturn |
python | getsentry__sentry | src/sentry/models/statistical_detectors.py | {
"start": 317,
"end": 822
} | class ____(Enum):
ENDPOINT = 0
FUNCTION = 1
@classmethod
def as_choices(cls) -> Sequence[tuple[int, str]]:
return (
(cls.ENDPOINT.value, "endpoint"),
(cls.FUNCTION.value, "function"),
)
def abbreviate(self) -> str:
if self is RegressionType.ENDPOINT:
return "e"
elif self is RegressionType.FUNCTION:
return "f"
raise ValueError(f"Unknown regression type: {self}")
@region_silo_model
| RegressionType |
python | dask__dask | dask/blockwise.py | {
"start": 14819,
"end": 53772
} | class ____(Layer):
"""Tensor Operation
This is a lazily constructed mapping for tensor operation graphs.
This defines a dictionary using an operation and an indexing pattern.
It is built for many operations like elementwise, transpose, tensordot, and
so on. We choose to keep these as symbolic mappings rather than raw
dictionaries because we are able to fuse them during optimization,
sometimes resulting in much lower overhead.
Parameters
----------
output: str
The name of the output collection. Used in keynames
output_indices: tuple
The output indices, like ``('i', 'j', 'k')`` used to determine the
structure of the block computations
dsk: dict
A small graph to apply per-output-block. May include keys from the
input indices.
indices: tuple[tuple[str, tuple[str, ...] | None], ...]
An ordered mapping from input key name, like ``'x'``
to input indices, like ``('i', 'j')``
Or includes literals, which have ``None`` for an index value.
In place of input-key names, the first tuple element may also be a
``BlockwiseDep`` object.
numblocks: Mapping[key, Sequence[int]]
Number of blocks along each dimension for each input
concatenate: bool
Whether or not to pass contracted dimensions as a list of inputs or a
single input to the block function
new_axes: Mapping
New index dimensions that may have been created and their size,
e.g. ``{'j': 2, 'k': 3}``
output_blocks: set[tuple[int, ...]]
Specify a specific set of required output blocks. Since the graph
will only contain the necessary tasks to generate these outputs,
this kwarg can be used to "cull" the abstract layer (without needing
to materialize the low-level graph).
annotations: dict (optional)
Layer annotations
io_deps: dict[str, BlockwiseDep] (optional)
Dictionary containing the mapping between "place-holder" collection
keys and ``BlockwiseDep``-based objects.
**WARNING**: This argument should only be used internally (for culling,
fusion and cloning of existing Blockwise layers). Explicit use of this
argument will be deprecated in the future.
See Also
--------
dask.blockwise.blockwise
dask.array.blockwise
"""
output: str
output_indices: tuple[str, ...]
task: Task
indices: tuple[tuple[str | TaskRef, tuple[str, ...] | None], ...]
numblocks: Mapping[str, Sequence[int]]
concatenate: bool | None
new_axes: Mapping[str, int]
output_blocks: set[tuple[int, ...]] | None
io_deps: Mapping[str, BlockwiseDep]
def __init__(
self,
output: str,
output_indices: Iterable[str],
task: Task,
indices: Iterable[tuple[str | TaskRef | BlockwiseDep, Iterable[str] | None]],
numblocks: Mapping[str, Sequence[int]],
concatenate: bool | None = None,
new_axes: Mapping[str, int] | None = None,
output_blocks: set[tuple[int, ...]] | None = None,
annotations: Mapping[str, Any] | None = None,
io_deps: Mapping[str, BlockwiseDep] | None = None,
):
super().__init__(annotations=annotations)
self.output = output
self.output_indices = tuple(output_indices)
self.output_blocks = output_blocks
self.task = task
assert isinstance(task, Task)
# Remove `BlockwiseDep` arguments from input indices
# and add them to `self.io_deps`.
# TODO: Remove `io_deps` and handle indexable objects
# in `self.indices` throughout `Blockwise`.
_tmp_indices = []
numblocks = dict(numblocks)
io_deps = dict(io_deps or {})
if indices:
for dep, ind in indices:
if ind is not None:
# FIXME: The Blockwise API is a little weird this way
assert not isinstance(
dep, TaskRef
), "TaskRef objects are only allowed for broadcasted inputs with None as index."
if isinstance(dep, BlockwiseDep):
name = tokenize(dep)
io_deps[name] = dep
numblocks[name] = dep.numblocks
else:
name = dep # type: ignore[assignment]
_tmp_indices.append((name, tuple(ind) if ind is not None else ind))
self.numblocks = numblocks
self.io_deps = io_deps or {}
self.indices = tuple(_tmp_indices)
# optimize_blockwise won't merge where `concatenate` doesn't match, so
# enforce a canonical value if there are no axes for reduction.
output_indices_set = set(self.output_indices)
if concatenate is not None and all(
i in output_indices_set
for name, ind in self.indices
if ind is not None
for i in ind
):
concatenate = None
self.concatenate = concatenate
self.new_axes = new_axes or {}
@property
def has_legacy_tasks(self):
return False
@property
def dims(self):
"""Returns a dictionary mapping between each index specified in
`self.indices` and the number of output blocks for that indice.
"""
if not hasattr(self, "_dims"):
self._dims = _make_dims(self.indices, self.numblocks, self.new_axes)
return self._dims
def __repr__(self):
return f"Blockwise<{self.indices} -> {self.output}>"
@property
def _dict(self):
if hasattr(self, "_cached_dict"):
return self._cached_dict["dsk"]
else:
keys = tuple(map(blockwise_token, range(len(self.indices))))
dsk = _make_blockwise_graph(
self.task,
self.output,
self.output_indices,
*list(toolz.concat(self.indices)),
new_axes=self.new_axes,
numblocks=self.numblocks,
concatenate=self.concatenate,
output_blocks=self.output_blocks,
dims=self.dims,
io_deps=self.io_deps,
keys=keys,
)
self._cached_dict = {"dsk": dsk}
return self._cached_dict["dsk"]
def get_output_keys(self):
if self.output_blocks:
# Culling has already generated a list of output blocks
return {(self.output, *p) for p in self.output_blocks}
# Return all possible output keys (no culling)
return {
(self.output, *p)
for p in itertools.product(
*[range(self.dims[i]) for i in self.output_indices]
)
}
def __getitem__(self, key):
return self._dict[key]
def __iter__(self):
return iter(self._dict)
def __len__(self) -> int:
# same method as `get_output_keys`, without manifesting the keys themselves
return (
len(self.output_blocks)
if self.output_blocks
else prod(self.dims[i] for i in self.output_indices)
)
def is_materialized(self):
return hasattr(self, "_cached_dict")
def _cull_dependencies(self, output_blocks):
"""Determine the necessary dependencies to produce `output_blocks`.
This method does not require graph materialization.
"""
# Check `concatenate` option
concatenate = None
if self.concatenate is True:
from dask.array.core import concatenate_axes as concatenate
# Generate coordinate map
(coord_maps, concat_axes, dummies) = _get_coord_mapping(
self.dims,
self.output_indices,
self.numblocks,
self.indices,
concatenate,
)
# Gather constant dependencies (for all output keys)
const_deps = set()
for arg, _ in self.indices:
if isinstance(arg, TaskRef):
const_deps.add(arg.key)
# Get dependencies for each output block
key_deps = {}
for out_coords in output_blocks:
deps = set()
coords = out_coords + dummies
for cmap, axes, (arg, ind) in zip(coord_maps, concat_axes, self.indices):
if ind is not None and arg not in self.io_deps:
arg_coords = tuple(coords[c] for c in cmap)
if axes:
tups = _lol_product((arg,), arg_coords)
deps.update(flatten(tups))
if concatenate:
tups = (concatenate, tups, axes)
else:
tups = (arg,) + arg_coords
deps.add(tups)
key_deps[(self.output,) + out_coords] = deps | const_deps
# Add valid-key dependencies from io_deps
for key, io_dep in self.io_deps.items():
if io_dep.produces_keys:
for out_coords in output_blocks:
key = (self.output,) + out_coords
valid_key_dep = io_dep[out_coords]
if isinstance(valid_key_dep, TaskRef):
valid_key_dep = valid_key_dep.key
key_deps[key] |= {valid_key_dep}
return key_deps
def _cull(self, output_blocks):
return Blockwise(
self.output,
self.output_indices,
self.task,
self.indices,
self.numblocks,
concatenate=self.concatenate,
new_axes=self.new_axes,
output_blocks=output_blocks,
annotations=self.annotations,
io_deps=self.io_deps,
)
def cull(
self, keys: set, all_hlg_keys: Iterable
) -> tuple[Layer, Mapping[Key, set[Key]]]:
# Culling is simple for Blockwise layers. We can just
# collect a set of required output blocks (tuples), and
# only construct graph for these blocks in `make_blockwise_graph`
output_blocks: set[tuple[int, ...]] = set()
for key in keys:
if key[0] == self.output:
output_blocks.add(key[1:])
culled_deps = self._cull_dependencies(output_blocks)
out_size_iter = (self.dims[i] for i in self.output_indices)
if prod(out_size_iter) != len(culled_deps):
culled_layer = self._cull(output_blocks)
return culled_layer, culled_deps
else:
return self, culled_deps
def clone(
self,
keys: set[Key],
seed: Hashable,
bind_to: Key | None = None,
) -> tuple[Layer, bool]:
names = {get_name_from_key(k) for k in keys}
# We assume that 'keys' will contain either all or none of the output keys of
# each of the layers, because clone/bind are always invoked at collection level.
# Asserting this is very expensive, so we only check it during unit tests.
if "PYTEST_CURRENT_TEST" in os.environ:
assert not self.get_output_keys() - keys
for name, nb in self.numblocks.items():
if name in names:
for block in product(*(list(range(nbi)) for nbi in nb)):
assert (name, *block) in keys
is_leaf = True
indices = []
k: Key | TaskRef
for k, idxv in self.indices:
# Note: k may not be a key and thus not be hashable in the case where
# one or more args of blockwise() are sequences of literals;
# e.g. k = (list, [0, 1, 2])
# See https://github.com/dask/dask/issues/8978
if ishashable(k) and k in names:
is_leaf = False
k = clone_key(k, seed) # type: ignore[type-var]
elif isinstance(k, TaskRef) and k.key in names:
is_leaf = False
k = TaskRef(clone_key(k.key, seed))
indices.append((k, idxv))
numblocks: dict[str, Sequence[int]] = {}
for k, nbv in self.numblocks.items():
if k in names:
is_leaf = False
k = clone_key(k, seed)
numblocks[k] = nbv
if bind_to is not None and is_leaf:
from dask.graph_manipulation import chunks
# It's always a Delayed generated by dask.graph_manipulation.checkpoint;
# the layer name always matches the key
assert isinstance(bind_to, str)
newtask = Task(
clone_key(self.task.key, seed),
chunks.bind,
self.task,
TaskRef(blockwise_token(len(indices))),
_data_producer=self.task.data_producer,
)
indices.append((TaskRef(bind_to), None))
else:
newtask = self.task.substitute({}, key=clone_key(self.task.key, seed))
return (
Blockwise(
output=clone_key(self.output, seed),
output_indices=self.output_indices,
task=newtask,
indices=indices,
numblocks=numblocks,
concatenate=self.concatenate,
new_axes=self.new_axes,
output_blocks=self.output_blocks,
annotations=self.annotations,
io_deps=self.io_deps,
),
(bind_to is not None and is_leaf),
)
def _get_coord_mapping(
dims,
out_indices,
numblocks,
argpairs,
concatenate,
):
"""Calculate coordinate mapping for graph construction.
This function handles the high-level logic behind Blockwise graph
construction. The output is a tuple containing: The mapping between
input and output block coordinates (`coord_maps`), the axes along
which to concatenate for each input (`concat_axes`), and the dummy
indices needed for broadcasting (`dummies`).
Used by `make_blockwise_graph` and `Blockwise._cull_dependencies`.
Parameters
----------
dims : dict
Mapping between each index specified in `argpairs` and
the number of output blocks for that index. Corresponds
to the Blockwise `dims` attribute.
out_indices : tuple
Corresponds to the Blockwise `output_indices` attribute.
numblocks : dict
Corresponds to the Blockwise `numblocks` attribute.
argpairs : tuple
Corresponds to the Blockwise `indices` attribute.
concatenate : bool
Corresponds to the Blockwise `concatenate` attribute.
"""
block_names = set()
all_indices = set()
for name, ind in argpairs:
if ind is not None:
block_names.add(name)
for x in ind:
all_indices.add(x)
assert set(numblocks) == block_names, (numblocks, block_names)
dummy_indices = all_indices - set(out_indices)
# For each position in the output space, we'll construct a
# "coordinate set" that consists of
# - the output indices
# - the dummy indices
# - the dummy indices, with indices replaced by zeros (for broadcasting), we
# are careful to only emit a single dummy zero when concatenate=True to not
# concatenate the same array with itself several times.
# - a 0 to assist with broadcasting.
index_pos, zero_pos = {}, {}
for i, ind in enumerate(out_indices):
index_pos[ind] = i
zero_pos[ind] = -1
_dummies_list = []
for i, ind in enumerate(dummy_indices):
index_pos[ind] = 2 * i + len(out_indices)
zero_pos[ind] = 2 * i + 1 + len(out_indices)
reps = 1 if concatenate else dims[ind]
_dummies_list.append([list(range(dims[ind])), [0] * reps])
# ([0, 1, 2], [0, 0, 0], ...) For a dummy index of dimension 3
dummies = tuple(itertools.chain.from_iterable(_dummies_list))
dummies += (0,)
# For each coordinate position in each input, gives the position in
# the coordinate set.
coord_maps = []
# Axes along which to concatenate, for each input
concat_axes = []
for arg, ind in argpairs:
if ind is not None:
coord_maps.append(
[
zero_pos[i] if nb == 1 else index_pos[i]
for i, nb in zip(ind, numblocks[arg])
]
)
concat_axes.append([n for n, i in enumerate(ind) if i in dummy_indices])
else:
coord_maps.append(None)
concat_axes.append(None)
return coord_maps, concat_axes, dummies
def _make_blockwise_graph(
task,
output,
out_indices,
*arrind_pairs,
numblocks=None,
concatenate=None,
new_axes=None,
output_blocks=None,
dims=None,
io_deps=None,
keys=None,
):
if numblocks is None:
raise ValueError("Missing required numblocks argument.")
new_axes = new_axes or {}
io_deps = io_deps or {}
argpairs = list(toolz.partition(2, arrind_pairs))
if concatenate is True:
from dask.array.core import concatenate_axes as concatenate
# Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
dims = dims or _make_dims(argpairs, numblocks, new_axes)
# Generate the abstract "plan" before constructing
# the actual graph
(coord_maps, concat_axes, dummies) = _get_coord_mapping(
dims,
out_indices,
numblocks,
argpairs,
concatenate,
)
# Apply Culling.
# Only need to construct the specified set of output blocks.
# Note that we must convert itertools.product to list,
# because we may need to loop through output_blocks more than
# once below (itertools.product already uses an internal list,
# so this is not a memory regression)
output_blocks = output_blocks or list(
itertools.product(*[range(dims[i]) for i in out_indices])
)
from dask._task_spec import DataNode
dsk = {}
for out_coords in output_blocks:
this_task = task
coords = out_coords + dummies
args = []
for cmap, axes, (arg, ind), key in zip(
coord_maps, concat_axes, argpairs, keys, strict=True
):
if key not in task.dependencies:
# FIXME: This feels like a bug
continue
if ind is None:
if not isinstance(arg, (GraphNode, TaskRef)):
this_task = this_task.substitute({key: DataNode(None, arg)})
else:
this_task = this_task.substitute({key: arg})
continue
arg_coords = tuple(coords[c] for c in cmap)
if arg in io_deps:
val = parse_input(io_deps[arg].get(arg_coords, arg_coords))
if not isinstance(val, GraphNode):
val = DataNode(None, val)
this_task = this_task.substitute({key: val})
else:
subs = {}
if axes:
tups = _lol_product((arg,), arg_coords, as_taskref=True)
if concatenate:
tups = Task(key, concatenate, tups, axes)
subs[key] = tups
else:
subs[key] = (arg, *arg_coords)
this_task = this_task.substitute(subs)
new_key = (output,) + out_coords
assert isinstance(this_task, Task)
dsk[new_key] = Task.fuse(this_task, *args, key=new_key)
return dsk
def _lol_product(head, values, as_taskref=False):
"""List of list of tuple keys, similar to `itertools.product`.
Parameters
----------
head : tuple
Prefix prepended to all results.
values : sequence
Mix of singletons and lists. Each list is substituted with every
possible value and introduces another level of list in the output.
Examples
--------
>>> _lol_product(('x',), (1, 2, 3))
('x', 1, 2, 3)
>>> _lol_product(('x',), (1, [2, 3], 4, [5, 6])) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 2, 4, 5), ('x', 1, 2, 4, 6)],
[('x', 1, 3, 4, 5), ('x', 1, 3, 4, 6)]]
"""
if not values:
if as_taskref:
return TaskRef(head)
return head
elif isinstance(values[0], list):
# FIXME: Constructor of List is odd
if as_taskref:
return List(
*(
_lol_product(head + (x,), values[1:], as_taskref=as_taskref)
for x in values[0]
)
)
else:
return list(
_lol_product(head + (x,), values[1:], as_taskref=as_taskref)
for x in values[0]
)
else:
return _lol_product(head + (values[0],), values[1:], as_taskref=as_taskref)
def lol_tuples(head, ind, values, dummies):
"""List of list of tuple keys
Parameters
----------
head : tuple
The known tuple so far
ind : Iterable
An iterable of indices not yet covered
values : dict
Known values for non-dummy indices
dummies : dict
Ranges of values for dummy indices
Examples
--------
>>> lol_tuples(('x',), 'ij', {'i': 1, 'j': 0}, {})
('x', 1, 0)
>>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
[('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> lol_tuples(('x',), 'ijk', {'i': 1}, {'j': [0, 1, 2], 'k': [0, 1]}) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 1, 0), ('x', 1, 1, 1)],
[('x', 1, 2, 0), ('x', 1, 2, 1)]]
"""
if not ind:
return head
if ind[0] not in dummies:
return lol_tuples(head + (values[ind[0]],), ind[1:], values, dummies)
else:
return [
lol_tuples(head + (v,), ind[1:], values, dummies) for v in dummies[ind[0]]
]
def optimize_blockwise(graph, keys=()):
"""High level optimization of stacked Blockwise layers
For operations that have multiple Blockwise operations one after the other, like
``x.T + 123`` we can fuse these into a single Blockwise operation. This happens
before any actual tasks are generated, and so can reduce overhead.
This finds groups of Blockwise operations that can be safely fused, and then
passes them to ``rewrite_blockwise`` for rewriting.
Parameters
----------
graph : HighLevelGraph
keys : Iterable
The keys of all outputs of all collections.
Used to make sure that we don't fuse a layer needed by an output
Returns
-------
HighLevelGraph
See Also
--------
rewrite_blockwise
"""
out = _optimize_blockwise(graph, keys=keys)
while out.dependencies != graph.dependencies:
graph = out
out = _optimize_blockwise(graph, keys=keys)
return out
def _optimize_blockwise(full_graph, keys=()):
keep = {k[0] if type(k) is tuple else k for k in keys}
layers = full_graph.layers
dependents = reverse_dict(full_graph.dependencies)
roots = {k for k in full_graph.layers if not dependents.get(k)}
stack = list(roots)
out = {}
dependencies = {}
seen = set()
io_names = set()
while stack:
layer = stack.pop()
if layer in seen or layer not in layers:
continue
seen.add(layer)
# Outer loop walks through possible output Blockwise layers
if isinstance(layers[layer], Blockwise):
blockwise_layers = {layer}
deps = set(blockwise_layers)
io_names |= layers[layer].io_deps.keys()
while deps: # we gather as many sub-layers as we can
dep = deps.pop()
if dep not in layers:
stack.append(dep)
continue
if not isinstance(layers[dep], Blockwise):
stack.append(dep)
continue
if dep != layer and dep in keep:
stack.append(dep)
continue
if layers[dep].concatenate != layers[layer].concatenate:
stack.append(dep)
continue
if (
sum(k == dep for k, ind in layers[layer].indices if ind is not None)
> 1
):
stack.append(dep)
continue
if blockwise_layers and not _can_fuse_annotations(
layers[next(iter(blockwise_layers))].annotations,
layers[dep].annotations,
):
stack.append(dep)
continue
# passed everything, proceed
blockwise_layers.add(dep)
# traverse further to this child's children
output_indices = set(layers[dep].output_indices)
input_indices = {
i for _, ind in layers[dep].indices if ind for i in ind
}
is_io_superset = output_indices.issuperset(input_indices)
for d in full_graph.dependencies.get(dep, ()):
# Don't allow reductions to proceed
if is_io_superset and len(dependents[d]) <= 1:
deps.add(d)
else:
stack.append(d)
# Merge these Blockwise layers into one
new_layer = rewrite_blockwise([layers[l] for l in blockwise_layers])
out[layer] = new_layer
# Get the new (external) dependencies for this layer.
# This corresponds to the dependencies defined in
# full_graph.dependencies and are not in blockwise_layers
new_deps = set()
for l in blockwise_layers:
new_deps |= set(
{
d
for d in full_graph.dependencies[l]
if d not in blockwise_layers and d in full_graph.dependencies
}
)
for k, v in new_layer.indices:
if v is None:
new_deps |= keys_in_tasks(full_graph.dependencies, [k])
elif k not in io_names:
new_deps.add(k)
dependencies[layer] = new_deps
else:
out[layer] = layers[layer]
dependencies[layer] = full_graph.dependencies.get(layer, set())
stack.extend(full_graph.dependencies.get(layer, ()))
return HighLevelGraph(out, dependencies)
def _unique_dep(dep, ind):
# Append blockwise index information to dependency name
return dep + "_" + "_".join(str(i) for i in list(ind))
def _can_fuse_annotations(a: dict | None, b: dict | None) -> bool:
"""
Treat the special annotation keys as fusable, since we can apply simple
rules to capture their intent in a fused layer.
"""
if a == b:
return True
if dask.config.get("optimization.annotations.fuse") is False:
return False
fusable = {"retries", "priority", "resources", "workers", "allow_other_workers"}
return (not a or all(k in fusable for k in a)) and (
not b or all(k in fusable for k in b)
)
def _fuse_annotations(*args: dict) -> dict:
"""
Given an iterable of annotations dictionaries, fuse them according
to some simple rules.
"""
# First, do a basic dict merge -- we are presuming that these have already
# been gated by `_can_fuse_annotations`.
annotations = toolz.merge(*args)
# Max of layer retries
retries = [a["retries"] for a in args if "retries" in a]
if retries:
annotations["retries"] = max(retries)
# Max of layer priorities
priorities = [a["priority"] for a in args if "priority" in a]
if priorities:
annotations["priority"] = max(priorities)
# Max of all the layer resources
resources = [a["resources"] for a in args if "resources" in a]
if resources:
annotations["resources"] = toolz.merge_with(max, *resources)
# Intersection of all the worker restrictions
workers = [a["workers"] for a in args if "workers" in a]
if workers:
annotations["workers"] = list(set.intersection(*[set(w) for w in workers]))
# More restrictive of allow_other_workers
allow_other_workers = [
a["allow_other_workers"] for a in args if "allow_other_workers" in a
]
if allow_other_workers:
annotations["allow_other_workers"] = all(allow_other_workers)
return annotations
def rewrite_blockwise(inputs):
"""Rewrite a stack of Blockwise expressions into a single blockwise expression
Given a set of Blockwise layers, combine them into a single layer. The provided
layers are expected to fit well together. That job is handled by
``optimize_blockwise``
Parameters
----------
inputs : list[Blockwise]
Returns
-------
blockwise: Blockwise
See Also
--------
optimize_blockwise
"""
if len(inputs) == 1:
# Fast path: if there's only one input we can just use it as-is.
return inputs[0]
fused_annotations = _fuse_annotations(
*[i.annotations for i in inputs if i.annotations]
)
inputs = {inp.output: inp for inp in inputs}
dependencies = {
inp.output: {d for d, v in inp.indices if v is not None and d in inputs}
for inp in inputs.values()
}
dependents = reverse_dict(dependencies)
new_index_iter = (
c + (str(d) if d else "") # A, B, ... A1, B1, ...
for d in itertools.count()
for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
)
[root] = [k for k, v in dependents.items() if not v]
# Our final results. These will change during fusion below
indices = list(inputs[root].indices)
new_axes = inputs[root].new_axes
concatenate = inputs[root].concatenate
task = inputs[root].task
dsk = {task.key: task}
changed = True
while changed:
changed = False
for i, (dep, current_dep_indices) in enumerate(indices):
if current_dep_indices is None:
continue
if dep not in inputs:
continue
changed = True
# Change dep name to avoid fusing the same dep
# (in different iteration orders) into a single
# subgraph key/dependency
# (see: https://github.com/dask/dask/issues/8535)
local_dep = dep if dep == root else _unique_dep(dep, current_dep_indices)
# Replace _n with dep name in existing tasks
# (inc, _0) -> (inc, 'b')
dsk = {
k: v.substitute({blockwise_token(i): local_dep}) for k, v in dsk.items()
}
# Remove current input from input indices
# [('a', 'i'), ('b', 'i')] -> [('a', 'i')]
indices.pop(i)
sub = {
blockwise_token(i): blockwise_token(i - 1)
for i in range(i + 1, len(indices) + 1)
}
dsk = {k: v.substitute(sub) for k, v in dsk.items()}
# Change new input_indices to match give index from current computation
# [('c', j')] -> [('c', 'i')]
new_indices = inputs[dep].indices
sub = dict(zip(inputs[dep].output_indices, current_dep_indices))
contracted = {
x
for _, j in new_indices
if j is not None
for x in j
if x not in inputs[dep].output_indices
}
extra = dict(zip(contracted, new_index_iter))
sub.update(extra)
new_indices = [(x, index_subs(j, sub)) for x, j in new_indices]
# Update new_axes
for k, v in inputs[dep].new_axes.items():
new_axes[sub[k]] = v
# Bump new inputs up in list
sub = {}
# Map from (id(key), inds or None) -> index in indices. Used to deduplicate indices.
index_map = {(id(k), inds): n for n, (k, inds) in enumerate(indices)}
for ii, index in enumerate(new_indices):
id_key = (id(index[0]), index[1])
if id_key in index_map: # use old inputs if available
sub[blockwise_token(ii)] = blockwise_token(index_map[id_key])
else:
index_map[id_key] = len(indices)
sub[blockwise_token(ii)] = blockwise_token(len(indices))
indices.append(index)
if dep != local_dep:
key = local_dep
else:
key = dep
new_dsk = {key: inputs[dep].task.substitute(sub, key=key)}
# indices.extend(new_indices)
dsk.update(new_dsk)
# De-duplicate indices like [(a, ij), (b, i), (a, ij)] -> [(a, ij), (b, i)]
# Make sure that we map everything else appropriately as we remove inputs
new_indices = []
seen = {}
sub = {} # like {_0: _0, _1: _0, _2: _1}
for i, x in enumerate(indices):
if x[1] is not None and x in seen:
sub[i] = seen[x]
else:
if x[1] is not None:
seen[x] = len(new_indices)
sub[i] = len(new_indices)
new_indices.append(x)
sub = {blockwise_token(k): blockwise_token(v) for k, v in sub.items()}
dsk = {k: v.substitute(sub) for k, v in dsk.items() if k not in sub.keys()}
task = Task.fuse(*dsk.values(), key=root)
indices_check = {k for k, v in indices if v is not None}
numblocks = toolz.merge([inp.numblocks for inp in inputs.values()])
numblocks = {k: v for k, v in numblocks.items() if v is None or k in indices_check}
# Update IO-dependency information
io_deps = {}
for v in inputs.values():
io_deps.update(v.io_deps)
return Blockwise(
root,
inputs[root].output_indices,
task,
new_indices,
numblocks=numblocks,
new_axes=new_axes,
concatenate=concatenate,
annotations=fused_annotations,
io_deps=io_deps,
)
@_deprecated()
def zero_broadcast_dimensions(lol, nblocks):
"""
>>> lol = [('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> nblocks = (4, 1, 2) # note singleton dimension in second place
>>> lol = [[('x', 1, 0, 0), ('x', 1, 0, 1)],
... [('x', 1, 1, 0), ('x', 1, 1, 1)],
... [('x', 1, 2, 0), ('x', 1, 2, 1)]]
>>> zero_broadcast_dimensions(lol, nblocks) # doctest: +SKIP
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 0, 0), ('x', 1, 0, 1)]]
See Also
--------
lol_tuples
"""
f = lambda t: (t[0],) + tuple(0 if d == 1 else i for i, d in zip(t[1:], nblocks))
return homogeneous_deepmap(f, lol)
def broadcast_dimensions(argpairs, numblocks, sentinels=(1, (1,)), consolidate=None):
"""Find block dimensions from arguments
Parameters
----------
argpairs : iterable
name, ijk index pairs
numblocks : dict
maps {name: number of blocks}
sentinels : iterable (optional)
values for singleton dimensions
consolidate : func (optional)
use this to reduce each set of common blocks into a smaller set
Examples
--------
>>> argpairs = [('x', 'ij'), ('y', 'ji')]
>>> numblocks = {'x': (2, 3), 'y': (3, 2)}
>>> broadcast_dimensions(argpairs, numblocks)
{'i': 2, 'j': 3}
Supports numpy broadcasting rules
>>> argpairs = [('x', 'ij'), ('y', 'ij')]
>>> numblocks = {'x': (2, 1), 'y': (1, 3)}
>>> broadcast_dimensions(argpairs, numblocks)
{'i': 2, 'j': 3}
Works in other contexts too
>>> argpairs = [('x', 'ij'), ('y', 'ij')]
>>> d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
>>> broadcast_dimensions(argpairs, d)
{'i': 'Hello', 'j': (2, 3)}
"""
# List like [('i', 2), ('j', 1), ('i', 1), ('j', 2)]
argpairs2 = [(a, ind) for a, ind in argpairs if ind is not None]
L = toolz.concat(
[
zip(inds, dims)
for (x, inds), (x, dims) in toolz.join(
toolz.first, argpairs2, toolz.first, numblocks.items()
)
]
)
g = toolz.groupby(0, L)
g = {k: {d for i, d in v} for k, v in g.items()}
g2 = {k: v - set(sentinels) if len(v) > 1 else v for k, v in g.items()}
if consolidate:
return toolz.valmap(consolidate, g2)
if g2 and not set(map(len, g2.values())) == {1}:
raise ValueError(f"Shapes do not align {g}")
return toolz.valmap(toolz.first, g2)
def _make_dims(indices, numblocks, new_axes):
"""Returns a dictionary mapping between each index specified in
`indices` and the number of output blocks for that index.
"""
dims = broadcast_dimensions(indices, numblocks)
for k, v in new_axes.items():
dims[k] = len(v) if isinstance(v, tuple) else 1
return dims
def fuse_roots(graph: HighLevelGraph, keys: list):
"""
Fuse nearby layers if they don't have dependencies
Often Blockwise sections of the graph fill out all of the computation
except for the initial data access or data loading layers::
Large Blockwise Layer
| | |
X Y Z
This can be troublesome because X, Y, and Z tasks may be executed on
different machines, and then require communication to move around.
This optimization identifies this situation, lowers all of the graphs to
concrete dicts, and then calls ``fuse`` on them, with a width equal to the
number of layers like X, Y, and Z.
This is currently used within array and dataframe optimizations.
Parameters
----------
graph : HighLevelGraph
The full graph of the computation
keys : list
The output keys of the computation, to be passed on to fuse
See Also
--------
Blockwise
fuse
"""
layers = ensure_dict(graph.layers, copy=True)
dependencies = ensure_dict(graph.dependencies, copy=True)
dependents = reverse_dict(dependencies)
for name, layer in graph.layers.items():
deps = graph.dependencies[name]
if (
isinstance(layer, Blockwise)
and len(deps) > 1
and not any(dependencies[dep] for dep in deps) # no need to fuse if 0 or 1
and all(len(dependents[dep]) == 1 for dep in deps)
and all(layer.annotations == graph.layers[dep].annotations for dep in deps)
):
new = toolz.merge(layer, *[layers[dep] for dep in deps])
new, _ = fuse(new, keys, ave_width=len(deps))
for dep in deps:
del layers[dep]
del dependencies[dep]
layers[name] = new
dependencies[name] = set()
return HighLevelGraph(layers, dependencies)
| Blockwise |
python | huggingface__transformers | src/transformers/models/qwen3_vl/modeling_qwen3_vl.py | {
"start": 22541,
"end": 24454
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: Qwen3VLTextConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Qwen3VLTextAttention(config=config, layer_idx=layer_idx)
self.mlp = Qwen3VLTextMLP(config)
self.input_layernorm = Qwen3VLTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Qwen3VLTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@dataclass
@auto_docstring(
custom_intro="""
Base class for Llava outputs, with hidden states and attentions.
"""
)
| Qwen3VLTextDecoderLayer |
python | streamlit__streamlit | lib/streamlit/runtime/caching/cached_message_replay.py | {
"start": 1440,
"end": 1545
} | class ____:
media: bytes | str
mimetype: str
media_id: str
@dataclass(frozen=True)
| MediaMsgData |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/conjecture/test_provider.py | {
"start": 12706,
"end": 15503
} | class ____(TrivialProvider):
# self-documenting constant
REALIZED = 42
avoid_realization = True
def realize(self, value, *, for_failure=False):
if isinstance(value, int):
return self.REALIZED
return value
def test_realize():
with temp_register_backend("realize", RealizeProvider):
values = []
@given(st.integers())
@settings(backend="realize")
def test_function(n):
values.append(current_build_context().data.provider.realize(n))
test_function()
# first draw is 0 from ChoiceTemplate(type="simplest")
assert values[0] == 0
assert all(n == RealizeProvider.REALIZED for n in values[1:])
def test_realize_dependent_draw():
with temp_register_backend("realize", RealizeProvider):
@given(st.data())
@settings(backend="realize")
def test_function(data):
n1 = data.draw(st.integers())
n2 = data.draw(st.integers(n1, n1 + 10))
assert n1 <= n2
test_function()
@pytest.mark.parametrize("verbosity", [Verbosity.verbose, Verbosity.debug])
def test_realization_with_verbosity(verbosity):
with temp_register_backend("realize", RealizeProvider):
@given(st.floats())
@settings(backend="realize", verbosity=verbosity)
def test_function(f):
pass
with capture_out() as out:
test_function()
assert "Trying example: test_function(\n f=<symbolic>,\n)" in out.getvalue()
@pytest.mark.parametrize("verbosity", [Verbosity.verbose, Verbosity.debug])
def test_realization_with_verbosity_draw(verbosity):
with temp_register_backend("realize", RealizeProvider):
@given(st.data())
@settings(backend="realize", verbosity=verbosity)
def test_function(data):
data.draw(st.integers())
with capture_out() as out:
test_function()
assert "Draw 1: <symbolic>" in out.getvalue()
def test_realization_with_observability():
with temp_register_backend("realize", RealizeProvider):
@given(st.data())
@settings(backend="realize")
def test_function(data):
data.draw(st.integers())
with capture_observations() as observations:
test_function()
test_cases = [tc for tc in observations if tc.type == "test_case"]
assert {tc.representation for tc in test_cases} == {
# from the first ChoiceTemplate(type="simplest") example
"test_function(\n data=data(...),\n)\nDraw 1: 0",
# from all other examples. data=<symbolic> isn't ideal; we should special
# case this as data=data(...).
f"test_function(\n data=<symbolic>,\n)\nDraw 1: {RealizeProvider.REALIZED}",
}
| RealizeProvider |
python | altair-viz__altair | tools/datasets/models.py | {
"start": 1016,
"end": 1141
} | class ____(TypedDict):
"""https://datapackage.org/standard/table-schema/#properties."""
fields: Sequence[Field]
| Schema |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/dagster/subschema/rabbitmq.py | {
"start": 119,
"end": 197
} | class ____(BaseModel):
username: str
password: str
| RabbitMQConfiguration |
python | mlflow__mlflow | mlflow/genai/judges/adapters/databricks_adapter.py | {
"start": 21457,
"end": 23029
} | class ____:
feedback: Feedback
model_provider: str
model_name: str
request_id: str | None
num_prompt_tokens: int | None
num_completion_tokens: int | None
def _invoke_databricks_serving_endpoint_judge(
*,
model_name: str,
prompt: str,
assessment_name: str,
num_retries: int = 10,
response_format: type[pydantic.BaseModel] | None = None,
) -> InvokeJudgeModelHelperOutput:
output = _invoke_databricks_serving_endpoint(
model_name=model_name,
prompt=prompt,
num_retries=num_retries,
response_format=response_format,
)
try:
response_dict = json.loads(output.response)
feedback = Feedback(
name=assessment_name,
value=response_dict["result"],
rationale=_sanitize_justification(response_dict.get("rationale", "")),
source=AssessmentSource(
source_type=AssessmentSourceType.LLM_JUDGE,
source_id=f"databricks:/{model_name}",
),
)
except json.JSONDecodeError as e:
raise MlflowException(
f"Failed to parse the response from the judge. Response: {output.response}",
error_code=INVALID_PARAMETER_VALUE,
) from e
return InvokeJudgeModelHelperOutput(
feedback=feedback,
model_provider="databricks",
model_name=model_name,
request_id=output.request_id,
num_prompt_tokens=output.num_prompt_tokens,
num_completion_tokens=output.num_completion_tokens,
)
| InvokeJudgeModelHelperOutput |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/query_metric_provider.py | {
"start": 1492,
"end": 5608
} | class ____(MetricProvider):
"""Base class for all Query Metrics, which define metrics to construct SQL queries.
An example of this is `query.table`,
which takes in a SQL query & target table name, and returns the result of that query.
In some cases, subclasses of MetricProvider, such as QueryMetricProvider, will already
have correct values that may simply be inherited by Metric classes.
---Documentation---
- https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_query_expectations
Args:
metric_name (str): A name identifying the metric. Metric Name must be globally unique in
a great_expectations installation.
domain_keys (tuple): A tuple of the keys used to determine the domain of the metric.
value_keys (tuple): A tuple of the keys used to determine the value of the metric.
query (str): A valid SQL query.
"""
domain_keys = ("batch_id", "row_condition", "condition_parser")
query_param_name: ClassVar[str] = "query"
dialect_columns_require_subquery_aliases: ClassVar[set[GXSqlDialect]] = {
GXSqlDialect.POSTGRESQL
}
@classmethod
def _get_query_from_metric_value_kwargs(cls, metric_value_kwargs: dict) -> str:
query_param = cls.query_param_name
query: str | None = metric_value_kwargs.get(query_param) or cls.default_kwarg_values.get(
query_param
)
if not query:
raise MissingParameterError(query_param)
if not isinstance(query, str):
raise InvalidParameterTypeError(query_param, str)
return query
@classmethod
def _get_parameters_dict_from_query_parameters(
cls, query_parameters: Optional[QueryParameters]
) -> dict[str, Any]:
if not query_parameters:
return {}
elif query_parameters and "columns" in query_parameters:
columns = query_parameters.pop("columns")
query_columns = {f"col_{i}": col for i, col in enumerate(columns, 1)}
return {**query_parameters, **query_columns}
else:
return {**query_parameters}
@classmethod
def _get_substituted_batch_subquery_from_query_and_batch_selectable(
cls,
query: str,
batch_selectable: sa.Selectable,
execution_engine: SqlAlchemyExecutionEngine,
query_parameters: Optional[QueryParameters] = None,
) -> str:
parameters = cls._get_parameters_dict_from_query_parameters(query_parameters)
if isinstance(batch_selectable, sa.Table):
query = query.format(batch=batch_selectable, **parameters)
elif isinstance(
batch_selectable, (sa.sql.Select, get_sqlalchemy_subquery_type())
): # specifying a row_condition returns the active batch as a Select
# specifying an unexpected_rows_query returns the active batch as a Subquery or Alias
# this requires compilation & aliasing when formatting the parameterized query
batch = batch_selectable.compile(compile_kwargs={"literal_binds": True})
# all join queries require the user to have taken care of aliasing themselves
if "JOIN" in query.upper():
query = query.format(batch=f"({batch})", **parameters)
else:
query = query.format(batch=f"({batch}) AS subselect", **parameters)
else:
query = query.format(batch=f"({batch_selectable})", **parameters)
return query
@classmethod
def _get_sqlalchemy_records_from_substituted_batch_subquery(
cls,
substituted_batch_subquery: str,
execution_engine: SqlAlchemyExecutionEngine,
) -> list[dict]:
result: Union[Sequence[sa.Row[Any]], Any] = execution_engine.execute_query(
sa.text(substituted_batch_subquery)
).fetchmany(MAX_RESULT_RECORDS)
if isinstance(result, Sequence):
return [element._asdict() for element in result]
else:
return [result]
| QueryMetricProvider |
python | google__jax | jax/_src/pallas/mosaic/sc_core.py | {
"start": 4724,
"end": 7455
} | class ____:
axis_name: str
num_cores: int
@property
def backend(self) -> str:
return "mosaic_tpu"
@property
def shape(self):
return collections.OrderedDict(core=self.num_cores)
def discharges_effect(self, effect):
del effect # Unused.
return False
def gather_global_allocations(jaxpr):
def _gather_from_eqns(*, eqn=None, jaxpr=None):
if eqn is not None:
if eqn.primitive is pallas_primitives.get_global_p:
what = eqn.params["what"]
yield pallas_core.MemoryRef(what.inner_aval, what.memory_space)
for subjaxpr in jax_core.jaxprs_in_params(eqn.params):
yield from _gather_from_eqns(jaxpr=subjaxpr)
else:
for eqn in jaxpr.eqns:
yield from _gather_from_eqns(eqn=eqn)
allocations = collections.defaultdict(list)
for memref in _gather_from_eqns(jaxpr=jaxpr):
allocations[memref].append(memref)
return allocations
def _scalar_subcore_mesh_discharge_rule(
in_avals,
out_avals,
*args,
mesh,
jaxpr,
compiler_params,
interpret,
debug,
cost_estimate,
name,
metadata,
):
if not isinstance(mesh, ScalarSubcoreMesh):
raise TypeError(f"Mesh must be a ScalarSubcoreMesh, got {type(mesh)}")
assert len(mesh.shape) == 1
sc_info = get_sparse_core_info()
if mesh.num_cores > (num_expected := sc_info.num_cores):
raise ValueError(
f"Mesh has {mesh.num_cores} cores, but the current TPU chip has only"
f" {num_expected} SparseCores"
)
if compiler_params is None:
compiler_params = tpu_core.CompilerParams()
if compiler_params.dimension_semantics is not None:
raise ValueError("ScalarSubcoreMesh does not support dimension_semantics=")
return pallas_core.default_mesh_discharge_rule(
in_avals,
out_avals,
*args,
mesh=mesh,
jaxpr=jaxpr,
compiler_params=dataclasses.replace(
compiler_params,
dimension_semantics=["core_parallel"],
kernel_type=tpu_core.KernelType.SC_SCALAR_SUBCORE,
),
interpret=interpret,
debug=debug,
cost_estimate=cost_estimate,
name=name,
memory_space=tpu_core.MemorySpace.HBM,
metadata=metadata,
scratch_shapes=tree_util.tree_leaves(gather_global_allocations(jaxpr)),
)
pallas_core._core_map_mesh_rules[ScalarSubcoreMesh] = (
_scalar_subcore_mesh_discharge_rule
)
def _get_num_cores() -> int:
"""Returns the number of cores for the current SparseCore."""
return get_sparse_core_info().num_cores
def _get_num_subcores() -> int:
"""Returns the number of subcores for the current SparseCore."""
return get_sparse_core_info().num_subcores
@dataclasses.dataclass(frozen=True, kw_only=True)
| ScalarSubcoreMesh |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/streams.py | {
"start": 4529,
"end": 4993
} | class ____(FBMarketingIncrementalStream):
"""doc: https://developers.facebook.com/docs/marketing-api/reference/ad-campaign"""
entity_prefix = "adset"
status_field = "effective_status"
valid_statuses = [status.value for status in ValidAdSetStatuses]
def list_objects(self, params: Mapping[str, Any], account_id: str) -> Iterable:
return self._api.get_account(account_id=account_id).get_ad_sets(params=params, fields=self.fields())
| AdSets |
python | great-expectations__great_expectations | great_expectations/core/expectation_diagnostics/expectation_test_data_cases.py | {
"start": 316,
"end": 513
} | class ____(Enum):
"""Backends with some level of testing and support"""
BIGQUERY = "CONCEPT_ONLY"
MSSQL = "EXPERIMENTAL"
SQLITE = "BETA"
PYSPARK = "PRODUCTION"
@dataclass
| Backend |
python | pydata__xarray | xarray/backends/pydap_.py | {
"start": 6992,
"end": 14124
} | class ____(BackendEntrypoint):
"""
Backend for streaming datasets over the internet using
the Data Access Protocol, also known as DODS or OPeNDAP
based on the pydap package.
This backend is selected by default for urls.
For more information about the underlying library, visit:
https://pydap.github.io/pydap/en/intro.html
See Also
--------
backends.PydapDataStore
"""
description = "Open remote datasets via OPeNDAP using pydap in Xarray"
url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.PydapBackendEntrypoint.html"
def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool:
if not isinstance(filename_or_obj, str):
return False
return _is_likely_dap_url(filename_or_obj)
def open_dataset(
self,
filename_or_obj: (
str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore
),
*,
mask_and_scale=True,
decode_times=True,
concat_characters=True,
decode_coords=True,
drop_variables: str | Iterable[str] | None = None,
use_cftime=None,
decode_timedelta=None,
group=None,
application=None,
session=None,
output_grid=None,
timeout=None,
verify=None,
user_charset=None,
checksums=True,
) -> Dataset:
store = PydapDataStore.open(
url=filename_or_obj,
group=group,
application=application,
session=session,
output_grid=output_grid,
timeout=timeout,
verify=verify,
user_charset=user_charset,
checksums=checksums,
)
store_entrypoint = StoreBackendEntrypoint()
with close_on_error(store):
ds = store_entrypoint.open_dataset(
store,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
)
return ds
def open_datatree(
self,
filename_or_obj: T_PathFileOrDataStore,
*,
mask_and_scale=True,
decode_times=True,
concat_characters=True,
decode_coords=True,
drop_variables: str | Iterable[str] | None = None,
use_cftime=None,
decode_timedelta=None,
group: str | None = None,
application=None,
session=None,
timeout=None,
verify=None,
user_charset=None,
checksums=True,
) -> DataTree:
groups_dict = self.open_groups_as_dict(
filename_or_obj,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
group=group,
application=application,
session=session,
timeout=timeout,
verify=verify,
user_charset=user_charset,
checksums=checksums,
)
return datatree_from_dict_with_io_cleanup(groups_dict)
def open_groups_as_dict(
self,
filename_or_obj: T_PathFileOrDataStore,
*,
mask_and_scale=True,
decode_times=True,
concat_characters=True,
decode_coords=True,
drop_variables: str | Iterable[str] | None = None,
use_cftime=None,
decode_timedelta=None,
group: str | None = None,
application=None,
session=None,
timeout=None,
verify=None,
user_charset=None,
checksums=True,
) -> dict[str, Dataset]:
from xarray.core.treenode import NodePath
filename_or_obj = _normalize_path(filename_or_obj)
store = PydapDataStore.open(
url=filename_or_obj,
application=application,
session=session,
timeout=timeout,
verify=verify,
user_charset=user_charset,
checksums=checksums,
)
# Check for a group and make it a parent if it exists
if group:
parent = str(NodePath("/") / NodePath(group))
else:
parent = str(NodePath("/"))
groups_dict = {}
group_names = [parent]
# construct fully qualified path to group
try:
# this works for pydap >= 3.5.1
Groups = store.ds[parent].groups()
except AttributeError:
# THIS IS ONLY NEEDED FOR `pydap == 3.5.0`
# `pydap>= 3.5.1` has a new method `groups()`
# that returns a dict of group names and their paths
def group_fqn(store, path=None, g_fqn=None) -> dict[str, str]:
"""To be removed for pydap > 3.5.0.
Derives the fully qualified name of a Group."""
from pydap.model import GroupType
if not path:
path = "/" # parent
if not g_fqn:
g_fqn = {}
groups = [
store[key].id
for key in store.keys()
if isinstance(store[key], GroupType)
]
for g in groups:
g_fqn.update({g: path})
subgroups = [
var for var in store[g] if isinstance(store[g][var], GroupType)
]
if len(subgroups) > 0:
npath = path + g
g_fqn = group_fqn(store[g], npath, g_fqn)
return g_fqn
Groups = group_fqn(store.ds)
group_names += [
str(NodePath(path_to_group) / NodePath(group))
for group, path_to_group in Groups.items()
]
for path_group in group_names:
# get a group from the store
store.group = path_group
store_entrypoint = StoreBackendEntrypoint()
with close_on_error(store):
group_ds = store_entrypoint.open_dataset(
store,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
decode_timedelta=decode_timedelta,
)
if group:
group_name = str(NodePath(path_group).relative_to(parent))
else:
group_name = str(NodePath(path_group))
groups_dict[group_name] = group_ds
return groups_dict
BACKEND_ENTRYPOINTS["pydap"] = ("pydap", PydapBackendEntrypoint)
| PydapBackendEntrypoint |
python | joke2k__faker | tests/providers/test_user_agent.py | {
"start": 184,
"end": 3005
} | class ____:
"""Test user agent provider methods"""
num_samples = 1000
android_token_pattern: Pattern = re.compile(r"Android (?P<android_version>\d+(?:\.\d){0,2})")
ios_token_pattern: Pattern = re.compile(
r"^(?P<apple_device>.*?); CPU \1 OS " + r"(?P<ios_version>\d+(?:_\d){0,2}) like Mac OS X"
)
mac_token_pattern: Pattern = re.compile(r"Macintosh; (?P<mac_processor>.*?) Mac OS X 10_([5-9]|1[0-2])_(\d)")
one_day = dt.timedelta(1.0)
def test_android_platform_token(self, faker, num_samples):
for _ in range(num_samples):
match = self.android_token_pattern.fullmatch(faker.android_platform_token())
assert match.group("android_version") in UaProvider.android_versions
def test_ios_platform_token(self, faker, num_samples):
for _ in range(num_samples):
match = self.ios_token_pattern.fullmatch(faker.ios_platform_token())
assert match.group("apple_device") in UaProvider.apple_devices
assert match.group("ios_version").replace("_", ".") in UaProvider.ios_versions
def test_mac_platform_token(self, faker, num_samples):
for _ in range(num_samples):
match = self.mac_token_pattern.fullmatch(faker.mac_platform_token())
assert match.group("mac_processor") in UaProvider.mac_processors
def test_firefox_deterministic_output(self, faker: Faker, num_samples: int) -> None:
"""Check whether ``faker.firefox()`` is deterministic, given the same seed."""
for _ in range(num_samples):
# GIVEN a (new) random seed
seed = faker.random.random()
# AND a DevOpsTester using a Faker instance seeded with this seed
# It is a bit tricky to feed the faker with its own random
# value, but it is sufficient for this particular test
faker.seed_instance(seed)
# AND the DevOpsTester using the fake library tomorrow
with freeze_time(dt.datetime.now() + self.one_day):
# AND the DevOpsTester requests a faked Mozilla Firefox web browser user agent (str)
fake_firefox_ua_output_tomorrow = faker.firefox()
# WHEN the DevOpsTester would use the fake library with the same seed
faker.seed_instance(seed)
# AND the DevOpsTester would use the fake library some time later
with freeze_time(dt.datetime.max - self.one_day):
# AND the DevOpsTester requests again faked Mozilla Firefox web browser user agent
fake_firefox_ua_output_much_later = faker.firefox()
# THEN the later Firefox U/A output should (always) be equal to the previous one
assert fake_firefox_ua_output_much_later == fake_firefox_ua_output_tomorrow
| TestUserAgentProvider |
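The `test_firefox_deterministic_output` case above hinges on a general property: a generator that draws all of its randomness from a seeded PRNG returns identical output for identical seeds, regardless of wall-clock time. A standard-library illustration of that property (the token format below is made up, not Faker's):

```python
import random

def fake_token(seed):
    """Everything random is derived from one seeded PRNG, so output is reproducible."""
    rng = random.Random(seed)
    version = rng.choice(["115.0", "116.0", "117.0"])
    build = rng.randint(1000, 9999)
    return f"Firefox/{version} build {build}"

# Same seed -> identical output, no matter when or how often it is called.
assert fake_token(42) == fake_token(42)
print(fake_token(42))
```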
python | getsentry__sentry | tests/sentry/features/test_manager.py | {
"start": 546,
"end": 1758
} | class ____(features.BatchFeatureHandler):
features = {"auth:register", "organizations:feature", "projects:feature"}
def has(
self,
feature: Feature,
actor: User | RpcUser | AnonymousUser | None,
skip_entity: bool | None = False,
) -> bool:
return True
def batch_has(self, feature_names, *args: Any, projects=None, organization=None, **kwargs: Any):
feature_results = {
feature_name: True for feature_name in feature_names if feature_name in self.features
}
if projects:
return {f"project:{project.id}": feature_results for project in projects}
if organization:
return {f"organization:{organization.id}": feature_results}
return {"unscoped": feature_results}
def _check_for_batch(self, feature_name, organization, actor):
raise NotImplementedError
def batch_has_for_organizations(self, feature_name, organizations) -> dict[str, bool]:
results: dict[str, bool] = {}
for org in organizations:
entity_key = f"organization:{org.id}"
results[entity_key] = feature_name in self.features
return results
| MockBatchHandler |
python | pola-rs__polars | py-polars/src/polars/exceptions.py | {
"start": 7004,
"end": 7142
} | class ____(PerformanceWarning):
"""Warning issued when a potentially slow `map_*` operation is performed."""
| PolarsInefficientMapWarning |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_write_dimension.py | {
"start": 301,
"end": 790
} | class ____(unittest.TestCase):
"""
Test the Worksheet _write_dimension() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_dimension(self):
"""Test the _write_dimension() method"""
self.worksheet._write_dimension()
exp = """<dimension ref="A1"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteDimension |
python | numpy__numpy | numpy/linalg/_linalg.py | {
"start": 2158,
"end": 2227
} | class ____(NamedTuple):
Q: NDArray[Any]
R: NDArray[Any]
| QRResult |
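Result types like the one above are typed NamedTuples so callers can keep unpacking the return value as a plain tuple while newer code uses attribute access. A small standalone sketch of that pattern (field types simplified to lists; the class name is a stand-in):

```python
from typing import NamedTuple

class Decomposition(NamedTuple):
    Q: list
    R: list

res = Decomposition(Q=[[1.0]], R=[[2.0]])
Q, R = res                        # still unpacks like a plain (Q, R) tuple
assert res.Q is Q and res.R is R  # ...while also supporting attribute access
```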
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_rds.py | {
"start": 32695,
"end": 35141
} | class ____:
@classmethod
def setup_class(cls):
cls.dag = DAG(
dag_id="test_dag",
schedule=None,
default_args={"owner": "airflow", "start_date": DEFAULT_DATE},
)
cls.hook = RdsHook(aws_conn_id=AWS_CONN, region_name="us-east-1")
_patch_hook_get_connection(cls.hook)
@classmethod
def teardown_class(cls):
del cls.dag
del cls.hook
@mock_aws
def test_delete_db_instance(self):
_create_db_instance(self.hook)
delete_db_instance_operator = RdsDeleteDbInstanceOperator(
task_id="test_delete_db_instance",
db_instance_identifier=DB_INSTANCE_NAME,
rds_kwargs={
"SkipFinalSnapshot": True,
},
aws_conn_id=AWS_CONN,
dag=self.dag,
)
_patch_hook_get_connection(delete_db_instance_operator.hook)
delete_db_instance_operator.execute(None)
with pytest.raises(self.hook.conn.exceptions.ClientError):
self.hook.conn.describe_db_instances(DBInstanceIdentifier=DB_INSTANCE_NAME)
@mock_aws
@patch.object(RdsHook, "wait_for_db_instance_state")
def test_delete_db_instance_no_wait(self, mock_await_status):
_create_db_instance(self.hook)
delete_db_instance_operator = RdsDeleteDbInstanceOperator(
task_id="test_delete_db_instance_no_wait",
db_instance_identifier=DB_INSTANCE_NAME,
rds_kwargs={
"SkipFinalSnapshot": True,
},
aws_conn_id=AWS_CONN,
dag=self.dag,
wait_for_completion=False,
)
_patch_hook_get_connection(delete_db_instance_operator.hook)
delete_db_instance_operator.execute(None)
with pytest.raises(self.hook.conn.exceptions.ClientError):
self.hook.conn.describe_db_instances(DBInstanceIdentifier=DB_INSTANCE_NAME)
mock_await_status.assert_not_called()
def test_template_fields(self):
operator = RdsDeleteDbInstanceOperator(
task_id="test_delete_db_instance_no_wait",
db_instance_identifier=DB_INSTANCE_NAME,
rds_kwargs={
"SkipFinalSnapshot": True,
},
aws_conn_id=AWS_CONN,
wait_for_completion=False,
region_name=REGION,
)
validate_template_fields(operator)
| TestRdsDeleteDbInstanceOperator |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/view_not_ignored/package.py | {
"start": 228,
"end": 1391
} | class ____(Package):
"""Install files that should not be ignored by spack."""
homepage = "http://www.spack.org"
url = "http://www.spack.org/downloads/aml-1.0.tar.gz"
has_code = False
version("0.1.0")
install_test_files = [
"foo.spack",
".spack.bar",
"aspack",
"bin/foo.spack",
"bin/.spack.bar",
"bin/aspack",
]
def install(self, spec, prefix):
for test_file in self.install_test_files:
path = os.path.join(prefix, test_file)
mkdirp(os.path.dirname(path))
with open(path, "w", encoding="utf-8") as f:
f.write(test_file)
@classmethod
def assert_installed(cls, prefix):
for test_file in cls.install_test_files:
path = os.path.join(prefix, test_file)
assert os.path.exists(path), "Missing installed file: {}".format(path)
@classmethod
def assert_not_installed(cls, prefix):
for test_file in cls.install_test_files:
path = os.path.join(prefix, test_file)
assert not os.path.exists(path), "File was not uninstalled: {}".format(path)
| ViewNotIgnored |
python | getsentry__sentry | src/sentry/models/groupowner.py | {
"start": 1802,
"end": 3543
} | class ____(BaseManager["GroupOwner"]):
def update_or_create_and_preserve_context(
self, lookup_kwargs: dict, defaults: dict, context_defaults: dict
) -> tuple[GroupOwner, bool]:
"""
update_or_create doesn't have great support for json fields like context.
To preserve the existing content and update only the keys we specify,
we have to handle the operation this way.
use lookup_kwargs to perform the .get()
if found: update the object with defaults and the context with context_defaults
if not found: create the object with the values in lookup_kwargs, defaults, and context_defaults
Note: lookup_kwargs is modified if the GroupOwner is created, do not reuse it for other purposes!
"""
try:
group_owner = GroupOwner.objects.annotate(
context__asjsonb=Cast("context", models.JSONField())
).get(**lookup_kwargs)
for k, v in defaults.items():
setattr(group_owner, k, v)
existing_context = group_owner.context or {}
existing_context.update(context_defaults)
group_owner.context = existing_context
group_owner.save()
return group_owner, False
except GroupOwner.DoesNotExist:
# modify lookup_kwargs so they can be used to create the GroupOwner
keys_to_delete = [k for k in lookup_kwargs.keys() if "__" in k]
for k in keys_to_delete:
del lookup_kwargs[k]
lookup_kwargs.update(defaults)
lookup_kwargs["context"] = context_defaults
return GroupOwner.objects.create(**lookup_kwargs), True
@region_silo_model
| GroupOwnerManager |
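The docstring in the entry above explains the core trick: read the row, merge only the supplied keys into the stored JSON `context`, and write the result back, instead of letting `update_or_create` overwrite the whole column. The merge step on its own, independent of Django or Sentry (names are illustrative):

```python
def merge_context(existing, updates):
    """Update only the supplied keys, preserving everything already stored."""
    merged = dict(existing or {})
    merged.update(updates)
    return merged

stored = {"commit_id": 123, "suspect_committer": "alice"}
print(merge_context(stored, {"suspect_committer": "bob"}))
# {'commit_id': 123, 'suspect_committer': 'bob'}
```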
python | viewflow__viewflow | viewflow/workflow/base.py | {
"start": 7288,
"end": 15589
} | class ____(Viewset, metaclass=FlowMetaClass):
"""
Base class for defining a task in a flow.
:param task_type: A human-readable string describing the task type.
:type task_type: str
:param activation_class: The activation class to use for this node. If not
specified, a default activation class will be used.
:type activation_class: class
"""
instance: Optional["Flow"] = None
process_class: Optional[type] = None
task_class: Optional[type] = None
lock_impl: Any = lock.no_lock
process_title: str = ""
process_description: str = ""
process_summary_template: str = ""
process_result_template: str = ""
def __init_subclass__(cls, **kwargs: Any) -> None:
"""
Create a new node instance.
:param activation_class: The activation class to use for this node.
:type activation_class: class
:param kwargs: Additional keyword arguments to pass to the superclass.
"""
super().__init_subclass__(**kwargs)
cls.instance = LazySingletonDescriptor()
# process and task default values
from .models import Process, Task # avoid app not loaded error
if cls.process_class is None:
cls.process_class = Process
if cls.task_class is None:
cls.task_class = Task
# viewset.app_name
cls.app_name = strip_suffixes(cls.__name__, ["Flow"]).lower()
# flow description
if not cls.process_title or not cls.process_description:
if cls.__doc__:
docstring = cls.__doc__.split("\n\n", 1)
if not cls.process_title and len(docstring) > 0:
cls.process_title = docstring[0].strip()
if not cls.process_description and len(docstring) > 1:
cls.process_description = dedent(docstring[1]).strip()
else:
if not cls.process_title:
cls.process_title = camel_case_to_title(
strip_suffixes(cls.__name__, ["Flow"])
)
# nodes collect/copy
cls._nodes_by_name = {}
for attr_name in dir(cls):
if attr_name.startswith("_") or attr_name == "instance":
continue
node = getattr(cls, attr_name)
if not isinstance(node, Node):
continue
node = copy.copy(node)
node.name = attr_name
node.flow_class = cls
cls._nodes_by_name[attr_name] = node
setattr(cls, attr_name, node)
# resolve inner links
for _, node in cls._nodes_by_name.items():
node._resolve(cls.instance)
# setup flow graph
incoming = defaultdict(lambda: []) # node -> [incoming_nodes]
for _, node in cls._nodes_by_name.items():
for outgoing_edge in node._outgoing():
incoming[outgoing_edge.dst].append(outgoing_edge)
for target, edges in incoming.items():
target._incoming_edges = edges
# process permissions
process_options = cls.process_class._meta
for permission in ("manage",):
if permission not in process_options.default_permissions:
process_options.default_permissions += (permission,)
# complete node setup
for _, node in cls._nodes_by_name.items():
node._ready()
def __str__(self) -> str:
return str(self.process_title)
def _get_resolver_extra(self) -> Dict[str, Any]:
"""
Get additional context for views
"""
return {"flow": self}
@classmethod
def lock(cls, process_pk: int) -> Any:
"""
Acquire a lock for the specified process.
"""
return cls.lock_impl(cls, process_pk)
@property
def app_label(self) -> str:
"""
Get the application label for the flow.
"""
module = "{}.{}".format(self.__module__, self.__class__.__name__)
app_config = apps.get_containing_app_config(module)
return app_config.label
@property
def flow_label(self) -> str:
"""
Get the flow label for the flow.
"""
module = "{}.{}".format(self.__module__, self.__class__.__name__)
app_config = apps.get_containing_app_config(module)
subpath = module[len(app_config.module.__name__) + 1 :]
if subpath.startswith("flows."):
subpath = subpath[len("flows.") :]
if subpath.endswith("Flow"):
subpath = subpath[: -len("Flow")]
return subpath.lower().replace(".", "/")
def nodes(self) -> Iterator[Node]:
return self._nodes_by_name.values()
def node(self, name: str, no_obsolete: bool = False) -> Optional[Node]:
"""
Get a node by name.
"""
node = self._nodes_by_name.get(name, None)
if node is None and not no_obsolete:
from .nodes import Obsolete
obsolete_factory = self._nodes_by_name.get("obsolete", Obsolete())
node = obsolete_factory.create_node(name, flow_class=self.__class__)
return node
def has_view_permission(self, user: Any, obj: Optional[Any] = None) -> bool:
opts = self.process_class._meta
return user.is_authenticated and user.has_perm(
f"{opts.app_label}.view_{ opts.model_name}"
)
def has_manage_permission(self, user: Any, obj: Optional[Any] = None) -> bool:
opts = self.process_class._meta
return user.is_authenticated and user.has_perm(
f"{opts.app_label}.manage_{ opts.model_name}"
)
def _get_urls(self) -> List:
urlpatterns = super()._get_urls()
for node in self.nodes():
node.parent = self
patterns, app_name, namespace = node.urls
urlpatterns.append(
path("", include((patterns, app_name), namespace=namespace))
)
return urlpatterns
def _this_owner(self, flow_task: type) -> Any:
"""Return user that was assigned to the task."""
def get_task_owner(activation):
flow_class = activation.process.flow_class
task_class = flow_class.task_class
task = (
task_class._default_manager.filter(
process=activation.process, flow_task=flow_task, status=STATUS.DONE
)
.order_by("-id")
.first()
)
return task.owner if task else None
return get_task_owner
@classmethod
def get_start_nodes(cls, user: Optional[Any] = None) -> List[Node]:
"""
List of flow start nodes.
If user is not None, returns only permitted nodes for the provided user
"""
from .nodes import Start
return [
node
for node in cls.instance.nodes()
if isinstance(node, Start)
if user is None or node.can_execute(user)
]
def get_available_process_actions(
self,
process: Any,
user: Optional[Any] = None,
) -> List[Dict[str, str]]:
"""List of {name, url} process actions available for the current user"""
# TODO process cancel
return []
def cancel(self, process: Any) -> None:
with transaction.atomic(), self.lock(process.pk):
active_tasks = process.task_set.exclude(
status__in=[STATUS.DONE, STATUS.CANCELED, STATUS.REVIVED]
)
activations = [
task.flow_task.activation_class(task) for task in active_tasks
]
not_cancellable = [
activation
for activation in activations
if not activation.cancel.can_proceed()
]
if not_cancellable:
raise FlowRuntimeError(
"Can't cancel {}".format(
",".join(str(activation.task) for activation in not_cancellable)
)
)
for activation in activations:
activation.cancel()
process.status = PROCESS.CANCELED
process.finished = now()
process.save()
| Flow |
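The `__init_subclass__` hook in the entry above is what lets a flow subclass declare its nodes as plain class attributes and still receive per-class copies with `name` and `flow_class` filled in. A stripped-down sketch of that registration pattern (the `Node` and `Flow` classes below are stand-ins, not viewflow's):

```python
import copy

class Node:
    def __init__(self, label):
        self.label = label
        self.name = None           # filled in by the owning flow class
        self.flow_class = None

class Flow:
    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        cls._nodes_by_name = {}
        for attr_name in dir(cls):
            node = getattr(cls, attr_name)
            if isinstance(node, Node):
                node = copy.copy(node)          # each subclass owns its node copies
                node.name = attr_name
                node.flow_class = cls
                cls._nodes_by_name[attr_name] = node
                setattr(cls, attr_name, node)

class MyFlow(Flow):
    start = Node("begin")
    end = Node("finish")

print(sorted(MyFlow._nodes_by_name))                         # ['end', 'start']
print(MyFlow.start.name, MyFlow.start.flow_class is MyFlow)  # start True
```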
python | astropy__astropy | astropy/units/tests/test_quantity_erfa_ufuncs.py | {
"start": 12866,
"end": 19681
} | class ____:
@classmethod
def setup_class(cls):
ldbody = np.array(
[
(0.00028574, 3e-10, ([-7.81014427, -5.60956681, -1.98079819],
[0.0030723249, -0.00406995477, -0.00181335842])),
(0.00095435, 3e-9, ([0.738098796, 4.63658692, 1.9693136],
[-0.00755816922, 0.00126913722, 0.000727999001])),
(1.0, 6e-6, ([-0.000712174377, -0.00230478303, -0.00105865966],
[6.29235213e-6, -3.30888387e-7, -2.96486623e-7]))
],
dtype=erfa_ufunc.dt_eraLDBODY
) # fmt: skip
ldbody_unit = u.StructuredUnit("Msun,radian,(AU,AU/day)", ldbody.dtype)
cls.ldbody = ldbody << ldbody_unit
cls.ob = [-0.974170437, -0.2115201, -0.0917583114] << u.AU
cls.sc = np.array([-0.763276255, -0.608633767, -0.216735543])
# From t_atciq in t_erfa_c.c
astrom, eo = erfa_ufunc.apci13(2456165.5, 0.401182685)
cls.astrom = astrom << ASTROM_UNIT
cls.rc = 2.71 * u.rad
cls.dc = 0.174 * u.rad
cls.pr = 1e-5 * u.rad / u.year
cls.pd = 5e-6 * u.rad / u.year
cls.px = 0.1 * u.arcsec
cls.rv = 55.0 * u.km / u.s
def test_ldn_basic(self):
sn = erfa_ufunc.ldn(self.ldbody, self.ob, self.sc)
assert_quantity_allclose(
sn,
[-0.7632762579693333866, -0.6086337636093002660, -0.2167355420646328159]
* u.one,
atol=1e-12,
rtol=0,
)
def test_ldn_in_other_unit(self):
ldbody = self.ldbody.to("kg,rad,(m,m/s)")
ob = self.ob.to("m")
sn = erfa_ufunc.ldn(ldbody, ob, self.sc)
assert_quantity_allclose(
sn,
[-0.7632762579693333866, -0.6086337636093002660, -0.2167355420646328159]
* u.one,
atol=1e-12,
rtol=0,
)
def test_ldn_in_SI(self):
sn = erfa_ufunc.ldn(self.ldbody.si, self.ob.si, self.sc)
assert_quantity_allclose(
sn,
[-0.7632762579693333866, -0.6086337636093002660, -0.2167355420646328159]
* u.one,
atol=1e-12,
rtol=0,
)
def test_aper(self):
along = self.astrom["along"]
astrom2 = erfa_ufunc.aper(10 * u.deg, self.astrom)
assert astrom2["eral"].unit == u.radian
assert_quantity_allclose(astrom2["eral"], along + 10 * u.deg)
astrom3 = self.astrom.to("s,km,1,km,1,1,1,deg,deg,deg,deg,1,1,1,rad,rad,rad")
astrom4 = erfa_ufunc.aper(10 * u.deg, astrom3)
assert astrom3["eral"].unit == u.rad
assert astrom4["eral"].unit == u.deg
assert astrom4.unit == "s,km,1,km,1,1,1,deg,deg,deg,deg,1,1,1,deg,rad,rad"
assert_quantity_allclose(astrom4["eral"], along + 10 * u.deg)
def test_atciq_basic(self):
ri, di = erfa_ufunc.atciq(
self.rc, self.dc, self.pr, self.pd, self.px, self.rv, self.astrom
)
assert_quantity_allclose(ri, 2.710121572968696744 * u.rad)
assert_quantity_allclose(di, 0.1729371367219539137 * u.rad)
def test_atciq_in_other_unit(self):
astrom = self.astrom.to("s,km,1,km,1,1,1,deg,deg,deg,deg,1,1,1,deg,deg,deg")
ri, di = erfa_ufunc.atciq(
self.rc.to(u.deg),
self.dc.to(u.deg),
self.pr.to(u.mas / u.yr),
self.pd.to(u.mas / u.yr),
self.px,
self.rv.to(u.m / u.s),
astrom,
)
assert_quantity_allclose(ri, 2.710121572968696744 * u.rad, atol=1e-12 * u.rad)
assert_quantity_allclose(di, 0.1729371367219539137 * u.rad, atol=1e-12 * u.rad)
def test_atciqn(self):
ri, di = erfa_ufunc.atciqn(
self.rc.to(u.deg),
self.dc.to(u.deg),
self.pr.to(u.mas / u.yr),
self.pd.to(u.mas / u.yr),
self.px,
self.rv.to(u.m / u.s),
self.astrom.si,
self.ldbody.si,
)
assert_quantity_allclose(ri, 2.710122008104983335 * u.rad, atol=1e-12 * u.rad)
assert_quantity_allclose(di, 0.1729371916492767821 * u.rad, atol=1e-12 * u.rad)
def test_atciqz(self):
ri, di = erfa_ufunc.atciqz(self.rc.to(u.deg), self.dc.to(u.deg), self.astrom.si)
assert_quantity_allclose(ri, 2.709994899247256984 * u.rad, atol=1e-12 * u.rad)
assert_quantity_allclose(di, 0.1728740720984931891 * u.rad, atol=1e-12 * u.rad)
def test_aticq(self):
ri = 2.710121572969038991 * u.rad
di = 0.1729371367218230438 * u.rad
rc, dc = erfa_ufunc.aticq(ri.to(u.deg), di.to(u.deg), self.astrom.si)
assert_quantity_allclose(rc, 2.710126504531716819 * u.rad, atol=1e-12 * u.rad)
assert_quantity_allclose(dc, 0.1740632537627034482 * u.rad, atol=1e-12 * u.rad)
def test_aticqn(self):
ri = 2.709994899247599271 * u.rad
di = 0.1728740720983623469 * u.rad
rc, dc = erfa_ufunc.aticqn(
ri.to(u.deg), di.to(u.deg), self.astrom.si, self.ldbody.si
)
assert_quantity_allclose(rc, 2.709999575033027333 * u.rad, atol=1e-12 * u.rad)
assert_quantity_allclose(dc, 0.1739999656316469990 * u.rad, atol=1e-12 * u.rad)
def test_atioq_atoiq(self):
astrom, _ = erfa_ufunc.apio13(
2456384.5,
0.969254051,
0.1550675,
-0.527800806,
-1.2345856,
2738.0,
2.47230737e-7,
1.82640464e-6,
731.0,
12.8,
0.59,
0.55,
)
astrom = astrom << ASTROM_UNIT
ri = 2.710121572969038991 * u.rad
di = 0.1729371367218230438 * u.rad
aob, zob, hob, dob, rob = erfa_ufunc.atioq(
ri.to(u.deg), di.to(u.deg), astrom.si
)
assert_quantity_allclose(
aob, 0.9233952224895122499e-1 * u.rad, atol=1e-12 * u.rad
)
assert_quantity_allclose(zob, 1.407758704513549991 * u.rad, atol=1e-12 * u.rad)
assert_quantity_allclose(
hob, -0.9247619879881698140e-1 * u.rad, atol=1e-12 * u.rad
)
assert_quantity_allclose(dob, 0.1717653435756234676 * u.rad, atol=1e-12 * u.rad)
assert_quantity_allclose(rob, 2.710085107988480746 * u.rad, atol=1e-12 * u.rad)
# Sadly does not just use the values from above.
ob1 = 2.710085107986886201 * u.rad
ob2 = 0.1717653435758265198 * u.rad
ri2, di2 = erfa_ufunc.atoiq("R", ob1.to(u.deg), ob2.to(u.deg), astrom.si)
assert_quantity_allclose(ri2, 2.710121574447540810 * u.rad, atol=1e-12 * u.rad)
assert_quantity_allclose(
di2, 0.17293718391166087785 * u.rad, atol=1e-12 * u.rad
)
| TestEraStructUfuncs |
python | pytorch__pytorch | torch/_dynamo/variables/dicts.py | {
"start": 43573,
"end": 55014
} | class ____(ConstDictVariable):
"""We model a sets as dictionary with None values"""
CONTAINS_GUARD = GuardBuilder.SET_CONTAINS
def __init__(
self,
items: list[VariableTracker],
**kwargs: Any,
) -> None:
# pyrefly: ignore[bad-assignment]
items = dict.fromkeys(items, SetVariable._default_value())
# pyrefly: ignore[bad-argument-type]
super().__init__(items, **kwargs)
def debug_repr(self) -> str:
if not self.items:
return "set()"
else:
return "{" + ",".join(k.vt.debug_repr() for k in self.items) + "}"
@property
def set_items(self) -> set["ConstDictVariable._HashableTracker"]:
return set(self.items.keys())
@staticmethod
def _default_value() -> VariableTracker:
# Variable to fill in the keys of the dictionary
return ConstantVariable.create(None)
def as_proxy(self) -> Any:
return {k.vt.as_proxy() for k in self.set_items}
def python_type(self) -> type:
return set
def as_python_constant(self) -> Any:
return {k.vt.as_python_constant() for k in self.set_items}
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.foreach([x.vt for x in self.set_items])
codegen.append_output(create_instruction("BUILD_SET", arg=len(self.set_items)))
def _fast_set_method(
self,
tx: "InstructionTranslator",
fn: Any,
args: list[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
try:
res = fn(
*[x.as_python_constant() for x in [self, *args]],
**{k: v.as_python_constant() for k, v in kwargs.items()},
)
except Exception as exc:
raise_observed_exception(
type(exc), tx, args=list(map(ConstantVariable.create, exc.args))
)
# pyrefly: ignore[unbound-name]
return VariableTracker.build(tx, res)
def call_method(
self,
tx: "InstructionTranslator",
name: str,
args: list[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
# We forward the calls to the dictionary model
from ..utils import check_constant_args
if (
name
in (
"isdisjoint",
"union",
"intersection",
"difference",
"symmetric_difference",
)
and check_constant_args(args, kwargs)
and self.python_type() is set
):
py_type = self.python_type()
return self._fast_set_method(tx, getattr(py_type, name), args, kwargs)
if name == "__init__":
temp_set_vt = variables.BuiltinVariable(set).call_set(tx, *args, **kwargs)
tx.output.side_effects.mutation(self)
self.items.clear()
self.items.update(temp_set_vt.items) # type: ignore[attr-defined]
return ConstantVariable.create(None)
elif name == "add":
if kwargs or len(args) != 1:
raise_args_mismatch(
tx,
name,
"1 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
name = "__setitem__"
args = [args[0], SetVariable._default_value()]
elif name == "pop":
if kwargs or args:
raise_args_mismatch(
tx,
name,
"0 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
# Choose an item at random and pop it via the Dict.pop method
try:
result: VariableTracker = self.set_items.pop().vt # type: ignore[assignment]
except KeyError as e:
raise_observed_exception(
KeyError, tx, args=list(map(ConstantVariable.create, e.args))
)
# pyrefly: ignore[unbound-name]
super().call_method(tx, name, [result], kwargs)
# pyrefly: ignore[unbound-name]
return result
elif name == "isdisjoint":
if kwargs or len(args) != 1:
raise_args_mismatch(
tx,
name,
"1 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
return variables.UserFunctionVariable(
polyfills.set_isdisjoint
).call_function(tx, [self, args[0]], {})
elif name == "intersection":
if kwargs:
raise_args_mismatch(tx, name, "0 kwargs", f"{len(kwargs)} kwargs")
return variables.UserFunctionVariable(
polyfills.set_intersection
).call_function(tx, [self, *args], {})
elif name == "intersection_update":
if kwargs:
raise_args_mismatch(tx, name, "0 kwargs", f"{len(kwargs)} kwargs")
return variables.UserFunctionVariable(
polyfills.set_intersection_update
).call_function(tx, [self, *args], {})
elif name == "union":
if kwargs:
raise_args_mismatch(tx, name, "0 kwargs", f"{len(kwargs)} kwargs")
return variables.UserFunctionVariable(polyfills.set_union).call_function(
tx, [self, *args], {}
)
elif name == "difference":
if kwargs:
raise_args_mismatch(
tx, name, f"Expect: 0 kwargs, Actual: {len(kwargs)} kwargs"
)
return variables.UserFunctionVariable(
polyfills.set_difference
).call_function(tx, [self, *args], {})
elif name == "difference_update":
if kwargs:
raise_args_mismatch(tx, name, "0 kwargs", f"{len(kwargs)} kwargs")
return variables.UserFunctionVariable(
polyfills.set_difference_update
).call_function(tx, [self, *args], {})
elif name == "symmetric_difference":
if kwargs or len(args) != 1:
raise_args_mismatch(
tx,
name,
"1 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
return variables.UserFunctionVariable(
polyfills.set_symmetric_difference
).call_function(tx, [self, *args], {})
elif name == "symmetric_difference_update":
if kwargs or len(args) != 1:
raise_args_mismatch(
tx,
name,
"1 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
return variables.UserFunctionVariable(
polyfills.set_symmetric_difference_update
).call_function(tx, [self, *args], {})
elif name == "update" and self.is_mutable():
if kwargs:
raise_args_mismatch(tx, name, "0 kwargs", f"{len(kwargs)} kwargs")
return variables.UserFunctionVariable(polyfills.set_update).call_function(
tx, [self, *args], {}
)
elif name == "remove":
if kwargs or len(args) != 1:
raise_args_mismatch(
tx,
name,
"1 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
if args[0] not in self:
raise_observed_exception(KeyError, tx, args=args)
return super().call_method(tx, "pop", args, kwargs)
elif name == "discard":
if kwargs or len(args) != 1:
raise_args_mismatch(
tx,
name,
"1 args and 0 kwargs",
f"{len(args)} args and {len(kwargs)} kwargs",
)
if args[0] in self:
return super().call_method(tx, "pop", args, kwargs)
else:
return ConstantVariable.create(value=None)
elif name in ("issubset", "issuperset"):
if len(args) != 1:
raise_args_mismatch(tx, name, "1 args", f"{len(args)} args")
op = {
"issubset": operator.le,
"issuperset": operator.ge,
}
other = args[0].realize()
if not istype(other, SetVariable):
other = variables.BuiltinVariable(set).call_function(tx, [other], {})
return variables.BuiltinVariable(op.get(name)).call_function(
tx, [self, other], {}
)
elif name in ("__and__", "__or__", "__xor__", "__sub__"):
m = {
"__and__": "intersection",
"__or__": "union",
"__xor__": "symmetric_difference",
"__sub__": "difference",
}.get(name)
if not isinstance(args[0], (SetVariable, variables.UserDefinedSetVariable)):
msg = ConstantVariable.create(
f"unsupported operand type(s) for {name}: '{self.python_type_name()}' and '{args[0].python_type_name()}'"
)
raise_observed_exception(TypeError, tx, args=[msg])
assert m is not None
return self.call_method(tx, m, args, kwargs)
elif name in ("__iand__", "__ior__", "__ixor__", "__isub__"):
if not isinstance(args[0], (SetVariable, variables.UserDefinedSetVariable)):
msg = ConstantVariable.create(
f"unsupported operand type(s) for {name}: '{self.python_type_name()}' and '{args[0].python_type_name()}'"
)
raise_observed_exception(TypeError, tx, args=[msg])
m = {
"__iand__": "intersection_update",
"__ior__": "update",
"__ixor__": "symmetric_difference_update",
"__isub__": "difference_update",
}.get(name)
assert m is not None
self.call_method(tx, m, args, kwargs)
return self
elif name == "__eq__":
if not isinstance(args[0], (SetVariable, variables.UserDefinedSetVariable)):
return ConstantVariable.create(False)
r = self.call_method(tx, "symmetric_difference", args, kwargs)
return ConstantVariable.create(len(r.set_items) == 0) # type: ignore[attr-defined]
elif name in cmp_name_to_op_mapping:
if not isinstance(args[0], (SetVariable, variables.UserDefinedSetVariable)):
return ConstantVariable.create(NotImplemented)
return ConstantVariable.create(
cmp_name_to_op_mapping[name](self.set_items, args[0].set_items) # type: ignore[attr-defined]
)
return super().call_method(tx, name, args, kwargs)
def getitem_const(
self, tx: "InstructionTranslator", arg: VariableTracker
) -> VariableTracker:
raise RuntimeError("Illegal to getitem on a set")
def install_dict_keys_match_guard(self) -> None:
# Already EQUALS_MATCH guarded
pass
| SetVariable |
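Backing a set with a dict whose values are all `None`, as the class above does, gives set semantics plus the insertion-order guarantee of Python dicts, which keeps tracing deterministic. The underlying trick in isolation:

```python
items = ["b", "a", "b", "c"]
as_set = dict.fromkeys(items)           # {'b': None, 'a': None, 'c': None}

assert "a" in as_set                    # membership works like a set
assert list(as_set) == ["b", "a", "c"]  # duplicates dropped, first-seen order kept
as_set.pop("b")                         # removal mirrors set.discard / set.remove
assert list(as_set) == ["a", "c"]
```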
python | PyCQA__pylint | tests/functional/u/unused/unused_private_member.py | {
"start": 5512,
"end": 5619
} | class ____:
def __init__(self):
self.__messages = None # [unused-private-member]
| Crash4755Context |
python | cython__cython | Cython/Compiler/Dataclass.py | {
"start": 29773,
"end": 36595
} | class ____(ExprNodes.ExprNode):
"""
__dataclass_fields__ contains a bunch of field objects recording how each field
of the dataclass was initialized (mainly corresponding to the arguments passed to
the "field" function). This node is used for the attributes of these field objects.
If possible, coerces `arg` to a Python object.
Otherwise, generates a sensible backup string.
"""
subexprs = ['arg']
def __init__(self, pos, arg):
super().__init__(pos, arg=arg)
def analyse_types(self, env):
self.arg.analyse_types(env)
self.type = self.arg.type
return self
def coerce_to_pyobject(self, env):
if self.arg.type.can_coerce_to_pyobject(env):
return self.arg.coerce_to_pyobject(env)
else:
# A string representation of the code that gave the field seems like a reasonable
# fallback. This'll mostly happen for "default" and "default_factory" where the
# type may be a C-type that can't be converted to Python.
return self._make_string()
def _make_string(self):
from .AutoDocTransforms import AnnotationWriter
writer = AnnotationWriter(description="Dataclass field")
string = writer.write(self.arg)
return ExprNodes.UnicodeNode(self.pos, value=EncodedString(string))
def generate_evaluation_code(self, code):
return self.arg.generate_evaluation_code(code)
def _set_up_dataclass_fields(node, fields, dataclass_module):
# For defaults and default_factories containing things like lambda,
# they're already declared in the class scope, and it creates a big
# problem if multiple copies are floating around in both the __init__
# function, and in the __dataclass_fields__ structure.
# Therefore, create module-level constants holding these values and
# pass those around instead
#
# If possible we use the `Field` class defined in the standard library
# module so that the information stored here is as close to a regular
# dataclass as is possible.
variables_assignment_stats = []
for name, field in fields.items():
if field.private:
continue # doesn't appear in the public interface
for attrname in [ "default", "default_factory" ]:
field_default = getattr(field, attrname)
if field_default is MISSING or field_default.is_literal or field_default.is_name:
# some simple cases where we don't need to set up
# the variable as a module-level constant
continue
global_scope = node.scope.global_scope()
module_field_name = global_scope.mangle(
global_scope.mangle(Naming.dataclass_field_default_cname, node.class_name),
name)
# create an entry in the global scope for this variable to live
field_node = ExprNodes.NameNode(field_default.pos, name=EncodedString(module_field_name))
field_node.entry = global_scope.declare_var(
field_node.name, type=field_default.type or PyrexTypes.unspecified_type,
pos=field_default.pos, cname=field_node.name, is_cdef=True,
# TODO: do we need to set 'pytyping_modifiers' here?
)
# replace the field so that future users just receive the namenode
setattr(field, attrname, field_node)
variables_assignment_stats.append(
Nodes.SingleAssignmentNode(field_default.pos, lhs=field_node, rhs=field_default))
placeholders = {}
field_func = ExprNodes.AttributeNode(node.pos, obj=dataclass_module,
attribute=EncodedString("field"))
dc_fields = ExprNodes.DictNode(node.pos, key_value_pairs=[])
dc_fields_namevalue_assignments = []
for name, field in fields.items():
if field.private:
continue # doesn't appear in the public interface
type_placeholder_name = "PLACEHOLDER_%s" % name
placeholders[type_placeholder_name] = get_field_type(
node.pos, node.scope.entries[name]
)
# defining these make the fields introspect more like a Python dataclass
field_type_placeholder_name = "PLACEHOLDER_FIELD_TYPE_%s" % name
if field.is_initvar:
placeholders[field_type_placeholder_name] = ExprNodes.AttributeNode(
node.pos, obj=dataclass_module,
attribute=EncodedString("_FIELD_INITVAR")
)
elif field.is_classvar:
# TODO - currently this isn't triggered
placeholders[field_type_placeholder_name] = ExprNodes.AttributeNode(
node.pos, obj=dataclass_module,
attribute=EncodedString("_FIELD_CLASSVAR")
)
else:
placeholders[field_type_placeholder_name] = ExprNodes.AttributeNode(
node.pos, obj=dataclass_module,
attribute=EncodedString("_FIELD")
)
dc_field_keywords = ExprNodes.DictNode.from_pairs(
node.pos,
[(ExprNodes.IdentifierStringNode(node.pos, value=EncodedString(k)),
FieldRecordNode(node.pos, arg=v))
for k, v in field.iterate_record_node_arguments()]
)
dc_field_call = make_dataclass_call_helper(
node.pos, field_func, dc_field_keywords
)
dc_fields.key_value_pairs.append(
ExprNodes.DictItemNode(
node.pos,
key=ExprNodes.IdentifierStringNode(node.pos, value=EncodedString(name)),
value=dc_field_call))
dc_fields_namevalue_assignments.append(
dedent(f"""\
__dataclass_fields__[{name!r}].name = {name!r}
__dataclass_fields__[{name!r}].type = {type_placeholder_name}
__dataclass_fields__[{name!r}]._field_type = {field_type_placeholder_name}
"""))
dataclass_fields_assignment = \
Nodes.SingleAssignmentNode(node.pos,
lhs = ExprNodes.NameNode(node.pos,
name=EncodedString("__dataclass_fields__")),
rhs = dc_fields)
dc_fields_namevalue_assignments = "\n".join(dc_fields_namevalue_assignments)
dc_fields_namevalue_assignments = TreeFragment(dc_fields_namevalue_assignments,
level="c_class",
pipeline=[NormalizeTree(None)])
dc_fields_namevalue_assignments = dc_fields_namevalue_assignments.substitute(placeholders)
return (variables_assignment_stats
+ [dataclass_fields_assignment]
+ dc_fields_namevalue_assignments.stats)
| FieldRecordNode |
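The transform above rebuilds, at compile time, the `__dataclass_fields__` mapping that CPython's `dataclasses` module creates at runtime. For comparison, this is how the runtime metadata looks on an ordinary Python dataclass (standard-library behavior only, nothing Cython-specific):

```python
from dataclasses import MISSING, dataclass, field, fields

@dataclass
class Point:
    x: int
    y: int = 0
    tags: list = field(default_factory=list)

for f in fields(Point):
    has_default = f.default is not MISSING or f.default_factory is not MISSING
    print(f.name, f.type, "default" if has_default else "required")
# x <class 'int'> required
# y <class 'int'> default
# tags <class 'list'> default

print(sorted(Point.__dataclass_fields__))   # ['tags', 'x', 'y']
```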
python | getsentry__sentry | src/sentry/sentry_apps/api/serializers/app_platform_event.py | {
"start": 510,
"end": 604
} | class ____(StrEnum):
USER = "user"
APPLICATION = "application"
| AppPlatformEventActorType |
python | fluentpython__example-code | 19-dyn-attr-prop/oscon/schedule2.py | {
"start": 1238,
"end": 2111
} | class ____(Record): # <2>
__db = None # <3>
@staticmethod # <4>
def set_db(db):
DbRecord.__db = db # <5>
@staticmethod # <6>
def get_db():
return DbRecord.__db
@classmethod # <7>
def fetch(cls, ident):
db = cls.get_db()
try:
return db[ident] # <8>
except TypeError:
if db is None: # <9>
msg = "database not set; call '{}.set_db(my_db)'"
raise MissingDatabaseError(msg.format(cls.__name__))
else: # <10>
raise
def __repr__(self):
if hasattr(self, 'serial'): # <11>
cls_name = self.__class__.__name__
return '<{} serial={!r}>'.format(cls_name, self.serial)
else:
return super().__repr__() # <12>
# END SCHEDULE2_DBRECORD
# BEGIN SCHEDULE2_EVENT
| DbRecord |
python | PrefectHQ__prefect | src/prefect/server/orchestration/rules.py | {
"start": 39797,
"end": 39932
} | class ____(
BaseOrchestrationRule[orm_models.Run, Union[core.FlowRunPolicy, core.TaskRunPolicy]]
):
pass
| GenericOrchestrationRule |
python | Netflix__metaflow | metaflow/includefile.py | {
"start": 2701,
"end": 8702
} | class ____(click.ParamType):
name = "FilePath"
def __init__(self, is_text, encoding):
self._is_text = is_text
self._encoding = encoding
def convert(self, value, param, ctx):
# Click can call convert multiple times, so we need to make sure to only
# convert once. This function will return a DelayedEvaluationParameter
# (if it needs to still perform an upload) or an IncludedFile if not
if isinstance(value, (DelayedEvaluationParameter, IncludedFile)):
return value
# Value will be a string containing one of two things:
# - Scenario A: a JSON blob indicating that the file has already been uploaded.
# The scenario this happens in is as follows:
# + `step-functions create` is called and the IncludeFile has a default
# value. At the time of creation, the file is uploaded and a URL is
# returned; this URL is packaged in a blob by Uploader and passed to
# step-functions as the value of the parameter.
# + when the step function actually runs, the value is passed to click
# through METAFLOW_INIT_XXX; this value is the one returned above
# - Scenario B: A path. The path can either be:
# + B.1: <prefix>://<something> like s3://foo/bar or local:///foo/bar
# (right now, we are disabling support for this because the artifact
# can change unlike all other artifacts. It is trivial to re-enable
# + B.2: an actual path to a local file like /foo/bar
# In the first case, we just store an *external* reference to it (so we
# won't upload anything). In the second case we will want to upload something,
# but we only do that in the DelayedEvaluationParameter step.
# ctx can be one of two things:
# - the click context (when called normally)
# - the ParameterContext (when called through _eval_default)
# If not a ParameterContext, we convert it to that
if not isinstance(ctx, ParameterContext):
ctx = ParameterContext(
flow_name=ctx.obj.flow.name,
user_name=get_username(),
parameter_name=param.name,
logger=ctx.obj.echo,
ds_type=ctx.obj.datastore_impl.TYPE,
configs=None,
)
if len(value) > 0 and (value.startswith("{") or value.startswith('"{')):
# This is a blob; no URL starts with `{`. We are thus in scenario A
try:
value = json.loads(value)
# to handle quoted json strings
if not isinstance(value, dict):
value = json.loads(value)
except json.JSONDecodeError as e:
raise MetaflowException(
"IncludeFile '%s' (value: %s) is malformed" % (param.name, value)
)
# All processing has already been done, so we just convert to an `IncludedFile`
return IncludedFile(value)
path = os.path.expanduser(value)
prefix_pos = path.find("://")
if prefix_pos > 0:
# Scenario B.1
raise MetaflowException(
"IncludeFile using a direct reference to a file in cloud storage is no "
"longer supported. Contact the Metaflow team if you need this supported"
)
# if _dict_dataclients.get(path[:prefix_pos]) is None:
# self.fail(
# "IncludeFile: no handler for external file of type '%s' "
# "(given path is '%s')" % (path[:prefix_pos], path)
# )
# # We don't need to do anything more -- the file is already uploaded so we
# # just return a blob indicating how to get the file.
# return IncludedFile(
# CURRENT_UPLOADER.encode_url(
# "external", path, is_text=self._is_text, encoding=self._encoding
# )
# )
else:
# Scenario B.2
# Check if this is a valid local file
try:
with open(path, mode="r") as _:
pass
except OSError:
self.fail("IncludeFile: could not open file '%s' for reading" % path)
handler = _dict_dataclients.get(ctx.ds_type)
if handler is None:
self.fail(
"IncludeFile: no data-client for datastore of type '%s'"
% ctx.ds_type
)
# Now that we have done preliminary checks, we will delay uploading it
# until later (so it happens after PyLint checks the flow, but we prepare
# everything for it)
lambda_ctx = _DelayedExecContext(
flow_name=ctx.flow_name,
path=path,
is_text=self._is_text,
encoding=self._encoding,
handler_type=ctx.ds_type,
echo=ctx.logger,
)
def _delayed_eval_func(ctx=lambda_ctx, return_str=False):
incl_file = IncludedFile(
CURRENT_UPLOADER.store(
ctx.flow_name,
ctx.path,
ctx.is_text,
ctx.encoding,
_dict_dataclients[ctx.handler_type],
ctx.echo,
)
)
if return_str:
return json.dumps(incl_file.descriptor)
return incl_file
return DelayedEvaluationParameter(
ctx.parameter_name,
"default",
functools.partial(_delayed_eval_func, ctx=lambda_ctx),
)
def __str__(self):
return repr(self)
def __repr__(self):
return "FilePath"
| FilePathClass |
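The class above is a custom `click.ParamType`: the `name` attribute plus the `convert(value, param, ctx)` and `fail()` methods are click's standard extension points, with the extra twist that conversion may return a delayed-evaluation wrapper. A minimal custom parameter type, independent of Metaflow (assumes `click` is installed; the even-integer rule is just an example):

```python
import click

class EvenInt(click.ParamType):
    name = "even-int"

    def convert(self, value, param, ctx):
        try:
            number = int(value)
        except ValueError:
            self.fail(f"{value!r} is not an integer", param, ctx)
        if number % 2:
            self.fail(f"{number} is not even", param, ctx)
        return number

@click.command()
@click.option("--workers", type=EvenInt(), default="2", show_default=True)
def main(workers):
    click.echo(f"workers={workers}")

if __name__ == "__main__":
    main()
```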
python | spyder-ide__spyder | external-deps/spyder-kernels/spyder_kernels/utils/tests/test_iofuncs.py | {
"start": 1884,
"end": 12303
} | class ____(UnDeepCopyableObj):
"""A class of objects that can deepcopied, but not pickled."""
def __deepcopy__(self, memo):
new_one = self.__class__.__new__(self.__class__)
new_one.__dict__.update(self.__dict__)
return new_one
# =============================================================================
# ---- Fixtures
# =============================================================================
@pytest.fixture
def spydata_values():
"""
Define spydata file ground truth values.
The file export_data.spydata contains five variables to be loaded.
This fixture declares those variables in a static way.
"""
A = 1
B = 'ham'
C = np.eye(3)
D = {'a': True, 'b': np.eye(4, dtype=np.complex128)}
E = [np.eye(2, dtype=np.int64), 42.0, np.eye(3, dtype=np.bool_), np.eye(4, dtype=object)]
return {'A': A, 'B': B, 'C': C, 'D': D, 'E': E}
@pytest.fixture
def real_values():
"""
Load a Numpy pickled file.
The file numpy_data.npz contains six variables, each one represents the
expected test values after a manual conversion of the same variables
defined and evaluated in MATLAB. The manual type conversion was done
over several variable types, such as: Matrices/Vectors, Scalar and
Complex numbers, Structs, Strings and Cell Arrays. The set of variables
was defined to allow and test the deep conversion of a compound type,
i.e., a struct that contains other types that need to be converted,
like other structs, matrices and Cell Arrays.
"""
path = os.path.join(LOCATION, 'numpy_data.npz')
file_s = np.load(path, allow_pickle=True)
A = file_s['A'].item()
B = file_s['B']
C = file_s['C']
D = file_s['D'].item()
E = file_s['E']
return {'A': A, 'B': B, 'C': C, 'D': D, 'E': E}
@pytest.fixture
def namespace_objects_full(spydata_values):
"""
Define a dictionary of objects of a variety of different types to be saved.
This fixture represents the state of the namespace before saving and
filtering out un-deep-copyable, un-pickleable, and uninteresting objects.
"""
namespace_dict = copy.deepcopy(spydata_values)
namespace_dict['expected_error_string'] = (
'Some objects could not be saved: '
'undeepcopyable_instance, unpickleable_instance')
namespace_dict['module_obj'] = io
namespace_dict['class_obj'] = Exception
namespace_dict['function_obj'] = os.path.join
namespace_dict['unpickleable_instance'] = UnPickleableObj("spam")
namespace_dict['undeepcopyable_instance'] = UnDeepCopyableObj("ham")
namespace_dict['custom_instance'] = CustomObj("eggs")
return namespace_dict
@pytest.fixture
def namespace_objects_filtered(spydata_values):
"""
Define a dictionary of the objects from the namespace that can be saved.
This fixture represents the state of the namespace after saving and
filtering out un-deep-copyable, un-pickleable, and uninteresting objects.
"""
namespace_dict = copy.deepcopy(spydata_values)
namespace_dict['custom_instance'] = CustomObj("eggs")
return namespace_dict
@pytest.fixture
def namespace_objects_nocopyable():
"""
Define a dictionary of that cannot be deepcopied.
"""
namespace_dict = {}
namespace_dict['expected_error_string'] = 'No supported objects to save'
namespace_dict['class_obj'] = Exception
namespace_dict['undeepcopyable_instance'] = UnDeepCopyableObj("ham")
return namespace_dict
@pytest.fixture
def namespace_objects_nopickleable():
"""
Define a dictionary of objects that cannot be pickled.
"""
namespace_dict = {}
namespace_dict['expected_error_string'] = 'No supported objects to save'
namespace_dict['function_obj'] = os.path.join
namespace_dict['unpickleable_instance'] = UnPickleableObj("spam")
return namespace_dict
@pytest.fixture
def input_namespace(request):
if request.param is None:
return None
else:
return request.getfixturevalue(request.param)
@pytest.fixture
def expected_namespace(request):
if request.param is None:
return None
else:
return request.getfixturevalue(request.param)
# =============================================================================
# ---- Tests
# =============================================================================
def test_npz_import():
"""
Test the load of .npz files as dictionaries.
"""
filename = os.path.join(LOCATION, 'import_data.npz')
data = iofuncs.load_array(filename)
assert isinstance(data, tuple)
variables, error = data
assert variables['val1'] == np.array(1) and not error
@pytest.mark.skipif(iofuncs.load_matlab is None, reason="SciPy required")
def test_matlab_import(real_values):
"""
Test the automatic conversion and import of variables from MATLAB.
This test loads a file stored in MATLAB; the variables defined are
equivalent to the manually converted values done over Numpy. This test
evaluates the function which processes the conversion automatically,
i.e., the automatic conversion results should be equal to the manual
conversion of the variables.
"""
path = os.path.join(LOCATION, 'data.mat')
inf, _ = iofuncs.load_matlab(path)
valid = True
for var in sorted(real_values.keys()):
valid = valid and bool(np.mean(real_values[var] == inf[var]))
assert valid
@pytest.mark.parametrize('spydata_file_name', ['export_data.spydata',
'export_data_renamed.spydata'])
def test_spydata_import(spydata_file_name, spydata_values):
"""
Test spydata handling and variable importing.
This test loads all the variables contained inside a spydata tar
container and compares them against their static values.
It tests both a file with the original name, and one that has been renamed
in order to catch Issue #9 .
"""
path = os.path.join(LOCATION, spydata_file_name)
data, error = iofuncs.load_dictionary(path)
assert error is None
assert are_namespaces_equal(data, spydata_values)
def test_spydata_import_witherror():
"""
Test that import fails gracefully with a fn not present in the namespace.
Checks that the error is caught, the message is passed back,
and the current working directory is restored afterwards.
"""
original_cwd = os.getcwd()
path = os.path.join(LOCATION, 'export_data_withfunction.spydata')
data, error = iofuncs.load_dictionary(path)
assert error and isinstance(error, str)
assert data is None
assert os.getcwd() == original_cwd
def test_spydata_import_missing_file():
"""
Test that import fails properly when file is missing, and resets the cwd.
"""
original_cwd = os.getcwd()
path = os.path.join(LOCATION, 'non_existant_path_2019-01-23.spydata')
try:
iofuncs.load_dictionary(path)
except IOError:
pass
else:
# Fail if exception did not occur when it should
assert False
assert os.getcwd() == original_cwd
@pytest.mark.skipif(iofuncs.load_matlab is None, reason="SciPy required")
def test_matlabstruct():
"""Test support for matlab stlye struct."""
a = iofuncs.MatlabStruct()
a.b = 'spam'
assert a["b"] == 'spam'
a.c["d"] = 'eggs'
assert a.c.d == 'eggs'
assert a == {'c': {'d': 'eggs'}, 'b': 'spam'}
a['d'] = [1, 2, 3]
buf = io.BytesIO()
iofuncs.save_matlab(a, buf)
buf.seek(0)
data, error = iofuncs.load_matlab(buf)
assert error is None
assert data['b'] == 'spam'
assert data['c'].d == 'eggs'
assert data['d'].tolist() == [[1, 2, 3]]
@pytest.mark.parametrize('input_namespace,expected_namespace,filename', [
('spydata_values', 'spydata_values', 'export_data_copy'),
('namespace_objects_full', 'namespace_objects_filtered', 'export_data_2'),
('namespace_objects_nocopyable', None, 'export_data_none_1'),
('namespace_objects_nopickleable', None, 'export_data_none_2'),
], indirect=['input_namespace', 'expected_namespace'])
def test_spydata_export(input_namespace, expected_namespace,
filename):
"""
Test spydata export and re-import.
This test saves the variables in ``spydata`` format and then
reloads and checks them to make sure they save/restore properly
and no errors occur during the process.
"""
path = os.path.join(LOCATION, filename + '.spydata')
expected_error = None
if 'expected_error_string' in input_namespace:
expected_error = input_namespace['expected_error_string']
del input_namespace['expected_error_string']
cwd_original = os.getcwd()
try:
export_error = iofuncs.save_dictionary(input_namespace, path)
assert export_error == expected_error
if expected_namespace is None:
assert not os.path.isfile(path)
else:
data_actual, import_error = iofuncs.load_dictionary(path)
assert import_error is None
print(data_actual.keys())
print(expected_namespace.keys())
assert are_namespaces_equal(data_actual, expected_namespace)
assert cwd_original == os.getcwd()
finally:
if os.path.isfile(path):
try:
os.remove(path)
except (IOError, OSError, PermissionError):
pass
def test_save_load_hdf5_files(tmp_path):
"""Simple test to check that we can save and load HDF5 files."""
import h5py
h5_file = tmp_path / "test.h5"
data = {'a' : [1, 2, 3, 4], 'b' : 4.5}
iofuncs.save_hdf5(data, h5_file)
expected = ({'a': np.array([1, 2, 3, 4]), 'b': np.array(4.5)}, None)
assert repr(iofuncs.load_hdf5(h5_file)) == repr(expected)
@pytest.mark.skipif(
os.environ.get("USE_CONDA") == "true",
reason="Pydicom is not installed correctly in Conda envs"
)
def test_load_dicom_files():
"""Check that we can load DICOM files."""
# This test pass locally but we need to set the variable below for it to
# pass on CIs.
# See https://stackoverflow.com/a/47958486/438386 for context.
ImageFile.LOAD_TRUNCATED_IMAGES = True
data = iofuncs.load_dicom(os.path.join(LOCATION, 'data.dcm'))
assert data[0]['data'].shape == (512, 512)
if __name__ == "__main__":
pytest.main()
| UnPickleableObj |
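The helper classes above separate two different capabilities: `copy.deepcopy` honors a custom `__deepcopy__`, while `pickle` goes through `__reduce_ex__` and the instance state, so an object can support one and not the other. A compact demonstration of that split (illustrative class, not Spyder's):

```python
import copy, os, pickle

class DeepCopyableNotPicklable:
    """Deep-copyable via __deepcopy__, but holds a handle that pickle rejects."""

    def __init__(self, payload):
        self.payload = payload
        self._handle = open(os.devnull)    # file objects cannot be pickled

    def __deepcopy__(self, memo):
        new = self.__class__.__new__(self.__class__)
        new.payload = copy.deepcopy(self.payload, memo)
        new._handle = self._handle         # share the unpicklable part instead of copying it
        return new

obj = DeepCopyableNotPicklable({"a": 1})
clone = copy.deepcopy(obj)                 # works: deepcopy uses __deepcopy__
assert clone.payload == obj.payload and clone is not obj

try:
    pickle.dumps(obj)                      # fails: pickling the file handle raises TypeError
except TypeError as exc:
    print("pickle failed:", exc)
```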
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 217072,
"end": 223639
} | class ____(StatNode):
# A Node for dispatching to the def method if it
# is overridden.
#
# py_func
#
# args
# func_temp
# body
child_attrs = ['body']
body = None
def analyse_expressions(self, env):
self.args = env.arg_entries
if self.py_func.is_module_scope:
first_arg = 0
else:
first_arg = 1
from . import ExprNodes
self.func_node = ExprNodes.RawCNameExprNode(self.pos, py_object_type)
call_node = ExprNodes.SimpleCallNode(
self.pos, function=self.func_node,
args=[ExprNodes.NameNode(self.pos, name=arg.name)
for arg in self.args[first_arg:]])
if env.return_type.is_void or env.return_type.is_returncode:
self.body = StatListNode(self.pos, stats=[
ExprStatNode(self.pos, expr=call_node),
ReturnStatNode(self.pos, value=None)])
else:
self.body = ReturnStatNode(self.pos, value=call_node)
self.body = self.body.analyse_expressions(env)
return self
def generate_execution_code(self, code):
# For fused functions, look up the dispatch function, not the specialisation.
method_entry = self.py_func.fused_py_func.entry if self.py_func.fused_py_func else self.py_func.entry
interned_attr_cname = code.intern_identifier(method_entry.name)
# Check to see if we are an extension type
if self.py_func.is_module_scope:
self_arg = "((PyObject *)%s)" % Naming.module_cname
else:
self_arg = "((PyObject *)%s)" % self.args[0].cname
code.putln("/* Check if called by wrapper */")
code.putln("if (unlikely(%s)) ;" % Naming.skip_dispatch_cname)
code.putln("/* Check if overridden in Python */")
if self.py_func.is_module_scope or self.py_func.entry.scope.lookup_here("__dict__"):
code.putln("else {")
else:
code.putln("else if (")
code.putln("#if !CYTHON_USE_TYPE_SLOTS")
# If CYTHON_USE_TYPE_SPECS then all extension types are heap-types so the check below automatically
# passes and thus takes the slow route.
# Therefore we do a less thorough check - if the type hasn't changed then clearly it hasn't
# been overridden, and if the type isn't GC then it also won't have been overridden.
typeptr_cname = code.name_in_module_state(self.py_func.entry.scope.parent_type.typeptr_cname)
code.putln(f"unlikely(Py_TYPE({self_arg}) != "
f"{typeptr_cname} &&")
code.putln(f"__Pyx_PyType_HasFeature(Py_TYPE({self_arg}), Py_TPFLAGS_HAVE_GC))")
code.putln("#else")
code.putln(f"unlikely(Py_TYPE({self_arg})->tp_dictoffset != 0 || "
f"__Pyx_PyType_HasFeature(Py_TYPE({self_arg}), (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)))")
code.putln("#endif")
code.putln(") {")
code.putln("#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS")
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyDictVersioning", "ObjectHandling.c"))
# TODO: remove the object dict version check by 'inlining' the getattr implementation for methods.
# This would allow checking the dict versions around _PyType_Lookup() if it returns a descriptor,
# and would (tada!) make this check a pure type based thing instead of supporting only a single
# instance at a time.
code.putln("static PY_UINT64_T %s = __PYX_DICT_VERSION_INIT, %s = __PYX_DICT_VERSION_INIT;" % (
Naming.tp_dict_version_temp, Naming.obj_dict_version_temp))
code.putln("if (unlikely(!__Pyx_object_dict_version_matches(%s, %s, %s))) {" % (
self_arg, Naming.tp_dict_version_temp, Naming.obj_dict_version_temp))
code.putln("PY_UINT64_T %s = __Pyx_get_tp_dict_version(%s);" % (
Naming.type_dict_guard_temp, self_arg))
code.putln("#endif")
func_node_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
self.func_node.set_cname(func_node_temp)
# need to get attribute manually--scope would return cdef method
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
code.putln("%s = __Pyx_PyObject_GetAttrStr(%s, %s); %s" % (
func_node_temp, self_arg, interned_attr_cname,
code.error_goto_if_null(func_node_temp, self.pos)))
code.put_gotref(func_node_temp, py_object_type)
code.putln("if (!__Pyx_IsSameCFunction(%s, (void(*)(void)) %s)) {" % (func_node_temp, method_entry.func_cname))
self.body.generate_execution_code(code)
code.putln("}")
# NOTE: it's not 100% sure that we catch the exact versions here that were used for the lookup,
# but it is very unlikely that the versions change during lookup, and the type dict safe guard
# should increase the chance of detecting such a case.
code.putln("#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS")
code.putln("%s = __Pyx_get_tp_dict_version(%s);" % (
Naming.tp_dict_version_temp, self_arg))
code.putln("%s = __Pyx_get_object_dict_version(%s);" % (
Naming.obj_dict_version_temp, self_arg))
# Safety check that the type dict didn't change during the lookup. Since CPython looks up the
# attribute (descriptor) first in the type dict and then in the instance dict or through the
# descriptor, the only really far-away lookup when we get here is one in the type dict. So we
# double check the type dict version before and afterwards to guard against later changes of
# the type dict during the lookup process.
code.putln("if (unlikely(%s != %s)) {" % (
Naming.type_dict_guard_temp, Naming.tp_dict_version_temp))
code.putln("%s = %s = __PYX_DICT_VERSION_INIT;" % (
Naming.tp_dict_version_temp, Naming.obj_dict_version_temp))
code.putln("}")
code.putln("#endif")
code.put_decref_clear(func_node_temp, PyrexTypes.py_object_type)
code.funcstate.release_temp(func_node_temp)
code.putln("#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS")
code.putln("}")
code.putln("#endif")
code.putln("}")
| OverrideCheckNode |
python | lepture__authlib | authlib/oauth2/rfc6749/errors.py | {
"start": 3478,
"end": 3869
} | class ____(OAuth2Error):
"""The provided authorization grant (e.g., authorization
code, resource owner credentials) or refresh token is
invalid, expired, revoked, does not match the redirection
URI used in the authorization request, or was issued to
another client.
https://tools.ietf.org/html/rfc6749#section-5.2
"""
error = "invalid_grant"
| InvalidGrantError |
python | apache__airflow | providers/atlassian/jira/tests/unit/atlassian/jira/sensors/test_jira.py | {
"start": 1609,
"end": 2721
} | class ____:
@pytest.fixture(autouse=True)
def setup_test_cases(self, monkeypatch):
monkeypatch.setenv(
"AIRFLOW_CONN_JIRA_DEFAULT".upper(),
connection_as_json(
Connection(
conn_id="jira_default",
conn_type="jira",
host="https://localhost/jira/",
port=443,
login="user",
password="password",
extra='{"verify": false, "project": "AIRFLOW"}',
)
),
)
def test_issue_label_set(self, mocked_jira_client):
mocked_jira_client.return_value.issue.return_value = MINIMAL_TEST_TICKET
sensor = JiraTicketSensor(
task_id="search-ticket-test",
ticket_id="TEST-1226",
field="labels",
expected_value="test-label-1",
timeout=518400,
poke_interval=10,
)
assert sensor.poke({})
assert mocked_jira_client.called
assert mocked_jira_client.return_value.issue.called
| TestJiraSensor |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fragments.py | {
"start": 5674,
"end": 6147
} | class ____(GQLResult):
typename__: Typename[Literal["Project"]] = "Project"
id: GQLId
name: str
entity: RegistryFragmentEntity
description: Optional[str]
created_at: str = Field(alias="createdAt")
updated_at: Optional[str] = Field(alias="updatedAt")
access: Optional[str]
allow_all_artifact_types: bool = Field(alias="allowAllArtifactTypes")
artifact_types: RegistryFragmentArtifactTypes = Field(alias="artifactTypes")
| RegistryFragment |
python | doocs__leetcode | solution/1300-1399/1368.Minimum Cost to Make at Least One Valid Path in a Grid/Solution.py | {
"start": 0,
"end": 732
} | class ____:
def minCost(self, grid: List[List[int]]) -> int:
m, n = len(grid), len(grid[0])
dirs = [[0, 0], [0, 1], [0, -1], [1, 0], [-1, 0]]
q = deque([(0, 0, 0)])
vis = set()
while q:
i, j, d = q.popleft()
if (i, j) in vis:
continue
vis.add((i, j))
if i == m - 1 and j == n - 1:
return d
for k in range(1, 5):
x, y = i + dirs[k][0], j + dirs[k][1]
if 0 <= x < m and 0 <= y < n:
if grid[i][j] == k:
q.appendleft((x, y, d))
else:
q.append((x, y, d + 1))
return -1
| Solution |
python | openai__openai-python | src/openai/resources/realtime/client_secrets.py | {
"start": 742,
"end": 3673
} | class ____(SyncAPIResource):
@cached_property
def with_raw_response(self) -> ClientSecretsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return ClientSecretsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> ClientSecretsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return ClientSecretsWithStreamingResponse(self)
def create(
self,
*,
expires_after: client_secret_create_params.ExpiresAfter | Omit = omit,
session: client_secret_create_params.Session | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ClientSecretCreateResponse:
"""
Create a Realtime client secret with an associated session configuration.
Args:
expires_after: Configuration for the client secret expiration. Expiration refers to the time
after which a client secret will no longer be valid for creating sessions. The
session itself may continue after that time once started. A secret can be used
to create multiple sessions until it expires.
session: Session configuration to use for the client secret. Choose either a realtime
session or a transcription session.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._post(
"/realtime/client_secrets",
body=maybe_transform(
{
"expires_after": expires_after,
"session": session,
},
client_secret_create_params.ClientSecretCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ClientSecretCreateResponse,
)
| ClientSecrets |
python | kamyu104__LeetCode-Solutions | Python/reverse-words-in-a-string-iii.py | {
"start": 504,
"end": 664
} | class ____(object):
def reverseWords(self, s):
reversed_words = [word[::-1] for word in s.split(' ')]
return ' '.join(reversed_words)
| Solution2 |
python | tensorflow__tensorflow | tensorflow/python/distribute/coordinator/cluster_coordinator.py | {
"start": 5196,
"end": 7251
} | class ____(errors.UnavailableError):
"""Specifies that a parameter server is the unavailable task."""
def __init__(self, original_exception):
assert isinstance(original_exception, errors.UnavailableError)
# TF Errors should have init args set as attributes for serialization.
self.original_exception = original_exception
super().__init__(
original_exception.node_def,
original_exception.op,
original_exception.message,
)
def _get_error_from_remote_values(structure):
"""Attempts to return errors from `RemoteValue`s. Rebuilds them if needed."""
errors_in_structure = []
def _get_error(val):
if isinstance(val, RemoteValue):
error = val._get_error() # pylint: disable=protected-access
if error:
errors_in_structure.append(error)
nest.map_structure(_get_error, structure)
if errors_in_structure:
return errors_in_structure[0]
else:
return None
def _maybe_as_type_spec(val):
if isinstance(val, (RemoteValue, PerWorkerValues)):
if val._type_spec is None: # pylint: disable=protected-access
raise ValueError("Output of a scheduled function that is not "
"tf.function cannot be the input of another function.")
return val._type_spec # pylint: disable=protected-access
else:
return val
def _select_worker_slice(worker_id, structured):
"""Selects the worker slice of each of the items in `structured`."""
def _get(x):
return x._values[worker_id] if isinstance(x, PerWorkerValues) else x # pylint: disable=protected-access
return nest.map_structure(_get, structured)
def _disallow_remote_value_as_input(structured):
"""Raises if any element of `structured` is a RemoteValue."""
def _raise_if_remote_value(x):
if isinstance(x, RemoteValue):
raise ValueError(
"`tf.distribute.experimental.coordinator.RemoteValue` used "
"as an input to scheduled function is not yet "
"supported.")
nest.map_structure(_raise_if_remote_value, structured)
| PSUnavailableError |
python | huggingface__transformers | tests/models/pixtral/test_modeling_pixtral.py | {
"start": 3579,
"end": 4476
} | class ____(ModelTesterMixin, unittest.TestCase):
"""
Model tester for `PixtralVisionModel`.
"""
all_model_classes = (PixtralVisionModel,) if is_torch_available() else ()
additional_model_inputs = ["image_sizes"]
test_resize_embeddings = False
def setUp(self):
self.model_tester = PixtralVisionModelTester(self)
self.config_tester = ConfigTester(self, config_class=PixtralVisionConfig, has_text_modality=False)
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (torch.nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, torch.nn.Linear))
| PixtralVisionModelModelTest |
python | dask__dask | dask/tests/test_tokenize.py | {
"start": 22475,
"end": 22790
} | class ____:
def __init__(self, x):
self.x = x
def __dask_tokenize__(self):
return normalize_token(type(self)), self.x
def normal_method(self):
pass
@staticmethod
def static_method():
pass
@classmethod
def class_method(cls):
pass
| HasStaticMethods |
python | tensorflow__tensorflow | tensorflow/python/training/device_setter.py | {
"start": 1525,
"end": 2348
} | class ____:
"""Returns the next ps task index for placement in round-robin order.
This class is not to be used directly by users. See instead
`replica_device_setter()` below.
"""
def __init__(self, num_tasks):
"""Create a new `_RoundRobinStrategy`.
Args:
num_tasks: Number of ps tasks to cycle among.
"""
self._num_tasks = num_tasks
self._next_task = 0
def __call__(self, unused_op):
"""Choose a ps task index for the given `Operation`.
Args:
unused_op: An `Operation` to be placed on ps.
Returns:
The next ps task index to use for the `Operation`. Returns the next
index, in the range `[offset, offset + num_tasks)`.
"""
task = self._next_task
self._next_task = (self._next_task + 1) % self._num_tasks
return task
| _RoundRobinStrategy |
python | ansible__ansible | lib/ansible/module_utils/errors.py | {
"start": 2219,
"end": 2306
} | class ____(AnsibleValidationError):
"""Error when validating elements"""
| ElementError |
python | django__django | django/db/models/lookups.py | {
"start": 8780,
"end": 9700
} | class ____:
"""
Some lookups require Field.get_db_prep_value() to be called on their
inputs.
"""
get_db_prep_lookup_value_is_iterable = False
def get_db_prep_lookup(self, value, connection):
# For relational fields, use the 'target_field' attribute of the
# output_field.
field = getattr(self.lhs.output_field, "target_field", None)
get_db_prep_value = (
getattr(field, "get_db_prep_value", None)
or self.lhs.output_field.get_db_prep_value
)
if not self.get_db_prep_lookup_value_is_iterable:
value = [value]
return (
"%s",
[
(
v
if hasattr(v, "as_sql")
else get_db_prep_value(v, connection, prepared=True)
)
for v in value
],
)
| FieldGetDbPrepValueMixin |
python | huggingface__transformers | src/transformers/models/vaultgemma/modeling_vaultgemma.py | {
"start": 7224,
"end": 10815
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: VaultGemmaConfig, layer_idx: int):
super().__init__()
self.layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = config.query_pre_attn_scalar**-0.5
self.attention_dropout = self.config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.rotary_fn = apply_rotary_pos_emb
self.attn_logit_softcapping = self.config.attn_logit_softcapping
self.sliding_window = config.sliding_window if self.layer_type == "sliding_attention" else None
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=self.attention_dropout if self.training else 0.0,
scaling=self.scaling,
sliding_window=self.sliding_window,
softcap=self.attn_logit_softcapping,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| VaultGemmaAttention |
python | run-llama__llama_index | llama-index-core/llama_index/core/instrumentation/events/llm.py | {
"start": 1586,
"end": 1904
} | class ____(BaseEvent):
"""
LLMStructuredPredictEndEvent.
Args:
output (BaseModel): Predicted output class.
"""
output: SerializeAsAny[Any]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMStructuredPredictEndEvent"
| LLMStructuredPredictEndEvent |
python | pypa__pip | src/pip/_vendor/pygments/filter.py | {
"start": 1018,
"end": 1294
} | class ____:
"""
Default filter. Subclass this class or use the `simplefilter`
decorator to create own filters.
"""
def __init__(self, **options):
self.options = options
def filter(self, lexer, stream):
raise NotImplementedError()
| Filter |
python | PrefectHQ__prefect | tests/server/orchestration/api/ui/test_schemas.py | {
"start": 32,
"end": 4404
} | class ____:
async def test_empty_schema_and_values(
self,
client: AsyncClient,
):
res = await client.post(
"/ui/schemas/validate",
json={"schema": {}, "values": {}},
)
assert res.status_code == 200
async def test_invalid_schema(
self,
client: AsyncClient,
):
res = await client.post(
"/ui/schemas/validate",
json={
"schema": {
"title": "Parameters",
"type": "NOT A REAL OBJECT RAWR",
"properties": {
"param": {"title": "param", "position": 0, "type": "integer"}
},
"required": ["param"],
},
"values": {"param": 1},
},
)
assert res.status_code == 422, res.text
res = res.json()
assert (
res["detail"]
== "Invalid schema: 'NOT A REAL OBJECT RAWR' is not valid under any of the given schemas"
)
async def test_validation_passed(
self,
client: AsyncClient,
):
res = await client.post(
"/ui/schemas/validate",
json={
"schema": {
"title": "Parameters",
"type": "object",
"properties": {
"param": {"title": "param", "position": 0, "type": "integer"}
},
"required": ["param"],
},
"values": {"param": 1},
},
)
assert res.status_code == 200, res.text
res = res.json()
assert "errors" in res and len(res["errors"]) == 0
assert "valid" in res and res["valid"] is True
async def test_validation_failed(
self,
client: AsyncClient,
):
res = await client.post(
"/ui/schemas/validate",
json={
"schema": {
"title": "Parameters",
"type": "object",
"properties": {
"param": {"title": "param", "position": 0, "type": "integer"}
},
"required": ["param"],
},
"values": {"param": "not an int"},
},
)
assert res.status_code == 200, res.text
res = res.json()
assert res["errors"] == [
{
"property": "param",
"errors": ["'not an int' is not of type 'integer'"],
}
]
assert "valid" in res and res["valid"] is False
async def test_circular_schema_reference(
self,
client: AsyncClient,
):
res = await client.post(
"/ui/schemas/validate",
json={
"schema": {
"title": "Parameters",
"type": "object",
"properties": {
"param": {
"title": "param",
"position": 0,
"allOf": [{"$ref": "#/definitions/City"}],
}
},
"required": ["param"],
"definitions": {
"City": {
"title": "City",
"properties": {
"population": {
"title": "Population",
"type": "integer",
},
"name": {"title": "Name", "type": "string"},
},
"required": ["population", "name"],
# City definition references itself here
"allOf": [{"$ref": "#/definitions/City"}],
}
},
},
"values": {"param": "maybe a city, but we'll never know"},
},
)
assert res.status_code == 422, res.text
res = res.json()
assert (
res["detail"]
== "Invalid schema: Unable to validate schema with circular references."
)
| TestUISchemasValidate |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 77763,
"end": 82160
} | class ____(object):
def __init__(self,table=None):
self._class = None
self._classmap = ifelse(table,table,None)
def create(self,*args):
if not args:
return self.create(INVALID_TYPE)
arg0 = args[0]
arg1 = None
arg2 = None
try:
arg1 = args[1]
arg2 = args[2]
except:
pass
# ctor(int)
if isinstance(arg0,int) and not arg2:
### get class for 'self' type
c = self.getASTNodeType(arg0)
t = self.create(c)
if t:
t.initialize(arg0, ifelse(arg1,arg1,""))
return t
# ctor(int,something)
if isinstance(arg0,int) and arg2:
t = self.create(arg2)
if t:
t.initialize(arg0,arg1)
return t
# ctor(AST)
if isinstance(arg0,AST):
t = self.create(arg0.getType())
if t:
t.initialize(arg0)
return t
# ctor(token)
if isinstance(arg0,Token) and not arg1:
ttype = arg0.getType()
assert isinstance(ttype,int)
t = self.create(ttype)
if t:
t.initialize(arg0)
return t
# ctor(token,class)
if isinstance(arg0,Token) and arg1:
assert isinstance(arg1,type)
assert issubclass(arg1,AST)
# this creates instance of 'arg1' using 'arg0' as
# argument. Wow, that's magic!
t = arg1(arg0)
assert t and isinstance(t,AST)
return t
# ctor(class)
if isinstance(arg0,type):
### next statement creates instance of type (!)
t = arg0()
assert isinstance(t,AST)
return t
def setASTNodeClass(self,className=None):
if not className:
return
assert isinstance(className,type)
assert issubclass(className,AST)
self._class = className
### kind of misnomer - use setASTNodeClass instead.
setASTNodeType = setASTNodeClass
def getASTNodeClass(self):
return self._class
def getTokenTypeToASTClassMap(self):
return self._classmap
def setTokenTypeToASTClassMap(self,amap):
self._classmap = amap
def error(self, e):
import sys
print(e, file=sys.stderr)
def setTokenTypeASTNodeType(self, tokenType, className):
"""
Specify a mapping between a token type and a (AST) class.
"""
if not self._classmap:
self._classmap = {}
if not className:
try:
del self._classmap[tokenType]
except:
pass
else:
### here we should also perform actions to ensure that
### a. class can be loaded
### b. class is a subclass of AST
###
assert isinstance(className,type)
assert issubclass(className,AST) ## a & b
### enter the class
self._classmap[tokenType] = className
def getASTNodeType(self,tokenType):
"""
For a given token type return the AST node type. First we
lookup a mapping table, second we try _class
and finally we resolve to "antlr.CommonAST".
"""
# first
if self._classmap:
try:
c = self._classmap[tokenType]
if c:
return c
except:
pass
# second
if self._class:
return self._class
# default
return CommonAST
### methods that have been moved to file scope - just listed
### here to be somewhat consistent with original API
def dup(self,t):
return dup(t,self)
def dupList(self,t):
return dupList(t,self)
def dupTree(self,t):
return dupTree(t,self)
### methods moved to other classes
### 1. makeASTRoot -> Parser
### 2. addASTChild -> Parser
### non-standard: create alias for longish method name
maptype = setTokenTypeASTNodeType
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### ASTVisitor ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
| ASTFactory |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/autoclass_content.py | {
"start": 785,
"end": 889
} | class ____(E):
"""A class inherits __new__ without docstring."""
def __init__(self):
pass
| H |
python | pytorch__pytorch | torch/_export/db/case.py | {
"start": 1769,
"end": 5040
} | class ____:
example_args: ArgsType
description: str # A description of the use case.
model: torch.nn.Module
name: str
example_kwargs: dict[str, Any] = field(default_factory=dict)
extra_args: Optional[ArgsType] = None # For testing graph generalization.
# Tags associated with the use case. (e.g dynamic-shape, escape-hatch)
tags: set[str] = field(default_factory=set)
support_level: SupportLevel = SupportLevel.SUPPORTED
dynamic_shapes: Optional[dict[str, Any]] = None
def __post_init__(self):
check_inputs_type(self.example_args, self.example_kwargs)
if self.extra_args is not None:
check_inputs_type(self.extra_args, {})
for tag in self.tags:
_validate_tag(tag)
if not isinstance(self.description, str) or len(self.description) == 0:
raise ValueError(f'Invalid description: "{self.description}"')
_EXAMPLE_CASES: dict[str, ExportCase] = {}
_MODULES: set[ModuleType] = set()
_EXAMPLE_CONFLICT_CASES: dict[str, list[ExportCase]] = {}
_EXAMPLE_REWRITE_CASES: dict[str, list[ExportCase]] = {}
def register_db_case(case: ExportCase) -> None:
"""
Registers a user provided ExportCase into example bank.
"""
if case.name in _EXAMPLE_CASES:
if case.name not in _EXAMPLE_CONFLICT_CASES:
_EXAMPLE_CONFLICT_CASES[case.name] = [_EXAMPLE_CASES[case.name]]
_EXAMPLE_CONFLICT_CASES[case.name].append(case)
return
_EXAMPLE_CASES[case.name] = case
def to_snake_case(name):
name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", name).lower()
def _make_export_case(m, name, configs):
if not isinstance(m, torch.nn.Module):
raise TypeError("Export case class should be a torch.nn.Module.")
if "description" not in configs:
# Fallback to docstring if description is missing.
assert (
m.__doc__ is not None
), f"Could not find description or docstring for export case: {m}"
configs = {**configs, "description": m.__doc__}
# pyrefly: ignore [bad-argument-type]
return ExportCase(**{**configs, "model": m, "name": name})
def export_case(**kwargs):
"""
Decorator for registering a user provided case into example bank.
"""
def wrapper(m):
configs = kwargs
module = inspect.getmodule(m)
if module in _MODULES:
raise RuntimeError("export_case should only be used once per example file.")
assert module is not None
_MODULES.add(module)
module_name = module.__name__.split(".")[-1]
case = _make_export_case(m, module_name, configs)
register_db_case(case)
return case
return wrapper
def export_rewrite_case(**kwargs):
def wrapper(m):
configs = kwargs
parent = configs.pop("parent")
assert isinstance(parent, ExportCase)
key = parent.name
if key not in _EXAMPLE_REWRITE_CASES:
_EXAMPLE_REWRITE_CASES[key] = []
configs["example_args"] = parent.example_args
case = _make_export_case(m, to_snake_case(m.__name__), configs)
_EXAMPLE_REWRITE_CASES[key].append(case)
return case
return wrapper
| ExportCase |
python | pypa__pip | src/pip/_vendor/rich/color_triplet.py | {
"start": 39,
"end": 1054
} | class ____(NamedTuple):
"""The red, green, and blue components of a color."""
red: int
"""Red component in 0 to 255 range."""
green: int
"""Green component in 0 to 255 range."""
blue: int
"""Blue component in 0 to 255 range."""
@property
def hex(self) -> str:
"""get the color triplet in CSS style."""
red, green, blue = self
return f"#{red:02x}{green:02x}{blue:02x}"
@property
def rgb(self) -> str:
"""The color in RGB format.
Returns:
str: An rgb color, e.g. ``"rgb(100,23,255)"``.
"""
red, green, blue = self
return f"rgb({red},{green},{blue})"
@property
def normalized(self) -> Tuple[float, float, float]:
"""Convert components into floats between 0 and 1.
Returns:
Tuple[float, float, float]: A tuple of three normalized colour components.
"""
red, green, blue = self
return red / 255.0, green / 255.0, blue / 255.0
| ColorTriplet |
python | doocs__leetcode | solution/0400-0499/0432.All O`one Data Structure/Solution.py | {
"start": 0,
"end": 409
} | class ____:
def __init__(self, key='', cnt=0):
self.prev = None
self.next = None
self.cnt = cnt
self.keys = {key}
def insert(self, node):
node.prev = self
node.next = self.next
node.prev.next = node
node.next.prev = node
return node
def remove(self):
self.prev.next = self.next
self.next.prev = self.prev
| Node |
python | pdm-project__pdm | src/pdm/resolver/reporters.py | {
"start": 1869,
"end": 6041
} | class ____(LockReporter):
def __init__(self, requirements: list[Requirement], ui: UI) -> None:
self.ui = ui
self.console = get_console()
self.requirements = requirements
self.progress = Progress(
"[progress.description]{task.description}",
"[info]{task.fields[text]}",
BarColumn(),
TaskProgressColumn(),
console=self.console,
)
self._spinner = Progress(
SpinnerColumn(SPINNER, style="primary"),
TimeElapsedColumn(),
"[bold]{task.description}",
"{task.fields[info]}",
console=self.console,
)
self._spinner_task = self._spinner.add_task("Resolving dependencies", info="", total=1)
self.live = Live(self)
@contextmanager
def make_candidate_reporter(self, candidate: Candidate) -> Generator[CandidateReporter]:
task_id = self.progress.add_task(f"Resolving {candidate.format()}", text="", total=None)
try:
yield RichProgressReporter(self.progress, task_id)
finally:
self.progress.update(task_id, visible=False)
if candidate._prepared:
candidate._prepared.reporter = CandidateReporter()
def update(self, description: str | None = None, info: str | None = None, completed: float | None = None) -> None:
self._spinner.update(self._spinner_task, description=description, info=info, completed=completed)
self.live.refresh()
def __rich_console__(self, console: Console, options: ConsoleOptions) -> RenderResult: # pragma: no cover
yield self._spinner
yield self.progress
def start(self) -> None:
"""Start the progress display."""
if self.ui.verbosity < Verbosity.DETAIL:
self.live.start(refresh=True)
def stop(self) -> None:
"""Stop the progress display."""
self.live.stop()
if not self.console.is_interactive: # pragma: no cover
self.console.print()
def __enter__(self) -> RichLockReporter:
self.start()
return self
def __exit__(self, *args: Any) -> None:
self.stop()
def starting_round(self, index: int) -> None:
log_title(f"Starting round {index}")
def starting(self) -> None:
"""Called before the resolution actually starts."""
log_title("Start resolving requirements")
for req in self.requirements:
logger.info(" " + req.as_line())
def ending_round(self, index: int, state: State) -> None:
"""Called before each round of resolution ends.
This is NOT called if the resolution ends at this round. Use `ending`
if you want to report finalization. The index is zero-based.
"""
resolved = len(state.mapping)
to_resolve = len(state.criteria) - resolved
self.update(info=f"[info]{resolved}[/] resolved, [info]{to_resolve}[/] to resolve")
def rejecting_candidate(self, criterion: Criterion, candidate: Candidate) -> None:
if not criterion.information:
logger.info("Candidate rejected because it contains invalid metadata: %s", candidate)
return
*others, last = criterion.information
logger.info(
"Candidate rejected: %s because it introduces a new requirement %s"
" that conflicts with other requirements:\n %s",
candidate,
last.requirement.as_line(), # type: ignore[attr-defined]
" \n".join(
sorted({f" {req.as_line()} (from {parent if parent else 'project'})" for req, parent in others})
),
)
def pinning(self, candidate: Candidate) -> None:
"""Called when adding a candidate to the potential solution."""
logger.info("Adding new pin: %s %s", candidate.name, candidate.version)
def resolving_conflicts(self, causes: list[RequirementInformation]) -> None:
conflicts = sorted({f" {req.as_line()} (from {parent if parent else 'project'})" for req, parent in causes})
logger.info("Conflicts detected: \n%s", "\n".join(conflicts))
| RichLockReporter |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 46116,
"end": 50777
} | class ____(Exception):
_valClassMapping = dict()
# List of currently known error codes
_errcode_to_string = {
NVML_ERROR_UNINITIALIZED: "Uninitialized",
NVML_ERROR_INVALID_ARGUMENT: "Invalid Argument",
NVML_ERROR_NOT_SUPPORTED: "Not Supported",
NVML_ERROR_NO_PERMISSION: "Insufficient Permissions",
NVML_ERROR_ALREADY_INITIALIZED: "Already Initialized",
NVML_ERROR_NOT_FOUND: "Not Found",
NVML_ERROR_INSUFFICIENT_SIZE: "Insufficient Size",
NVML_ERROR_INSUFFICIENT_POWER: "Insufficient External Power",
NVML_ERROR_DRIVER_NOT_LOADED: "Driver Not Loaded",
NVML_ERROR_TIMEOUT: "Timeout",
NVML_ERROR_IRQ_ISSUE: "Interrupt Request Issue",
NVML_ERROR_LIBRARY_NOT_FOUND: "NVML Shared Library Not Found",
NVML_ERROR_FUNCTION_NOT_FOUND: "Function Not Found",
NVML_ERROR_CORRUPTED_INFOROM: "Corrupted infoROM",
NVML_ERROR_GPU_IS_LOST: "GPU is lost",
NVML_ERROR_RESET_REQUIRED: "GPU requires restart",
NVML_ERROR_OPERATING_SYSTEM: "The operating system has blocked the request.",
NVML_ERROR_LIB_RM_VERSION_MISMATCH: "RM has detected an NVML/RM version mismatch.",
NVML_ERROR_MEMORY: "Insufficient Memory",
NVML_ERROR_UNKNOWN: "Unknown Error",
}
def __new__(typ, value):
'''
Maps value to a proper subclass of NVMLError.
See _extractNVMLErrorsAsClasses function for more details
'''
if typ == NVMLError:
typ = NVMLError._valClassMapping.get(value, typ)
obj = Exception.__new__(typ)
obj.value = value
return obj
def __str__(self):
try:
if self.value not in NVMLError._errcode_to_string:
NVMLError._errcode_to_string[self.value] = str(nvmlErrorString(self.value))
return NVMLError._errcode_to_string[self.value]
except NVMLError:
return "NVML Error with code %d" % self.value
def __eq__(self, other):
return self.value == other.value
def nvmlExceptionClass(nvmlErrorCode):
if nvmlErrorCode not in NVMLError._valClassMapping:
raise ValueError('nvmlErrorCode %s is not valid' % nvmlErrorCode)
return NVMLError._valClassMapping[nvmlErrorCode]
def _extractNVMLErrorsAsClasses():
'''
Generates a hierarchy of classes on top of NVMLError class.
Each NVML Error gets a new NVMLError subclass. This way try,except blocks can filter appropriate
exceptions more easily.
NVMLError is a parent class. Each NVML_ERROR_* gets it's own subclass.
e.g. NVML_ERROR_ALREADY_INITIALIZED will be turned into NVMLError_AlreadyInitialized
'''
this_module = sys.modules[__name__]
nvmlErrorsNames = [x for x in dir(this_module) if x.startswith("NVML_ERROR_")]
for err_name in nvmlErrorsNames:
# e.g. Turn NVML_ERROR_ALREADY_INITIALIZED into NVMLError_AlreadyInitialized
class_name = "NVMLError_" + string.capwords(err_name.replace("NVML_ERROR_", ""), "_").replace("_", "")
err_val = getattr(this_module, err_name)
def gen_new(val):
def new(typ, *args):
obj = NVMLError.__new__(typ, val)
return obj
return new
new_error_class = type(class_name, (NVMLError,), {'__new__': gen_new(err_val)})
new_error_class.__module__ = __name__
setattr(this_module, class_name, new_error_class)
NVMLError._valClassMapping[err_val] = new_error_class
_extractNVMLErrorsAsClasses()
def _nvmlCheckReturn(ret):
if (ret != NVML_SUCCESS):
raise NVMLError(ret)
return ret
## Function access ##
_nvmlGetFunctionPointer_cache = dict() # function pointers are cached to prevent unnecessary libLoadLock locking
def _nvmlGetFunctionPointer(name):
global nvmlLib
if name in _nvmlGetFunctionPointer_cache:
return _nvmlGetFunctionPointer_cache[name]
libLoadLock.acquire()
try:
# ensure library was loaded
if (nvmlLib == None):
raise NVMLError(NVML_ERROR_UNINITIALIZED)
try:
_nvmlGetFunctionPointer_cache[name] = getattr(nvmlLib, name)
return _nvmlGetFunctionPointer_cache[name]
except AttributeError:
raise NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND)
finally:
# lock is always freed
libLoadLock.release()
## Alternative object
# Allows the object to be printed
# Allows mismatched types to be assigned
# - like None when the Structure variant requires c_uint
| NVMLError |
python | django__django | tests/postgres_tests/test_search.py | {
"start": 23098,
"end": 24432
} | class ____(PostgreSQLSimpleTestCase):
def test_str(self):
tests = (
(~SearchQuery("a"), "~SearchQuery(Value('a'))"),
(
(SearchQuery("a") | SearchQuery("b"))
& (SearchQuery("c") | SearchQuery("d")),
"((SearchQuery(Value('a')) || SearchQuery(Value('b'))) && "
"(SearchQuery(Value('c')) || SearchQuery(Value('d'))))",
),
(
SearchQuery("a") & (SearchQuery("b") | SearchQuery("c")),
"(SearchQuery(Value('a')) && (SearchQuery(Value('b')) || "
"SearchQuery(Value('c'))))",
),
(
(SearchQuery("a") | SearchQuery("b")) & SearchQuery("c"),
"((SearchQuery(Value('a')) || SearchQuery(Value('b'))) && "
"SearchQuery(Value('c')))",
),
(
SearchQuery("a")
& (SearchQuery("b") & (SearchQuery("c") | SearchQuery("d"))),
"(SearchQuery(Value('a')) && (SearchQuery(Value('b')) && "
"(SearchQuery(Value('c')) || SearchQuery(Value('d')))))",
),
)
for query, expected_str in tests:
with self.subTest(query=query):
self.assertEqual(str(query), expected_str)
| SearchQueryTests |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 97195,
"end": 98271
} | class ____(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
if self.request.version.startswith("HTTP/1"):
# Simulate a connection closed by the client during
# request processing. The client will see an error, but the
# server should respond gracefully (without logging errors
# because we were unable to write out as many bytes as
# Content-Length said we would)
self.request.connection.stream.close() # type: ignore
self.write("hello")
else:
# TODO: add a HTTP2-compatible version of this test.
self.write("requires HTTP/1.x")
def test_client_close(self):
with self.assertRaises((HTTPClientError, unittest.SkipTest)): # type: ignore
response = self.fetch("/", raise_error=True)
if response.body == b"requires HTTP/1.x":
self.skipTest("requires HTTP/1.x")
self.assertEqual(response.code, 599)
| ClientCloseTest |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 11430,
"end": 11494
} | class ____(Duck):
class Meta:
proxy = True
| RedheadDuck |
python | doocs__leetcode | solution/1700-1799/1724.Checking Existence of Edge Length Limited Paths II/Solution.py | {
"start": 0,
"end": 692
} | class ____:
def __init__(self, n):
self.rank = [0] * n
self.p = list(range(n))
self.version = [inf] * n
def find(self, x, t=inf):
if self.p[x] == x or self.version[x] >= t:
return x
return self.find(self.p[x], t)
def union(self, a, b, t):
pa, pb = self.find(a), self.find(b)
if pa == pb:
return False
if self.rank[pa] > self.rank[pb]:
self.version[pb] = t
self.p[pb] = pa
else:
self.version[pa] = t
self.p[pa] = pb
if self.rank[pa] == self.rank[pb]:
self.rank[pb] += 1
return True
| PersistentUnionFind |
python | walkccc__LeetCode | solutions/41. First Missing Positive/41.py | {
"start": 0,
"end": 451
} | class ____:
def firstMissingPositive(self, nums: list[int]) -> int:
n = len(nums)
# Correct slot:
# nums[i] = i + 1
# nums[i] - 1 = i
# nums[nums[i] - 1] = nums[i]
for i in range(n):
while nums[i] > 0 and nums[i] <= n and nums[nums[i] - 1] != nums[i]:
nums[nums[i] - 1], nums[i] = nums[i], nums[nums[i] - 1]
for i, num in enumerate(nums):
if num != i + 1:
return i + 1
return n + 1
| Solution |