language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | doocs__leetcode | lcof2/剑指 Offer II 030. 插入、删除和随机访问都是 O(1) 的容器/Solution.py | {
"start": 0,
"end": 1188
} | class ____:
def __init__(self):
"""
Initialize your data structure here.
"""
self.a = []
self.m = {}
def insert(self, val: int) -> bool:
"""
Inserts a value to the set. Returns true if the set did not already contain the specified element.
"""
if val in self.m:
return False
self.m[val] = len(self.a)
self.a.append(val)
return True
def remove(self, val: int) -> bool:
"""
Removes a value from the set. Returns true if the set contained the specified element.
"""
if val in self.m:
idx = self.m[val]
self.a[idx], self.a[-1] = self.a[-1], self.a[idx]
self.m[self.a[idx]] = idx
self.a.pop()
del self.m[val]
return True
return False
def getRandom(self) -> int:
"""
Get a random element from the set.
"""
return random.choice(self.a)
# Your RandomizedSet object will be instantiated and called as such:
# obj = RandomizedSet()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
| RandomizedSet |
python | scipy__scipy | scipy/interpolate/tests/test_fitpack2.py | {
"start": 23257,
"end": 28604
} | class ____:
def test_linear_constant(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [3,3,3,3,3,3,3,3,3]
lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
for t in lut.get_knots():
assert_array_almost_equal(t, [1, 1, 3, 3])
assert_array_almost_equal(lut.get_coeffs(), [3, 3, 3, 3])
assert abs(lut.get_residual()) < 1e-15
assert_array_almost_equal(lut([1, 1.5, 2], [1, 1.5]), [[3, 3], [3, 3], [3, 3]])
def test_linear_1d(self):
x = [1,1,1,2,2,2,3,3,3]
y = [1,2,3,1,2,3,1,2,3]
z = [0,0,0,2,2,2,4,4,4]
lut = SmoothBivariateSpline(x,y,z,kx=1,ky=1)
for t in lut.get_knots():
xp_assert_close(t, np.asarray([1.0, 1, 3, 3]))
assert_array_almost_equal(lut.get_coeffs(), [0, 0, 4, 4])
assert abs(lut.get_residual()) < 1e-15
assert_array_almost_equal(lut([1,1.5,2],[1,1.5]),[[0,0],[1,1],[2,2]])
def test_integral(self):
x = [1,1,1,2,2,2,4,4,4]
y = [1,2,3,1,2,3,1,2,3]
z = array([0,7,8,3,4,7,1,3,4])
with warnings.catch_warnings():
# This seems to fail (ier=1, see ticket 1642).
warnings.filterwarnings(
"ignore", "\nThe required storage space", UserWarning)
lut = SmoothBivariateSpline(x, y, z, kx=1, ky=1, s=0)
tx = [1,2,4]
ty = [1,2,3]
tz = lut(tx, ty)
trpz = .25*(diff(tx)[:,None]*diff(ty)[None,:]
* (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
assert_almost_equal(np.asarray(lut.integral(tx[0], tx[-1], ty[0], ty[-1])),
np.asarray(trpz))
lut2 = SmoothBivariateSpline(x, y, z, kx=2, ky=2, s=0)
assert_almost_equal(np.asarray(lut2.integral(tx[0], tx[-1], ty[0], ty[-1])),
np.asarray(trpz),
decimal=0) # the quadratures give 23.75 and 23.85
tz = lut(tx[:-1], ty[:-1])
trpz = .25*(diff(tx[:-1])[:,None]*diff(ty[:-1])[None,:]
* (tz[:-1,:-1]+tz[1:,:-1]+tz[:-1,1:]+tz[1:,1:])).sum()
assert_almost_equal(np.asarray(lut.integral(tx[0], tx[-2], ty[0], ty[-2])),
np.asarray(trpz))
def test_rerun_lwrk2_too_small(self):
# in this setting, lwrk2 is too small in the default run. Here we
# check for equality with the bisplrep/bisplev output because there,
# an automatic re-run of the spline representation is done if ier>10.
x = np.linspace(-2, 2, 80)
y = np.linspace(-2, 2, 80)
z = x + y
xi = np.linspace(-1, 1, 100)
yi = np.linspace(-2, 2, 100)
tck = bisplrep(x, y, z)
res1 = bisplev(xi, yi, tck)
interp_ = SmoothBivariateSpline(x, y, z)
res2 = interp_(xi, yi)
assert_almost_equal(res1, res2)
def test_invalid_input(self):
with assert_raises(ValueError) as info:
x = np.linspace(1.0, 10.0)
y = np.linspace(1.0, 10.0)
z = np.linspace(1.0, 10.0, num=10)
SmoothBivariateSpline(x, y, z)
assert "x, y, and z should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
x = np.linspace(1.0, 10.0)
y = np.linspace(1.0, 10.0)
z = np.linspace(1.0, 10.0)
w = np.linspace(1.0, 10.0, num=20)
SmoothBivariateSpline(x, y, z, w=w)
assert "x, y, z, and w should have a same length" in str(info.value)
with assert_raises(ValueError) as info:
w = np.linspace(-1.0, 10.0)
SmoothBivariateSpline(x, y, z, w=w)
assert "w should be positive" in str(info.value)
with assert_raises(ValueError) as info:
bbox = (-100, 100, -100)
SmoothBivariateSpline(x, y, z, bbox=bbox)
assert "bbox shape should be (4,)" in str(info.value)
with assert_raises(ValueError) as info:
SmoothBivariateSpline(x, y, z, kx=10, ky=10)
assert "The length of x, y and z should be at least (kx+1) * (ky+1)" in\
str(info.value)
with assert_raises(ValueError) as info:
SmoothBivariateSpline(x, y, z, s=-1.0)
assert "s should be s >= 0.0" in str(info.value)
with assert_raises(ValueError) as exc_info:
SmoothBivariateSpline(x, y, z, eps=0.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
with assert_raises(ValueError) as exc_info:
SmoothBivariateSpline(x, y, z, eps=1.0)
assert "eps should be between (0, 1)" in str(exc_info.value)
def test_array_like_input(self):
x = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
y = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
z = np.array([3, 3, 3, 3, 3, 3, 3, 3, 3])
w = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1])
bbox = np.array([1.0, 3.0, 1.0, 3.0])
# np.array input
spl1 = SmoothBivariateSpline(x, y, z, w=w, bbox=bbox, kx=1, ky=1)
# list input
spl2 = SmoothBivariateSpline(x.tolist(), y.tolist(), z.tolist(),
bbox=bbox.tolist(), w=w.tolist(),
kx=1, ky=1)
xp_assert_close(spl1(0.1, 0.5), spl2(0.1, 0.5))
| TestSmoothBivariateSpline |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/PColorMeshItem.py | {
"start": 479,
"end": 594
} | class ____(enum.Flag):
XY = enum.auto()
Z = enum.auto()
LUT = enum.auto()
DIM = enum.auto()
| DirtyFlag |
python | crytic__slither | slither/core/source_mapping/source_mapping.py | {
"start": 683,
"end": 7276
} | class ____:
def __init__(self, compilation_unit: "SlitherCompilationUnit") -> None:
self.start: int = 0
self.length: int = 0
self.filename: Filename = Filename("", "", "", "")
self.is_dependency: bool = False
self.lines: List[int] = []
self.starting_column: int = 0
self.ending_column: int = 0
self.end: int = 0
self.compilation_unit = compilation_unit
def to_json(self) -> Dict:
return {
"start": self.start,
"length": self.length,
# TODO investigate filename_used usecase
# It creates non-deterministic result
# As it sometimes refer to the relative and sometimes to the absolute
# "filename_used": self.filename.used,
"filename_relative": self.filename.relative,
"filename_absolute": self.filename.absolute,
"filename_short": self.filename.short,
"is_dependency": self.is_dependency,
"lines": self.lines,
"starting_column": self.starting_column,
"ending_column": self.ending_column,
}
def to_markdown(self, markdown_root: str) -> str:
lines = self._get_lines_str(line_descr="L")
filename_relative: str = self.filename.relative if self.filename.relative else ""
return f"{markdown_root}{filename_relative}{lines}"
def to_detailed_str(self) -> str:
lines = self._get_lines_str()
filename_short: str = self.filename.short if self.filename.short else ""
return f"{filename_short}{lines} ({self.starting_column} - {self.ending_column})"
def _get_lines_str(self, line_descr: str = "") -> str:
line_prefix = self.compilation_unit.core.line_prefix
lines = self.lines
if not lines:
return ""
if len(lines) == 1:
return f"{line_prefix}{line_descr}{lines[0]}"
return f"{line_prefix}{line_descr}{lines[0]}-{line_descr}{lines[-1]}"
@property
def content(self) -> str:
"""
Return the txt content of the Source
Use this property instead of eg source_code[start:end]
Above will return incorrect content if source_code contains any unicode
because self.start and self.end are byte offsets, not char offsets
Returns: str
"""
# If the compilation unit was not initialized, it means that the set_offset was never called
# on the corresponding object, which should not happen
assert self.compilation_unit
return (
self.compilation_unit.core.source_code[self.filename.absolute]
.encode("utf8")[self.start : self.end]
.decode("utf8")
)
@property
def content_hash(self) -> str:
"""
Return sha1(self.content)
Returns:
"""
h = SHA1.new()
h.update(self.content.encode("utf8"))
return h.hexdigest()
def __str__(self) -> str:
lines = self._get_lines_str()
filename_short: str = self.filename.short if self.filename.short else ""
return f"{filename_short}{lines}"
def __hash__(self) -> int:
return hash(
(
self.start,
self.length,
self.filename.relative,
self.end,
)
)
def __eq__(self, other: Any) -> bool:
try:
return (
self.start == other.start
and self.filename.relative == other.filename.relative
and self.is_dependency == other.is_dependency
and self.end == other.end
)
except AttributeError:
return NotImplemented
def _compute_line(
compilation_unit: "SlitherCompilationUnit", filename: Filename, start: int, length: int
) -> Tuple[List[int], int, int]:
"""
Compute line(s) numbers and starting/ending columns
from a start/end offset. All numbers start from 1.
Not done in an efficient way
"""
start_line, starting_column = compilation_unit.core.crytic_compile.get_line_from_offset(
filename, start
)
try:
end_line, ending_column = compilation_unit.core.crytic_compile.get_line_from_offset(
filename, start + length
)
except KeyError:
# This error may occur when the build is not synchronised with the source code on disk.
# See the GitHub issue https://github.com/crytic/slither/issues/2296
msg = f"""The source code appears to be out of sync with the build artifacts on disk.
This discrepancy can occur after recent modifications to {filename.short}. To resolve this
issue, consider executing the clean command of the build system (e.g. forge clean).
"""
# We still re-raise the exception as a SlitherException here
raise SlitherException(msg) from None
return list(range(start_line, end_line + 1)), starting_column, ending_column
def _convert_source_mapping(
offset: str, compilation_unit: "SlitherCompilationUnit"
) -> Source: # pylint: disable=too-many-locals
"""
Convert a text offset to a real offset
see https://solidity.readthedocs.io/en/develop/miscellaneous.html#source-mappings
Returns:
(dict): {'start':0, 'length':0, 'filename': 'file.sol'}
"""
sourceUnits = compilation_unit.source_units
position = re.findall("([0-9]*):([0-9]*):([-]?[0-9]*)", offset)
if len(position) != 1:
return Source(compilation_unit)
s, l, f = position[0]
s = int(s)
l = int(l)
f = int(f)
if f not in sourceUnits:
new_source = Source(compilation_unit)
new_source.start = s
new_source.length = l
return new_source
filename_used = sourceUnits[f]
# If possible, convert the filename to its absolute/relative version
assert compilation_unit.core.crytic_compile
filename: Filename = compilation_unit.core.crytic_compile.filename_lookup(filename_used)
is_dependency = compilation_unit.core.crytic_compile.is_dependency(filename.absolute)
(lines, starting_column, ending_column) = _compute_line(compilation_unit, filename, s, l)
new_source = Source(compilation_unit)
new_source.start = s
new_source.length = l
new_source.filename = filename
new_source.is_dependency = is_dependency
new_source.lines = lines
new_source.starting_column = starting_column
new_source.ending_column = ending_column
new_source.end = new_source.start + l
return new_source
| Source |
python | kevin1024__vcrpy | vcr/cassette.py | {
"start": 587,
"end": 5786
} | class ____:
"""Context manager/decorator that handles installing the cassette and
removing cassettes.
This class defers the creation of a new cassette instance until
the point at which it is installed by context manager or
decorator. The fact that a new cassette is used with each
application prevents the state of any cassette from interfering
with another.
Instances of this class are NOT reentrant as context managers.
However, functions that are decorated by
``CassetteContextDecorator`` instances ARE reentrant. See the
implementation of ``__call__`` on this class for more details.
There is also a guard against attempts to reenter instances of
this class as a context manager in ``__exit__``.
"""
_non_cassette_arguments = (
"path_transformer",
"func_path_generator",
"record_on_exception",
)
@classmethod
def from_args(cls, cassette_class, **kwargs):
return cls(cassette_class, lambda: dict(kwargs))
def __init__(self, cls, args_getter):
self.cls = cls
self._args_getter = args_getter
self.__finish = None
self.__cassette = None
def _patch_generator(self, cassette):
with contextlib.ExitStack() as exit_stack:
for patcher in CassettePatcherBuilder(cassette).build():
exit_stack.enter_context(patcher)
log_format = "{action} context for cassette at {path}."
log.debug(log_format.format(action="Entering", path=cassette._path))
yield cassette
log.debug(log_format.format(action="Exiting", path=cassette._path))
def __enter__(self):
# This assertion is here to prevent the dangerous behavior
# that would result from forgetting about a __finish before
# completing it.
# How might this condition be met? Here is an example:
# context_decorator = Cassette.use('whatever')
# with context_decorator:
# with context_decorator:
# pass
assert self.__finish is None, "Cassette already open."
other_kwargs, cassette_kwargs = partition_dict(
lambda key, _: key in self._non_cassette_arguments,
self._args_getter(),
)
if other_kwargs.get("path_transformer"):
transformer = other_kwargs["path_transformer"]
cassette_kwargs["path"] = transformer(cassette_kwargs["path"])
self.__cassette = self.cls.load(**cassette_kwargs)
self.__finish = self._patch_generator(self.__cassette)
return next(self.__finish)
def __exit__(self, *exc_info):
exception_was_raised = any(exc_info)
record_on_exception = self._args_getter().get("record_on_exception", True)
if record_on_exception or not exception_was_raised:
self.__cassette._save()
self.__cassette = None
# Fellow programmer, don't remove this `next`, if `self.__finish` is
# not consumed the unpatcher functions accumulated in the `exit_stack`
# object created in `_patch_generator` will not be called until
# `exit_stack` is not garbage collected.
# This works in CPython but not in Pypy, where the unpatchers will not
# be called until much later.
next(self.__finish, None)
self.__finish = None
@wrapt.decorator
def __call__(self, function, instance, args, kwargs):
# This awkward cloning thing is done to ensure that decorated
# functions are reentrant. This is required for thread
# safety and the correct operation of recursive functions.
args_getter = self._build_args_getter_for_decorator(function)
return type(self)(self.cls, args_getter)._execute_function(function, args, kwargs)
def _execute_function(self, function, args, kwargs):
def handle_function(cassette):
if cassette.inject:
return function(cassette, *args, **kwargs)
else:
return function(*args, **kwargs)
if iscoroutinefunction(function):
return handle_coroutine(vcr=self, fn=handle_function)
if inspect.isgeneratorfunction(function):
return self._handle_generator(fn=handle_function)
return self._handle_function(fn=handle_function)
def _handle_generator(self, fn):
"""Wraps a generator so that we're inside the cassette context for the
duration of the generator.
"""
with self as cassette:
return (yield from fn(cassette))
def _handle_function(self, fn):
with self as cassette:
return fn(cassette)
@staticmethod
def get_function_name(function):
return function.__name__
def _build_args_getter_for_decorator(self, function):
def new_args_getter():
kwargs = self._args_getter()
if "path" not in kwargs:
name_generator = kwargs.get("func_path_generator") or self.get_function_name
path = name_generator(function)
kwargs["path"] = path
return kwargs
return new_args_getter
| CassetteContextDecorator |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_annotations/allow_overload.py | {
"start": 245,
"end": 558
} | class ____:
def bar(i):
return i
# TODO(charlie): This third case should raise an error (as in Mypy), because we have a
# statement between the interfaces and implementation.
@overload
def baz(i: int) -> "int":
...
@overload
def baz(i: "str") -> "str":
...
x = 1
def baz(i):
return i
| X |
python | apache__thrift | lib/py/src/transport/TTransport.py | {
"start": 3211,
"end": 3352
} | class ____(object):
"""Base class for a Transport Factory"""
def getTransport(self, trans):
return trans
| TTransportFactoryBase |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 205899,
"end": 206664
} | class ____(sgqlc.types.Interface):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("assignees",)
assignees = sgqlc.types.Field(
sgqlc.types.non_null("UserConnection"),
graphql_name="assignees",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
| Assignable |
python | huggingface__transformers | src/transformers/models/instructblipvideo/modular_instructblipvideo.py | {
"start": 6749,
"end": 6823
} | class ____(InstructBlipQFormerModel):
pass
| InstructBlipVideoQFormerModel |
python | run-llama__llama_index | llama-index-core/tests/tools/test_eval_query_engine_tool.py | {
"start": 1005,
"end": 1190
} | class ____(CustomQueryEngine):
"""Custom query engine."""
def custom_query(self, query_str: str) -> str:
"""Query."""
return "custom_" + query_str
| MockQueryEngine |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/ui/test_calendar.py | {
"start": 1201,
"end": 7083
} | class ____:
DAG_NAME = "test_dag1"
@pytest.fixture(autouse=True)
@provide_session
def setup_dag_runs(self, dag_maker, session=None) -> None:
clear_db_runs()
clear_db_dags()
with dag_maker(
self.DAG_NAME,
schedule="0 0,1 * * *",
start_date=datetime(2025, 1, 1),
end_date=datetime(2025, 1, 3, 2),
catchup=True,
serialized=True,
session=session,
):
EmptyOperator(task_id="test_task1")
dag_maker.create_dagrun(run_id="run_1", state=DagRunState.FAILED, logical_date=datetime(2025, 1, 1))
dag_maker.create_dagrun(
run_id="run_2",
state=DagRunState.SUCCESS,
logical_date=datetime(2025, 1, 1, 1),
)
dag_maker.create_dagrun(run_id="run_3", state=DagRunState.RUNNING, logical_date=datetime(2025, 1, 2))
dag_maker.sync_dagbag_to_db()
session.commit()
def teardown_method(self) -> None:
clear_db_runs()
clear_db_dags()
@pytest.mark.parametrize(
("query_params", "result"),
[
(
{},
{
"total_entries": 5,
"dag_runs": [
{"date": "2025-01-01T00:00:00Z", "state": "failed", "count": 1},
{"date": "2025-01-01T00:00:00Z", "state": "success", "count": 1},
{"date": "2025-01-02T00:00:00Z", "state": "running", "count": 1},
{"date": "2025-01-02T00:00:00Z", "state": "planned", "count": 1},
{"date": "2025-01-03T00:00:00Z", "state": "planned", "count": 2},
],
},
),
(
{"logical_date_gte": "2025-01-01T00:00:00Z", "logical_date_lte": "2025-01-01T23:23:59Z"},
{
"total_entries": 2,
"dag_runs": [
{"date": "2025-01-01T00:00:00Z", "state": "failed", "count": 1},
{"date": "2025-01-01T00:00:00Z", "state": "success", "count": 1},
],
},
),
(
{"logical_date_gte": "2025-01-02T00:00:00Z", "logical_date_lte": "2025-01-02T23:23:59Z"},
{
"total_entries": 2,
"dag_runs": [
{"date": "2025-01-02T00:00:00Z", "state": "running", "count": 1},
{"date": "2025-01-02T00:00:00Z", "state": "planned", "count": 1},
],
},
),
],
)
def test_daily_calendar(self, test_client, query_params, result):
with assert_queries_count(4):
response = test_client.get(f"/calendar/{self.DAG_NAME}", params=query_params)
assert response.status_code == 200
body = response.json()
print(body)
assert body == result
@pytest.mark.parametrize(
("query_params", "result"),
[
(
{"granularity": "hourly"},
{
"total_entries": 6,
"dag_runs": [
{"date": "2025-01-01T00:00:00Z", "state": "failed", "count": 1},
{"date": "2025-01-01T01:00:00Z", "state": "success", "count": 1},
{"date": "2025-01-02T00:00:00Z", "state": "running", "count": 1},
{"date": "2025-01-02T01:00:00Z", "state": "planned", "count": 1},
{"date": "2025-01-03T00:00:00Z", "state": "planned", "count": 1},
{"date": "2025-01-03T01:00:00Z", "state": "planned", "count": 1},
],
},
),
(
{
"granularity": "hourly",
"logical_date_gte": "2025-01-02T00:00:00Z",
"logical_date_lte": "2025-01-02T23:23:59Z",
},
{
"total_entries": 2,
"dag_runs": [
{"date": "2025-01-02T00:00:00Z", "state": "running", "count": 1},
{"date": "2025-01-02T01:00:00Z", "state": "planned", "count": 1},
],
},
),
(
{
"granularity": "hourly",
"logical_date_gte": "2025-01-02T00:00:00Z",
"logical_date_lte": "2025-01-02T23:23:59Z",
"logical_date_gt": "2025-01-02T00:00:00Z",
"logical_date_lt": "2025-01-02T23:23:59Z",
},
{
"total_entries": 0,
"dag_runs": [],
},
),
(
{
"granularity": "hourly",
"logical_date_gte": "2025-01-02T00:00:00Z",
"logical_date_lte": "2025-01-02T23:23:59Z",
"logical_date_gt": "2025-01-01T23:00:00Z",
"logical_date_lt": "2025-01-03T00:00:00Z",
},
{
"total_entries": 2,
"dag_runs": [
{"date": "2025-01-02T00:00:00Z", "state": "running", "count": 1},
{"date": "2025-01-02T01:00:00Z", "state": "planned", "count": 1},
],
},
),
],
)
def test_hourly_calendar(self, setup_dag_runs, test_client, query_params, result):
with assert_queries_count(4):
response = test_client.get(f"/calendar/{self.DAG_NAME}", params=query_params)
assert response.status_code == 200
body = response.json()
assert body == result
| TestCalendar |
python | PyCQA__pylint | pylint/checkers/variables.py | {
"start": 1977,
"end": 17967
} | class ____(Enum):
"""Reported by _check_consumer() and its sub-methods to determine the
subsequent action to take in _undefined_and_used_before_checker().
Continue -> continue loop to next consumer
Return -> return and thereby break the loop
"""
CONTINUE = 0
RETURN = 1
def _is_from_future_import(stmt: nodes.ImportFrom, name: str) -> bool | None:
"""Check if the name is a future import from another module."""
try:
module = stmt.do_import_module(stmt.modname)
except astroid.AstroidBuildingError:
return None
for local_node in module.locals.get(name, []):
if isinstance(local_node, nodes.ImportFrom) and local_node.modname == FUTURE:
return True
return None
def _get_unpacking_extra_info(node: nodes.Assign, inferred: InferenceResult) -> str:
"""Return extra information to add to the message for unpacking-non-sequence
and unbalanced-tuple/dict-unpacking errors.
"""
more = ""
if isinstance(inferred, DICT_TYPES):
match node:
case nodes.Assign():
more = node.value.as_string()
case nodes.For():
more = node.iter.as_string()
return more
inferred_module = inferred.root().name
if node.root().name == inferred_module:
if node.lineno == inferred.lineno:
more = f"'{inferred.as_string()}'"
elif inferred.lineno:
more = f"defined at line {inferred.lineno}"
elif inferred.lineno:
more = f"defined at line {inferred.lineno} of {inferred_module}"
return more
def _detect_global_scope(
node: nodes.Name,
frame: nodes.LocalsDictNodeNG,
defframe: nodes.LocalsDictNodeNG,
) -> bool:
"""Detect that the given frames share a global scope.
Two frames share a global scope when neither
of them are hidden under a function scope, as well
as any parent scope of them, until the root scope.
In this case, depending from something defined later on
will only work if guarded by a nested function definition.
Example:
class A:
# B has the same global scope as `C`, leading to a NameError.
# Return True to indicate a shared scope.
class B(C): ...
class C: ...
Whereas this does not lead to a NameError:
class A:
def guard():
# Return False to indicate no scope sharing.
class B(C): ...
class C: ...
"""
def_scope = scope = None
if frame and frame.parent:
scope = frame.parent.scope()
if defframe and defframe.parent:
def_scope = defframe.parent.scope()
if (
isinstance(frame, nodes.ClassDef)
and scope is not def_scope
and scope is utils.get_node_first_ancestor_of_type(node, nodes.FunctionDef)
):
# If the current node's scope is a class nested under a function,
# and the def_scope is something else, then they aren't shared.
return False
if isinstance(frame, nodes.FunctionDef):
# If the parent of the current node is a
# function, then it can be under its scope (defined in); or
# the `->` part of annotations. The same goes
# for annotations of function arguments, they'll have
# their parent the Arguments node.
if frame.parent_of(defframe):
return node.lineno < defframe.lineno # type: ignore[no-any-return]
if not isinstance(node.parent, (nodes.FunctionDef, nodes.Arguments)):
return False
break_scopes = []
for current_scope in (scope or frame, def_scope):
# Look for parent scopes. If there is anything different
# than a module or a class scope, then the frames don't
# share a global scope.
parent_scope = current_scope
while parent_scope:
if not isinstance(parent_scope, (nodes.ClassDef, nodes.Module)):
break_scopes.append(parent_scope)
break
if parent_scope.parent:
parent_scope = parent_scope.parent.scope()
else:
break
if len(set(break_scopes)) > 1:
# Store different scopes than expected.
# If the stored scopes are, in fact, the very same, then it means
# that the two frames (frame and defframe) share the same scope,
# and we could apply our lineno analysis over them.
# For instance, this works when they are inside a function, the node
# that uses a definition and the definition itself.
return False
# At this point, we are certain that frame and defframe share a scope
# and the definition of the first depends on the second.
return frame.lineno < defframe.lineno # type: ignore[no-any-return]
def _infer_name_module(node: nodes.Import, name: str) -> Generator[InferenceResult]:
context = astroid.context.InferenceContext()
context.lookupname = name
return node.infer(context, asname=False) # type: ignore[no-any-return]
def _fix_dot_imports(
not_consumed: Consumption,
) -> list[tuple[str, _base_nodes.ImportNode]]:
"""Try to fix imports with multiple dots, by returning a dictionary
with the import names expanded.
The function unflattens root imports,
like 'xml' (when we have both 'xml.etree' and 'xml.sax'), to 'xml.etree'
and 'xml.sax' respectively.
"""
names: dict[str, _base_nodes.ImportNode] = {}
for name, stmts in not_consumed.items():
if any(
isinstance(stmt, nodes.AssignName)
and isinstance(stmt.assign_type(), nodes.AugAssign)
for stmt in stmts
):
continue
for stmt in stmts:
if not isinstance(stmt, (nodes.ImportFrom, nodes.Import)):
continue
for imports in stmt.names:
second_name = None
import_module_name = imports[0]
if import_module_name == "*":
# In case of wildcard imports,
# pick the name from inside the imported module.
second_name = name
else:
name_matches_dotted_import = False
if (
import_module_name.startswith(name)
and import_module_name.find(".") > -1
):
name_matches_dotted_import = True
if name_matches_dotted_import or name in imports:
# Most likely something like 'xml.etree',
# which will appear in the .locals as 'xml'.
# Only pick the name if it wasn't consumed.
second_name = import_module_name
if second_name and second_name not in names:
names[second_name] = stmt
return sorted(names.items(), key=lambda a: a[1].fromlineno)
def _find_frame_imports(name: str, frame: nodes.LocalsDictNodeNG) -> bool:
"""Detect imports in the frame, with the required *name*.
Such imports can be considered assignments if they are not globals.
Returns True if an import for the given name was found.
"""
if name in _flattened_scope_names(frame.nodes_of_class(nodes.Global)):
return False
imports = frame.nodes_of_class((nodes.Import, nodes.ImportFrom))
for import_node in imports:
for import_name, import_alias in import_node.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias == name:
return True
elif import_name and import_name == name:
return True
return False
def _import_name_is_global(
stmt: nodes.Global | _base_nodes.ImportNode,
global_names: set[str],
) -> bool:
for import_name, import_alias in stmt.names:
# If the import uses an alias, check only that.
# Otherwise, check only the import name.
if import_alias:
if import_alias in global_names:
return True
elif import_name in global_names:
return True
return False
def _flattened_scope_names(
iterator: Iterator[nodes.Global | nodes.Nonlocal],
) -> set[str]:
values = (set(stmt.names) for stmt in iterator)
return set(itertools.chain.from_iterable(values))
def _assigned_locally(name_node: nodes.Name) -> bool:
"""Checks if name_node has corresponding assign statement in same scope."""
name_node_scope = name_node.scope()
assign_stmts = name_node_scope.nodes_of_class(nodes.AssignName)
return any(a.name == name_node.name for a in assign_stmts) or _find_frame_imports(
name_node.name, name_node_scope
)
def _is_before(node: nodes.NodeNG, reference_node: nodes.NodeNG) -> bool:
"""Checks if node appears before reference_node."""
if node.lineno < reference_node.lineno:
return True
if (
node.lineno == reference_node.lineno
and node.col_offset < reference_node.col_offset
):
return True
return False
def _is_nonlocal_name(node: nodes.Name, frame: nodes.LocalsDictNodeNG) -> bool:
"""Checks if name node has a nonlocal declaration in the given frame."""
if not isinstance(frame, nodes.FunctionDef):
return False
return any(
isinstance(stmt, nodes.Nonlocal)
and node.name in stmt.names
and _is_before(stmt, node)
for stmt in frame.body
)
def _has_locals_call_after_node(stmt: nodes.NodeNG, scope: nodes.FunctionDef) -> bool:
skip_nodes = (
nodes.FunctionDef,
nodes.ClassDef,
nodes.Import,
nodes.ImportFrom,
)
for call in scope.nodes_of_class(nodes.Call, skip_klass=skip_nodes):
inferred = utils.safe_infer(call.func)
if (
utils.is_builtin_object(inferred)
and getattr(inferred, "name", None) == "locals"
):
if stmt.lineno < call.lineno:
return True
return False
MSGS: dict[str, MessageDefinitionTuple] = {
"E0601": (
"Using variable %r before assignment",
"used-before-assignment",
"Emitted when a local variable is accessed before its assignment took place. "
"Assignments in try blocks are assumed not to have occurred when evaluating "
"associated except/finally blocks. Assignments in except blocks are assumed "
"not to have occurred when evaluating statements outside the block, except "
"when the associated try block contains a return statement.",
),
"E0602": (
"Undefined variable %r",
"undefined-variable",
"Used when an undefined variable is accessed.",
),
"E0603": (
"Undefined variable name %r in __all__",
"undefined-all-variable",
"Used when an undefined variable name is referenced in __all__.",
),
"E0604": (
"Invalid object %r in __all__, must contain only strings",
"invalid-all-object",
"Used when an invalid (non-string) object occurs in __all__.",
),
"E0605": (
"Invalid format for __all__, must be tuple or list",
"invalid-all-format",
"Used when __all__ has an invalid format.",
),
"E0606": (
"Possibly using variable %r before assignment",
"possibly-used-before-assignment",
"Emitted when a local variable is accessed before its assignment took place "
"in both branches of an if/else switch.",
),
"E0611": (
"No name %r in module %r",
"no-name-in-module",
"Used when a name cannot be found in a module.",
),
"W0601": (
"Global variable %r undefined at the module level",
"global-variable-undefined",
'Used when a variable is defined through the "global" statement '
"but the variable is not defined in the module scope.",
),
"W0602": (
"Using global for %r but no assignment is done",
"global-variable-not-assigned",
"When a variable defined in the global scope is modified in an inner scope, "
"the 'global' keyword is required in the inner scope only if there is an "
"assignment operation done in the inner scope.",
),
"W0603": (
"Using the global statement", # W0121
"global-statement",
'Used when you use the "global" statement to update a global '
"variable. Pylint discourages its usage. That doesn't mean you cannot "
"use it!",
),
"W0604": (
"Using the global statement at the module level", # W0103
"global-at-module-level",
'Used when you use the "global" statement at the module level '
"since it has no effect.",
),
"W0611": (
"Unused %s",
"unused-import",
"Used when an imported module or variable is not used.",
),
"W0612": (
"Unused variable %r",
"unused-variable",
"Used when a variable is defined but not used.",
),
"W0613": (
"Unused argument %r",
"unused-argument",
"Used when a function or method argument is not used.",
),
"W0614": (
"Unused import(s) %s from wildcard import of %s",
"unused-wildcard-import",
"Used when an imported module or variable is not used from a "
"`'from X import *'` style import.",
),
"W0621": (
"Redefining name %r from outer scope (line %s)",
"redefined-outer-name",
"Used when a variable's name hides a name defined in an outer scope or except handler.",
),
"W0622": (
"Redefining built-in %r",
"redefined-builtin",
"Used when a variable or function override a built-in.",
),
"W0631": (
"Using possibly undefined loop variable %r",
"undefined-loop-variable",
"Used when a loop variable (i.e. defined by a for loop or "
"a list comprehension or a generator expression) is used outside "
"the loop.",
),
"W0632": (
"Possible unbalanced tuple unpacking with sequence %s: left side has %d "
"label%s, right side has %d value%s",
"unbalanced-tuple-unpacking",
"Used when there is an unbalanced tuple unpacking in assignment",
{"old_names": [("E0632", "old-unbalanced-tuple-unpacking")]},
),
"E0633": (
"Attempting to unpack a non-sequence%s",
"unpacking-non-sequence",
"Used when something which is not a sequence is used in an unpack assignment",
{"old_names": [("W0633", "old-unpacking-non-sequence")]},
),
"W0640": (
"Cell variable %s defined in loop",
"cell-var-from-loop",
"A variable used in a closure is defined in a loop. "
"This will result in all closures using the same value for "
"the closed-over variable.",
),
"W0641": (
"Possibly unused variable %r",
"possibly-unused-variable",
"Used when a variable is defined but might not be used. "
"The possibility comes from the fact that locals() might be used, "
"which could consume or not the said variable",
),
"W0642": (
"Invalid assignment to %s in method",
"self-cls-assignment",
"Invalid assignment to self or cls in instance or class method "
"respectively.",
),
"E0643": (
"Invalid index for iterable length",
"potential-index-error",
"Emitted when an index used on an iterable goes beyond the length of that "
"iterable.",
),
"W0644": (
"Possible unbalanced dict unpacking with %s: "
"left side has %d label%s, right side has %d value%s",
"unbalanced-dict-unpacking",
"Used when there is an unbalanced dict unpacking in assignment or for loop",
),
}
| VariableVisitConsumerAction |
python | PyCQA__pylint | tests/functional/a/alternative/alternative_union_syntax.py | {
"start": 1533,
"end": 1602
} | class ____:
my_var: int | str
@dataclasses.dataclass
| CustomDataClass |
python | django__django | django/core/management/commands/dbshell.py | {
"start": 139,
"end": 1762
} | class ____(BaseCommand):
help = (
"Runs the command-line client for specified database, or the "
"default database if none is provided."
)
requires_system_checks = []
def add_arguments(self, parser):
parser.add_argument(
"--database",
default=DEFAULT_DB_ALIAS,
choices=tuple(connections),
help=(
"Nominates a database onto which to open a shell. Defaults to the "
'"default" database.'
),
)
parameters = parser.add_argument_group("parameters")
parameters.add_argument("parameters", nargs="*")
def handle(self, **options):
connection = connections[options["database"]]
try:
connection.client.runshell(options["parameters"])
except FileNotFoundError:
# Note that we're assuming the FileNotFoundError relates to the
# command missing. It could be raised for some other reason, in
# which case this error message would be inaccurate. Still, this
# message catches the common case.
raise CommandError(
"You appear not to have the %r program installed or on your path."
% connection.client.executable_name
)
except subprocess.CalledProcessError as e:
raise CommandError(
'"%s" returned non-zero exit status %s.'
% (
" ".join(map(str, e.cmd)),
e.returncode,
),
returncode=e.returncode,
)
| Command |
python | apache__airflow | providers/snowflake/tests/unit/snowflake/operators/test_snowflake.py | {
"start": 4554,
"end": 5149
} | class ____:
@mock.patch("airflow.providers.common.sql.operators.sql.SQLCheckOperator.get_db_hook")
def test_get_db_hook(
self,
mock_get_db_hook,
):
operator = SnowflakeCheckOperator(
task_id="snowflake_check",
snowflake_conn_id="snowflake_default",
sql="Select * from test_table",
parameters={"param1": "value1"},
)
operator.execute({})
mock_get_db_hook.assert_has_calls(
[call().get_first("Select * from test_table", {"param1": "value1"})]
)
| TestSnowflakeCheckOperator |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 10241,
"end": 10551
} | class ____(models.Model):
name = models.CharField(max_length=100)
history = HistoricalRecords()
def save(self, *args, **kwargs):
if hasattr(self, "skip_history_when_saving"):
raise RuntimeError("error while saving")
else:
super().save(*args, **kwargs)
| Person |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 87226,
"end": 87463
} | class ____(openblas_ilp64_info):
# ILP64 Openblas, with default symbol suffix
section = 'openblas64_'
dir_env_var = 'OPENBLAS64_'
_lib_names = ['openblas64_']
symbol_suffix = '64_'
symbol_prefix = ''
| openblas64__info |
python | pypa__pip | src/pip/_vendor/platformdirs/macos.py | {
"start": 193,
"end": 6322
} | class ____(PlatformDirsABC):
"""
Platform directories for the macOS operating system.
Follows the guidance from
`Apple documentation <https://developer.apple.com/library/archive/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/MacOSXDirectories/MacOSXDirectories.html>`_.
Makes use of the `appname <platformdirs.api.PlatformDirsABC.appname>`,
`version <platformdirs.api.PlatformDirsABC.version>`,
`ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.
"""
@property
def user_data_dir(self) -> str:
""":return: data directory tied to the user, e.g. ``~/Library/Application Support/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Application Support")) # noqa: PTH111
@property
def site_data_dir(self) -> str:
"""
:return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``.
If we're using a Python binary managed by `Homebrew <https://brew.sh>`_, the directory
will be under the Homebrew prefix, e.g. ``$homebrew_prefix/share/$appname/$version``.
If `multipath <platformdirs.api.PlatformDirsABC.multipath>` is enabled, and we're in Homebrew,
the response is a multi-path string separated by ":", e.g.
``$homebrew_prefix/share/$appname/$version:/Library/Application Support/$appname/$version``
"""
is_homebrew = "/opt/python" in sys.prefix
homebrew_prefix = sys.prefix.split("/opt/python")[0] if is_homebrew else ""
path_list = [self._append_app_name_and_version(f"{homebrew_prefix}/share")] if is_homebrew else []
path_list.append(self._append_app_name_and_version("/Library/Application Support"))
if self.multipath:
return os.pathsep.join(path_list)
return path_list[0]
@property
def site_data_path(self) -> Path:
""":return: data path shared by users. Only return the first item, even if ``multipath`` is set to ``True``"""
return self._first_item_as_path_if_multipath(self.site_data_dir)
@property
def user_config_dir(self) -> str:
""":return: config directory tied to the user, same as `user_data_dir`"""
return self.user_data_dir
@property
def site_config_dir(self) -> str:
""":return: config directory shared by the users, same as `site_data_dir`"""
return self.site_data_dir
@property
def user_cache_dir(self) -> str:
""":return: cache directory tied to the user, e.g. ``~/Library/Caches/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches")) # noqa: PTH111
@property
def site_cache_dir(self) -> str:
"""
:return: cache directory shared by users, e.g. ``/Library/Caches/$appname/$version``.
If we're using a Python binary managed by `Homebrew <https://brew.sh>`_, the directory
will be under the Homebrew prefix, e.g. ``$homebrew_prefix/var/cache/$appname/$version``.
If `multipath <platformdirs.api.PlatformDirsABC.multipath>` is enabled, and we're in Homebrew,
the response is a multi-path string separated by ":", e.g.
``$homebrew_prefix/var/cache/$appname/$version:/Library/Caches/$appname/$version``
"""
is_homebrew = "/opt/python" in sys.prefix
homebrew_prefix = sys.prefix.split("/opt/python")[0] if is_homebrew else ""
path_list = [self._append_app_name_and_version(f"{homebrew_prefix}/var/cache")] if is_homebrew else []
path_list.append(self._append_app_name_and_version("/Library/Caches"))
if self.multipath:
return os.pathsep.join(path_list)
return path_list[0]
@property
def site_cache_path(self) -> Path:
""":return: cache path shared by users. Only return the first item, even if ``multipath`` is set to ``True``"""
return self._first_item_as_path_if_multipath(self.site_cache_dir)
@property
def user_state_dir(self) -> str:
""":return: state directory tied to the user, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_log_dir(self) -> str:
""":return: log directory tied to the user, e.g. ``~/Library/Logs/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Logs")) # noqa: PTH111
@property
def user_documents_dir(self) -> str:
""":return: documents directory tied to the user, e.g. ``~/Documents``"""
return os.path.expanduser("~/Documents") # noqa: PTH111
@property
def user_downloads_dir(self) -> str:
""":return: downloads directory tied to the user, e.g. ``~/Downloads``"""
return os.path.expanduser("~/Downloads") # noqa: PTH111
@property
def user_pictures_dir(self) -> str:
""":return: pictures directory tied to the user, e.g. ``~/Pictures``"""
return os.path.expanduser("~/Pictures") # noqa: PTH111
@property
def user_videos_dir(self) -> str:
""":return: videos directory tied to the user, e.g. ``~/Movies``"""
return os.path.expanduser("~/Movies") # noqa: PTH111
@property
def user_music_dir(self) -> str:
""":return: music directory tied to the user, e.g. ``~/Music``"""
return os.path.expanduser("~/Music") # noqa: PTH111
@property
def user_desktop_dir(self) -> str:
""":return: desktop directory tied to the user, e.g. ``~/Desktop``"""
return os.path.expanduser("~/Desktop") # noqa: PTH111
@property
def user_runtime_dir(self) -> str:
""":return: runtime directory tied to the user, e.g. ``~/Library/Caches/TemporaryItems/$appname/$version``"""
return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches/TemporaryItems")) # noqa: PTH111
@property
def site_runtime_dir(self) -> str:
""":return: runtime directory shared by users, same as `user_runtime_dir`"""
return self.user_runtime_dir
__all__ = [
"MacOS",
]
| MacOS |
python | Netflix__metaflow | metaflow/package/__init__.py | {
"start": 796,
"end": 1384
} | class ____(MetaflowException):
headline = "Non-unique file path for a file name included in code package"
def __init__(self, filename, file_paths, lineno=None):
msg = (
"Filename %s included in the code package includes multiple different "
"paths for the same name : %s.\n"
"The `filename` in the `add_to_package` decorator hook requires a unique "
"`file_path` to `file_name` mapping" % (filename, ", ".join(file_paths))
)
super().__init__(msg=msg, lineno=lineno)
| NonUniqueFileNameToFilePathMappingException |
python | python-markdown__markdown | tests/test_apis.py | {
"start": 16926,
"end": 22761
} | class ____(unittest.TestCase):
""" Test the html and xhtml serializers. """
def testHtml(self):
""" Test HTML serialization. """
el = etree.Element('div')
el.set('id', 'foo<&">')
p = etree.SubElement(el, 'p')
p.text = 'foo <&escaped>'
p.set('hidden', 'hidden')
etree.SubElement(el, 'hr')
non_element = etree.SubElement(el, None)
non_element.text = 'non-element text'
script = etree.SubElement(non_element, 'script')
script.text = '<&"test\nescaping">'
el.tail = "tail text"
self.assertEqual(
markdown.serializers.to_html_string(el),
'<div id="foo<&">">'
'<p hidden>foo <&escaped></p>'
'<hr>'
'non-element text'
'<script><&"test\nescaping"></script>'
'</div>tail text'
)
def testXhtml(self):
"""" Test XHTML serialization. """
el = etree.Element('div')
el.set('id', 'foo<&">')
p = etree.SubElement(el, 'p')
p.text = 'foo<&escaped>'
p.set('hidden', 'hidden')
etree.SubElement(el, 'hr')
non_element = etree.SubElement(el, None)
non_element.text = 'non-element text'
script = etree.SubElement(non_element, 'script')
script.text = '<&"test\nescaping">'
el.tail = "tail text"
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<div id="foo<&">">'
'<p hidden="hidden">foo<&escaped></p>'
'<hr />'
'non-element text'
'<script><&"test\nescaping"></script>'
'</div>tail text'
)
def testMixedCaseTags(self):
"""" Test preservation of tag case. """
el = etree.Element('MixedCase')
el.text = 'not valid '
em = etree.SubElement(el, 'EMPHASIS')
em.text = 'html'
etree.SubElement(el, 'HR')
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<MixedCase>not valid <EMPHASIS>html</EMPHASIS><HR /></MixedCase>'
)
def testProsessingInstruction(self):
""" Test serialization of `ProcessignInstruction`. """
pi = ProcessingInstruction('foo', text='<&"test\nescaping">')
self.assertIs(pi.tag, ProcessingInstruction)
self.assertEqual(
markdown.serializers.to_xhtml_string(pi),
'<?foo <&"test\nescaping">?>'
)
def testQNameTag(self):
""" Test serialization of `QName` tag. """
div = etree.Element('div')
qname = etree.QName('http://www.w3.org/1998/Math/MathML', 'math')
math = etree.SubElement(div, qname)
math.set('display', 'block')
sem = etree.SubElement(math, 'semantics')
msup = etree.SubElement(sem, 'msup')
mi = etree.SubElement(msup, 'mi')
mi.text = 'x'
mn = etree.SubElement(msup, 'mn')
mn.text = '2'
ann = etree.SubElement(sem, 'annotations')
ann.text = 'x^2'
self.assertEqual(
markdown.serializers.to_xhtml_string(div),
'<div>'
'<math display="block" xmlns="http://www.w3.org/1998/Math/MathML">'
'<semantics>'
'<msup>'
'<mi>x</mi>'
'<mn>2</mn>'
'</msup>'
'<annotations>x^2</annotations>'
'</semantics>'
'</math>'
'</div>'
)
def testQNameAttribute(self):
""" Test serialization of `QName` attribute. """
div = etree.Element('div')
div.set(etree.QName('foo'), etree.QName('bar'))
self.assertEqual(
markdown.serializers.to_xhtml_string(div),
'<div foo="bar"></div>'
)
def testBadQNameTag(self):
""" Test serialization of `QName` with no tag. """
qname = etree.QName('http://www.w3.org/1998/Math/MathML')
el = etree.Element(qname)
self.assertRaises(ValueError, markdown.serializers.to_xhtml_string, el)
def testQNameEscaping(self):
""" Test `QName` escaping. """
qname = etree.QName('<&"test\nescaping">', 'div')
el = etree.Element(qname)
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<div xmlns="<&"test escaping">"></div>'
)
def testQNamePreEscaping(self):
""" Test `QName` that is already partially escaped. """
qname = etree.QName('<&"test escaping">', 'div')
el = etree.Element(qname)
self.assertEqual(
markdown.serializers.to_xhtml_string(el),
'<div xmlns="<&"test escaping">"></div>'
)
def buildExtension(self):
""" Build an extension which registers `fakeSerializer`. """
def fakeSerializer(elem):
# Ignore input and return hard-coded output
return '<div><p>foo</p></div>'
class registerFakeSerializer(markdown.extensions.Extension):
def extendMarkdown(self, md):
md.output_formats['fake'] = fakeSerializer
return registerFakeSerializer()
def testRegisterSerializer(self):
self.assertEqual(
markdown.markdown(
'baz', extensions=[self.buildExtension()], output_format='fake'
),
'<p>foo</p>'
)
def testXHTMLOutput(self):
self.assertEqual(
markdown.markdown('foo \nbar', output_format='xhtml'),
'<p>foo<br />\nbar</p>'
)
def testHTMLOutput(self):
self.assertEqual(
markdown.markdown('foo \nbar', output_format='html'),
'<p>foo<br>\nbar</p>'
)
| testSerializers |
python | ray-project__ray | python/ray/tune/utils/mock.py | {
"start": 254,
"end": 2197
} | class ____(Callback):
"""Adds random failure injection to the TrialExecutor."""
def __init__(
self,
config_path="~/ray_bootstrap_config.yaml",
probability=0.1,
time_between_checks=0,
disable=False,
):
self.probability = probability
self.config_path = Path(config_path).expanduser().as_posix()
self.disable = disable
self.time_between_checks = time_between_checks
# Initialize with current time so we don't fail right away
self.last_fail_check = time.monotonic()
def on_step_begin(self, **info):
if not os.path.exists(self.config_path):
return
if time.monotonic() < self.last_fail_check + self.time_between_checks:
return
self.last_fail_check = time.monotonic()
import click
from ray.autoscaler._private.commands import kill_node
failures = 0
max_failures = 3
# With 10% probability inject failure to a worker.
if random.random() < self.probability and not self.disable:
# With 10% probability fully terminate the node.
should_terminate = random.random() < self.probability
while failures < max_failures:
try:
kill_node(
self.config_path,
yes=True,
hard=should_terminate,
override_cluster_name=None,
)
return
except click.exceptions.ClickException:
failures += 1
logger.exception(
"Killing random node failed in attempt "
"{}. "
"Retrying {} more times".format(
str(failures), str(max_failures - failures)
)
)
| FailureInjectorCallback |
python | wandb__wandb | wandb/vendor/pygments/lexers/grammar_notation.py | {
"start": 1799,
"end": 3686
} | class ____(RegexLexer):
"""
Lexer for `IETF 7405 ABNF
<http://www.ietf.org/rfc/rfc7405.txt>`_
(Updates `5234 <http://www.ietf.org/rfc/rfc5234.txt>`_)
grammars.
.. versionadded:: 2.1
"""
name = 'ABNF'
aliases = ['abnf']
filenames = ['*.abnf']
mimetypes = ['text/x-abnf']
_core_rules = (
'ALPHA', 'BIT', 'CHAR', 'CR', 'CRLF', 'CTL', 'DIGIT',
'DQUOTE', 'HEXDIG', 'HTAB', 'LF', 'LWSP', 'OCTET',
'SP', 'VCHAR', 'WSP')
tokens = {
'root': [
# comment
(r';.*$', Comment.Single),
# quoted
# double quote itself in this state, it is as '%x22'.
(r'(%[si])?"[^"]*"', Literal),
# binary (but i have never seen...)
(r'%b[01]+\-[01]+\b', Literal), # range
(r'%b[01]+(\.[01]+)*\b', Literal), # concat
# decimal
(r'%d[0-9]+\-[0-9]+\b', Literal), # range
(r'%d[0-9]+(\.[0-9]+)*\b', Literal), # concat
# hexadecimal
(r'%x[0-9a-fA-F]+\-[0-9a-fA-F]+\b', Literal), # range
(r'%x[0-9a-fA-F]+(\.[0-9a-fA-F]+)*\b', Literal), # concat
# repetition (<a>*<b>element) including nRule
(r'\b[0-9]+\*[0-9]+', Operator),
(r'\b[0-9]+\*', Operator),
(r'\b[0-9]+', Operator),
(r'\*', Operator),
# Strictly speaking, these are not keyword but
# are called `Core Rule'.
(words(_core_rules, suffix=r'\b'), Keyword),
# nonterminals (ALPHA *(ALPHA / DIGIT / "-"))
(r'[a-zA-Z][a-zA-Z0-9-]+\b', Name.Class),
# operators
(r'(=/|=|/)', Operator),
# punctuation
(r'[\[\]()]', Punctuation),
# fallback
(r'\s+', Text),
(r'.', Text),
],
}
| AbnfLexer |
python | huggingface__transformers | tests/models/mask2former/test_image_processing_mask2former.py | {
"start": 6289,
"end": 29654
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = Mask2FormerImageProcessor if (is_vision_available() and is_torch_available()) else None
fast_image_processing_class = (
Mask2FormerImageProcessorFast if (is_vision_available() and is_torchvision_available()) else None
)
def setUp(self):
super().setUp()
self.image_processor_tester = Mask2FormerImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "ignore_index"))
self.assertTrue(hasattr(image_processing, "num_labels"))
def comm_get_image_processing_inputs(
self,
image_processor_tester,
image_processing_class,
with_segmentation_maps=False,
is_instance_map=False,
segmentation_type="np",
numpify=False,
input_data_format=None,
):
image_processing = image_processing_class(**image_processor_tester.prepare_image_processor_dict())
# prepare image and target
num_labels = image_processor_tester.num_labels
annotations = None
instance_id_to_semantic_id = None
image_inputs = image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=numpify)
if with_segmentation_maps:
high = num_labels
if is_instance_map:
labels_expanded = list(range(num_labels)) * 2
instance_id_to_semantic_id = dict(enumerate(labels_expanded))
annotations = [
np.random.randint(0, high * 2, img.shape[:2] if numpify else (img.size[1], img.size[0])).astype(
np.uint8
)
for img in image_inputs
]
if segmentation_type == "pil":
annotations = [Image.fromarray(annotation) for annotation in annotations]
if input_data_format is ChannelDimension.FIRST and numpify:
image_inputs = [np.moveaxis(img, -1, 0) for img in image_inputs]
inputs = image_processing(
image_inputs,
annotations,
return_tensors="pt",
instance_id_to_semantic_id=instance_id_to_semantic_id,
input_data_format=input_data_format,
)
return inputs
def test_with_size_divisor(self):
size_divisors = [8, 16, 32]
weird_input_sizes = [(407, 802), (582, 1094)]
for image_processing_class in self.image_processor_list:
for size_divisor in size_divisors:
image_processor_dict = {**self.image_processor_dict, **{"size_divisor": size_divisor}}
image_processing = image_processing_class(**image_processor_dict)
for weird_input_size in weird_input_sizes:
inputs = image_processing([np.ones((3, *weird_input_size))], return_tensors="pt")
pixel_values = inputs["pixel_values"]
# check if divisible
self.assertTrue((pixel_values.shape[-1] % size_divisor) == 0)
self.assertTrue((pixel_values.shape[-2] % size_divisor) == 0)
def test_call_with_segmentation_maps(self):
def common(
is_instance_map=False,
segmentation_type=None,
numpify=False,
num_channels=3,
input_data_format=None,
do_resize=True,
):
image_processor_tester = Mask2FormerImageProcessingTester(
self,
num_channels=num_channels,
do_resize=do_resize,
image_mean=[0.5] * num_channels,
image_std=[0.5] * num_channels,
)
for image_processing_class in self.image_processor_list:
inputs = self.comm_get_image_processing_inputs(
image_processor_tester=image_processor_tester,
image_processing_class=image_processing_class,
with_segmentation_maps=True,
is_instance_map=is_instance_map,
segmentation_type=segmentation_type,
numpify=numpify,
input_data_format=input_data_format,
)
mask_labels = inputs["mask_labels"]
class_labels = inputs["class_labels"]
pixel_values = inputs["pixel_values"]
# check the batch_size
for mask_label, class_label in zip(mask_labels, class_labels):
self.assertEqual(mask_label.shape[0], class_label.shape[0])
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
common()
common(is_instance_map=True)
common(is_instance_map=False, segmentation_type="pil")
common(is_instance_map=True, segmentation_type="pil")
common(num_channels=1, numpify=True)
common(num_channels=1, numpify=True, input_data_format=ChannelDimension.FIRST)
common(num_channels=2, numpify=True, input_data_format=ChannelDimension.LAST)
common(num_channels=5, numpify=True, input_data_format=ChannelDimension.LAST, do_resize=False)
common(num_channels=5, numpify=True, input_data_format=ChannelDimension.FIRST, do_resize=False)
with self.assertRaisesRegex(ValueError, expected_regex="Unable to infer channel dimension format"):
common(num_channels=5, numpify=True, do_resize=False)
with self.assertRaisesRegex(TypeError, expected_regex=r"Cannot handle this data type: .*"):
common(num_channels=5, numpify=True, input_data_format=ChannelDimension.LAST)
def test_integration_instance_segmentation(self):
# load 2 images and corresponding annotations from the hub
repo_id = "nielsr/image-segmentation-toy-data"
image1 = Image.open(
hf_hub_download(repo_id=repo_id, filename="instance_segmentation_image_1.png", repo_type="dataset")
)
image2 = Image.open(
hf_hub_download(repo_id=repo_id, filename="instance_segmentation_image_2.png", repo_type="dataset")
)
annotation1 = Image.open(
hf_hub_download(repo_id=repo_id, filename="instance_segmentation_annotation_1.png", repo_type="dataset")
)
annotation2 = Image.open(
hf_hub_download(repo_id=repo_id, filename="instance_segmentation_annotation_2.png", repo_type="dataset")
)
# get instance segmentations and instance-to-segmentation mappings
def get_instance_segmentation_and_mapping(annotation):
instance_seg = np.array(annotation)[:, :, 1]
class_id_map = np.array(annotation)[:, :, 0]
class_labels = np.unique(class_id_map)
# create mapping between instance IDs and semantic category IDs
inst2class = {}
for label in class_labels:
instance_ids = np.unique(instance_seg[class_id_map == label])
inst2class.update(dict.fromkeys(instance_ids, label))
return instance_seg, inst2class
instance_seg1, inst2class1 = get_instance_segmentation_and_mapping(annotation1)
instance_seg2, inst2class2 = get_instance_segmentation_and_mapping(annotation2)
# create a image processor
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(do_reduce_labels=True, ignore_index=255, size=(512, 512))
# prepare the images and annotations
inputs = image_processing(
[image1, image2],
[instance_seg1, instance_seg2],
instance_id_to_semantic_id=[inst2class1, inst2class2],
return_tensors="pt",
)
# verify the pixel values and pixel mask
self.assertEqual(inputs["pixel_values"].shape, (2, 3, 512, 512))
self.assertEqual(inputs["pixel_mask"].shape, (2, 512, 512))
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
torch.testing.assert_close(inputs["class_labels"][0], torch.tensor([30, 55]))
torch.testing.assert_close(inputs["class_labels"][1], torch.tensor([4, 4, 23, 55]))
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (2, 512, 512))
self.assertEqual(inputs["mask_labels"][1].shape, (4, 512, 512))
self.assertEqual(inputs["mask_labels"][0].sum().item(), 41527.0)
self.assertEqual(inputs["mask_labels"][1].sum().item(), 26259.0)
def test_integration_semantic_segmentation(self):
# load 2 images and corresponding semantic annotations from the hub
repo_id = "nielsr/image-segmentation-toy-data"
image1 = Image.open(
hf_hub_download(repo_id=repo_id, filename="semantic_segmentation_image_1.png", repo_type="dataset")
)
image2 = Image.open(
hf_hub_download(repo_id=repo_id, filename="semantic_segmentation_image_2.png", repo_type="dataset")
)
annotation1 = Image.open(
hf_hub_download(repo_id=repo_id, filename="semantic_segmentation_annotation_1.png", repo_type="dataset")
)
annotation2 = Image.open(
hf_hub_download(repo_id=repo_id, filename="semantic_segmentation_annotation_2.png", repo_type="dataset")
)
# create a image processor
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(do_reduce_labels=True, ignore_index=255, size=(512, 512))
# prepare the images and annotations
inputs = image_processing(
[image1, image2],
[annotation1, annotation2],
return_tensors="pt",
)
# verify the pixel values and pixel mask
self.assertEqual(inputs["pixel_values"].shape, (2, 3, 512, 512))
self.assertEqual(inputs["pixel_mask"].shape, (2, 512, 512))
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
torch.testing.assert_close(inputs["class_labels"][0], torch.tensor([2, 4, 60]))
torch.testing.assert_close(inputs["class_labels"][1], torch.tensor([0, 3, 7, 8, 15, 28, 30, 143]))
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (3, 512, 512))
self.assertEqual(inputs["mask_labels"][1].shape, (8, 512, 512))
self.assertEqual(inputs["mask_labels"][0].sum().item(), 170200.0)
self.assertEqual(inputs["mask_labels"][1].sum().item(), 257036.0)
def test_integration_panoptic_segmentation(self):
# load 2 images and corresponding panoptic annotations from the hub
dataset = load_dataset("nielsr/ade20k-panoptic-demo")
image1 = dataset["train"][0]["image"]
image2 = dataset["train"][1]["image"]
segments_info1 = dataset["train"][0]["segments_info"]
segments_info2 = dataset["train"][1]["segments_info"]
annotation1 = dataset["train"][0]["label"]
annotation2 = dataset["train"][1]["label"]
def rgb_to_id(color):
if isinstance(color, np.ndarray) and len(color.shape) == 3:
if color.dtype == np.uint8:
color = color.astype(np.int32)
return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
return int(color[0] + 256 * color[1] + 256 * 256 * color[2])
def create_panoptic_map(annotation, segments_info):
annotation = np.array(annotation)
# convert RGB to segment IDs per pixel
# 0 is the "ignore" label, for which we don't need to make binary masks
panoptic_map = rgb_to_id(annotation)
# create mapping between segment IDs and semantic classes
inst2class = {segment["id"]: segment["category_id"] for segment in segments_info}
return panoptic_map, inst2class
panoptic_map1, inst2class1 = create_panoptic_map(annotation1, segments_info1)
panoptic_map2, inst2class2 = create_panoptic_map(annotation2, segments_info2)
# create a image processor
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(ignore_index=0, do_resize=False)
# prepare the images and annotations
pixel_values_list = [np.moveaxis(np.array(image1), -1, 0), np.moveaxis(np.array(image2), -1, 0)]
inputs = image_processing(
pixel_values_list,
[panoptic_map1, panoptic_map2],
instance_id_to_semantic_id=[inst2class1, inst2class2],
return_tensors="pt",
)
# verify the pixel values and pixel mask
self.assertEqual(inputs["pixel_values"].shape, (2, 3, 512, 711))
self.assertEqual(inputs["pixel_mask"].shape, (2, 512, 711))
# verify the class labels
self.assertEqual(len(inputs["class_labels"]), 2)
expected_class_labels = torch.tensor([4, 17, 32, 42, 42, 42, 42, 42, 42, 42, 32, 12, 12, 12, 12, 12, 42, 42, 12, 12, 12, 42, 12, 12, 12, 12, 12, 3, 12, 12, 12, 12, 42, 42, 42, 12, 42, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 5, 12, 12, 12, 12, 12, 12, 12, 0, 43, 43, 43, 96, 43, 104, 43, 31, 125, 31, 125, 138, 87, 125, 149, 138, 125, 87, 87]) # fmt: skip
torch.testing.assert_close(inputs["class_labels"][0], torch.tensor(expected_class_labels))
expected_class_labels = torch.tensor([19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 67, 82, 19, 19, 17, 19, 19, 19, 19, 19, 19, 19, 19, 19, 12, 12, 42, 12, 12, 12, 12, 3, 14, 12, 12, 12, 12, 12, 12, 12, 12, 14, 5, 12, 12, 0, 115, 43, 43, 115, 43, 43, 43, 8, 8, 8, 138, 138, 125, 143]) # fmt: skip
torch.testing.assert_close(inputs["class_labels"][1], expected_class_labels)
# verify the mask labels
self.assertEqual(len(inputs["mask_labels"]), 2)
self.assertEqual(inputs["mask_labels"][0].shape, (79, 512, 711))
self.assertEqual(inputs["mask_labels"][1].shape, (61, 512, 711))
self.assertEqual(inputs["mask_labels"][0].sum().item(), 315193.0)
self.assertEqual(inputs["mask_labels"][1].sum().item(), 350747.0)
def test_binary_mask_to_rle(self):
fake_binary_mask = np.zeros((20, 50))
fake_binary_mask[0, 20:] = 1
fake_binary_mask[1, :15] = 1
fake_binary_mask[5, :10] = 1
rle = binary_mask_to_rle(fake_binary_mask)
self.assertEqual(len(rle), 4)
self.assertEqual(rle[0], 21)
self.assertEqual(rle[1], 45)
def test_post_process_semantic_segmentation(self):
for image_processing_class in self.image_processor_list:
feature_extractor = image_processing_class(num_labels=self.image_processor_tester.num_classes)
outputs = self.image_processor_tester.get_fake_mask2former_outputs()
segmentation = feature_extractor.post_process_semantic_segmentation(outputs)
self.assertEqual(len(segmentation), self.image_processor_tester.batch_size)
self.assertEqual(segmentation[0].shape, (384, 384))
target_sizes = [(1, 4) for i in range(self.image_processor_tester.batch_size)]
segmentation = feature_extractor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
self.assertEqual(segmentation[0].shape, target_sizes[0])
def test_post_process_instance_segmentation(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(num_labels=self.image_processor_tester.num_classes)
outputs = self.image_processor_tester.get_fake_mask2former_outputs()
segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)
self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(el["segmentation"].shape, (384, 384))
segmentation = image_processor.post_process_instance_segmentation(
outputs, threshold=0, return_binary_maps=True
)
self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(len(el["segmentation"].shape), 3)
self.assertEqual(el["segmentation"].shape[1:], (384, 384))
def test_post_process_panoptic_segmentation(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(num_labels=self.image_processor_tester.num_classes)
outputs = self.image_processor_tester.get_fake_mask2former_outputs()
segmentation = image_processing.post_process_panoptic_segmentation(outputs, threshold=0)
self.assertTrue(len(segmentation) == self.image_processor_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(el["segmentation"].shape, (384, 384))
def test_post_process_label_fusing(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(num_labels=self.image_processor_tester.num_classes)
outputs = self.image_processor_tester.get_fake_mask2former_outputs()
segmentation = image_processor.post_process_panoptic_segmentation(
outputs, threshold=0, mask_threshold=0, overlap_mask_area_threshold=0
)
unfused_segments = [el["segments_info"] for el in segmentation]
fused_segmentation = image_processor.post_process_panoptic_segmentation(
outputs, threshold=0, mask_threshold=0, overlap_mask_area_threshold=0, label_ids_to_fuse={1}
)
fused_segments = [el["segments_info"] for el in fused_segmentation]
for el_unfused, el_fused in zip(unfused_segments, fused_segments):
if len(el_unfused) == 0:
self.assertEqual(len(el_unfused), len(el_fused))
continue
# Get number of segments to be fused
fuse_targets = [1 for el in el_unfused if el["label_id"] == 1]
num_to_fuse = 0 if len(fuse_targets) == 0 else sum(fuse_targets) - 1
# Expected number of segments after fusing
expected_num_segments = max(el["id"] for el in el_unfused) - num_to_fuse
num_segments_fused = max(el["id"] for el in el_fused)
self.assertEqual(num_segments_fused, expected_num_segments)
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image, dummy_map = prepare_semantic_single_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
image_encoding_slow = image_processor_slow(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
image_encoding_fast = image_processor_fast(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(image_encoding_slow.pixel_values, image_encoding_fast.pixel_values)
for mask_label_slow, mask_label_fast in zip(image_encoding_slow.mask_labels, image_encoding_fast.mask_labels):
self._assert_slow_fast_tensors_equivalence(mask_label_slow, mask_label_fast)
for class_label_slow, class_label_fast in zip(
image_encoding_slow.class_labels, image_encoding_fast.class_labels
):
self._assert_slow_fast_tensors_equivalence(class_label_slow.float(), class_label_fast.float())
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
self.skipTest(
reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
)
dummy_images, dummy_maps = prepare_semantic_batch_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
for mask_label_slow, mask_label_fast in zip(encoding_slow.mask_labels, encoding_fast.mask_labels):
self._assert_slow_fast_tensors_equivalence(mask_label_slow, mask_label_fast)
for class_label_slow, class_label_fast in zip(encoding_slow.class_labels, encoding_fast.class_labels):
self._assert_slow_fast_tensors_equivalence(class_label_slow.float(), class_label_fast.float())
| Mask2FormerImageProcessingTest |
python | PrefectHQ__prefect | src/prefect/server/schemas/sorting.py | {
"start": 546,
"end": 2101
} | class ____(AutoEnum):
"""Defines flow run sorting options."""
ID_DESC = AutoEnum.auto()
START_TIME_ASC = AutoEnum.auto()
START_TIME_DESC = AutoEnum.auto()
EXPECTED_START_TIME_ASC = AutoEnum.auto()
EXPECTED_START_TIME_DESC = AutoEnum.auto()
NAME_ASC = AutoEnum.auto()
NAME_DESC = AutoEnum.auto()
NEXT_SCHEDULED_START_TIME_ASC = AutoEnum.auto()
END_TIME_DESC = AutoEnum.auto()
@db_injector
def as_sql_sort(self, db: "PrefectDBInterface") -> Iterable[sa.ColumnElement[Any]]:
"""Return an expression used to sort flow runs"""
sort_mapping: dict[str, Iterable[sa.ColumnElement[Any]]] = {
"ID_DESC": [db.FlowRun.id.desc()],
"START_TIME_ASC": [
sa.func.coalesce(
db.FlowRun.start_time, db.FlowRun.expected_start_time
).asc()
],
"START_TIME_DESC": [
sa.func.coalesce(
db.FlowRun.start_time, db.FlowRun.expected_start_time
).desc()
],
"EXPECTED_START_TIME_ASC": [db.FlowRun.expected_start_time.asc()],
"EXPECTED_START_TIME_DESC": [db.FlowRun.expected_start_time.desc()],
"NAME_ASC": [db.FlowRun.name.asc()],
"NAME_DESC": [db.FlowRun.name.desc()],
"NEXT_SCHEDULED_START_TIME_ASC": [
db.FlowRun.next_scheduled_start_time.asc()
],
"END_TIME_DESC": [db.FlowRun.end_time.desc()],
}
return sort_mapping[self.value]
| FlowRunSort |
python | neetcode-gh__leetcode | python/0540-single-element-in-a-sorted-array.py | {
"start": 0,
"end": 829
} | class ____:
def singleNonDuplicate(self, nums: List[int]) -> int:
def is_non_duplicate(i):
is_left_different = i == 0 or nums[i-1] != nums[i]
is_right_different = i == len(nums)-1 or nums[i+1] != nums[i]
return is_left_different and is_right_different
if len(nums) == 1:
return nums[0]
l, r = 0, len(nums) - 1
while l <= r:
mid = (l + r) // 2
if is_non_duplicate(mid):
return nums[mid]
if mid % 2 == 0:
if nums[mid+1] == nums[mid]:
l = mid + 1
else:
r = mid - 1
else:
if nums[mid+1] == nums[mid]:
r = mid - 1
else:
l = mid + 1
| Solution |
python | h5py__h5py | h5py/tests/test_dataset.py | {
"start": 26677,
"end": 28586
} | class ____(BaseDataset):
"""
Feature: Datasets created with a compression code
"""
@pytest.mark.thread_unsafe(reason="monkey-patch")
def test_compression_number(self):
""" Create with compression number of gzip (h5py.h5z.FILTER_DEFLATE) and a compression level of 7"""
original_compression_vals = h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS
try:
h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS = tuple()
dset = self.f.create_dataset('foo', (20, 30), compression=h5py.h5z.FILTER_DEFLATE, compression_opts=(7,))
finally:
h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS = original_compression_vals
self.assertEqual(dset.compression, 'gzip')
self.assertEqual(dset.compression_opts, 7)
@pytest.mark.thread_unsafe(reason="monkey-patch")
def test_compression_number_invalid(self):
""" Create with invalid compression numbers """
with self.assertRaises(ValueError) as e:
self.f.create_dataset('foo', (20, 30), compression=-999)
self.assertIn("Invalid filter", str(e.exception))
with self.assertRaises(ValueError) as e:
self.f.create_dataset('foo', (20, 30), compression=100)
self.assertIn("Unknown compression", str(e.exception))
original_compression_vals = h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS
try:
h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS = tuple()
# Using gzip compression requires a compression level specified in compression_opts
with self.assertRaises(IndexError):
self.f.create_dataset('foo', (20, 30), compression=h5py.h5z.FILTER_DEFLATE)
finally:
h5py._hl.dataset._LEGACY_GZIP_COMPRESSION_VALS = original_compression_vals
@ut.skipIf('lzf' not in h5py.filters.encode, "LZF is not installed")
| TestCreateCompressionNumber |
python | huggingface__transformers | tests/generation/test_fsdp.py | {
"start": 4552,
"end": 5702
} | class ____(TestCasePlus):
@require_torch_multi_accelerator
def test_fsdp_generate(self):
device_count = backend_device_count(torch_device)
distributed_args = f"""--nproc_per_node={device_count}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_fsdp.py
""".split()
args = ["--fsdp"]
cmd = ["torchrun"] + distributed_args + args
execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
@require_torch_multi_accelerator
def test_fsdp2_generate(self):
device_count = backend_device_count(torch_device)
distributed_args = f"""--nproc_per_node={device_count}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_fsdp.py
""".split()
args = ["--fsdp2"]
cmd = ["torchrun"] + distributed_args + args
execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
| TestFSDPGeneration |
python | streamlit__streamlit | lib/tests/streamlit/elements/button_group_test.py | {
"start": 7348,
"end": 13366
} | class ____(DeltaGeneratorTestCase):
"""Tests that are specific for the feedback command."""
@parameterized.expand(
[
("thumbs", list(_THUMB_ICONS)),
("faces", list(_FACES_ICONS)),
("stars", list([_STAR_ICON] * 5)),
]
)
def test_call_feedback_with_all_options(
self, option: Literal["thumbs", "faces", "stars"], expected_icons: list[str]
):
st.feedback(option)
delta = self.get_delta_from_queue().new_element.button_group
assert delta.default == []
assert [option.content_icon for option in delta.options] == expected_icons
def test_invalid_option_literal(self):
with pytest.raises(StreamlitAPIException) as e:
st.feedback("foo")
assert str(e.value) == (
"The options argument to st.feedback must be one of "
"['thumbs', 'faces', 'stars']. The argument passed was 'foo'."
)
@parameterized.expand([(0,), (1,)])
def test_widget_state_changed_via_session_state(self, session_state_index: int):
st.session_state.feedback_command_key = session_state_index
val = st.feedback("thumbs", key="feedback_command_key")
assert val == session_state_index
def test_feedback_converts_small_width_to_content(self):
"""Test that st.feedback converts small pixel widths to content width.
The threshold is calculated dynamically based on theme.baseFontSize,
so this tests with default 16px base font size.
"""
# With default 16px base font: thumbs threshold ~55px (3.125rem x 16 x 1.1)
st.feedback("thumbs", width=30, key="thumbs_small")
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_CONTENT.value
)
assert el.width_config.use_content is True
# With default 16px base font: faces threshold ~141px (8rem x 16 x 1.1)
st.feedback("faces", width=100, key="faces_small")
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_CONTENT.value
)
assert el.width_config.use_content is True
def test_feedback_preserves_adequate_pixel_widths(self):
"""Test that st.feedback preserves pixel widths above the threshold."""
# Large widths well above any threshold should be preserved
st.feedback("thumbs", width=100, key="thumbs_adequate")
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.PIXEL_WIDTH.value
)
assert el.width_config.pixel_width == 100
st.feedback("stars", width=200, key="stars_adequate")
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.PIXEL_WIDTH.value
)
assert el.width_config.pixel_width == 200
def test_feedback_threshold_adapts_to_base_font_size(self):
"""Test that the conversion threshold adapts to theme.baseFontSize."""
# Test with 20px base font size (larger than default 16px)
# Threshold calculation: 3.125rem x 20 x 1.1 = 68.75px (thumbs)
# So width=65 should convert to "content" at 20px, but preserves at 16px
with patch_config_options({"theme.baseFontSize": 20}):
st.feedback("thumbs", width=65, key="thumbs_20px_font")
el = self.get_delta_from_queue().new_element
# At 20px base font, 65px is below threshold, converts to content
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_CONTENT.value
)
assert el.width_config.use_content is True
# At 16px base font, same 65px width is above threshold, preserved
with patch_config_options({"theme.baseFontSize": 16}):
st.feedback("thumbs", width=65, key="thumbs_16px_font")
el = self.get_delta_from_queue().new_element
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.PIXEL_WIDTH.value
)
assert el.width_config.pixel_width == 65
def get_command_matrix(
test_args: list[Any], with_st_feedback: bool = False
) -> list[tuple[Any]]:
"""Return a test matrix for the different button group commands and the
passed arguments.
If the test args is a list like [("foo", ("a", "b")), ("bar", ("c", "d"))],
this function returns following test matrix:
[
(st.pills, "foo", ("a", "b")),
(st.pills, "bar", ("c", "d")),
(st.segmented_control, "foo", ("a", "b")),
(st.segmented_control, "bar", ("c", "d")),
(_interal_button_group, "foo", ("a", "b")),
(_interal_button_group, "bar", ("c", "d")),
]
The pills, segmented_control, and _internal_button_group are wrapped in a lambda to pass default
arguments that are not shared between them.
"""
matrix = []
commands: list[Callable[..., Any]] = [
lambda *args, **kwargs: st.pills("label", *args, **kwargs),
lambda *args, **kwargs: st.segmented_control("label", *args, **kwargs),
lambda *args, **kwargs: ButtonGroupMixin._internal_button_group(
st._main, *args, **kwargs
),
]
if with_st_feedback:
commands.append(lambda *args, **kwargs: st.feedback(*args, **kwargs))
for command in commands:
if command is None:
continue
if len(test_args) == 0:
matrix.append((command,))
continue
for args in test_args:
matrix.append((command, *args))
return matrix
# TODO: Some tests are very similar to the ones in multi_test.py -> maybe we can refactor them and share even more
| TestFeedbackCommand |
python | pytorch__pytorch | torch/testing/_internal/distributed/distributed_test.py | {
"start": 7469,
"end": 7710
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc = nn.Linear(10, 50, bias=True)
self.fc.bias.requires_grad = False
def forward(self, x):
x = self.fc(x)
return x
| _FC2 |
python | nedbat__coveragepy | tests/test_files.py | {
"start": 30081,
"end": 30316
} | class ____(CoverageTest):
"""Windows-specific tests of file name handling."""
run_in_temp_dir = False
def test_actual_path(self) -> None:
assert actual_path(r"c:\Windows") == actual_path(r"C:\wINDOWS")
| WindowsFileTest |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/xcom_arg.py | {
"start": 16948,
"end": 18463
} | class ____(XComArg):
"""
An XCom reference with ``zip()`` applied.
This is constructed from multiple XComArg instances, and presents an
iterable that "zips" them together like the built-in ``zip()`` (and
``itertools.zip_longest()`` if ``fillvalue`` is provided).
"""
args: Sequence[XComArg] = attrs.field(validator=attrs.validators.min_len(1))
fillvalue: Any = attrs.field(default=NOTSET, kw_only=True)
def __repr__(self) -> str:
args_iter = iter(self.args)
first = repr(next(args_iter))
rest = ", ".join(repr(arg) for arg in args_iter)
if is_arg_set(self.fillvalue):
return f"{first}.zip({rest}, fillvalue={self.fillvalue!r})"
return f"{first}.zip({rest})"
def _serialize(self) -> dict[str, Any]:
args = [serialize_xcom_arg(arg) for arg in self.args]
if is_arg_set(self.fillvalue):
return {"args": args, "fillvalue": self.fillvalue}
return {"args": args}
def iter_references(self) -> Iterator[tuple[Operator, str]]:
for arg in self.args:
yield from arg.iter_references()
def resolve(self, context: Mapping[str, Any]) -> Any:
values = [arg.resolve(context) for arg in self.args]
for value in values:
if not isinstance(value, (Sequence, dict)):
raise ValueError(f"XCom zip expects sequence or dict, not {type(value).__name__}")
return _ZipResult(values, fillvalue=self.fillvalue)
@attrs.define
| ZipXComArg |
python | realpython__materials | duck-typing-python/vehicles_abc.py | {
"start": 711,
"end": 913
} | class ____(Vehicle):
def start(self):
print("The truck is starting")
def stop(self):
print("The truck is stopping")
def drive(self):
print("The truck is driving")
| Truck |
python | walkccc__LeetCode | solutions/3194. Minimum Average of Smallest and Largest Elements/3194.py | {
"start": 0,
"end": 177
} | class ____:
def minimumAverage(self, nums: list[int]) -> float:
nums.sort()
return min((nums[i] + nums[~i]) / 2
for i in range(len(nums) // 2 + 1))
| Solution |
python | pytorch__pytorch | torch/onnx/ops/_symbolic_impl.py | {
"start": 889,
"end": 11765
} | class ____:
"""Class to encode attributes from dictionary into lists of FX compatible attributes.
Since FX does not support dictionaries, we need to encode the attributes into
lists. This class provides a way to encode and decode the attributes.
Attributes:
attr_keys: List of attribute keys.
attr_types: List of attribute types. Values can be "i" (int), "f" (float),
"s" (string), "is" (int sequence), "fs" (float sequence), or "ss" (string sequence).
attr_pos: List of tuples representing the start and end positions of each
attribute in the corresponding list.
attr_ints: List of integer attributes.
attr_floats: List of float attributes.
attr_strs: List of string attributes.
"""
attr_keys: list[str]
attr_types: list[str]
attr_pos: list[tuple[int, int]]
attr_ints: list[int]
attr_floats: list[float]
attr_strs: list[str]
@classmethod
def from_dict(
cls,
attrs: dict[
str,
Union[
int,
float,
str,
bool,
Sequence[int],
Sequence[float],
Sequence[str],
Sequence[bool],
],
],
) -> "EncodedAttrs":
encoded = cls(
attr_keys=[],
attr_types=[],
attr_pos=[],
attr_ints=[],
attr_floats=[],
attr_strs=[],
)
for k, v in attrs.items():
encoded.attr_keys.append(k)
if isinstance(v, int):
start_pos = len(encoded.attr_ints)
encoded.attr_ints.append(v)
encoded.attr_pos.append((start_pos, start_pos + 1))
encoded.attr_types.append(_INT_TYPE)
elif isinstance(v, float):
start_pos = len(encoded.attr_floats)
encoded.attr_floats.append(v)
encoded.attr_pos.append((start_pos, start_pos + 1))
encoded.attr_types.append(_FLOAT_TYPE)
elif isinstance(v, str):
start_pos = len(encoded.attr_strs)
encoded.attr_strs.append(v)
encoded.attr_pos.append((start_pos, start_pos + 1))
encoded.attr_types.append(_STRING_TYPE)
elif isinstance(v, Sequence):
if len(v) == 0:
raise ValueError(f"Empty sequence for attribute {k}")
if any(isinstance(elem, float) for elem in v):
start_pos = len(encoded.attr_floats)
encoded.attr_floats.extend([float(elem) for elem in v])
encoded.attr_pos.append((start_pos, start_pos + len(v)))
encoded.attr_types.append(_FLOAT_SEQ_TYPE)
elif isinstance(v[0], int):
start_pos = len(encoded.attr_ints)
encoded.attr_ints.extend([int(elem) for elem in v])
encoded.attr_pos.append((start_pos, start_pos + len(v)))
encoded.attr_types.append(_INT_SEQ_TYPE)
elif isinstance(v[0], str):
start_pos = len(encoded.attr_strs)
encoded.attr_strs.extend([str(elem) for elem in v])
encoded.attr_pos.append((start_pos, start_pos + len(v)))
encoded.attr_types.append(_STRING_SEQ_TYPE)
else:
raise ValueError(f"Unsupported sequence type for attribute {k}")
else:
raise ValueError(f"Unsupported attribute type for {k}: {type(v)}")
assert len(encoded.attr_keys) == len(encoded.attr_types), (
f"Mismatch between number of attribute keys and types: {len(encoded.attr_keys)} != {len(encoded.attr_types)}"
)
assert len(encoded.attr_keys) == len(encoded.attr_pos), (
f"Mismatch between number of attribute keys and positions: {len(encoded.attr_keys)} != {len(encoded.attr_pos)}"
)
return encoded
def to_dict(
self,
) -> dict[
str,
Union[
int,
float,
str,
list[int],
list[float],
list[str],
],
]:
"""Convert the encoded attributes back to a dictionary for creating an ONNX node."""
attrs: dict[
str,
Union[
int,
float,
str,
list[int],
list[float],
list[str],
],
] = {}
for i, key in enumerate(self.attr_keys):
attr_type = self.attr_types[i]
if attr_type == _INT_TYPE:
attrs[key] = self.attr_ints[self.attr_pos[i][0]]
elif attr_type == _FLOAT_TYPE:
attrs[key] = self.attr_floats[self.attr_pos[i][0]]
elif attr_type == _STRING_TYPE:
attrs[key] = self.attr_strs[self.attr_pos[i][0]]
elif attr_type == _FLOAT_SEQ_TYPE:
attrs[key] = self.attr_floats[self.attr_pos[i][0] : self.attr_pos[i][1]]
elif attr_type == _INT_SEQ_TYPE:
attrs[key] = self.attr_ints[self.attr_pos[i][0] : self.attr_pos[i][1]]
elif attr_type == _STRING_SEQ_TYPE:
attrs[key] = self.attr_strs[self.attr_pos[i][0] : self.attr_pos[i][1]]
else:
raise ValueError(f"Unsupported attribute type: {attr_type}")
return attrs
@torch.library.custom_op(
"onnx_symbolic::_symbolic",
mutates_args=(),
schema=(
"(Tensor?[] inputs, str op_type, int onnx_dtype, *,"
" SymInt[] shape, str[] attr_keys, str[] attr_types, int[][] attr_pos,"
" int[] attr_ints, float[] attr_floats, str[] attr_strs, str[] metadata_props_keys,"
" str[] metadata_props_values, str domain='', int? version=None"
") -> Tensor"
),
)
def _symbolic(
inputs: Sequence[Optional[torch.Tensor]],
op_type: str,
onnx_dtype: int,
*,
shape: Sequence[Union[int, torch.SymInt]],
attr_keys: Sequence[str],
attr_types: Sequence[str],
attr_pos: Sequence[tuple[int, int]],
attr_ints: Sequence[int],
attr_floats: Sequence[float],
attr_strs: Sequence[str],
metadata_props_keys: Sequence[str] = (),
metadata_props_values: Sequence[str] = (),
domain: str = "",
version: Optional[int] = None,
) -> torch.Tensor:
torch._check(
onnx_dtype in _dtype_mappings.ONNX_DTYPE_TO_TORCH_DTYPE,
lambda: f"{onnx_dtype} is invalid as an ONNX data type. Valid values are {list(_dtype_mappings.ONNX_DTYPE_TO_TORCH_DTYPE.keys())}",
)
return torch.zeros(
shape, dtype=_dtype_mappings.ONNX_DTYPE_TO_TORCH_DTYPE[onnx_dtype]
)
@_symbolic.register_fake
def _(
inputs: Sequence[torch.Tensor],
op_type: str,
onnx_dtype: int,
*,
shape: Sequence[Union[int, torch.SymInt]],
attr_keys: Sequence[str],
attr_types: Sequence[str],
attr_pos: Sequence[tuple[int, int]],
attr_ints: Sequence[int],
attr_floats: Sequence[float],
attr_strs: Sequence[str],
metadata_props_keys: Sequence[str] = (),
metadata_props_values: Sequence[str] = (),
domain: str = "",
version: Optional[int] = None,
) -> torch.Tensor:
torch._check(
onnx_dtype in _dtype_mappings.ONNX_DTYPE_TO_TORCH_DTYPE,
lambda: f"{onnx_dtype} is invalid as an ONNX data type. Valid values are {list(_dtype_mappings.ONNX_DTYPE_TO_TORCH_DTYPE.keys())}",
)
# NOTE(justinchuby): Use zeros instead of torch.empty because I haven't figured
# out how it can handle empty shapes
return torch.zeros(
shape, dtype=_dtype_mappings.ONNX_DTYPE_TO_TORCH_DTYPE[onnx_dtype]
)
@torch.library.custom_op(
"onnx_symbolic::_symbolic_multi_out",
mutates_args=(),
schema=(
"(Tensor?[] inputs, str op_type, int[] onnx_dtypes, *,"
" SymInt[][] shapes, str[] attr_keys, str[] attr_types, int[][] attr_pos,"
" int[] attr_ints, float[] attr_floats, str[] attr_strs, str[] metadata_props_keys,"
" str[] metadata_props_values, str domain='', int? version=None"
") -> Tensor[]"
),
)
def _symbolic_multi_out(
inputs: Sequence[Optional[torch.Tensor]],
op_type: str,
onnx_dtypes: Sequence[int],
*,
shapes: Sequence[Sequence[Union[int, torch.SymInt]]],
attr_keys: Sequence[str],
attr_types: Sequence[str],
attr_pos: Sequence[tuple[int, int]],
attr_ints: Sequence[int],
attr_floats: Sequence[float],
attr_strs: Sequence[str],
metadata_props_keys: Sequence[str] = (),
metadata_props_values: Sequence[str] = (),
domain: str = "",
version: Optional[int] = None,
) -> list[torch.Tensor]:
outputs = []
torch._check(
len(shapes) == len(onnx_dtypes),
lambda: f"Number of shapes ({len(shapes)}) must match number of ONNX dtypes ({len(onnx_dtypes)})",
)
for shape, onnx_dtype in zip(shapes, onnx_dtypes):
torch._check(
onnx_dtype in _dtype_mappings.ONNX_DTYPE_TO_TORCH_DTYPE,
lambda: f"{onnx_dtype} is invalid as an ONNX data type. Valid values are {list(_dtype_mappings.ONNX_DTYPE_TO_TORCH_DTYPE.keys())}",
)
outputs.append(
torch.zeros(
shape, dtype=_dtype_mappings.ONNX_DTYPE_TO_TORCH_DTYPE[onnx_dtype]
)
)
return outputs
@_symbolic_multi_out.register_fake
def _(
inputs: Sequence[torch.Tensor],
op_type: str,
onnx_dtypes: Sequence[int],
*,
shapes: Sequence[Sequence[Union[int, torch.SymInt]]],
attr_keys: Sequence[str],
attr_types: Sequence[str],
attr_pos: Sequence[tuple[int, int]],
attr_ints: Sequence[int],
attr_floats: Sequence[float],
attr_strs: Sequence[str],
metadata_props_keys: Sequence[str] = (),
metadata_props_values: Sequence[str] = (),
domain: str = "",
version: Optional[int] = None,
) -> list[torch.Tensor]:
outputs = []
torch._check(
len(shapes) == len(onnx_dtypes),
lambda: f"Number of shapes ({len(shapes)}) must match number of ONNX dtypes ({len(onnx_dtypes)})",
)
for shape, onnx_dtype in zip(shapes, onnx_dtypes):
torch._check(
onnx_dtype in _dtype_mappings.ONNX_DTYPE_TO_TORCH_DTYPE,
lambda: f"{onnx_dtype} is invalid as an ONNX data type. Valid values are {list(_dtype_mappings.ONNX_DTYPE_TO_TORCH_DTYPE.keys())}",
)
# NOTE(justinchuby): Use zeros instead of torch.empty because I haven't figured
# out how it can handle empty shapes
outputs.append(
torch.zeros(
shape, dtype=_dtype_mappings.ONNX_DTYPE_TO_TORCH_DTYPE[onnx_dtype]
)
)
return outputs
| EncodedAttrs |
python | keras-team__keras | keras/src/losses/losses.py | {
"start": 12298,
"end": 13899
} | class ____(LossFunctionWrapper):
"""Computes the logarithm of the hyperbolic cosine of the prediction error.
Formula:
```python
error = y_pred - y_true
logcosh = mean(log((exp(error) + exp(-error))/2), axis=-1)`
```
where x is the error `y_pred - y_true`.
Args:
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
`"float32"` unless set to different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
"""
def __init__(
self,
reduction="sum_over_batch_size",
name="log_cosh",
dtype=None,
):
super().__init__(log_cosh, name=name, reduction=reduction, dtype=dtype)
def get_config(self):
return Loss.get_config(self)
@keras_export("keras.losses.Hinge")
| LogCosh |
python | huggingface__transformers | src/transformers/models/hubert/modeling_hubert.py | {
"start": 18989,
"end": 19871
} | class ____(nn.Module):
def __init__(self, config):
"""
Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed
up training throughput.
"""
super().__init__()
self.input_dim = config.adapter_attn_dim
self.hidden_dim = config.hidden_size
self.norm = nn.LayerNorm(self.hidden_dim)
self.linear_1 = nn.Linear(self.hidden_dim, self.input_dim)
self.act_fn = nn.ReLU()
self.linear_2 = nn.Linear(self.input_dim, self.hidden_dim)
def forward(self, hidden_states: torch.FloatTensor):
hidden_states = self.norm(hidden_states)
hidden_states = self.linear_1(hidden_states)
hidden_states = self.act_fn(hidden_states)
hidden_states = self.linear_2(hidden_states)
return hidden_states
| HubertAttnAdapterLayer |
python | scipy__scipy | scipy/stats/_multivariate.py | {
"start": 229156,
"end": 230333
} | class ____(multi_rv_frozen):
def __init__(self, alpha, n, seed=None):
alpha, Sa, n = _dirichlet_multinomial_check_parameters(alpha, n)
self.alpha = alpha
self.n = n
self._dist = dirichlet_multinomial_gen(seed)
def logpmf(self, x):
return self._dist.logpmf(x, self.alpha, self.n)
def pmf(self, x):
return self._dist.pmf(x, self.alpha, self.n)
def mean(self):
return self._dist.mean(self.alpha, self.n)
def var(self):
return self._dist.var(self.alpha, self.n)
def cov(self):
return self._dist.cov(self.alpha, self.n)
# Set frozen generator docstrings from corresponding docstrings in
# dirichlet_multinomial and fill in default strings in class docstrings.
for name in ['logpmf', 'pmf', 'mean', 'var', 'cov']:
method = dirichlet_multinomial_gen.__dict__[name]
method_frozen = dirichlet_multinomial_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_mn_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__,
dirichlet_mn_docdict_params)
| dirichlet_multinomial_frozen |
python | huggingface__transformers | tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py | {
"start": 1367,
"end": 1655
} | class ____(ConfigTester):
def create_and_test_config_common_properties(self):
config = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(config, "tf_padding"))
self.parent.assertTrue(hasattr(config, "depth_multiplier"))
| MobileNetV1ConfigTester |
python | getsentry__sentry | tests/sentry/core/endpoints/test_organization_member_invite_details.py | {
"start": 1063,
"end": 1332
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-member-invite-details"
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
@with_feature("organizations:new-organization-member-invite")
| OrganizationMemberInviteTestBase |
python | redis__redis-py | tests/test_asyncio/test_pubsub.py | {
"start": 22502,
"end": 22777
} | class ____:
async def test_channel_subscribe(self, r: redis.Redis):
r = redis.Redis(host="localhost", port=6390)
p = r.pubsub()
with pytest.raises(ConnectionError):
await p.subscribe("foo")
@pytest.mark.onlynoncluster
| TestPubSubRedisDown |
python | walkccc__LeetCode | solutions/1622. Fancy Sequence/1622.py | {
"start": 0,
"end": 1162
} | class ____:
def __init__(self):
self.MOD = 1_000_000_007
# For each `val` in `vals`, it actually represents a * val + b.
self.vals = []
self.a = 1
self.b = 0
# To undo a * val + b and get the original value, we append (val - b) // a.
# By Fermat's little theorem:
# a^(p - 1) ≡ 1 (mod p)
# a^(p - 2) ≡ a^(-1) (mod p)
# So, (val - b) / a ≡ (val - b) * a^(p - 2) (mod p)
def append(self, val: int) -> None:
x = (val - self.b + self.MOD) % self.MOD
self.vals.append(x * pow(self.a, self.MOD - 2, self.MOD))
# If the value is a * val + b, then the value after adding by `inc` will be
# a * val + b + inc. So, we adjust b to b + inc.
def addAll(self, inc: int) -> None:
self.b = (self.b + inc) % self.MOD
# If the value is a * val + b, then the value after multiplying by `m` will
# be a * m * val + b * m. So, we adjust a to a * m and b to b * m.
def multAll(self, m: int) -> None:
self.a = (self.a * m) % self.MOD
self.b = (self.b * m) % self.MOD
def getIndex(self, idx: int) -> int:
return (-1 if idx >= len(self.vals)
else (self.a * self.vals[idx] + self.b) % self.MOD)
| Fancy |
python | jina-ai__jina | jina/proto/serializer.py | {
"start": 6931,
"end": 7917
} | class ____:
"""Placeholder that delegates the serialization and deserialization to the internal protobuf"""
@staticmethod
def SerializeToString(x: 'SingleDocumentRequest'):
"""
# noqa: DAR101
# noqa: DAR102
# noqa: DAR201
"""
if not x.is_decompressed:
r = x.buffer
else:
r = x.proto.SerializePartialToString()
os.environ['JINA_GRPC_SEND_BYTES'] = str(
len(r) + int(os.environ.get('JINA_GRPC_SEND_BYTES', 0))
)
return r
@staticmethod
def FromString(x: bytes):
"""
# noqa: DAR101
# noqa: DAR102
# noqa: DAR201
"""
import os
if x:
os.environ['JINA_GRPC_RECV_BYTES'] = str(
len(x) + int(os.environ.get('JINA_GRPC_RECV_BYTES', 0))
)
return SingleDocumentRequest(x)
else:
return SingleDocumentRequest()
| SingleDocumentRequestProto |
python | huggingface__transformers | src/transformers/models/segformer/image_processing_segformer_fast.py | {
"start": 1781,
"end": 9225
} | class ____(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"height": 512, "width": 512}
default_to_square = True
crop_size = None
do_resize = True
do_center_crop = None
do_rescale = True
do_normalize = True
do_reduce_labels = False
valid_kwargs = SegformerImageProcessorKwargs
rescale_factor = 1 / 255
def __init__(self, **kwargs: Unpack[SegformerImageProcessorKwargs]):
super().__init__(**kwargs)
def reduce_label(self, labels: list["torch.Tensor"]):
for idx in range(len(labels)):
label = labels[idx]
label = torch.where(label == 0, torch.tensor(255, dtype=label.dtype), label)
label = label - 1
label = torch.where(label == 254, torch.tensor(255, dtype=label.dtype), label)
labels[idx] = label
return label
@auto_docstring
def preprocess(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput] = None,
**kwargs: Unpack[SegformerImageProcessorKwargs],
) -> BatchFeature:
r"""
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps to preprocess.
"""
return super().preprocess(images, segmentation_maps, **kwargs)
def _preprocess_image_like_inputs(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput],
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Optional[Union[str, "torch.device"]] = None,
**kwargs: Unpack[SegformerImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs.
"""
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
images_kwargs = kwargs.copy()
images_kwargs["do_reduce_labels"] = False
batch_feature = self._preprocess(images, **images_kwargs)
if segmentation_maps is not None:
processed_segmentation_maps = self._prepare_image_like_inputs(
images=segmentation_maps,
expected_ndims=2,
do_convert_rgb=False,
input_data_format=ChannelDimension.FIRST,
)
segmentation_maps_kwargs = kwargs.copy()
segmentation_maps_kwargs.update(
{
"do_normalize": False,
"do_rescale": False,
# Nearest interpolation is used for segmentation maps instead of BILINEAR.
"interpolation": F.InterpolationMode.NEAREST_EXACT,
}
)
processed_segmentation_maps = self._preprocess(
images=processed_segmentation_maps, **segmentation_maps_kwargs
).pixel_values
batch_feature["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64)
return batch_feature
def _preprocess(
self,
images: list["torch.Tensor"],
do_reduce_labels: bool,
interpolation: Optional["F.InterpolationMode"],
do_resize: bool,
do_rescale: bool,
do_normalize: bool,
size: SizeDict,
rescale_factor: float,
image_mean: Union[float, list[float]],
image_std: Union[float, list[float]],
disable_grouping: bool,
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature: # Return type can be list if return_tensors=None
if do_reduce_labels:
images = self.reduce_label(images) # Apply reduction if needed
# Group images by size for batched resizing
resized_images = images
if do_resize:
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
for shape, stacked_images in grouped_images.items():
resized_stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation)
resized_images_grouped[shape] = resized_stacked_images
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
# Group images by size for further processing (rescale/normalize)
# Needed in case do_resize is False, or resize returns images with different sizes
grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
# Stack images into a single tensor if return_tensors is set
processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]] = None):
"""
Converts the output of [`SegformerForSemanticSegmentation`] into semantic segmentation maps.
Args:
outputs ([`SegformerForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry of each `torch.Tensor` correspond to a semantic class id.
"""
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
__all__ = ["SegformerImageProcessorFast"]
| SegformerImageProcessorFast |
python | python-pillow__Pillow | src/PIL/ImageFilter.py | {
"start": 894,
"end": 1201
} | class ____(MultibandFilter):
filterargs: tuple[Any, ...]
def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore:
if image.mode == "P":
msg = "cannot filter palette images"
raise ValueError(msg)
return image.filter(*self.filterargs)
| BuiltinFilter |
python | pytorch__pytorch | torch/autograd/profiler_legacy.py | {
"start": 684,
"end": 12210
} | class ____:
"""DEPRECATED: use torch.profiler instead."""
def __init__(
self,
enabled=True,
*,
use_cuda=False,
record_shapes=False,
with_flops=False,
profile_memory=False,
with_stack=False,
with_modules=False,
):
self.enabled: bool = enabled
if not self.enabled:
return
self.use_cuda = use_cuda
self.function_events = None
self.entered = False
self.record_shapes = record_shapes
self.with_flops = with_flops
self.record_shapes |= self.with_flops
self.profile_memory = profile_memory
self.with_stack = with_stack
self.with_modules = with_modules
if self.use_cuda and not torch.cuda.is_available():
warnings.warn(
"CUDA is not available, disabling CUDA profiling",
stacklevel=2,
)
self.use_cuda = False
if self.use_cuda:
self.profiler_kind = ProfilerState.CUDA
else:
self.profiler_kind = ProfilerState.CPU
def config(self):
return ProfilerConfig(
self.profiler_kind,
self.record_shapes,
self.profile_memory,
self.with_stack,
self.with_flops,
self.with_modules,
# avoid exposing _ExperimentalConfig this in legacy public API
torch._C._profiler._ExperimentalConfig(),
)
def __enter__(self):
if not self.enabled:
return
if self.entered:
raise RuntimeError("Profiler context manager is not reentrant")
self.entered = True
self._start_trace()
return self
def _start_trace(self):
_enable_profiler_legacy(self.config())
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.enabled:
return
if self.use_cuda:
torch.cuda.synchronize()
records = _disable_profiler_legacy()
parsed_results = _parse_legacy_records(records)
# pyrefly: ignore [bad-assignment]
self.function_events = EventList(
parsed_results,
use_device="cuda" if self.use_cuda else None,
profile_memory=self.profile_memory,
with_flops=self.with_flops,
)
# pyrefly: ignore [missing-attribute]
self.function_events._build_tree()
return False
def __repr__(self):
if self.function_events is None:
return "<unfinished profiler_legacy.profile>"
return repr(self.function_events)
def __str__(self):
if self.function_events is None:
return "<unfinished profile.profiler_legacy.profile>"
return str(self.function_events)
def _check_finish(self):
if self.function_events is None:
raise RuntimeError("Profiler didn't finish running")
def table(
self,
sort_by=None,
row_limit=100,
max_src_column_width=75,
max_name_column_width=55,
max_shapes_column_width=80,
header=None,
top_level_events_only=False,
):
self._check_finish()
if self.function_events is None:
raise AssertionError("Expected profiling results")
return self.function_events.table(
sort_by=sort_by,
row_limit=row_limit,
max_src_column_width=max_src_column_width,
max_name_column_width=max_name_column_width,
max_shapes_column_width=max_shapes_column_width,
header=header,
top_level_events_only=top_level_events_only,
)
table.__doc__ = EventList.table.__doc__
def export_chrome_trace(self, path):
self._check_finish()
if self.function_events is None:
raise AssertionError("Expected profiling results")
return self.function_events.export_chrome_trace(path)
export_chrome_trace.__doc__ = EventList.export_chrome_trace.__doc__
def export_stacks(self, path: str, metric: str = "self_cpu_time_total"):
self._check_finish()
if self.function_events is None:
raise AssertionError("Expected profiling results")
if not self.with_stack:
raise AssertionError("export_stacks() requires with_stack=True")
return self.function_events.export_stacks(path, metric)
def key_averages(self, group_by_input_shape=False, group_by_stack_n=0):
self._check_finish()
if self.function_events is None:
raise AssertionError("Expected profiling results")
return self.function_events.key_averages(group_by_input_shape, group_by_stack_n)
key_averages.__doc__ = EventList.key_averages.__doc__
def total_average(self):
self._check_finish()
if self.function_events is None:
raise AssertionError("Expected profiling results")
return self.function_events.total_average()
total_average.__doc__ = EventList.total_average.__doc__
@property
def self_cpu_time_total(self):
"""Return CPU time as the sum of self times across all events."""
self._check_finish()
if self.function_events is None:
raise AssertionError("Expected profiling results")
return self.function_events.self_cpu_time_total
def _parse_legacy_records(thread_records):
def _get_record_key(record):
"""Return a tuple for correlating start and end records in `_parse_legacy_records`."""
return (record.handle(), record.node_id())
start_record = None
functions = []
# '__start_profile' is not guaranteed to be first, so we must find it here
for record in itertools.chain.from_iterable(thread_records):
name = record.name()
if start_record is None and name == "__start_profile":
start_record = record
if start_record is None or start_record.is_remote():
raise AssertionError("Expected a valid local start_record")
for thread_record_list in thread_records:
# accumulated memory allocations per handle
cpu_memory_allocs = {}
cuda_memory_allocs = {}
# ranges per handle
range_starts = {}
filtered_handles = set()
prev_record = None
for record in thread_record_list:
record_key = _get_record_key(record)
if _filter_name(record.name()) or record_key in filtered_handles:
filtered_handles.add(record_key)
continue
if record.kind() == "push":
# workaround to reduce double logging from operator
# wrappers and redispatch
if prev_record is not None:
duplicate = (
prev_record.name() == record.name()
and prev_record.kind() == record.kind()
and prev_record.node_id() == record.node_id()
)
if duplicate:
filtered_handles.add(record_key)
continue
range_starts[record_key] = record
cpu_memory_allocs[record_key] = 0
cuda_memory_allocs[record_key] = 0
elif record.kind() == "pop":
if record_key not in range_starts:
raise AssertionError(
f"Expected record with key {record_key} to exist in range_starts. "
"This means that the pop event did not have a corresponding push."
)
start = range_starts[record_key]
cpu_memory_usage = cpu_memory_allocs[record_key]
cuda_memory_usage = cuda_memory_allocs[record_key]
is_async = start.is_async() or (start.thread_id() != record.thread_id())
is_remote_event = record.is_remote()
start_flops = start.flops()
fe = FunctionEvent(
id=record.handle(),
node_id=record.node_id(),
name=_rewrite_name(name=start.name(), with_wildcard=True),
trace_name=_rewrite_name(name=start.name(), with_wildcard=False),
thread=start.thread_id(),
start_us=start_record.cpu_elapsed_us(start),
end_us=start_record.cpu_elapsed_us(record),
fwd_thread=start.fwd_thread_id(),
input_shapes=start.shapes(),
stack=[
entry for entry in start.stack() if _filter_stack_entry(entry)
],
scope=start.scope(),
use_device="cuda" if start.has_cuda() else None,
cpu_memory_usage=cpu_memory_usage,
device_memory_usage=cuda_memory_usage,
is_async=is_async,
is_remote=is_remote_event,
sequence_nr=start.sequence_nr(),
device_type=DeviceType.CPU,
is_legacy=True,
flops=start_flops,
)
# note: async events have only cpu total time
if not is_async and start.has_cuda():
duration = start.cuda_elapsed_us(record)
if duration > 0:
fe.append_kernel(start.name(), start.device(), duration)
functions.append(fe)
del range_starts[record_key]
del cpu_memory_allocs[record_key]
del cuda_memory_allocs[record_key]
elif record.kind() == "memory_alloc":
num_open_handles_cpu = len(cpu_memory_allocs)
num_open_handles_cuda = len(cuda_memory_allocs)
if num_open_handles_cpu != num_open_handles_cuda:
raise AssertionError(
f"Expected CPU and CUDA memory allocation handles to match, "
f"but got {num_open_handles_cpu} CPU and {num_open_handles_cuda} CUDA"
)
for handle in cpu_memory_allocs:
cpu_memory_allocs[handle] += record.cpu_memory_usage()
for handle in cuda_memory_allocs:
cuda_memory_allocs[handle] += record.cuda_memory_usage()
if num_open_handles_cpu == 0:
# output event as a top-level memory event
fe = FunctionEvent(
id=0,
name=MEMORY_EVENT_NAME,
trace_name=None,
thread=0,
start_us=0,
end_us=0,
stack=[],
cpu_memory_usage=record.cpu_memory_usage(),
device_memory_usage=record.cuda_memory_usage(),
is_legacy=True,
)
functions.append(fe)
prev_record = record
# Sort functions by start time then by end time ascending.
# This ensures that--in the case of nested events which
# have the same start time (which may happen due to the
# granularity of the given clock tick)--we always show
# the outermost nested call first. This adds stability
# in how FunctionEvents appear
functions.sort(key=lambda evt: [evt.time_range.start, -evt.time_range.end])
return functions
| profile |
python | getsentry__sentry | tests/sentry/sentry_metrics/test_base_indexer.py | {
"start": 7419,
"end": 15045
} | class ____(TestCase):
def test_basic(self) -> None:
use_case_key_results = UseCaseKeyResults()
assert use_case_key_results.results == {}
assert use_case_key_results.get_mapped_results() == {}
assert use_case_key_results.get_mapped_strings_to_ints() == {}
use_case_collection = UseCaseKeyCollection(
{
UseCaseID.SPANS: {1: {"a", "b", "c"}, 2: {"e", "f"}},
UseCaseID.TRANSACTIONS: {1: {"a", "j"}},
UseCaseID.SESSIONS: {5: {"a", "c"}},
}
)
assert (
use_case_key_results.get_unmapped_use_case_keys(use_case_collection)
== use_case_collection
)
results_with_meta = [
(
[
UseCaseKeyResult(use_case_id=UseCaseID.SPANS, org_id=1, string="a", id=1),
],
None,
),
(
[
UseCaseKeyResult(use_case_id=UseCaseID.SPANS, org_id=1, string="c", id=2),
UseCaseKeyResult(
use_case_id=UseCaseID.TRANSACTIONS, org_id=1, string="a", id=3
),
UseCaseKeyResult(
use_case_id=UseCaseID.TRANSACTIONS, org_id=1, string="j", id=4
),
],
FetchType.CACHE_HIT,
),
(
[
UseCaseKeyResult(
use_case_id=UseCaseID.ESCALATING_ISSUES, org_id=2, string="j", id=5
),
],
FetchType.FIRST_SEEN,
),
]
for results, meta in results_with_meta:
use_case_key_results.add_use_case_key_results(results, meta)
assert use_case_key_results.get_mapped_results() == {
UseCaseID.SPANS: {1: {"a": 1, "c": 2}},
UseCaseID.TRANSACTIONS: {1: {"a": 3, "j": 4}},
UseCaseID.ESCALATING_ISSUES: {2: {"j": 5}},
}
assert use_case_key_results.get_fetch_metadata() == {
UseCaseID.SPANS: defaultdict(
dict, {1: {"c": Metadata(id=2, fetch_type=FetchType.CACHE_HIT)}}
),
UseCaseID.TRANSACTIONS: defaultdict(
dict,
{
1: {
"a": Metadata(id=3, fetch_type=FetchType.CACHE_HIT),
"j": Metadata(id=4, fetch_type=FetchType.CACHE_HIT),
}
},
),
UseCaseID.ESCALATING_ISSUES: defaultdict(
dict, {2: {"j": Metadata(id=5, fetch_type=FetchType.FIRST_SEEN)}}
),
}
assert use_case_key_results.get_unmapped_use_case_keys(
use_case_collection
) == UseCaseKeyCollection(
{
UseCaseID.SPANS: {1: {"b"}, 2: {"e", "f"}},
UseCaseID.SESSIONS: {5: {"a", "c"}},
}
)
assert use_case_key_results.get_mapped_strings_to_ints() == {
"spans:1:a": 1,
"spans:1:c": 2,
"transactions:1:a": 3,
"transactions:1:j": 4,
"escalating_issues:2:j": 5,
}
def test_merge(self) -> None:
use_case_key_results_1 = UseCaseKeyResults()
use_case_key_results_2 = UseCaseKeyResults()
assert (
use_case_key_results_1.merge(UseCaseKeyResults()).merge(UseCaseKeyResults())
== UseCaseKeyResults()
)
results_with_meta_1 = [
(
[
UseCaseKeyResult(use_case_id=UseCaseID.SPANS, org_id=1, string="a", id=1),
],
None,
),
(
[
UseCaseKeyResult(use_case_id=UseCaseID.SPANS, org_id=1, string="c", id=2),
UseCaseKeyResult(
use_case_id=UseCaseID.TRANSACTIONS, org_id=1, string="a", id=3
),
UseCaseKeyResult(use_case_id=UseCaseID.SESSIONS, org_id=1, string="e", id=4),
],
FetchType.CACHE_HIT,
),
(
[
UseCaseKeyResult(use_case_id=UseCaseID.SESSIONS, org_id=2, string="e", id=5),
],
FetchType.FIRST_SEEN,
),
]
results_with_meta_2 = [
(
[
UseCaseKeyResult(use_case_id=UseCaseID.SPANS, org_id=1, string="a", id=1),
],
None,
),
(
[
UseCaseKeyResult(use_case_id=UseCaseID.SPANS, org_id=1, string="c", id=2),
UseCaseKeyResult(use_case_id=UseCaseID.SPANS, org_id=1, string="d", id=3),
UseCaseKeyResult(
use_case_id=UseCaseID.TRANSACTIONS, org_id=2, string="a", id=4
),
UseCaseKeyResult(
use_case_id=UseCaseID.ESCALATING_ISSUES, org_id=1, string="e", id=5
),
],
FetchType.CACHE_HIT,
),
(
[
UseCaseKeyResult(use_case_id=UseCaseID.SESSIONS, org_id=2, string="e", id=5),
],
FetchType.FIRST_SEEN,
),
]
for results, meta in results_with_meta_1:
use_case_key_results_1.add_use_case_key_results(results, meta)
assert (
use_case_key_results_1.merge(UseCaseKeyResults()).merge(UseCaseKeyResults())
== use_case_key_results_1
)
assert (
UseCaseKeyResults().merge(UseCaseKeyResults()).merge(use_case_key_results_1)
== use_case_key_results_1
)
for results, meta in results_with_meta_2:
use_case_key_results_2.add_use_case_key_results(results, meta)
assert use_case_key_results_1.merge(use_case_key_results_2) == use_case_key_results_2.merge(
use_case_key_results_1
)
assert use_case_key_results_1.merge(use_case_key_results_2).get_mapped_results() == {
UseCaseID.SPANS: {1: {"a": 1, "c": 2, "d": 3}},
UseCaseID.TRANSACTIONS: {1: {"a": 3}, 2: {"a": 4}},
UseCaseID.SESSIONS: {1: {"e": 4}, 2: {"e": 5}},
UseCaseID.ESCALATING_ISSUES: {1: {"e": 5}},
}
assert use_case_key_results_1.merge(use_case_key_results_2).get_fetch_metadata() == {
UseCaseID.SPANS: defaultdict(
dict,
{
1: {
"c": Metadata(id=2, fetch_type=FetchType.CACHE_HIT),
"d": Metadata(id=3, fetch_type=FetchType.CACHE_HIT),
}
},
),
UseCaseID.TRANSACTIONS: defaultdict(
dict,
{
1: {"a": Metadata(id=3, fetch_type=FetchType.CACHE_HIT)},
2: {"a": Metadata(id=4, fetch_type=FetchType.CACHE_HIT)},
},
),
UseCaseID.SESSIONS: defaultdict(
dict,
{
1: {"e": Metadata(id=4, fetch_type=FetchType.CACHE_HIT)},
2: {"e": Metadata(id=5, fetch_type=FetchType.FIRST_SEEN)},
},
),
UseCaseID.ESCALATING_ISSUES: defaultdict(
dict, {1: {"e": Metadata(id=5, fetch_type=FetchType.CACHE_HIT)}}
),
}
| UseCaseResultsTest |
python | getsentry__sentry | src/sentry/api/helpers/group_index/validators/inbox_details.py | {
"start": 67,
"end": 172
} | class ____(serializers.Serializer[Never]):
# Support undo / snooze reasons
pass
| InboxDetailsValidator |
python | pytorch__pytorch | torch/distributed/checkpoint/_experimental/barriers.py | {
"start": 5214,
"end": 9137
} | class ____(Barrier):
"""
A barrier implementation using PyTorch's TCPStore for synchronization.
This barrier uses a TCP-based distributed key-value store to coordinate
synchronization across multiple processes. It uses a single TCP store
for all barrier operations, with different prefixes to distinguish between
different barrier types.
"""
barrier_type = "tcp_store"
def __init__(
self,
global_rank: int,
global_world_size: int,
barrier_prefix: str,
timeout_barrier_init_secs: int,
use_checkpoint_barrier_tcpstore_libuv: bool,
tcpstore_port: int,
master_address: str,
timeout_secs: int,
):
"""
Initialize a TCPStoreBarrier.
Args:
global_rank: The rank of the current process in the distributed environment.
global_world_size: The total number of processes in the distributed environment.
barrier_prefix: A string prefix to identify this specific barrier.
timeout_barrier_init_secs: Timeout in seconds for initializing the TCPStore.
use_checkpoint_barrier_tcpstore_libuv: Whether to use libuv for the TCPStore.
tcpstore_port: Port number for the TCPStore.
master_address: Address of the master node for the TCPStore.
timeout_secs: Maximum time in seconds to wait for all ranks to reach the barrier.
"""
logger.info(
"Initializing TCPStore master_address=%s tcpstore_port=%s rank=%s "
"world_size=%s barrier_prefix=%s timeout_barrier_init_secs=%s "
"use_checkpoint_barrier_tcpstore_libuv=%s timeout_secs=%s",
master_address,
tcpstore_port,
global_rank,
global_world_size,
barrier_prefix,
timeout_barrier_init_secs,
use_checkpoint_barrier_tcpstore_libuv,
timeout_secs,
)
# Counter collection to track barrier seq on a per barrier prefix basis.
self._tcp_store_barrier_seq: Counter = Counter()
self._barrier_prefix = barrier_prefix
# Store rank and world size for barrier operations
self._global_rank = global_rank
self._global_world_size = global_world_size
self._timeout_secs = timeout_secs
# Create a single TCP store for all barrier operations
self._tcp_store = dist.TCPStore(
master_address,
int(tcpstore_port),
world_size=self._global_world_size,
timeout=timedelta(seconds=timeout_barrier_init_secs),
is_master=(self._global_rank == 0),
)
def execute_barrier(self) -> None:
"""
Execute a synchronization barrier using the prefix provided during initialization.
The implementation uses a sequence number that is incremented every time
a barrier is reached. The sequence number is per barrier prefix to allow
different barriers to operate concurrently.
"""
barrier_prefix = self._barrier_prefix
logger.info(
"Executing barrier barrier_prefix=%s timeout_secs=%s",
barrier_prefix,
self._timeout_secs,
)
def _rank_key(rank: int) -> str:
return f"rank{rank}"
# Track which barrier sequence this rank is joining.
self._tcp_store.set(
_rank_key(self._global_rank),
str(self._tcp_store_barrier_seq[barrier_prefix]),
)
# Execute barrier for that sequence number (for the specific prefix).
store_util.barrier(
store=self._tcp_store,
world_size=self._global_world_size,
key_prefix=(
barrier_prefix + str(self._tcp_store_barrier_seq[barrier_prefix])
),
)
self._tcp_store_barrier_seq[barrier_prefix] += 1
| TCPStoreBarrier |
python | huggingface__transformers | src/transformers/models/edgetam_video/modular_edgetam_video.py | {
"start": 31171,
"end": 31239
} | class ____(Sam2VideoMemoryEncoder):
pass
| EdgeTamVideoMemoryEncoder |
python | getsentry__sentry | src/sentry/auth/access.py | {
"start": 25484,
"end": 26084
} | class ____(OrganizationGlobalAccess):
"""Access to all an organization's teams and projects with simulated membership."""
@property
def team_ids_with_membership(self) -> frozenset[int]:
return self.accessible_team_ids
@property
def project_ids_with_team_membership(self) -> frozenset[int]:
return self.accessible_project_ids
def has_team_membership(self, team: Team) -> bool:
return self.has_team_access(team)
def has_project_membership(self, project: Project) -> bool:
return self.has_project_access(project)
| OrganizationGlobalMembership |
python | keon__algorithms | algorithms/maths/polynomial.py | {
"start": 10324,
"end": 22052
} | class ____:
"""
A simple implementation
of a polynomial class that
records the details about two polynomials
that are potentially comprised of multiple
variables.
"""
def __init__(self, monomials: Iterable[Union[int, float, Fraction, Monomial]]) -> None:
'''
Create a polynomial in the given variables:
Examples:
Polynomial([
Monomial({1:1}, 2),
Monomial({2:3, 1:-1}, -1),
math.pi,
Fraction(-1, 2)
]) = (a_1)^2 + (-1)(a_2)^3(a_1)^(-1) + 2.6415926536
Polynomial([]) = 0
'''
self.monomials = set()
for m in monomials:
if any(map(lambda x: isinstance(m, x), [int, float, Fraction])):
self.monomials |= {Monomial({}, m)}
elif isinstance(m, Monomial):
self.monomials |= {m}
else:
raise ValueError('Iterable should have monomials, int, float, or Fraction.')
self.monomials -= {Monomial({}, 0)}
@staticmethod
def _rationalize_if_possible(num):
'''
A helper for converting numbers
to Fraction only when possible.
'''
if isinstance(num, Rational):
res = Fraction(num, 1)
return Fraction(res.numerator, res.denominator)
else:
return num
# def __add__(self, other: Union[int, float, Fraction, Monomial, Polynomial]) -> Polynomial:
def __add__(self, other: Union[int, float, Fraction, Monomial]):
"""
Add a given poylnomial to a copy of self.
"""
if isinstance(other, int) or isinstance(other, float) or isinstance(other, Fraction):
return self.__add__(Monomial({}, Polynomial._rationalize_if_possible(other)))
elif isinstance(other, Monomial):
monos = {m.clone() for m in self.monomials}
for _own_monos in monos:
if _own_monos.equal_upto_scalar(other):
scalar = _own_monos.coeff
monos -= {_own_monos}
temp_variables = {i: other.variables[i] for i in other.variables}
monos |= {Monomial(temp_variables, Polynomial._rationalize_if_possible(scalar + other.coeff))}
return Polynomial([z for z in monos])
monos |= {other.clone()}
return Polynomial([z for z in monos])
elif isinstance(other, Polynomial):
temp = list(z for z in {m.clone() for m in self.all_monomials()})
p = Polynomial(temp)
for o in other.all_monomials():
p = p.__add__(o.clone())
return p
else:
raise ValueError('Can only add int, float, Fraction, Monomials, or Polynomials to Polynomials.')
# def __sub__(self, other: Union[int, float, Fraction, Monomial, Polynomial]) -> Polynomial:
def __sub__(self, other: Union[int, float, Fraction, Monomial]):
    """
    Return a new Polynomial equal to self - other.

    Plain numbers are wrapped as constant Monomials; for a Monomial the
    like term (if any) has its coefficient reduced, otherwise the negated
    monomial is appended; a Polynomial is subtracted term by term.
    self is never mutated. Raises ValueError for unsupported operand types.
    """
    if isinstance(other, (int, float, Fraction)):
        # Promote the scalar to a constant monomial and reuse the Monomial path.
        return self.__sub__(Monomial({}, Polynomial._rationalize_if_possible(other)))
    elif isinstance(other, Monomial):
        monos = {m.clone() for m in self.all_monomials()}
        for own_mono in monos:
            if own_mono.equal_upto_scalar(other):
                # Like term found: rebuild it with the reduced coefficient.
                scalar = own_mono.coeff
                monos -= {own_mono}
                temp_variables = {i: other.variables[i] for i in other.variables}
                monos |= {Monomial(temp_variables, Polynomial._rationalize_if_possible(scalar - other.coeff))}
                # Mutating `monos` during iteration is only safe because we
                # return immediately -- the set iterator never advances again.
                return Polynomial([z for z in monos])
        # No like term: append the negated monomial.
        to_insert = other.clone()
        to_insert.coeff *= -1
        monos |= {to_insert}
        return Polynomial([z for z in monos])
    elif isinstance(other, Polynomial):
        # Subtract the other polynomial one monomial at a time.
        p = Polynomial(list(z for z in {m.clone() for m in self.all_monomials()}))
        for o in other.all_monomials():
            p = p.__sub__(o.clone())
        return p
    else:
        raise ValueError('Can only subtract int, float, Fraction, Monomials, or Polynomials from Polynomials.')
    # (Removed an unreachable bare `return` that followed the raise.)
# def __mul__(self, other: Union[int, float, Fraction, Monomial, Polynomial]) -> Polynomial:
def __mul__(self, other: Union[int, float, Fraction, Monomial]):
    """
    Return a new Polynomial equal to self * other.

    Numbers and Monomials multiply every term of self (Monomial.__mul__
    accepts plain numbers, so both cases share one implementation --
    the original had two byte-identical branches). A Polynomial operand
    is multiplied via the full cross product of terms. self is never
    mutated. Raises ValueError for unsupported operand types.
    """
    if isinstance(other, (int, float, Fraction, Monomial)):
        result = Polynomial([])
        for mono in self.all_monomials():
            # Adding through Polynomial.__add__ merges like terms as we go.
            result = result + mono.clone() * other
        return result
    elif isinstance(other, Polynomial):
        # Distribute: every term of self against every term of other.
        result = Polynomial([])
        for own_mono in {m.clone() for m in self.all_monomials()}:
            for other_mono in {m.clone() for m in other.all_monomials()}:
                result = result + own_mono * other_mono
        return result
    else:
        # Fixed typo in the original message: "multiple" -> "multiply".
        raise ValueError('Can only multiply int, float, Fraction, Monomials, or Polynomials with Polynomials.')
def __floordiv__(self, other: Union[int, float, Fraction, Monomial]):
    """
    Floor division of polynomials is defined identically to true
    division here: delegate to __truediv__ (any remainder is discarded).
    """
    return self / other
# def __truediv__(self, other: Union[int, float, Fraction, Monomial, Polynomial]) -> Polynomial:
def __truediv__(self, other: Union[int, float, Fraction, Monomial]):
    """
    Return the quotient of self divided by other.

    Scalar divisors are promoted to constant Monomials; a Monomial
    divisor divides term by term; a Polynomial divisor triggers long
    division and the remainder is discarded (use poly_long_division()
    to capture it). Raises ValueError for unsupported operand types.
    """
    if isinstance(other, (int, float, Fraction)):
        return self.__truediv__(Monomial({}, other))
    elif isinstance(other, Monomial):
        # Divide every term by the monomial and re-sum the results.
        poly_temp = reduce(lambda acc, val: acc + val, map(lambda x: x / other, [z for z in self.all_monomials()]), Polynomial([Monomial({}, 0)]))
        return poly_temp
    elif isinstance(other, Polynomial):
        quotient, remainder = self.poly_long_division(other)
        return quotient  # remainder intentionally ignored here
    raise ValueError('Can only divide a polynomial by an int, float, Fraction, Monomial, or Polynomial.')
    # (Removed an unreachable bare `return` that followed the raise.)
def clone(self):
    """Return a deep copy: a new Polynomial built from clones of every term."""
    copied_terms = [mono.clone() for mono in self.all_monomials()]
    return Polynomial(copied_terms)
def variables(self) -> Set:
    """Return the set of variable indices appearing in any term of self."""
    # The original built a set, converted it to a list, then back to a
    # set; one nested comprehension yields the identical result.
    return {var for mono in self.all_monomials() for var in mono.variables}
def all_monomials(self) -> Iterable[Monomial]:
    """Return the nonzero terms of this polynomial as a set of Monomials."""
    zero = Monomial({}, 0)
    return {mono for mono in self.monomials if mono != zero}
def __eq__(self, other) -> bool:
    """
    Compare for equality with a number, Monomial, or Polynomial.

    Numbers and Monomials are normalized to single-term Polynomials;
    two Polynomials are equal when their nonzero term sets match.
    Raises ValueError for unsupported operand types.
    """
    if isinstance(other, (int, float, Fraction)):
        return self.__eq__(Polynomial([Monomial({}, other)]))
    if isinstance(other, Monomial):
        return self.__eq__(Polynomial([other]))
    if isinstance(other, Polynomial):
        return self.all_monomials() == other.all_monomials()
    raise ValueError('Can only compare a polynomial with an int, float, Fraction, Monomial, or another Polynomial.')
def subs(self, substitutions: Union[int, float, Fraction, Dict[int, Union[int, float, Fraction]]]) -> Union[int, float, Fraction]:
    """
    Evaluate the polynomial at the given variable assignment.

    A bare number substitutes that value for every variable; otherwise
    `substitutions` must be a dict covering every variable index used
    by the polynomial. Raises ValueError on a non-dict argument or on
    missing variables.
    """
    if isinstance(substitutions, (int, float, Fraction)):
        value = Polynomial._rationalize_if_possible(substitutions)
        return self.subs({var: value for var in set(self.variables())})
    if not isinstance(substitutions, dict):
        raise ValueError('The substitutions should be a dictionary.')
    if not self.variables().issubset(set(substitutions.keys())):
        raise ValueError('Some variables didn\'t receive their values.')
    total = 0
    for mono in self.all_monomials():
        total += Polynomial._rationalize_if_possible(mono.substitute(substitutions))
    return Polynomial._rationalize_if_possible(total)
def __str__(self) -> str:
    """Render the polynomial as its nonzero terms joined by ' + '."""
    def term_key(mono):
        # Order terms by their (variable, exponent) pairs, highest first --
        # the same key the original used inline.
        return sorted(mono.variables.items(), reverse=True)
    ordered = sorted(self.all_monomials(), key=term_key, reverse=True)
    parts = [str(mono) for mono in ordered if mono.coeff != Fraction(0, 1)]
    return ' + '.join(parts)
def poly_long_division(self, other: 'Polynomial') -> tuple['Polynomial', 'Polynomial']:
"""
Perform polynomial long division of self by `other`.

Returns (quotient, remainder), intended to satisfy
self == quotient * other + remainder.
Raises ValueError when `other` is not a Polynomial or is the zero
polynomial.
"""
if not isinstance(other, Polynomial):
raise ValueError("Can only divide by another Polynomial.")
if len(other.all_monomials()) == 0:
raise ValueError("Cannot divide by zero polynomial.")
quotient = Polynomial([])
remainder = self.clone()
# Leading divisor term under the same ordering __str__ uses: sort by
# (variable, exponent) pairs, highest first. For single-variable input
# this is the highest-degree term; NOTE(review): for multivariate input
# this is a lexicographic choice -- confirm it matches the intended
# monomial order.
divisor_monos = sorted(other.all_monomials(), key=lambda m: sorted(m.variables.items(), reverse=True),
reverse=True)
divisor_lead = divisor_monos[0]
# Loop while the remainder is nonzero and still mentions a variable
# index at least as large as any in the divisor (constants give the
# default of -1).
while remainder.all_monomials() and max(remainder.variables(), default=-1) >= max(other.variables(),
default=-1):
remainder_monos = sorted(remainder.all_monomials(), key=lambda m: sorted(m.variables.items(), reverse=True),
reverse=True)
remainder_lead = remainder_monos[0]
# Stop once the leading remainder term is not divisible by the
# leading divisor term (some exponent would go negative).
if not all(remainder_lead.variables.get(var, 0) >= divisor_lead.variables.get(var, 0) for var in
divisor_lead.variables):
break
lead_quotient = remainder_lead / divisor_lead
quotient = quotient + Polynomial([lead_quotient])  # Convert Monomial to Polynomial
# Standard long-division step: subtract (term * divisor) from the
# remainder so its leading term cancels.
remainder = remainder - (
Polynomial([lead_quotient]) * other)  # Convert Monomial to Polynomial before multiplication
return quotient, remainder
# Demo: long-divide 4(a_1)^3 + 3(a_1)^2 - 2(a_1) + 5 by 2(a_1) - 1 and
# print the quotient (the remainder is discarded by __truediv__).
dividend = Polynomial([
Monomial({1: 3}, 4),  # 4(a_1)^3
Monomial({1: 2}, 3),  # 3(a_1)^2
Monomial({1: 1}, -2),  # -2(a_1)
Monomial({}, 5)  # +5
])
divisor = Polynomial([
Monomial({1: 1}, 2),  # 2(a_1)
Monomial({}, -1)  # -1
])
# __truediv__ routes to poly_long_division() and keeps only the quotient.
quotient = dividend / divisor
print("Quotient:", quotient)
| Polynomial |
python | doocs__leetcode | solution/0100-0199/0105.Construct Binary Tree from Preorder and Inorder Traversal/Solution.py | {
"start": 192,
"end": 681
} | class ____:
def buildTree(self, preorder: List[int], inorder: List[int]) -> Optional[TreeNode]:
"""
Rebuild the binary tree from its preorder and inorder traversals.

Assumes both lists describe the same tree and that node values are
unique (required for the inorder index lookup below).
"""
def dfs(i: int, j: int, n: int) -> Optional[TreeNode]:
# i: start of this subtree in `preorder`
# j: start of this subtree in `inorder`
# n: number of nodes in this subtree
if n <= 0:
return None
# The first preorder element is this subtree's root.
v = preorder[i]
# k: root's position in `inorder`; inorder[j:k] is the left
# subtree, so it has k - j nodes.
k = d[v]
l = dfs(i + 1, j, k - j)
r = dfs(i + 1 + k - j, k + 1, n - k + j - 1)
return TreeNode(v, l, r)
# Value -> index in the inorder traversal, for O(1) root lookups.
d = {v: i for i, v in enumerate(inorder)}
return dfs(0, 0, len(preorder))
| Solution |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_glue_crawler.py | {
"start": 1069,
"end": 3633
} | class ____:
def setup_method(self):
self.sensor = GlueCrawlerSensor(
task_id="test_glue_crawler_sensor",
crawler_name="aws_test_glue_crawler",
poke_interval=1,
timeout=5,
aws_conn_id="aws_default",
)
@mock.patch.object(GlueCrawlerHook, "get_crawler")
def test_poke_success(self, mock_get_crawler):
mock_get_crawler.return_value["LastCrawl"]["Status"] = "SUCCEEDED"
assert self.sensor.poke({}) is False
mock_get_crawler.assert_called_once_with("aws_test_glue_crawler")
@mock.patch.object(GlueCrawlerHook, "get_crawler")
def test_poke_failed(self, mock_get_crawler):
mock_get_crawler.return_value["LastCrawl"]["Status"] = "FAILED"
assert self.sensor.poke({}) is False
mock_get_crawler.assert_called_once_with("aws_test_glue_crawler")
@mock.patch.object(GlueCrawlerHook, "get_crawler")
def test_poke_cancelled(self, mock_get_crawler):
mock_get_crawler.return_value["LastCrawl"]["Status"] = "CANCELLED"
assert self.sensor.poke({}) is False
mock_get_crawler.assert_called_once_with("aws_test_glue_crawler")
@mock.patch("airflow.providers.amazon.aws.hooks.glue_crawler.GlueCrawlerHook.get_crawler")
def test_fail_poke(self, get_crawler):
crawler_status = "FAILED"
get_crawler.return_value = {"State": "READY", "LastCrawl": {"Status": crawler_status}}
message = f"Status: {crawler_status}"
with pytest.raises(AirflowException, match=message):
self.sensor.poke(context={})
def test_base_aws_op_attributes(self):
op = GlueCrawlerSensor(
task_id="test_glue_crawler_sensor",
crawler_name="aws_test_glue_crawler",
)
assert op.hook.client_type == "glue"
assert op.hook.aws_conn_id == "aws_default"
assert op.hook._region_name is None
assert op.hook._verify is None
assert op.hook._config is None
op = GlueCrawlerSensor(
task_id="test_glue_crawler_sensor",
crawler_name="aws_test_glue_crawler",
aws_conn_id="aws-test-custom-conn",
region_name="eu-west-1",
verify=False,
botocore_config={"read_timeout": 42},
)
assert op.hook.aws_conn_id == "aws-test-custom-conn"
assert op.hook._region_name == "eu-west-1"
assert op.hook._verify is False
assert op.hook._config is not None
assert op.hook._config.read_timeout == 42
| TestGlueCrawlerSensor |
python | getsentry__sentry | tests/sentry/middleware/test_devtoolbar.py | {
"start": 597,
"end": 6876
} | class ____(TestCase):
middleware = cached_property(DevToolbarAnalyticsMiddleware)
analytics_event_name = DevToolbarApiRequestEvent.type
@cached_property
def factory(self):
return RequestFactory()
def setUp(self) -> None:
# Allows changing the get_response mock for each test.
self.middleware.get_response = MagicMock(return_value=HttpResponse(status=200))
@override_options({"devtoolbar.analytics.enabled": True})
@patch("sentry.analytics.record")
def test_basic(self, mock_record: MagicMock) -> None:
request = self.factory.get("/?queryReferrer=devtoolbar")
view_name = "my-endpoint"
route = "/issues/(?P<issue_id>)/"
request.resolver_match = MagicMock(view_name=view_name, route=route)
self.middleware(request)
event = get_last_analytics_event(mock_record)
assert event.type == self.analytics_event_name
@override_options({"devtoolbar.analytics.enabled": True})
@patch("sentry.analytics.record")
def test_no_devtoolbar_header(self, mock_record: MagicMock) -> None:
request = self.factory.get("/")
request.resolver_match = MagicMock()
self.middleware(request)
mock_record.assert_not_called()
request = self.factory.get("/?queryReferrer=not-toolbar")
request.resolver_match = MagicMock()
self.middleware(request)
mock_record.assert_not_called()
@override_options({"devtoolbar.analytics.enabled": True})
@patch("sentry.middleware.devtoolbar.logger.exception")
@patch("sentry.analytics.record")
def test_request_not_resolved(self, mock_record: MagicMock, mock_logger: MagicMock) -> None:
request = self.factory.get("/?queryReferrer=devtoolbar")
request.resolver_match = None
self.middleware(request)
mock_record.assert_not_called()
mock_logger.assert_called()
#################
# Attribute tests
#################
@override_options({"devtoolbar.analytics.enabled": True})
@patch("sentry.analytics.record")
def test_view_name_and_route(self, mock_record: MagicMock) -> None:
# Integration tests do a better job of testing these fields, since they involve route resolver.
view_name = "my-endpoint"
route = "/issues/(?P<issue_id>)/"
request = self.factory.get("/?queryReferrer=devtoolbar")
request.resolver_match = MagicMock(view_name=view_name, route=route)
self.middleware(request)
event = cast(DevToolbarApiRequestEvent, get_last_analytics_event(mock_record))
assert event.type == self.analytics_event_name
assert event.view_name == view_name
assert event.route == route
@override_options({"devtoolbar.analytics.enabled": True})
@patch("sentry.analytics.record")
def test_query_string(self, mock_record: MagicMock) -> None:
query = "?a=b&statsPeriod=14d&queryReferrer=devtoolbar"
request = self.factory.get("/" + query)
request.resolver_match = MagicMock()
request.resolver_match.view_name = "my-endpoint"
request.resolver_match.route = "/issues/(?P<issue_id>)/"
self.middleware(request)
event = cast(DevToolbarApiRequestEvent, get_last_analytics_event(mock_record))
assert event.type == self.analytics_event_name
assert event.query_string == query
@override_options({"devtoolbar.analytics.enabled": True})
@patch("sentry.analytics.record")
def test_origin(self, mock_record: MagicMock) -> None:
origin = "https://potato.com"
request = self.factory.get(
f"{origin}/?queryReferrer=devtoolbar", headers={"Origin": origin}
)
request.resolver_match = MagicMock(view_name="my-endpoint", route="/issues/(?P<issue_id>)/")
self.middleware(request)
event = cast(DevToolbarApiRequestEvent, get_last_analytics_event(mock_record))
assert event.type == self.analytics_event_name
assert event.origin == origin
@override_options({"devtoolbar.analytics.enabled": True})
@patch("sentry.analytics.record")
def test_origin_from_referrer(self, mock_record: MagicMock) -> None:
origin = "https://potato.com"
url = origin + "/issues/?a=b&queryReferrer=devtoolbar"
request = self.factory.get(url, headers={"Referer": url})
request.resolver_match = MagicMock(view_name="my-endpoint", route="/issues/(?P<issue_id>)/")
self.middleware(request)
event = cast(DevToolbarApiRequestEvent, get_last_analytics_event(mock_record))
assert event.type == self.analytics_event_name
assert event.origin == origin
@override_options({"devtoolbar.analytics.enabled": True})
@patch("sentry.analytics.record")
def test_response_status_code(self, mock_record: MagicMock) -> None:
request = self.factory.get("/?queryReferrer=devtoolbar")
request.resolver_match = MagicMock(view_name="my-endpoint", route="/issues/(?P<issue_id>)/")
self.middleware.get_response.return_value = HttpResponse(status=420)
self.middleware(request)
event = cast(DevToolbarApiRequestEvent, get_last_analytics_event(mock_record))
assert event.type == self.analytics_event_name
assert event.status_code == 420
@override_options({"devtoolbar.analytics.enabled": True})
@patch("sentry.analytics.record")
def test_methods(self, mock_record: MagicMock) -> None:
for method in ["GET", "POST", "PUT", "DELETE"]:
request = getattr(self.factory, method.lower())("/?queryReferrer=devtoolbar")
request.resolver_match = MagicMock(
view_name="my-endpoint", route="/issues/(?P<issue_id>)/"
)
self.middleware(request)
event = cast(DevToolbarApiRequestEvent, get_last_analytics_event(mock_record))
assert event.type == self.analytics_event_name
assert event.method == method
TEST_MIDDLEWARE = (
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"sentry.middleware.auth.AuthenticationMiddleware",
"sentry.middleware.devtoolbar.DevToolbarAnalyticsMiddleware",
)
| DevToolbarAnalyticsMiddlewareUnitTest |
python | kubernetes-client__python | kubernetes/client/models/v1_device_class_spec.py | {
"start": 383,
"end": 7192
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'config': 'list[V1DeviceClassConfiguration]',
'extended_resource_name': 'str',
'selectors': 'list[V1DeviceSelector]'
}
attribute_map = {
'config': 'config',
'extended_resource_name': 'extendedResourceName',
'selectors': 'selectors'
}
def __init__(self, config=None, extended_resource_name=None, selectors=None, local_vars_configuration=None): # noqa: E501
"""V1DeviceClassSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._config = None
self._extended_resource_name = None
self._selectors = None
self.discriminator = None
if config is not None:
self.config = config
if extended_resource_name is not None:
self.extended_resource_name = extended_resource_name
if selectors is not None:
self.selectors = selectors
@property
def config(self):
"""Gets the config of this V1DeviceClassSpec. # noqa: E501
Config defines configuration parameters that apply to each device that is claimed via this class. Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver. They are passed to the driver, but are not considered while allocating the claim. # noqa: E501
:return: The config of this V1DeviceClassSpec. # noqa: E501
:rtype: list[V1DeviceClassConfiguration]
"""
return self._config
@config.setter
def config(self, config):
"""Sets the config of this V1DeviceClassSpec.
Config defines configuration parameters that apply to each device that is claimed via this class. Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver. They are passed to the driver, but are not considered while allocating the claim. # noqa: E501
:param config: The config of this V1DeviceClassSpec. # noqa: E501
:type: list[V1DeviceClassConfiguration]
"""
self._config = config
@property
def extended_resource_name(self):
"""Gets the extended_resource_name of this V1DeviceClassSpec. # noqa: E501
ExtendedResourceName is the extended resource name for the devices of this class. The devices of this class can be used to satisfy a pod's extended resource requests. It has the same format as the name of a pod's extended resource. It should be unique among all the device classes in a cluster. If two device classes have the same name, then the class created later is picked to satisfy a pod's extended resource requests. If two classes are created at the same time, then the name of the class lexicographically sorted first is picked. This is an alpha field. # noqa: E501
:return: The extended_resource_name of this V1DeviceClassSpec. # noqa: E501
:rtype: str
"""
return self._extended_resource_name
@extended_resource_name.setter
def extended_resource_name(self, extended_resource_name):
"""Sets the extended_resource_name of this V1DeviceClassSpec.
ExtendedResourceName is the extended resource name for the devices of this class. The devices of this class can be used to satisfy a pod's extended resource requests. It has the same format as the name of a pod's extended resource. It should be unique among all the device classes in a cluster. If two device classes have the same name, then the class created later is picked to satisfy a pod's extended resource requests. If two classes are created at the same time, then the name of the class lexicographically sorted first is picked. This is an alpha field. # noqa: E501
:param extended_resource_name: The extended_resource_name of this V1DeviceClassSpec. # noqa: E501
:type: str
"""
self._extended_resource_name = extended_resource_name
@property
def selectors(self):
"""Gets the selectors of this V1DeviceClassSpec. # noqa: E501
Each selector must be satisfied by a device which is claimed via this class. # noqa: E501
:return: The selectors of this V1DeviceClassSpec. # noqa: E501
:rtype: list[V1DeviceSelector]
"""
return self._selectors
@selectors.setter
def selectors(self, selectors):
"""Sets the selectors of this V1DeviceClassSpec.
Each selector must be satisfied by a device which is claimed via this class. # noqa: E501
:param selectors: The selectors of this V1DeviceClassSpec. # noqa: E501
:type: list[V1DeviceSelector]
"""
self._selectors = selectors
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1DeviceClassSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1DeviceClassSpec):
return True
return self.to_dict() != other.to_dict()
| V1DeviceClassSpec |
python | tensorflow__tensorflow | tensorflow/python/checkpoint/checkpoint_management_test.py | {
"start": 5192,
"end": 10892
} | class ____(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testAbsPath(self):
save_dir = self._get_test_dir("abs_paths")
abs_path = os.path.join(save_dir, "model-0")
ckpt = checkpoint_management.generate_checkpoint_state_proto(
save_dir, abs_path)
self.assertEqual(ckpt.model_checkpoint_path, abs_path)
self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
def testRelPath(self):
train_dir = "train"
model = os.path.join(train_dir, "model-0")
# model_checkpoint_path should have no "train" directory part.
new_rel_path = "model-0"
ckpt = checkpoint_management.generate_checkpoint_state_proto(
train_dir, model)
self.assertEqual(ckpt.model_checkpoint_path, new_rel_path)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], new_rel_path)
def testAllModelCheckpointPaths(self):
save_dir = self._get_test_dir("all_models_test")
abs_path = os.path.join(save_dir, "model-0")
for paths in [None, [], ["model-2"]]:
ckpt = checkpoint_management.generate_checkpoint_state_proto(
save_dir, abs_path, all_model_checkpoint_paths=paths)
self.assertEqual(ckpt.model_checkpoint_path, abs_path)
self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
self.assertEqual(
len(ckpt.all_model_checkpoint_paths), len(paths) if paths else 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
def testUpdateCheckpointState(self):
save_dir = self._get_test_dir("update_checkpoint_state")
os.chdir(save_dir)
# Make a temporary train directory.
train_dir = "train"
os.mkdir(train_dir)
abs_path = os.path.join(save_dir, "model-0")
rel_path = os.path.join("train", "model-2")
checkpoint_management.update_checkpoint_state(
train_dir, rel_path, all_model_checkpoint_paths=[abs_path, rel_path])
ckpt = checkpoint_management.get_checkpoint_state(train_dir)
self.assertEqual(ckpt.model_checkpoint_path, rel_path)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path)
def testFSPath(self):
save_dir = self._get_test_dir("fspath")
os.chdir(save_dir)
# Make a temporary train directory.
train_dir = "train"
os.mkdir(train_dir)
abs_path = os.path.join(save_dir, "model-0")
rel_path = os.path.join("train", "model-2")
checkpoint_management.update_checkpoint_state(
train_dir, rel_path, all_model_checkpoint_paths=[abs_path, rel_path])
ckpt = checkpoint_management.get_checkpoint_state(pathlib.Path(train_dir))
self.assertEqual(ckpt.model_checkpoint_path, rel_path)
def testUpdateCheckpointStateSaveRelativePaths(self):
save_dir = self._get_test_dir("update_checkpoint_state")
os.chdir(save_dir)
abs_path2 = os.path.join(save_dir, "model-2")
rel_path2 = "model-2"
abs_path0 = os.path.join(save_dir, "model-0")
rel_path0 = "model-0"
checkpoint_management.update_checkpoint_state_internal(
save_dir=save_dir,
model_checkpoint_path=abs_path2,
all_model_checkpoint_paths=[rel_path0, abs_path2],
save_relative_paths=True)
# File should contain relative paths.
file_content = file_io.read_file_to_string(
os.path.join(save_dir, "checkpoint"))
ckpt = CheckpointState()
text_format.Merge(file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, rel_path2)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path2)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], rel_path0)
# get_checkpoint_state should return absolute paths.
ckpt = checkpoint_management.get_checkpoint_state(save_dir)
self.assertEqual(ckpt.model_checkpoint_path, abs_path2)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path2)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path0)
def testCheckPointStateFailsWhenIncomplete(self):
save_dir = self._get_test_dir("checkpoint_state_fails_when_incomplete")
os.chdir(save_dir)
ckpt_path = os.path.join(save_dir, "checkpoint")
ckpt_file = open(ckpt_path, "w")
ckpt_file.write("")
ckpt_file.close()
with self.assertRaises(ValueError):
checkpoint_management.get_checkpoint_state(save_dir)
def testCheckPointCompletesRelativePaths(self):
save_dir = self._get_test_dir("checkpoint_completes_relative_paths")
os.chdir(save_dir)
ckpt_path = os.path.join(save_dir, "checkpoint")
ckpt_file = open(ckpt_path, "w")
ckpt_file.write("""
model_checkpoint_path: "./model.ckpt-687529"
all_model_checkpoint_paths: "./model.ckpt-687500"
all_model_checkpoint_paths: "./model.ckpt-687529"
""")
ckpt_file.close()
ckpt = checkpoint_management.get_checkpoint_state(save_dir)
self.assertEqual(ckpt.model_checkpoint_path,
os.path.join(save_dir, "./model.ckpt-687529"))
self.assertEqual(ckpt.all_model_checkpoint_paths[0],
os.path.join(save_dir, "./model.ckpt-687500"))
self.assertEqual(ckpt.all_model_checkpoint_paths[1],
os.path.join(save_dir, "./model.ckpt-687529"))
| CheckpointStateTest |
python | facebookresearch__faiss | tests/torch_test_neural_net.py | {
"start": 8931,
"end": 9979
} | class ____(unittest.TestCase):
@torch.no_grad()
def test_decode(self):
torch.manual_seed(123)
qinco = QINCo(d=16, K=20, L=2, M=3, h=8)
codes = torch.randint(0, 20, (10, 3))
x_ref = qinco.decode(codes)
qinco2 = faiss.QINCo(qinco)
codes2 = faiss.Int32Tensor2D(codes.to(dtype=torch.int32))
x_new = qinco2.decode(codes2)
np.testing.assert_allclose(x_ref.numpy(), x_new.numpy(), atol=2e-6)
@torch.no_grad()
def test_encode(self):
torch.manual_seed(123)
qinco = QINCo(d=16, K=20, L=2, M=3, h=8)
codes = torch.randint(0, 20, (10, 3))
x = qinco.decode(codes)
del codes
ref_codes, _ = qinco.encode(x)
qinco2 = faiss.QINCo(qinco)
x2 = faiss.Tensor2D(x)
new_codes = qinco2.encode(x2)
np.testing.assert_allclose(ref_codes.numpy(), new_codes.numpy(), atol=2e-6)
######################################################
# Test index
######################################################
| TestQINCo |
python | doocs__leetcode | solution/3600-3699/3627.Maximum Median Sum of Subsequences of Size 3/Solution.py | {
"start": 0,
"end": 138
} | class ____:
def maximumMedianSum(self, nums: List[int]) -> int:
    """
    Maximum sum of medians over a partition of `nums` into triples
    (len(nums) is a multiple of 3): after sorting, the optimal medians
    are every second element starting at index len(nums) // 3.

    Note: sorts `nums` in place, matching the original behavior.
    """
    nums.sort()
    start = len(nums) // 3
    total = 0
    for idx in range(start, len(nums), 2):
        total += nums[idx]
    return total
| Solution |
python | psf__black | tests/data/cases/class_blank_parentheses.py | {
"start": 737,
"end": 978
} | class ____(object):
def func_with_blank_parentheses():
return 5
def public_func_with_blank_parentheses():
return None
def class_under_the_func_with_blank_parentheses():
class InsideFunc:
pass
| ClassWithEmptyFunc |
python | getsentry__sentry | src/sentry/testutils/cases.py | {
"start": 79269,
"end": 80034
} | class ____(TestCase):
def setUp(self):
super().setUp()
assert requests.post(settings.SENTRY_SNUBA + "/tests/outcomes/drop").status_code == 200
def store_outcomes(self, outcome, num_times=1):
outcomes = []
for _ in range(num_times):
outcome_copy = outcome.copy()
outcome_copy["timestamp"] = outcome_copy["timestamp"].strftime("%Y-%m-%dT%H:%M:%S.%fZ")
outcomes.append(outcome_copy)
assert (
requests.post(
settings.SENTRY_SNUBA + "/tests/entities/outcomes/insert",
data=json.dumps(outcomes),
).status_code
== 200
)
@pytest.mark.snuba
@requires_snuba
@pytest.mark.usefixtures("reset_snuba")
| OutcomesSnubaTest |
python | huggingface__transformers | src/transformers/models/focalnet/modeling_focalnet.py | {
"start": 11377,
"end": 14190
} | class ____(nn.Module):
def __init__(self, config, index, dim, focal_factor=2, bias=True, projection_dropout=0.0):
"""
Focal modulation layer (FocalNet).

Args:
    config: model config; supplies the per-stage focal window/level
        lists and the post-layernorm / modulator-normalization flags.
    index: stage index used to pick this layer's focal window and level.
    dim: channel dimension of the features.
    focal_factor: per-level growth of the depthwise kernel size.
    bias: whether the input projection and context conv carry a bias.
    projection_dropout: dropout rate applied after the output projection.
"""
super().__init__()
self.dim = dim
self.focal_window = config.focal_windows[index]
self.focal_level = config.focal_levels[index]
self.focal_factor = focal_factor
self.use_post_layernorm_in_modulation = config.use_post_layernorm_in_modulation
self.normalize_modulator = config.normalize_modulator
# One projection emits query (dim) + context (dim) + per-level gates
# (focal_level + 1 channels); forward() splits them apart.
self.projection_in = nn.Linear(dim, 2 * dim + (self.focal_level + 1), bias=bias)
# 1x1 conv that turns the aggregated context into the modulator.
self.projection_context = nn.Conv2d(dim, dim, kernel_size=1, stride=1, bias=bias)
self.activation = nn.GELU()
self.projection_out = nn.Linear(dim, dim)
self.projection_dropout = nn.Dropout(projection_dropout)
self.focal_layers = nn.ModuleList()
self.kernel_sizes = []
for k in range(self.focal_level):
# Kernel grows linearly per level: focal_window, +focal_factor, ...
kernel_size = self.focal_factor * k + self.focal_window
self.focal_layers.append(
nn.Sequential(
# groups=dim makes this a depthwise conv; padding keeps the
# spatial size unchanged.
nn.Conv2d(
dim, dim, kernel_size=kernel_size, stride=1, groups=dim, padding=kernel_size // 2, bias=False
),
nn.GELU(),
)
)
self.kernel_sizes.append(kernel_size)
if self.use_post_layernorm_in_modulation:
self.layernorm = nn.LayerNorm(dim, eps=config.layer_norm_eps)
def forward(self, hidden_state):
"""
Apply focal modulation.

Args:
    hidden_state:
        Input features with shape (batch_size, height, width, num_channels)
        (channels-last; the permutes below move channels to dim 1 for the
        convolutions and back afterwards).

Returns:
    Tensor of the same shape as the input.
"""
num_channels = hidden_state.shape[-1]
# pre linear projection; permute to NCHW for the conv-based context path
x = self.projection_in(hidden_state).permute(0, 3, 1, 2).contiguous()
# Split the joint projection into query, context, and per-level gates.
q, ctx, gates = torch.split(x, (num_channels, num_channels, self.focal_level + 1), 1)
# context aggregation: each level refines ctx with a larger depthwise
# conv, and its gate channel weights that level's contribution
ctx_all = 0
for level in range(self.focal_level):
ctx = self.focal_layers[level](ctx)
ctx_all = ctx_all + ctx * gates[:, level : level + 1]
# Global (spatially averaged) context uses the last gate channel.
ctx_global = self.activation(ctx.mean(2, keepdim=True).mean(3, keepdim=True))
ctx_all = ctx_all + ctx_global * gates[:, self.focal_level :]
# normalize context by the number of contributions, if configured
if self.normalize_modulator:
ctx_all = ctx_all / (self.focal_level + 1)
# focal modulation: elementwise product of query and modulator
modulator = self.projection_context(ctx_all)
x_out = q * modulator
x_out = x_out.permute(0, 2, 3, 1).contiguous()  # back to channels-last
if self.use_post_layernorm_in_modulation:
x_out = self.layernorm(x_out)
# post linear projection
x_out = self.projection_out(x_out)
x_out = self.projection_dropout(x_out)
return x_out
| FocalNetModulation |
python | apache__airflow | providers/google/tests/unit/google/common/hooks/test_base_google.py | {
"start": 6385,
"end": 11909
} | class ____:
def setup_method(self):
with mock.patch(
MODULE_NAME + ".GoogleBaseHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.instance = hook.GoogleBaseHook(gcp_conn_id="google-cloud-default")
def test_provide_gcp_credential_file_decorator_key_path_and_keyfile_dict(self):
key_path = "/test/key-path"
self.instance.extras = {
"key_path": key_path,
"keyfile_dict": '{"foo": "bar"}',
}
@hook.GoogleBaseHook.provide_gcp_credential_file
def assert_gcp_credential_file_in_env(_):
assert os.environ[CREDENTIALS] == key_path
with pytest.raises(
AirflowException,
match="The `keyfile_dict` and `key_path` fields are mutually exclusive. "
"Please provide only one value.",
):
assert_gcp_credential_file_in_env(self.instance)
def test_provide_gcp_credential_keyfile_dict_json(self):
"""
Historically, keyfile_dict had to be str in the conn extra. Now it
can be dict and this is verified here.
"""
conn_dict = {
"extra": {
"keyfile_dict": {"foo": "bar", "private_key": "hi"}, # notice keyfile_dict is dict not str
}
}
@GoogleBaseHook.provide_gcp_credential_file
def assert_gcp_credential_file_in_env(instance):
assert Path(os.environ[CREDENTIALS]).read_text() == json.dumps(conn_dict["extra"]["keyfile_dict"])
with patch.dict("os.environ", AIRFLOW_CONN_MY_GOOGLE=json.dumps(conn_dict)):
# keyfile dict is handled in two different areas
hook = GoogleBaseHook("my_google")
# the first is in provide_gcp_credential_file
assert_gcp_credential_file_in_env(hook)
with patch("google.oauth2.service_account.Credentials.from_service_account_info") as m:
# the second is in get_credentials_and_project_id
hook.get_credentials_and_project_id()
m.assert_called_once_with(
conn_dict["extra"]["keyfile_dict"],
scopes=("https://www.googleapis.com/auth/cloud-platform",),
)
def test_provide_gcp_credential_file_decorator_key_path(self):
key_path = "/test/key-path"
self.instance.extras = {"key_path": key_path}
@hook.GoogleBaseHook.provide_gcp_credential_file
def assert_gcp_credential_file_in_env(_):
assert os.environ[CREDENTIALS] == key_path
assert_gcp_credential_file_in_env(self.instance)
@mock.patch("tempfile.NamedTemporaryFile")
def test_provide_gcp_credential_file_decorator_key_content(self, mock_file):
string_file = StringIO()
file_content = '{"foo": "bar"}'
file_name = "/test/mock-file"
self.instance.extras = {"keyfile_dict": file_content}
mock_file_handler = mock_file.return_value.__enter__.return_value
mock_file_handler.name = file_name
mock_file_handler.write = string_file.write
@hook.GoogleBaseHook.provide_gcp_credential_file
def assert_gcp_credential_file_in_env(_):
assert os.environ[CREDENTIALS] == file_name
assert file_content == string_file.getvalue()
assert_gcp_credential_file_in_env(self.instance)
@mock.patch.dict(os.environ, {CREDENTIALS: ENV_VALUE})
def test_provide_gcp_credential_keep_environment(self):
key_path = "/test/key-path"
self.instance.extras = {"key_path": key_path}
@hook.GoogleBaseHook.provide_gcp_credential_file
def assert_gcp_credential_file_in_env(_):
assert os.environ[CREDENTIALS] == key_path
assert_gcp_credential_file_in_env(self.instance)
assert os.environ[CREDENTIALS] == ENV_VALUE
@mock.patch.dict(os.environ, {CREDENTIALS: ENV_VALUE})
def test_provide_gcp_credential_keep_environment_when_exception(self):
key_path = "/test/key-path"
self.instance.extras = {"key_path": key_path}
@hook.GoogleBaseHook.provide_gcp_credential_file
def assert_gcp_credential_file_in_env(_):
raise RuntimeError("Some exception occurred")
with pytest.raises(RuntimeError, match="Some exception occurred"):
assert_gcp_credential_file_in_env(self.instance)
assert os.environ[CREDENTIALS] == ENV_VALUE
@mock.patch.dict(os.environ, clear=True)
def test_provide_gcp_credential_keep_clear_environment(self):
key_path = "/test/key-path"
self.instance.extras = {"key_path": key_path}
@hook.GoogleBaseHook.provide_gcp_credential_file
def assert_gcp_credential_file_in_env(_):
assert os.environ[CREDENTIALS] == key_path
assert_gcp_credential_file_in_env(self.instance)
assert CREDENTIALS not in os.environ
@mock.patch.dict(os.environ, clear=True)
def test_provide_gcp_credential_keep_clear_environment_when_exception(self):
key_path = "/test/key-path"
self.instance.extras = {"key_path": key_path}
@hook.GoogleBaseHook.provide_gcp_credential_file
def assert_gcp_credential_file_in_env(_):
raise RuntimeError("Some exception occurred")
with pytest.raises(RuntimeError, match="Some exception occurred"):
assert_gcp_credential_file_in_env(self.instance)
assert CREDENTIALS not in os.environ
| TestProvideGcpCredentialFile |
python | celery__celery | t/unit/utils/test_time.py | {
"start": 9229,
"end": 11850
} | class ____:
def test_standard_tz(self):
class tzz(tzinfo):
def utcoffset(self, dt):
return None # Mock no utcoffset specified
tz = tzz()
assert localize(make_aware(datetime.now(_timezone.utc), tz), tz)
@patch('dateutil.tz.datetime_ambiguous')
def test_when_zoneinfo(self, datetime_ambiguous_mock):
datetime_ambiguous_mock.return_value = False
tz = ZoneInfo("US/Eastern")
assert localize(make_aware(datetime.now(_timezone.utc), tz), tz)
datetime_ambiguous_mock.return_value = True
tz2 = ZoneInfo("US/Eastern")
assert localize(make_aware(datetime.now(_timezone.utc), tz2), tz2)
@patch('dateutil.tz.datetime_ambiguous')
def test_when_is_ambiguous(self, datetime_ambiguous_mock):
class tzz(tzinfo):
def utcoffset(self, dt):
return None # Mock no utcoffset specified
def is_ambiguous(self, dt):
return True
datetime_ambiguous_mock.return_value = False
tz = tzz()
assert localize(make_aware(datetime.now(_timezone.utc), tz), tz)
datetime_ambiguous_mock.return_value = True
tz2 = tzz()
assert localize(make_aware(datetime.now(_timezone.utc), tz2), tz2)
def test_localize_changes_utc_dt(self):
now_utc_time = datetime.now(tz=ZoneInfo("UTC"))
local_tz = ZoneInfo('US/Eastern')
localized_time = localize(now_utc_time, local_tz)
assert localized_time == now_utc_time
def test_localize_aware_dt_idempotent(self):
t = (2017, 4, 23, 21, 36, 59, 0)
local_zone = ZoneInfo('America/New_York')
local_time = datetime(*t)
local_time_aware = datetime(*t, tzinfo=local_zone)
alternate_zone = ZoneInfo('America/Detroit')
localized_time = localize(local_time_aware, alternate_zone)
assert localized_time == local_time_aware
assert local_zone.utcoffset(
local_time) == alternate_zone.utcoffset(local_time)
localized_utc_offset = localized_time.tzinfo.utcoffset(local_time)
assert localized_utc_offset == alternate_zone.utcoffset(local_time)
assert localized_utc_offset == local_zone.utcoffset(local_time)
@pytest.mark.parametrize('s,expected', [
(999, 999),
(7.5, 7.5),
('2.5/s', 2.5),
('1456/s', 1456),
('100/m', 100 / 60.0),
('10/h', 10 / 60.0 / 60.0),
(0, 0),
(None, 0),
('0/m', 0),
('0/h', 0),
('0/s', 0),
('0.0/s', 0),
])
def test_rate_limit_string(s, expected):
assert rate(s) == expected
| test_localize |
python | bokeh__bokeh | src/bokeh/application/handlers/server_request_handler.py | {
"start": 1748,
"end": 4387
} | class ____(RequestHandler):
''' Load a script which contains server request handler callbacks.
'''
_module: ModuleType
def __init__(self, *, filename: PathLike, argv: list[str] = [], package: ModuleType | None = None) -> None:
'''
Keyword Args:
filename (str) : path to a module to load request handler callbacks from
argv (list[str], optional) : a list of string arguments to use as
``sys.argv`` when the callback code is executed. (default: [])
'''
super().__init__()
with open(filename, encoding='utf-8') as f:
source = f.read()
self._runner = CodeRunner(source, filename, argv, package=package)
if not self._runner.failed:
# unlike ScriptHandler, we only load the module one time
self._module = self._runner.new_module()
def extract_callbacks() -> None:
contents = self._module.__dict__
if 'process_request' in contents:
self._process_request = contents['process_request']
_check_callback(self._process_request, ('request',), what="process_request")
self._runner.run(self._module, extract_callbacks)
# Properties --------------------------------------------------------------
@property
def error(self) -> str | None:
''' If the handler fails, may contain a related error message.
'''
return self._runner.error
@property
def error_detail(self) -> str | None:
''' If the handler fails, may contain a traceback or other details.
'''
return self._runner.error_detail
@property
def failed(self) -> bool:
''' ``True`` if the request handler callbacks failed to execute
'''
return self._runner.failed
# Public methods ----------------------------------------------------------
def url_path(self) -> str | None:
''' The last path component for the basename of the path to the
callback module.
'''
if self.failed:
return None
else:
# TODO should fix invalid URL characters
return '/' + os.path.splitext(os.path.basename(self._runner.path))[0]
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| ServerRequestHandler |
python | walkccc__LeetCode | solutions/1863. Sum of All Subset XOR Totals/1863-2.py | {
"start": 0,
"end": 131
} | class ____:
def subsetXORSum(self, nums: list[int]) -> int:
return functools.reduce(operator.or_, nums) << len(nums) - 1
| Solution |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/configurable.py | {
"start": 15347,
"end": 24053
} | class ____(DynamicRunnable[Input, Output]):
"""`Runnable` that can be dynamically configured.
A `RunnableConfigurableAlternatives` should be initiated using the
`configurable_alternatives` method of a `Runnable` or can be
initiated directly as well.
Here is an example of using a `RunnableConfigurableAlternatives` that uses
alternative prompts to illustrate its functionality:
```python
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI
# This creates a RunnableConfigurableAlternatives for Prompt Runnable
# with two alternatives.
prompt = PromptTemplate.from_template(
"Tell me a joke about {topic}"
).configurable_alternatives(
ConfigurableField(id="prompt"),
default_key="joke",
poem=PromptTemplate.from_template("Write a short poem about {topic}"),
)
# When invoking the created RunnableSequence, you can pass in the
# value for your ConfigurableField's id which in this case will either be
# `joke` or `poem`.
chain = prompt | ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
# The `with_config` method brings in the desired Prompt Runnable in your
# Runnable Sequence.
chain.with_config(configurable={"prompt": "poem"}).invoke({"topic": "bears"})
```
Equivalently, you can initialize `RunnableConfigurableAlternatives` directly
and use in LCEL in the same way:
```python
from langchain_core.runnables import ConfigurableField
from langchain_core.runnables.configurable import (
RunnableConfigurableAlternatives,
)
from langchain_openai import ChatOpenAI
prompt = RunnableConfigurableAlternatives(
which=ConfigurableField(id="prompt"),
default=PromptTemplate.from_template("Tell me a joke about {topic}"),
default_key="joke",
prefix_keys=False,
alternatives={
"poem": PromptTemplate.from_template("Write a short poem about {topic}")
},
)
chain = prompt | ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
chain.with_config(configurable={"prompt": "poem"}).invoke({"topic": "bears"})
```
"""
which: ConfigurableField
"""The `ConfigurableField` to use to choose between alternatives."""
alternatives: dict[
str,
Runnable[Input, Output] | Callable[[], Runnable[Input, Output]],
]
"""The alternatives to choose from."""
default_key: str = "default"
"""The enum value to use for the default option."""
prefix_keys: bool
"""Whether to prefix configurable fields of each alternative with a namespace
of the form <which.id>==<alternative_key>, e.g. a key named "temperature" used by
the alternative named "gpt3" becomes "model==gpt3/temperature".
"""
@property
@override
def config_specs(self) -> list[ConfigurableFieldSpec]:
with _enums_for_spec_lock:
if which_enum := _enums_for_spec.get(self.which):
pass
else:
which_enum = StrEnum( # type: ignore[call-overload]
self.which.name or self.which.id,
(
(v, v)
for v in [*list(self.alternatives.keys()), self.default_key]
),
)
_enums_for_spec[self.which] = cast("type[StrEnum]", which_enum)
return get_unique_config_specs(
# which alternative
[
ConfigurableFieldSpec(
id=self.which.id,
name=self.which.name,
description=self.which.description,
annotation=which_enum,
default=self.default_key,
is_shared=self.which.is_shared,
),
]
# config specs of the default option
+ (
[
prefix_config_spec(s, f"{self.which.id}=={self.default_key}")
for s in self.default.config_specs
]
if self.prefix_keys
else self.default.config_specs
)
# config specs of the alternatives
+ [
(
prefix_config_spec(s, f"{self.which.id}=={alt_key}")
if self.prefix_keys
else s
)
for alt_key, alt in self.alternatives.items()
if isinstance(alt, RunnableSerializable)
for s in alt.config_specs
]
)
@override
def configurable_fields(
self, **kwargs: AnyConfigurableField
) -> RunnableSerializable[Input, Output]:
return self.__class__(
which=self.which,
default=self.default.configurable_fields(**kwargs),
alternatives=self.alternatives,
default_key=self.default_key,
prefix_keys=self.prefix_keys,
)
def _prepare(
self, config: RunnableConfig | None = None
) -> tuple[Runnable[Input, Output], RunnableConfig]:
config = ensure_config(config)
which = config.get("configurable", {}).get(self.which.id, self.default_key)
# remap configurable keys for the chosen alternative
if self.prefix_keys:
config = cast(
"RunnableConfig",
{
**config,
"configurable": {
_strremoveprefix(k, f"{self.which.id}=={which}/"): v
for k, v in config.get("configurable", {}).items()
},
},
)
# return the chosen alternative
if which == self.default_key:
return (self.default, config)
if which in self.alternatives:
alt = self.alternatives[which]
if isinstance(alt, Runnable):
return (alt, config)
return (alt(), config)
msg = f"Unknown alternative: {which}"
raise ValueError(msg)
def _strremoveprefix(s: str, prefix: str) -> str:
"""`str.removeprefix()` is only available in Python 3.9+."""
return s.replace(prefix, "", 1) if s.startswith(prefix) else s
def prefix_config_spec(
spec: ConfigurableFieldSpec, prefix: str
) -> ConfigurableFieldSpec:
"""Prefix the id of a `ConfigurableFieldSpec`.
This is useful when a `RunnableConfigurableAlternatives` is used as a
`ConfigurableField` of another `RunnableConfigurableAlternatives`.
Args:
spec: The `ConfigurableFieldSpec` to prefix.
prefix: The prefix to add.
Returns:
The prefixed `ConfigurableFieldSpec`.
"""
return (
ConfigurableFieldSpec(
id=f"{prefix}/{spec.id}",
name=spec.name,
description=spec.description,
annotation=spec.annotation,
default=spec.default,
is_shared=spec.is_shared,
)
if not spec.is_shared
else spec
)
def make_options_spec(
spec: ConfigurableFieldSingleOption | ConfigurableFieldMultiOption,
description: str | None,
) -> ConfigurableFieldSpec:
"""Make options spec.
Make a `ConfigurableFieldSpec` for a `ConfigurableFieldSingleOption` or
`ConfigurableFieldMultiOption`.
Args:
spec: The `ConfigurableFieldSingleOption` or `ConfigurableFieldMultiOption`.
description: The description to use if the spec does not have one.
Returns:
The `ConfigurableFieldSpec`.
"""
with _enums_for_spec_lock:
if enum := _enums_for_spec.get(spec):
pass
else:
enum = StrEnum( # type: ignore[call-overload]
spec.name or spec.id,
((v, v) for v in list(spec.options.keys())),
)
_enums_for_spec[spec] = cast("type[StrEnum]", enum)
if isinstance(spec, ConfigurableFieldSingleOption):
return ConfigurableFieldSpec(
id=spec.id,
name=spec.name,
description=spec.description or description,
annotation=enum,
default=spec.default,
is_shared=spec.is_shared,
)
return ConfigurableFieldSpec(
id=spec.id,
name=spec.name,
description=spec.description or description,
annotation=Sequence[enum], # type: ignore[valid-type]
default=spec.default,
is_shared=spec.is_shared,
)
| RunnableConfigurableAlternatives |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/triggers/glue_databrew.py | {
"start": 978,
"end": 2593
} | class ____(AwsBaseWaiterTrigger):
"""
Watches for a Glue DataBrew job, triggers when it finishes.
:param job_name: Glue DataBrew job name
:param run_id: the ID of the specific run to watch for that job
:param waiter_delay: Number of seconds to wait between two checks. Default is 30 seconds.
:param waiter_max_attempts: Maximum number of attempts to wait for the job to complete. Default is 60 attempts.
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
job_name: str,
run_id: str,
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
aws_conn_id: str | None = "aws_default",
**kwargs,
):
super().__init__(
serialized_fields={"job_name": job_name, "run_id": run_id},
waiter_name="job_complete",
waiter_args={"Name": job_name, "RunId": run_id},
failure_message=f"Error while waiting for job {job_name} with run id {run_id} to complete",
status_message=f"Run id: {run_id}",
status_queries=["State"],
return_value=run_id,
return_key="run_id",
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
**kwargs,
)
def hook(self) -> GlueDataBrewHook:
return GlueDataBrewHook(
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
verify=self.verify,
config=self.botocore_config,
)
| GlueDataBrewJobCompleteTrigger |
python | walkccc__LeetCode | solutions/57. Insert Interval/57.py | {
"start": 0,
"end": 596
} | class ____:
def insert(self, intervals: list[list[int]],
newInterval: list[int]) -> list[list[int]]:
n = len(intervals)
ans = []
i = 0
while i < n and intervals[i][1] < newInterval[0]:
ans.append(intervals[i])
i += 1
# Merge overlapping intervals.
while i < n and intervals[i][0] <= newInterval[1]:
newInterval[0] = min(newInterval[0], intervals[i][0])
newInterval[1] = max(newInterval[1], intervals[i][1])
i += 1
ans.append(newInterval)
while i < n:
ans.append(intervals[i])
i += 1
return ans
| Solution |
python | django__django | tests/m2m_multiple/models.py | {
"start": 471,
"end": 908
} | class ____(models.Model):
headline = models.CharField(max_length=50)
pub_date = models.DateTimeField()
primary_categories = models.ManyToManyField(
Category, related_name="primary_article_set"
)
secondary_categories = models.ManyToManyField(
Category, related_name="secondary_article_set"
)
class Meta:
ordering = ("pub_date",)
def __str__(self):
return self.headline
| Article |
python | networkx__networkx | networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py | {
"start": 3248,
"end": 8088
} | class ____:
def test_K4_normalized(self):
"Approximate current-flow betweenness centrality: K4 normalized"
G = nx.complete_graph(4)
b = nx.current_flow_betweenness_centrality(G, normalized=True)
epsilon = 0.1
ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon)
for n in sorted(G):
np.testing.assert_allclose(b[n], ba[n], atol=epsilon)
def test_K4(self):
"Approximate current-flow betweenness centrality: K4"
G = nx.complete_graph(4)
b = nx.current_flow_betweenness_centrality(G, normalized=False)
epsilon = 0.1
ba = approximate_cfbc(G, normalized=False, epsilon=0.5 * epsilon)
for n in sorted(G):
np.testing.assert_allclose(b[n], ba[n], atol=epsilon * len(G) ** 2)
def test_star(self):
"Approximate current-flow betweenness centrality: star"
G = nx.Graph()
nx.add_star(G, ["a", "b", "c", "d"])
b = nx.current_flow_betweenness_centrality(G, normalized=True)
epsilon = 0.1
ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon)
for n in sorted(G):
np.testing.assert_allclose(b[n], ba[n], atol=epsilon)
def test_grid(self):
"Approximate current-flow betweenness centrality: 2d grid"
G = nx.grid_2d_graph(4, 4)
b = nx.current_flow_betweenness_centrality(G, normalized=True)
epsilon = 0.1
ba = approximate_cfbc(G, normalized=True, epsilon=0.5 * epsilon)
for n in sorted(G):
np.testing.assert_allclose(b[n], ba[n], atol=epsilon)
def test_seed(self):
G = nx.complete_graph(4)
b = approximate_cfbc(G, normalized=False, epsilon=0.05, seed=1)
b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
for n in sorted(G):
np.testing.assert_allclose(b[n], b_answer[n], atol=0.1)
def test_solvers(self):
"Approximate current-flow betweenness centrality: solvers"
G = nx.complete_graph(4)
epsilon = 0.1
for solver in ["full", "lu", "cg"]:
b = approximate_cfbc(
G, normalized=False, solver=solver, epsilon=0.5 * epsilon
)
b_answer = {0: 0.75, 1: 0.75, 2: 0.75, 3: 0.75}
for n in sorted(G):
np.testing.assert_allclose(b[n], b_answer[n], atol=epsilon)
def test_lower_kmax(self):
G = nx.complete_graph(4)
with pytest.raises(nx.NetworkXError, match="Increase kmax or epsilon"):
nx.approximate_current_flow_betweenness_centrality(G, kmax=4)
def test_sample_weight_positive_effect(self):
G = nx.complete_graph(4)
b1 = approximate_cfbc(G, epsilon=0.1, seed=42)
b2 = approximate_cfbc(G, epsilon=0.1, sample_weight=2.0, seed=42)
assert len(b1) == len(b2) == 4
for node in G.nodes():
assert node in b1 and node in b2
assert isinstance(b1[node], float) and isinstance(b2[node], float)
def test_sample_weight_validation(self):
G = nx.complete_graph(4)
with pytest.raises(
nx.NetworkXError,
match="Sample weight must be positive. Got sample_weight=-1.0",
):
approximate_cfbc(G, sample_weight=-1.0)
with pytest.raises(
nx.NetworkXError,
match="Sample weight must be positive. Got sample_weight=0.0",
):
approximate_cfbc(G, sample_weight=0.0)
result = approximate_cfbc(G, sample_weight=0.1, seed=42)
assert len(result) == 4
def test_epsilon_validation(self):
G = nx.complete_graph(4)
with pytest.raises(
nx.NetworkXError, match="Epsilon must be positive. Got epsilon=-0.1"
):
approximate_cfbc(G, epsilon=-0.1)
with pytest.raises(
nx.NetworkXError, match="Epsilon must be positive. Got epsilon=0.0"
):
approximate_cfbc(G, epsilon=0.0)
def test_normalization_edge_case_small_graph(self):
G = nx.path_graph(2)
result_norm = approximate_cfbc(G, normalized=True, seed=42)
result_unnorm = approximate_cfbc(G, normalized=False, seed=42)
assert len(result_norm) == 2
assert len(result_unnorm) == 2
assert all(v == 0.0 for v in result_norm.values())
assert all(v == 0.0 for v in result_unnorm.values())
G1 = nx.Graph()
G1.add_node(0)
result1 = approximate_cfbc(G1, normalized=True, seed=42)
assert result1 == {0: 0.0}
def test_sample_weight_interaction_with_kmax(self):
G = nx.complete_graph(4)
with pytest.raises(nx.NetworkXError, match="Number random pairs k>kmax"):
approximate_cfbc(G, sample_weight=10.0, epsilon=0.01, kmax=10)
| TestApproximateFlowBetweennessCentrality |
python | doocs__leetcode | solution/1400-1499/1401.Circle and Rectangle Overlapping/Solution.py | {
"start": 0,
"end": 455
} | class ____:
def checkOverlap(
self,
radius: int,
xCenter: int,
yCenter: int,
x1: int,
y1: int,
x2: int,
y2: int,
) -> bool:
def f(i: int, j: int, k: int) -> int:
if i <= k <= j:
return 0
return i - k if k < i else k - j
a = f(x1, x2, xCenter)
b = f(y1, y2, yCenter)
return a * a + b * b <= radius * radius
| Solution |
python | redis__redis-py | redis/exceptions.py | {
"start": 2078,
"end": 2242
} | class ____(RedisError):
"""
Cluster errors occurred multiple times, resulting in an exhaustion of the
command execution TTL
"""
pass
| ClusterError |
python | pytorch__pytorch | test/quantization/fx/test_quantize_fx.py | {
"start": 380569,
"end": 404006
} | class ____(QuantizationTestCase):
@skipIfNoFBGEMM
@unittest.skipIf(not TEST_CUDA, "gpu is not available.")
def test_static_gpu_convert_basic(self):
class Net(nn.Module):
def __init__(self) -> None:
super().__init__()
self.relu1 = nn.ReLU()
self.conv1 = nn.Conv2d(1, 6, 5)
self.linear1 = nn.Linear(120, 1)
def forward(self, x):
x = self.relu1(self.conv1(x))
y = self.linear1(x.view(-1))
return y
input = torch.randn((5, 1, 6, 6)).to('cuda')
example_inputs = (input,)
model = Net().to('cuda').eval()
qconfig_dict = {"": torch.ao.quantization.get_default_qconfig('fbgemm')}
model_prepared = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
model_prepared(*example_inputs)
model_quantized = convert_to_reference_fx(model_prepared)
out = model_quantized(*example_inputs)
self.assertEqual(out.device.type, 'cuda')
@skipIfNoFBGEMM
@unittest.skipIf(not TEST_CUDA, "gpu is not available.")
def test_switch_device_prepare_convert(self):
class Net(nn.Module):
def __init__(self) -> None:
super().__init__()
self.relu1 = nn.ReLU()
self.conv1 = nn.Conv2d(1, 6, 5)
self.linear1 = nn.Linear(120, 1)
def forward(self, x):
x = self.relu1(self.conv1(x))
y = self.linear1(x.view(-1))
return y
for device in ['cuda', 'cpu']:
device_after = 'cuda' if device == 'cpu' else 'cpu'
input = torch.randn((5, 1, 6, 6)).to(device)
model = Net().to(device).eval()
qconfig_dict = {"": torch.ao.quantization.get_default_qconfig('fbgemm')}
model_prepared = prepare_fx(model, qconfig_dict, example_inputs=(input,))
model_prepared(input)
model_prepared.to(device_after)
model_quantized = convert_to_reference_fx(model_prepared)
out = model_quantized(input.to(device_after))
self.assertEqual(out.device.type, device_after)
@skipIfNoFBGEMM
@unittest.skipIf(not TEST_CUDA, "gpu is not available.")
def test_prepare_serialize_switch_device_convert(self):
class Net(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 6, 5)
self.linear1 = nn.Linear(120, 1)
def forward(self, x):
x = self.conv1(x)
y = self.linear1(x.view(-1))
return y
for device in ['cuda', 'cpu']:
for device_after in ['cuda', 'cpu']:
input = torch.randn((5, 1, 6, 6)).to(device)
model = Net().to(device).eval()
qconfig_dict = {"": torch.ao.quantization.get_default_qconfig('fbgemm')}
model_prepared_first = prepare_fx(model, qconfig_dict, example_inputs=(input,))
model_prepared_second = prepare_fx(model, qconfig_dict, example_inputs=(input,))
model_prepared_first(input)
state_dict = model_prepared_first.state_dict()
del model_prepared_first
model_prepared_second.load_state_dict(state_dict)
model_prepared_second.to(device_after)
model_quantized = convert_to_reference_fx(model_prepared_second)
out = model_quantized(input.to(device_after))
self.assertEqual(out.device.type, device_after)
@skipIfTorchDynamo("too slow")
@skip_if_no_torchvision
def test_model_dropout(self):
from torchvision import models
m = models.mobilenet_v3_small()
qconfig_mapping = torch.ao.quantization.get_default_qat_qconfig_mapping('fbgemm')
example_inputs = (torch.randn(1, 3, 224, 224),)
mp = prepare_qat_fx(m, qconfig_mapping, example_inputs=example_inputs)
mp(*example_inputs)
with override_quantized_engine("qnnpack") if IS_ARM64 else contextlib.nullcontext():
mq = convert_fx(mp)
mq(*example_inputs)
def _test_model_impl(
self, mode, name, model, eager_quantizable_model,
check_with_eager=True,
diff_of_quant=None,
diff_from_eager=None):
if diff_of_quant is None or diff_from_eager is None:
diff_of_quant = {}
diff_from_eager = {}
if mode not in diff_of_quant or mode not in diff_from_eager:
diff_of_quant[mode] = {}
diff_from_eager[mode] = {}
input_tensor = torch.rand(1, 3, 224, 224)
input_tensor_inception = torch.rand(1, 3, 299, 299)
output_value = torch.randint(0, 1, (1,))
# print('quantizing:', name, ' mode:', mode)
if name == 'inception_v3':
input_value = input_tensor_inception
else:
input_value = input_tensor
qconfig = default_qconfig if mode == 'static' else default_qat_qconfig
qconfig_dict = {'': qconfig}
script = torch.jit.script(model)
# make sure graph module and script module are both runanble
original_out = model(input_value)
is_not_tuple_out = not isinstance(original_out, tuple)
script_out = script(input_value)
# set to train just before quantization
prepare_fx_fn = prepare_fx
if mode != 'static':
model.train()
prepare_fx_fn = prepare_qat_fx
prepared = prepare_fx_fn(model, qconfig_dict)
if mode == 'ddp':
mp.spawn(run_ddp,
args=(world_size, prepared), # noqa: F821
nprocs=world_size, # noqa: F821
join=True)
elif mode == 'qat':
assert prepared.training, 'prepared must be in training mode for qat'
optimizer = torch.optim.SGD(prepared.parameters(), lr=0.0001)
criterion = nn.CrossEntropyLoss()
train_one_epoch(prepared, criterion, optimizer, [(input_value, output_value)], torch.device('cpu'), 1)
else:
for _ in range(10):
prepared(input_value)
# print('after observation root:', prepared.root)
qgraph = convert_fx(prepared)
# print('after quantization root:', qgraph.root)
# print('after quantization code:', qgraph.src)
qgraph.eval()
qgraph_script = torch.jit.script(qgraph)
# print('quantized and scripted:', qgraph_script.graph)
qgraph_out = qgraph(input_value)
qgraph_script = qgraph_script(input_value)
if is_not_tuple_out:
diff_of_quant[mode][name] = (original_out - qgraph_out).abs().max()
assert torch.allclose(qgraph_out, qgraph_script), 'graph, scripted graph'
else:
print('tuple output')
if eager_quantizable_model is not None:
# comparing to eager mode quantization
qeager = eager_quantizable_model
ref_out = qeager(input_value)
qeager.qconfig = qconfig
if mode == 'static':
qeager.fuse_model()
prepare(qeager, inplace=True)
else:
qeager.train()
qeager.fuse_model()
prepare_qat(qeager, inplace=True)
# calibration
if mode == 'ddp':
mp.spawn(run_ddp,
args=(world_size, qeager), # noqa: F821
nprocs=world_size, # noqa: F821
join=True)
elif mode == 'qat':
assert qeager.training, 'qeager should be in training mode for qat'
optimizer = torch.optim.SGD(qeager.parameters(), lr=0.0001)
train_one_epoch(qeager, criterion, optimizer, [(input_value, output_value)], torch.device('cpu'), 1)
else:
for _ in range(10):
qeager(input_value)
# print('ref after observation:', qeager)
convert(qeager, inplace=True)
qeager.eval()
# print('ref after quantization:', qeager)
qeager_out = qeager(input_value)
qeager_script = torch.jit.script(qeager)
qscript_out = qeager_script(input_value)
if is_not_tuple_out:
diff_from_eager[mode][name] = (qeager_out - qgraph_out).abs().max()
if check_with_eager:
self.assertEqual(diff_from_eager[mode][name], 0,
'Result of graph mode quantization and ' +
'eager mode quantization on model: ' + name +
' should match. Mode: ' + mode +
' diff:' + str(diff_from_eager[mode][name]))
def _test_building_block(self, quant_type, BB):
eager = BB().float()
graph = copy.deepcopy(eager)
if quant_type == QuantType.STATIC:
qconfig = default_qconfig
eager_prepare = prepare
graph_prepare = prepare_fx
eager.eval()
graph.eval()
calibrate_or_train = test_only_eval_fn
data = self.img_data_2d
is_qat = False
else:
assert quant_type == QuantType.QAT
qconfig = default_qat_qconfig
eager_prepare = prepare_qat
graph_prepare = prepare_qat_fx
eager.train()
graph.train()
calibrate_or_train = test_only_train_fn
data = self.img_data_2d_train
is_qat = True
if hasattr(eager, "fuse_model"):
eager.fuse_model()
eager = QuantWrapper(eager)
eager.qconfig = qconfig
eager = eager_prepare(eager)
qconfig_dict = {"": qconfig}
graph = graph_prepare(graph, qconfig_dict, example_inputs=(data[0][0],))
eager_out = eager(data[0][0])
graph_out = graph(data[0][0])
# Eager Mode and FX Graph Mode QAT now differ in numerics both
# in Post Training and QAT because FX Graph Mode uses same fake_quant instances
# for input and output of CopyNode
# self.assertEqual(eager_out, graph_out)
calibrate_or_train(eager, data)
calibrate_or_train(graph, data)
eager = convert(eager)
graph = convert_fx(graph)
eager_out = eager(data[0][0])
graph_out = graph(data[0][0])
@override_qengines
def test_resnet_base(self):
models = [ResNetBase]
options = itertools.product(self.static_quant_types, models)
for quant_type, M in options:
self._test_building_block(quant_type, M)
@skip_if_no_torchvision
@skipIfNoFBGEMM
@unittest.skip("skip for now since tbb failed")
def test_torchvision(self):
from torchvision import models
from torchvision.models import quantization as quantized_models
from torchvision.models.quantization.utils import _replace_relu
def get_available_classification_models(models):
return [k for k, v in models.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]
model_list = get_available_classification_models(models)
quantized_model_list = get_available_classification_models(quantized_models)
quantized_model_list = set(quantized_model_list)
# test eager and graph consistency
model_list = quantized_model_list
# mobilenet/inception_v3/googlenet qat is not working due to AdaptiveAveragePool qat
# we might observe the output of AdaptiveAveragePool in the future
# and re-enable the test
fx_eager_not_matching = [
("mobilenet_v2", "qat"),
("inception_v3", "qat"),
("googlenet", "qat")
] # because relu6 is replaced as relu in mobilenetv2
diff_of_quant = {}
diff_from_eager = {}
modes = ['static', 'qat']
options = itertools.product(modes, model_list)
for mode, name in options:
pretrained = name in quantized_model_list # load pretrained model to compare with quantized model
kwargs = {}
# turn off transform input for inception_v3 since
# it's not quantized in eager mode and in fx graph
# mode we can't skip quantizing a method right now
# (might be supported in the future)
if name in ["inception_v3", "googlenet"]:
kwargs["transform_input"] = False
eager_quantizable_model = None
if name in quantized_model_list:
eager_quantizable_model = quantized_models.__dict__[name](pretrained=False, quantize=False, **kwargs).eval().float()
# compare with eager mode quantized model when it is available
pretrained = eager_quantizable_model is not None
model = models.__dict__[name](pretrained=pretrained, **kwargs).eval().float()
if name == "mobilenet_v2":
_replace_relu(model)
# disable aux logits
if hasattr(model, "aux_logits"):
model.aux_logits = False
model.AuxLogits = None
if eager_quantizable_model:
eager_quantizable_model.aux_logits = False
eager_quantizable_model.AuxLogits = None
check_with_eager = (name, mode) not in fx_eager_not_matching
self._test_model_impl(
mode, name, model, eager_quantizable_model,
check_with_eager,
diff_of_quant, diff_from_eager)
def print_diffs(diffs):
for mode, diffs_for_mode in diffs.items():
print('mode:', mode)
for name, diff in diffs_for_mode.items():
print(name, ':', diff)
# print('differences between float and quantized')
# print_diffs(diff_of_quant)
# print('----------------------')
# print('differences between graph mode and eager mode')
# print_diffs(diff_from_eager)
# print('----------------------')
@skip_if_no_torchvision
@skipIfNoFBGEMM
@unittest.skip("TODO: Test is always failing - https://github.com/pytorch/pytorch/issues/54979")
def test_resnet18_ddp(self):
from torchvision import models
from torchvision.models import quantization as quantized_models
eager_quantizable_model = quantized_models.__dict__[name](pretrained=False, quantize=False).eval().float() # noqa: F821
model = models.__dict__[name](pretrained=False).eval().float() # noqa: F821
self._test_model_impl(
'ddp', 'resnet18', model, eager_quantizable_model)
@override_qengines
def test_qat_embeddingbag_linear(self):
for device in get_supported_device_types():
class EmbeddingBagLinear(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, mode='sum')
self.linear = torch.nn.Linear(12, 1).to(dtype=torch.float)
def forward(self, input: torch.Tensor, offsets: Optional[torch.Tensor] = None,
per_sample_weights: Optional[torch.Tensor] = None):
x = self.emb(input, offsets, per_sample_weights)
x = self.linear(x)
return x
qengine = torch.backends.quantized.engine
qconfig_dict = QConfigMapping() \
.set_global(get_default_qat_qconfig(qengine)) \
.set_object_type(torch.nn.EmbeddingBag, default_embedding_qat_qconfig)
train_indices = [[torch.randint(0, 10, (12, 12), device=device), torch.randn((12, 1), device=device)] for _ in range(2)]
eval_output = [[torch.randint(0, 10, (12, 1), device=device)]]
model = EmbeddingBagLinear().to(device).train()
prepared_fx_model = prepare_qat_fx(model, qconfig_dict, example_inputs=(train_indices[0][0],))
test_only_train_fn(prepared_fx_model, train_indices)
quant_model = convert_fx(prepared_fx_model,
qconfig_mapping=qconfig_dict)
def checkQuantized(model):
# Make sure EmbeddingBag is now a quantized EmbeddingBag.
self.assertTrue(type(model.emb), nn.quantized.EmbeddingBag)
# Also test that Linear has been quantized.
self.assertTrue(type(model.linear), nnq.Linear)
test_only_eval_fn(model, eval_output)
self.checkScriptable(model, eval_output)
self.checkNoQconfig(model)
checkQuantized(quant_model)
@override_qengines
def test_qat_embedding_linear(self):
for device in get_supported_device_types():
class EmbeddingLinear(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12)
self.linear = torch.nn.Linear(12, 1).to(dtype=torch.float)
def forward(self, input: torch.Tensor):
x = torch.sum(self.emb(input), dim=1)
x = self.linear(x)
return x
qengine = torch.backends.quantized.engine
qconfig_dict = {"": get_default_qat_qconfig(qengine),
"object_type": [(torch.nn.Embedding, default_embedding_qat_qconfig)]}
train_indices = [[torch.randint(0, 10, (12, 12)), torch.randn((12, 1))] for _ in range(2)]
eval_output = [[torch.randint(0, 10, (12, 1))]]
model = EmbeddingLinear().train()
prepared_fx_model = prepare_qat_fx(model, qconfig_dict, example_inputs=(train_indices[0][0],))
test_only_train_fn(prepared_fx_model, train_indices)
quant_model = convert_fx(prepared_fx_model,
qconfig_mapping=qconfig_dict)
def checkQuantized(model):
# Make sure EmbeddingBag is now a quantized EmbeddingBag.
self.assertTrue(type(model.emb), nn.quantized.Embedding)
# Also test that Linear has been quantized.
self.assertTrue(type(model.linear), nnq.Linear)
test_only_eval_fn(model, eval_output)
self.checkScriptable(model, eval_output)
self.checkNoQconfig(model)
checkQuantized(quant_model)
@given(
device=st.sampled_from(
["cpu", "cuda"] if torch.cuda.is_available() else ["cpu"]
)
)
@settings(deadline=None)
@override_qengines
def test_qat_functional_linear(self, device):
if torch.backends.quantized.engine not in ('fbgemm', 'qnnpack'):
return
class Linear(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w = torch.ones(5, 5)
self.b = torch.zeros(5)
def forward(self, x):
return torch.nn.functional.linear(x, self.w, self.b)
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mods1 = torch.nn.Sequential(Linear(), Linear())
self.mods2 = Linear()
def forward(self, x):
x = self.mods1(x)
x = self.mods2(x)
return x
model = M().train()
ref_fake_quant = FakeQuantize.with_args(
observer=MovingAverageMinMaxObserver,
quant_min=0,
quant_max=255,
dtype=torch.quint8,
reduce_range=False,
)
ref_weight_fake_quant = FakeQuantize.with_args(
observer=MovingAverageMinMaxObserver,
quant_min=-128,
quant_max=127,
dtype=torch.qint8,
reduce_range=False,
)
ref_qat_qconfig = QConfig(
activation=ref_fake_quant, weight=ref_weight_fake_quant
)
qconfig_dict = {"": ref_qat_qconfig}
example_inputs = (torch.randn(1, 5),)
prepared_ref = prepare_qat_fx(model, qconfig_dict, example_inputs=example_inputs)
custom_fake_quant = FusedMovingAvgObsFakeQuantize.with_args(
observer=MovingAverageMinMaxObserver,
quant_min=0,
quant_max=255,
dtype=torch.quint8,
reduce_range=False,
)
custom_weight_fake_quant = FusedMovingAvgObsFakeQuantize.with_args(
observer=MovingAverageMinMaxObserver,
quant_min=-128,
quant_max=127,
dtype=torch.qint8,
reduce_range=False,
)
custom_qconfig = QConfig(
activation=custom_fake_quant, weight=custom_weight_fake_quant
)
custom_qconfig_dict = {"": custom_qconfig}
prepared = prepare_qat_fx(model, custom_qconfig_dict, example_inputs=example_inputs)
prepared.to(device)
prepared_ref.to(device)
prepared.apply(torch.ao.quantization.disable_fake_quant)
prepared.apply(torch.ao.quantization.disable_observer)
prepared_ref.apply(torch.ao.quantization.disable_fake_quant)
prepared_ref.apply(torch.ao.quantization.disable_observer)
inp = torch.randn(5, 5, device=device, requires_grad=True)
for i in range(10):
if i == 2:
prepared.apply(torch.ao.quantization.enable_observer)
prepared_ref.apply(torch.ao.quantization.enable_observer)
if i == 4:
prepared.apply(torch.ao.quantization.enable_fake_quant)
prepared_ref.apply(torch.ao.quantization.enable_fake_quant)
inp = torch.randn(5, 5, device=device, requires_grad=True)
out_ref = prepared_ref(inp)
out = prepared(inp)
torch.testing.assert_close(out, out_ref)
# try backward pass
labels = torch.randn(5, 5, device=device)
loss = (out - labels).sum()
grad = torch.autograd.grad(loss, [inp])
loss_ref = (out_ref - labels).sum()
grad_ref = torch.autograd.grad(loss_ref, [inp])
torch.testing.assert_close(grad[0], grad_ref[0])
if 'fbgemm' in torch.backends.quantized.supported_engines:
# During the lowering step in convert, fold_weight calls quantized::linear_prepack
# which doesn't support QuantizedCuda backend
prepared.cpu()
prepared_ref.cpu()
converted = convert_fx(prepared)
converted_ref = convert_fx(prepared_ref)
inp = torch.rand(5, 5)
out = converted(inp)
out_ref = converted_ref(inp)
torch.testing.assert_close(out, out_ref)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_quantization.py TESTNAME\n\n"
"instead.")
| TestQuantizeFxModels |
python | huggingface__transformers | src/transformers/models/nystromformer/modeling_nystromformer.py | {
"start": 1401,
"end": 4073
} | class ____(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings + 2, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2, persistent=False
)
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device),
persistent=False,
)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
# Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
# when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
# issue #5664
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
| NystromformerEmbeddings |
python | pola-rs__polars | py-polars/src/polars/series/series.py | {
"start": 4599,
"end": 273868
} | class ____:
"""
A Series represents a single column in a Polars DataFrame.
Parameters
----------
name : str, default None
Name of the Series. Will be used as a column name when used in a DataFrame.
When not specified, name is set to an empty string.
values : ArrayLike, default None
One-dimensional data in various forms. Supported are: Sequence, Series,
pyarrow Array, and numpy ndarray.
dtype : DataType, default None
Data type of the resulting Series. If set to `None` (default), the data type is
inferred from the `values` input. The strategy for data type inference depends
on the `strict` parameter:
- If `strict` is set to True (default), the inferred data type is equal to the
first non-null value, or `Null` if all values are null.
- If `strict` is set to False, the inferred data type is the supertype of the
values, or :class:`Object` if no supertype can be found. **WARNING**: A full
pass over the values is required to determine the supertype.
- If no values were passed, the resulting data type is :class:`Null`.
strict : bool, default True
Throw an error if any value does not exactly match the given or inferred data
type. If set to `False`, values that do not match the data type are cast to
that data type or, if casting is not possible, set to null instead.
nan_to_null : bool, default False
In case a numpy array is used to create this Series, indicate how to deal
with np.nan values. (This parameter is a no-op on non-numpy data).
Examples
--------
Constructing a Series by specifying name and values positionally:
>>> s = pl.Series("a", [1, 2, 3])
>>> s
shape: (3,)
Series: 'a' [i64]
[
1
2
3
]
Notice that the dtype is automatically inferred as a polars Int64:
>>> s.dtype
Int64
Constructing a Series with a specific dtype:
>>> s2 = pl.Series("a", [1, 2, 3], dtype=pl.Float32)
>>> s2
shape: (3,)
Series: 'a' [f32]
[
1.0
2.0
3.0
]
It is possible to construct a Series with values as the first positional argument.
This syntax considered an anti-pattern, but it can be useful in certain
scenarios. You must specify any other arguments through keywords.
>>> s3 = pl.Series([1, 2, 3])
>>> s3
shape: (3,)
Series: '' [i64]
[
1
2
3
]
"""
# NOTE: This `= None` is needed to generate the docs with sphinx_accessor.
_s: PySeries = None # type: ignore[assignment]
_accessors: ClassVar[set[str]] = {
"arr",
"bin",
"cat",
"dt",
"ext",
"list",
"plot",
"str",
"struct",
}
def __init__(
self,
name: str | ArrayLike | None = None,
values: ArrayLike | None = None,
dtype: PolarsDataType | None = None,
*,
strict: bool = True,
nan_to_null: bool = False,
) -> None:
# If 'Unknown' treat as None to trigger type inference
if dtype == Unknown:
dtype = None
elif dtype is not None and not is_polars_dtype(dtype):
dtype = parse_into_dtype(dtype)
# Handle case where values are passed as the first argument
original_name: str | None = None
if name is None:
name = ""
elif isinstance(name, str):
original_name = name
else:
if values is None:
values = name
name = ""
else:
msg = "Series name must be a string"
raise TypeError(msg)
if isinstance(values, Sequence):
self._s = sequence_to_pyseries(
name,
values,
dtype=dtype,
strict=strict,
nan_to_null=nan_to_null,
)
elif values is None:
self._s = sequence_to_pyseries(name, [], dtype=dtype)
elif _check_for_numpy(values) and isinstance(values, np.ndarray):
self._s = numpy_to_pyseries(
name, values, strict=strict, nan_to_null=nan_to_null
)
if values.dtype.type in [np.datetime64, np.timedelta64]:
# cast to appropriate dtype, handling NaT values
input_dtype = _resolve_temporal_dtype(None, values.dtype)
dtype = _resolve_temporal_dtype(dtype, values.dtype)
if dtype is not None:
self._s = (
# `values.dtype` has already been validated in
# `numpy_to_pyseries`, so `input_dtype` can't be `None`
self.cast(input_dtype, strict=False) # type: ignore[arg-type]
.cast(dtype)
.scatter(np.argwhere(np.isnat(values)).flatten(), None)
._s
)
return
if dtype is not None:
self._s = self.cast(dtype, strict=strict)._s
elif _check_for_torch(values) and isinstance(values, torch.Tensor):
self._s = numpy_to_pyseries(
name, values.numpy(force=False), strict=strict, nan_to_null=nan_to_null
)
if dtype is not None:
self._s = self.cast(dtype, strict=strict)._s
elif _check_for_pyarrow(values) and isinstance(
values, (pa.Array, pa.ChunkedArray)
):
self._s = arrow_to_pyseries(name, values, dtype=dtype, strict=strict)
elif _check_for_pandas(values) and isinstance(
values, (pd.Series, pd.Index, pd.DatetimeIndex)
):
self._s = pandas_to_pyseries(name, values, dtype=dtype, strict=strict)
elif not hasattr(values, "__arrow_c_stream__") and _is_generator(values):
self._s = iterable_to_pyseries(name, values, dtype=dtype, strict=strict)
elif isinstance(values, Series):
self._s = series_to_pyseries(
original_name, values, dtype=dtype, strict=strict
)
elif isinstance(values, pl.DataFrame):
self._s = dataframe_to_pyseries(
original_name, values, dtype=dtype, strict=strict
)
elif hasattr(values, "__arrow_c_array__"):
self._s = PySeries.from_arrow_c_array(values)
elif hasattr(values, "__arrow_c_stream__"):
self._s = PySeries.from_arrow_c_stream(values)
else:
msg = (
f"Series constructor called with unsupported type {type(values).__name__!r}"
" for the `values` parameter"
)
raise TypeError(msg)
@classmethod
def _from_pyseries(cls, pyseries: PySeries) -> Self:
series = cls.__new__(cls)
series._s = pyseries
return series
@classmethod
@deprecated(
"`_import_from_c` is deprecated; use `_import_arrow_from_c` instead. If "
"you are using an extension, please compile it with the latest 'pyo3-polars'"
)
def _import_from_c(cls, name: str, pointers: list[tuple[int, int]]) -> Self:
# `_import_from_c` was deprecated in 1.3
return cls._from_pyseries(PySeries._import_arrow_from_c(name, pointers))
@classmethod
def _import_arrow_from_c(cls, name: str, pointers: list[tuple[int, int]]) -> Self:
"""
Construct a Series from Arrows C interface.
Parameters
----------
name
The name that should be given to the `Series`.
pointers
A list with tuples containing two entries:
- The raw pointer to a C ArrowArray struct
- The raw pointer to a C ArrowSchema struct
Warnings
--------
This will read the `array` pointer without moving it. The host process should
garbage collect the heap pointer, but not its contents.
"""
return cls._from_pyseries(PySeries._import_arrow_from_c(name, pointers))
@classmethod
def _import(cls, pointer: int) -> Self:
return cls._from_pyseries(PySeries._import(pointer))
def _export_arrow_to_c(self, out_ptr: int, out_schema_ptr: int) -> None:
"""
Export to a C ArrowArray and C ArrowSchema struct, given their pointers.
Parameters
----------
out_ptr: int
The raw pointer to a C ArrowArray struct.
out_schema_ptr: int (optional)
The raw pointer to a C ArrowSchema struct.
Notes
-----
The series should only contain a single chunk. If you want to export all chunks,
first call `Series.get_chunks` to give you a list of chunks.
Warnings
--------
* Safety: This function will write to the pointers given in `out_ptr`
and `out_schema_ptr` and thus is highly unsafe.
* Leaking: If you don't pass the ArrowArray struct to a consumer,
array memory will leak. This is a low-level function intended for
expert users.
"""
self._s._export_arrow_to_c(out_ptr, out_schema_ptr)
def _get_buffer_info(self) -> BufferInfo:
"""
Return pointer, offset, and length information about the underlying buffer.
Returns
-------
tuple of ints
Tuple of the form (pointer, offset, length)
Raises
------
TypeError
If the `Series` data type is not physical.
ComputeError
If the `Series` contains multiple chunks.
Notes
-----
This method is mainly intended for use with the dataframe interchange protocol.
"""
return self._s._get_buffer_info()
def _get_buffers(self) -> SeriesBuffers:
"""
Return the underlying values, validity, and offsets buffers as Series.
The values buffer always exists.
The validity buffer may not exist if the column contains no null values.
The offsets buffer only exists for Series of data type `String` and `List`.
Returns
-------
dict
Dictionary with `"values"`, `"validity"`, and `"offsets"` keys mapping
to the corresponding buffer or `None` if the buffer doesn't exist.
Warnings
--------
The underlying buffers for `String` Series cannot be represented in this
format. Instead, the buffers are converted to a values and offsets buffer.
Notes
-----
This method is mainly intended for use with the dataframe interchange protocol.
"""
buffers = self._s._get_buffers()
keys = ("values", "validity", "offsets")
return { # type: ignore[return-value]
k: self._from_pyseries(b) if b is not None else b
for k, b in zip(keys, buffers)
}
@classmethod
def _from_buffer(
cls, dtype: PolarsDataType, buffer_info: BufferInfo, owner: Any
) -> Self:
"""
Construct a Series from information about its underlying buffer.
Parameters
----------
dtype
The data type of the buffer.
Must be a physical type (integer, float, or boolean).
buffer_info
Tuple containing buffer information in the form `(pointer, offset, length)`.
owner
The object owning the buffer.
Returns
-------
Series
Raises
------
TypeError
When the given `dtype` is not supported.
Notes
-----
This method is mainly intended for use with the dataframe interchange protocol.
"""
return cls._from_pyseries(PySeries._from_buffer(dtype, buffer_info, owner))
@classmethod
def _from_buffers(
cls,
dtype: PolarsDataType,
data: Series | Sequence[Series],
validity: Series | None = None,
) -> Self:
"""
Construct a Series from information about its underlying buffers.
Parameters
----------
dtype
The data type of the resulting Series.
data
Buffers describing the data. For most data types, this is a single Series of
the physical data type of `dtype`. Some data types require multiple buffers:
- `String`: A data buffer of type `UInt8` and an offsets buffer
of type `Int64`. Note that this does not match how the data
is represented internally and data copy is required to construct
the Series.
validity
Validity buffer. If specified, must be a Series of data type `Boolean`.
Returns
-------
Series
Raises
------
TypeError
When the given `dtype` is not supported or the other inputs do not match
the requirements for constructing a Series of the given `dtype`.
Warnings
--------
Constructing a `String` Series requires specifying a values and offsets buffer,
which does not match the actual underlying buffers. The values and offsets
buffer are converted into the actual buffers, which copies data.
Notes
-----
This method is mainly intended for use with the dataframe interchange protocol.
"""
if isinstance(data, Series):
data_lst = [data._s]
else:
data_lst = [s._s for s in data]
validity_series: plr.PySeries | None = None
if validity is not None:
validity_series = validity._s
return cls._from_pyseries(
PySeries._from_buffers(dtype, data_lst, validity_series)
)
@staticmethod
def _newest_compat_level() -> int:
"""
Get the newest supported compat level.
This is for pyo3-polars.
"""
return CompatLevel._newest()._version
@property
def dtype(self) -> DataType:
"""
Get the data type of this Series.
Examples
--------
>>> s = pl.Series("a", [1, 2, 3])
>>> s.dtype
Int64
"""
return self._s.dtype()
@property
def flags(self) -> dict[str, bool]:
"""
Get flags that are set on the Series.
Examples
--------
>>> s = pl.Series("a", [1, 2, 3])
>>> s.flags
{'SORTED_ASC': False, 'SORTED_DESC': False}
"""
out = {
"SORTED_ASC": self._s.is_sorted_ascending_flag(),
"SORTED_DESC": self._s.is_sorted_descending_flag(),
}
if self.dtype == List:
out["FAST_EXPLODE"] = self._s.can_fast_explode_flag()
return out
@property
def name(self) -> str:
"""
Get the name of this Series.
Examples
--------
>>> s = pl.Series("a", [1, 2, 3])
>>> s.name
'a'
"""
return self._s.name()
@property
def shape(self) -> tuple[int]:
"""
Shape of this Series.
Examples
--------
>>> s = pl.Series("a", [1, 2, 3])
>>> s.shape
(3,)
"""
return (self._s.len(),)
def __bool__(self) -> NoReturn:
msg = (
"the truth value of a Series is ambiguous"
"\n\n"
"Here are some things you might want to try:\n"
"- instead of `if s`, use `if not s.is_empty()`\n"
"- instead of `s1 and s2`, use `s1 & s2`\n"
"- instead of `s1 or s2`, use `s1 | s2`\n"
"- instead of `s in [y, z]`, use `s.is_in([y, z])`\n"
)
raise TypeError(msg)
def __getstate__(self) -> bytes:
return self._s.__getstate__()
def __setstate__(self, state: bytes) -> None:
self._s = Series()._s # Initialize with a dummy
self._s.__setstate__(state)
def __str__(self) -> str:
s_repr: str = self._s.as_str()
return s_repr.replace("Series", f"{self.__class__.__name__}", 1)
def __repr__(self) -> str:
return self.__str__()
def __len__(self) -> int:
return self.len()
@overload
def __and__(self, other: Expr) -> Expr: ...
@overload
def __and__(self, other: Any) -> Series: ...
def __and__(self, other: Any) -> Expr | Series:
if isinstance(other, pl.Expr):
return F.lit(self) & other
if not isinstance(other, Series):
other = Series([other])
return self._from_pyseries(self._s.bitand(other._s))
@overload
def __rand__(self, other: Expr) -> Expr: ...
@overload
def __rand__(self, other: Any) -> Series: ...
def __rand__(self, other: Any) -> Expr | Series:
if isinstance(other, pl.Expr):
return other & F.lit(self)
if not isinstance(other, Series):
other = Series([other])
return other & self
@overload
def __or__(self, other: Expr) -> Expr: ...
@overload
def __or__(self, other: Any) -> Series: ...
def __or__(self, other: Any) -> Expr | Series:
if isinstance(other, pl.Expr):
return F.lit(self) | other
if not isinstance(other, Series):
other = Series([other])
return self._from_pyseries(self._s.bitor(other._s))
@overload
def __ror__(self, other: Expr) -> Expr: ...
@overload
def __ror__(self, other: Any) -> Series: ...
def __ror__(self, other: Any) -> Expr | Series:
if isinstance(other, pl.Expr):
return other | F.lit(self)
if not isinstance(other, Series):
other = Series([other])
return other | self
@overload
def __xor__(self, other: Expr) -> Expr: ...
@overload
def __xor__(self, other: Any) -> Series: ...
def __xor__(self, other: Any) -> Expr | Series:
if isinstance(other, pl.Expr):
return F.lit(self) ^ other
if not isinstance(other, Series):
other = Series([other])
return self._from_pyseries(self._s.bitxor(other._s))
@overload
def __rxor__(self, other: Expr) -> Expr: ...
@overload
def __rxor__(self, other: Any) -> Series: ...
def __rxor__(self, other: Any) -> Expr | Series:
if isinstance(other, pl.Expr):
return other ^ F.lit(self)
if not isinstance(other, Series):
other = Series([other])
return other ^ self
def _comp(self, other: Any, op: ComparisonOperator) -> Series:
# special edge-case; boolean broadcast series (eq/neq) is its own result
if self.dtype == Boolean and isinstance(other, bool) and op in ("eq", "neq"):
if (other is True and op == "eq") or (other is False and op == "neq"):
return self.clone()
elif (other is False and op == "eq") or (other is True and op == "neq"):
return ~self
elif isinstance(other, float) and self.dtype.is_integer():
# require upcast when comparing int series to float value
self = self.cast(Float64)
f = get_ffi_func(op + "_<>", Float64, self._s)
assert f is not None
return self._from_pyseries(f(other))
elif isinstance(other, datetime):
if self.dtype == Date:
# require upcast when comparing date series to datetime
self = self.cast(Datetime("us"))
time_unit = "us"
elif self.dtype == Datetime:
# Use local time zone info
time_zone = self.dtype.time_zone # type: ignore[attr-defined]
if str(other.tzinfo) != str(time_zone):
msg = f"datetime time zone {other.tzinfo!r} does not match Series timezone {time_zone!r}"
raise TypeError(msg)
time_unit = self.dtype.time_unit # type: ignore[attr-defined]
else:
msg = f"cannot compare datetime.datetime to Series of type {self.dtype}"
raise ValueError(msg)
ts = datetime_to_int(other, time_unit) # type: ignore[arg-type]
f = get_ffi_func(op + "_<>", Int64, self._s)
assert f is not None
return self._from_pyseries(f(ts))
elif isinstance(other, time) and self.dtype == Time:
d = time_to_int(other)
f = get_ffi_func(op + "_<>", Int64, self._s)
assert f is not None
return self._from_pyseries(f(d))
elif isinstance(other, timedelta) and self.dtype == Duration:
time_unit = self.dtype.time_unit # type: ignore[attr-defined]
td = timedelta_to_int(other, time_unit) # type: ignore[arg-type]
f = get_ffi_func(op + "_<>", Int64, self._s)
assert f is not None
return self._from_pyseries(f(td))
elif self.dtype in [Categorical, Enum] and not isinstance(other, Series):
other = Series([other])
elif isinstance(other, date) and self.dtype == Date:
d = date_to_int(other)
f = get_ffi_func(op + "_<>", Int32, self._s)
assert f is not None
return self._from_pyseries(f(d))
if isinstance(other, Sequence) and not isinstance(other, str):
if self.dtype in (List, Array):
other = [other]
other = Series("", other)
if other.dtype == Null:
other.cast(self.dtype)
if isinstance(other, Series):
return self._from_pyseries(getattr(self._s, op)(other._s))
try:
f = get_ffi_func(op + "_<>", self.dtype, self._s)
except NotImplementedError:
f = None
if f is None:
msg = f"Series of type {self.dtype} does not have {op} operator"
raise NotImplementedError(msg)
if other is not None:
other = maybe_cast(other, self.dtype)
return self._from_pyseries(f(other))
@overload # type: ignore[override]
def __eq__(self, other: Expr) -> Expr: ... # type: ignore[overload-overlap]
@overload
def __eq__(self, other: object) -> Series: ...
def __eq__(self, other: object) -> Series | Expr:
warn_null_comparison(other)
if isinstance(other, pl.Expr):
return F.lit(self).__eq__(other)
return self._comp(other, "eq")
@overload # type: ignore[override]
def __ne__(self, other: Expr) -> Expr: ... # type: ignore[overload-overlap]
@overload
def __ne__(self, other: object) -> Series: ...
def __ne__(self, other: object) -> Series | Expr:
warn_null_comparison(other)
if isinstance(other, pl.Expr):
return F.lit(self).__ne__(other)
return self._comp(other, "neq")
@overload
def __gt__(self, other: Expr) -> Expr: ...
@overload
def __gt__(self, other: Any) -> Series: ...
def __gt__(self, other: Any) -> Series | Expr:
warn_null_comparison(other)
if isinstance(other, pl.Expr):
return F.lit(self).__gt__(other)
return self._comp(other, "gt")
@overload
def __lt__(self, other: Expr) -> Expr: ...
@overload
def __lt__(self, other: Any) -> Series: ...
def __lt__(self, other: Any) -> Series | Expr:
warn_null_comparison(other)
if isinstance(other, pl.Expr):
return F.lit(self).__lt__(other)
return self._comp(other, "lt")
@overload
def __ge__(self, other: Expr) -> Expr: ...
@overload
def __ge__(self, other: Any) -> Series: ...
def __ge__(self, other: Any) -> Series | Expr:
warn_null_comparison(other)
if isinstance(other, pl.Expr):
return F.lit(self).__ge__(other)
return self._comp(other, "gt_eq")
@overload
def __le__(self, other: Expr) -> Expr: ...
@overload
def __le__(self, other: Any) -> Series: ...
def __le__(self, other: Any) -> Series | Expr:
warn_null_comparison(other)
if isinstance(other, pl.Expr):
return F.lit(self).__le__(other)
return self._comp(other, "lt_eq")
@overload
def le(self, other: Expr) -> Expr: ...
@overload
def le(self, other: Any) -> Series: ...
def le(self, other: Any) -> Series | Expr:
"""Method equivalent of operator expression `series <= other`."""
return self.__le__(other)
@overload
def lt(self, other: Expr) -> Expr: ...
@overload
def lt(self, other: Any) -> Series: ...
def lt(self, other: Any) -> Series | Expr:
"""Method equivalent of operator expression `series < other`."""
return self.__lt__(other)
@overload
def eq(self, other: Expr) -> Expr: ...
@overload
def eq(self, other: Any) -> Series: ...
def eq(self, other: Any) -> Series | Expr:
"""Method equivalent of operator expression `series == other`."""
return self.__eq__(other)
@overload
def eq_missing(self, other: Expr) -> Expr: ...
@overload
def eq_missing(self, other: Any) -> Series: ...
def eq_missing(self, other: Any) -> Series | Expr:
"""
Method equivalent of equality operator `series == other` where `None == None`.
This differs from the standard `eq` where null values are propagated.
Parameters
----------
other
A literal or expression value to compare with.
See Also
--------
ne_missing
eq
Examples
--------
>>> s1 = pl.Series("a", [333, 200, None])
>>> s2 = pl.Series("a", [100, 200, None])
>>> s1.eq(s2)
shape: (3,)
Series: 'a' [bool]
[
false
true
null
]
>>> s1.eq_missing(s2)
shape: (3,)
Series: 'a' [bool]
[
false
true
true
]
"""
if isinstance(other, pl.Expr):
return F.lit(self).eq_missing(other)
return self.to_frame().select(F.col(self.name).eq_missing(other)).to_series()
@overload
def ne(self, other: Expr) -> Expr: ...
@overload
def ne(self, other: Any) -> Series: ...
def ne(self, other: Any) -> Series | Expr:
"""Method equivalent of operator expression `series != other`."""
return self.__ne__(other)
@overload
def ne_missing(self, other: Expr) -> Expr: ...
@overload
def ne_missing(self, other: Any) -> Series: ...
def ne_missing(self, other: Any) -> Series | Expr:
"""
Method equivalent of equality operator `series != other` where `None == None`.
This differs from the standard `ne` where null values are propagated.
Parameters
----------
other
A literal or expression value to compare with.
See Also
--------
eq_missing
ne
Examples
--------
>>> s1 = pl.Series("a", [333, 200, None])
>>> s2 = pl.Series("a", [100, 200, None])
>>> s1.ne(s2)
shape: (3,)
Series: 'a' [bool]
[
true
false
null
]
>>> s1.ne_missing(s2)
shape: (3,)
Series: 'a' [bool]
[
true
false
false
]
"""
if isinstance(other, pl.Expr):
return F.lit(self).ne_missing(other)
return self.to_frame().select(F.col(self.name).ne_missing(other)).to_series()
@overload
def ge(self, other: Expr) -> Expr: ...
@overload
def ge(self, other: Any) -> Series: ...
def ge(self, other: Any) -> Series | Expr:
"""Method equivalent of operator expression `series >= other`."""
return self.__ge__(other)
@overload
def gt(self, other: Expr) -> Expr: ...
@overload
def gt(self, other: Any) -> Series: ...
def gt(self, other: Any) -> Series | Expr:
"""Method equivalent of operator expression `series > other`."""
return self.__gt__(other)
    def _arithmetic(self, other: Any, op_s: str, op_ffi: str) -> Self:
        # Shared driver behind the arithmetic dunders.
        #   op_s   — name of the PySeries method for Series<->Series ops (e.g. "add")
        #   op_ffi — per-dtype FFI template (e.g. "add_<>"); an "_rhs" suffix marks
        #            a reflected op where `other` is the left-hand operand.
        if isinstance(other, pl.Expr):
            # expand pl.lit, pl.datetime, pl.duration Exprs to compatible Series
            other = self.to_frame().select_seq(other).to_series()
        elif other is None:
            # None behaves as a one-element null Series.
            other = pl.Series("", [None])
        if isinstance(other, Series):
            return self._from_pyseries(getattr(self._s, op_s)(other._s))
        elif _check_for_numpy(other) and isinstance(other, np.ndarray):
            return self._from_pyseries(getattr(self._s, op_s)(Series(other)._s))
        elif (
            isinstance(other, (float, date, datetime, timedelta, str))
            and not self.dtype.is_float()
        ):
            # Scalar of a type that may require promoting this Series: wrap it in a
            # one-element series so dtype coercion happens on the Rust side.
            _s = sequence_to_pyseries(self.name, [other])
            if "rhs" in op_ffi:
                # Reflected op: the scalar is the left-hand operand.
                return self._from_pyseries(getattr(_s, op_s)(self._s))
            else:
                return self._from_pyseries(getattr(self._s, op_s)(_s))
        if self.dtype.is_decimal() and isinstance(other, (PyDecimal, int)):
            if isinstance(other, int):
                # Integers are lifted to Decimal(scale=0) to keep exact arithmetic.
                pyseries = sequence_to_pyseries(self.name, [other])
                _s = self._from_pyseries(pyseries).cast(Decimal(scale=0))._s
            else:
                _s = sequence_to_pyseries(self.name, [other], dtype=Decimal)
            if "rhs" in op_ffi:
                return self._from_pyseries(getattr(_s, op_s)(self._s))
            else:
                return self._from_pyseries(getattr(self._s, op_s)(_s))
        else:
            # Fallback: resolve a dtype-specialised FFI function for the scalar.
            other = maybe_cast(other, self.dtype)
            f = get_ffi_func(op_ffi, self.dtype, self._s)
            if f is None:
                msg = (
                    f"cannot do arithmetic with Series of dtype: {self.dtype!r} and argument"
                    f" of type: {type(other).__name__!r}"
                )
                raise TypeError(msg)
            return self._from_pyseries(f(other))
@overload
def __add__(self, other: DataFrame) -> DataFrame: ...
@overload
def __add__(self, other: Expr) -> Expr: ...
@overload
def __add__(self, other: Any) -> Self: ...
def __add__(self, other: Any) -> Series | DataFrame | Expr:
if isinstance(other, str):
other = Series("", [other])
elif isinstance(other, pl.DataFrame):
return other + self
elif isinstance(other, pl.Expr):
return F.lit(self) + other
if self.dtype.is_decimal() and isinstance(other, (float, int)):
return self.to_frame().select(F.col(self.name) + other).to_series()
return self._arithmetic(other, "add", "add_<>")
@overload
def __sub__(self, other: Expr) -> Expr: ...
@overload
def __sub__(self, other: Any) -> Self: ...
def __sub__(self, other: Any) -> Series | Expr:
if isinstance(other, pl.Expr):
return F.lit(self) - other
if self.dtype.is_decimal() and isinstance(other, (float, int)):
return self.to_frame().select(F.col(self.name) - other).to_series()
return self._arithmetic(other, "sub", "sub_<>")
def _recursive_cast_to_dtype(self, leaf_dtype: PolarsDataType) -> Series:
"""
Convert leaf dtype the to given primitive datatype.
This is equivalent to logic in DataType::cast_leaf() in Rust.
"""
def convert_to_primitive(dtype: PolarsDataType) -> PolarsDataType:
if isinstance(dtype, Array):
return Array(convert_to_primitive(dtype.inner), shape=dtype.shape)
if isinstance(dtype, List):
return List(convert_to_primitive(dtype.inner))
return leaf_dtype
return self.cast(convert_to_primitive(self.dtype))
    @overload
    def __truediv__(self, other: Expr) -> Expr: ...
    @overload
    def __truediv__(self, other: Any) -> Series: ...
    def __truediv__(self, other: Any) -> Series | Expr:
        # True division: `series / other`. Duration dtypes are allowed; other
        # temporal dtypes must be cast to integer first.
        if isinstance(other, pl.Expr):
            return F.lit(self) / other
        if self.dtype.is_temporal() and not isinstance(self.dtype, Duration):
            msg = "first cast to integer before dividing datelike dtypes"
            raise TypeError(msg)
        if isinstance(other, (int, float)) and (
            self.dtype.is_decimal() or isinstance(self.dtype, Duration)
        ):
            # Decimal/Duration scalar division goes through the expression engine.
            return self.to_frame().select(F.col(self.name) / other).to_series()
        # Rebind `self` to a Float64-leaf cast unless the dtype (or the other
        # operand's nested dtype) already yields float semantics under division.
        self = (
            self
            if (
                self.dtype.is_float()
                or self.dtype.is_decimal()
                or isinstance(self.dtype, (List, Array, Duration))
                or (
                    isinstance(other, Series) and isinstance(other.dtype, (List, Array))
                )
            )
            else self._recursive_cast_to_dtype(Float64())
        )
        return self._arithmetic(other, "div", "div_<>")
@overload
def __floordiv__(self, other: Expr) -> Expr: ...
@overload
def __floordiv__(self, other: Any) -> Series: ...
def __floordiv__(self, other: Any) -> Series | Expr:
if isinstance(other, pl.Expr):
return F.lit(self) // other
if self.dtype.is_temporal():
msg = "first cast to integer before dividing datelike dtypes"
raise TypeError(msg)
if self.dtype.is_decimal() and isinstance(other, (float, int)):
return self.to_frame().select(F.col(self.name) // other).to_series()
if not isinstance(other, pl.Expr):
other = F.lit(other)
return self.to_frame().select_seq(F.col(self.name) // other).to_series()
    def __invert__(self) -> Series:
        """Bitwise/logical NOT: `~series`; delegates to :meth:`not_`."""
        return self.not_()
@overload
def __mul__(self, other: Expr) -> Expr: ...
@overload
def __mul__(self, other: DataFrame) -> DataFrame: ...
@overload
def __mul__(self, other: Any) -> Series: ...
def __mul__(self, other: Any) -> Series | DataFrame | Expr:
if isinstance(other, pl.Expr):
return F.lit(self) * other
if self.dtype.is_temporal() and not isinstance(self.dtype, Duration):
msg = "first cast to integer before multiplying datelike dtypes"
raise TypeError(msg)
if isinstance(other, (int, float)) and (
self.dtype.is_decimal() or isinstance(self.dtype, Duration)
):
return self.to_frame().select(F.col(self.name) * other).to_series()
elif isinstance(other, pl.DataFrame):
return other * self
else:
return self._arithmetic(other, "mul", "mul_<>")
@overload
def __mod__(self, other: Expr) -> Expr: ...
@overload
def __mod__(self, other: Any) -> Series: ...
def __mod__(self, other: Any) -> Series | Expr:
if isinstance(other, pl.Expr):
return F.lit(self).__mod__(other)
if self.dtype.is_temporal():
msg = "first cast to integer before applying modulo on datelike dtypes"
raise TypeError(msg)
if self.dtype.is_decimal() and isinstance(other, (float, int)):
return self.to_frame().select(F.col(self.name) % other).to_series()
return self._arithmetic(other, "rem", "rem_<>")
def __rmod__(self, other: Any) -> Series:
if self.dtype.is_temporal():
msg = "first cast to integer before applying modulo on datelike dtypes"
raise TypeError(msg)
return self._arithmetic(other, "rem", "rem_<>_rhs")
def __radd__(self, other: Any) -> Series:
if isinstance(other, str) or (
isinstance(other, (int, float)) and self.dtype.is_decimal()
):
return self.to_frame().select(other + F.col(self.name)).to_series()
return self._arithmetic(other, "add", "add_<>_rhs")
def __rsub__(self, other: Any) -> Series:
if isinstance(other, (int, float)) and self.dtype.is_decimal():
return self.to_frame().select(other - F.col(self.name)).to_series()
return self._arithmetic(other, "sub", "sub_<>_rhs")
def __rtruediv__(self, other: Any) -> Series:
if self.dtype.is_temporal():
msg = "first cast to integer before dividing datelike dtypes"
raise TypeError(msg)
if self.dtype.is_float():
self.__rfloordiv__(other)
if isinstance(other, (int, float)) and self.dtype.is_decimal():
return self.to_frame().select(other / F.col(self.name)).to_series()
if isinstance(other, int):
other = float(other)
return self.cast(Float64).__rfloordiv__(other)
def __rfloordiv__(self, other: Any) -> Series:
if self.dtype.is_temporal():
msg = "first cast to integer before dividing datelike dtypes"
raise TypeError(msg)
return self._arithmetic(other, "div", "div_<>_rhs")
    def __rmul__(self, other: Any) -> Series:
        # Reflected multiplication: `other * series`.
        if self.dtype.is_temporal() and not isinstance(self.dtype, Duration):
            msg = "first cast to integer before multiplying datelike dtypes"
            raise TypeError(msg)
        if isinstance(other, (int, float)) and (
            self.dtype.is_decimal() or isinstance(self.dtype, Duration)
        ):
            return self.to_frame().select(other * F.col(self.name)).to_series()
        # NOTE(review): uses the plain "mul_<>" template rather than the "_rhs"
        # variant used by other reflected ops — presumably because multiplication
        # is commutative; confirm against the FFI op naming.
        return self._arithmetic(other, "mul", "mul_<>")
    def __pow__(self, exponent: int | float | Series) -> Series:
        """Power operator: `series ** exponent`; delegates to :meth:`pow`."""
        return self.pow(exponent)
def __rpow__(self, other: Any) -> Series:
return (
self.to_frame()
.select_seq((other ** F.col(self.name)).alias(self.name))
.to_series()
)
def __matmul__(self, other: Any) -> float | Series | None:
if isinstance(other, Sequence) or (
_check_for_numpy(other) and isinstance(other, np.ndarray)
):
other = Series(other)
# elif isinstance(other, pl.DataFrame):
# return other.__rmatmul__(self) # type: ignore[return-value]
return self.dot(other)
def __rmatmul__(self, other: Any) -> float | Series | None:
if isinstance(other, Sequence) or (
_check_for_numpy(other) and isinstance(other, np.ndarray)
):
other = Series(other)
return other.dot(self)
def __neg__(self) -> Series:
return self.to_frame().select_seq(-F.col(self.name)).to_series()
def __pos__(self) -> Series:
return self
    def __abs__(self) -> Series:
        """Absolute value: `abs(series)`; delegates to :meth:`abs`."""
        return self.abs()
    def __copy__(self) -> Self:
        """Shallow copy via :meth:`clone` (cheap; backed by shared buffers)."""
        return self.clone()
    def __deepcopy__(self, memo: None = None) -> Self:
        """Deep copy; same as :meth:`clone` (Series data is immutable)."""
        return self.clone()
def __contains__(self, item: Any) -> bool:
if item is None:
return self.has_nulls()
return self.implode().list.contains(item).item()
    def __iter__(self) -> Generator[Any]:
        """Iterate over the elements of this Series."""
        if self.dtype in (List, Array):
            # TODO: either make a change and return py-native list data here, or find
            # a faster way to return nested/List series; sequential 'get_index' calls
            # make this path a lot slower (~10x) than it needs to be.
            get_index = self._s.get_index
            for idx in range(self.len()):
                yield get_index(idx)
        else:
            # Convert in fixed-size slices to bound peak memory while amortising
            # the per-call conversion overhead.
            buffer_size = 25_000
            for offset in range(0, self.len(), buffer_size):
                yield from self.slice(offset, buffer_size).to_list()
@overload
def __getitem__(self, key: SingleIndexSelector) -> Any: ...
@overload
def __getitem__(self, key: MultiIndexSelector) -> Series: ...
def __getitem__(
self, key: SingleIndexSelector | MultiIndexSelector
) -> Any | Series:
"""
Get part of the Series as a new Series or scalar.
Parameters
----------
key
Row(s) to select.
Returns
-------
Series or scalar, depending on `key`.
Examples
--------
>>> s = pl.Series("a", [1, 4, 2])
>>> s[0]
1
>>> s[0:2]
shape: (2,)
Series: 'a' [i64]
[
1
4
]
"""
return get_series_item_by_key(self, key)
    def __setitem__(
        self,
        key: int | Series | np.ndarray[Any, Any] | Sequence[object] | tuple[object],
        value: Any,
    ) -> None:
        """Set the value(s) at `key` in place."""
        # do the single idx as first branch as those are likely in a tight loop
        if isinstance(key, int) and not isinstance(key, bool):
            self.scatter(key, value)
            return None
        elif isinstance(value, Sequence) and not isinstance(value, str):
            # Sequence values are only supported for numeric/temporal Series.
            if self.dtype.is_numeric() or self.dtype.is_temporal():
                self.scatter(key, value)  # type: ignore[arg-type]
                return None
            msg = (
                f"cannot set Series of dtype: {self.dtype!r} with list/tuple as value;"
                " use a scalar value"
            )
            raise TypeError(msg)
        if isinstance(key, Series):
            # Boolean mask, or integer index Series (UInt64 narrowed to UInt32).
            if key.dtype == Boolean:
                self._s = self.set(key, value)._s
            elif key.dtype == UInt64:
                self._s = self.scatter(key.cast(UInt32), value)._s
            elif key.dtype == UInt32:
                self._s = self.scatter(key, value)._s
        # TODO: implement for these types without casting to series
        elif _check_for_numpy(key) and isinstance(key, np.ndarray):
            if key.dtype == np.bool_:
                # boolean numpy mask
                self._s = self.scatter(np.argwhere(key)[:, 0], value)._s
            else:
                s = self._from_pyseries(
                    PySeries.new_u32("", np.array(key, np.uint32), _strict=True)
                )
                self.__setitem__(s, value)
        elif isinstance(key, (list, tuple)):
            # Lift python index collections to a UInt32 Series and recurse.
            s = self._from_pyseries(sequence_to_pyseries("", key, dtype=UInt32))
            self.__setitem__(s, value)
        else:
            msg = f'cannot use "{key!r}" for indexing'
            raise TypeError(msg)
    def __array__(
        self,
        dtype: npt.DTypeLike | None = None,
        copy: bool | None = None,  # noqa: FBT001
    ) -> np.ndarray[Any, Any]:
        """
        Return a NumPy ndarray with the given data type.
        This method ensures a Polars Series can be treated as a NumPy ndarray.
        It enables `np.asarray` and NumPy universal functions.
        See the NumPy documentation for more information:
        https://numpy.org/doc/stable/user/basics.interoperability.html#the-array-method
        See Also
        --------
        __array_ufunc__
        """
        # Cast String types to fixed-length string to support string ufuncs
        # TODO: Use variable-length strings instead when NumPy 2.0.0 comes out:
        # https://numpy.org/devdocs/reference/routines.dtypes.html#numpy.dtypes.StringDType
        if dtype is None and not self.has_nulls() and self.dtype == String:
            dtype = np.dtype("U")
        # Map NumPy's tri-state `copy` onto to_numpy's (writable, allow_copy) pair.
        if copy is None:
            writable, allow_copy = False, True
        elif copy is True:
            writable, allow_copy = True, True
        elif copy is False:
            writable, allow_copy = False, False
        else:
            msg = f"invalid input for `copy`: {copy!r}"
            raise TypeError(msg)
        arr = self.to_numpy(writable=writable, allow_copy=allow_copy)
        if dtype is not None and dtype != arr.dtype:
            if copy is False:
                # TODO: Only raise when data must be copied
                msg = f"copy not allowed: cast from {arr.dtype} to {dtype} prohibited"
                raise RuntimeError(msg)
            arr = arr.__array__(dtype)
        return arr
    def __array_ufunc__(
        self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any
    ) -> Series:
        """Numpy universal functions."""
        # A contiguous (single-chunk) buffer is required for the zero-copy view.
        if self._s.n_chunks() > 1:
            self._s.rechunk(in_place=True)
        s = self._s
        if method == "__call__":
            if ufunc.nout != 1:
                msg = "only ufuncs that return one 1D array are supported"
                raise NotImplementedError(msg)
            args: list[int | float | np.ndarray[Any, Any]] = []
            for arg in inputs:
                if isinstance(arg, (int, float, np.ndarray)):
                    args.append(arg)
                elif isinstance(arg, Series):
                    # Series inputs are passed as zero-copy views over their
                    # physical representation (rechunked if needed).
                    phys_arg = arg.to_physical()
                    if phys_arg._s.n_chunks() > 1:
                        phys_arg._s.rechunk(in_place=True)
                    args.append(phys_arg._s.to_numpy_view())  # type: ignore[arg-type]
                else:
                    msg = f"unsupported type {qualified_type_name(arg)!r} for {arg!r}"
                    raise TypeError(msg)
            # Get minimum dtype needed to be able to cast all input arguments to the
            # same dtype.
            dtype_char_minimum: str = np.result_type(*args).char
            # Get all possible output dtypes for ufunc.
            # Input dtypes and output dtypes seem to always match for ufunc.types,
            # so pick all the different output dtypes.
            dtypes_ufunc = [
                input_output_type[-1]
                for input_output_type in ufunc.types
                if supported_numpy_char_code(input_output_type[-1])
            ]
            # Get the first ufunc dtype from all possible ufunc dtypes for which
            # the input arguments can be safely cast to that ufunc dtype.
            for dtype_ufunc in dtypes_ufunc:
                if np.can_cast(dtype_char_minimum, dtype_ufunc):
                    dtype_char_minimum = dtype_ufunc
                    break
            # Override minimum dtype if requested.
            dtype_char = (
                np.dtype(kwargs.pop("dtype")).char
                if "dtype" in kwargs
                else dtype_char_minimum
            )
            # Only generalized ufuncs have a signature set:
            is_generalized_ufunc = bool(ufunc.signature)
            if is_generalized_ufunc:
                # Generalized ufuncs will operate on the whole array, so
                # missing data can corrupt the results.
                if self.has_nulls():
                    msg = "can't pass a Series with missing data to a generalized ufunc, as it might give unexpected results. See https://docs.pola.rs/user-guide/expressions/missing-data/ for suggestions on how to remove or fill in missing data."
                    raise ComputeError(msg)
                # If the input and output are the same size, e.g. "(n)->(n)" we
                # can allocate ourselves and save a copy. If they're different,
                # we let the ufunc do the allocation, since only it knows the
                # output size.
                assert ufunc.signature is not None  # pacify MyPy
                ufunc_input, ufunc_output = ufunc.signature.split("->")
                if ufunc_output == "()":
                    # If the result a scalar, just let the function do its
                    # thing, no need for any song and dance involving
                    # allocation:
                    return ufunc(*args, dtype=dtype_char, **kwargs)
                else:
                    allocate_output = ufunc_input == ufunc_output
            else:
                allocate_output = True
            f = get_ffi_func("apply_ufunc_<>", numpy_char_code_to_dtype(dtype_char), s)
            if f is None:
                msg = (
                    "could not find "
                    f"`apply_ufunc_{numpy_char_code_to_dtype(dtype_char)}`"
                )
                raise NotImplementedError(msg)
            # The backend drives the ufunc, optionally pre-allocating the output.
            series = f(
                lambda out: ufunc(*args, out=out, dtype=dtype_char, **kwargs),
                allocate_output,
            )
            result = self._from_pyseries(series)
            if is_generalized_ufunc:
                # In this case we've disallowed passing in missing data, so no
                # further processing is needed.
                return result
            # We're using a regular ufunc, that operates value by value. That
            # means we allowed missing data in the input, so filter it out:
            validity_mask = self.is_not_null()
            for arg in inputs:
                if isinstance(arg, Series):
                    validity_mask &= arg.is_not_null()
            return (
                result.to_frame()
                .select(F.when(validity_mask).then(F.col(self.name)))
                .to_series(0)
            )
        else:
            msg = (
                "only `__call__` is implemented for numpy ufuncs on a Series, got "
                f"`{method!r}`"
            )
            raise NotImplementedError(msg)
    def __arrow_c_stream__(self, requested_schema: object | None = None) -> object:
        """
        Export a Series via the Arrow PyCapsule Interface.
        https://arrow.apache.org/docs/dev/format/CDataInterface/PyCapsuleInterface.html

        Parameters
        ----------
        requested_schema
            Schema capsule requested by the consumer (per the Arrow PyCapsule
            spec); forwarded unchanged to the backend.
        """
        return self._s.__arrow_c_stream__(requested_schema)
    def _repr_html_(self) -> str:
        """Format output data in HTML for display in Jupyter Notebooks."""
        # Render through the single-column DataFrame HTML repr.
        return self.to_frame()._repr_html_(_from_series=True)
def item(self, index: int | None = None) -> Any:
"""
Return the Series as a scalar, or return the element at the given index.
If no index is provided, this is equivalent to `s[0]`, with a check
that the shape is (1,). With an index, this is equivalent to `s[index]`.
Examples
--------
>>> s1 = pl.Series("a", [1])
>>> s1.item()
1
>>> s2 = pl.Series("a", [9, 8, 7])
>>> s2.cum_sum().item(-1)
24
"""
if index is None:
if len(self) != 1:
msg = (
"can only call '.item()' if the Series is of length 1,"
f" or an explicit index is provided (Series is of length {len(self)})"
)
raise ValueError(msg)
return self._s.get_index(0)
return self._s.get_index_signed(index)
def estimated_size(self, unit: SizeUnit = "b") -> int | float:
"""
Return an estimation of the total (heap) allocated size of the Series.
Estimated size is given in the specified unit (bytes by default).
This estimation is the sum of the size of its buffers, validity, including
nested arrays. Multiple arrays may share buffers and bitmaps. Therefore, the
size of 2 arrays is not the sum of the sizes computed from this function. In
particular, [`StructArray`]'s size is an upper bound.
When an array is sliced, its allocated size remains constant because the buffer
unchanged. However, this function will yield a smaller number. This is because
this function returns the visible size of the buffer, not its total capacity.
FFI buffers are included in this estimation.
Notes
-----
For data with Object dtype, the estimated size only reports the pointer
size, which is a huge underestimation.
Parameters
----------
unit : {'b', 'kb', 'mb', 'gb', 'tb'}
Scale the returned size to the given unit.
Examples
--------
>>> s = pl.Series("values", list(range(1_000_000)), dtype=pl.UInt32)
>>> s.estimated_size()
4000000
>>> s.estimated_size("mb")
3.814697265625
"""
sz = self._s.estimated_size()
return scale_bytes(sz, unit)
    def sqrt(self) -> Series:
        # NOTE(review): docstring-only stub — the implementation appears to be
        # generated from the signature (expression dispatch); confirm.
        """
        Compute the square root of the elements.
        Syntactic sugar for
        >>> pl.Series([1, 2]) ** 0.5
        shape: (2,)
        Series: '' [f64]
        [
            1.0
            1.414214
        ]
        Examples
        --------
        >>> s = pl.Series([1, 2, 3])
        >>> s.sqrt()
        shape: (3,)
        Series: '' [f64]
        [
            1.0
            1.414214
            1.732051
        ]
        """
    def cbrt(self) -> Series:
        # NOTE(review): docstring-only stub — implementation presumably attached
        # via expression dispatch; confirm.
        """
        Compute the cube root of the elements.
        Optimization for
        >>> pl.Series([1, 2]) ** (1.0 / 3)
        shape: (2,)
        Series: '' [f64]
        [
            1.0
            1.259921
        ]
        Examples
        --------
        >>> s = pl.Series([1, 2, 3])
        >>> s.cbrt()
        shape: (3,)
        Series: '' [f64]
        [
            1.0
            1.259921
            1.44225
        ]
        """
    @overload
    def any(self, *, ignore_nulls: Literal[True] = ...) -> bool: ...
    @overload
    def any(self, *, ignore_nulls: bool) -> bool | None: ...
    def any(self, *, ignore_nulls: bool = True) -> bool | None:
        """
        Return whether any of the values in the column are `True`.
        Only works on columns of data type :class:`Boolean`.
        Parameters
        ----------
        ignore_nulls
            * If set to `True` (default), null values are ignored. If there
              are no non-null values, the output is `False`.
            * If set to `False`, `Kleene logic`_ is used to deal with nulls:
              if the column contains any null values and no `True` values,
              the output is `None`.
            .. _Kleene logic: https://en.wikipedia.org/wiki/Three-valued_logic
        Returns
        -------
        bool or None
        Examples
        --------
        >>> pl.Series([True, False]).any()
        True
        >>> pl.Series([False, False]).any()
        False
        >>> pl.Series([None, False]).any()
        False
        Enable Kleene logic by setting `ignore_nulls=False`.
        >>> pl.Series([None, False]).any(ignore_nulls=False)  # Returns None
        """
        # Null handling (Kleene logic) is delegated via the ignore_nulls flag.
        return self._s.any(ignore_nulls=ignore_nulls)
    @overload
    def all(self, *, ignore_nulls: Literal[True] = ...) -> bool: ...
    @overload
    def all(self, *, ignore_nulls: bool) -> bool | None: ...
    def all(self, *, ignore_nulls: bool = True) -> bool | None:
        """
        Return whether all values in the column are `True`.
        Only works on columns of data type :class:`Boolean`.
        Parameters
        ----------
        ignore_nulls
            * If set to `True` (default), null values are ignored. If there
              are no non-null values, the output is `True`.
            * If set to `False`, `Kleene logic`_ is used to deal with nulls:
              if the column contains any null values and no `False` values,
              the output is `None`.
            .. _Kleene logic: https://en.wikipedia.org/wiki/Three-valued_logic
        Returns
        -------
        bool or None
        Examples
        --------
        >>> pl.Series([True, True]).all()
        True
        >>> pl.Series([False, True]).all()
        False
        >>> pl.Series([None, True]).all()
        True
        Enable Kleene logic by setting `ignore_nulls=False`.
        >>> pl.Series([None, True]).all(ignore_nulls=False)  # Returns None
        """
        # Null handling (Kleene logic) is delegated via the ignore_nulls flag.
        return self._s.all(ignore_nulls=ignore_nulls)
    def log(self, base: float | Series = math.e) -> Series:
        # NOTE(review): docstring-only stub — implementation presumably attached
        # via expression dispatch; confirm.
        """
        Compute the logarithm to a given base.
        Examples
        --------
        >>> s = pl.Series([1, 2, 3])
        >>> s.log()
        shape: (3,)
        Series: '' [f64]
        [
            0.0
            0.693147
            1.098612
        ]
        """
    def log1p(self) -> Series:
        # NOTE(review): docstring-only stub — implementation presumably attached
        # via expression dispatch; confirm.
        """
        Compute the natural logarithm of the input array plus one, element-wise.
        Examples
        --------
        >>> s = pl.Series([1, 2, 3])
        >>> s.log1p()
        shape: (3,)
        Series: '' [f64]
        [
            0.693147
            1.098612
            1.386294
        ]
        """
    def log10(self) -> Series:
        # NOTE(review): docstring-only stub — implementation presumably attached
        # via expression dispatch; confirm.
        """
        Compute the base 10 logarithm of the input array, element-wise.
        Examples
        --------
        >>> s = pl.Series([10, 100, 1000])
        >>> s.log10()
        shape: (3,)
        Series: '' [f64]
        [
            1.0
            2.0
            3.0
        ]
        """
    def exp(self) -> Series:
        # NOTE(review): docstring-only stub — implementation presumably attached
        # via expression dispatch; confirm.
        """
        Compute the exponential, element-wise.
        Examples
        --------
        >>> s = pl.Series([1, 2, 3])
        >>> s.exp()
        shape: (3,)
        Series: '' [f64]
        [
            2.718282
            7.389056
            20.085537
        ]
        """
    def drop_nulls(self) -> Series:
        # NOTE(review): docstring-only stub — implementation presumably attached
        # via expression dispatch; confirm.
        """
        Drop all null values.
        The original order of the remaining elements is preserved.
        See Also
        --------
        drop_nans
        Notes
        -----
        A null value is not the same as a NaN value.
        To drop NaN values, use :func:`drop_nans`.
        Examples
        --------
        >>> s = pl.Series([1.0, None, 3.0, float("nan")])
        >>> s.drop_nulls()
        shape: (3,)
        Series: '' [f64]
        [
                1.0
                3.0
                NaN
        ]
        """
    def drop_nans(self) -> Series:
        # NOTE(review): docstring-only stub — implementation presumably attached
        # via expression dispatch; confirm.
        """
        Drop all floating point NaN values.
        The original order of the remaining elements is preserved.
        See Also
        --------
        drop_nulls
        Notes
        -----
        A NaN value is not the same as a null value.
        To drop null values, use :func:`drop_nulls`.
        Examples
        --------
        >>> s = pl.Series([1.0, None, 3.0, float("nan")])
        >>> s.drop_nans()
        shape: (3,)
        Series: '' [f64]
        [
                1.0
                null
                3.0
        ]
        """
def to_frame(self, name: str | None = None) -> DataFrame:
"""
Cast this Series to a DataFrame.
Parameters
----------
name
optionally name/rename the Series column in the new DataFrame.
Examples
--------
>>> s = pl.Series("a", [123, 456])
>>> df = s.to_frame()
>>> df
shape: (2, 1)
┌─────┐
│ a │
│ --- │
│ i64 │
╞═════╡
│ 123 │
│ 456 │
└─────┘
>>> df = s.to_frame("xyz")
>>> df
shape: (2, 1)
┌─────┐
│ xyz │
│ --- │
│ i64 │
╞═════╡
│ 123 │
│ 456 │
└─────┘
"""
if isinstance(name, str):
return wrap_df(PyDataFrame([self.rename(name)._s]))
return wrap_df(PyDataFrame([self._s]))
def describe(
self,
percentiles: Sequence[float] | float | None = (0.25, 0.50, 0.75),
interpolation: QuantileMethod = "nearest",
) -> DataFrame:
"""
Quick summary statistics of a Series.
Series with mixed datatypes will return summary statistics for the datatype of
the first value.
Parameters
----------
percentiles
One or more percentiles to include in the summary statistics (if the
Series has a numeric dtype). All values must be in the range `[0, 1]`.
interpolation : {'nearest', 'higher', 'lower', 'midpoint', 'linear', 'equiprobable'}
Interpolation method used when calculating percentiles.
Notes
-----
The median is included by default as the 50% percentile.
Returns
-------
DataFrame
Mapping with summary statistics of a Series.
Examples
--------
>>> s = pl.Series([1, 2, 3, 4, 5])
>>> s.describe()
shape: (9, 2)
┌────────────┬──────────┐
│ statistic ┆ value │
│ --- ┆ --- │
│ str ┆ f64 │
╞════════════╪══════════╡
│ count ┆ 5.0 │
│ null_count ┆ 0.0 │
│ mean ┆ 3.0 │
│ std ┆ 1.581139 │
│ min ┆ 1.0 │
│ 25% ┆ 2.0 │
│ 50% ┆ 3.0 │
│ 75% ┆ 4.0 │
│ max ┆ 5.0 │
└────────────┴──────────┘
Non-numeric data types may not have all statistics available.
>>> s = pl.Series(["aa", "aa", None, "bb", "cc"])
>>> s.describe()
shape: (4, 2)
┌────────────┬───────┐
│ statistic ┆ value │
│ --- ┆ --- │
│ str ┆ str │
╞════════════╪═══════╡
│ count ┆ 4 │
│ null_count ┆ 1 │
│ min ┆ aa │
│ max ┆ cc │
└────────────┴───────┘
""" # noqa: W505
stats = self.to_frame().describe(
percentiles=percentiles,
interpolation=interpolation,
)
stats.columns = ["statistic", "value"]
return stats.filter(F.col("value").is_not_null())
    def sum(self) -> int | float:
        """
        Reduce this Series to the sum value.
        Notes
        -----
        * Dtypes in {Int8, UInt8, Int16, UInt16} are cast to
          Int64 before summing to prevent overflow issues.
        * If there are no non-null values, then the output is `0`.
          If you would prefer empty sums to return `None`, you can
          use `s.sum() if s.count() else None` instead
          of `s.sum()`.
        Examples
        --------
        >>> s = pl.Series("a", [1, 2, 3])
        >>> s.sum()
        6
        """
        # Delegates to the underlying backend series.
        return self._s.sum()
    def mean(self) -> PythonLiteral | None:
        """
        Reduce this Series to the mean value.
        Examples
        --------
        >>> s = pl.Series("a", [1, 2, 3])
        >>> s.mean()
        2.0
        """
        # Delegates to the underlying backend series.
        return self._s.mean()
    def product(self) -> int | float:
        """
        Reduce this Series to the product value.
        Notes
        -----
        If there are no non-null values, then the output is `1`.
        If you would prefer empty products to return `None`, you can
        use `s.product() if s.count() else None` instead
        of `s.product()`.
        Examples
        --------
        >>> s = pl.Series("a", [1, 2, 3])
        >>> s.product()
        6
        """
        # Delegates to the underlying backend series.
        return self._s.product()
def pow(self, exponent: int | float | Series) -> Series:
"""
Raise to the power of the given exponent.
If the exponent is float, the result follows the dtype of exponent.
Otherwise, it follows dtype of base.
Parameters
----------
exponent
The exponent. Accepts Series input.
Examples
--------
Raising integers to positive integers results in integers:
>>> s = pl.Series("foo", [1, 2, 3, 4])
>>> s.pow(3)
shape: (4,)
Series: 'foo' [i64]
[
1
8
27
64
]
In order to raise integers to negative integers, you can cast either the
base or the exponent to float:
>>> s.pow(-3.0)
shape: (4,)
Series: 'foo' [f64]
[
1.0
0.125
0.037037
0.015625
]
"""
if _check_for_numpy(exponent) and isinstance(exponent, np.ndarray):
exponent = Series(exponent)
return self.to_frame().select_seq(F.col(self.name).pow(exponent)).to_series()
    def min(self) -> PythonLiteral | None:
        """
        Get the minimal value in this Series.
        Examples
        --------
        >>> s = pl.Series("a", [1, 2, 3])
        >>> s.min()
        1
        """
        # Delegates to the underlying backend series.
        return self._s.min()
    def max(self) -> PythonLiteral | None:
        """
        Get the maximum value in this Series.
        Examples
        --------
        >>> s = pl.Series("a", [1, 2, 3])
        >>> s.max()
        3
        """
        # Delegates to the underlying backend series.
        return self._s.max()
    def nan_max(self) -> int | float | date | datetime | timedelta | str:
        """
        Get maximum value, but propagate/poison encountered NaN values.
        This differs from numpy's `nanmax` as numpy defaults to propagating NaN values,
        whereas polars defaults to ignoring them.
        Examples
        --------
        >>> s = pl.Series("a", [1, 3, 4])
        >>> s.nan_max()
        4
        >>> s = pl.Series("a", [1.0, float("nan"), 4.0])
        >>> s.nan_max()
        nan
        """
        return self.to_frame().select_seq(F.col(self.name).nan_max()).item()
    def nan_min(self) -> int | float | date | datetime | timedelta | str:
        """
        Get minimum value, but propagate/poison encountered NaN values.
        This differs from numpy's `nanmin` as numpy defaults to propagating NaN values,
        whereas polars defaults to ignoring them.
        Examples
        --------
        >>> s = pl.Series("a", [1, 3, 4])
        >>> s.nan_min()
        1
        >>> s = pl.Series("a", [1.0, float("nan"), 4.0])
        >>> s.nan_min()
        nan
        """
        return self.to_frame().select_seq(F.col(self.name).nan_min()).item()
    def std(self, ddof: int = 1) -> float | timedelta | None:
        """
        Get the standard deviation of this Series.
        Parameters
        ----------
        ddof
            “Delta Degrees of Freedom”: the divisor used in the calculation is N - ddof,
            where N represents the number of elements.
            By default ddof is 1.
        Examples
        --------
        >>> s = pl.Series("a", [1, 2, 3])
        >>> s.std()
        1.0
        """
        # Delegates to the underlying backend series.
        return self._s.std(ddof)
    def var(self, ddof: int = 1) -> float | timedelta | None:
        """
        Get variance of this Series.
        Parameters
        ----------
        ddof
            “Delta Degrees of Freedom”: the divisor used in the calculation is N - ddof,
            where N represents the number of elements.
            By default ddof is 1.
        Examples
        --------
        >>> s = pl.Series("a", [1, 2, 3])
        >>> s.var()
        1.0
        """
        # Delegates to the underlying backend series.
        return self._s.var(ddof)
    def median(self) -> PythonLiteral | None:
        """
        Get the median of this Series.
        Examples
        --------
        >>> s = pl.Series("a", [1, 2, 3])
        >>> s.median()
        2.0
        """
        # Delegates to the underlying backend series.
        return self._s.median()
    def quantile(
        self, quantile: float, interpolation: QuantileMethod = "nearest"
    ) -> float | None:
        """
        Get the quantile value of this Series.
        Parameters
        ----------
        quantile
            Quantile between 0.0 and 1.0.
        interpolation : {'nearest', 'higher', 'lower', 'midpoint', 'linear', 'equiprobable'}
            Interpolation method.
        Examples
        --------
        >>> s = pl.Series("a", [1, 2, 3])
        >>> s.quantile(0.5)
        2.0
        """  # noqa: W505
        # Delegates to the underlying backend series.
        return self._s.quantile(quantile, interpolation)
    def to_dummies(
        self,
        *,
        separator: str = "_",
        drop_first: bool = False,
        drop_nulls: bool = False,
    ) -> DataFrame:
        """
        Get dummy/indicator variables.
        Parameters
        ----------
        separator
            Separator/delimiter used when generating column names.
        drop_first
            Remove the first category from the variable being encoded.
        drop_nulls
            If there are `None` values in the series, a `null` column is not generated
        Examples
        --------
        >>> s = pl.Series("a", [1, 2, 3])
        >>> s.to_dummies()
        shape: (3, 3)
        ┌─────┬─────┬─────┐
        │ a_1 ┆ a_2 ┆ a_3 │
        │ --- ┆ --- ┆ --- │
        │ u8  ┆ u8  ┆ u8  │
        ╞═════╪═════╪═════╡
        │ 1   ┆ 0   ┆ 0   │
        │ 0   ┆ 1   ┆ 0   │
        │ 0   ┆ 0   ┆ 1   │
        └─────┴─────┴─────┘
        >>> s.to_dummies(drop_first=True)
        shape: (3, 2)
        ┌─────┬─────┐
        │ a_2 ┆ a_3 │
        │ --- ┆ --- │
        │ u8  ┆ u8  │
        ╞═════╪═════╡
        │ 0   ┆ 0   │
        │ 1   ┆ 0   │
        │ 0   ┆ 1   │
        └─────┴─────┘
        """
        # Delegates to the backend, wrapping the resulting PyDataFrame.
        return wrap_df(self._s.to_dummies(separator, drop_first, drop_nulls))
    @unstable()
    def cut(
        self,
        breaks: Sequence[float],
        *,
        labels: Sequence[str] | None = None,
        left_closed: bool = False,
        include_breaks: bool = False,
    ) -> Series:
        # NOTE(review): docstring-only stub — implementation presumably attached
        # via expression dispatch; confirm.
        """
        Bin continuous values into discrete categories.
        .. warning::
            This functionality is considered **unstable**. It may be changed
            at any point without it being considered a breaking change.
        Parameters
        ----------
        breaks
            List of unique cut points.
        labels
            Names of the categories. The number of labels must be equal to the number
            of cut points plus one.
        left_closed
            Set the intervals to be left-closed instead of right-closed.
        include_breaks
            Include a column with the right endpoint of the bin each observation falls
            in. This will change the data type of the output from a
            :class:`Categorical` to a :class:`Struct`.
        Returns
        -------
        Series
            Series of data type :class:`Categorical` if `include_breaks` is set to
            `False` (default), otherwise a Series of data type :class:`Struct`.
        See Also
        --------
        qcut
        Examples
        --------
        Divide the column into three categories.
        >>> s = pl.Series("foo", [-2, -1, 0, 1, 2])
        >>> s.cut([-1, 1], labels=["a", "b", "c"])
        shape: (5,)
        Series: 'foo' [cat]
        [
                "a"
                "a"
                "b"
                "b"
                "c"
        ]
        Create a DataFrame with the breakpoint and category for each value.
        >>> cut = s.cut([-1, 1], include_breaks=True).alias("cut")
        >>> s.to_frame().with_columns(cut).unnest("cut")
        shape: (5, 3)
        ┌─────┬────────────┬────────────┐
        │ foo ┆ breakpoint ┆ category   │
        │ --- ┆ ---        ┆ ---        │
        │ i64 ┆ f64        ┆ cat        │
        ╞═════╪════════════╪════════════╡
        │ -2  ┆ -1.0       ┆ (-inf, -1] │
        │ -1  ┆ -1.0       ┆ (-inf, -1] │
        │ 0   ┆ 1.0        ┆ (-1, 1]    │
        │ 1   ┆ 1.0        ┆ (-1, 1]    │
        │ 2   ┆ inf        ┆ (1, inf]   │
        └─────┴────────────┴────────────┘
        """
@unstable()
def qcut(
self,
quantiles: Sequence[float] | int,
*,
labels: Sequence[str] | None = None,
left_closed: bool = False,
allow_duplicates: bool = False,
include_breaks: bool = False,
) -> Series:
"""
Bin continuous values into discrete categories based on their quantiles.
.. warning::
This functionality is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
Parameters
----------
quantiles
Either a list of quantile probabilities between 0 and 1 or a positive
integer determining the number of bins with uniform probability.
labels
Names of the categories. The number of labels must be equal to the number
of cut points plus one.
left_closed
Set the intervals to be left-closed instead of right-closed.
allow_duplicates
If set to `True`, duplicates in the resulting quantiles are dropped,
rather than raising a `DuplicateError`. This can happen even with unique
probabilities, depending on the data.
include_breaks
Include a column with the right endpoint of the bin each observation falls
in. This will change the data type of the output from a
:class:`Categorical` to a :class:`Struct`.
Returns
-------
Series
Series of data type :class:`Categorical` if `include_breaks` is set to
`False` (default), otherwise a Series of data type :class:`Struct`.
See Also
--------
cut
Examples
--------
Divide a column into three categories according to pre-defined quantile
probabilities.
>>> s = pl.Series("foo", [-2, -1, 0, 1, 2])
>>> s.qcut([0.25, 0.75], labels=["a", "b", "c"])
shape: (5,)
Series: 'foo' [cat]
[
"a"
"a"
"b"
"b"
"c"
]
Divide a column into two categories using uniform quantile probabilities.
>>> s.qcut(2, labels=["low", "high"], left_closed=True)
shape: (5,)
Series: 'foo' [cat]
[
"low"
"low"
"high"
"high"
"high"
]
Create a DataFrame with the breakpoint and category for each value.
>>> cut = s.qcut([0.25, 0.75], include_breaks=True).alias("cut")
>>> s.to_frame().with_columns(cut).unnest("cut")
shape: (5, 3)
┌─────┬────────────┬────────────┐
│ foo ┆ breakpoint ┆ category │
│ --- ┆ --- ┆ --- │
│ i64 ┆ f64 ┆ cat │
╞═════╪════════════╪════════════╡
│ -2 ┆ -1.0 ┆ (-inf, -1] │
│ -1 ┆ -1.0 ┆ (-inf, -1] │
│ 0 ┆ 1.0 ┆ (-1, 1] │
│ 1 ┆ 1.0 ┆ (-1, 1] │
│ 2 ┆ inf ┆ (1, inf] │
└─────┴────────────┴────────────┘
"""
    def rle(self) -> Series:
        """
        Compress the Series data using run-length encoding.
        Run-length encoding (RLE) encodes data by storing each *run* of identical values
        as a single value and its length.
        Returns
        -------
        Series
            Series of data type `Struct` with fields `len` of data type `UInt32`
            and `value` of the original data type.
        Examples
        --------
        >>> s = pl.Series("s", [1, 1, 2, 1, None, 1, 3, 3])
        >>> s.rle().struct.unnest()
        shape: (6, 2)
        ┌─────┬───────┐
        │ len ┆ value │
        │ --- ┆ --- │
        │ u32 ┆ i64 │
        ╞═════╪═══════╡
        │ 2 ┆ 1 │
        │ 1 ┆ 2 │
        │ 1 ┆ 1 │
        │ 1 ┆ null │
        │ 1 ┆ 1 │
        │ 2 ┆ 3 │
        └─────┴───────┘
        """
        # NOTE(review): body is docstring-only; implementation is presumably
        # generated by a class-level expression-dispatch decorator — confirm.
    def rle_id(self) -> Series:
        """
        Get a distinct integer ID for each run of identical values.
        The ID starts at 0 and increases by one each time the value of the column
        changes.
        Returns
        -------
        Series
            Series of data type `UInt32`.
        See Also
        --------
        rle
        Notes
        -----
        This functionality is especially useful for defining a new group for every time
        a column's value changes, rather than for every distinct value of that column.
        Examples
        --------
        >>> s = pl.Series("s", [1, 1, 2, 1, None, 1, 3, 3])
        >>> s.rle_id()
        shape: (8,)
        Series: 's' [u32]
        [
            0
            0
            1
            2
            3
            4
            5
            5
        ]
        """
        # NOTE(review): body is docstring-only; implementation is presumably
        # generated by a class-level expression-dispatch decorator — confirm.
@unstable()
def hist(
self,
bins: list[float] | None = None,
*,
bin_count: int | None = None,
include_category: bool = True,
include_breakpoint: bool = True,
) -> DataFrame:
"""
Bin values into buckets and count their occurrences.
.. warning::
This functionality is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
Parameters
----------
bins
Bin edges. If None given, we determine the edges based on the data.
bin_count
If `bins` is not provided, `bin_count` uniform bins are created that fully
encompass the data.
include_breakpoint
Include a column that indicates the upper breakpoint.
include_category
Include a column that shows the intervals as categories.
Returns
-------
DataFrame
Examples
--------
>>> a = pl.Series("a", [1, 3, 8, 8, 2, 1, 3])
>>> a.hist(bin_count=4)
shape: (4, 3)
┌────────────┬─────────────┬───────┐
│ breakpoint ┆ category ┆ count │
│ --- ┆ --- ┆ --- │
│ f64 ┆ cat ┆ u32 │
╞════════════╪═════════════╪═══════╡
│ 2.75 ┆ [1.0, 2.75] ┆ 3 │
│ 4.5 ┆ (2.75, 4.5] ┆ 2 │
│ 6.25 ┆ (4.5, 6.25] ┆ 0 │
│ 8.0 ┆ (6.25, 8.0] ┆ 2 │
└────────────┴─────────────┴───────┘
"""
out = (
self.to_frame()
.select_seq(
F.col(self.name).hist(
bins=bins,
bin_count=bin_count,
include_category=include_category,
include_breakpoint=include_breakpoint,
)
)
.to_series()
)
if not include_breakpoint and not include_category:
return out.to_frame()
else:
return out.struct.unnest()
def value_counts(
self,
*,
sort: bool = False,
parallel: bool = False,
name: str | None = None,
normalize: bool = False,
) -> DataFrame:
"""
Count the occurrences of unique values.
Parameters
----------
sort
Sort the output by count, in descending order.
If set to `False` (default), the order is non-deterministic.
parallel
Execute the computation in parallel.
.. note::
This option should likely *not* be enabled in a `group_by` context,
as the computation will already be parallelized per group.
name
Give the resulting count column a specific name; if `normalize` is
True this defaults to "proportion", otherwise defaults to "count".
normalize
If True, the count is returned as the relative frequency of unique
values normalized to 1.0.
Returns
-------
DataFrame
Columns map the unique values to their count (or proportion).
Examples
--------
>>> s = pl.Series("color", ["red", "blue", "red", "green", "blue", "blue"])
>>> s.value_counts() # doctest: +IGNORE_RESULT
shape: (3, 2)
┌───────┬───────┐
│ color ┆ count │
│ --- ┆ --- │
│ str ┆ u32 │
╞═══════╪═══════╡
│ red ┆ 2 │
│ green ┆ 1 │
│ blue ┆ 3 │
└───────┴───────┘
Sort the output by count and customize the count column name.
>>> s.value_counts(sort=True, name="n")
shape: (3, 2)
┌───────┬─────┐
│ color ┆ n │
│ --- ┆ --- │
│ str ┆ u32 │
╞═══════╪═════╡
│ blue ┆ 3 │
│ red ┆ 2 │
│ green ┆ 1 │
└───────┴─────┘
Return the count as a relative frequency, normalized to 1.0:
>>> s.value_counts(sort=True, normalize=True, name="fraction")
shape: (3, 2)
┌───────┬──────────┐
│ color ┆ fraction │
│ --- ┆ --- │
│ str ┆ f64 │
╞═══════╪══════════╡
│ blue ┆ 0.5 │
│ red ┆ 0.333333 │
│ green ┆ 0.166667 │
└───────┴──────────┘
"""
name = name or ("proportion" if normalize else "count")
return pl.DataFrame._from_pydf(
self._s.value_counts(
sort=sort, parallel=parallel, name=name, normalize=normalize
)
)
    def unique_counts(self) -> Series:
        """
        Return a count of the unique values in the order of appearance.
        Examples
        --------
        >>> s = pl.Series("id", ["a", "b", "b", "c", "c", "c"])
        >>> s.unique_counts()
        shape: (3,)
        Series: 'id' [u32]
        [
            1
            2
            3
        ]
        """
        # NOTE(review): body is docstring-only; implementation is presumably
        # generated by a class-level expression-dispatch decorator — confirm.
def entropy(self, base: float = math.e, *, normalize: bool = True) -> float | None:
"""
Computes the entropy.
Uses the formula `-sum(pk * log(pk))` where `pk` are discrete probabilities.
Parameters
----------
base
Given base, defaults to `e`
normalize
Normalize pk if it doesn't sum to 1.
Examples
--------
>>> a = pl.Series([0.99, 0.005, 0.005])
>>> a.entropy(normalize=True)
0.06293300616044681
>>> b = pl.Series([0.65, 0.10, 0.25])
>>> b.entropy(normalize=True)
0.8568409950394724
"""
return (
self.to_frame()
.select_seq(F.col(self.name).entropy(base, normalize=normalize))
.to_series()
.item()
)
    @unstable()
    @deprecate_renamed_parameter("min_periods", "min_samples", version="1.21.0")
    def cumulative_eval(
        self, expr: Expr, *, min_samples: int = 1, parallel: bool = False
    ) -> Series:
        """
        Run an expression over a sliding window that increases `1` slot every iteration.
        .. warning::
            This functionality is considered **unstable**. It may be changed
            at any point without it being considered a breaking change.
        .. versionchanged:: 1.21.0
            The `min_periods` parameter was renamed `min_samples`.
        Parameters
        ----------
        expr
            Expression to evaluate
        min_samples
            Number of valid values there should be in the window before the expression
            is evaluated. valid values = `length - null_count`
        parallel
            Run in parallel. Don't do this in a group by or another operation that
            already has much parallelization.
        Warnings
        --------
        This can be really slow as it can have `O(n^2)` complexity. Don't use this
        for operations that visit all elements.
        Examples
        --------
        >>> s = pl.Series("values", [1, 2, 3, 4, 5])
        >>> s.cumulative_eval(pl.element().first() - pl.element().last() ** 2)
        shape: (5,)
        Series: 'values' [i64]
        [
            0
            -3
            -8
            -15
            -24
        ]
        """
        # NOTE(review): body is docstring-only; implementation is presumably
        # generated by a class-level expression-dispatch decorator — confirm.
def alias(self, name: str) -> Series:
"""
Rename the series.
Parameters
----------
name
The new name.
Examples
--------
>>> s = pl.Series("a", [1, 2, 3])
>>> s.alias("b")
shape: (3,)
Series: 'b' [i64]
[
1
2
3
]
"""
s = self.clone()
s._s.rename(name)
return s
def rename(self, name: str) -> Series:
"""
Rename this Series.
Alias for :func:`Series.alias`.
Parameters
----------
name
New name.
Examples
--------
>>> s = pl.Series("a", [1, 2, 3])
>>> s.rename("b")
shape: (3,)
Series: 'b' [i64]
[
1
2
3
]
"""
return self.alias(name)
def chunk_lengths(self) -> list[int]:
"""
Get the length of each individual chunk.
Examples
--------
>>> s = pl.Series("a", [1, 2, 3])
>>> s2 = pl.Series("a", [4, 5, 6])
Concatenate Series with rechunk = True
>>> pl.concat([s, s2], rechunk=True).chunk_lengths()
[6]
Concatenate Series with rechunk = False
>>> pl.concat([s, s2], rechunk=False).chunk_lengths()
[3, 3]
"""
return self._s.chunk_lengths()
def n_chunks(self) -> int:
"""
Get the number of chunks that this Series contains.
Examples
--------
>>> s = pl.Series("a", [1, 2, 3])
>>> s.n_chunks()
1
>>> s2 = pl.Series("a", [4, 5, 6])
Concatenate Series with rechunk = True
>>> pl.concat([s, s2], rechunk=True).n_chunks()
1
Concatenate Series with rechunk = False
>>> pl.concat([s, s2], rechunk=False).n_chunks()
2
"""
return self._s.n_chunks()
    def cum_max(self, *, reverse: bool = False) -> Series:
        """
        Get an array with the cumulative max computed at every element.
        Parameters
        ----------
        reverse
            reverse the operation.
        Examples
        --------
        >>> s = pl.Series("s", [3, 5, 1])
        >>> s.cum_max()
        shape: (3,)
        Series: 's' [i64]
        [
            3
            5
            5
        ]
        """
        # NOTE(review): body is docstring-only; implementation is presumably
        # generated by a class-level expression-dispatch decorator — confirm.
    def cum_min(self, *, reverse: bool = False) -> Series:
        """
        Get an array with the cumulative min computed at every element.
        Parameters
        ----------
        reverse
            reverse the operation.
        Examples
        --------
        >>> s = pl.Series("s", [1, 2, 3])
        >>> s.cum_min()
        shape: (3,)
        Series: 's' [i64]
        [
            1
            1
            1
        ]
        """
        # NOTE(review): body is docstring-only; implementation is presumably
        # generated by a class-level expression-dispatch decorator — confirm.
    def cum_prod(self, *, reverse: bool = False) -> Series:
        """
        Get an array with the cumulative product computed at every element.
        Parameters
        ----------
        reverse
            reverse the operation.
        Notes
        -----
        Dtypes in {Int8, UInt8, Int16, UInt16} are cast to
        Int64 before summing to prevent overflow issues.
        Examples
        --------
        >>> s = pl.Series("a", [1, 2, 3])
        >>> s.cum_prod()
        shape: (3,)
        Series: 'a' [i64]
        [
            1
            2
            6
        ]
        """
        # NOTE(review): body is docstring-only; implementation is presumably
        # generated by a class-level expression-dispatch decorator — confirm.
    def cum_sum(self, *, reverse: bool = False) -> Series:
        """
        Get an array with the cumulative sum computed at every element.
        Parameters
        ----------
        reverse
            reverse the operation.
        Notes
        -----
        Dtypes in {Int8, UInt8, Int16, UInt16} are cast to
        Int64 before summing to prevent overflow issues.
        Examples
        --------
        >>> s = pl.Series("a", [1, 2, 3])
        >>> s.cum_sum()
        shape: (3,)
        Series: 'a' [i64]
        [
            1
            3
            6
        ]
        """
        # NOTE(review): body is docstring-only; implementation is presumably
        # generated by a class-level expression-dispatch decorator — confirm.
    def cum_count(self, *, reverse: bool = False) -> Self:
        """
        Return the cumulative count of the non-null values in the column.
        Parameters
        ----------
        reverse
            Reverse the operation.
        Examples
        --------
        >>> s = pl.Series(["x", "k", None, "d"])
        >>> s.cum_count()
        shape: (4,)
        Series: '' [u32]
        [
            1
            2
            2
            3
        ]
        """
        # NOTE(review): body is docstring-only; implementation is presumably
        # generated by a class-level expression-dispatch decorator — confirm.
def slice(self, offset: int, length: int | None = None) -> Series:
"""
Get a slice of this Series.
Parameters
----------
offset
Start index. Negative indexing is supported.
length
Length of the slice. If set to `None`, all rows starting at the offset
will be selected.
Examples
--------
>>> s = pl.Series("a", [1, 2, 3, 4])
>>> s.slice(1, 2)
shape: (2,)
Series: 'a' [i64]
[
2
3
]
"""
return self._from_pyseries(self._s.slice(offset=offset, length=length))
def append(self, other: Series) -> Self:
"""
Append a Series to this one.
The resulting series will consist of multiple chunks.
Parameters
----------
other
Series to append.
Warnings
--------
This method modifies the series in-place. The series is returned for
convenience only.
See Also
--------
extend
Examples
--------
>>> a = pl.Series("a", [1, 2, 3])
>>> b = pl.Series("b", [4, 5])
>>> a.append(b)
shape: (5,)
Series: 'a' [i64]
[
1
2
3
4
5
]
The resulting series will consist of multiple chunks.
>>> a.n_chunks()
2
"""
require_same_type(self, other)
self._s.append(other._s)
return self
def extend(self, other: Series) -> Self:
"""
Extend the memory backed by this Series with the values from another.
Different from `append`, which adds the chunks from `other` to the chunks of
this series, `extend` appends the data from `other` to the underlying memory
locations and thus may cause a reallocation (which is expensive).
If this does `not` cause a reallocation, the resulting data structure will not
have any extra chunks and thus will yield faster queries.
Prefer `extend` over `append` when you want to do a query after a single
append. For instance, during online operations where you add `n` rows
and rerun a query.
Prefer `append` over `extend` when you want to append many times
before doing a query. For instance, when you read in multiple files and want
to store them in a single `Series`. In the latter case, finish the sequence
of `append` operations with a `rechunk`.
Parameters
----------
other
Series to extend the series with.
Warnings
--------
This method modifies the series in-place. The series is returned for
convenience only.
See Also
--------
append
Examples
--------
>>> a = pl.Series("a", [1, 2, 3])
>>> b = pl.Series("b", [4, 5])
>>> a.extend(b)
shape: (5,)
Series: 'a' [i64]
[
1
2
3
4
5
]
The resulting series will consist of a single chunk.
>>> a.n_chunks()
1
"""
require_same_type(self, other)
self._s.extend(other._s)
return self
def filter(self, predicate: Series | Iterable[bool]) -> Self:
"""
Filter elements by a boolean mask.
The original order of the remaining elements is preserved.
Elements where the filter does not evaluate to True are discarded, including
nulls.
Parameters
----------
predicate
Boolean mask.
Examples
--------
>>> s = pl.Series("a", [1, 2, 3])
>>> mask = pl.Series("", [True, False, True])
>>> s.filter(mask)
shape: (2,)
Series: 'a' [i64]
[
1
3
]
"""
if not isinstance(predicate, Series):
predicate = Series("", predicate)
return self._from_pyseries(self._s.filter(predicate._s))
def head(self, n: int = 10) -> Series:
"""
Get the first `n` elements.
Parameters
----------
n
Number of elements to return. If a negative value is passed, return all
elements except the last `abs(n)`.
See Also
--------
tail, slice
Examples
--------
>>> s = pl.Series("a", [1, 2, 3, 4, 5])
>>> s.head(3)
shape: (3,)
Series: 'a' [i64]
[
1
2
3
]
Pass a negative value to get all rows `except` the last `abs(n)`.
>>> s.head(-3)
shape: (2,)
Series: 'a' [i64]
[
1
2
]
"""
if n < 0:
n = max(0, self.len() + n)
return self._from_pyseries(self._s.head(n))
def tail(self, n: int = 10) -> Series:
"""
Get the last `n` elements.
Parameters
----------
n
Number of elements to return. If a negative value is passed, return all
elements except the first `abs(n)`.
See Also
--------
head, slice
Examples
--------
>>> s = pl.Series("a", [1, 2, 3, 4, 5])
>>> s.tail(3)
shape: (3,)
Series: 'a' [i64]
[
3
4
5
]
Pass a negative value to get all rows `except` the first `abs(n)`.
>>> s.tail(-3)
shape: (2,)
Series: 'a' [i64]
[
4
5
]
"""
if n < 0:
n = max(0, self.len() + n)
return self._from_pyseries(self._s.tail(n))
def limit(self, n: int = 10) -> Series:
"""
Get the first `n` elements.
Alias for :func:`Series.head`.
Parameters
----------
n
Number of elements to return. If a negative value is passed, return all
elements except the last `abs(n)`.
See Also
--------
head
Examples
--------
>>> s = pl.Series("a", [1, 2, 3, 4, 5])
>>> s.limit(3)
shape: (3,)
Series: 'a' [i64]
[
1
2
3
]
Pass a negative value to get all rows `except` the last `abs(n)`.
>>> s.limit(-3)
shape: (2,)
Series: 'a' [i64]
[
1
2
]
"""
return self.head(n)
    def gather_every(self, n: int, offset: int = 0) -> Series:
        """
        Take every nth value in the Series and return as new Series.
        Parameters
        ----------
        n
            Gather every *n*-th row.
        offset
            Start the row index at this offset.
        Examples
        --------
        >>> s = pl.Series("a", [1, 2, 3, 4])
        >>> s.gather_every(2)
        shape: (2,)
        Series: 'a' [i64]
        [
            1
            3
        ]
        >>> s.gather_every(2, offset=1)
        shape: (2,)
        Series: 'a' [i64]
        [
            2
            4
        ]
        """
        # NOTE(review): body is docstring-only; implementation is presumably
        # generated by a class-level expression-dispatch decorator — confirm.
def sort(
self,
*,
descending: bool = False,
nulls_last: bool = False,
multithreaded: bool = True,
in_place: bool = False,
) -> Self:
"""
Sort this Series.
Parameters
----------
descending
Sort in descending order.
nulls_last
Place null values last instead of first.
multithreaded
Sort using multiple threads.
in_place
Sort in-place.
Examples
--------
>>> s = pl.Series("a", [1, 3, 4, 2])
>>> s.sort()
shape: (4,)
Series: 'a' [i64]
[
1
2
3
4
]
>>> s.sort(descending=True)
shape: (4,)
Series: 'a' [i64]
[
4
3
2
1
]
"""
if in_place:
self._s = self._s.sort(descending, nulls_last, multithreaded)
return self
else:
return self._from_pyseries(
self._s.sort(descending, nulls_last, multithreaded)
)
def top_k(self, k: int = 5) -> Series:
r"""
Return the `k` largest elements.
Non-null elements are always preferred over null elements. The output is
not guaranteed to be in any particular order, call :func:`sort` after
this function if you wish the output to be sorted.
This has time complexity:
.. math:: O(n)
Parameters
----------
k
Number of elements to return.
See Also
--------
top_k_by
bottom_k
bottom_k_by
Examples
--------
>>> s = pl.Series("a", [2, 5, 1, 4, 3])
>>> s.top_k(3)
shape: (3,)
Series: 'a' [i64]
[
5
4
3
]
"""
def top_k_by(
self,
by: IntoExpr | Iterable[IntoExpr],
k: int = 5,
*,
reverse: bool | Sequence[bool] = False,
) -> Series:
r"""
Return the `k` largest elements of the `by` column.
Non-null elements are always preferred over null elements, regardless of
the value of `reverse`. The output is not guaranteed to be in any
particular order, call :func:`sort` after this function if you wish the
output to be sorted.
This has time complexity:
.. math:: O(n \log{n})
Parameters
----------
by
Column used to determine the largest elements.
Accepts expression input. Strings are parsed as column names.
k
Number of elements to return.
reverse
Consider the `k` smallest elements of the `by` column (instead of the `k`
largest). This can be specified per column by passing a sequence of
booleans.
See Also
--------
top_k
bottom_k
bottom_k_by
Examples
--------
>>> s = pl.Series("a", [2, 5, 1, 4, 3])
>>> s.top_k_by("a", 3)
shape: (3,)
Series: 'a' [i64]
[
5
4
3
]
"""
def bottom_k(self, k: int = 5) -> Series:
r"""
Return the `k` smallest elements.
Non-null elements are always preferred over null elements. The output is
not guaranteed to be in any particular order, call :func:`sort` after
this function if you wish the output to be sorted.
This has time complexity:
.. math:: O(n)
Parameters
----------
k
Number of elements to return.
See Also
--------
top_k
top_k_by
bottom_k_by
Examples
--------
>>> s = pl.Series("a", [2, 5, 1, 4, 3])
>>> s.bottom_k(3)
shape: (3,)
Series: 'a' [i64]
[
1
2
3
]
"""
def bottom_k_by(
self,
by: IntoExpr | Iterable[IntoExpr],
k: int = 5,
*,
reverse: bool | Sequence[bool] = False,
) -> Series:
r"""
Return the `k` smallest elements of the `by` column.
Non-null elements are always preferred over null elements, regardless of
the value of `reverse`. The output is not guaranteed to be in any
particular order, call :func:`sort` after this function if you wish the
output to be sorted.
This has time complexity:
.. math:: O(n \log{n})
Parameters
----------
by
Column used to determine the smallest elements.
Accepts expression input. Strings are parsed as column names.
k
Number of elements to return.
reverse
Consider the `k` largest elements of the `by` column( (instead of the `k`
smallest). This can be specified per column by passing a sequence of
booleans.
See Also
--------
top_k
top_k_by
bottom_k
Examples
--------
>>> s = pl.Series("a", [2, 5, 1, 4, 3])
>>> s.bottom_k_by("a", 3)
shape: (3,)
Series: 'a' [i64]
[
1
2
3
]
"""
    def arg_sort(self, *, descending: bool = False, nulls_last: bool = False) -> Series:
        """
        Get the index values that would sort this Series.
        Parameters
        ----------
        descending
            Sort in descending order.
        nulls_last
            Place null values last instead of first.
        See Also
        --------
        Series.gather: Take values by index.
        Series.rank : Get the rank of each row.
        Examples
        --------
        >>> s = pl.Series("a", [5, 3, 4, 1, 2])
        >>> s.arg_sort()
        shape: (5,)
        Series: 'a' [u32]
        [
            3
            4
            1
            2
            0
        ]
        """
        # NOTE(review): body is docstring-only; implementation is presumably
        # generated by a class-level expression-dispatch decorator — confirm.
    def arg_unique(self) -> Series:
        """
        Get unique index as Series.
        Returns
        -------
        Series
        Examples
        --------
        >>> s = pl.Series("a", [1, 2, 2, 3])
        >>> s.arg_unique()
        shape: (3,)
        Series: 'a' [u32]
        [
            0
            1
            3
        ]
        """
        # NOTE(review): body is docstring-only; implementation is presumably
        # generated by a class-level expression-dispatch decorator — confirm.
def arg_min(self) -> int | None:
"""
Get the index of the minimal value.
Returns
-------
int
Examples
--------
>>> s = pl.Series("a", [3, 2, 1])
>>> s.arg_min()
2
"""
return self._s.arg_min()
def arg_max(self) -> int | None:
"""
Get the index of the maximal value.
Returns
-------
int
Examples
--------
>>> s = pl.Series("a", [3, 2, 1])
>>> s.arg_max()
0
"""
return self._s.arg_max()
@overload
def search_sorted(
self,
element: NonNestedLiteral | None,
side: SearchSortedSide = ...,
*,
descending: bool = ...,
) -> int: ...
@overload
def search_sorted(
self,
element: list[NonNestedLiteral | None] | np.ndarray[Any, Any] | Expr | Series,
side: SearchSortedSide = ...,
*,
descending: bool = ...,
) -> Series: ...
def search_sorted(
self,
element: IntoExpr | np.ndarray[Any, Any] | None,
side: SearchSortedSide = "any",
*,
descending: bool = False,
) -> int | Series:
"""
Find indices where elements should be inserted to maintain order.
.. math:: a[i-1] < v <= a[i]
Parameters
----------
element
Expression or scalar value.
side : {'any', 'left', 'right'}
If 'any', the index of the first suitable location found is given.
If 'left', the index of the leftmost suitable location found is given.
If 'right', return the rightmost suitable location found is given.
descending
Boolean indicating whether the values are descending or not (they
are required to be sorted either way).
Examples
--------
>>> s = pl.Series("set", [1, 2, 3, 4, 4, 5, 6, 7])
>>> s.search_sorted(4)
3
>>> s.search_sorted(4, "left")
3
>>> s.search_sorted(4, "right")
5
>>> s.search_sorted([1, 4, 5])
shape: (3,)
Series: 'set' [u32]
[
0
3
5
]
>>> s.search_sorted([1, 4, 5], "left")
shape: (3,)
Series: 'set' [u32]
[
0
3
5
]
>>> s.search_sorted([1, 4, 5], "right")
shape: (3,)
Series: 'set' [u32]
[
1
5
6
]
"""
df = F.select(F.lit(self).search_sorted(element, side, descending=descending))
if isinstance(element, (list, Series, pl.Expr)):
return df.to_series()
elif _check_for_numpy(element) and isinstance(element, np.ndarray):
return df.to_series()
else:
return df.item()
    def unique(self, *, maintain_order: bool = False) -> Series:
        """
        Get unique elements in series.
        Parameters
        ----------
        maintain_order
            Maintain order of data. This requires more work.
        Examples
        --------
        >>> s = pl.Series("a", [1, 2, 2, 3])
        >>> s.unique().sort()
        shape: (3,)
        Series: 'a' [i64]
        [
            1
            2
            3
        ]
        """
        # NOTE(review): body is docstring-only; implementation is presumably
        # generated by a class-level expression-dispatch decorator — confirm.
    def gather(
        self, indices: int | list[int] | Expr | Series | np.ndarray[Any, Any]
    ) -> Series:
        """
        Take values by index.
        Parameters
        ----------
        indices
            Index location used for selection.
        Examples
        --------
        >>> s = pl.Series("a", [1, 2, 3, 4])
        >>> s.gather([1, 3])
        shape: (2,)
        Series: 'a' [i64]
        [
            2
            4
        ]
        """
        # NOTE(review): body is docstring-only; implementation is presumably
        # generated by a class-level expression-dispatch decorator — confirm.
def null_count(self) -> int:
"""
Count the null values in this Series.
Examples
--------
>>> s = pl.Series([1, None, None])
>>> s.null_count()
2
"""
return self._s.null_count()
def has_nulls(self) -> bool:
"""
Check whether the Series contains one or more null values.
Examples
--------
>>> s = pl.Series([1, 2, None])
>>> s.has_nulls()
True
>>> s[:2].has_nulls()
False
"""
return self._s.has_nulls()
    @deprecated(
        "`has_validity` is deprecated; use `has_nulls` "
        "instead to check for the presence of null values."
    )
    def has_validity(self) -> bool:
        """
        Check whether the Series contains one or more null values.
        .. deprecated:: 0.20.30
            Use the :meth:`has_nulls` method instead.
        """
        # Deprecated shim kept for backward compatibility; delegates directly
        # to the same native check used by `has_nulls`.
        return self._s.has_nulls()
def is_empty(self) -> bool:
"""
Check if the Series is empty.
Examples
--------
>>> s = pl.Series("a", [], dtype=pl.Float32)
>>> s.is_empty()
True
"""
return self.len() == 0
def is_sorted(self, *, descending: bool = False, nulls_last: bool = False) -> bool:
"""
Check if the Series is sorted.
Parameters
----------
descending
Check if the Series is sorted in descending order
nulls_last
Set nulls at the end of the Series in sorted check.
Examples
--------
>>> s = pl.Series([1, 3, 2])
>>> s.is_sorted()
False
>>> s = pl.Series([3, 2, 1])
>>> s.is_sorted(descending=True)
True
"""
return self._s.is_sorted(descending, nulls_last)
def not_(self) -> Series:
"""
Negate a boolean Series.
Returns
-------
Series
Series of data type :class:`Boolean`.
Examples
--------
>>> s = pl.Series("a", [True, False, False])
>>> s.not_()
shape: (3,)
Series: 'a' [bool]
[
false
true
true
]
"""
return self._from_pyseries(self._s.not_())
def is_null(self) -> Series:
"""
Returns a boolean Series indicating which values are null.
Returns
-------
Series
Series of data type :class:`Boolean`.
Examples
--------
>>> s = pl.Series("a", [1.0, 2.0, 3.0, None])
>>> s.is_null()
shape: (4,)
Series: 'a' [bool]
[
false
false
false
true
]
"""
def is_not_null(self) -> Series:
"""
Returns a boolean Series indicating which values are not null.
Returns
-------
Series
Series of data type :class:`Boolean`.
Examples
--------
>>> s = pl.Series("a", [1.0, 2.0, 3.0, None])
>>> s.is_not_null()
shape: (4,)
Series: 'a' [bool]
[
true
true
true
false
]
"""
def is_finite(self) -> Series:
"""
Returns a boolean Series indicating which values are finite.
Returns
-------
Series
Series of data type :class:`Boolean`.
Examples
--------
>>> import numpy as np
>>> s = pl.Series("a", [1.0, 2.0, np.inf])
>>> s.is_finite()
shape: (3,)
Series: 'a' [bool]
[
true
true
false
]
"""
def is_infinite(self) -> Series:
"""
Returns a boolean Series indicating which values are infinite.
Returns
-------
Series
Series of data type :class:`Boolean`.
Examples
--------
>>> import numpy as np
>>> s = pl.Series("a", [1.0, 2.0, np.inf])
>>> s.is_infinite()
shape: (3,)
Series: 'a' [bool]
[
false
false
true
]
"""
def is_nan(self) -> Series:
"""
Returns a boolean Series indicating which values are NaN.
Returns
-------
Series
Series of data type :class:`Boolean`.
Examples
--------
>>> import numpy as np
>>> s = pl.Series("a", [1.0, 2.0, 3.0, np.nan])
>>> s.is_nan()
shape: (4,)
Series: 'a' [bool]
[
false
false
false
true
]
"""
def is_not_nan(self) -> Series:
"""
Returns a boolean Series indicating which values are not NaN.
Returns
-------
Series
Series of data type :class:`Boolean`.
Examples
--------
>>> import numpy as np
>>> s = pl.Series("a", [1.0, 2.0, 3.0, np.nan])
>>> s.is_not_nan()
shape: (4,)
Series: 'a' [bool]
[
true
true
true
false
]
"""
def is_in(
self,
other: Series | Collection[Any],
*,
nulls_equal: bool = False,
) -> Series:
"""
Check if elements of this Series are in the other Series.
Parameters
----------
other
A Series or collection to search in.
nulls_equal : bool, default False
If True, treat null as a distinct value. Null values will not propagate.
Returns
-------
Series
Series of data type :class:`Boolean`.
Examples
--------
>>> s = pl.Series("a", [1, 2, 3])
>>> s2 = pl.Series("b", [2, 4, None])
>>> s2.is_in(s)
shape: (3,)
Series: 'b' [bool]
[
true
false
null
]
>>> # when nulls_equal=True, None is treated as a distinct value
>>> s2.is_in(s, nulls_equal=True)
shape: (3,)
Series: 'b' [bool]
[
true
false
false
]
>>> # check if some values are a member of sublists
>>> sets = pl.Series("sets", [[1, 2, 3], [1, 2], [9, 10]])
>>> optional_members = pl.Series("optional_members", [1, 2, 3])
>>> print(sets)
shape: (3,)
Series: 'sets' [list[i64]]
[
[1, 2, 3]
[1, 2]
[9, 10]
]
>>> print(optional_members)
shape: (3,)
Series: 'optional_members' [i64]
[
1
2
3
]
>>> optional_members.is_in(sets)
shape: (3,)
Series: 'optional_members' [bool]
[
true
true
false
]
"""
def arg_true(self) -> Series:
"""
Get index values where Boolean Series evaluate True.
Returns
-------
Series
Series of data type :class:`UInt32`.
Examples
--------
>>> s = pl.Series("a", [1, 2, 3])
>>> (s == 2).arg_true()
shape: (1,)
Series: 'a' [u32]
[
1
]
"""
return F.arg_where(self, eager=True)
def is_unique(self) -> Series:
"""
Get mask of all unique values.
Returns
-------
Series
Series of data type :class:`Boolean`.
Examples
--------
>>> s = pl.Series("a", [1, 2, 2, 3])
>>> s.is_unique()
shape: (4,)
Series: 'a' [bool]
[
true
false
false
true
]
"""
def is_first_distinct(self) -> Series:
"""
Return a boolean mask indicating the first occurrence of each distinct value.
Returns
-------
Series
Series of data type :class:`Boolean`.
Examples
--------
>>> s = pl.Series([1, 1, 2, 3, 2])
>>> s.is_first_distinct()
shape: (5,)
Series: '' [bool]
[
true
false
true
true
false
]
"""
def is_last_distinct(self) -> Series:
"""
Return a boolean mask indicating the last occurrence of each distinct value.
Returns
-------
Series
Series of data type :class:`Boolean`.
Examples
--------
>>> s = pl.Series([1, 1, 2, 3, 2])
>>> s.is_last_distinct()
shape: (5,)
Series: '' [bool]
[
false
true
false
true
true
]
"""
def is_duplicated(self) -> Series:
"""
Get mask of all duplicated values.
Returns
-------
Series
Series of data type :class:`Boolean`.
Examples
--------
>>> s = pl.Series("a", [1, 2, 2, 3])
>>> s.is_duplicated()
shape: (4,)
Series: 'a' [bool]
[
false
true
true
false
]
"""
def explode(self, *, empty_as_null: bool = True, keep_nulls: bool = True) -> Series:
"""
Explode a list Series.
This means that every item is expanded to a new row.
Parameters
----------
empty_as_null
Explode an empty list into a `null`.
keep_nulls
Explode a `null` list into a `null`.
Returns
-------
Series
Series with the data type of the list elements.
See Also
--------
Series.list.explode : Explode a list column.
Examples
--------
>>> s = pl.Series("a", [[1, 2, 3], [4, 5, 6]])
>>> s
shape: (2,)
Series: 'a' [list[i64]]
[
[1, 2, 3]
[4, 5, 6]
]
>>> s.explode()
shape: (6,)
Series: 'a' [i64]
[
1
2
3
4
5
6
]
"""
@deprecate_renamed_parameter("strict", "check_dtypes", version="0.20.31")
def equals(
self,
other: Series,
*,
check_dtypes: bool = False,
check_names: bool = False,
null_equal: bool = True,
) -> bool:
"""
Check whether the Series is equal to another Series.
.. versionchanged:: 0.20.31
The `strict` parameter was renamed `check_dtypes`.
Parameters
----------
other
Series to compare with.
check_dtypes
Require data types to match.
check_names
Require names to match.
null_equal
Consider null values as equal.
See Also
--------
polars.testing.assert_series_equal
Examples
--------
>>> s1 = pl.Series("a", [1, 2, 3])
>>> s2 = pl.Series("b", [4, 5, 6])
>>> s1.equals(s1)
True
>>> s1.equals(s2)
False
"""
require_same_type(self, other)
return self._s.equals(
other._s,
check_dtypes=check_dtypes,
check_names=check_names,
null_equal=null_equal,
)
def cast(
self,
dtype: type[int | float | str | bool] | PolarsDataType,
*,
strict: bool = True,
wrap_numerical: bool = False,
) -> Self:
r"""
Cast between data types.
Parameters
----------
dtype
DataType to cast to.
strict
If True invalid casts generate exceptions instead of `null`\s.
wrap_numerical
If True numeric casts wrap overflowing values instead of
marking the cast as invalid.
Examples
--------
>>> s = pl.Series("a", [True, False, True])
>>> s
shape: (3,)
Series: 'a' [bool]
[
true
false
true
]
>>> s.cast(pl.UInt32)
shape: (3,)
Series: 'a' [u32]
[
1
0
1
]
"""
# Do not dispatch cast as it is expensive and used in other functions.
dtype = parse_into_dtype(dtype)
return self._from_pyseries(self._s.cast(dtype, strict, wrap_numerical))
def to_physical(self) -> Series:
    """
    Cast to physical representation of the logical dtype.

    - :func:`polars.datatypes.Date` -> :func:`polars.datatypes.Int32`
    - :func:`polars.datatypes.Datetime` -> :func:`polars.datatypes.Int64`
    - :func:`polars.datatypes.Time` -> :func:`polars.datatypes.Int64`
    - :func:`polars.datatypes.Duration` -> :func:`polars.datatypes.Int64`
    - :func:`polars.datatypes.Categorical` -> :func:`polars.datatypes.UInt32`
    - `List(inner)` -> `List(physical of inner)`
    - `Array(inner)` -> `Array(physical of inner)`
    - `Struct(fields)` -> `Struct(physical of fields)`
    - Other data types will be left unchanged.

    Warnings
    --------
    The physical representations are an implementation detail
    and not guaranteed to be stable.

    Examples
    --------
    Replicating the pandas
    `pd.Series.factorize
    <https://pandas.pydata.org/docs/reference/api/pandas.Series.factorize.html>`_
    method.

    >>> s = pl.Series("values", ["a", None, "x", "a"])
    >>> s.cast(pl.Categorical).to_physical()
    shape: (4,)
    Series: 'values' [u32]
    [
        0
        null
        1
        0
    ]
    """
    # NOTE(review): docstring-only stub — the implementation appears to be
    # supplied elsewhere (likely a class-level dispatch decorator); confirm
    # the dispatch mechanism before assuming this is a no-op.
def to_list(self) -> list[Any]:
    """
    Return the contents of this Series as a Python list.

    Note that this materializes (copies) the underlying data.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 3])
    >>> s.to_list()
    [1, 2, 3]
    >>> type(s.to_list())
    <class 'list'>
    """
    materialized = self._s.to_list()
    return materialized
def rechunk(self, *, in_place: bool = False) -> Self:
    """
    Create a single chunk of memory for this Series.

    Parameters
    ----------
    in_place
        In place or not.

    Examples
    --------
    >>> s1 = pl.Series("a", [1, 2, 3])
    >>> s1.n_chunks()
    1
    >>> s2 = pl.Series("a", [4, 5, 6])
    >>> s = pl.concat([s1, s2], rechunk=False)
    >>> s.n_chunks()
    2
    >>> s.rechunk(in_place=True)
    shape: (6,)
    Series: 'a' [i64]
    [
        1
        2
        3
        4
        5
        6
    ]
    >>> s.n_chunks()
    1
    """
    rechunked = self._s.rechunk(in_place)
    if in_place:
        # The backing series was mutated directly; `rechunked` is None here.
        return self
    assert rechunked is not None
    return self._from_pyseries(rechunked)
def reverse(self) -> Series:
    """
    Return Series in reverse order.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 3], dtype=pl.Int8)
    >>> s.reverse()
    shape: (3,)
    Series: 'a' [i8]
    [
        3
        2
        1
    ]
    """
    # NOTE(review): docstring-only stub — implementation appears to be
    # attached elsewhere (dispatch decorator?); confirm before editing.
def is_between(
    self,
    lower_bound: IntoExpr,
    upper_bound: IntoExpr,
    closed: ClosedInterval = "both",
) -> Series:
    """
    Get a boolean mask of the values that are between the given lower/upper bounds.

    Parameters
    ----------
    lower_bound
        Lower bound value. Accepts expression input. Non-expression inputs
        (including strings) are parsed as literals.
    upper_bound
        Upper bound value. Accepts expression input. Non-expression inputs
        (including strings) are parsed as literals.
    closed : {'both', 'left', 'right', 'none'}
        Define which sides of the interval are closed (inclusive).

    Raises
    ------
    ValueError
        If `closed` is not one of the accepted values.

    Notes
    -----
    If the value of the `lower_bound` is greater than that of the `upper_bound`
    then the result will be False, as no value can satisfy the condition.

    Examples
    --------
    >>> s = pl.Series("num", [1, 2, 3, 4, 5])
    >>> s.is_between(2, 4)
    shape: (5,)
    Series: 'num' [bool]
    [
        false
        true
        true
        true
        false
    ]

    Use the `closed` argument to include or exclude the values at the bounds:

    >>> s.is_between(2, 4, closed="left")
    shape: (5,)
    Series: 'num' [bool]
    [
        false
        true
        true
        false
        false
    ]

    You can also use strings as well as numeric/temporal values:

    >>> s = pl.Series("s", ["a", "b", "c", "d", "e"])
    >>> s.is_between("b", "d", closed="both")
    shape: (5,)
    Series: 's' [bool]
    [
        false
        true
        true
        true
        false
    ]
    """
    if closed == "none":
        out = (self > lower_bound) & (self < upper_bound)
    elif closed == "both":
        out = (self >= lower_bound) & (self <= upper_bound)
    elif closed == "right":
        out = (self > lower_bound) & (self <= upper_bound)
    elif closed == "left":
        out = (self >= lower_bound) & (self < upper_bound)
    else:
        # Previously an unrecognised `closed` fell through the chain and
        # surfaced as an opaque UnboundLocalError; fail fast instead.
        msg = f"`closed` must be one of 'both', 'left', 'right' or 'none', got {closed!r}"
        raise ValueError(msg)
    # Comparing against expression bounds yields an Expr; evaluate it so the
    # method always returns a Series.
    if isinstance(out, pl.Expr):
        out = F.select(out).to_series()
    return out
def is_close(
    self,
    other: IntoExpr,
    *,
    abs_tol: float = 0.0,
    rel_tol: float = 1e-09,
    nans_equal: bool = False,
) -> Series:
    r"""
    Get a boolean mask of the values being close to the other values.

    Two values `a` and `b` are considered close if the following condition holds:

    .. math::
        |a-b| \le max \{ \text{rel_tol} \cdot max \{ |a|, |b| \}, \text{abs_tol} \}

    Parameters
    ----------
    other
        A literal or expression value to compare with.
    abs_tol
        Absolute tolerance. This is the maximum allowed absolute difference between
        two values. Must be non-negative.
    rel_tol
        Relative tolerance. This is the maximum allowed difference between two
        values, relative to the larger absolute value. Must be non-negative.
    nans_equal
        Whether NaN values should be considered equal.

    Returns
    -------
    Series
        Series of data type :class:`Boolean`.

    Notes
    -----
    The implementation of this method is symmetric and mirrors the behavior of
    :meth:`math.isclose`. Specifically note that this behavior is different to
    :meth:`numpy.isclose`.

    Examples
    --------
    >>> s = pl.Series("s", [1.0, 1.2, 1.4, 1.45, 1.6])
    >>> s.is_close(1.4, abs_tol=0.1)
    shape: (5,)
    Series: 's' [bool]
    [
        false
        false
        true
        true
        false
    ]
    """
    # Lift the Series into an expression, apply the comparison there, and
    # evaluate the result back into a Series.
    closeness = F.lit(self).is_close(
        other, abs_tol=abs_tol, rel_tol=rel_tol, nans_equal=nans_equal
    )
    return F.select(closeness).to_series()
def to_numpy(
    self,
    *,
    writable: bool = False,
    allow_copy: bool = True,
    use_pyarrow: bool | None = None,
    zero_copy_only: bool | None = None,
) -> np.ndarray[Any, Any]:
    """
    Convert this Series to a NumPy ndarray.

    This operation copies data only when necessary. The conversion is zero copy when
    all of the following hold:

    - The data type is an integer, float, `Datetime`, `Duration`, or `Array`.
    - The Series contains no null values.
    - The Series consists of a single chunk.
    - The `writable` parameter is set to `False` (default).

    Parameters
    ----------
    writable
        Ensure the resulting array is writable. This will force a copy of the data
        if the array was created without copy as the underlying Arrow data is
        immutable.
    allow_copy
        Allow memory to be copied to perform the conversion. If set to `False`,
        causes conversions that are not zero-copy to fail.
    use_pyarrow
        First convert to PyArrow, then call `pyarrow.Array.to_numpy
        <https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy>`_
        to convert to NumPy. If set to `False`, Polars' own conversion logic is
        used.

        .. deprecated:: 0.20.28
            Polars now uses its native engine by default for conversion to NumPy.
            To use PyArrow's engine, call `.to_arrow().to_numpy()` instead.
    zero_copy_only
        Raise an exception if the conversion to a NumPy would require copying
        the underlying data. Data copy occurs, for example, when the Series contains
        nulls or non-numeric types.

        .. deprecated:: 0.20.10
            Use the `allow_copy` parameter instead, which is the inverse of this
            one.

    Examples
    --------
    Numeric data without nulls can be converted without copying data.
    The resulting array will not be writable.

    >>> s = pl.Series([1, 2, 3], dtype=pl.Int8)
    >>> arr = s.to_numpy()
    >>> arr
    array([1, 2, 3], dtype=int8)
    >>> arr.flags.writeable
    False

    Set `writable=True` to force data copy to make the array writable.

    >>> s.to_numpy(writable=True).flags.writeable
    True

    Integer Series containing nulls will be cast to a float type with `nan`
    representing a null value. This requires data to be copied.

    >>> s = pl.Series([1, 2, None], dtype=pl.UInt16)
    >>> s.to_numpy()
    array([ 1.,  2., nan], dtype=float32)

    Set `allow_copy=False` to raise an error if data would be copied.

    >>> s.to_numpy(allow_copy=False)  # doctest: +SKIP
    Traceback (most recent call last):
    ...
    RuntimeError: copy not allowed: cannot convert to a NumPy array without copying data

    Series of data type `Array` and `Struct` will result in an array with more than
    one dimension.

    >>> s = pl.Series([[1, 2, 3], [4, 5, 6]], dtype=pl.Array(pl.Int64, 3))
    >>> s.to_numpy()
    array([[1, 2, 3],
           [4, 5, 6]])
    """  # noqa: W505
    # Map the deprecated parameters onto their replacements before anything
    # else, so the rest of the method only deals with the modern arguments.
    if zero_copy_only is not None:
        issue_deprecation_warning(
            "the `zero_copy_only` parameter for `Series.to_numpy` is deprecated."
            " Use the `allow_copy` parameter instead, which is the inverse of `zero_copy_only`.",
            version="0.20.10",
        )
        allow_copy = not zero_copy_only
    if use_pyarrow is not None:
        issue_deprecation_warning(
            "the `use_pyarrow` parameter for `Series.to_numpy` is deprecated."
            " Polars now uses its native engine for conversion to NumPy by default."
            " To use PyArrow's engine, call `.to_arrow().to_numpy()` instead.",
            version="0.20.28",
        )
    else:
        use_pyarrow = False
    # Legacy PyArrow path, only for dtypes PyArrow converts faithfully.
    if (
        use_pyarrow
        and _PYARROW_AVAILABLE
        and self.dtype not in (Date, Datetime, Duration, Array, Object)
    ):
        # Multiple chunks would have to be concatenated (a copy), which
        # conflicts with an explicit no-copy request.
        if not allow_copy and self.n_chunks() > 1 and not self.is_empty():
            msg = "cannot return a zero-copy array"
            raise ValueError(msg)
        return self.to_arrow().to_numpy(
            zero_copy_only=not allow_copy, writable=writable
        )
    # Default: Polars' native conversion engine.
    return self._s.to_numpy(writable=writable, allow_copy=allow_copy)
@unstable()
def to_jax(self, device: jax.Device | str | None = None) -> jax.Array:
    """
    Convert this Series to a Jax Array.

    .. versionadded:: 0.20.27

    .. warning::
        This functionality is currently considered **unstable**. It may be
        changed at any point without it being considered a breaking change.

    Parameters
    ----------
    device
        Specify the jax `Device` on which the array will be created; can provide
        a string (such as "cpu", "gpu", or "tpu") in which case the device is
        retrieved as `jax.devices(string)[0]`. For more specific control you
        can supply the instantiated `Device` directly. If None, arrays are
        created on the default device.

    Examples
    --------
    >>> s = pl.Series("x", [10.5, 0.0, -10.0, 5.5])
    >>> s.to_jax()
    Array([ 10.5,   0. , -10. ,   5.5], dtype=float32)
    """
    jx = import_optional(
        "jax",
        install_message="Please see `https://jax.readthedocs.io/en/latest/installation.html` "
        "for specific installation recommendations for the Jax package",
    )
    # Resolve a device name like "cpu"/"gpu"/"tpu" to a concrete Device.
    if isinstance(device, str):
        device = jx.devices(device)[0]
    # Jax defaults to 32-bit arrays unless x64 support is enabled; keep the
    # data as-is when x64 is on (or the dtype is already narrow enough),
    # otherwise downcast 64-bit numeric dtypes to their 32-bit counterparts.
    if (
        jx.config.jax_enable_x64
        or bool(int(os.environ.get("JAX_ENABLE_X64", "0")))
        or self.dtype not in {Float64, Int64, UInt64}
    ):
        srs = self
    else:
        single_precision = {Float64: Float32, Int64: Int32, UInt64: UInt32}
        srs = self.cast(single_precision[self.dtype])  # type: ignore[index]
    with nullcontext() if device is None else jx.default_device(device):
        return jx.numpy.asarray(
            # note: jax arrays are immutable, so can avoid a copy (vs torch)
            a=srs.to_numpy(writable=False),
            order="K",
        )
@unstable()
def to_torch(self) -> torch.Tensor:
    """
    Convert this Series to a PyTorch Tensor.

    .. versionadded:: 0.20.23

    .. warning::
        This functionality is currently considered **unstable**. It may be
        changed at any point without it being considered a breaking change.

    Notes
    -----
    PyTorch tensors do not support UInt16, UInt32, or UInt64; these dtypes
    will be automatically cast to Int32, Int64, and Int64, respectively.

    Examples
    --------
    >>> s = pl.Series("x", [1, 0, 1, 2, 0], dtype=pl.UInt8)
    >>> s.to_torch()
    tensor([1, 0, 1, 2, 0], dtype=torch.uint8)
    >>> s = pl.Series("x", [5.5, -10.0, 2.5], dtype=pl.Float32)
    >>> s.to_torch()
    tensor([  5.5000, -10.0000,   2.5000])
    """
    torch = import_optional("torch")

    # Torch has no u16/u32/u64 tensor types; promote those to signed ints.
    if self.dtype == UInt16:
        srs = self.cast(Int32)
    elif self.dtype in (UInt32, UInt64):
        srs = self.cast(Int64)
    else:
        srs = self

    # Request a writable array: building a tensor over a read-only buffer
    # triggers a PyTorch warning (writes would be undefined behavior).
    backing = srs.to_numpy(writable=True)
    try:
        return torch.from_numpy(backing)
    except TypeError:
        if self.dtype == List:
            msg = "cannot convert List dtype to Tensor (use Array dtype instead)"
            raise TypeError(msg) from None
        raise
@deprecate_renamed_parameter("future", "compat_level", version="1.1")
def to_arrow(self, *, compat_level: CompatLevel | None = None) -> pa.Array:
    """
    Return the underlying Arrow array.

    If the Series contains only a single chunk this operation is zero copy.

    .. versionchanged:: 1.24
        The `future` parameter was renamed `compat_level`.

    Parameters
    ----------
    compat_level
        Use a specific compatibility level
        when exporting Polars' internal data structures.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 3])
    >>> s = s.to_arrow()
    >>> s
    <pyarrow.lib.Int64Array object at ...>
    [
      1,
      2,
      3
    ]
    """
    # Normalize the public CompatLevel object to the int/bool form the
    # native layer expects.
    level: int | bool
    if compat_level is None:
        level = False
    elif isinstance(compat_level, CompatLevel):
        level = compat_level._version
    else:
        msg = f"`compat_level` has invalid type: {qualified_type_name(compat_level)!r}"
        raise TypeError(msg)
    return self._s.to_arrow(level)
def to_pandas(
    self, *, use_pyarrow_extension_array: bool = False, **kwargs: Any
) -> pd.Series[Any]:
    """
    Convert this Series to a pandas Series.

    This operation copies data if `use_pyarrow_extension_array` is not enabled.

    Parameters
    ----------
    use_pyarrow_extension_array
        Use a PyArrow-backed extension array instead of a NumPy array for the pandas
        Series. This allows zero copy operations and preservation of null values.
        Subsequent operations on the resulting pandas Series may trigger conversion
        to NumPy if those operations are not supported by PyArrow compute functions.
    **kwargs
        Additional keyword arguments to be passed to
        :meth:`pyarrow.Array.to_pandas`.

    Returns
    -------
    :class:`pandas.Series`

    Notes
    -----
    This operation requires that both :mod:`pandas` and :mod:`pyarrow` are
    installed.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 3])
    >>> s.to_pandas()
    0    1
    1    2
    2    3
    Name: a, dtype: int64

    Null values are converted to `NaN`.

    >>> s = pl.Series("b", [1, 2, None])
    >>> s.to_pandas()
    0    1.0
    1    2.0
    2    NaN
    Name: b, dtype: float64

    Pass `use_pyarrow_extension_array=True` to get a pandas Series backed by a
    PyArrow extension array. This will preserve null values.

    >>> s.to_pandas(use_pyarrow_extension_array=True)
    0       1
    1       2
    2    <NA>
    Name: b, dtype: int64[pyarrow]
    """
    # Object dtype holds arbitrary Python objects; PyArrow cannot represent
    # them, so go through NumPy directly.
    if self.dtype == Object:
        # Can't convert via PyArrow, so do it via NumPy
        return pd.Series(self.to_numpy(), dtype=object, name=self.name)

    # The extension-array path needs recent pandas and pyarrow versions.
    if use_pyarrow_extension_array:
        if parse_version(pd.__version__) < (1, 5):
            msg = f'pandas>=1.5.0 is required for `to_pandas("use_pyarrow_extension_array=True")`, found Pandas {pd.__version__}'
            raise ModuleUpgradeRequiredError(msg)
        if not _PYARROW_AVAILABLE or parse_version(pa.__version__) < (8, 0):
            raise ModuleUpgradeRequiredError(
                f'pyarrow>=8.0.0 is required for `to_pandas("use_pyarrow_extension_array=True")`'
                f", found pyarrow {pa.__version__!r}"
                if _PYARROW_AVAILABLE
                else ""
            )

    pa_arr = self.to_arrow()
    # pandas does not support unsigned dictionary indices
    if pa.types.is_dictionary(pa_arr.type):
        pa_arr = pa_arr.cast(pa.dictionary(pa.int64(), pa.large_string()))

    if use_pyarrow_extension_array:
        pd_series = pa_arr.to_pandas(
            self_destruct=True,
            split_blocks=True,
            types_mapper=lambda pa_dtype: pd.ArrowDtype(pa_dtype),
            **kwargs,
        )
    else:
        date_as_object = kwargs.pop("date_as_object", False)
        pd_series = pa_arr.to_pandas(date_as_object=date_as_object, **kwargs)

    pd_series.name = self.name
    return pd_series
def to_init_repr(self, n: int = 1000) -> str:
    """
    Convert Series to instantiable string representation.

    Parameters
    ----------
    n
        Only use first n elements.

    See Also
    --------
    polars.Series.to_init_repr
    polars.from_repr

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, None, 4], dtype=pl.Int16)
    >>> print(s.to_init_repr())
    pl.Series('a', [1, 2, None, 4], dtype=pl.Int16)
    >>> s_from_str_repr = eval(s.to_init_repr())
    >>> s_from_str_repr
    shape: (4,)
    Series: 'a' [i16]
    [
        1
        2
        null
        4
    ]
    """
    dtype_repr = dtype_to_init_repr(self.dtype)
    head_values = self.head(n).to_list()
    return f"pl.Series({self.name!r}, {head_values}, dtype={dtype_repr})"
def count(self) -> int:
    """
    Return the number of non-null elements in the column.

    See Also
    --------
    len

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, None])
    >>> s.count()
    2
    """
    total = self.len()
    return total - self.null_count()
def len(self) -> int:
    """
    Return the number of elements in the Series.

    Null values count towards the total.

    See Also
    --------
    count

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, None])
    >>> s.len()
    3
    """
    length = self._s.len()
    return length
def set(self, filter: Series, value: Any) -> Series:
    """
    Set masked values.

    Parameters
    ----------
    filter
        Boolean mask.
    value
        Value with which to replace the masked values.

    Notes
    -----
    Use of this function is frequently an anti-pattern, as it can
    block optimisation (predicate pushdown, etc). Consider using
    `pl.when(predicate).then(value).otherwise(self)` instead.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 3])
    >>> s.set(s == 2, 10)
    shape: (3,)
    Series: 'a' [i64]
    [
        1
        10
        3
    ]

    It is better to implement this as follows:

    >>> s.to_frame().select(
    ...     pl.when(pl.col("a") == 2).then(10).otherwise(pl.col("a"))
    ... )
    shape: (3, 1)
    ┌─────────┐
    │ literal │
    │ ---     │
    │ i64     │
    ╞═════════╡
    │ 1       │
    │ 10      │
    │ 3       │
    └─────────┘
    """
    # Wrap the scalar in a one-element Series of this Series' dtype so the
    # native layer receives a compatible replacement value.
    replacement = Series([value], dtype=self.dtype)
    return wrap_s(self._s.set(filter._s, replacement._s))
def scatter(
    self,
    indices: Series | Iterable[int] | int | np.ndarray[Any, Any],
    values: Series | Iterable[PythonLiteral] | PythonLiteral | None,
) -> Series:
    """
    Set values at the index locations.

    Parameters
    ----------
    indices
        Integers representing the index locations.
    values
        Replacement values.

    Notes
    -----
    Use of this function is frequently an anti-pattern, as it can
    block optimization (predicate pushdown, etc). Consider using
    `pl.when(predicate).then(value).otherwise(self)` instead.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 3])
    >>> s.scatter(1, 10)
    shape: (3,)
    Series: 'a' [i64]
    [
        1
        10
        3
    ]

    It is better to implement this as follows:

    >>> s.to_frame().with_row_index().select(
    ...     pl.when(pl.col("index") == 1).then(10).otherwise(pl.col("a"))
    ... )
    shape: (3, 1)
    ┌─────────┐
    │ literal │
    │ ---     │
    │ i64     │
    ╞═════════╡
    │ 1       │
    │ 10      │
    │ 3       │
    └─────────┘
    """
    # Normalize `indices` to a Series; a lone scalar (including old NumPy
    # scalar types that are not Iterable) is wrapped in a one-element list.
    if isinstance(indices, Iterable):
        idx_series = Series(values=indices)
    else:
        scalar_index: Any = indices  # Workaround for older NumPy versions
        idx_series = Series(values=[scalar_index])
    if idx_series.is_empty():
        return self

    # Normalize `values` likewise; strings count as scalars, not iterables.
    if isinstance(values, Series):
        val_series = values
    else:
        wrap_scalar = isinstance(values, str) or not isinstance(values, Iterable)
        val_series = Series(values=[values] if wrap_scalar else values)

    self._s.scatter(idx_series._s, val_series._s)
    return self
def index_of(self, element: IntoExpr) -> int | None:
    """
    Get the index of the first occurrence of a value, or ``None`` if it's not found.

    Parameters
    ----------
    element
        Value to find.

    Examples
    --------
    >>> s = pl.Series("a", [1, None, 17])
    >>> s.index_of(17)
    2
    >>> s.index_of(None)  # search for a null
    1
    >>> s.index_of(55) is None
    True
    """
    # Run the search through the expression engine and extract the scalar.
    search = F.lit(self).index_of(element)
    return F.select(search).item()
def clear(self, n: int = 0) -> Series:
    """
    Create an empty copy of the current Series, with zero to 'n' elements.

    The copy has an identical name/dtype, but no data.

    Parameters
    ----------
    n
        Number of (empty) elements to return in the cleared frame.

    See Also
    --------
    clone : Cheap deepcopy/clone.

    Examples
    --------
    >>> s = pl.Series("a", [None, True, False])
    >>> s.clear()
    shape: (0,)
    Series: 'a' [bool]
    [
    ]
    >>> s.clear(n=2)
    shape: (2,)
    Series: 'a' [bool]
    [
        null
        null
    ]
    """
    # Validate `n` up front: wrong type -> TypeError, negative -> ValueError.
    if not isinstance(n, int):
        msg = f"`n` should be an integer >= 0, got {n}"
        raise TypeError(msg)
    if n < 0:
        msg = f"`n` should be an integer >= 0, got {n}"
        raise ValueError(msg)

    if n == 0:
        return self._from_pyseries(self._s.clear())

    # Build an empty series with the same name/dtype, then pad with nulls.
    if len(self) > 0:
        empty = self.__class__(name=self.name, values=[], dtype=self.dtype)
    else:
        empty = self.clone()
    return empty.extend_constant(None, n=n)
def clone(self) -> Self:
    """
    Create a copy of this Series.

    This is a cheap operation that does not copy data.

    See Also
    --------
    clear : Create an empty copy of the current Series, with identical
        schema but no data.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 3])
    >>> s.clone()
    shape: (3,)
    Series: 'a' [i64]
    [
        1
        2
        3
    ]
    """
    copied = self._s.clone()
    return self._from_pyseries(copied)
def fill_nan(self, value: int | float | Expr | None) -> Series:
    """
    Fill floating point NaN value with a fill value.

    Parameters
    ----------
    value
        Value used to fill NaN values.

    See Also
    --------
    fill_null

    Notes
    -----
    A NaN value is not the same as a null value.
    To fill null values, use :func:`fill_null`.

    Examples
    --------
    >>> s = pl.Series("a", [1.0, 2.0, 3.0, float("nan")])
    >>> s.fill_nan(0)
    shape: (4,)
    Series: 'a' [f64]
    [
        1.0
        2.0
        3.0
        0.0
    ]
    """
    # NOTE(review): docstring-only stub — implementation appears to be
    # attached elsewhere (dispatch decorator?); confirm before editing.

def fill_null(
    self,
    value: Any | Expr | None = None,
    strategy: FillNullStrategy | None = None,
    limit: int | None = None,
) -> Series:
    """
    Fill null values using the specified value or strategy.

    Parameters
    ----------
    value
        Value used to fill null values.
    strategy : {None, 'forward', 'backward', 'min', 'max', 'mean', 'zero', 'one'}
        Strategy used to fill null values.
    limit
        Number of consecutive null values to fill when using the 'forward' or
        'backward' strategy.

    See Also
    --------
    backward_fill
    fill_nan
    forward_fill

    Notes
    -----
    A null value is not the same as a NaN value.
    To fill NaN values, use :func:`fill_nan`.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 3, None])
    >>> s.fill_null(strategy="forward")
    shape: (4,)
    Series: 'a' [i64]
    [
        1
        2
        3
        3
    ]
    >>> s.fill_null(strategy="min")
    shape: (4,)
    Series: 'a' [i64]
    [
        1
        2
        3
        1
    ]
    >>> s = pl.Series("b", ["x", None, "z"])
    >>> s.fill_null(pl.lit(""))
    shape: (3,)
    Series: 'b' [str]
    [
        "x"
        ""
        "z"
    ]
    """
    # NOTE(review): docstring-only stub — implementation appears to be
    # attached elsewhere (dispatch decorator?); confirm before editing.
def backward_fill(self, limit: int | None = None) -> Series:
    """
    Fill missing values with the next non-null value.

    This is an alias of `.fill_null(strategy="backward")`.

    Parameters
    ----------
    limit
        The number of consecutive null values to backward fill.

    See Also
    --------
    fill_null
    forward_fill
    shift
    """
    # Thin convenience alias over fill_null.
    return self.fill_null(limit=limit, strategy="backward")
def forward_fill(self, limit: int | None = None) -> Series:
    """
    Fill missing values with the last non-null value.

    This is an alias of `.fill_null(strategy="forward")`.

    Parameters
    ----------
    limit
        The number of consecutive null values to forward fill.

    See Also
    --------
    backward_fill
    fill_null
    shift
    """
    # Thin convenience alias over fill_null.
    return self.fill_null(limit=limit, strategy="forward")
# NOTE(review): the four methods below are docstring-only stubs; their
# implementations appear to be supplied elsewhere (dispatch decorator?) —
# confirm the mechanism before editing.
def floor(self) -> Series:
    """
    Rounds down to the nearest integer value.

    Only works on floating point Series.

    Examples
    --------
    >>> s = pl.Series("a", [1.12345, 2.56789, 3.901234])
    >>> s.floor()
    shape: (3,)
    Series: 'a' [f64]
    [
        1.0
        2.0
        3.0
    ]
    """

def ceil(self) -> Series:
    """
    Rounds up to the nearest integer value.

    Only works on floating point Series.

    Examples
    --------
    >>> s = pl.Series("a", [1.12345, 2.56789, 3.901234])
    >>> s.ceil()
    shape: (3,)
    Series: 'a' [f64]
    [
        2.0
        3.0
        4.0
    ]
    """

def round(self, decimals: int = 0, mode: RoundMode = "half_to_even") -> Series:
    """
    Round underlying floating point data by `decimals` digits.

    The default rounding mode is "half to even" (also known as "bankers' rounding").

    Parameters
    ----------
    decimals
        Number of decimals to round by.
    mode : {'half_to_even', 'half_away_from_zero'}
        Rounding mode.

    Examples
    --------
    >>> s = pl.Series("a", [1.12345, 2.56789, 3.901234])
    >>> s.round(2)
    shape: (3,)
    Series: 'a' [f64]
    [
        1.12
        2.57
        3.9
    ]
    >>> s = pl.Series([-3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5])
    >>> s.round(mode="half_to_even")
    shape: (8,)
    Series: '' [f64]
    [
        -4.0
        -2.0
        -2.0
        -0.0
        0.0
        2.0
        2.0
        4.0
    ]
    """

def round_sig_figs(self, digits: int) -> Series:
    """
    Round to a number of significant figures.

    Parameters
    ----------
    digits
        Number of significant figures to round to.

    Examples
    --------
    >>> s = pl.Series([0.01234, 3.333, 3450.0])
    >>> s.round_sig_figs(2)
    shape: (3,)
    Series: '' [f64]
    [
        0.012
        3.3
        3500.0
    ]
    """
def dot(self, other: Series | ArrayLike) -> int | float | None:
    """
    Compute the dot/inner product between two Series.

    Parameters
    ----------
    other
        Series (or array) to compute dot product with.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 3])
    >>> s2 = pl.Series("b", [4.0, 5.0, 6.0])
    >>> s.dot(s2)
    32.0
    """
    # Coerce array-likes so both operands expose a backing series.
    if not isinstance(other, Series):
        other = Series(other)
    # The dot product is only defined for equal-length operands.
    if len(self) != len(other):
        n, m = len(self), len(other)
        msg = f"Series length mismatch: expected {n!r}, found {m!r}"
        raise ShapeError(msg)
    return self._s.dot(other._s)
# NOTE(review): the methods below (`mode` through `tanh`) are docstring-only
# stubs; their implementations appear to be supplied elsewhere (likely a
# class-level dispatch decorator) — confirm the mechanism before editing.
def mode(self, *, maintain_order: bool = False) -> Series:
    """
    Compute the most occurring value(s).

    Can return multiple Values.

    Parameters
    ----------
    maintain_order
        Maintain order of data. This requires more work.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 2, 3])
    >>> s.mode()
    shape: (1,)
    Series: 'a' [i64]
    [
        2
    ]
    """

def sign(self) -> Series:
    """
    Compute the element-wise sign function on numeric types.

    The returned value is computed as follows:

    * -1 if x < 0.
    * 1 if x > 0.
    * x otherwise (typically 0, but could be NaN if the input is).

    Null values are preserved as-is, and the dtype of the input is preserved.

    Examples
    --------
    >>> s = pl.Series("a", [-9.0, -0.0, 0.0, 4.0, float("nan"), None])
    >>> s.sign()
    shape: (6,)
    Series: 'a' [f64]
    [
        -1.0
        -0.0
        0.0
        1.0
        NaN
        null
    ]
    """

def sin(self) -> Series:
    """
    Compute the element-wise value for the sine.

    Examples
    --------
    >>> import math
    >>> s = pl.Series("a", [0.0, math.pi / 2.0, math.pi])
    >>> s.sin()
    shape: (3,)
    Series: 'a' [f64]
    [
        0.0
        1.0
        1.2246e-16
    ]
    """

def cos(self) -> Series:
    """
    Compute the element-wise value for the cosine.

    Examples
    --------
    >>> import math
    >>> s = pl.Series("a", [0.0, math.pi / 2.0, math.pi])
    >>> s.cos()
    shape: (3,)
    Series: 'a' [f64]
    [
        1.0
        6.1232e-17
        -1.0
    ]
    """

def tan(self) -> Series:
    """
    Compute the element-wise value for the tangent.

    Examples
    --------
    >>> import math
    >>> s = pl.Series("a", [0.0, math.pi / 2.0, math.pi])
    >>> s.tan()
    shape: (3,)
    Series: 'a' [f64]
    [
        0.0
        1.6331e16
        -1.2246e-16
    ]
    """

def cot(self) -> Series:
    """
    Compute the element-wise value for the cotangent.

    Examples
    --------
    >>> import math
    >>> s = pl.Series("a", [0.0, math.pi / 2.0, math.pi])
    >>> s.cot()
    shape: (3,)
    Series: 'a' [f64]
    [
        inf
        6.1232e-17
        -8.1656e15
    ]
    """

def arcsin(self) -> Series:
    """
    Compute the element-wise value for the inverse sine.

    Examples
    --------
    >>> s = pl.Series("a", [1.0, 0.0, -1.0])
    >>> s.arcsin()
    shape: (3,)
    Series: 'a' [f64]
    [
        1.570796
        0.0
        -1.570796
    ]
    """

def arccos(self) -> Series:
    """
    Compute the element-wise value for the inverse cosine.

    Examples
    --------
    >>> s = pl.Series("a", [1.0, 0.0, -1.0])
    >>> s.arccos()
    shape: (3,)
    Series: 'a' [f64]
    [
        0.0
        1.570796
        3.141593
    ]
    """

def arctan(self) -> Series:
    """
    Compute the element-wise value for the inverse tangent.

    Examples
    --------
    >>> s = pl.Series("a", [1.0, 0.0, -1.0])
    >>> s.arctan()
    shape: (3,)
    Series: 'a' [f64]
    [
        0.785398
        0.0
        -0.785398
    ]
    """

def arcsinh(self) -> Series:
    """
    Compute the element-wise value for the inverse hyperbolic sine.

    Examples
    --------
    >>> s = pl.Series("a", [1.0, 0.0, -1.0])
    >>> s.arcsinh()
    shape: (3,)
    Series: 'a' [f64]
    [
        0.881374
        0.0
        -0.881374
    ]
    """

def arccosh(self) -> Series:
    """
    Compute the element-wise value for the inverse hyperbolic cosine.

    Examples
    --------
    >>> s = pl.Series("a", [5.0, 1.0, 0.0, -1.0])
    >>> s.arccosh()
    shape: (4,)
    Series: 'a' [f64]
    [
        2.292432
        0.0
        NaN
        NaN
    ]
    """

def arctanh(self) -> Series:
    """
    Compute the element-wise value for the inverse hyperbolic tangent.

    Examples
    --------
    >>> s = pl.Series("a", [2.0, 1.0, 0.5, 0.0, -0.5, -1.0, -1.1])
    >>> s.arctanh()
    shape: (7,)
    Series: 'a' [f64]
    [
        NaN
        inf
        0.549306
        0.0
        -0.549306
        -inf
        NaN
    ]
    """

def sinh(self) -> Series:
    """
    Compute the element-wise value for the hyperbolic sine.

    Examples
    --------
    >>> s = pl.Series("a", [1.0, 0.0, -1.0])
    >>> s.sinh()
    shape: (3,)
    Series: 'a' [f64]
    [
        1.175201
        0.0
        -1.175201
    ]
    """

def cosh(self) -> Series:
    """
    Compute the element-wise value for the hyperbolic cosine.

    Examples
    --------
    >>> s = pl.Series("a", [1.0, 0.0, -1.0])
    >>> s.cosh()
    shape: (3,)
    Series: 'a' [f64]
    [
        1.543081
        1.0
        1.543081
    ]
    """

def tanh(self) -> Series:
    """
    Compute the element-wise value for the hyperbolic tangent.

    Examples
    --------
    >>> s = pl.Series("a", [1.0, 0.0, -1.0])
    >>> s.tanh()
    shape: (3,)
    Series: 'a' [f64]
    [
        0.761594
        0.0
        -0.761594
    ]
    """
def map_elements(
    self,
    function: Callable[[Any], Any],
    return_dtype: PolarsDataType | None = None,
    *,
    skip_nulls: bool = True,
) -> Self:
    """
    Map a custom/user-defined function (UDF) over elements in this Series.

    .. warning::
        This method is much slower than the native expressions API.
        Only use it if you cannot implement your logic otherwise.

    Implementing logic with a Python UDF is almost always significantly
    slower and more memory intensive than the equivalent native expression:
    the native engine runs in Rust, can parallelise and logically optimise
    the query, while a UDF forces materialisation and runs element-by-element
    in Python. Prefer the expression API wherever possible, e.g. for
    `x ↦ sqrt(x)` use `s.sqrt()`, for list elements
    `s.list.eval(pl.element().sqrt())`, and for struct fields
    `s.struct.field("field_name").sqrt()`.

    Parameters
    ----------
    function
        Custom function or lambda.
    return_dtype
        Output datatype.
        If not set, the dtype will be inferred based on the first non-null value
        that is returned by the function. If the function returns a different
        datatype, this argument must be set or the method will fail.
    skip_nulls
        Nulls will be skipped and not passed to the python function.
        This is faster because python can be skipped and because we call
        more specialized functions.

    Returns
    -------
    Series

    Warnings
    --------
    If `return_dtype` is not provided, this may lead to unexpected results.
    We allow this, but it is considered a bug in the user's query.

    Notes
    -----
    * If your function is expensive and you don't want it to be called more than
      once for a given input, consider applying an `@lru_cache` decorator to it.
      If your data is suitable you may achieve *significant* speedups.
    * A UDF passed to `map_elements` must be pure, meaning that it cannot modify
      or depend on state other than its arguments.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 3])
    >>> s.map_elements(lambda x: x + 10, return_dtype=pl.Int64)  # doctest: +SKIP
    shape: (3,)
    Series: 'a' [i64]
    [
        11
        12
        13
    ]
    """
    from polars._utils.udfs import warn_on_inefficient_map

    resolved_dtype = (
        None if return_dtype is None else parse_into_dtype(return_dtype)
    )
    # Emit a performance warning when the UDF could have been expressed
    # with native expressions instead.
    warn_on_inefficient_map(function, columns=[self.name], map_target="series")
    mapped = self._s.map_elements(
        function, return_dtype=resolved_dtype, skip_nulls=skip_nulls
    )
    return self._from_pyseries(mapped)
def shift(self, n: int = 1, *, fill_value: IntoExpr | None = None) -> Series:
    """
    Shift values by the given number of indices.

    Parameters
    ----------
    n
        Number of indices to shift forward. If a negative value is passed, values
        are shifted in the opposite direction instead.
    fill_value
        Fill the resulting null values with this value. Accepts scalar expression
        input. Non-expression inputs are parsed as literals.

    Notes
    -----
    This method is similar to the `LAG` operation in SQL when the value for `n`
    is positive. With a negative value for `n`, it is similar to `LEAD`.

    Examples
    --------
    By default, values are shifted forward by one index.

    >>> s = pl.Series([1, 2, 3, 4])
    >>> s.shift()
    shape: (4,)
    Series: '' [i64]
    [
        null
        1
        2
        3
    ]

    Pass a negative value to shift in the opposite direction instead.

    >>> s.shift(-2)
    shape: (4,)
    Series: '' [i64]
    [
        3
        4
        null
        null
    ]

    Specify `fill_value` to fill the resulting null values.

    >>> s.shift(-2, fill_value=100)
    shape: (4,)
    Series: '' [i64]
    [
        3
        4
        100
        100
    ]
    """
    # NOTE(review): docstring-only stub — implementation appears to be
    # attached elsewhere (dispatch decorator?); confirm before editing.
def zip_with(self, mask: Series, other: Series) -> Self:
    """
    Take values from self or other based on the given mask.

    Where mask evaluates true, take values from self. Where mask evaluates false,
    take values from other.

    Parameters
    ----------
    mask
        Boolean Series.
    other
        Series of same type.

    Returns
    -------
    Series

    Examples
    --------
    >>> s1 = pl.Series([1, 2, 3, 4, 5])
    >>> s2 = pl.Series([5, 4, 3, 2, 1])
    >>> s1.zip_with(s1 < s2, s2)
    shape: (5,)
    Series: '' [i64]
    [
        1
        2
        3
        2
        1
    ]
    >>> mask = pl.Series([True, False, True, False, True])
    >>> s1.zip_with(mask, s2)
    shape: (5,)
    Series: '' [i64]
    [
        1
        4
        3
        2
        5
    ]
    """
    # `other` must be a Series of the same flavor before zipping.
    require_same_type(self, other)
    zipped = self._s.zip_with(mask._s, other._s)
    return self._from_pyseries(zipped)
@unstable()
def rolling_min_by(
    self,
    by: IntoExpr,
    window_size: timedelta | str,
    *,
    min_samples: int = 1,
    closed: ClosedInterval = "right",
) -> Self:
    """
    Compute a rolling min based on another series.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

    Given a `by` column `<t_0, t_1, ..., t_n>`, then `closed="right"`
    (the default) means the windows will be:

        - (t_0 - window_size, t_0]
        - (t_1 - window_size, t_1]
        - ...
        - (t_n - window_size, t_n]

    Parameters
    ----------
    by
        Should be ``DateTime``, ``Date``, ``UInt64``, ``UInt32``, ``Int64``,
        or ``Int32`` data type (note that the integral ones require using `'i'`
        in `window size`).
    window_size
        The length of the window. Can be a dynamic temporal
        size indicated by a timedelta or the following string language:

        - 1ns (1 nanosecond)
        - 1us (1 microsecond)
        - 1ms (1 millisecond)
        - 1s (1 second)
        - 1m (1 minute)
        - 1h (1 hour)
        - 1d (1 calendar day)
        - 1w (1 calendar week)
        - 1mo (1 calendar month)
        - 1q (1 calendar quarter)
        - 1y (1 calendar year)
        - 1i (1 index count)

        By "calendar day", we mean the corresponding time on the next day
        (which may not be 24 hours, due to daylight savings). Similarly for
        "calendar week", "calendar month", "calendar quarter", and
        "calendar year".
    min_samples
        The number of values in the window that should be non-null before computing
        a result.
    closed : {'left', 'right', 'both', 'none'}
        Define which sides of the temporal interval are closed (inclusive),
        defaults to `'right'`.

    Notes
    -----
    If you want to compute multiple aggregation statistics over the same dynamic
    window, consider using `rolling` - this method can cache the window size
    computation.

    Examples
    --------
    Create a series with a row index value

    >>> from datetime import timedelta, datetime
    >>> start = datetime(2001, 1, 1)
    >>> stop = datetime(2001, 1, 2)
    >>> s = pl.Series("index", range(25))
    >>> s
    shape: (25,)
    Series: 'index' [i64]
    [
        0
        1
        2
        3
        4
        …
        20
        21
        22
        23
        24
    ]

    Create another series to apply the window mask:

    >>> d = pl.Series("date", pl.datetime_range(start, stop, "1h", eager=True))
    >>> d
    shape: (25,)
    Series: 'date' [datetime[μs]]
    [
        2001-01-01 00:00:00
        2001-01-01 01:00:00
        2001-01-01 02:00:00
        2001-01-01 03:00:00
        2001-01-01 04:00:00
        …
        2001-01-01 20:00:00
        2001-01-01 21:00:00
        2001-01-01 22:00:00
        2001-01-01 23:00:00
        2001-01-02 00:00:00
    ]

    Compute the rolling min with the temporal windows
    from the second series closed on the right:

    >>> s.rolling_min_by(d, "3h")
    shape: (25,)
    Series: 'index' [i64]
    [
        0
        0
        0
        1
        2
        …
        18
        19
        20
        21
        22
    ]
    """
@deprecate_renamed_parameter("min_periods", "min_samples", version="1.21.0")
def rolling_min(
    self,
    window_size: int,
    weights: list[float] | None = None,
    *,
    min_samples: int | None = None,
    center: bool = False,
) -> Series:
    """
    Apply a rolling min (moving min) over the values in this array.

    A window of length `window_size` will traverse the array. The values that fill
    this window will (optionally) be multiplied with the weights given by the
    `weight` vector. The resulting values will be aggregated to their min.

    The window at a given row will include the row itself and the `window_size - 1`
    elements before it.

    .. versionchanged:: 1.21.0
        The `min_periods` parameter was renamed `min_samples`.

    Parameters
    ----------
    window_size
        The length of the window in number of elements.
    weights
        An optional slice with the same length as the window that will be multiplied
        elementwise with the values in the window.
    min_samples
        The number of values in the window that should be non-null before computing
        a result. If set to `None` (default), it will be set equal to `window_size`.
    center
        Set the labels at the center of the window.

    Examples
    --------
    >>> s = pl.Series("a", [100, 200, 300, 400, 500])
    >>> s.rolling_min(window_size=3)
    shape: (5,)
    Series: 'a' [i64]
    [
        null
        null
        100
        200
        300
    ]
    """
@unstable()
def rolling_max_by(
    self,
    by: IntoExpr,
    window_size: timedelta | str,
    *,
    min_samples: int = 1,
    closed: ClosedInterval = "right",
) -> Self:
    """
    Compute a rolling max based on another series.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

    Given a `by` column `<t_0, t_1, ..., t_n>`, then `closed="right"`
    (the default) means the windows will be:

        - (t_0 - window_size, t_0]
        - (t_1 - window_size, t_1]
        - ...
        - (t_n - window_size, t_n]

    Parameters
    ----------
    by
        Should be ``DateTime``, ``Date``, ``UInt64``, ``UInt32``, ``Int64``,
        or ``Int32`` data type (note that the integral ones require using `'i'`
        in `window size`).
    window_size
        The length of the window. Can be a dynamic temporal
        size indicated by a timedelta or the following string language:

        - 1ns (1 nanosecond)
        - 1us (1 microsecond)
        - 1ms (1 millisecond)
        - 1s (1 second)
        - 1m (1 minute)
        - 1h (1 hour)
        - 1d (1 calendar day)
        - 1w (1 calendar week)
        - 1mo (1 calendar month)
        - 1q (1 calendar quarter)
        - 1y (1 calendar year)
        - 1i (1 index count)

        By "calendar day", we mean the corresponding time on the next day
        (which may not be 24 hours, due to daylight savings). Similarly for
        "calendar week", "calendar month", "calendar quarter", and
        "calendar year".
    min_samples
        The number of values in the window that should be non-null before computing
        a result.
    closed : {'left', 'right', 'both', 'none'}
        Define which sides of the temporal interval are closed (inclusive),
        defaults to `'right'`.

    Notes
    -----
    If you want to compute multiple aggregation statistics over the same dynamic
    window, consider using `rolling` - this method can cache the window size
    computation.

    Examples
    --------
    Create a series with a row index value

    >>> from datetime import timedelta, datetime
    >>> start = datetime(2001, 1, 1)
    >>> stop = datetime(2001, 1, 2)
    >>> s = pl.Series("index", range(25))
    >>> s
    shape: (25,)
    Series: 'index' [i64]
    [
        0
        1
        2
        3
        4
        …
        20
        21
        22
        23
        24
    ]

    Create another series to apply the window mask:

    >>> d = pl.Series("date", pl.datetime_range(start, stop, "1h", eager=True))
    >>> d
    shape: (25,)
    Series: 'date' [datetime[μs]]
    [
        2001-01-01 00:00:00
        2001-01-01 01:00:00
        2001-01-01 02:00:00
        2001-01-01 03:00:00
        2001-01-01 04:00:00
        …
        2001-01-01 20:00:00
        2001-01-01 21:00:00
        2001-01-01 22:00:00
        2001-01-01 23:00:00
        2001-01-02 00:00:00
    ]

    Compute the rolling max with the temporal windows
    from the second series closed on the right:

    >>> s.rolling_max_by(d, "3h")
    shape: (25,)
    Series: 'index' [i64]
    [
        0
        1
        2
        3
        4
        …
        20
        21
        22
        23
        24
    ]
    """
@deprecate_renamed_parameter("min_periods", "min_samples", version="1.21.0")
def rolling_max(
    self,
    window_size: int,
    weights: list[float] | None = None,
    *,
    min_samples: int | None = None,
    center: bool = False,
) -> Series:
    """
    Apply a rolling max (moving max) over the values in this array.

    A window of length `window_size` will traverse the array. The values that fill
    this window will (optionally) be multiplied with the weights given by the
    `weight` vector. The resulting values will be aggregated to their max.

    The window at a given row will include the row itself and the `window_size - 1`
    elements before it.

    .. versionchanged:: 1.21.0
        The `min_periods` parameter was renamed `min_samples`.

    Parameters
    ----------
    window_size
        The length of the window in number of elements.
    weights
        An optional slice with the same length as the window that will be multiplied
        elementwise with the values in the window.
    min_samples
        The number of values in the window that should be non-null before computing
        a result. If set to `None` (default), it will be set equal to `window_size`.
    center
        Set the labels at the center of the window.

    Examples
    --------
    >>> s = pl.Series("a", [100, 200, 300, 400, 500])
    >>> s.rolling_max(window_size=2)
    shape: (5,)
    Series: 'a' [i64]
    [
        null
        200
        300
        400
        500
    ]
    """
@unstable()
def rolling_mean_by(
    self,
    by: IntoExpr,
    window_size: timedelta | str,
    *,
    min_samples: int = 1,
    closed: ClosedInterval = "right",
) -> Self:
    """
    Compute a rolling mean based on another series.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

    Given a `by` column `<t_0, t_1, ..., t_n>`, then `closed="right"`
    (the default) means the windows will be:

        - (t_0 - window_size, t_0]
        - (t_1 - window_size, t_1]
        - ...
        - (t_n - window_size, t_n]

    Parameters
    ----------
    by
        Should be ``DateTime``, ``Date``, ``UInt64``, ``UInt32``, ``Int64``,
        or ``Int32`` data type (note that the integral ones require using `'i'`
        in `window size`).
    window_size
        The length of the window. Can be a dynamic temporal
        size indicated by a timedelta or the following string language:

        - 1ns (1 nanosecond)
        - 1us (1 microsecond)
        - 1ms (1 millisecond)
        - 1s (1 second)
        - 1m (1 minute)
        - 1h (1 hour)
        - 1d (1 calendar day)
        - 1w (1 calendar week)
        - 1mo (1 calendar month)
        - 1q (1 calendar quarter)
        - 1y (1 calendar year)
        - 1i (1 index count)

        By "calendar day", we mean the corresponding time on the next day
        (which may not be 24 hours, due to daylight savings). Similarly for
        "calendar week", "calendar month", "calendar quarter", and
        "calendar year".
    min_samples
        The number of values in the window that should be non-null before computing
        a result.
    closed : {'left', 'right', 'both', 'none'}
        Define which sides of the temporal interval are closed (inclusive),
        defaults to `'right'`.

    Notes
    -----
    If you want to compute multiple aggregation statistics over the same dynamic
    window, consider using `rolling` - this method can cache the window size
    computation.

    Examples
    --------
    Create a series with a row index value

    >>> from datetime import timedelta, datetime
    >>> start = datetime(2001, 1, 1)
    >>> stop = datetime(2001, 1, 2)
    >>> s = pl.Series("index", range(25))
    >>> s
    shape: (25,)
    Series: 'index' [i64]
    [
        0
        1
        2
        3
        4
        …
        20
        21
        22
        23
        24
    ]

    Create another series to apply the window mask:

    >>> d = pl.Series("date", pl.datetime_range(start, stop, "1h", eager=True))
    >>> d
    shape: (25,)
    Series: 'date' [datetime[μs]]
    [
        2001-01-01 00:00:00
        2001-01-01 01:00:00
        2001-01-01 02:00:00
        2001-01-01 03:00:00
        2001-01-01 04:00:00
        …
        2001-01-01 20:00:00
        2001-01-01 21:00:00
        2001-01-01 22:00:00
        2001-01-01 23:00:00
        2001-01-02 00:00:00
    ]

    Compute the rolling mean with the temporal windows
    from the second series closed on the right:

    >>> s.rolling_mean_by(d, "3h")
    shape: (25,)
    Series: 'index' [f64]
    [
        0.0
        0.5
        1.0
        2.0
        3.0
        …
        19.0
        20.0
        21.0
        22.0
        23.0
    ]
    """
@deprecate_renamed_parameter("min_periods", "min_samples", version="1.21.0")
def rolling_mean(
    self,
    window_size: int,
    weights: list[float] | None = None,
    *,
    min_samples: int | None = None,
    center: bool = False,
) -> Series:
    """
    Apply a rolling mean (moving mean) over the values in this array.

    A window of length `window_size` will traverse the array. The values that fill
    this window will (optionally) be multiplied with the weights given by the
    `weight` vector. The resulting values will be aggregated to their mean.

    The window at a given row will include the row itself and the `window_size - 1`
    elements before it.

    .. versionchanged:: 1.21.0
        The `min_periods` parameter was renamed `min_samples`.

    Parameters
    ----------
    window_size
        The length of the window in number of elements.
    weights
        An optional slice with the same length as the window that will be multiplied
        elementwise with the values in the window.
    min_samples
        The number of values in the window that should be non-null before computing
        a result. If set to `None` (default), it will be set equal to `window_size`.
    center
        Set the labels at the center of the window.

    Examples
    --------
    >>> s = pl.Series("a", [100, 200, 300, 400, 500])
    >>> s.rolling_mean(window_size=2)
    shape: (5,)
    Series: 'a' [f64]
    [
        null
        150.0
        250.0
        350.0
        450.0
    ]
    """
@unstable()
def rolling_sum_by(
    self,
    by: IntoExpr,
    window_size: timedelta | str,
    *,
    min_samples: int = 1,
    closed: ClosedInterval = "right",
) -> Self:
    """
    Compute a rolling sum based on another series.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

    Given a `by` column `<t_0, t_1, ..., t_n>`, then `closed="right"`
    (the default) means the windows will be:

        - (t_0 - window_size, t_0]
        - (t_1 - window_size, t_1]
        - ...
        - (t_n - window_size, t_n]

    Parameters
    ----------
    by
        Should be ``DateTime``, ``Date``, ``UInt64``, ``UInt32``, ``Int64``,
        or ``Int32`` data type (note that the integral ones require using `'i'`
        in `window size`).
    window_size
        The length of the window. Can be a dynamic temporal
        size indicated by a timedelta or the following string language:

        - 1ns (1 nanosecond)
        - 1us (1 microsecond)
        - 1ms (1 millisecond)
        - 1s (1 second)
        - 1m (1 minute)
        - 1h (1 hour)
        - 1d (1 calendar day)
        - 1w (1 calendar week)
        - 1mo (1 calendar month)
        - 1q (1 calendar quarter)
        - 1y (1 calendar year)
        - 1i (1 index count)

        By "calendar day", we mean the corresponding time on the next day
        (which may not be 24 hours, due to daylight savings). Similarly for
        "calendar week", "calendar month", "calendar quarter", and
        "calendar year".
    min_samples
        The number of values in the window that should be non-null before computing
        a result.
    closed : {'left', 'right', 'both', 'none'}
        Define which sides of the temporal interval are closed (inclusive),
        defaults to `'right'`.

    Notes
    -----
    If you want to compute multiple aggregation statistics over the same dynamic
    window, consider using `rolling` - this method can cache the window size
    computation.

    Examples
    --------
    Create a series with a row index value

    >>> from datetime import timedelta, datetime
    >>> start = datetime(2001, 1, 1)
    >>> stop = datetime(2001, 1, 2)
    >>> s = pl.Series("index", range(25))
    >>> s
    shape: (25,)
    Series: 'index' [i64]
    [
        0
        1
        2
        3
        4
        …
        20
        21
        22
        23
        24
    ]

    Create another series to apply the window mask:

    >>> d = pl.Series("date", pl.datetime_range(start, stop, "1h", eager=True))
    >>> d
    shape: (25,)
    Series: 'date' [datetime[μs]]
    [
        2001-01-01 00:00:00
        2001-01-01 01:00:00
        2001-01-01 02:00:00
        2001-01-01 03:00:00
        2001-01-01 04:00:00
        …
        2001-01-01 20:00:00
        2001-01-01 21:00:00
        2001-01-01 22:00:00
        2001-01-01 23:00:00
        2001-01-02 00:00:00
    ]

    Compute the rolling sum with the temporal windows
    from the second series closed on the right:

    >>> s.rolling_sum_by(d, "3h")
    shape: (25,)
    Series: 'index' [i64]
    [
        0
        1
        3
        6
        9
        …
        57
        60
        63
        66
        69
    ]
    """
@deprecate_renamed_parameter("min_periods", "min_samples", version="1.21.0")
def rolling_sum(
    self,
    window_size: int,
    weights: list[float] | None = None,
    *,
    min_samples: int | None = None,
    center: bool = False,
) -> Series:
    """
    Apply a rolling sum (moving sum) over the values in this array.

    A window of length `window_size` will traverse the array. The values that fill
    this window will (optionally) be multiplied with the weights given by the
    `weight` vector. The resulting values will be aggregated to their sum.

    The window at a given row will include the row itself and the `window_size - 1`
    elements before it.

    .. versionchanged:: 1.21.0
        The `min_periods` parameter was renamed `min_samples`.

    Parameters
    ----------
    window_size
        The length of the window in number of elements.
    weights
        An optional slice with the same length as the window that will be multiplied
        elementwise with the values in the window.
    min_samples
        The number of values in the window that should be non-null before computing
        a result. If set to `None` (default), it will be set equal to `window_size`.
    center
        Set the labels at the center of the window.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 3, 4, 5])
    >>> s.rolling_sum(window_size=2)
    shape: (5,)
    Series: 'a' [i64]
    [
        null
        3
        5
        7
        9
    ]
    """
@unstable()
def rolling_std_by(
    self,
    by: IntoExpr,
    window_size: timedelta | str,
    *,
    min_samples: int = 1,
    closed: ClosedInterval = "right",
    ddof: int = 1,
) -> Self:
    """
    Compute a rolling standard deviation based on another series.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

    Given a `by` column `<t_0, t_1, ..., t_n>`, then `closed="right"`
    (the default) means the windows will be:

        - (t_0 - window_size, t_0]
        - (t_1 - window_size, t_1]
        - ...
        - (t_n - window_size, t_n]

    Parameters
    ----------
    by
        Should be ``DateTime``, ``Date``, ``UInt64``, ``UInt32``, ``Int64``,
        or ``Int32`` data type (note that the integral ones require using `'i'`
        in `window size`).
    window_size
        The length of the window. Can be a dynamic temporal
        size indicated by a timedelta or the following string language:

        - 1ns (1 nanosecond)
        - 1us (1 microsecond)
        - 1ms (1 millisecond)
        - 1s (1 second)
        - 1m (1 minute)
        - 1h (1 hour)
        - 1d (1 calendar day)
        - 1w (1 calendar week)
        - 1mo (1 calendar month)
        - 1q (1 calendar quarter)
        - 1y (1 calendar year)
        - 1i (1 index count)

        By "calendar day", we mean the corresponding time on the next day
        (which may not be 24 hours, due to daylight savings). Similarly for
        "calendar week", "calendar month", "calendar quarter", and
        "calendar year".
    min_samples
        The number of values in the window that should be non-null before computing
        a result.
    closed : {'left', 'right', 'both', 'none'}
        Define which sides of the temporal interval are closed (inclusive),
        defaults to `'right'`.
    ddof
        "Delta Degrees of Freedom": The divisor for a length N window is N - ddof

    Notes
    -----
    If you want to compute multiple aggregation statistics over the same dynamic
    window, consider using `rolling` - this method can cache the window size
    computation.

    Examples
    --------
    Create a series with a row index value

    >>> from datetime import timedelta, datetime
    >>> start = datetime(2001, 1, 1)
    >>> stop = datetime(2001, 1, 2)
    >>> s = pl.Series("index", range(25))
    >>> s
    shape: (25,)
    Series: 'index' [i64]
    [
        0
        1
        2
        3
        4
        …
        20
        21
        22
        23
        24
    ]

    Create another series to apply the window mask:

    >>> d = pl.Series("date", pl.datetime_range(start, stop, "1h", eager=True))
    >>> d
    shape: (25,)
    Series: 'date' [datetime[μs]]
    [
        2001-01-01 00:00:00
        2001-01-01 01:00:00
        2001-01-01 02:00:00
        2001-01-01 03:00:00
        2001-01-01 04:00:00
        …
        2001-01-01 20:00:00
        2001-01-01 21:00:00
        2001-01-01 22:00:00
        2001-01-01 23:00:00
        2001-01-02 00:00:00
    ]

    Compute the rolling std with the temporal windows
    from the second series closed on the right:

    >>> s.rolling_std_by(d, "3h")
    shape: (25,)
    Series: 'index' [f64]
    [
        null
        0.707107
        1.0
        1.0
        1.0
        …
        1.0
        1.0
        1.0
        1.0
        1.0
    ]
    """
@deprecate_renamed_parameter("min_periods", "min_samples", version="1.21.0")
def rolling_std(
    self,
    window_size: int,
    weights: list[float] | None = None,
    *,
    min_samples: int | None = None,
    center: bool = False,
    ddof: int = 1,
) -> Series:
    """
    Compute a rolling std dev.

    A window of length `window_size` will traverse the array. The values that fill
    this window will (optionally) be multiplied with the weights given by the
    `weight` vector. The resulting values will be aggregated to their std dev.

    The window at a given row will include the row itself and the `window_size - 1`
    elements before it.

    .. versionchanged:: 1.21.0
        The `min_periods` parameter was renamed `min_samples`.

    Parameters
    ----------
    window_size
        The length of the window in number of elements.
    weights
        An optional slice with the same length as the window that will be multiplied
        elementwise with the values in the window.
    min_samples
        The number of values in the window that should be non-null before computing
        a result. If set to `None` (default), it will be set equal to `window_size`.
    center
        Set the labels at the center of the window.
    ddof
        "Delta Degrees of Freedom": The divisor for a length N window is N - ddof

    Examples
    --------
    >>> s = pl.Series("a", [1.0, 2.0, 3.0, 4.0, 6.0, 8.0])
    >>> s.rolling_std(window_size=3)
    shape: (6,)
    Series: 'a' [f64]
    [
        null
        null
        1.0
        1.0
        1.527525
        2.0
    ]
    """
@unstable()
def rolling_var_by(
    self,
    by: IntoExpr,
    window_size: timedelta | str,
    *,
    min_samples: int = 1,
    closed: ClosedInterval = "right",
    ddof: int = 1,
) -> Self:
    """
    Compute a rolling variance based on another series.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

    Given a `by` column `<t_0, t_1, ..., t_n>`, then `closed="right"`
    (the default) means the windows will be:

        - (t_0 - window_size, t_0]
        - (t_1 - window_size, t_1]
        - ...
        - (t_n - window_size, t_n]

    Parameters
    ----------
    by
        Should be ``DateTime``, ``Date``, ``UInt64``, ``UInt32``, ``Int64``,
        or ``Int32`` data type (note that the integral ones require using `'i'`
        in `window size`).
    window_size
        The length of the window. Can be a dynamic temporal
        size indicated by a timedelta or the following string language:

        - 1ns (1 nanosecond)
        - 1us (1 microsecond)
        - 1ms (1 millisecond)
        - 1s (1 second)
        - 1m (1 minute)
        - 1h (1 hour)
        - 1d (1 calendar day)
        - 1w (1 calendar week)
        - 1mo (1 calendar month)
        - 1q (1 calendar quarter)
        - 1y (1 calendar year)
        - 1i (1 index count)

        By "calendar day", we mean the corresponding time on the next day
        (which may not be 24 hours, due to daylight savings). Similarly for
        "calendar week", "calendar month", "calendar quarter", and
        "calendar year".
    min_samples
        The number of values in the window that should be non-null before computing
        a result.
    closed : {'left', 'right', 'both', 'none'}
        Define which sides of the temporal interval are closed (inclusive),
        defaults to `'right'`.
    ddof
        "Delta Degrees of Freedom": The divisor for a length N window is N - ddof

    Notes
    -----
    If you want to compute multiple aggregation statistics over the same dynamic
    window, consider using `rolling` - this method can cache the window size
    computation.

    Examples
    --------
    Create a series with a row index value

    >>> from datetime import timedelta, datetime
    >>> start = datetime(2001, 1, 1)
    >>> stop = datetime(2001, 1, 2)
    >>> s = pl.Series("index", range(25))
    >>> s
    shape: (25,)
    Series: 'index' [i64]
    [
        0
        1
        2
        3
        4
        …
        20
        21
        22
        23
        24
    ]

    Create another series to apply the window mask:

    >>> d = pl.Series("date", pl.datetime_range(start, stop, "1h", eager=True))
    >>> d
    shape: (25,)
    Series: 'date' [datetime[μs]]
    [
        2001-01-01 00:00:00
        2001-01-01 01:00:00
        2001-01-01 02:00:00
        2001-01-01 03:00:00
        2001-01-01 04:00:00
        …
        2001-01-01 20:00:00
        2001-01-01 21:00:00
        2001-01-01 22:00:00
        2001-01-01 23:00:00
        2001-01-02 00:00:00
    ]

    Compute the rolling variance with the temporal windows
    from the second series closed on the right:

    >>> s.rolling_var_by(d, "3h")
    shape: (25,)
    Series: 'index' [f64]
    [
        null
        0.5
        1.0
        1.0
        1.0
        …
        1.0
        1.0
        1.0
        1.0
        1.0
    ]
    """
@deprecate_renamed_parameter("min_periods", "min_samples", version="1.21.0")
def rolling_var(
    self,
    window_size: int,
    weights: list[float] | None = None,
    *,
    min_samples: int | None = None,
    center: bool = False,
    ddof: int = 1,
) -> Series:
    """
    Compute a rolling variance.

    A window of length `window_size` will traverse the array. The values that fill
    this window will (optionally) be multiplied with the weights given by the
    `weight` vector. The resulting values will be aggregated to their variance.

    The window at a given row will include the row itself and the `window_size - 1`
    elements before it.

    .. versionchanged:: 1.21.0
        The `min_periods` parameter was renamed `min_samples`.

    Parameters
    ----------
    window_size
        The length of the window in number of elements.
    weights
        An optional slice with the same length as the window that will be multiplied
        elementwise with the values in the window.
    min_samples
        The number of values in the window that should be non-null before computing
        a result. If set to `None` (default), it will be set equal to `window_size`.
    center
        Set the labels at the center of the window.
    ddof
        "Delta Degrees of Freedom": The divisor for a length N window is N - ddof

    Examples
    --------
    >>> s = pl.Series("a", [1.0, 2.0, 3.0, 4.0, 6.0, 8.0])
    >>> s.rolling_var(window_size=3)
    shape: (6,)
    Series: 'a' [f64]
    [
        null
        null
        1.0
        1.0
        2.333333
        4.0
    ]
    """
@unstable()
@deprecate_renamed_parameter("min_periods", "min_samples", version="1.21.0")
def rolling_map(
    self,
    function: Callable[[Series], Any],
    window_size: int,
    weights: list[float] | None = None,
    *,
    min_samples: int | None = None,
    center: bool = False,
) -> Series:
    """
    Compute a custom rolling window function.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

    .. versionchanged:: 1.21.0
        The `min_periods` parameter was renamed `min_samples`.

    Parameters
    ----------
    function
        Custom aggregation function.
    window_size
        The length of the window in number of elements.
    weights
        An optional slice with the same length as the window that will be multiplied
        elementwise with the values in the window.
    min_samples
        The number of values in the window that should be non-null before computing
        a result. If set to `None` (default), it will be set equal to `window_size`.
    center
        Set the labels at the center of the window.

    Warnings
    --------
    Computing custom functions is extremely slow. Use specialized rolling
    functions such as :func:`Series.rolling_sum` if at all possible.

    Examples
    --------
    >>> from numpy import nansum
    >>> s = pl.Series([11.0, 2.0, 9.0, float("nan"), 8.0])
    >>> s.rolling_map(nansum, window_size=3)
    shape: (5,)
    Series: '' [f64]
    [
        null
        null
        22.0
        11.0
        17.0
    ]
    """
@unstable()
def rolling_median_by(
    self,
    by: IntoExpr,
    window_size: timedelta | str,
    *,
    min_samples: int = 1,
    closed: ClosedInterval = "right",
) -> Self:
    """
    Compute a rolling median based on another series.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

    Given a `by` column `<t_0, t_1, ..., t_n>`, then `closed="right"`
    (the default) means the windows will be:

        - (t_0 - window_size, t_0]
        - (t_1 - window_size, t_1]
        - ...
        - (t_n - window_size, t_n]

    Parameters
    ----------
    by
        Should be ``DateTime``, ``Date``, ``UInt64``, ``UInt32``, ``Int64``,
        or ``Int32`` data type (note that the integral ones require using `'i'`
        in `window size`).
    window_size
        The length of the window. Can be a dynamic temporal
        size indicated by a timedelta or the following string language:

        - 1ns (1 nanosecond)
        - 1us (1 microsecond)
        - 1ms (1 millisecond)
        - 1s (1 second)
        - 1m (1 minute)
        - 1h (1 hour)
        - 1d (1 calendar day)
        - 1w (1 calendar week)
        - 1mo (1 calendar month)
        - 1q (1 calendar quarter)
        - 1y (1 calendar year)
        - 1i (1 index count)

        By "calendar day", we mean the corresponding time on the next day
        (which may not be 24 hours, due to daylight savings). Similarly for
        "calendar week", "calendar month", "calendar quarter", and
        "calendar year".
    min_samples
        The number of values in the window that should be non-null before computing
        a result.
    closed : {'left', 'right', 'both', 'none'}
        Define which sides of the temporal interval are closed (inclusive),
        defaults to `'right'`.

    Notes
    -----
    If you want to compute multiple aggregation statistics over the same dynamic
    window, consider using `rolling` - this method can cache the window size
    computation.

    Examples
    --------
    Create a series with a row index value

    >>> from datetime import timedelta, datetime
    >>> start = datetime(2001, 1, 1)
    >>> stop = datetime(2001, 1, 2)
    >>> s = pl.Series("index", range(25))
    >>> s
    shape: (25,)
    Series: 'index' [i64]
    [
        0
        1
        2
        3
        4
        …
        20
        21
        22
        23
        24
    ]

    Create another series to apply the window mask:

    >>> d = pl.Series("date", pl.datetime_range(start, stop, "1h", eager=True))
    >>> d
    shape: (25,)
    Series: 'date' [datetime[μs]]
    [
        2001-01-01 00:00:00
        2001-01-01 01:00:00
        2001-01-01 02:00:00
        2001-01-01 03:00:00
        2001-01-01 04:00:00
        …
        2001-01-01 20:00:00
        2001-01-01 21:00:00
        2001-01-01 22:00:00
        2001-01-01 23:00:00
        2001-01-02 00:00:00
    ]

    Compute the rolling median with the temporal windows
    from the second series closed on the right:

    >>> s.rolling_median_by(d, "3h")
    shape: (25,)
    Series: 'index' [f64]
    [
        0.0
        0.5
        1.0
        2.0
        3.0
        …
        19.0
        20.0
        21.0
        22.0
        23.0
    ]
    """
@unstable()
@deprecate_renamed_parameter("min_periods", "min_samples", version="1.21.0")
def rolling_median(
    self,
    window_size: int,
    weights: list[float] | None = None,
    *,
    min_samples: int | None = None,
    center: bool = False,
) -> Series:
    """
    Compute a rolling median.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

    The window at a given row will include the row itself and the `window_size - 1`
    elements before it.

    .. versionchanged:: 1.21.0
        The `min_periods` parameter was renamed `min_samples`.

    Parameters
    ----------
    window_size
        The length of the window in number of elements.
    weights
        An optional slice with the same length as the window that will be multiplied
        elementwise with the values in the window.
    min_samples
        The number of values in the window that should be non-null before computing
        a result. If set to `None` (default), it will be set equal to `window_size`.
    center
        Set the labels at the center of the window.

    Examples
    --------
    >>> s = pl.Series("a", [1.0, 2.0, 3.0, 4.0, 6.0, 8.0])
    >>> s.rolling_median(window_size=3)
    shape: (6,)
    Series: 'a' [f64]
    [
        null
        null
        2.0
        3.0
        4.0
        6.0
    ]
    """
@unstable()
def rolling_quantile_by(
    self,
    by: IntoExpr,
    window_size: timedelta | str,
    *,
    quantile: float,
    interpolation: QuantileMethod = "nearest",
    min_samples: int = 1,
    closed: ClosedInterval = "right",
) -> Self:
    """
    Compute a rolling quantile based on another series.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

    Given a `by` column `<t_0, t_1, ..., t_n>`, then `closed="right"`
    (the default) means the windows will be:

        - (t_0 - window_size, t_0]
        - (t_1 - window_size, t_1]
        - ...
        - (t_n - window_size, t_n]

    Parameters
    ----------
    by
        Should be ``DateTime``, ``Date``, ``UInt64``, ``UInt32``, ``Int64``,
        or ``Int32`` data type (note that the integral ones require using `'i'`
        in `window size`).
    window_size
        The length of the window. Can be a dynamic
        temporal size indicated by a timedelta or the following string language:

        - 1ns (1 nanosecond)
        - 1us (1 microsecond)
        - 1ms (1 millisecond)
        - 1s (1 second)
        - 1m (1 minute)
        - 1h (1 hour)
        - 1d (1 calendar day)
        - 1w (1 calendar week)
        - 1mo (1 calendar month)
        - 1q (1 calendar quarter)
        - 1y (1 calendar year)
        - 1i (1 index count)

        By "calendar day", we mean the corresponding time on the next day
        (which may not be 24 hours, due to daylight savings). Similarly for
        "calendar week", "calendar month", "calendar quarter", and
        "calendar year".
    quantile
        Quantile between 0.0 and 1.0.
    interpolation : {'nearest', 'higher', 'lower', 'midpoint', 'linear', 'equiprobable'}
        Interpolation method.
    min_samples
        The number of values in the window that should be non-null before computing
        a result.
    closed : {'left', 'right', 'both', 'none'}
        Define which sides of the temporal interval are closed (inclusive),
        defaults to `'right'`.

    Notes
    -----
    If you want to compute multiple aggregation statistics over the same dynamic
    window, consider using `rolling` - this method can cache the window size
    computation.

    Examples
    --------
    Create a series with a row index value

    >>> from datetime import timedelta, datetime
    >>> start = datetime(2001, 1, 1)
    >>> stop = datetime(2001, 1, 2)
    >>> s = pl.Series("index", range(25))
    >>> s
    shape: (25,)
    Series: 'index' [i64]
    [
        0
        1
        2
        3
        4
        …
        20
        21
        22
        23
        24
    ]

    Create another series to apply the window mask:

    >>> d = pl.Series("date", pl.datetime_range(start, stop, "1h", eager=True))
    >>> d
    shape: (25,)
    Series: 'date' [datetime[μs]]
    [
        2001-01-01 00:00:00
        2001-01-01 01:00:00
        2001-01-01 02:00:00
        2001-01-01 03:00:00
        2001-01-01 04:00:00
        …
        2001-01-01 20:00:00
        2001-01-01 21:00:00
        2001-01-01 22:00:00
        2001-01-01 23:00:00
        2001-01-02 00:00:00
    ]

    Compute the rolling quantile with the temporal windows from the second series closed on the right:

    >>> s.rolling_quantile_by(d, "3h", quantile=0.5)
    shape: (25,)
    Series: 'index' [f64]
    [
        0.0
        1.0
        1.0
        2.0
        3.0
        …
        19.0
        20.0
        21.0
        22.0
        23.0
    ]
    """  # noqa: W505
@unstable()
@deprecate_renamed_parameter("min_periods", "min_samples", version="1.21.0")
def rolling_quantile(
    self,
    quantile: float,
    interpolation: QuantileMethod = "nearest",
    window_size: int = 2,
    weights: list[float] | None = None,
    *,
    min_samples: int | None = None,
    center: bool = False,
) -> Series:
    """
    Compute a rolling quantile.

    The window at a given row will include the row itself and the `window_size - 1`
    elements before it.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

    .. versionchanged:: 1.21.0
        The `min_periods` parameter was renamed `min_samples`.

    Parameters
    ----------
    quantile
        Quantile between 0.0 and 1.0.
    interpolation : {'nearest', 'higher', 'lower', 'midpoint', 'linear', 'equiprobable'}
        Interpolation method.
    window_size
        The length of the window in number of elements.
    weights
        An optional slice with the same length as the window that will be multiplied
        elementwise with the values in the window.
    min_samples
        The number of values in the window that should be non-null before computing
        a result. If set to `None` (default), it will be set equal to `window_size`.
    center
        Set the labels at the center of the window.

    Examples
    --------
    >>> s = pl.Series("a", [1.0, 2.0, 3.0, 4.0, 6.0, 8.0])
    >>> s.rolling_quantile(quantile=0.33, window_size=3)
    shape: (6,)
    Series: 'a' [f64]
    [
        null
        null
        2.0
        3.0
        4.0
        6.0
    ]
    >>> s.rolling_quantile(quantile=0.33, interpolation="linear", window_size=3)
    shape: (6,)
    Series: 'a' [f64]
    [
        null
        null
        1.66
        2.66
        3.66
        5.32
    ]
    """  # noqa: W505
@unstable()
def rolling_rank_by(
    self,
    by: IntoExpr,
    window_size: timedelta | str,
    method: RankMethod = "average",
    *,
    seed: int | None = None,
    min_samples: int = 1,
    closed: ClosedInterval = "right",
) -> Series:
    """
    Compute a rolling rank whose windows are defined by another column.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

    Given a `by` column `<t_0, t_1, ..., t_n>` and the default
    `closed="right"`, the window for row `i` is `(t_i - window_size, t_i]`.

    Parameters
    ----------
    by
        Should be ``DateTime``, ``Date``, ``UInt64``, ``UInt32``, ``Int64``,
        or ``Int32`` data type (note that the integral ones require using
        `'i'` in `window_size`).
    window_size
        The length of the window. Can be a dynamic temporal size indicated
        by a timedelta or a duration string: 1ns, 1us, 1ms, 1s, 1m, 1h,
        1d (calendar day), 1w (calendar week), 1mo (calendar month),
        1q (calendar quarter), 1y (calendar year), 1i (index count).
        By "calendar day" we mean the corresponding time on the next day
        (which may not be 24 hours, due to daylight savings); similarly
        for the other calendar units.
    method : {'average', 'min', 'max', 'dense', 'random'}
        The method used to assign ranks to tied elements
        (default is 'average'):

        - 'average' : the mean of the ranks the tied values would receive.
        - 'min' : the smallest of those ranks (also known as
          "competition" ranking).
        - 'max' : the largest of those ranks.
        - 'dense' : like 'min', but the next highest element receives the
          rank immediately after the tied elements.
        - 'random' : choose a random rank for each value in a tie.
    seed
        Random seed used when `method='random'`. If set to None (default),
        a random seed is generated for each rolling rank operation.
    min_samples
        The number of values in the window that should be non-null before
        computing a result.
    closed : {'left', 'right', 'both', 'none'}
        Define which sides of the temporal interval are closed
        (inclusive), defaults to `'right'`.

    Returns
    -------
    Series
        Of data type :class:`.Float64` if `method` is `"average"`,
        otherwise of the index dtype (see :func:`.get_index_type()`).
    """
@unstable()
def rolling_rank(
    self,
    window_size: int,
    method: RankMethod = "average",
    *,
    seed: int | None = None,
    min_samples: int | None = None,
    center: bool = False,
) -> Series:
    """
    Compute a rolling rank over a fixed-size window.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

    A window of length `window_size` traverses the array; the values in
    each window are ranked according to `method`, and the output is the
    rank of the value at the end of the sliding window.

    Parameters
    ----------
    window_size
        Integer size of the rolling window.
    method : {'average', 'min', 'max', 'dense', 'random'}
        The method used to assign ranks to tied elements
        (default is 'average'):

        - 'average' : the mean of the ranks the tied values would receive.
        - 'min' : the smallest of those ranks (also known as
          "competition" ranking).
        - 'max' : the largest of those ranks.
        - 'dense' : like 'min', but the next highest element receives the
          rank immediately after the tied elements.
        - 'random' : choose a random rank for each value in a tie.
    seed
        Random seed used when `method='random'`. If set to None (default),
        a random seed is generated for each rolling rank operation.
    min_samples
        The number of values in the window that should be non-null before
        computing a result. If set to `None` (default), it will be set
        equal to `window_size`.
    center
        Set the labels at the center of the window.

    Returns
    -------
    Series
        Of data type :class:`.Float64` if `method` is `"average"`,
        otherwise of the index dtype (see :func:`.get_index_type()`).

    Examples
    --------
    >>> pl.Series([1, 4, 4, 1, 9]).rolling_rank(3, method="average")
    shape: (5,)
    Series: '' [f64]
    [
        null
        null
        2.5
        1.0
        3.0
    ]
    """
@unstable()
def rolling_skew(
    self,
    window_size: int,
    *,
    bias: bool = True,
    min_samples: int | None = None,
    center: bool = False,
) -> Series:
    """
    Compute a rolling skew over a fixed-size window.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

    The window at a given row includes the row itself and the
    `window_size - 1` elements before it.

    Parameters
    ----------
    window_size
        Integer size of the rolling window.
    bias
        If False, the calculations are corrected for statistical bias.
    min_samples
        The number of values in the window that should be non-null before
        computing a result. If set to `None` (default), it will be set
        equal to `window_size`.
    center
        Set the labels at the center of the window.

    See Also
    --------
    Series.skew

    Examples
    --------
    >>> pl.Series([1, 4, 2, 9]).rolling_skew(3)
    shape: (4,)
    Series: '' [f64]
    [
        null
        null
        0.381802
        0.47033
    ]

    Note how the values match

    >>> pl.Series([1, 4, 2]).skew(), pl.Series([4, 2, 9]).skew()
    (0.38180177416060584, 0.47033046033698594)
    """
@unstable()
def rolling_kurtosis(
    self,
    window_size: int,
    *,
    fisher: bool = True,
    bias: bool = True,
    min_samples: int | None = None,
    center: bool = False,
) -> Series:
    """
    Compute a rolling kurtosis over a fixed-size window.

    .. warning::
        This functionality is considered **unstable**. It may be changed
        at any point without it being considered a breaking change.

    The window at a given row includes the row itself and the
    `window_size - 1` elements before it.

    Parameters
    ----------
    window_size
        Integer size of the rolling window.
    fisher : bool, optional
        If True, Fisher's definition is used (normal ==> 0.0). If False,
        Pearson's definition is used (normal ==> 3.0).
    bias : bool, optional
        If False, the calculations are corrected for statistical bias.
    min_samples
        The number of values in the window that should be non-null before
        computing a result. If set to `None` (default), it will be set
        equal to `window_size`.
    center
        Set the labels at the center of the window.

    See Also
    --------
    Series.kurtosis

    Examples
    --------
    >>> pl.Series([1, 4, 2, 9]).rolling_kurtosis(3)
    shape: (4,)
    Series: '' [f64]
    [
        null
        null
        -1.5
        -1.5
    ]
    """
def sample(
    self,
    n: int | None = None,
    *,
    fraction: float | None = None,
    with_replacement: bool = False,
    shuffle: bool = False,
    seed: int | None = None,
) -> Series:
    """
    Draw a random sample of values from this Series.

    Parameters
    ----------
    n
        Number of items to return. Cannot be used with `fraction`.
        Defaults to 1 if `fraction` is None.
    fraction
        Fraction of items to return. Cannot be used with `n`.
    with_replacement
        Allow values to be sampled more than once.
    shuffle
        Shuffle the order of sampled data points.
    seed
        Seed for the random number generator. If set to None (default), a
        random seed is generated for each sample operation.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 3, 4, 5])
    >>> s.sample(2, seed=0)  # doctest: +IGNORE_RESULT
    shape: (2,)
    Series: 'a' [i64]
    [
        1
        5
    ]
    """
def peak_max(self) -> Self:
    """
    Return a boolean mask marking the local maximum peaks.

    Returns
    -------
    Series
        Boolean Series that is `true` at each local maximum.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 3, 4, 5])
    >>> s.peak_max()
    shape: (5,)
    Series: 'a' [bool]
    [
        false
        false
        false
        false
        true
    ]
    """
def peak_min(self) -> Self:
    """
    Return a boolean mask marking the local minimum peaks.

    Returns
    -------
    Series
        Boolean Series that is `true` at each local minimum.

    Examples
    --------
    >>> s = pl.Series("a", [4, 1, 3, 2, 5])
    >>> s.peak_min()
    shape: (5,)
    Series: 'a' [bool]
    [
        false
        true
        false
        true
        false
    ]
    """
def n_unique(self) -> int:
    """
    Count the number of unique values in this Series.

    Returns
    -------
    int
        The number of distinct values.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 2, 3])
    >>> s.n_unique()
    3
    """
    # Delegate directly to the underlying PySeries implementation.
    distinct_count = self._s.n_unique()
    return distinct_count
def shrink_to_fit(self, *, in_place: bool = False) -> Series:
    """
    Shrink Series memory usage.

    Shrinks the underlying array capacity to exactly fit the actual data.
    (Note that this function does not change the Series data type).

    Parameters
    ----------
    in_place
        If True, shrink this Series itself and return it; otherwise
        operate on (and return) a clone, leaving this Series untouched.
    """
    # Pick the target first, then shrink it — avoids duplicating the call.
    if not in_place:
        copied = self.clone()
        copied._s.shrink_to_fit()
        return copied
    self._s.shrink_to_fit()
    return self
def hash(
    self,
    seed: int = 0,
    seed_1: int | None = None,
    seed_2: int | None = None,
    seed_3: int | None = None,
) -> Series:
    """
    Hash the Series.

    The hash value is of type `UInt64`.

    Parameters
    ----------
    seed
        Random seed parameter. Defaults to 0.
    seed_1
        Random seed parameter. Defaults to `seed` if not set.
    seed_2
        Random seed parameter. Defaults to `seed` if not set.
    seed_3
        Random seed parameter. Defaults to `seed` if not set.

    Notes
    -----
    This implementation of `hash` does not guarantee stable results across
    different Polars versions. Its stability is only guaranteed within a
    single version.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 3])
    >>> s.hash(seed=42)  # doctest: +IGNORE_RESULT
    shape: (3,)
    Series: 'a' [u64]
    [
        10734580197236529959
        3022416320763508302
        13756996518000038261
    ]
    """
def reinterpret(self, *, signed: bool = True) -> Series:
    """
    Reinterpret the underlying bits as a signed/unsigned integer.

    This operation is only allowed for 64bit integers. For lower bits
    integers, you can safely use the cast operation instead.

    Parameters
    ----------
    signed
        If True, reinterpret as `pl.Int64`. Otherwise, reinterpret as
        `pl.UInt64`.

    Examples
    --------
    >>> s = pl.Series("a", [-(2**60), -2, 3])
    >>> s.reinterpret(signed=False)
    shape: (3,)
    Series: 'a' [u64]
    [
        17293822569102704640
        18446744073709551614
        3
    ]
    """
def interpolate(self, method: InterpolationMethod = "linear") -> Series:
    """
    Interpolate intermediate values.

    Nulls at the beginning and end of the series remain null.

    Parameters
    ----------
    method : {'linear', 'nearest'}
        Interpolation method.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, None, None, 5])
    >>> s.interpolate()
    shape: (5,)
    Series: 'a' [f64]
    [
        1.0
        2.0
        3.0
        4.0
        5.0
    ]
    """
def interpolate_by(self, by: IntoExpr) -> Series:
    """
    Interpolate intermediate values with x-coordinate based on another column.

    Nulls at the beginning and end of the series remain null.

    Parameters
    ----------
    by
        Column to interpolate values based on.

    Examples
    --------
    Fill null values using linear interpolation.

    >>> s = pl.Series([1, None, None, 3])
    >>> by = pl.Series([1, 2, 7, 8])
    >>> s.interpolate_by(by)
    shape: (4,)
    Series: '' [f64]
    [
        1.0
        1.285714
        2.714286
        3.0
    ]
    """
def abs(self) -> Series:
    """
    Compute absolute values.

    Same as `abs(series)`.

    Examples
    --------
    >>> s = pl.Series([1, -2, -3])
    >>> s.abs()
    shape: (3,)
    Series: '' [i64]
    [
        1
        2
        3
    ]
    """
def rank(
    self,
    method: RankMethod = "average",
    *,
    descending: bool = False,
    seed: int | None = None,
) -> Series:
    """
    Assign ranks to data, dealing with ties appropriately.

    Parameters
    ----------
    method : {'average', 'min', 'max', 'dense', 'ordinal', 'random'}
        The method used to assign ranks to tied elements
        (default is 'average'):

        - 'average' : the mean of the ranks the tied values would receive.
        - 'min' : the smallest of those ranks (also known as
          "competition" ranking).
        - 'max' : the largest of those ranks.
        - 'dense' : like 'min', but the next highest element receives the
          rank immediately after the tied elements.
        - 'ordinal' : all values get a distinct rank, corresponding to
          the order in which they occur in the Series.
        - 'random' : like 'ordinal', but the rank for ties is not
          dependent on the order in which they occur in the Series.
    descending
        Rank in descending order.
    seed
        If `method="random"`, use this as seed.

    Examples
    --------
    The 'average' method:

    >>> s = pl.Series("a", [3, 6, 1, 1, 6])
    >>> s.rank()
    shape: (5,)
    Series: 'a' [f64]
    [
        3.0
        4.5
        1.5
        1.5
        4.5
    ]

    The 'ordinal' method:

    >>> s.rank("ordinal")
    shape: (5,)
    Series: 'a' [u32]
    [
        3
        4
        1
        2
        5
    ]
    """
def diff(self, n: int = 1, null_behavior: NullBehavior = "ignore") -> Series:
    """
    Calculate the first discrete difference between shifted items.

    Parameters
    ----------
    n
        Number of slots to shift.
    null_behavior : {'ignore', 'drop'}
        How to handle null values.

    Examples
    --------
    >>> s = pl.Series("s", values=[20, 10, 30, 25, 35], dtype=pl.Int8)
    >>> s.diff()
    shape: (5,)
    Series: 's' [i8]
    [
        null
        -10
        20
        -5
        10
    ]
    >>> s.diff(n=2, null_behavior="drop")
    shape: (3,)
    Series: 's' [i8]
    [
        10
        15
        5
    ]
    """
def pct_change(self, n: int | IntoExprColumn = 1) -> Series:
    """
    Compute the percentage change between values.

    Percentage change (as fraction) between the current element and the
    most-recent non-null element at least `n` period(s) before it.
    Computes the change from the previous row by default.

    Parameters
    ----------
    n
        Periods to shift for forming the percent change.

    Notes
    -----
    Null values are preserved. If you're coming from pandas, this matches
    their ``fill_method=None`` behaviour.

    Examples
    --------
    >>> pl.Series([1, 2, 4, 8, 16, 32]).pct_change(2)
    shape: (6,)
    Series: '' [f64]
    [
        null
        null
        3.0
        3.0
        3.0
        3.0
    ]
    """
def skew(self, *, bias: bool = True) -> float | None:
    r"""
    Compute the sample skewness of this Series.

    For normally distributed data, the skewness should be about zero. For
    unimodal continuous distributions, a skewness value greater than zero
    means that there is more weight in the right tail of the distribution.
    See scipy.stats for more information.

    Parameters
    ----------
    bias : bool, optional
        If False, the calculations are corrected for statistical bias.

    Notes
    -----
    The sample skewness is computed as the Fisher-Pearson coefficient of
    skewness:

    .. math:: g_1=\frac{m_3}{m_2^{3/2}}

    where :math:`m_i=\frac{1}{N}\sum_{n=1}^N(x[n]-\bar{x})^i` is the
    biased sample :math:`i\texttt{th}` central moment and :math:`\bar{x}`
    is the sample mean. If `bias` is False, the adjusted Fisher-Pearson
    standardized moment coefficient is computed instead:

    .. math::
        G_1 = \frac{k_3}{k_2^{3/2}} = \frac{\sqrt{N(N-1)}}{N-2}\frac{m_3}{m_2^{3/2}}

    Examples
    --------
    >>> s = pl.Series([1, 2, 2, 4, 5])
    >>> s.skew()
    0.34776706224699483
    """
    # The PySeries backend performs the actual computation.
    result = self._s.skew(bias)
    return result
def kurtosis(self, *, fisher: bool = True, bias: bool = True) -> float | None:
    """
    Compute the kurtosis (Fisher or Pearson) of this Series.

    Kurtosis is the fourth central moment divided by the square of the
    variance. If Fisher's definition is used, then 3.0 is subtracted from
    the result to give 0.0 for a normal distribution. If bias is False,
    the kurtosis is calculated using k statistics to eliminate bias coming
    from biased moment estimators. See scipy.stats for more information.

    Parameters
    ----------
    fisher : bool, optional
        If True, Fisher's definition is used (normal ==> 0.0). If False,
        Pearson's definition is used (normal ==> 3.0).
    bias : bool, optional
        If False, the calculations are corrected for statistical bias.

    Examples
    --------
    >>> s = pl.Series("grades", [66, 79, 54, 97, 96, 70, 69, 85, 93, 75])
    >>> s.kurtosis()
    -1.0522623626787952
    """
    # The PySeries backend performs the actual computation.
    result = self._s.kurtosis(fisher, bias)
    return result
def clip(
    self,
    lower_bound: NumericLiteral | TemporalLiteral | IntoExprColumn | None = None,
    upper_bound: NumericLiteral | TemporalLiteral | IntoExprColumn | None = None,
) -> Series:
    """
    Set values outside the given boundaries to the boundary value.

    Parameters
    ----------
    lower_bound
        Lower bound. Accepts expression input; non-expression inputs are
        parsed as literals. If set to `None` (default), no lower bound is
        applied.
    upper_bound
        Upper bound. Accepts expression input; non-expression inputs are
        parsed as literals. If set to `None` (default), no upper bound is
        applied.

    See Also
    --------
    when

    Notes
    -----
    This method only works for numeric and temporal columns. To clip other
    data types, consider writing a `when-then-otherwise` expression. See
    :func:`when`.

    Examples
    --------
    >>> s = pl.Series([-50, 5, 50, None])
    >>> s.clip(1, 10)
    shape: (4,)
    Series: '' [i64]
    [
        1
        5
        10
        null
    ]
    """
def lower_bound(self) -> Self:
    """
    Return the lower bound of this Series' dtype as a unit Series.

    See Also
    --------
    upper_bound : return the upper bound of the given Series' dtype.

    Examples
    --------
    >>> s = pl.Series("s", [-1, 0, 1], dtype=pl.Int32)
    >>> s.lower_bound()
    shape: (1,)
    Series: 's' [i32]
    [
        -2147483648
    ]
    >>> s = pl.Series("s", [1.0, 2.5, 3.0], dtype=pl.Float32)
    >>> s.lower_bound()
    shape: (1,)
    Series: 's' [f32]
    [
        -inf
    ]
    """
def upper_bound(self) -> Self:
    """
    Return the upper bound of this Series' dtype as a unit Series.

    See Also
    --------
    lower_bound : return the lower bound of the given Series' dtype.

    Examples
    --------
    >>> s = pl.Series("s", [-1, 0, 1], dtype=pl.Int8)
    >>> s.upper_bound()
    shape: (1,)
    Series: 's' [i8]
    [
        127
    ]
    >>> s = pl.Series("s", [1.0, 2.5, 3.0], dtype=pl.Float64)
    >>> s.upper_bound()
    shape: (1,)
    Series: 's' [f64]
    [
        inf
    ]
    """
def replace(
    self,
    old: IntoExpr | Sequence[Any] | Mapping[Any, Any],
    new: IntoExpr | Sequence[Any] | NoDefault = no_default,
    *,
    default: IntoExpr | NoDefault = no_default,
    return_dtype: PolarsDataType | None = None,
) -> Self:
    """
    Replace values by different values of the same data type.

    Values not matched by `old` keep their original value; the original
    data type is always preserved (use :meth:`replace_strict` to change
    the return data type).

    Parameters
    ----------
    old
        Value or sequence of values to replace. Also accepts a mapping of
        values to their replacement as syntactic sugar for
        `replace(old=Series(mapping.keys()), new=Series(mapping.values()))`.
    new
        Value or sequence of values to replace by. Length must match the
        length of `old` or have length 1.
    default
        Set values that were not replaced to this value. Defaults to
        keeping the original value. Accepts expression input;
        non-expression inputs are parsed as literals.

        .. deprecated:: 0.20.31
            Use :meth:`replace_strict` instead to set a default while
            replacing values.
    return_dtype
        The data type of the resulting expression. If set to `None`
        (default), the data type is determined automatically based on the
        other inputs.

        .. deprecated:: 0.20.31
            Use :meth:`replace_strict` instead to set a return data type
            while replacing values.

    See Also
    --------
    replace_strict
    str.replace

    Notes
    -----
    The global string cache must be enabled when replacing categorical
    values.

    Examples
    --------
    Replace a single value by another value. Values that were not replaced
    remain unchanged.

    >>> s = pl.Series([1, 2, 2, 3])
    >>> s.replace(2, 100)
    shape: (4,)
    Series: '' [i64]
    [
        1
        100
        100
        3
    ]

    Passing a mapping with replacements is also supported as syntactic
    sugar.

    >>> s.replace({2: 100, 3: 200})
    shape: (4,)
    Series: '' [i64]
    [
        1
        100
        100
        200
    ]

    The original data type is preserved when replacing by values of a
    different data type.

    >>> s = pl.Series(["x", "y", "z"])
    >>> s.replace({"x": 1, "y": 2, "z": 3})
    shape: (3,)
    Series: '' [str]
    [
        "1"
        "2"
        "3"
    ]
    """
def replace_strict(
    self,
    old: IntoExpr | Sequence[Any] | Mapping[Any, Any],
    new: IntoExpr | Sequence[Any] | NoDefault = no_default,
    *,
    default: IntoExpr | NoDefault = no_default,
    return_dtype: PolarsDataType | None = None,
) -> Self:
    """
    Replace all values by different values.

    Unlike :meth:`replace`, every non-null value must either be matched by
    `old` or covered by `default`, and the return data type may differ
    from the input.

    Parameters
    ----------
    old
        Value or sequence of values to replace. Also accepts a mapping of
        values to their replacement as syntactic sugar for
        `replace_strict(old=Series(mapping.keys()), new=Series(mapping.values()))`.
    new
        Value or sequence of values to replace by. Length must match the
        length of `old` or have length 1.
    default
        Set values that were not replaced to this value. If no default is
        specified (default), an error is raised if any values were not
        replaced. Accepts expression input; non-expression inputs are
        parsed as literals.
    return_dtype
        The data type of the resulting Series. If set to `None` (default),
        the data type is determined automatically based on the other
        inputs.

    Raises
    ------
    InvalidOperationError
        If any non-null values in the original column were not replaced,
        and no `default` was specified.

    See Also
    --------
    replace
    str.replace

    Notes
    -----
    The global string cache must be enabled when replacing categorical
    values.

    Examples
    --------
    Replace values by passing sequences to the `old` and `new` parameters.

    >>> s = pl.Series([1, 2, 2, 3])
    >>> s.replace_strict([1, 2, 3], [100, 200, 300])
    shape: (4,)
    Series: '' [i64]
    [
        100
        200
        200
        300
    ]

    Passing a mapping with replacements is also supported as syntactic
    sugar.

    >>> s.replace_strict({1: 100, 2: 200, 3: 300})
    shape: (4,)
    Series: '' [i64]
    [
        100
        200
        200
        300
    ]

    By default, an error is raised if any non-null values were not
    replaced. Specify a default to set all values that were not matched.

    >>> s.replace_strict({2: 200, 3: 300})  # doctest: +SKIP
    Traceback (most recent call last):
    ...
    polars.exceptions.InvalidOperationError: incomplete mapping specified for `replace_strict`
    >>> s.replace_strict({2: 200, 3: 300}, default=-1)
    shape: (4,)
    Series: '' [i64]
    [
        -1
        200
        200
        300
    ]

    Replacing by values of a different data type sets the return type
    based on a combination of the `new` data type and the `default` data
    type; set `return_dtype` to control the resulting data type directly.

    >>> s = pl.Series(["x", "y", "z"])
    >>> s.replace_strict({"x": 1, "y": 2, "z": 3}, return_dtype=pl.UInt8)
    shape: (3,)
    Series: '' [u8]
    [
        1
        2
        3
    ]
    """  # noqa: W505
def reshape(self, dimensions: tuple[int, ...]) -> Series:
    """
    Reshape this Series to a flat Series or an Array Series.

    Parameters
    ----------
    dimensions
        Tuple of the dimension sizes. If a -1 is used in any of the
        dimensions, that dimension is inferred.

    Returns
    -------
    Series
        If a single dimension is given, results in a Series of the
        original data type. If multiple dimensions are given, results in a
        Series of data type :class:`Array` with shape `dimensions`.

    See Also
    --------
    Series.list.explode : Explode a list column.

    Examples
    --------
    >>> s = pl.Series("foo", [1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> s.reshape((3, 3))
    shape: (3,)
    Series: 'foo' [array[i64, 3]]
    [
        [1, 2, 3]
        [4, 5, 6]
        [7, 8, 9]
    ]
    """
    # Reshape at the PySeries level, then wrap the result back up.
    reshaped = self._s.reshape(dimensions)
    return self._from_pyseries(reshaped)
def shuffle(self, seed: int | None = None) -> Series:
    """
    Shuffle the contents of this Series.

    Parameters
    ----------
    seed
        Seed for the random number generator. If set to None (default), a
        random seed is generated each time the shuffle is called.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 3])
    >>> s.shuffle(seed=1)
    shape: (3,)
    Series: 'a' [i64]
    [
        2
        3
        1
    ]
    """
@deprecate_renamed_parameter("min_periods", "min_samples", version="1.21.0")
def ewm_mean(
    self,
    *,
    com: float | None = None,
    span: float | None = None,
    half_life: float | None = None,
    alpha: float | None = None,
    adjust: bool = True,
    min_samples: int = 1,
    ignore_nulls: bool = False,
) -> Series:
    r"""
    Compute an exponentially-weighted moving average.

    .. versionchanged:: 1.21.0
        The `min_periods` parameter was renamed `min_samples`.

    Parameters
    ----------
    com
        Specify decay in terms of center of mass, :math:`\gamma`, with

        .. math::
            \alpha = \frac{1}{1 + \gamma} \; \forall \; \gamma \geq 0
    span
        Specify decay in terms of span, :math:`\theta`, with

        .. math::
            \alpha = \frac{2}{\theta + 1} \; \forall \; \theta \geq 1
    half_life
        Specify decay in terms of half-life, :math:`\tau`, with

        .. math::
            \alpha = 1 - \exp \left\{ \frac{ -\ln(2) }{ \tau } \right\} \;
            \forall \; \tau > 0
    alpha
        Specify smoothing factor alpha directly, :math:`0 < \alpha \leq 1`.
    adjust
        Divide by decaying adjustment factor in beginning periods to
        account for imbalance in relative weightings:

        - When `adjust=True` (the default) the EW function is calculated
          using weights :math:`w_i = (1 - \alpha)^i`
        - When `adjust=False` the EW function is calculated recursively by

          .. math::
            y_0 &= x_0 \\
            y_t &= (1 - \alpha)y_{t - 1} + \alpha x_t
    min_samples
        Minimum number of observations in window required to have a value
        (otherwise result is null).
    ignore_nulls
        Ignore missing values when calculating weights.

        - When `ignore_nulls=False` (default), weights are based on
          absolute positions. For example, the weights of :math:`x_0` and
          :math:`x_2` used in calculating the final weighted average of
          [:math:`x_0`, None, :math:`x_2`] are :math:`(1-\alpha)^2` and
          :math:`1` if `adjust=True`, and :math:`(1-\alpha)^2` and
          :math:`\alpha` if `adjust=False`.
        - When `ignore_nulls=True`, weights are based on relative
          positions; for the same example the weights are
          :math:`1-\alpha` and :math:`1` if `adjust=True`, and
          :math:`1-\alpha` and :math:`\alpha` if `adjust=False`.

    Examples
    --------
    >>> s = pl.Series([1, 2, 3])
    >>> s.ewm_mean(com=1, ignore_nulls=False)
    shape: (3,)
    Series: '' [f64]
    [
        1.0
        1.666667
        2.428571
    ]
    """
def ewm_mean_by(
    self,
    by: IntoExpr,
    *,
    half_life: str | timedelta,
) -> Series:
    r"""
    Compute a time-based exponentially weighted moving average.

    Given observations :math:`x_0, x_1, \ldots, x_{n-1}` at times
    :math:`t_0, t_1, \ldots, t_{n-1}`, the EWMA is calculated as

        .. math::

            y_0 &= x_0

            \alpha_i &= 1 - \exp \left\{ \frac{ -\ln(2)(t_i-t_{i-1}) }
                { \tau } \right\}

            y_i &= \alpha_i x_i + (1 - \alpha_i) y_{i-1}; \quad i > 0

    where :math:`\tau` is the `half_life`.

    Parameters
    ----------
    by
        Times to calculate average by. Should be ``DateTime``, ``Date``,
        ``UInt64``, ``UInt32``, ``Int64``, or ``Int32`` data type.
    half_life
        Unit over which observation decays to half its value. Can be
        created either from a timedelta, or by using a duration string
        such as "1ns", "1us", "1ms", "1s", "1m", "1h", "1d", "1w" or
        "1i" (index count); units may be combined, e.g. "3d12h4m25s".
        Note that `half_life` is treated as a constant duration —
        calendar durations such as months (or even days in the
        time-zone-aware case) are not supported; please express your
        duration in an approximately equivalent number of hours
        (e.g. '370h' instead of '1mo').

    Returns
    -------
    Series
        :class:`Float16` if input is `Float16`, :class:`.Float32` if input
        is `Float32`, otherwise :class:`.Float64`.

    Examples
    --------
    >>> from datetime import date
    >>> df = pl.DataFrame(
    ...     {
    ...         "values": [0, 1, 2, None, 4],
    ...         "times": [
    ...             date(2020, 1, 1),
    ...             date(2020, 1, 3),
    ...             date(2020, 1, 10),
    ...             date(2020, 1, 15),
    ...             date(2020, 1, 17),
    ...         ],
    ...     }
    ... ).sort("times")
    >>> df["values"].ewm_mean_by(df["times"], half_life="4d")
    shape: (5,)
    Series: 'values' [f64]
    [
        0.0
        0.292893
        1.492474
        null
        3.254508
    ]
    """
@deprecate_renamed_parameter("min_periods", "min_samples", version="1.21.0")
def ewm_std(
    self,
    *,
    com: float | None = None,
    span: float | None = None,
    half_life: float | None = None,
    alpha: float | None = None,
    adjust: bool = True,
    bias: bool = False,
    min_samples: int = 1,
    ignore_nulls: bool = False,
) -> Series:
    r"""
    Compute an exponentially-weighted moving standard deviation.

    .. versionchanged:: 1.21.0
        The `min_periods` parameter was renamed `min_samples`.

    Parameters
    ----------
    com
        Specify decay in terms of center of mass, :math:`\gamma`, with

        .. math::
            \alpha = \frac{1}{1 + \gamma} \; \forall \; \gamma \geq 0
    span
        Specify decay in terms of span, :math:`\theta`, with

        .. math::
            \alpha = \frac{2}{\theta + 1} \; \forall \; \theta \geq 1
    half_life
        Specify decay in terms of half-life, :math:`\lambda`, with

        .. math::
            \alpha = 1 - \exp \left\{ \frac{ -\ln(2) }{ \lambda } \right\} \;
            \forall \; \lambda > 0
    alpha
        Specify smoothing factor alpha directly, :math:`0 < \alpha \leq 1`.
    adjust
        Divide by decaying adjustment factor in beginning periods to
        account for imbalance in relative weightings:

        - When `adjust=True` (the default) the EW function is calculated
          using weights :math:`w_i = (1 - \alpha)^i`
        - When `adjust=False` the EW function is calculated recursively by

          .. math::
            y_0 &= x_0 \\
            y_t &= (1 - \alpha)y_{t - 1} + \alpha x_t
    bias
        When `bias=False`, apply a correction to make the estimate
        statistically unbiased.
    min_samples
        Minimum number of observations in window required to have a value
        (otherwise result is null).
    ignore_nulls
        Ignore missing values when calculating weights.

        - When `ignore_nulls=False` (default), weights are based on
          absolute positions. For example, the weights of :math:`x_0` and
          :math:`x_2` used in calculating the final weighted average of
          [:math:`x_0`, None, :math:`x_2`] are :math:`(1-\alpha)^2` and
          :math:`1` if `adjust=True`, and :math:`(1-\alpha)^2` and
          :math:`\alpha` if `adjust=False`.
        - When `ignore_nulls=True`, weights are based on relative
          positions; for the same example the weights are
          :math:`1-\alpha` and :math:`1` if `adjust=True`, and
          :math:`1-\alpha` and :math:`\alpha` if `adjust=False`.

    Examples
    --------
    >>> s = pl.Series("a", [1, 2, 3])
    >>> s.ewm_std(com=1, ignore_nulls=False)
    shape: (3,)
    Series: 'a' [f64]
    [
        0.0
        0.707107
        0.963624
    ]
    """
@deprecate_renamed_parameter("min_periods", "min_samples", version="1.21.0")
def ewm_var(
self,
*,
com: float | None = None,
span: float | None = None,
half_life: float | None = None,
alpha: float | None = None,
adjust: bool = True,
bias: bool = False,
min_samples: int = 1,
ignore_nulls: bool = False,
) -> Series:
r"""
Compute exponentially-weighted moving variance.
.. versionchanged:: 1.21.0
The `min_periods` parameter was renamed `min_samples`.
Parameters
----------
com
Specify decay in terms of center of mass, :math:`\gamma`, with
.. math::
\alpha = \frac{1}{1 + \gamma} \; \forall \; \gamma \geq 0
span
Specify decay in terms of span, :math:`\theta`, with
.. math::
\alpha = \frac{2}{\theta + 1} \; \forall \; \theta \geq 1
half_life
Specify decay in terms of half-life, :math:`\lambda`, with
.. math::
\alpha = 1 - \exp \left\{ \frac{ -\ln(2) }{ \lambda } \right\} \;
\forall \; \lambda > 0
alpha
Specify smoothing factor alpha directly, :math:`0 < \alpha \leq 1`.
adjust
Divide by decaying adjustment factor in beginning periods to account for
imbalance in relative weightings
- When `adjust=True` (the default) the EW function is calculated
using weights :math:`w_i = (1 - \alpha)^i`
- When `adjust=False` the EW function is calculated
recursively by
.. math::
y_0 &= x_0 \\
y_t &= (1 - \alpha)y_{t - 1} + \alpha x_t
bias
When `bias=False`, apply a correction to make the estimate statistically
unbiased.
min_samples
Minimum number of observations in window required to have a value
(otherwise result is null).
ignore_nulls
Ignore missing values when calculating weights.
- When `ignore_nulls=False` (default), weights are based on absolute
positions.
For example, the weights of :math:`x_0` and :math:`x_2` used in
calculating the final weighted average of
[:math:`x_0`, None, :math:`x_2`] are
:math:`(1-\alpha)^2` and :math:`1` if `adjust=True`, and
:math:`(1-\alpha)^2` and :math:`\alpha` if `adjust=False`.
- When `ignore_nulls=True`, weights are based
on relative positions. For example, the weights of
:math:`x_0` and :math:`x_2` used in calculating the final weighted
average of [:math:`x_0`, None, :math:`x_2`] are
:math:`1-\alpha` and :math:`1` if `adjust=True`,
and :math:`1-\alpha` and :math:`\alpha` if `adjust=False`.
Examples
--------
>>> s = pl.Series("a", [1, 2, 3])
>>> s.ewm_var(com=1, ignore_nulls=False)
shape: (3,)
Series: 'a' [f64]
[
0.0
0.5
0.928571
]
"""
def extend_constant(self, value: IntoExpr, n: int | IntoExprColumn) -> Series:
"""
Extremely fast method for extending the Series with 'n' copies of a value.
Parameters
----------
value
A constant literal value or a unit expression with which to extend the
expression result Series; can pass None to extend with nulls.
n
The number of additional values that will be added.
Examples
--------
>>> s = pl.Series([1, 2, 3])
>>> s.extend_constant(99, n=2)
shape: (5,)
Series: '' [i64]
[
1
2
3
99
99
]
"""
def set_sorted(self, *, descending: bool = False) -> Self:
"""
Flags the Series as 'sorted'.
Enables downstream code to user fast paths for sorted arrays.
Parameters
----------
descending
If the `Series` order is descending.
Warnings
--------
This can lead to incorrect results if this `Series` is not sorted!!
Use with care!
Examples
--------
>>> s = pl.Series("a", [1, 2, 3])
>>> s.set_sorted().max()
3
"""
return self._from_pyseries(self._s.set_sorted_flag(descending))
def new_from_index(self, index: int, length: int) -> Self:
"""
Create a new Series filled with values from the given index.
Examples
--------
>>> s = pl.Series("a", [1, 2, 3, 4, 5])
>>> s.new_from_index(1, 3)
shape: (3,)
Series: 'a' [i64]
[
2
2
2
]
"""
return self._from_pyseries(self._s.new_from_index(index, length))
def shrink_dtype(self) -> Series:
"""
Shrink numeric columns to the minimal required datatype.
Shrink to the dtype needed to fit the extrema of this [`Series`].
This can be used to reduce memory pressure.
Examples
--------
>>> s = pl.Series("a", [1, 2, 3, 4, 5, 6])
>>> s
shape: (6,)
Series: 'a' [i64]
[
1
2
3
4
5
6
]
>>> s.shrink_dtype()
shape: (6,)
Series: 'a' [i8]
[
1
2
3
4
5
6
]
"""
return wrap_s(self._s.shrink_dtype())
def get_chunks(self) -> list[Series]:
"""
Get the chunks of this Series as a list of Series.
Examples
--------
>>> s1 = pl.Series("a", [1, 2, 3])
>>> s2 = pl.Series("a", [4, 5, 6])
>>> s = pl.concat([s1, s2], rechunk=False)
>>> s.get_chunks()
[shape: (3,)
Series: 'a' [i64]
[
1
2
3
], shape: (3,)
Series: 'a' [i64]
[
4
5
6
]]
"""
return self._s.get_chunks()
def implode(self) -> Self:
"""
Aggregate values into a list.
The returned list itself is a scalar value of `list` dtype.
Examples
--------
>>> s = pl.Series("a", [1, 2, 3])
>>> s.implode()
shape: (1,)
Series: 'a' [list[i64]]
[
[1, 2, 3]
]
"""
def bitwise_count_ones(self) -> Self:
"""Evaluate the number of set bits."""
def bitwise_count_zeros(self) -> Self:
"""Evaluate the number of unset Self."""
def bitwise_leading_ones(self) -> Self:
"""Evaluate the number most-significant set bits before seeing an unset bit."""
def bitwise_leading_zeros(self) -> Self:
"""Evaluate the number most-significant unset bits before seeing a set bit."""
def bitwise_trailing_ones(self) -> Self:
"""Evaluate the number least-significant set bits before seeing an unset bit."""
def bitwise_trailing_zeros(self) -> Self:
"""Evaluate the number least-significant unset bits before seeing a set bit."""
def bitwise_and(self) -> PythonLiteral | None:
"""Perform an aggregation of bitwise ANDs."""
return self._s.bitwise_and()
def bitwise_or(self) -> PythonLiteral | None:
"""Perform an aggregation of bitwise ORs."""
return self._s.bitwise_or()
def bitwise_xor(self) -> PythonLiteral | None:
"""Perform an aggregation of bitwise XORs."""
return self._s.bitwise_xor()
def first(self, *, ignore_nulls: bool = False) -> PythonLiteral | None:
"""
Get the first element of the Series.
Parameters
----------
ignore_nulls
Ignore null values (default `False`).
If set to `True`, the first non-null value is returned, otherwise `None` is
returned if no non-null value exists.
Returns `None` if the Series is empty.
"""
return self._s.first(ignore_nulls=ignore_nulls)
def last(self, *, ignore_nulls: bool = False) -> PythonLiteral | None:
"""
Get the last element of the Series.
Parameters
----------
ignore_nulls
Ignore null values (default `False`).
If set to `True`, the last non-null value is returned, otherwise `None` is
returned if no non-null value exists.
Returns `None` if the Series is empty.
"""
return self._s.last(ignore_nulls=ignore_nulls)
def approx_n_unique(self) -> PythonLiteral | None:
"""
Approximate count of unique values.
This is done using the HyperLogLog++ algorithm for cardinality estimation.
"""
return self._s.approx_n_unique()
def _row_encode(
self,
*,
unordered: bool = False,
descending: bool | None = None,
nulls_last: bool | None = None,
) -> Series:
"""Encode to the row encoding."""
return (
self.to_frame()
.select_seq(
F.col(self.name)._row_encode(
unordered=unordered, descending=descending, nulls_last=nulls_last
)
)
.to_series()
)
def _row_decode(
self,
names: Sequence[str],
dtypes: Sequence[PolarsDataType],
*,
unordered: bool = False,
descending: Sequence[bool] | None = None,
nulls_last: Sequence[bool] | None = None,
) -> Series:
"""Decode from the row encoding."""
return (
self.to_frame()
.select_seq(
F.col(self.name)._row_decode(
names,
dtypes,
unordered=unordered,
descending=descending,
nulls_last=nulls_last,
)
)
.to_series()
)
def repeat_by(self, by: int | IntoExprColumn) -> Self:
"""
Repeat the elements in this Series as specified in the given expression.
The repeated elements are expanded into a List.
Parameters
----------
by
Numeric column that determines how often the values will be repeated.
The column will be coerced to UInt32. Give this dtype to make the coercion
a no-op.
Returns
-------
Expr
Expression of data type List, where the inner data type is equal to the
original data type.
"""
# Keep the `list` and `str` properties below at the end of the definition of Series,
# as to not confuse mypy with the type annotation `str` and `list`
@property
def bin(self) -> BinaryNameSpace:
"""Create an object namespace of all binary related methods."""
return BinaryNameSpace(self)
@property
def cat(self) -> CatNameSpace:
"""Create an object namespace of all categorical related methods."""
return CatNameSpace(self)
@property
def dt(self) -> DateTimeNameSpace:
"""Create an object namespace of all datetime related methods."""
return DateTimeNameSpace(self)
@property
def list(self) -> ListNameSpace:
"""Create an object namespace of all list related methods."""
return ListNameSpace(self)
@property
def arr(self) -> ArrayNameSpace:
"""Create an object namespace of all array related methods."""
return ArrayNameSpace(self)
@property
def str(self) -> StringNameSpace:
"""Create an object namespace of all string related methods."""
return StringNameSpace(self)
@property
def struct(self) -> StructNameSpace:
"""Create an object namespace of all struct related methods."""
return StructNameSpace(self)
@property
def ext(self) -> ExtensionNameSpace:
"""Create an object namespace of all extension type related methods."""
return ExtensionNameSpace(self)
@property
@unstable()
def plot(self) -> SeriesPlot:
"""
Create a plot namespace.
.. warning::
This functionality is currently considered **unstable**. It may be
changed at any point without it being considered a breaking change.
.. versionchanged:: 1.6.0
In prior versions of Polars, HvPlot was the plotting backend. If you would
like to restore the previous plotting functionality, all you need to do
is add `import hvplot.polars` at the top of your script and replace
`df.plot` with `df.hvplot`.
Polars does not implement plotting logic itself, but instead defers to
Altair:
- `s.plot.hist(**kwargs)`
is shorthand for
`alt.Chart(s.to_frame()).mark_bar(tooltip=True).encode(x=alt.X(f'{s.name}:Q', bin=True), y='count()', **kwargs).interactive()`
- `s.plot.kde(**kwargs)`
is shorthand for
`alt.Chart(s.to_frame()).transform_density(s.name, as_=[s.name, 'density']).mark_area(tooltip=True).encode(x=s.name, y='density:Q', **kwargs).interactive()`
- for any other attribute `attr`, `s.plot.attr(**kwargs)`
is shorthand for
`alt.Chart(s.to_frame().with_row_index()).mark_attr(tooltip=True).encode(x='index', y=s.name, **kwargs).interactive()`
For configuration, we suggest reading
`Chart Configuration <https://altair-viz.github.io/altair-tutorial/notebooks/08-Configuration.html>`_.
For example, you can:
- Change the width/height/title with ``.properties(width=500, height=350, title="My amazing plot")``.
- Change the x-axis label rotation with ``.configure_axisX(labelAngle=30)``.
- Change the opacity of the points in your scatter plot with ``.configure_point(opacity=.5)``.
Examples
--------
Histogram:
>>> s = pl.Series([1, 4, 4, 6, 2, 4, 3, 5, 5, 7, 1])
>>> s.plot.hist() # doctest: +SKIP
KDE plot:
>>> s.plot.kde() # doctest: +SKIP
Line plot:
>>> s.plot.line() # doctest: +SKIP
""" # noqa: W505
if not _ALTAIR_AVAILABLE or parse_version(altair.__version__) < (5, 4, 0):
msg = "altair>=5.4.0 is required for `.plot`"
raise ModuleUpgradeRequiredError(msg)
return SeriesPlot(self)
def _resolve_temporal_dtype(
dtype: PolarsDataType | None,
ndtype: np.dtype[np.datetime64] | np.dtype[np.timedelta64],
) -> PolarsDataType | None:
"""Given polars/numpy temporal dtypes, resolve to an explicit unit."""
PolarsType = Duration if ndtype.type == np.timedelta64 else Datetime
if dtype is None or (dtype == Datetime and not getattr(dtype, "time_unit", None)):
time_unit = getattr(dtype, "time_unit", None) or np.datetime_data(ndtype)[0]
# explicit formulation is verbose, but keeps mypy happy
# (and avoids unsupported timeunits such as "s")
if time_unit == "ns":
dtype = PolarsType("ns")
elif time_unit == "us":
dtype = PolarsType("us")
elif time_unit == "ms":
dtype = PolarsType("ms")
elif time_unit == "D" and ndtype.type == np.datetime64:
dtype = Date
return dtype
| Series |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/integration/cloud/gcp.py | {
"start": 404,
"end": 967
} | class ____(CloudProvider):
"""GCP cloud provider plugin. Sets up cloud resources before delegation."""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.uses_config = True
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
super().setup()
if not self._use_static_config():
display.notice(
'static configuration could not be used. are you missing a template file?'
)
| GcpCloudProvider |
python | pytorch__pytorch | test/onnx/ops/test_ops.py | {
"start": 2672,
"end": 15957
} | class ____(common_utils.TestCase):
def test_symbolic_accepts_valid_inputs(self):
output = torch.onnx.ops.symbolic(
"custom_domain::CustomOp",
(torch.tensor(1),),
dict(
int_key=1,
float_key=1.0,
str_key="attr",
bool_key=True,
list_int_key=[1, 2],
list_float_key=[1.0, 2.0],
list_str_key=["attr1", "attr2"],
list_bool_key=[True, False],
),
dtype=torch.float32,
shape=[1, 2, 3],
version=1,
metadata_props={"meta_key": "meta_value"},
)
self.assertEqual(output.shape, torch.Size([1, 2, 3]))
self.assertEqual(output.dtype, torch.float32)
self.assertEqual(output.device, torch.device("cpu"))
def test_symbolic_accepts_valid_inputs_empty_shape(self):
output = torch.onnx.ops.symbolic(
"custom_domain::CustomOp",
(torch.tensor(1),),
dtype=torch.float32,
shape=[],
)
self.assertEqual(output.shape, torch.Size([]))
def test_symbolic_accepts_valid_inputs_integer_types(self):
output = torch.onnx.ops.symbolic(
"custom_domain::CustomOp",
(torch.tensor(1),),
dtype=1, # 1 is float32 in ONNX
shape=[42],
)
self.assertEqual(output.dtype, torch.float32)
def test_symbolic_accepts_valid_inputs_int4_type(self):
output = torch.onnx.ops.symbolic(
"custom_domain::CustomOp",
(torch.tensor(1),),
dtype=22, # 22 is INT4 in ONNX
shape=[42],
)
# We use torch uint8 for int4
self.assertEqual(output.dtype, torch.uint8)
def test_symbolic_is_exportable(self):
class Model(torch.nn.Module):
def forward(self, x: torch.Tensor):
return torch.onnx.ops.symbolic(
"custom_domain::CustomOp",
(x, None),
dict(
int_key=1,
float_key=1.0,
str_key="attr",
bool_key=True,
list_int_key=[1, 2],
list_float_key=[1.0, 2.0],
list_str_key=["attr1", "attr2"],
list_bool_key=[True, False],
),
dtype=x.dtype,
shape=[1, 2, 3],
version=1,
metadata_props={"meta_key": "meta_value"},
)
onnx_program = torch.onnx.export(
Model(), (torch.tensor(1),), dynamo=True, verbose=False
)
assert onnx_program is not None
node = onnx_program.model.graph.node(0)
self.assertEqual(node.op_type, "CustomOp")
self.assertEqual(node.domain, "custom_domain")
attributes = node.attributes
self.assertEqual(
attributes,
dict(
int_key=ir.AttrInt64("int_key", 1),
float_key=ir.AttrFloat32("float_key", 1.0),
str_key=ir.AttrString("str_key", "attr"),
bool_key=ir.AttrInt64("bool_key", 1),
list_int_key=ir.AttrInt64s("list_int_key", [1, 2]),
list_float_key=ir.AttrFloat32s("list_float_key", [1.0, 2.0]),
list_str_key=ir.AttrStrings("list_str_key", ["attr1", "attr2"]),
list_bool_key=ir.AttrInt64s("list_bool_key", [1, 0]),
),
)
self.assertEqual(node.metadata_props["meta_key"], "meta_value")
outputs = node.outputs
self.assertEqual(list(outputs[0].shape), [1, 2, 3])
self.assertEqual(outputs[0].dtype, ir.DataType.INT64)
def test_symbolic_preserves_dynamic_shapes(self):
class Model(torch.nn.Module):
def forward(self, x: torch.Tensor, y: torch.Tensor):
return torch.onnx.ops.symbolic(
"custom_domain::CustomOp",
(x, y),
dtype=x.dtype,
shape=[*x.shape, *y.shape],
version=1,
)
onnx_program = torch.onnx.export(
Model(),
(torch.zeros(2, 3), torch.zeros(1, 2)),
dynamic_shapes=({0: "batch"}, {1: "something_else"}),
dynamo=True,
verbose=False,
)
assert onnx_program is not None
node = onnx_program.model.graph.node(0)
self.assertEqual(node.op_type, "CustomOp")
self.assertEqual(node.domain, "custom_domain")
inputs = onnx_program.model.graph.inputs
self.assertEqual(str(inputs[0].shape[0]), "batch")
self.assertEqual(inputs[0].shape[1], 3)
self.assertEqual(inputs[1].shape[0], 1)
self.assertEqual(str(inputs[1].shape[1]), "something_else")
outputs = node.outputs
self.assertEqual(str(outputs[0].shape[0]), "batch")
self.assertEqual(outputs[0].shape[1], 3)
self.assertEqual(outputs[0].shape[2], 1)
self.assertEqual(str(outputs[0].shape[3]), "something_else")
self.assertEqual(outputs[0].dtype, ir.DataType.FLOAT)
def test_symbolic_multi_out_accepts_valid_inputs(self):
outputs = torch.onnx.ops.symbolic_multi_out(
"custom_domain::CustomMultiOutOp",
(torch.tensor(1),),
dict(
int_key=1,
float_key=1.0,
str_key="attr",
bool_key=True,
list_int_key=[1, 2],
list_float_key=[1.0, 2.0],
list_str_key=["attr1", "attr2"],
list_bool_key=[True, False],
),
dtypes=(
1, # 1 is float32 in ONNX
torch.int32,
torch.float8_e4m3fn,
),
shapes=([1, 2], [42], []),
version=1,
metadata_props={"meta_key": "meta_value"},
)
self.assertEqual(len(outputs), 3)
self.assertEqual(outputs[0].shape, torch.Size([1, 2]))
self.assertEqual(outputs[0].dtype, torch.float32)
self.assertEqual(outputs[1].shape, torch.Size([42]))
self.assertEqual(outputs[1].dtype, torch.int32)
self.assertEqual(outputs[2].shape, torch.Size([]))
self.assertEqual(outputs[2].dtype, torch.float8_e4m3fn)
self.assertEqual(outputs[0].device, torch.device("cpu"))
self.assertEqual(outputs[1].device, torch.device("cpu"))
self.assertEqual(outputs[2].device, torch.device("cpu"))
def test_symbolic_multi_out_accepts_valid_inputs_empty_shape(self):
outputs = torch.onnx.ops.symbolic_multi_out(
"custom_domain::CustomOp",
(torch.tensor(1),),
dtypes=(torch.float32,),
shapes=[[]],
)
self.assertEqual(outputs[0].shape, torch.Size([]))
def test_symbolic_multi_out_accepts_valid_inputs_integer_types(self):
outputs = torch.onnx.ops.symbolic_multi_out(
"custom_domain::CustomOp",
(torch.tensor(1),),
dtypes=(1,), # 1 is float32 in ONNX
shapes=[[42]],
)
self.assertEqual(outputs[0].dtype, torch.float32)
def test_symbolic_multi_out_accepts_valid_inputs_int4_type(self):
outputs = torch.onnx.ops.symbolic_multi_out(
"custom_domain::CustomOp",
(torch.tensor(1),),
dtypes=(22,), # 22 is INT4 in ONNX
shapes=[[42]],
)
# We use torch uint8 for int4
self.assertEqual(outputs[0].dtype, torch.uint8)
def test_symbolic_multi_out_is_exportable(self):
class Model(torch.nn.Module):
def forward(self, x: torch.Tensor):
return torch.onnx.ops.symbolic_multi_out(
"custom_domain::CustomOp",
(x, None),
dict(
int_key=1,
float_key=1.0,
str_key="attr",
bool_key=True,
list_int_key=[1, 2],
list_float_key=[1.0, 2.0],
list_str_key=["attr1", "attr2"],
list_bool_key=[True, False],
),
dtypes=(torch.float32, torch.int32, torch.float8_e4m3fn),
shapes=([1, 2], [42], []),
version=1,
metadata_props={"meta_key": "meta_value"},
)
onnx_program = torch.onnx.export(
Model(), (torch.tensor(1),), dynamo=True, verbose=False
)
assert onnx_program is not None
node = onnx_program.model.graph.node(0)
self.assertEqual(node.op_type, "CustomOp")
self.assertEqual(node.domain, "custom_domain")
attributes = node.attributes
self.assertEqual(
attributes,
dict(
int_key=ir.AttrInt64("int_key", 1),
float_key=ir.AttrFloat32("float_key", 1.0),
str_key=ir.AttrString("str_key", "attr"),
bool_key=ir.AttrInt64("bool_key", 1),
list_int_key=ir.AttrInt64s("list_int_key", [1, 2]),
list_float_key=ir.AttrFloat32s("list_float_key", [1.0, 2.0]),
list_str_key=ir.AttrStrings("list_str_key", ["attr1", "attr2"]),
list_bool_key=ir.AttrInt64s("list_bool_key", [1, 0]),
),
)
self.assertEqual(node.metadata_props["meta_key"], "meta_value")
outputs = node.outputs
self.assertEqual(list(outputs[0].shape), [1, 2])
self.assertEqual(outputs[0].dtype, ir.DataType.FLOAT)
self.assertEqual(list(outputs[1].shape), [42])
self.assertEqual(outputs[1].dtype, ir.DataType.INT32)
self.assertEqual(list(outputs[2].shape), [])
self.assertEqual(outputs[2].dtype, ir.DataType.FLOAT8E4M3FN)
def test_symbolic_multi_out_preserves_dynamic_shapes(self):
class Model(torch.nn.Module):
def forward(self, x: torch.Tensor, y: torch.Tensor):
return torch.onnx.ops.symbolic_multi_out(
"custom_domain::CustomOp",
(x, y),
dtypes=(x.dtype, 22), # 22 is INT4
shapes=[[*x.shape, *y.shape], [42]],
version=1,
)
onnx_program = torch.onnx.export(
Model(),
(torch.zeros(2, 3), torch.zeros(1, 2)),
dynamic_shapes=({0: "batch"}, {1: "something_else"}),
dynamo=True,
verbose=False,
)
assert onnx_program is not None
node = onnx_program.model.graph.node(0)
self.assertEqual(node.op_type, "CustomOp")
self.assertEqual(node.domain, "custom_domain")
inputs = onnx_program.model.graph.inputs
self.assertEqual(str(inputs[0].shape[0]), "batch")
self.assertEqual(inputs[0].shape[1], 3)
self.assertEqual(inputs[1].shape[0], 1)
self.assertEqual(str(inputs[1].shape[1]), "something_else")
outputs = node.outputs
self.assertEqual(str(outputs[0].shape[0]), "batch")
self.assertEqual(outputs[0].shape[1], 3)
self.assertEqual(outputs[0].shape[2], 1)
self.assertEqual(str(outputs[0].shape[3]), "something_else")
self.assertEqual(outputs[0].dtype, ir.DataType.FLOAT)
self.assertEqual(list(outputs[1].shape), [42])
self.assertEqual(outputs[1].dtype, ir.DataType.INT4)
def test_symbolic_multi_out_raises_when_dtypes_and_shapes_differ(self):
with self.assertRaises(RuntimeError):
torch.onnx.ops.symbolic_multi_out(
"custom_domain::CustomMultiOutOp",
(torch.tensor(1),),
dict(
int_key=1,
float_key=1.0,
str_key="attr",
bool_key=True,
list_int_key=[1, 2],
list_float_key=[1.0, 2.0],
list_str_key=["attr1", "attr2"],
list_bool_key=[True, False],
),
dtypes=(torch.float32, torch.int32),
shapes=([1, 2], [42], []),
version=1,
metadata_props={"meta_key": "meta_value"},
)
with self.assertRaises(RuntimeError):
torch.onnx.ops.symbolic_multi_out(
"custom_domain::CustomMultiOutOp",
(torch.tensor(1),),
dict(
int_key=1,
float_key=1.0,
str_key="attr",
bool_key=True,
list_int_key=[1, 2],
list_float_key=[1.0, 2.0],
list_str_key=["attr1", "attr2"],
list_bool_key=[True, False],
),
dtypes=(torch.float32,),
shapes=([1, 2], [42]),
version=1,
metadata_props={"meta_key": "meta_value"},
)
@common_utils.instantiate_parametrized_tests
| SymbolicOpsTest |
python | catalyst-team__catalyst | catalyst/callbacks/misc.py | {
"start": 4607,
"end": 8454
} | class ____(Callback):
"""Logs pipeline execution time.
.. code-block:: python
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst import dl
# data
num_samples, num_features = int(1e4), int(1e1)
X, y = torch.rand(num_samples, num_features), torch.rand(num_samples)
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loaders = {"train": loader, "valid": loader}
# model, criterion, optimizer, scheduler
model = torch.nn.Linear(num_features, 1)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters())
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [3, 6])
# model training
runner = dl.SupervisedRunner()
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
logdir="./logdir",
num_epochs=1,
verbose=True,
callbacks=[dl.TimerCallback()]
)
You should see additional extra metrics, such as:
- ``_timer/_fps`` - number handled samples per second during run.
- ``_timer/batch_time`` - time required for single batch handling.
- ``_timer/data_time`` - time required for single batch data preparation handling.
- ``_timer/model_time`` - time required for single batch model forwarding.
Moreover, you could use it throught ``timeit=True`` flag:
.. code-block:: python
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst import dl
# data
num_samples, num_features = int(1e4), int(1e1)
X, y = torch.rand(num_samples, num_features), torch.rand(num_samples)
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loaders = {"train": loader, "valid": loader}
# model, criterion, optimizer, scheduler
model = torch.nn.Linear(num_features, 1)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters())
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [3, 6])
# model training
runner = dl.SupervisedRunner()
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
logdir="./logdir",
num_epochs=1,
verbose=True,
timeit=True,
)
"""
def __init__(self):
"""Init."""
super().__init__(order=CallbackOrder.metric + 1)
self.timer = TimeManager()
def on_loader_start(self, runner: "IRunner") -> None:
"""Event handler."""
self.timer.reset()
self.timer.start("_timer/batch_time")
self.timer.start("_timer/data_time")
def on_loader_end(self, runner: "IRunner") -> None:
"""Event handler."""
self.timer.reset()
def on_batch_start(self, runner: "IRunner") -> None:
"""Event handler."""
self.timer.stop("_timer/data_time")
self.timer.start("_timer/model_time")
def on_batch_end(self, runner: "IRunner") -> None:
"""Event handler."""
self.timer.stop("_timer/model_time")
self.timer.stop("_timer/batch_time")
self.timer.elapsed["_timer/_fps"] = runner.batch_size / (
self.timer.elapsed["_timer/batch_time"] + EPS
)
for key, value in self.timer.elapsed.items():
runner.batch_metrics[key] = value
self.timer.reset()
self.timer.start("_timer/batch_time")
self.timer.start("_timer/data_time")
| TimerCallback |
python | ray-project__ray | doc/source/serve/doc_code/streaming_tutorial.py | {
"start": 3109,
"end": 6345
} | class ____:
def __init__(self, model_id: str):
self.loop = asyncio.get_running_loop()
self.model_id = model_id
self.model = AutoModelForCausalLM.from_pretrained(self.model_id)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
# __chatbot_constructor_end__
# __chatbot_logic_start__
@fastapi_app.websocket("/")
async def handle_request(self, ws: WebSocket) -> None:
await ws.accept()
conversation = ""
try:
while True:
prompt = await ws.receive_text()
logger.info(f'Got prompt: "{prompt}"')
conversation += prompt
streamer = TextIteratorStreamer(
self.tokenizer,
timeout=0,
skip_prompt=True,
skip_special_tokens=True,
)
self.loop.run_in_executor(
None, self.generate_text, conversation, streamer
)
response = ""
async for text in self.consume_streamer(streamer):
await ws.send_text(text)
response += text
await ws.send_text("<<Response Finished>>")
conversation += response
except WebSocketDisconnect:
print("Client disconnected.")
def generate_text(self, prompt: str, streamer: TextIteratorStreamer):
input_ids = self.tokenizer([prompt], return_tensors="pt").input_ids
self.model.generate(input_ids, streamer=streamer, max_length=10000)
async def consume_streamer(self, streamer: TextIteratorStreamer):
while True:
try:
for token in streamer:
logger.info(f'Yielding token: "{token}"')
yield token
break
except Empty:
await asyncio.sleep(0.001)
# __chatbot_logic_end__
# __chatbot_bind_start__
app = Chatbot.bind("microsoft/DialoGPT-small")
# __chatbot_bind_end__
serve.run(app)
chunks = []
# Monkeypatch `print` for testing
original_print, print = print, (lambda chunk, end=None: chunks.append(chunk))
# __ws_client_start__
from websockets.sync.client import connect
with connect("ws://localhost:8000") as websocket:
websocket.send("Space the final")
while True:
received = websocket.recv()
if received == "<<Response Finished>>":
break
print(received, end="")
print("\n")
websocket.send(" These are the voyages")
while True:
received = websocket.recv()
if received == "<<Response Finished>>":
break
print(received, end="")
print("\n")
# __ws_client_end__
assert chunks == [
" ",
"",
"",
"frontier.",
"\n",
" ",
"of ",
"the ",
"starship ",
"",
"",
"Enterprise.",
"\n",
]
print = original_print
# __batchbot_setup_start__
import asyncio
import logging
from queue import Empty, Queue
from fastapi import FastAPI
from transformers import AutoModelForCausalLM, AutoTokenizer
from ray import serve
logger = logging.getLogger("ray.serve")
# __batchbot_setup_end__
# __raw_streamer_start__
| Chatbot |
python | huggingface__transformers | src/transformers/models/yoso/modeling_yoso.py | {
"start": 31572,
"end": 34936
} | class ____(YosoPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.yoso = YosoModel(config)
self.classifier = YosoClassificationHead(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.yoso(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| YosoForSequenceClassification |
python | getsentry__sentry | src/sentry/sentry_apps/api/endpoints/installation_details.py | {
"start": 1208,
"end": 4243
} | class ____(SentryAppInstallationBaseEndpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"DELETE": ApiPublishStatus.UNKNOWN,
"GET": ApiPublishStatus.UNKNOWN,
"PUT": ApiPublishStatus.UNKNOWN,
}
def get(self, request: Request, installation) -> Response:
return Response(
serialize(
objects=SentryAppInstallation.objects.get(id=installation.id),
access=request.access,
serializer=SentryAppInstallationSerializer(),
)
)
def delete(self, request: Request, installation) -> Response:
sentry_app_installation = SentryAppInstallation.objects.get(id=installation.id)
with transaction.atomic(using=router.db_for_write(SentryAppInstallation)):
try:
assert (
request.user.is_authenticated
), "User must be authenticated to delete installation"
SentryAppInstallationNotifier(
sentry_app_installation=sentry_app_installation,
user=request.user,
action="deleted",
).run()
# if the error is from a request exception, log the error and continue
except RequestException as exc:
sentry_sdk.capture_exception(exc)
sentry_app_installation.update(status=SentryAppInstallationStatus.PENDING_DELETION)
ScheduledDeletion.schedule(sentry_app_installation, days=0, actor=request.user)
create_audit_entry(
request=request,
organization_id=sentry_app_installation.organization_id,
target_object=sentry_app_installation.organization_id,
event=audit_log.get_event_id("SENTRY_APP_UNINSTALL"),
data={"sentry_app": sentry_app_installation.sentry_app.name},
)
if request.user.is_authenticated:
analytics.record(
SentryAppUninstalledEvent(
user_id=request.user.id,
organization_id=sentry_app_installation.organization_id,
sentry_app=sentry_app_installation.sentry_app.slug,
),
)
return Response(status=204)
def put(self, request: Request, installation) -> Response:
serializer = SentryAppInstallationParser(installation, data=request.data, partial=True)
if serializer.is_valid():
result = serializer.validated_data
SentryAppInstallationUpdater(
sentry_app_installation=installation, status=result.get("status")
).run()
return Response(
serialize(
objects=SentryAppInstallation.objects.get(id=installation.id),
user=request.user,
serializer=SentryAppInstallationSerializer(),
)
)
return Response(serializer.errors, status=400)
| SentryAppInstallationDetailsEndpoint |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 32975,
"end": 34414
} | class ____(rv_continuous):
r"""A Bradford continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `bradford` is:
.. math::
f(x, c) = \frac{c}{\log(1+c) (1+cx)}
for :math:`0 <= x <= 1` and :math:`c > 0`.
`bradford` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _pdf(self, x, c):
# bradford.pdf(x, c) = c / (k * (1+c*x))
return c / (c*x + 1.0) / sc.log1p(c)
def _cdf(self, x, c):
return sc.log1p(c*x) / sc.log1p(c)
def _ppf(self, q, c):
return sc.expm1(q * sc.log1p(c)) / c
def _stats(self, c, moments='mv'):
k = np.log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = np.sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= np.sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) +
6*c*k*k*(3*k-14) + 12*k**3)
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = np.log(1+c)
return k/2.0 - np.log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
| bradford_gen |
python | pytorch__pytorch | torch/_inductor/exc.py | {
"start": 2228,
"end": 2460
} | class ____(RuntimeError):
def __init__(self) -> None:
from . import config
super().__init__(
f"No working C++ compiler found in {config.__name__}.cpp.cxx: {config.cpp.cxx}"
)
| InvalidCxxCompiler |
python | getsentry__sentry | tests/sentry/incidents/subscription_processor/test_subscription_processor_base.py | {
"start": 1186,
"end": 8110
} | class ____(TestCase, SpanTestCase, SnubaTestCase):
@pytest.fixture(autouse=True)
def _setup_metrics_patch(self):
with mock.patch("sentry.incidents.subscription_processor.metrics") as self.metrics:
yield
def setUp(self) -> None:
super().setUp()
self._run_tasks = self.tasks()
self._run_tasks.__enter__()
def tearDown(self) -> None:
super().tearDown()
self._run_tasks.__exit__(None, None, None)
@cached_property
def sub(self):
subscription_id = int(self.metric_detector.data_sources.first().source_id)
return QuerySubscription.objects.get(id=subscription_id)
def create_detector_data_source_and_data_conditions(self):
detector = self.create_detector(
project=self.project,
workflow_condition_group=self.create_data_condition_group(),
type=MetricIssue.slug,
created_by_id=self.user.id,
)
self.create_detector_state(detector=detector)
with self.tasks():
snuba_query = create_snuba_query(
query_type=SnubaQuery.Type.ERROR,
dataset=Dataset.Events,
query="",
aggregate="count()",
time_window=timedelta(minutes=1),
resolution=timedelta(minutes=1),
environment=self.environment,
event_types=[
SnubaQueryEventType.EventType.ERROR,
SnubaQueryEventType.EventType.DEFAULT,
],
)
query_subscription = create_snuba_subscription(
project=detector.project,
subscription_type=INCIDENTS_SNUBA_SUBSCRIPTION_TYPE,
snuba_query=snuba_query,
)
data_source = self.create_data_source(
organization=self.organization,
source_id=str(query_subscription.id),
type=DATA_SOURCE_SNUBA_QUERY_SUBSCRIPTION,
)
self.create_data_source_detector(data_source, detector)
self.set_up_data_conditions(detector, Condition.GREATER, 100, None, 10)
return detector
def set_up_data_conditions(
self,
detector: Detector,
threshold_type: Condition,
critical_threshold: int,
warning_threshold: int | None = None,
resolve_threshold: int | None = None,
):
if resolve_threshold is None:
resolve_threshold = (
critical_threshold if warning_threshold is None else warning_threshold
)
resolve_threshold_type = (
Condition.LESS_OR_EQUAL
if threshold_type == Condition.GREATER
else Condition.GREATER_OR_EQUAL
)
self.create_data_condition(
type=threshold_type,
comparison=critical_threshold,
condition_result=DetectorPriorityLevel.HIGH,
condition_group=detector.workflow_condition_group,
)
if warning_threshold is not None:
self.create_data_condition(
type=threshold_type,
comparison=warning_threshold,
condition_result=DetectorPriorityLevel.MEDIUM,
condition_group=detector.workflow_condition_group,
)
self.create_data_condition(
type=resolve_threshold_type,
comparison=resolve_threshold,
condition_result=DetectorPriorityLevel.OK,
condition_group=detector.workflow_condition_group,
)
@cached_property
def metric_detector(self):
return self.create_detector_data_source_and_data_conditions()
@cached_property
def critical_threshold(self):
critical_detector_trigger = DataCondition.objects.get(
condition_group=self.metric_detector.workflow_condition_group,
condition_result=DetectorPriorityLevel.HIGH,
)
return critical_detector_trigger.comparison
@cached_property
def warning_threshold(self):
warning_detector_trigger = DataCondition.objects.get(
condition_group=self.metric_detector.workflow_condition_group,
condition_result=DetectorPriorityLevel.MEDIUM,
)
return warning_detector_trigger.comparison
@cached_property
def resolve_threshold(self):
resolve_detector_trigger = DataCondition.objects.get(
condition_group=self.metric_detector.workflow_condition_group,
condition_result=DetectorPriorityLevel.OK,
)
return resolve_detector_trigger.comparison
def get_snuba_query(self, detector: Detector):
data_source_detector = DataSourceDetector.objects.get(detector=detector)
data_source = DataSource.objects.get(id=data_source_detector.data_source.id)
query_subscription = QuerySubscription.objects.get(id=data_source.source_id)
snuba_query = SnubaQuery.objects.get(id=query_subscription.snuba_query.id)
return snuba_query
def update_threshold(
self, detector: Detector, priority_level: DetectorPriorityLevel, new_threshold: float
) -> None:
detector_trigger = DataCondition.objects.get(
condition_group=detector.workflow_condition_group,
condition_result=priority_level,
)
detector_trigger.comparison = new_threshold
detector_trigger.save()
def build_subscription_update(self, subscription, time_delta=None, value=EMPTY):
if time_delta is not None:
timestamp = timezone.now() + time_delta
else:
timestamp = timezone.now()
timestamp = timestamp.replace(microsecond=0)
data = {}
if subscription:
data = {"some_col_name": randint(0, 100) if value is EMPTY else value}
values = {"data": [data]}
return {
"subscription_id": subscription.subscription_id if subscription else uuid4().hex,
"values": values,
"timestamp": timestamp,
"interval": 1,
"partition": 1,
"offset": 1,
}
def send_update(self, value, time_delta=None, subscription=None):
if time_delta is None:
time_delta = timedelta()
if subscription is None:
subscription = self.sub
processor = SubscriptionProcessor(subscription)
message = self.build_subscription_update(subscription, value=value, time_delta=time_delta)
with (
self.feature(["organizations:incidents", "organizations:performance-view"]),
self.capture_on_commit_callbacks(execute=True),
):
processor.process_update(message)
return processor
def get_detector_state(self, detector: Detector) -> int:
detector_state = DetectorState.objects.get(detector=detector)
return int(detector_state.state)
| ProcessUpdateBaseClass |
python | tiangolo__fastapi | docs_src/body_updates/tutorial001_py39.py | {
"start": 150,
"end": 900
} | class ____(BaseModel):
name: Union[str, None] = None
description: Union[str, None] = None
price: Union[float, None] = None
tax: float = 10.5
tags: list[str] = []
items = {
"foo": {"name": "Foo", "price": 50.2},
"bar": {"name": "Bar", "description": "The bartenders", "price": 62, "tax": 20.2},
"baz": {"name": "Baz", "description": None, "price": 50.2, "tax": 10.5, "tags": []},
}
@app.get("/items/{item_id}", response_model=Item)
async def read_item(item_id: str):
return items[item_id]
@app.put("/items/{item_id}", response_model=Item)
async def update_item(item_id: str, item: Item):
update_item_encoded = jsonable_encoder(item)
items[item_id] = update_item_encoded
return update_item_encoded
| Item |
python | keon__algorithms | tests/test_strings.py | {
"start": 2941,
"end": 3245
} | class ____(unittest.TestCase):
"""[summary]
Test for the file delete_reoccurring.py
Arguments:
unittest {[type]} -- [description]
"""
def test_delete_reoccurring_characters(self):
self.assertEqual("abc", delete_reoccurring_characters("aaabcccc"))
| TestDeleteReoccurring |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 83658,
"end": 84157
} | class ____(PrefectFilterBaseModel):
"""Filter by `Variable.id`."""
any_: Optional[list[UUID]] = Field(
default=None, description="A list of variable ids to include"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.Variable.id.in_(self.any_))
return filters
| VariableFilterId |
python | mlflow__mlflow | tests/store/tracking/test_plugin_validation.py | {
"start": 1588,
"end": 2902
} | class ____(SqlAlchemyStore):
pass
"""
subprocess.check_call([sys.executable, "-c", code], timeout=20)
def test_plugin_can_create_dataset_without_name_error(tmp_path):
"""
Regression test for plugin runtime usage (https://github.com/mlflow/mlflow/issues/18386).
Store plugins that inherit from SqlAlchemyStore need to be able to call methods like
create_dataset() which instantiate EvaluationDataset at runtime.
This test ensures that after a plugin loads, it can actually use store methods that reference
EvaluationDataset. This catches the actual runtime failure that users experienced, where the
plugin would load successfully but fail when trying to perform dataset operations.
"""
# Pre-initialize the database to avoid expensive migrations in subprocess
db_path = tmp_path / "test.db"
artifact_path = tmp_path / "artifacts"
artifact_path.mkdir()
# Initialize database with SqlAlchemyStore (runs migrations)
store = SqlAlchemyStore(f"sqlite:///{db_path}", str(artifact_path))
store.engine.dispose() # Close connection to allow subprocess to use the database
# Now run the test code in subprocess with the pre-initialized database
code = f"""
from mlflow.store.tracking.sqlalchemy_store import SqlAlchemyStore
| CustomTrackingStore |
python | huggingface__transformers | tests/models/deberta_v2/test_modeling_deberta_v2.py | {
"start": 12095,
"end": 12947
} | class ____(unittest.TestCase):
@unittest.skip(reason="Model not available yet")
def test_inference_masked_lm(self):
pass
@slow
def test_inference_no_head(self):
model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")
input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
output = model(input_ids, attention_mask=attention_mask)[0]
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
)
torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4)
| DebertaV2ModelIntegrationTest |
python | cython__cython | docs/examples/tutorial/cdef_classes/math_function.py | {
"start": 0,
"end": 202
} | class ____(object):
def __init__(self, name, operator):
self.name = name
self.operator = operator
def __call__(self, *operands):
return self.operator(*operands)
| MathFunction |
python | openai__openai-python | src/openai/types/realtime/realtime_conversation_item_system_message_param.py | {
"start": 466,
"end": 1226
} | class ____(TypedDict, total=False):
content: Required[Iterable[Content]]
"""The content of the message."""
role: Required[Literal["system"]]
"""The role of the message sender. Always `system`."""
type: Required[Literal["message"]]
"""The type of the item. Always `message`."""
id: str
"""The unique ID of the item.
This may be provided by the client or generated by the server.
"""
object: Literal["realtime.item"]
"""Identifier for the API object being returned - always `realtime.item`.
Optional when creating a new item.
"""
status: Literal["completed", "incomplete", "in_progress"]
"""The status of the item. Has no effect on the conversation."""
| RealtimeConversationItemSystemMessageParam |
python | django-import-export__django-import-export | tests/core/tests/test_forms.py | {
"start": 312,
"end": 408
} | class ____(resources.ModelResource):
class Meta:
name = "My super resource"
| MyResource |
python | huggingface__transformers | src/transformers/models/blt/modeling_blt.py | {
"start": 16404,
"end": 19426
} | class ____(nn.Module):
"""Cross-attention module for Blt, following transformers style"""
def __init__(self, config: BltConfig, layer_idx: int, hidden_size: Optional[int] = None):
super().__init__()
self.config = config
self.num_heads = self.config.num_attention_heads
self.num_key_value_heads = self.config.num_key_value_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.head_dim = config.hidden_size // self.num_heads
self.layer_idx = layer_idx
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.q_norm = BltRMSNorm(self.hidden_size, eps=config.rms_norm_eps)
self.k_norm = BltRMSNorm(self.hidden_size, eps=config.rms_norm_eps)
self.is_causal = False
def forward(
self,
hidden_states: torch.Tensor,
cross_attention_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, q_len, _ = hidden_states.size()
query_states = self.q_norm(hidden_states)
query_states = self.q_proj(query_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
cross_attention_states = self.k_norm(cross_attention_states)
key_states = self.k_proj(cross_attention_states)
value_states = self.v_proj(cross_attention_states)
key_states = key_states.view(bsz, -1, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, -1, self.num_key_value_heads, self.head_dim).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
attn_output = attn_output + hidden_states
return attn_output, attn_weights
@auto_docstring
| BltCrossAttention |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 335978,
"end": 336329
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("EnterpriseServerInstallation", graphql_name="node")
| EnterpriseServerInstallationEdge |
python | Pylons__pyramid | tests/test_scripts/dummy.py | {
"start": 508,
"end": 761
} | class ____:
env = {}
help = ''
called = False
dummy_attr = 1
def __call__(self, env, help):
self.env = env
self.help = help
self.called = True
self.env['request'].dummy_attr = self.dummy_attr
| DummyShell |
python | pandas-dev__pandas | pandas/tests/extension/date/array.py | {
"start": 1222,
"end": 6023
} | class ____(ExtensionArray):
def __init__(
self,
dates: (
dt.date
| Sequence[dt.date]
| tuple[np.ndarray, np.ndarray, np.ndarray]
| np.ndarray
),
) -> None:
if isinstance(dates, dt.date):
self._year = np.array([dates.year])
self._month = np.array([dates.month])
self._day = np.array([dates.year])
return
ldates = len(dates)
if isinstance(dates, list):
# pre-allocate the arrays since we know the size before hand
self._year = np.zeros(ldates, dtype=np.uint16) # 65535 (0, 9999)
self._month = np.zeros(ldates, dtype=np.uint8) # 255 (1, 31)
self._day = np.zeros(ldates, dtype=np.uint8) # 255 (1, 12)
# populate them
for i, (y, m, d) in enumerate(
(date.year, date.month, date.day) for date in dates
):
self._year[i] = y
self._month[i] = m
self._day[i] = d
elif isinstance(dates, tuple):
# only support triples
if ldates != 3:
raise ValueError("only triples are valid")
# check if all elements have the same type
if any(not isinstance(x, np.ndarray) for x in dates):
raise TypeError("invalid type")
ly, lm, ld = (len(cast(np.ndarray, d)) for d in dates)
if not ly == lm == ld:
raise ValueError(
f"tuple members must have the same length: {(ly, lm, ld)}"
)
self._year = dates[0].astype(np.uint16)
self._month = dates[1].astype(np.uint8)
self._day = dates[2].astype(np.uint8)
elif isinstance(dates, np.ndarray) and dates.dtype == "U10":
self._year = np.zeros(ldates, dtype=np.uint16) # 65535 (0, 9999)
self._month = np.zeros(ldates, dtype=np.uint8) # 255 (1, 31)
self._day = np.zeros(ldates, dtype=np.uint8) # 255 (1, 12)
# error: "object_" object is not iterable
obj = np.char.split(dates, sep="-")
for (i,), (y, m, d) in np.ndenumerate(obj):
self._year[i] = int(y)
self._month[i] = int(m)
self._day[i] = int(d)
else:
raise TypeError(f"{type(dates)} is not supported")
@property
def dtype(self) -> ExtensionDtype:
return DateDtype()
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if isinstance(dtype, DateDtype):
data = self.copy() if copy else self
else:
data = self.to_numpy(dtype=dtype, copy=copy, na_value=dt.date.min)
return data
@property
def nbytes(self) -> int:
return self._year.nbytes + self._month.nbytes + self._day.nbytes
def __len__(self) -> int:
return len(self._year) # all 3 arrays are enforced to have the same length
def __getitem__(self, item: PositionalIndexer):
if isinstance(item, int):
return dt.date(self._year[item], self._month[item], self._day[item])
else:
raise NotImplementedError("only ints are supported as indexes")
def __setitem__(self, key: int | slice | np.ndarray, value: Any) -> None:
if self._readonly:
raise ValueError("Cannot modify read-only array")
if not isinstance(key, int):
raise NotImplementedError("only ints are supported as indexes")
if not isinstance(value, dt.date):
raise TypeError("you can only set datetime.date types")
self._year[key] = value.year
self._month[key] = value.month
self._day[key] = value.day
def __repr__(self) -> str:
return f"DateArray{list(zip(self._year, self._month, self._day))}"
def copy(self) -> DateArray:
return DateArray((self._year.copy(), self._month.copy(), self._day.copy()))
def isna(self) -> np.ndarray:
return np.logical_and(
np.logical_and(
self._year == dt.date.min.year, self._month == dt.date.min.month
),
self._day == dt.date.min.day,
)
@classmethod
def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy=False):
if isinstance(scalars, dt.date):
raise TypeError
elif isinstance(scalars, DateArray):
if dtype is not None:
return scalars.astype(dtype, copy=copy)
if copy:
return scalars.copy()
return scalars[:]
elif isinstance(scalars, np.ndarray):
scalars = scalars.astype("U10") # 10 chars for yyyy-mm-dd
return DateArray(scalars)
| DateArray |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0012_add-predefined-match-arg-field.py | {
"start": 150,
"end": 2025
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0011_version-media-availability"),
]
operations = [
migrations.AddField(
model_name="versionautomationrule",
name="predefined_match_arg",
field=models.CharField(
blank=True,
choices=[
("all-versions", "Any version"),
("semver-versions", "SemVer versions"),
(None, "Custom match"),
],
default=None,
help_text="Match argument defined by us, it is used if is not None, otherwise match_arg will be used.",
max_length=255,
null=True,
verbose_name="Predefined match argument",
),
),
migrations.AlterField(
model_name="versionautomationrule",
name="action",
field=models.CharField(
choices=[
("activate-version", "Activate version"),
("set-default-version", "Set version as default"),
],
help_text="Action to apply to matching versions",
max_length=32,
verbose_name="Action",
),
),
migrations.AlterField(
model_name="versionautomationrule",
name="version_type",
field=models.CharField(
choices=[
("branch", "Branch"),
("tag", "Tag"),
("external", "External"),
("unknown", "Unknown"),
],
help_text="Type of version the rule should be applied to",
max_length=32,
verbose_name="Version type",
),
),
]
| Migration |
python | getsentry__sentry | tests/sentry/api/test_client.py | {
"start": 163,
"end": 1405
} | class ____(TestCase):
@patch("sentry.api.client.resolve")
def test_mixed_parameters_in_query_string(self, mock_resolve):
mock_view = Mock(return_value=JsonResponse({"success": True}))
mock_resolve.return_value = (mock_view, (), {})
mock_auth = Mock()
mock_auth.organization_id = 1
mock_auth.scope_list = ["org:read"]
params = {
"project": [1, 2, 3],
"query": "test",
"yAxis": ["count()", "p95()"],
"statsPeriod": "14d",
"tags": [],
}
client = ApiClient()
response = client.get(auth=mock_auth, user=None, path="/test/", params=params)
assert response.status_code == 200
mock_view.assert_called_once()
request = mock_view.call_args[0][0]
expected_queries = {
"project": ["1", "2", "3"], # numeric values converted to strings; list preserved
"query": ["test"],
"yAxis": ["count()", "p95()"], # list preserved
"statsPeriod": ["14d"],
"tags": [], # empty list preserved
}
actual_queries = dict(request.GET.lists())
assert actual_queries == expected_queries
| ClientParameterHandlingTest |
python | pytorch__pytorch | torch/nn/modules/loss.py | {
"start": 1147,
"end": 1489
} | class ____(_Loss):
def __init__(
self,
weight: Optional[Tensor] = None,
size_average=None,
reduce=None,
reduction: str = "mean",
) -> None:
super().__init__(size_average, reduce, reduction)
self.register_buffer("weight", weight)
self.weight: Optional[Tensor]
| _WeightedLoss |
python | huggingface__transformers | src/transformers/models/lightglue/modular_lightglue.py | {
"start": 11200,
"end": 11586
} | class ____(SuperGlueImageProcessorFast):
def post_process_keypoint_matching(
self,
outputs: "LightGlueKeypointMatchingOutput",
target_sizes: Union[TensorType, list[tuple]],
threshold: float = 0.0,
) -> list[dict[str, torch.Tensor]]:
return super().post_process_keypoint_matching(outputs, target_sizes, threshold)
| LightGlueImageProcessorFast |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.