language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
google__pytype
|
pytype/tests/test_functools.py
|
{
"start": 68,
"end": 1781
}
|
class ____(test_base.BaseTest):
"""Tests for @cached.property."""
def test_basic(self):
self.Check("""
import functools
class A:
@functools.cached_property
def f(self):
return 42
a = A()
x = a.f
assert_type(x, int)
a.f = 43
x = a.f
assert_type(x, int)
del a.f
x = a.f
assert_type(x, int)
""")
def test_reingest(self):
with self.DepTree([(
"foo.py",
"""
import functools
class A:
@functools.cached_property
def f(self):
return 42
""",
)]):
self.Check("""
import foo
a = foo.A()
x = a.f
assert_type(x, int)
a.f = 43
x = a.f
assert_type(x, int)
del a.f
x = a.f
assert_type(x, int)
""")
@test_base.skip("Not supported yet")
def test_pyi(self):
with self.DepTree([(
"foo.pyi",
"""
import functools
class A:
@functools.cached_property
def f(self) -> int: ...
""",
)]):
self.Check("""
import foo
a = A()
x = a.f
assert_type(x, int)
a.f = 43
x = a.f
assert_type(x, int)
del a.f
x = a.f
assert_type(x, int)
""")
def test_infer(self):
ty = self.Infer("""
from functools import cached_property
""")
self.assertTypesMatchPytd(
ty,
"""
import functools
cached_property: type[functools.cached_property]
""",
)
if __name__ == "__main__":
test_base.main()
|
TestCachedProperty
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_query.py
|
{
"start": 28255,
"end": 32738
}
|
class ____(fixtures.TablesTest):
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("user_id", INT, primary_key=True),
Column("user_name", VARCHAR(20)),
)
Table(
"addresses",
metadata,
Column("address_id", Integer, primary_key=True),
Column("user_id", Integer, ForeignKey("users.user_id")),
Column("address", String(30)),
)
@classmethod
def insert_data(cls, connection):
users, addresses = cls.tables("users", "addresses")
conn = connection
conn.execute(users.insert(), dict(user_id=1, user_name="john"))
conn.execute(
addresses.insert(), dict(address_id=1, user_id=1, address="addr1")
)
conn.execute(users.insert(), dict(user_id=2, user_name="jack"))
conn.execute(
addresses.insert(), dict(address_id=2, user_id=2, address="addr1")
)
conn.execute(users.insert(), dict(user_id=3, user_name="ed"))
conn.execute(
addresses.insert(), dict(address_id=3, user_id=3, address="addr2")
)
conn.execute(users.insert(), dict(user_id=4, user_name="wendy"))
conn.execute(
addresses.insert(), dict(address_id=4, user_id=4, address="addr3")
)
conn.execute(users.insert(), dict(user_id=5, user_name="laura"))
conn.execute(
addresses.insert(), dict(address_id=5, user_id=5, address="addr4")
)
conn.execute(users.insert(), dict(user_id=6, user_name="ralph"))
conn.execute(
addresses.insert(), dict(address_id=6, user_id=6, address="addr5")
)
conn.execute(users.insert(), dict(user_id=7, user_name="fido"))
conn.execute(
addresses.insert(), dict(address_id=7, user_id=7, address="addr5")
)
def test_select_limit(self, connection):
users, addresses = self.tables("users", "addresses")
r = connection.execute(
users.select().limit(3).order_by(users.c.user_id)
).fetchall()
self.assert_(r == [(1, "john"), (2, "jack"), (3, "ed")], repr(r))
@testing.requires.offset
def test_select_limit_offset(self, connection):
"""Test the interaction between limit and offset"""
users, addresses = self.tables("users", "addresses")
r = connection.execute(
users.select().limit(3).offset(2).order_by(users.c.user_id)
).fetchall()
self.assert_(r == [(3, "ed"), (4, "wendy"), (5, "laura")])
r = connection.execute(
users.select().offset(5).order_by(users.c.user_id)
).fetchall()
self.assert_(r == [(6, "ralph"), (7, "fido")])
def test_select_distinct_limit(self, connection):
"""Test the interaction between limit and distinct"""
users, addresses = self.tables("users", "addresses")
r = sorted(
[
x[0]
for x in connection.execute(
select(addresses.c.address).distinct().limit(3)
)
]
)
self.assert_(len(r) == 3, repr(r))
self.assert_(r[0] != r[1] and r[1] != r[2], repr(r))
@testing.requires.offset
def test_select_distinct_offset(self, connection):
"""Test the interaction between distinct and offset"""
users, addresses = self.tables("users", "addresses")
r = sorted(
[
x[0]
for x in connection.execute(
select(addresses.c.address)
.distinct()
.offset(1)
.order_by(addresses.c.address)
).fetchall()
]
)
eq_(len(r), 4)
self.assert_(r[0] != r[1] and r[1] != r[2] and r[2] != [3], repr(r))
@testing.requires.offset
def test_select_distinct_limit_offset(self, connection):
"""Test the interaction between limit and limit/offset"""
users, addresses = self.tables("users", "addresses")
r = connection.execute(
select(addresses.c.address)
.order_by(addresses.c.address)
.distinct()
.offset(2)
.limit(3)
).fetchall()
self.assert_(len(r) == 3, repr(r))
self.assert_(r[0] != r[1] and r[1] != r[2], repr(r))
|
LimitTest
|
python
|
sympy__sympy
|
sympy/functions/special/beta_functions.py
|
{
"start": 786,
"end": 5651
}
|
class ____(DefinedFunction):
r"""
The beta integral is called the Eulerian integral of the first kind by
Legendre:
.. math::
\mathrm{B}(x,y) \int^{1}_{0} t^{x-1} (1-t)^{y-1} \mathrm{d}t.
Explanation
===========
The Beta function or Euler's first integral is closely associated
with the gamma function. The Beta function is often used in probability
theory and mathematical statistics. It satisfies properties like:
.. math::
\mathrm{B}(a,1) = \frac{1}{a} \\
\mathrm{B}(a,b) = \mathrm{B}(b,a) \\
\mathrm{B}(a,b) = \frac{\Gamma(a) \Gamma(b)}{\Gamma(a+b)}
Therefore for integral values of $a$ and $b$:
.. math::
\mathrm{B} = \frac{(a-1)! (b-1)!}{(a+b-1)!}
A special case of the Beta function when `x = y` is the
Central Beta function. It satisfies properties like:
.. math::
\mathrm{B}(x) = 2^{1 - 2x}\mathrm{B}(x, \frac{1}{2}) \\
\mathrm{B}(x) = 2^{1 - 2x} cos(\pi x) \mathrm{B}(\frac{1}{2} - x, x) \\
\mathrm{B}(x) = \int_{0}^{1} \frac{t^x}{(1 + t)^{2x}} dt \\
\mathrm{B}(x) = \frac{2}{x} \prod_{n = 1}^{\infty} \frac{n(n + 2x)}{(n + x)^2}
Examples
========
>>> from sympy import I, pi
>>> from sympy.abc import x, y
The Beta function obeys the mirror symmetry:
>>> from sympy import beta, conjugate
>>> conjugate(beta(x, y))
beta(conjugate(x), conjugate(y))
Differentiation with respect to both $x$ and $y$ is supported:
>>> from sympy import beta, diff
>>> diff(beta(x, y), x)
(polygamma(0, x) - polygamma(0, x + y))*beta(x, y)
>>> diff(beta(x, y), y)
(polygamma(0, y) - polygamma(0, x + y))*beta(x, y)
>>> diff(beta(x), x)
2*(polygamma(0, x) - polygamma(0, 2*x))*beta(x, x)
We can numerically evaluate the Beta function to
arbitrary precision for any complex numbers x and y:
>>> from sympy import beta
>>> beta(pi).evalf(40)
0.02671848900111377452242355235388489324562
>>> beta(1 + I).evalf(20)
-0.2112723729365330143 - 0.7655283165378005676*I
See Also
========
gamma: Gamma function.
uppergamma: Upper incomplete gamma function.
lowergamma: Lower incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
References
==========
.. [1] https://en.wikipedia.org/wiki/Beta_function
.. [2] https://mathworld.wolfram.com/BetaFunction.html
.. [3] https://dlmf.nist.gov/5.12
"""
unbranched = True
def fdiff(self, argindex):
x, y = self.args
if argindex == 1:
# Diff wrt x
return beta(x, y)*(digamma(x) - digamma(x + y))
elif argindex == 2:
# Diff wrt y
return beta(x, y)*(digamma(y) - digamma(x + y))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, x, y=None):
if y is None:
return beta(x, x)
if x.is_Number and y.is_Number:
return beta(x, y, evaluate=False).doit()
def doit(self, **hints):
x = xold = self.args[0]
# Deal with unevaluated single argument beta
single_argument = len(self.args) == 1
y = yold = self.args[0] if single_argument else self.args[1]
if hints.get('deep', True):
x = x.doit(**hints)
y = y.doit(**hints)
if y.is_zero or x.is_zero:
return S.ComplexInfinity
if y is S.One:
return 1/x
if x is S.One:
return 1/y
if y == x + 1:
return 1/(x*y*catalan(x))
s = x + y
if (s.is_integer and s.is_negative and x.is_integer is False and
y.is_integer is False):
return S.Zero
if x == xold and y == yold and not single_argument:
return self
return beta(x, y)
def _eval_expand_func(self, **hints):
x, y = self.args
return gamma(x)*gamma(y) / gamma(x + y)
def _eval_is_real(self):
return self.args[0].is_real and self.args[1].is_real
def _eval_conjugate(self):
return self.func(self.args[0].conjugate(), self.args[1].conjugate())
def _eval_rewrite_as_gamma(self, x, y, piecewise=True, **kwargs):
return self._eval_expand_func(**kwargs)
def _eval_rewrite_as_Integral(self, x, y, **kwargs):
from sympy.integrals.integrals import Integral
t = Dummy(uniquely_named_symbol('t', [x, y]).name)
return Integral(t**(x - 1)*(1 - t)**(y - 1), (t, 0, 1))
###############################################################################
########################## INCOMPLETE BETA FUNCTION ###########################
###############################################################################
|
beta
|
python
|
django__django
|
django/db/models/aggregates.py
|
{
"start": 9851,
"end": 10232
}
|
class ____(NumericOutputFieldMixin, Aggregate):
name = "StdDev"
arity = 1
def __init__(self, expression, sample=False, **extra):
self.function = "STDDEV_SAMP" if sample else "STDDEV_POP"
super().__init__(expression, **extra)
def _get_repr_options(self):
return {**super()._get_repr_options(), "sample": self.function == "STDDEV_SAMP"}
|
StdDev
|
python
|
scrapy__scrapy
|
tests/test_downloader_handlers_http_base.py
|
{
"start": 24656,
"end": 26281
}
|
class ____(ABC):
"""Base class for special cases tested with just one simple request"""
keyfile = "keys/localhost.key"
certfile = "keys/localhost.crt"
host = "localhost"
cipher_string: str | None = None
@pytest.fixture(scope="class")
def simple_mockserver(self) -> Generator[SimpleMockServer]:
with SimpleMockServer(
self.keyfile, self.certfile, self.cipher_string
) as simple_mockserver:
yield simple_mockserver
@pytest.fixture(scope="class")
def url(self, simple_mockserver: SimpleMockServer) -> str:
# need to use self.host instead of what mockserver returns
return f"https://{self.host}:{simple_mockserver.port(is_secure=True)}/file"
@property
@abstractmethod
def download_handler_cls(self) -> type[DownloadHandlerProtocol]:
raise NotImplementedError
@async_yield_fixture
async def download_handler(self) -> AsyncGenerator[DownloadHandlerProtocol]:
if self.cipher_string is not None:
settings_dict = {"DOWNLOADER_CLIENT_TLS_CIPHERS": self.cipher_string}
else:
settings_dict = None
crawler = get_crawler(settings_dict=settings_dict)
dh = build_from_crawler(self.download_handler_cls, crawler)
yield dh
await close_dh(dh)
@deferred_f_from_coro_f
async def test_download(
self, url: str, download_handler: DownloadHandlerProtocol
) -> None:
request = Request(url)
response = await download_request(download_handler, request)
assert response.body == b"0123456789"
|
TestSimpleHttpsBase
|
python
|
huggingface__transformers
|
src/transformers/convert_slow_tokenizer.py
|
{
"start": 26543,
"end": 27023
}
|
class ____(SpmConverter):
def unk_id(self, proto):
unk_id = 3
return unk_id
def post_processor(self):
return processors.TemplateProcessing(
single="<s> $A </s>",
pair="<s> $A </s> </s> $B </s>",
special_tokens=[
("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")),
("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
],
)
|
BarthezConverter
|
python
|
ansible__ansible
|
test/integration/targets/ansible-test-sanity-yamllint/ansible_collections/ns/col/plugins/inventory/inventory1.py
|
{
"start": 596,
"end": 665
}
|
class ____(BaseInventoryPlugin):
NAME = 'inventory1'
|
InventoryModule
|
python
|
facelessuser__soupsieve
|
soupsieve/css_parser.py
|
{
"start": 9547,
"end": 10749
}
|
class ____(SelectorPattern):
"""Selector pattern."""
def __init__(self, patterns: tuple[tuple[str, tuple[str, ...], str, type[SelectorPattern]], ...]) -> None:
"""Initialize."""
self.patterns = {}
for p in patterns:
name = p[0]
pattern = p[3](name, p[2])
for pseudo in p[1]:
self.patterns[pseudo] = pattern
self.matched_name = None # type: SelectorPattern | None
self.re_pseudo_name = re.compile(PAT_PSEUDO_CLASS_SPECIAL, re.I | re.X | re.U)
def get_name(self) -> str:
"""Get name."""
return '' if self.matched_name is None else self.matched_name.get_name()
def match(self, selector: str, index: int, flags: int) -> Match[str] | None:
"""Match the selector."""
pseudo = None
m = self.re_pseudo_name.match(selector, index)
if m:
name = util.lower(css_unescape(m.group('name')))
pattern = self.patterns.get(name)
if pattern:
pseudo = pattern.match(selector, index, flags)
if pseudo:
self.matched_name = pattern
return pseudo
|
SpecialPseudoPattern
|
python
|
walkccc__LeetCode
|
solutions/1836. Remove Duplicates From an Unsorted Linked List/1836.py
|
{
"start": 0,
"end": 424
}
|
class ____:
def deleteDuplicatesUnsorted(self, head: ListNode) -> ListNode:
dummy = ListNode(0, head)
count = collections.Counter()
curr = head
while curr:
count[curr.val] += 1
curr = curr.next
curr = dummy
while curr:
while curr.next and curr.next.val in count and count[curr.next.val] > 1:
curr.next = curr.next.next
curr = curr.next
return dummy.next
|
Solution
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/index_object.py
|
{
"start": 1959,
"end": 2804
}
|
class ____:
def setup(self):
self.idx_inc = RangeIndex(start=0, stop=10**6, step=3)
self.idx_dec = RangeIndex(start=10**6, stop=-1, step=-3)
def time_max(self):
self.idx_inc.max()
def time_max_trivial(self):
self.idx_dec.max()
def time_min(self):
self.idx_dec.min()
def time_min_trivial(self):
self.idx_inc.min()
def time_get_loc_inc(self):
self.idx_inc.get_loc(900_000)
def time_get_loc_dec(self):
self.idx_dec.get_loc(100_000)
def time_iter_inc(self):
for _ in self.idx_inc:
pass
def time_iter_dec(self):
for _ in self.idx_dec:
pass
def time_sort_values_asc(self):
self.idx_inc.sort_values()
def time_sort_values_des(self):
self.idx_inc.sort_values(ascending=False)
|
Range
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimum-cost-for-cutting-cake-ii.py
|
{
"start": 792,
"end": 1496
}
|
class ____(object):
def minimumCost(self, m, n, horizontalCut, verticalCut):
"""
:type m: int
:type n: int
:type horizontalCut: List[int]
:type verticalCut: List[int]
:rtype: int
"""
horizontalCut.sort(reverse=True)
verticalCut.sort(reverse=True)
result = i = j = 0
while i < len(horizontalCut) or j < len(verticalCut):
if j == len(verticalCut) or (i < len(horizontalCut) and horizontalCut[i] > verticalCut[j]):
result += horizontalCut[i]*(j+1)
i += 1
else:
result += verticalCut[j]*(i+1)
j += 1
return result
|
Solution2
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/linalg/matrix_solve_op_test.py
|
{
"start": 1432,
"end": 6132
}
|
class ____(test.TestCase):
def _verifySolve(self, x, y, batch_dims=None):
for np_type in [np.float32, np.float64, np.complex64, np.complex128]:
if np_type == np.float32 or np_type == np.complex64:
tol = 1e-5
else:
tol = 1e-12
for adjoint in False, True:
if np_type in (np.float32, np.float64):
a = x.real.astype(np_type)
b = y.real.astype(np_type)
a_np = np.transpose(a) if adjoint else a
else:
a = x.astype(np_type)
b = y.astype(np_type)
a_np = np.conj(np.transpose(a)) if adjoint else a
if batch_dims is not None:
a = np.tile(a, batch_dims + [1, 1])
a_np = np.tile(a_np, batch_dims + [1, 1])
b = np.tile(b, batch_dims + [1, 1])
np_ans = np.linalg.solve(a_np, b)
for use_placeholder in set((False, not context.executing_eagerly())):
if use_placeholder:
a_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
b_ph = array_ops.placeholder(dtypes.as_dtype(np_type))
tf_ans = linalg_ops.matrix_solve(a_ph, b_ph, adjoint=adjoint)
with self.cached_session() as sess:
out = sess.run(tf_ans, {a_ph: a, b_ph: b})
else:
tf_ans = linalg_ops.matrix_solve(a, b, adjoint=adjoint)
out = self.evaluate(tf_ans)
self.assertEqual(tf_ans.get_shape(), out.shape)
self.assertEqual(np_ans.shape, out.shape)
self.assertAllClose(np_ans, out, atol=tol, rtol=tol)
def _generateMatrix(self, m, n):
matrix = (np.random.normal(-5, 5,
m * n).astype(np.complex128).reshape([m, n]))
matrix.imag = (np.random.normal(-5, 5, m * n).astype(np.complex128).reshape(
[m, n]))
return matrix
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testSolve(self):
for n in 1, 2, 4, 9:
matrix = self._generateMatrix(n, n)
for nrhs in 1, 2, n:
rhs = self._generateMatrix(n, nrhs)
self._verifySolve(matrix, rhs)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testSolveBatch(self):
for n in 2, 5:
matrix = self._generateMatrix(n, n)
for nrhs in 1, n:
rhs = self._generateMatrix(n, nrhs)
for batch_dims in [[2], [2, 2], [7, 4]]:
self._verifySolve(matrix, rhs, batch_dims=batch_dims)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testNonSquareMatrix(self):
# When the solve of a non-square matrix is attempted we should return
# an error
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
matrix = constant_op.constant([[1., 2., 3.], [3., 4., 5.]])
self.evaluate(linalg_ops.matrix_solve(matrix, matrix))
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testWrongDimensions(self):
# The matrix and right-hand sides should have the same number of rows.
matrix = constant_op.constant([[1., 0.], [0., 1.]])
rhs = constant_op.constant([[1., 0.]])
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
self.evaluate(linalg_ops.matrix_solve(matrix, rhs))
# The matrix and right-hand side should have the same batch dimensions
matrix = np.random.normal(size=(2, 6, 2, 2))
rhs = np.random.normal(size=(2, 3, 2, 2))
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
self.evaluate(linalg_ops.matrix_solve(matrix, rhs))
def testNotInvertible(self):
# The input should be invertible.
with self.assertRaisesOpError("Input matrix is not invertible."):
# All rows of the matrix below add to zero
matrix = constant_op.constant([[1., 0., -1.], [-1., 1., 0.],
[0., -1., 1.]])
self.evaluate(linalg_ops.matrix_solve(matrix, matrix))
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testConcurrent(self):
seed = [42, 24]
matrix_shape = [3, 3]
all_ops = []
for adjoint_ in False, True:
lhs1 = stateless_random_ops.stateless_random_normal(
matrix_shape, seed=seed)
lhs2 = stateless_random_ops.stateless_random_normal(
matrix_shape, seed=seed)
rhs1 = stateless_random_ops.stateless_random_normal(
matrix_shape, seed=seed)
rhs2 = stateless_random_ops.stateless_random_normal(
matrix_shape, seed=seed)
s1 = linalg_ops.matrix_solve(lhs1, rhs1, adjoint=adjoint_)
s2 = linalg_ops.matrix_solve(lhs2, rhs2, adjoint=adjoint_)
all_ops += [s1, s2]
val = self.evaluate(all_ops)
for i in range(0, len(all_ops), 2):
self.assertAllEqual(val[i], val[i + 1])
|
MatrixSolveOpTest
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_9/auth.py
|
{
"start": 6677,
"end": 8286
}
|
class ____(Request):
"""
Edit a users' auth data properties
:param user: User ID
:type user: str
:param role: The new user's role within the company
:type role: str
"""
_service = "auth"
_action = "edit_user"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"role": {
"description": "The new user's role within the company",
"enum": ["admin", "superuser", "user", "annotator"],
"type": ["string", "null"],
},
"user": {"description": "User ID", "type": ["string", "null"]},
},
"type": "object",
}
def __init__(self, user: Optional[str] = None, role: Optional[str] = None, **kwargs: Any) -> None:
super(EditUserRequest, self).__init__(**kwargs)
self.user = user
self.role = role
@schema_property("user")
def user(self) -> Optional[str]:
return self._property_user
@user.setter
def user(self, value: Optional[str]) -> None:
if value is None:
self._property_user = None
return
self.assert_isinstance(value, "user", six.string_types)
self._property_user = value
@schema_property("role")
def role(self) -> Optional[str]:
return self._property_role
@role.setter
def role(self, value: Optional[str]) -> None:
if value is None:
self._property_role = None
return
self.assert_isinstance(value, "role", six.string_types)
self._property_role = value
|
EditUserRequest
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_textbox42.py
|
{
"start": 315,
"end": 904
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("textbox42.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox(
"E9", "This is some text", {"description": "Some alternative text"}
)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
getsentry__sentry
|
tests/sentry/grouping/test_builtin_fingerprinting.py
|
{
"start": 22610,
"end": 34354
}
|
class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.project = self.create_project()
self.chunkload_error_trace: dict[str, Any] = {
"fingerprint": ["my-route", "{{ default }}"],
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"function": "main",
"abs_path": "foo/bar.tsx",
"module": "foo.bar",
"filename": "bar.tsx",
"lineno": 13,
"in_app": False,
}
]
},
"type": "ChunkLoadError",
"value": "ChunkLoadError: something something...",
}
]
},
"platform": "javascript",
"sdk": {"name": "sentry.javascript.nextjs", "version": "1.2.3"},
}
self.hydration_error_trace: dict[str, Any] = {
"fingerprint": ["my-route", "{{ default }}"],
"message": "Text content does not match server-rendered HTML.",
"platform": "javascript",
"sdk": {"name": "sentry.javascript.nextjs", "version": "1.2.3"},
"tags": {"transaction": "/"},
}
def _get_event_for_trace(self, stacktrace: dict[str, Any]) -> Event:
mgr = EventManager(data=stacktrace, grouping_config=GROUPING_CONFIG)
mgr.normalize()
data = mgr.get_data()
data.setdefault("fingerprint", ["{{ default }}"])
fingerprinting_config = get_fingerprinting_config_for_project(project=self.project)
apply_server_side_fingerprinting(data, fingerprinting_config)
event_type = get_event_type(data)
event_metadata = event_type.get_metadata(data)
data.update(materialize_metadata(data, event_type, event_metadata))
return eventstore.backend.create_event(project_id=1, data=data)
def test_built_in_chunkload_rules(self) -> None:
"""
With flag enabled, the built-in rules for ChunkLoadError should be applied.
"""
event = self._get_event_for_trace(stacktrace=self.chunkload_error_trace)
assert event.data["fingerprint"] == ["chunkloaderror"]
assert event.data["_fingerprint_info"]["matched_rule"] == {
"attributes": {},
"fingerprint": ["chunkloaderror"],
"matchers": [["family", "javascript"], ["type", "ChunkLoadError"]],
"text": 'family:"javascript" type:"ChunkLoadError" -> "chunkloaderror"',
"is_builtin": True,
}
def test_built_in_chunkload_rules_variants(self) -> None:
event = self._get_event_for_trace(stacktrace=self.chunkload_error_trace)
variants = event.get_grouping_variants(GROUPING_CONFIG)
assert "built_in_fingerprint" in variants
assert variants["built_in_fingerprint"].as_dict() == {
"hash": mock.ANY, # ignore hash as it can change for unrelated reasons
"type": "custom_fingerprint",
"key": "built_in_fingerprint",
"contributes": True,
"description": "Sentry defined fingerprint",
"values": ["chunkloaderror"],
"client_values": ["my-route", "{{ default }}"],
"matched_rule": 'family:"javascript" type:"ChunkLoadError" -> "chunkloaderror"',
"hint": None,
}
def test_built_in_chunkload_rules_value_only(self) -> None:
"""
ChunkLoadError rule based on value should apply even if error is not ChunkLoadError type.
"""
self.chunkload_error_trace["exception"]["values"][0]["type"] = "chunky"
event = self._get_event_for_trace(stacktrace=self.chunkload_error_trace)
assert event.data["fingerprint"] == ["chunkloaderror"]
assert event.data["_fingerprint_info"]["matched_rule"] == {
"attributes": {},
"fingerprint": ["chunkloaderror"],
"matchers": [["family", "javascript"], ["value", "ChunkLoadError*"]],
"text": 'family:"javascript" value:"ChunkLoadError*" -> "chunkloaderror"',
"is_builtin": True,
}
def test_built_in_chunkload_rules_wrong_sdk(self) -> None:
"""
Built-in ChunkLoadError rule should also apply regardless of the SDK value.
"""
self.chunkload_error_trace["sdk"]["name"] = "not.a.real.SDK"
event = self._get_event_for_trace(stacktrace=self.chunkload_error_trace)
assert event.data["fingerprint"] == ["chunkloaderror"]
assert event.data["_fingerprint_info"]["matched_rule"] == {
"attributes": {},
"fingerprint": ["chunkloaderror"],
"matchers": [["family", "javascript"], ["type", "ChunkLoadError"]],
"text": 'family:"javascript" type:"ChunkLoadError" -> "chunkloaderror"',
"is_builtin": True,
}
def test_built_in_hydration_rules_same_transactions(self) -> None:
"""
Hydration errors with the same transaction should be grouped and the built-in rules for
hydration errors should be applied.
"""
event_message1 = self.store_event(data=self.hydration_error_trace, project_id=self.project)
data_message2 = self.hydration_error_trace.copy()
data_message2["message"] = (
"Hydration failed because the initial UI does not match what was rendered on the server."
)
event_message2 = self.store_event(data=data_message2, project_id=self.project)
assert event_message1.data.data["fingerprint"] == ["hydrationerror", "{{tags.transaction}}"]
assert event_message1.data.data["_fingerprint_info"]["matched_rule"] == {
"attributes": {},
"fingerprint": ["hydrationerror", "{{tags.transaction}}"],
"matchers": [
["family", "javascript"],
["tags.transaction", "*"],
["message", self.hydration_error_trace["message"]],
],
"text": 'family:"javascript" tags.transaction:"*" message:"Text content does not match server-rendered HTML." -> "hydrationerror{{tags.transaction}}"',
"is_builtin": True,
}
assert event_message2.data.data["fingerprint"] == ["hydrationerror", "{{tags.transaction}}"]
assert event_message2.data.data["_fingerprint_info"]["matched_rule"] == {
"attributes": {},
"fingerprint": ["hydrationerror", "{{tags.transaction}}"],
"matchers": [
["family", "javascript"],
["tags.transaction", "*"],
["message", data_message2["message"]],
],
"text": 'family:"javascript" tags.transaction:"*" message:"Hydration failed because the initial UI does not match what was rendered on the server." -> "hydrationerror{{tags.transaction}}"',
"is_builtin": True,
}
assert event_message1.group == event_message2.group
def test_built_in_hydration_rules_different_transactions(self) -> None:
"""
Hydration errors with different transactions should not be grouped and the built-in rules
for hydration errors should be applied.
"""
event_transaction_slash = self.store_event(
data=self.hydration_error_trace, project_id=self.project
)
data_transaction_text = self.hydration_error_trace.copy()
data_transaction_text["tags"]["transaction"] = "/text/"
event_transaction_text = self.store_event(
data=data_transaction_text, project_id=self.project
)
assert event_transaction_slash.data.data["fingerprint"] == [
"hydrationerror",
"{{tags.transaction}}",
]
assert event_transaction_slash.data.data["_fingerprint_info"]["matched_rule"] == {
"attributes": {},
"fingerprint": ["hydrationerror", "{{tags.transaction}}"],
"matchers": [
["family", "javascript"],
["tags.transaction", "*"],
["message", self.hydration_error_trace["message"]],
],
"text": 'family:"javascript" tags.transaction:"*" message:"Text content does not match server-rendered HTML." -> "hydrationerror{{tags.transaction}}"',
"is_builtin": True,
}
assert event_transaction_text.data.data["fingerprint"] == [
"hydrationerror",
"{{tags.transaction}}",
]
assert event_transaction_text.data.data["_fingerprint_info"]["matched_rule"] == {
"attributes": {},
"fingerprint": ["hydrationerror", "{{tags.transaction}}"],
"matchers": [
["family", "javascript"],
["tags.transaction", "*"],
["message", self.hydration_error_trace["message"]],
],
"text": 'family:"javascript" tags.transaction:"*" message:"Text content does not match server-rendered HTML." -> "hydrationerror{{tags.transaction}}"',
"is_builtin": True,
}
assert event_transaction_slash.group != event_transaction_text.group
def test_built_in_hydration_rules_no_transactions(self) -> None:
"""
For hydration errors with no transactions the built-in HydrationError rules should NOT be
applied.
"""
data_transaction_no_tx = self.hydration_error_trace
del data_transaction_no_tx["tags"]["transaction"]
event_transaction_no_tx = self.store_event(
data=data_transaction_no_tx, project_id=self.project
)
variants = {
variant_name: variant.as_dict()
for variant_name, variant in event_transaction_no_tx.get_grouping_variants(
force_config=GROUPING_CONFIG
).items()
}
assert "built_in_fingerprint" not in variants
assert event_transaction_no_tx.data["fingerprint"] == ["my-route", "{{ default }}"]
def test_hydration_rule_w_family_matcher(self) -> None:
"""
Testing if rules are applied correctly with a family matcher
"""
mgr = EventManager(data=self.hydration_error_trace, grouping_config=GROUPING_CONFIG)
mgr.normalize()
data = mgr.get_data()
data.setdefault("fingerprint", ["{{ default }}"])
fingerprinting_config = FingerprintingConfig.from_config_string(
'family:javascript tags.transaction:"*" message:"Text content does not match server-rendered HTML." -> hydrationerror, {{tags.transaction}}'
)
apply_server_side_fingerprinting(data, fingerprinting_config)
event_type = get_event_type(data)
event_metadata = event_type.get_metadata(data)
data.update(materialize_metadata(data, event_type, event_metadata))
event = eventstore.backend.create_event(project_id=1, data=data)
assert event.data.data["_fingerprint_info"]["matched_rule"] == {
"attributes": {},
"fingerprint": ["hydrationerror", "{{tags.transaction}}"],
"matchers": [
["family", "javascript"],
["tags.transaction", "*"],
["message", self.hydration_error_trace["message"]],
],
"text": 'family:"javascript" tags.transaction:"*" message:"Text content does not match server-rendered HTML." -> "hydrationerror{{tags.transaction}}"',
}
|
BuiltInFingerprintingTest
|
python
|
pypa__warehouse
|
warehouse/accounts/forms.py
|
{
"start": 4164,
"end": 5242
}
|
class ____:
username = wtforms.StringField(
validators=[
wtforms.validators.InputRequired(),
PreventNullBytesValidator(message=INVALID_USERNAME_MESSAGE),
wtforms.validators.Length(
max=50, message=_("Choose a username with 50 characters or less.")
),
# the regexp below must match the CheckConstraint
# for the username field in accounts.models.User
wtforms.validators.Regexp(
r"^[a-zA-Z0-9][a-zA-Z0-9._-]*[a-zA-Z0-9]$",
message=INVALID_USERNAME_MESSAGE,
),
]
)
def validate_username(self, field):
if (
self.user_service.username_is_prohibited(field.data)
or self.user_service.find_userid(field.data) is not None
):
raise wtforms.validators.ValidationError(
_(
"This username is already being used by another "
"account. Choose a different username."
)
)
|
NewUsernameMixin
|
python
|
realpython__materials
|
python-protocol/contents.py
|
{
"start": 648,
"end": 1200
}
|
class ____:
def __init__(self):
self.videos = []
def create_content(self) -> str:
return "Recording a video."
def add_video(self, title: str, path: str) -> None:
self.videos.append(f"{title}: {path}")
print(f"Video added: {title}")
def produce_content(creator: ContentCreator):
print(creator.create_content())
def add_post(blogger: Blogger, title: str, content: str):
blogger.add_post(title, content)
def add_video(vlogger: Vlogger, title: str, path: str):
vlogger.add_video(title, path)
|
Vlog
|
python
|
ray-project__ray
|
python/ray/data/_internal/logical/interfaces/plan.py
|
{
"start": 131,
"end": 598
}
|
class ____:
"""Abstract class for logical/physical execution plans.
This plan should hold an operator representing the plan DAG and any auxiliary data
that's useful for plan optimization or execution.
"""
def __init__(self, context: "DataContext"):
self._context = context
@property
def dag(self) -> Operator:
raise NotImplementedError
@property
def context(self) -> "DataContext":
return self._context
|
Plan
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/destinations.py
|
{
"start": 88662,
"end": 89569
}
|
class ____(GeneratedAirbyteDestination):
@public
def __init__(self, name: str, project_id: str, credentials_json: Optional[str] = None):
"""Airbyte Destination for Firestore.
Documentation can be found at https://docs.airbyte.com/integrations/destinations/firestore
Args:
name (str): The name of the destination.
project_id (str): The GCP project ID for the project containing the target BigQuery dataset.
credentials_json (Optional[str]): The contents of the JSON service account key. Check out the docs if you need help generating this key. Default credentials will be used if this field is left empty.
"""
self.project_id = check.str_param(project_id, "project_id")
self.credentials_json = check.opt_str_param(credentials_json, "credentials_json")
super().__init__("Firestore", name)
|
FirestoreDestination
|
python
|
html5lib__html5lib-python
|
setup.py
|
{
"start": 668,
"end": 4831
}
|
class ____(dict):
def __setitem__(self, key, value):
pass
def pop(self, i=-1):
return self[i]
if _markerlib and sys.version_info[0] == 3:
env = _markerlib.markers._VARS
for key in list(env.keys()):
new_key = key.replace('.', '_')
if new_key != key:
env[new_key] = env[key]
_markerlib.markers._VARS = Python3MarkerDict(env)
def default_environment():
return _markerlib.markers._VARS
_markerlib.default_environment = default_environment
# Avoid the very buggy pkg_resources.parser, which doesn't consistently
# recognise the markers needed by this setup.py
# Change this to setuptools 20.10.0 to support all markers.
if pkg_resources:
if parse_version(setuptools_version) < parse_version('18.5'):
MarkerEvaluation = pkg_resources.MarkerEvaluation
del pkg_resources.parser
pkg_resources.evaluate_marker = MarkerEvaluation._markerlib_evaluate
MarkerEvaluation.evaluate_marker = MarkerEvaluation._markerlib_evaluate
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML'
]
here = dirname(__file__)
with codecs.open(join(here, 'README.rst'), 'r', 'utf8') as readme_file:
with codecs.open(join(here, 'CHANGES.rst'), 'r', 'utf8') as changes_file:
long_description = readme_file.read() + '\n' + changes_file.read()
version = None
with open(join(here, "html5lib", "__init__.py"), "rb") as init_file:
t = ast.parse(init_file.read(), filename="__init__.py", mode="exec")
assert isinstance(t, ast.Module)
assignments = filter(lambda x: isinstance(x, ast.Assign), t.body)
for a in assignments:
if (len(a.targets) == 1 and
isinstance(a.targets[0], ast.Name) and
a.targets[0].id == "__version__" and
isinstance(a.value, ast.Str)):
version = a.value.s
setup(name='html5lib',
version=version,
url='https://github.com/html5lib/html5lib-python',
license="MIT License",
description='HTML parser based on the WHATWG HTML specification',
long_description=long_description,
classifiers=classifiers,
maintainer='James Graham',
maintainer_email='james@hoppipolla.co.uk',
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=[
'six>=1.9',
'webencodings>=0.5.1',
],
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
extras_require={
# A conditional extra will only install these items when the extra is
# requested and the condition matches.
"lxml:platform_python_implementation == 'CPython'": ["lxml>=3.4.0"],
# Standard extras, will be installed when the extra is requested.
"genshi": ["genshi>=0.7.1"],
"chardet": ["chardet>=2.2.1"],
# The all extra combines a standard extra which will be used anytime
# the all extra is requested, and it extends it with a conditional
# extra that will be installed whenever the condition matches and the
# all extra is requested.
"all": ["genshi>=0.7.1", "chardet>=2.2.1"],
"all:platform_python_implementation == 'CPython'": ["lxml>=3.4.0"],
},
)
|
Python3MarkerDict
|
python
|
jazzband__django-pipeline
|
tests/tests/test_utils.py
|
{
"start": 92,
"end": 549
}
|
class ____(TestCase):
def test_guess_type(self):
self.assertEqual("text/css", guess_type("stylesheet.css"))
self.assertEqual("text/coffeescript", guess_type("application.coffee"))
self.assertEqual("text/less", guess_type("stylesheet.less"))
def test_mimetypes_are_str(self):
for ext, mtype in mimetypes.types_map.items():
self.assertIsInstance(ext, str)
self.assertIsInstance(mtype, str)
|
UtilTest
|
python
|
scikit-learn__scikit-learn
|
sklearn/base.py
|
{
"start": 24177,
"end": 25834
}
|
class ____:
"""Mixin class for all cluster estimators in scikit-learn.
- set estimator type to `"clusterer"` through the `estimator_type` tag;
- `fit_predict` method returning the cluster labels associated to each sample.
Examples
--------
>>> import numpy as np
>>> from sklearn.base import BaseEstimator, ClusterMixin
>>> class MyClusterer(ClusterMixin, BaseEstimator):
... def fit(self, X, y=None):
... self.labels_ = np.ones(shape=(len(X),), dtype=np.int64)
... return self
>>> X = [[1, 2], [2, 3], [3, 4]]
>>> MyClusterer().fit_predict(X)
array([1, 1, 1])
"""
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.estimator_type = "clusterer"
if tags.transformer_tags is not None:
tags.transformer_tags.preserves_dtype = []
return tags
def fit_predict(self, X, y=None, **kwargs):
"""
Perform clustering on `X` and returns cluster labels.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
y : Ignored
Not used, present for API consistency by convention.
**kwargs : dict
Arguments to be passed to ``fit``.
.. versionadded:: 1.4
Returns
-------
labels : ndarray of shape (n_samples,), dtype=np.int64
Cluster labels.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X, **kwargs)
return self.labels_
|
ClusterMixin
|
python
|
kamyu104__LeetCode-Solutions
|
Python/insertion-sort-list.py
|
{
"start": 276,
"end": 1129
}
|
class ____(object):
# @param head, a ListNode
# @return a ListNode
def insertionSortList(self, head):
if head is None or self.isSorted(head):
return head
dummy = ListNode(-2147483648)
dummy.next = head
cur, sorted_tail = head.next, head
while cur:
prev = dummy
while prev.next.val < cur.val:
prev = prev.next
if prev == sorted_tail:
cur, sorted_tail = cur.next, cur
else:
cur.next, prev.next, sorted_tail.next = prev.next, cur, cur.next
cur = sorted_tail.next
return dummy.next
def isSorted(self, head):
while head and head.next:
if head.val > head.next.val:
return False
head = head.next
return True
|
Solution
|
python
|
python-excel__xlwt
|
xlwt/BIFFRecords.py
|
{
"start": 15350,
"end": 16095
}
|
class ____(BiffRecord):
"""
This record specifies the base date for displaying date values. All
dates are stored as count of days past this base date. In BIFF2-BIFF4
this record is part of the Calculation Settings Block.
In BIFF5-BIFF8 it is stored in the Workbook Globals Substream.
Record DATEMODE, BIFF2-BIFF8:
Offset Size Contents
0 2 0 = Base is 1899-Dec-31 (the cell = 1 represents 1900-Jan-01)
1 = Base is 1904-Jan-01 (the cell = 1 represents 1904-Jan-02)
"""
_REC_ID = 0x0022
def __init__(self, from1904):
if from1904:
self._rec_data = pack('<H', 1)
else:
self._rec_data = pack('<H', 0)
|
DateModeRecord
|
python
|
lazyprogrammer__machine_learning_examples
|
ann_class2/dropout_theano.py
|
{
"start": 1517,
"end": 5111
}
|
class ____(object):
def __init__(self, hidden_layer_sizes, p_keep):
self.hidden_layer_sizes = hidden_layer_sizes
self.dropout_rates = p_keep
def fit(self, X, Y, Xvalid, Yvalid, learning_rate=1e-2, mu=0.9, decay=0.9, epochs=10, batch_sz=100, show_fig=False):
X = X.astype(np.float32)
Y = Y.astype(np.int32)
Xvalid = Xvalid.astype(np.float32)
Yvalid = Yvalid.astype(np.int32)
self.rng = RandomStreams()
# initialize hidden layers
N, D = X.shape
K = len(set(Y))
self.hidden_layers = []
M1 = D
count = 0
for M2 in self.hidden_layer_sizes:
h = HiddenLayer(M1, M2, count)
self.hidden_layers.append(h)
M1 = M2
count += 1
W = np.random.randn(M1, K) * np.sqrt(2.0 / M1)
b = np.zeros(K)
self.W = theano.shared(W, 'W_logreg')
self.b = theano.shared(b, 'b_logreg')
# collect params for later use
self.params = [self.W, self.b]
for h in self.hidden_layers:
self.params += h.params
# set up theano functions and variables
thX = T.matrix('X')
thY = T.ivector('Y')
pY_train = self.forward_train(thX)
# this cost is for training
cost = -T.mean(T.log(pY_train[T.arange(thY.shape[0]), thY]))
updates = momentum_updates(cost, self.params, learning_rate, mu)
train_op = theano.function(
inputs=[thX, thY],
updates=updates
)
# for evaluation and prediction
pY_predict = self.forward_predict(thX)
cost_predict = -T.mean(T.log(pY_predict[T.arange(thY.shape[0]), thY]))
prediction = self.predict(thX)
cost_predict_op = theano.function(inputs=[thX, thY], outputs=[cost_predict, prediction])
n_batches = N // batch_sz
costs = []
for i in range(epochs):
X, Y = shuffle(X, Y)
for j in range(n_batches):
Xbatch = X[j*batch_sz:(j*batch_sz+batch_sz)]
Ybatch = Y[j*batch_sz:(j*batch_sz+batch_sz)]
train_op(Xbatch, Ybatch)
if j % 50 == 0:
c, p = cost_predict_op(Xvalid, Yvalid)
costs.append(c)
e = error_rate(Yvalid, p)
print("i:", i, "j:", j, "nb:", n_batches, "cost:", c, "error rate:", e)
if show_fig:
plt.plot(costs)
plt.show()
def forward_train(self, X):
Z = X
for h, p in zip(self.hidden_layers, self.dropout_rates[:-1]):
mask = self.rng.binomial(n=1, p=p, size=Z.shape)
Z = mask * Z
Z = h.forward(Z)
mask = self.rng.binomial(n=1, p=self.dropout_rates[-1], size=Z.shape)
Z = mask * Z
return T.nnet.softmax(Z.dot(self.W) + self.b)
def forward_predict(self, X):
Z = X
for h, p in zip(self.hidden_layers, self.dropout_rates[:-1]):
Z = h.forward(p * Z)
return T.nnet.softmax((self.dropout_rates[-1] * Z).dot(self.W) + self.b)
def predict(self, X):
pY = self.forward_predict(X)
return T.argmax(pY, axis=1)
def error_rate(p, t):
return np.mean(p != t)
def relu(a):
return a * (a > 0)
def main():
# step 1: get the data and define all the usual variables
Xtrain, Xtest, Ytrain, Ytest = get_normalized_data()
ann = ANN([500, 300], [0.8, 0.5, 0.5])
ann.fit(Xtrain, Ytrain, Xtest, Ytest, show_fig=True)
if __name__ == '__main__':
main()
|
ANN
|
python
|
plotly__plotly.py
|
tests/test_core/test_update_objects/test_update_annotations.py
|
{
"start": 134,
"end": 18089
}
|
class ____(TestCase):
def setUp(self):
self.fig = make_subplots(
rows=2, cols=2, specs=[[{}, {"secondary_y": True}], [{}, {"type": "polar"}]]
)
def assert_selected(
self, prop, inds, selector=None, row=None, col=None, secondary_y=None
):
# ## Test select_*
# Get select_ method
prefix = "layout_" if prop == "images" else ""
fn = getattr(self.fig, "select_" + prefix + prop)
# Perform selection
res = fn(selector=selector, row=row, col=col, secondary_y=secondary_y)
self.assertIsInstance(res, types.GeneratorType)
objs = list(res)
# Check length of selected objects
self.assertEqual(len(objs), len(inds))
# Check individual annotations
for i, obj in zip(inds, objs):
self.assertEqual(self.fig.layout[prop][i], obj)
# ## Test for_each_*
objs = []
fn = getattr(self.fig, "for_each_" + prefix + prop[:-1])
fn(
lambda v: objs.append(v),
selector=selector,
row=row,
col=col,
secondary_y=secondary_y,
)
self.assertEqual(len(objs), len(inds))
for i, obj in zip(inds, objs):
self.assertEqual(self.fig.layout[prop][i], obj)
def assert_update(
self, prop, inds, patch, selector=None, row=None, col=None, secondary_y=None
):
# Copy figure and perform update
prefix = "layout_" if prop == "images" else ""
fig_orig = go.Figure(self.fig)
fig = go.Figure(self.fig)
fn = getattr(fig, "update_" + prefix + prop)
fn(patch, selector=selector, row=row, col=col, secondary_y=secondary_y)
# Get original up updated object lis
objs_orig = fig_orig.layout[prop]
objs = fig.layout[prop]
for i, (obj, obj_orig) in enumerate(zip(objs, objs_orig)):
if i in inds:
# Check that object changed from original
self.assertNotEqual(obj, obj_orig)
# Apply update to original and check that they match now
obj_orig.update(patch)
self.assertEqual(obj, obj_orig)
else:
# Check object unchanged
self.assertEqual(obj, obj_orig)
def test_add_annotation_no_grid(self):
# Paper annotation
fig = go.Figure()
fig.add_annotation(text="A", yref="paper")
annot = fig.layout.annotations[-1]
self.assertEqual(annot.text, "A")
self.assertEqual(annot.xref, None)
self.assertEqual(annot.yref, "paper")
# Not valid to add annotation by row/col
with self.assertRaisesRegex(Exception, "make_subplots"):
fig.add_annotation(text="B", row=1, col=1)
def test_add_annotations(self):
# Paper annotation
self.fig.add_annotation(text="A", yref="paper")
annot = self.fig.layout.annotations[-1]
self.assertEqual(annot.text, "A")
self.assertEqual(annot.xref, None)
self.assertEqual(annot.yref, "paper")
# (1, 1) annotation
self.fig.add_annotation(text="B", row=1, col=1)
annot = self.fig.layout.annotations[-1]
self.assertEqual(annot.text, "B")
self.assertEqual(annot.xref, "x")
self.assertEqual(annot.yref, "y")
# (1, 2) annotation, primary y-axis
self.fig.add_annotation(text="C1", row=1, col=2)
annot = self.fig.layout.annotations[-1]
self.assertEqual(annot.text, "C1")
self.assertEqual(annot.xref, "x2")
self.assertEqual(annot.yref, "y2")
# (1, 2) annotation, secondary y-axis
self.fig.add_annotation(text="C2", row=1, col=2, secondary_y=True)
annot = self.fig.layout.annotations[-1]
self.assertEqual(annot.text, "C2")
self.assertEqual(annot.xref, "x2")
self.assertEqual(annot.yref, "y3")
# (2, 1) annotation
self.fig.add_annotation(text="D", row=2, col=1)
annot = self.fig.layout.annotations[-1]
self.assertEqual(annot.text, "D")
self.assertEqual(annot.xref, "x3")
self.assertEqual(annot.yref, "y4")
# Try to add to (2, 2), which not a valid
with self.assertRaisesRegex(ValueError, "of type polar"):
self.fig.add_annotation(text="D", row=2, col=2)
def test_select_annotations_no_grid(self):
(
self.fig.add_annotation(text="A1", arrowcolor="red")
.add_annotation(text="A2", arrowcolor="blue")
.add_annotation(text="A3", arrowcolor="blue")
)
self.assert_selected("annotations", [0, 1, 2])
self.assert_selected("annotations", [0], selector=dict(arrowcolor="red"))
self.assert_selected("annotations", [1, 2], selector=dict(arrowcolor="blue"))
def test_select_annotations(self):
(
self.fig.add_annotation(
text="A1", arrowcolor="red", xref="paper", yref="paper"
)
.add_annotation(text="A2", arrowcolor="blue", xref="paper", yref="paper")
.add_annotation(text="B", arrowcolor="red", row=1, col=1)
.add_annotation(text="C1", row=1, col=2)
.add_annotation(text="C2", row=1, col=2, secondary_y=True)
.add_annotation(text="D", arrowcolor="blue", row=2, col=1)
)
# Test selections
self.assert_selected("annotations", [0, 1, 2, 3, 4, 5])
self.assert_selected("annotations", [0, 2], selector=dict(arrowcolor="red"))
self.assert_selected("annotations", [2, 3, 4], row=1)
self.assert_selected("annotations", [2], selector=dict(arrowcolor="red"), row=1)
self.assert_selected("annotations", [0, 1], dict(yref="paper", xref="paper"))
self.assert_selected("annotations", [4], secondary_y=True)
def test_select_shapes(self):
(
self.fig.add_shape(opacity=0.1, fillcolor="red", xref="paper", yref="paper")
.add_shape(opacity=0.2, fillcolor="blue", xref="paper", yref="paper")
.add_shape(opacity=0.3, fillcolor="red", row=1, col=1)
.add_shape(opacity=0.4, row=1, col=2)
.add_shape(opacity=0.5, row=1, col=2, secondary_y=True)
.add_shape(opacity=0.6, fillcolor="blue", row=2, col=1)
)
# Test selections
self.assert_selected("shapes", [0, 1, 2, 3, 4, 5])
self.assert_selected("shapes", [0, 2], selector=dict(fillcolor="red"))
self.assert_selected("shapes", [2, 3, 4], row=1)
self.assert_selected("shapes", [2], selector=dict(fillcolor="red"), row=1)
self.assert_selected("shapes", [0, 1], dict(yref="paper", xref="paper"))
self.assert_selected("shapes", [4], secondary_y=True)
def test_select_images(self):
(
self.fig.add_layout_image(
opacity=0.1, source="red", xref="paper", yref="paper"
)
.add_layout_image(opacity=0.2, source="blue", xref="paper", yref="paper")
.add_layout_image(opacity=0.3, source="red", row=1, col=1)
.add_layout_image(opacity=0.4, row=1, col=2)
.add_layout_image(opacity=0.5, row=1, col=2, secondary_y=True)
.add_layout_image(opacity=0.6, source="blue", row=2, col=1)
)
# Test selections
self.assert_selected("images", [0, 1, 2, 3, 4, 5])
self.assert_selected("images", [0, 2], selector=dict(source="red"))
self.assert_selected("images", [2, 3, 4], row=1)
self.assert_selected("images", [2], selector=dict(source="red"), row=1)
self.assert_selected("images", [0, 1], dict(yref="paper", xref="paper"))
self.assert_selected("images", [4], secondary_y=True)
def test_update_annotations(self):
(
self.fig.add_annotation(text="A1", arrowcolor="red")
.add_annotation(text="A2", arrowcolor="blue")
.add_annotation(text="B", arrowcolor="red", row=1, col=1)
.add_annotation(text="C1", row=1, col=2)
.add_annotation(text="C2", row=1, col=2, secondary_y=True)
.add_annotation(text="D", arrowcolor="blue", row=2, col=1)
)
self.assert_update(
"annotations", [0, 1, 2, 3, 4, 5], patch=dict(showarrow=False)
)
self.assert_update(
"annotations",
[1, 5],
patch=dict(showarrow=False),
selector=dict(arrowcolor="blue"),
)
self.assert_update("annotations", [2, 3, 4], patch=dict(showarrow=False), row=1)
self.assert_update("annotations", [2, 5], patch=dict(showarrow=False), col=1)
self.assert_update(
"annotations", [4], patch=dict(showarrow=False), secondary_y=True
)
def test_annotation_attributes(self):
self.fig.add_annotation(text="this text", yref="paper")
self.fig.update_annotations(text="hi")
def test_update_shapes(self):
(
self.fig.add_shape(opacity=0.1, fillcolor="red")
.add_shape(opacity=0.2, fillcolor="blue")
.add_shape(opacity=0.3, fillcolor="red", row=1, col=1)
.add_shape(opacity=0.4, row=1, col=2)
.add_shape(opacity=0.5, row=1, col=2, secondary_y=True)
.add_shape(opacity=0.6, fillcolor="blue", row=2, col=1)
)
self.assert_update("shapes", [0, 1, 2, 3, 4, 5], patch=dict(opacity=0))
self.assert_update(
"shapes", [1, 5], patch=dict(opacity=0), selector=dict(fillcolor="blue")
)
self.assert_update("shapes", [2, 3, 4], patch=dict(opacity=0), row=1)
self.assert_update("shapes", [2, 5], patch=dict(opacity=0), col=1)
self.assert_update("shapes", [4], patch=dict(opacity=0), secondary_y=True)
def test_shape_attributes(self):
self.fig.add_shape(fillcolor="blue", opacity=0.3)
self.fig.update_shapes(fillcolor="red")
def test_update_images(self):
(
self.fig.add_layout_image(opacity=0.1, source="red")
.add_layout_image(opacity=0.2, source="blue")
.add_layout_image(opacity=0.3, source="red", row=1, col=1)
.add_layout_image(opacity=0.4, row=1, col=2)
.add_layout_image(opacity=0.5, row=1, col=2, secondary_y=True)
.add_layout_image(opacity=0.6, source="blue", row=2, col=1)
)
self.assert_update("images", [0, 1, 2, 3, 4, 5], patch=dict(opacity=0))
self.assert_update(
"images", [1, 5], patch=dict(opacity=0), selector=dict(source="blue")
)
self.assert_update("images", [2, 3, 4], patch=dict(opacity=0), row=1)
self.assert_update("images", [2, 5], patch=dict(opacity=0), col=1)
self.assert_update("images", [4], patch=dict(opacity=0), secondary_y=True)
def test_image_attributes(self):
self.fig.add_layout_image(name="my name", x=1, y=2)
self.fig.update_layout_images(opacity=0.1)
def test_exclude_empty_subplots():
for k, fun, d, fun2, d2 in [
(
"shapes",
go.Figure.add_shape,
dict(type="rect", x0=1.5, x1=2.5, y0=3.5, y1=4.5),
# add a different type to make the check easier (otherwise we might
# mix up the objects added before and after fun was run)
go.Figure.add_annotation,
dict(x=1, y=2, text="A"),
),
(
"annotations",
go.Figure.add_annotation,
dict(x=1, y=2, text="A"),
go.Figure.add_layout_image,
dict(x=3, y=4, sizex=2, sizey=3, source="test"),
),
(
"images",
go.Figure.add_layout_image,
dict(x=3, y=4, sizex=2, sizey=3, source="test"),
go.Figure.add_shape,
dict(type="rect", x0=1.5, x1=2.5, y0=3.5, y1=4.5),
),
]:
# make a figure where not all the subplots are populated
fig = make_subplots(2, 2)
fig.add_trace(go.Scatter(x=[1, 2, 3], y=[5, 1, 2]), row=1, col=1)
fig.add_trace(go.Scatter(x=[1, 2, 3], y=[2, 1, -7]), row=2, col=2)
fun2(fig, d2, row=1, col=2)
# add a thing to all subplots but make sure it only goes on the
# plots without data or layout objects
fun(fig, d, row="all", col="all", exclude_empty_subplots="anything_truthy")
assert len(fig.layout[k]) == 3
assert fig.layout[k][0]["xref"] == "x" and fig.layout[k][0]["yref"] == "y"
assert fig.layout[k][1]["xref"] == "x2" and fig.layout[k][1]["yref"] == "y2"
assert fig.layout[k][2]["xref"] == "x4" and fig.layout[k][2]["yref"] == "y4"
def test_no_exclude_empty_subplots():
for k, fun, d, fun2, d2 in [
(
"shapes",
go.Figure.add_shape,
dict(type="rect", x0=1.5, x1=2.5, y0=3.5, y1=4.5),
# add a different type to make the check easier (otherwise we might
# mix up the objects added before and after fun was run)
go.Figure.add_annotation,
dict(x=1, y=2, text="A"),
),
(
"annotations",
go.Figure.add_annotation,
dict(x=1, y=2, text="A"),
go.Figure.add_layout_image,
dict(x=3, y=4, sizex=2, sizey=3, source="test"),
),
(
"images",
go.Figure.add_layout_image,
dict(x=3, y=4, sizex=2, sizey=3, source="test"),
go.Figure.add_shape,
dict(type="rect", x0=1.5, x1=2.5, y0=3.5, y1=4.5),
),
]:
# make a figure where not all the subplots are populated
fig = make_subplots(2, 2)
fig.add_trace(go.Scatter(x=[1, 2, 3], y=[5, 1, 2]), row=1, col=1)
fig.add_trace(go.Scatter(x=[1, 2, 3], y=[2, 1, -7]), row=2, col=2)
fun2(fig, d2, row=1, col=2)
# add a thing to all subplots but make sure it only goes on the
# plots without data or layout objects
fun(fig, d, row="all", col="all", exclude_empty_subplots=False)
assert len(fig.layout[k]) == 4
assert fig.layout[k][0]["xref"] == "x" and fig.layout[k][0]["yref"] == "y"
assert fig.layout[k][1]["xref"] == "x2" and fig.layout[k][1]["yref"] == "y2"
assert fig.layout[k][2]["xref"] == "x3" and fig.layout[k][2]["yref"] == "y3"
assert fig.layout[k][3]["xref"] == "x4" and fig.layout[k][3]["yref"] == "y4"
def test_supplied_yref_on_single_plot_subplot():
"""test a (1,1) subplot figure object"""
fig = make_subplots(1, 1)
fig.add_trace(go.Scatter(x=[1, 2, 3, 4], y=[1, 2, 2, 1]))
fig.add_trace(go.Scatter(x=[1, 2, 3, 4], y=[4, 3, 2, 1], yaxis="y2"))
fig.update_layout(
yaxis=dict(title="yaxis1 title"),
yaxis2=dict(title="yaxis2 title", overlaying="y", side="right"),
)
# add horizontal line on y2. Secondary_y can be True or False when yref is supplied
fig.add_hline(y=3, yref="y2", secondary_y=True)
assert fig.layout["shapes"][0]["yref"] == "y2"
def test_supplied_yref_on_non_subplot_figure_object():
"""test a non-subplot figure object from go.Figure"""
trace1 = go.Scatter(x=[1, 2, 3, 4], y=[1, 2, 2, 1])
trace2 = go.Scatter(x=[1, 2, 3, 4], y=[4, 3, 2, 1], yaxis="y2")
data = [trace1, trace2]
layout = go.Layout(
yaxis=dict(title="yaxis1 title"),
yaxis2=dict(title="yaxis2 title", overlaying="y", side="right"),
)
fig = go.Figure(data=data, layout=layout)
# add horizontal line on y2. Secondary_y can be True or False when yref is supplied
fig.add_hline(y=3, yref="y2", secondary_y=False)
assert fig.layout["shapes"][0]["yref"] == "y2"
def test_supplied_yref_on_multi_plot_subplot():
"""test multiple subploted figure object with subplots.make_subplots"""
fig = make_subplots(
rows=1,
cols=2,
shared_yaxes=False,
specs=[[{"secondary_y": True}, {"secondary_y": True}]],
)
# Add traces to the first subplot
fig.add_trace(go.Scatter(x=[1, 2, 3], y=[1, 2, 3]), row=1, col=1)
fig.add_trace(
go.Scatter(x=[1, 2, 3], y=[3, 2, 1], yaxis="y2"), row=1, col=1, secondary_y=True
)
# Add traces to the second subplot
fig.add_trace(go.Scatter(x=[1, 2, 3], y=[1, 2, 3], yaxis="y"), row=1, col=2)
fig.add_trace(
go.Scatter(x=[1, 2, 3], y=[1, 1, 2], yaxis="y2"), row=1, col=2, secondary_y=True
)
# add a horizontal line on both subplots on their respective secondary y.
# When using the subplots.make_subplots() method yref parameter should NOT be supplied per docstring instructions.
# Instead secondary_y specs and secondary_y parameter MUST be True to plot on secondary y
fig.add_hline(y=2, row=1, col=1, secondary_y=True)
fig.add_hline(y=1, row=1, col=2, secondary_y=True)
assert fig.layout["shapes"][0]["yref"] == "y2"
assert fig.layout["shapes"][0]["xref"] == "x domain"
assert fig.layout["shapes"][1]["yref"] == "y4"
assert fig.layout["shapes"][1]["xref"] == "x2 domain"
@pytest.fixture
def select_annotations_integer():
fig = make_subplots(2, 3)
fig.add_annotation(row=1, col=2, text="B")
fig.add_annotation(row=2, col=2, text="A")
fig.add_annotation(row=2, col=2, text="B")
fig.add_annotation(row=2, col=2, text="AB")
fig.add_annotation(text="hello")
return fig
def test_select_annotations_integer(select_annotations_integer):
fig = select_annotations_integer
anns = list(fig.select_annotations(selector=-1))
assert (len(anns) == 1) and (anns[0]["text"] == "hello")
anns = list(fig.select_annotations(row=2, col=2, selector=-1))
assert (len(anns) == 1) and anns[0]["text"] == "AB"
anns = list(fig.select_annotations(row=1, col=2, selector=-1))
assert (len(anns) == 1) and anns[0]["text"] == "B"
with pytest.raises(IndexError):
fig.select_annotations(row=2, col=2, selector=3)
|
TestSelectForEachUpdateAnnotations
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-apify/llama_index/readers/apify/dataset/base.py
|
{
"start": 170,
"end": 1596
}
|
class ____(BaseReader):
"""
Apify Dataset reader.
Reads a dataset on the Apify platform.
Args:
apify_api_token (str): Apify API token.
"""
def __init__(self, apify_api_token: str) -> None:
"""Initialize Apify dataset reader."""
from apify_client import ApifyClient
client = ApifyClient(apify_api_token)
if hasattr(client.http_client, "httpx_client"):
client.http_client.httpx_client.headers["user-agent"] += (
"; Origin/llama_index"
)
self.apify_client = client
def load_data(
self, dataset_id: str, dataset_mapping_function: Callable[[Dict], Document]
) -> List[Document]:
"""
Load data from the Apify dataset.
Args:
dataset_id (str): Dataset ID.
dataset_mapping_function (Callable[[Dict], Document]): Function to map dataset items to Document.
Returns:
List[Document]: List of documents.
"""
items_list = self.apify_client.dataset(dataset_id).list_items(clean=True)
document_list = []
for item in items_list.items:
document = dataset_mapping_function(item)
if not isinstance(document, Document):
raise ValueError("Dataset_mapping_function must return a Document")
document_list.append(document)
return document_list
|
ApifyDataset
|
python
|
pikepdf__pikepdf
|
src/pikepdf/form.py
|
{
"start": 700,
"end": 5828
}
|
class ____:
"""Utility class to make it easier to work with interactive forms.
This is easier to use than the core {class}`pikepdf.AcroForm` implementation, but is
higher-level, and abstracts over details in ways which do impose some limitations,
such as failing for PDFs which have multiple fields with the same name.
A non-exhaustive list of limitations:
* No support for signatures
* No support for password fields
* No support for rich text fields
* Multiselect choice fields are treated as single-select
* Generating appearance streams imposes additional limitations (see
{class}`pikepdf.form.DefaultAppearanceStreamGenerator` and
{class}`pikepdf.form.ExtendedAppearanceStreamGenerator` for details.)
"""
generate_appearances: AppearanceStreamGenerator | None = None
"""If provided, this object will be used to generate appearance streams for fields
as the form is filled. If not, the `needs_appearances` flag will be set on the form.
"""
ignore_max_length: bool
"""If True, we will ignore the MaxLen property of any text fields in this form. This
produces a PDF that would typically not be possible to create in an interactive PDF
reader, but this may be desirable or useful if the PDF is intended to be read by
another automated system rather than a human.
"""
_pdf: Pdf
_acroform: AcroForm
_cache: Mapping[str, _FieldWrapper]
def __init__(
self,
pdf: Pdf,
generate_appearances: type[AppearanceStreamGenerator] | None = None,
*,
ignore_max_length=False,
):
"""Initialize the form."""
self._pdf = pdf
self._acroform = pdf.acroform
self._cache = {}
if generate_appearances is not None:
self.generate_appearances = generate_appearances(self._pdf, self._acroform)
self.ignore_max_length = ignore_max_length
def __getattr__(self, name):
return getattr(self._acroform, name)
def __getitem__(self, name: str):
if name in self._cache:
return self._cache[name]
fields = self._acroform.get_fields_with_qualified_name(name)
if not fields:
raise KeyError(name)
if len(fields) > 1:
raise RuntimeError(f'Multiple fields with same name: {name}')
return self._wrap(fields[0], name)
def __contains__(self, name: str):
try:
self.__getitem__(name)
return True
except KeyError:
return False
def items(self) -> Generator[tuple[str, _FieldWrapper]]:
"""Yield (name, field) pairs for all fields in this form."""
seen = set()
for field in self._acroform.fields:
name = field.fully_qualified_name
if name in self._cache and name not in seen:
seen.add(name)
yield name, self._cache[name]
elif (
name in self._cache
and field.is_radio_button
and field.parent == self._cache[name]._field
):
# We already returned the parent of this radio button
continue
elif name in self._cache:
raise RuntimeError(f'Multiple fields with same name: {name}')
elif field.is_radio_button:
# QPDF does something here which is perhaps not entirely correct by the
# spec, and which causes issues. By the spec, a radio button group is a
# single field with multiple widget annotations in the Kids array. (See
# 12.7.5.2.4 of the 2.0 spec) However, QPDF here treats is as a group
# containing separate terminal fields for each button, each inheriting
# the same name. Fortunately, the implementation of
# `get_fields_with_qualified_name` seems to be correct, so we'll fall
# back to using that.
fields = self._acroform.get_fields_with_qualified_name(name)
if len(fields) > 1:
raise RuntimeError(f'Multiple fields with same name: {name}')
seen.add(name)
yield name, self._wrap(fields[0], name)
else:
seen.add(name)
yield name, self._wrap(field, name)
def __iter__(self):
for name, item in self.items():
yield item
def _wrap(self, field: AcroFormField, name: str):
if field.is_text:
wrapped = TextField(self, field)
elif field.is_checkbox:
wrapped = CheckboxField(self, field)
elif field.is_radio_button:
wrapped = RadioButtonGroup(self, field)
elif field.is_pushbutton:
wrapped = PushbuttonField(self, field)
elif field.is_choice:
wrapped = ChoiceField(self, field)
elif field.field_type == Name.Sig:
wrapped = SignatureField(self, field)
else:
raise RuntimeError('Unknown field type')
self._cache[name] = wrapped
return wrapped
|
Form
|
python
|
dagster-io__dagster
|
python_modules/dagster-test/dagster_test/components/test_utils/basic_components.py
|
{
"start": 1249,
"end": 1376
}
|
class ____(BaseModel):
nested: dict[str, MyNestedModel]
model_config = ConfigDict(extra="forbid")
|
MyNestedComponentModel
|
python
|
keras-team__keras
|
keras/src/saving/serialization_lib_test.py
|
{
"start": 1198,
"end": 1294
}
|
class ____(keras.layers.Wrapper):
def call(self, x):
return self.layer(x)
|
WrapperLayer
|
python
|
doocs__leetcode
|
solution/2200-2299/2293.Min Max Game/Solution.py
|
{
"start": 0,
"end": 302
}
|
class ____:
def minMaxGame(self, nums: List[int]) -> int:
n = len(nums)
while n > 1:
n >>= 1
for i in range(n):
a, b = nums[i << 1], nums[i << 1 | 1]
nums[i] = min(a, b) if i % 2 == 0 else max(a, b)
return nums[0]
|
Solution
|
python
|
django__django
|
tests/test_client/views.py
|
{
"start": 12711,
"end": 12868
}
|
class ____(Exception):
def __init__(self, one, two):
pass
def two_arg_exception(request):
raise TwoArgException("one", "two")
|
TwoArgException
|
python
|
RaRe-Technologies__gensim
|
gensim/test/test_corpora.py
|
{
"start": 16093,
"end": 17011
}
|
class ____(CorpusTestCase):
def setUp(self):
self.corpus_class = bleicorpus.BleiCorpus
self.file_extension = '.blei'
def test_save_format_for_dtm(self):
corpus = [[(1, 1.0)], [], [(0, 5.0), (2, 1.0)], []]
test_file = get_tmpfile('gensim_corpus.tst')
self.corpus_class.save_corpus(test_file, corpus)
with open(test_file) as f:
for line in f:
# unique_word_count index1:count1 index2:count2 ... indexn:count
tokens = line.split()
words_len = int(tokens[0])
if words_len > 0:
tokens = tokens[1:]
else:
tokens = []
self.assertEqual(words_len, len(tokens))
for token in tokens:
word, count = token.split(':')
self.assertEqual(count, str(int(count)))
|
TestBleiCorpus
|
python
|
numba__numba
|
numba/tests/test_serialize.py
|
{
"start": 7462,
"end": 8053
}
|
class ____(TestCase):
def test_numba_unpickle(self):
# Test that _numba_unpickle is memorizing its output
from numba.core.serialize import _numba_unpickle
random_obj = object()
bytebuf = pickle.dumps(random_obj)
hashed = hash(random_obj)
got1 = _numba_unpickle(id(random_obj), bytebuf, hashed)
# not the original object
self.assertIsNot(got1, random_obj)
got2 = _numba_unpickle(id(random_obj), bytebuf, hashed)
# unpickled results are the same objects
self.assertIs(got1, got2)
|
TestSerializationMisc
|
python
|
eventlet__eventlet
|
eventlet/hubs/__init__.py
|
{
"start": 5979,
"end": 6013
}
|
class ____(IOError):
pass
|
IOClosed
|
python
|
doocs__leetcode
|
solution/0300-0399/0362.Design Hit Counter/Solution.py
|
{
"start": 0,
"end": 406
}
|
class ____:
def __init__(self):
self.ts = []
def hit(self, timestamp: int) -> None:
self.ts.append(timestamp)
def getHits(self, timestamp: int) -> int:
return len(self.ts) - bisect_left(self.ts, timestamp - 300 + 1)
# Your HitCounter object will be instantiated and called as such:
# obj = HitCounter()
# obj.hit(timestamp)
# param_2 = obj.getHits(timestamp)
|
HitCounter
|
python
|
realpython__materials
|
structural-pattern-matching/repl_enhanced.py
|
{
"start": 366,
"end": 1778
}
|
class ____:
indentation_level: int = 0
def __post_init__(self) -> None:
readline.parse_and_bind("tab: complete")
readline.set_completer(rlcompleter.Completer().complete)
if PYTHON_HISTORY.exists():
readline.read_history_file(PYTHON_HISTORY)
atexit.register(readline.write_history_file, PYTHON_HISTORY)
@property
def prompt(self) -> str:
if self.indentation_level > 0:
return INDENTED_PROMPT
else:
return STANDARD_PROMPT
@property
def indentation(self) -> str:
return TAB * self.indentation_level
def indent(self) -> None:
self.indentation_level += 1
def dedent(self) -> None:
if self.indentation_level > 0:
self.indentation_level -= 1
def reindent(self, line: str) -> None:
num_leading_spaces = len(line) - len(line.lstrip())
new_indentation_level = num_leading_spaces // TAB_WIDTH
if new_indentation_level < self.indentation_level:
self.indentation_level = new_indentation_level
def input(self) -> str:
def hook():
readline.insert_text(self.indentation)
readline.redisplay()
try:
readline.set_pre_input_hook(hook)
result = input(self.prompt)
return result
finally:
readline.set_pre_input_hook()
@dataclass
|
Console
|
python
|
walkccc__LeetCode
|
solutions/1877. Minimize Maximum Pair Sum in Array/1877.py
|
{
"start": 0,
"end": 161
}
|
class ____:
def minPairSum(self, nums: list[int]) -> int:
nums.sort()
return max(nums[i] + nums[len(nums) - 1 - i] for i in range(len(nums) // 2))
|
Solution
|
python
|
huggingface__transformers
|
src/transformers/models/emu3/image_processing_emu3.py
|
{
"start": 2734,
"end": 27211
}
|
class ____(BaseImageProcessor):
r"""
Constructs a Emu3 image processor that dynamically resizes images based on the original images.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use when resizing the image.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
number of patches in the batch. Padding will be applied to the bottom and right with zeros.
min_pixels (`int`, *optional*, defaults to `512 * 512`):
The min pixels of the image to resize the image.
max_pixels (`int`, *optional*, defaults to `1024 * 1024`):
The max pixels of the image to resize the image.
spatial_factor (`int`, *optional*, defaults to 8):
The spatial downsample factor the image will be downsampled in feature extracting phase
"""
model_input_names = ["pixel_values", "image_sizes"]
valid_kwargs = Emu3ImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: bool = True,
do_pad: bool = True,
min_pixels: int = 512 * 512,
max_pixels: int = 1024 * 1024,
spatial_factor: int = 8,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.do_resize = do_resize
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.min_pixels = min_pixels
self.max_pixels = max_pixels
self.spatial_factor = spatial_factor
self.size = {"min_pixels": min_pixels, "max_pixels": max_pixels}
self.do_convert_rgb = do_convert_rgb
def _preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: Optional[bool] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
vision_info (`list[Dict]`, *optional*):
Optional list of dictionaries containing additional information about vision inputs.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
images = make_flat_list_of_images(images)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if is_scaled_image(images[0]) and do_rescale:
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
height, width = get_image_size(images[0], channel_dim=input_data_format)
resized_height, resized_width = height, width
processed_images = []
for image in images:
if do_resize:
resized_height, resized_width = smart_resize(
height,
width,
factor=self.spatial_factor,
min_pixels=self.min_pixels,
max_pixels=self.max_pixels,
)
image = resize(
image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format
)
if do_rescale:
image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
processed_images.append(image)
images = np.array(processed_images)
return images
def _pad_for_batching(
self,
pixel_values: list[np.ndarray],
image_sizes: list[list[int]],
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Pads images on the `num_of_patches` dimension with zeros to form a batch of same number of patches.
Args:
pixel_values (`list[np.ndarray]`):
An array of pixel values of each images of shape (`batch_size`, `num_patches`, `image_in_3D`)
image_sizes (`list[list[int]]`):
A list of sizes for each image in `pixel_values` in (height, width) format.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
If unset, will use the inferred format of the input image.
Returns:
list[`np.ndarray`]: The padded images.
"""
max_shape = (
max(size[0] for size in image_sizes),
max(size[1] for size in image_sizes),
)
pixel_values = [
pad(
image,
padding=((0, max_shape[0] - size[0]), (0, max_shape[1] - size[1])),
data_format=data_format,
input_data_format=input_data_format,
)
for image, size in zip(pixel_values, image_sizes)
]
return pixel_values
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: Optional[bool] = None,
do_pad: bool = True,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
number of patches in the batch. Padding will be applied to the bottom and right with zeros.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
do_pad = do_pad if do_pad is not None else self.do_pad
if images is not None:
images = self.fetch_images(images)
images = make_nested_list_of_images(images)
if images is not None and not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
pixel_values = []
for image in images:
if image:
image = self._preprocess(
image,
do_resize=do_resize,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
data_format=data_format,
do_convert_rgb=do_convert_rgb,
input_data_format=input_data_format,
)
pixel_values.extend(image)
image_sizes = [image.shape[-2:] for image in pixel_values]
if do_pad:
pixel_values = self._pad_for_batching(pixel_values, image_sizes)
pixel_values = np.array(pixel_values)
return BatchFeature(
data={"pixel_values": pixel_values, "image_sizes": image_sizes}, tensor_type=return_tensors
)
def postprocess(
self,
images: ImageInput,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
return_tensors: Union[str, TensorType] = "PIL.Image.Image",
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Postprocess an image or batch of images tensor. Postprocess is the reverse process of preprocess.
The parameters should be same as in preprocess.
Args:
images (`ImageInput`):
Image to postprocess. Expects a single or batch of images with pixel values ranging from -1 to 1.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = 1.0 / self.rescale_factor if rescale_factor is None else rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_flat_list_of_images(images)
if isinstance(images[0], Image.Image):
return images if len(images) > 1 else images[0]
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
pixel_values = []
for image in images:
image = to_numpy_array(image)
if do_normalize:
image = self.unnormalize(
image=image, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format
)
if do_rescale:
image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
image = image.clip(0, 255).astype(np.uint8)
if do_normalize and do_rescale and return_tensors == "PIL.Image.Image":
image = to_channel_dimension_format(image, ChannelDimension.LAST, input_channel_dim=input_data_format)
pixel_values.append(Image.fromarray(image))
else:
pixel_values.extend(image)
data = {"pixel_values": pixel_values}
return_tensors = return_tensors if return_tensors != "PIL.Image.Image" else None
return BatchFeature(data=data, tensor_type=return_tensors)
def unnormalize(
self,
image: np.ndarray,
image_mean: Union[float, Iterable[float]],
image_std: Union[float, Iterable[float]],
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
Unnormalizes `image` using the mean and standard deviation specified by `mean` and `std`.
image = (image * image_std) + image_mean
Args:
image (`torch.Tensor` of shape `(batch_size, num_channels, image_size, image_size)` or `(num_channels, image_size, image_size)`):
Batch of pixel values to postprocess.
image_mean (`float` or `Iterable[float]`):
The mean to use for unnormalization.
image_std (`float` or `Iterable[float]`):
The standard deviation to use for unnormalization.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
num_channels = 3
if isinstance(image_mean, Iterable):
if len(image_mean) != num_channels:
raise ValueError(f"mean must have {num_channels} elements if it is an iterable, got {len(image_mean)}")
else:
image_mean = [image_mean] * num_channels
if isinstance(image_std, Iterable):
if len(image_std) != num_channels:
raise ValueError(f"std must have {num_channels} elements if it is an iterable, got {len(image_std)}")
else:
image_std = [image_std] * num_channels
rev_image_mean = tuple(-mean / std for mean, std in zip(image_mean, image_std))
rev_image_std = tuple(1 / std for std in image_std)
image = self.normalize(
image=image, mean=rev_image_mean, std=rev_image_std, input_data_format=input_data_format
)
return image
__all__ = ["Emu3ImageProcessor"]
|
Emu3ImageProcessor
|
python
|
h5py__h5py
|
h5py/_hl/dataset.py
|
{
"start": 12011,
"end": 15429
}
|
class ____:
"""
Class to iterate through list of chunks of a given dataset
"""
def __init__(self, dset, source_sel=None):
self._shape = dset.shape
rank = len(dset.shape)
if not dset.chunks:
# can only use with chunked datasets
raise TypeError("Chunked dataset required")
self._layout = dset.chunks
if source_sel is None:
# select over entire dataset
self._sel = tuple(
slice(0, self._shape[dim]) for dim in range(rank)
)
else:
if isinstance(source_sel, (slice, int)):
sel = [source_sel]
else:
sel = list(source_sel)
if len(sel) != rank:
raise ValueError("Invalid selection - selection region must have same rank as dataset")
for dim, s in enumerate(sel):
start: int | None
stop: int | None
step: int | None
match s:
case int():
start = s
stop = s + 1
step = None
case slice():
start = s.start or 0
stop = s.stop or self._shape[dim]
step = s.step
case _:
# TODO: use typing.assert_never when Python 3.10 is dropped
raise AssertionError(f'{s}: Selection object must be a slice or integer')
sel[dim] = slice(start, stop, step)
self._sel = tuple(sel)
self._chunk_index = []
for dim in range(rank):
s = self._sel[dim]
if s.start < 0 or s.stop > self._shape[dim] or s.stop <= s.start:
raise ValueError("Invalid selection - selection region must be within dataset space")
index = s.start // self._layout[dim]
self._chunk_index.append(index)
def __iter__(self):
return self
def __next__(self):
rank = len(self._shape)
slices = []
if rank == 0 or self._chunk_index[0] * self._layout[0] >= self._sel[0].stop:
# ran past the last chunk, end iteration
raise StopIteration()
for dim in range(rank):
s = self._sel[dim]
start = self._chunk_index[dim] * self._layout[dim]
stop = (self._chunk_index[dim] + 1) * self._layout[dim]
# adjust the start if this is an edge chunk
if start < s.start:
start = s.start
if stop > s.stop:
stop = s.stop # trim to end of the selection
s = slice(start, stop, 1)
slices.append(s)
# bump up the last index and carry forward if we run outside the selection
dim = rank - 1
while dim >= 0:
s = self._sel[dim]
self._chunk_index[dim] += 1
chunk_end = self._chunk_index[dim] * self._layout[dim]
if chunk_end < s.stop:
# we still have room to extend along this dimensions
return tuple(slices)
if dim > 0:
# reset to the start and continue iterating with higher dimension
self._chunk_index[dim] = s.start // self._layout[dim]
dim -= 1
return tuple(slices)
|
ChunkIterator
|
python
|
rapidsai__cudf
|
python/cudf/cudf/core/series.py
|
{
"start": 14290,
"end": 123173
}
|
class ____(SingleColumnFrame, IndexedFrame):
"""
One-dimensional GPU array (including time series).
Labels need not be unique but must be a hashable type. The object
supports both integer- and label-based indexing and provides a
host of methods for performing operations involving the index.
Statistical methods from ndarray have been overridden to
automatically exclude missing data (currently represented
as null/NaN).
Operations between Series (`+`, `-`, `/`, `*`, `**`) align
values based on their associated index values, they need
not be the same length. The result index will be the
sorted union of the two indexes.
``Series`` objects are used as columns of ``DataFrame``.
Parameters
----------
data : array-like, Iterable, dict, or scalar value
Contains data stored in Series.
index : array-like or Index (1d)
Values must be hashable and have the same length
as data. Non-unique index values are allowed. Will
default to RangeIndex (0, 1, 2, ..., n) if not provided.
If both a dict and index sequence are used, the index will
override the keys found in the dict.
dtype : str, :class:`numpy.dtype`, or ExtensionDtype, optional
Data type for the output Series. If not specified,
this will be inferred from data.
name : str, optional
The name to give to the Series.
copy : bool, default False
Copy input data. Only affects Series or 1d ndarray input.
nan_as_null : bool, Default True
If ``None``/``True``, converts ``np.nan`` values to
``null`` values.
If ``False``, leaves ``np.nan`` values as is.
"""
_accessors: set[Any] = set()
_loc_indexer_type = _SeriesLocIndexer
_iloc_indexer_type = _SeriesIlocIndexer
_groupby = SeriesGroupBy
_resampler = SeriesResampler
# The `constructor*` properties are used by `dask` (and `dask_cudf`)
@property
def _constructor(self):
return Series
@property
def _constructor_sliced(self):
raise NotImplementedError(
"_constructor_sliced not supported for Series!"
)
@property
def _constructor_expanddim(self):
return cudf.DataFrame
@classmethod
@_performance_tracking
def from_arrow(cls, array: pa.Array) -> Self:
"""Create from PyArrow Array/ChunkedArray.
Parameters
----------
array : PyArrow Array/ChunkedArray
PyArrow Object which has to be converted.
Raises
------
TypeError for invalid input type.
Returns
-------
SingleColumnFrame
Examples
--------
>>> import cudf
>>> import pyarrow as pa
>>> cudf.Series.from_arrow(pa.array(["a", "b", None]))
0 a
1 b
2 <NA>
dtype: object
"""
return cls._from_column(ColumnBase.from_arrow(array))
@_performance_tracking
def __init__(
self,
data=None,
index=None,
dtype: Dtype | None = None,
name=None,
copy=False,
nan_as_null=no_default,
):
if nan_as_null is no_default:
nan_as_null = not cudf.get_option("mode.pandas_compatible")
index_from_data = None
name_from_data = None
if data is None:
data = {}
if dtype is not None:
dtype = cudf.dtype(dtype)
attrs = None
if isinstance(data, (pd.Series, pd.Index, Index, Series)):
attrs = deepcopy(getattr(data, "attrs", None))
if copy and not isinstance(data, (pd.Series, pd.Index)):
data = data.copy(deep=True)
name_from_data = data.name
column = as_column(data, nan_as_null=nan_as_null, dtype=dtype)
if isinstance(data, (pd.Series, Series)):
index_from_data = ensure_index(data.index)
elif isinstance(data, ColumnAccessor):
raise TypeError(
"Use cudf.Series._from_data for constructing a Series from "
"ColumnAccessor"
)
elif isinstance(data, ColumnBase):
raise TypeError(
"Use cudf.Series._from_column for constructing a Series from "
"a ColumnBase"
)
elif isinstance(data, dict):
if not data:
column = as_column(data, nan_as_null=nan_as_null, dtype=dtype)
index_from_data = RangeIndex(0)
else:
column = as_column(
list(data.values()), nan_as_null=nan_as_null, dtype=dtype
)
index_from_data = cudf.Index(list(data.keys()))
else:
# Using `getattr_static` to check if
# `data` is on device memory and perform
# a deep copy later. This is different
# from `hasattr` because, it doesn't
# invoke the property we are looking
# for and the latter actually invokes
# the property, which in this case could
# be expensive or mark a buffer as
# unspillable.
has_cai = (
type(
inspect.getattr_static(
data, "__cuda_array_interface__", None
)
)
is property
)
column = as_column(
data,
nan_as_null=nan_as_null,
dtype=dtype,
length=len(index) if index is not None else None,
)
if copy and has_cai:
column = column.copy(deep=True)
assert isinstance(column, ColumnBase)
if name_from_data is not None and name is None:
name = name_from_data
if index is not None:
index = ensure_index(index)
if index_from_data is not None:
first_index = index_from_data
second_index = index
elif index is None:
first_index = RangeIndex(len(column))
second_index = None
else:
first_index = index
second_index = None
super().__init__({name: column}, index=first_index, attrs=attrs)
if second_index is not None:
reindexed = self.reindex(index=second_index, copy=False)
self._data = reindexed._data
self._index = second_index
@classmethod
@_performance_tracking
def _from_column(
cls,
column: ColumnBase,
*,
name: Hashable = None,
index: Index | None = None,
attrs: dict | None = None,
) -> Self:
ca = ColumnAccessor({name: column}, verify=False)
return cls._from_data(ca, index=index, attrs=attrs)
@classmethod
@_performance_tracking
def _from_data(
cls,
data: MutableMapping,
index: Index | None = None,
name: Any = no_default,
attrs: dict | None = None,
) -> Series:
out = super()._from_data(data=data, index=index, attrs=attrs)
if name is not no_default:
out.name = name
return out
@_performance_tracking
def _from_data_like_self(self, data: MutableMapping) -> Self:
    """Build a new Series from ``data``, propagating this Series' name."""
    result = super()._from_data_like_self(data)
    result.name = self.name
    return result
@_performance_tracking
def __contains__(self, item) -> bool:
    """Return True if ``item`` is a label in the index.

    Note this tests index membership, not membership among the values.
    """
    return self.index.__contains__(item)
@property
def iat(self):
    """
    Alias for ``Series.iloc``; provided for compatibility with Pandas.
    """
    # Integer-position based scalar accessor.
    return _SeriesiAtIndexer(self)

@property
def at(self):
    """
    Alias for ``Series.loc``; provided for compatibility with Pandas.
    """
    # Label based scalar accessor.
    return _SeriesAtIndexer(self)
@classmethod
@_performance_tracking
def from_pandas(cls, s: pd.Series, nan_as_null=no_default) -> Series:
    """
    Convert from a Pandas Series.

    Parameters
    ----------
    s : Pandas Series object
        A Pandas Series object which has to be converted
        to cuDF Series.
    nan_as_null : bool, Default None
        If ``None``/``True``, converts ``np.nan`` values to
        ``null`` values.
        If ``False``, leaves ``np.nan`` values as is.

    Raises
    ------
    TypeError for invalid input type.

    Examples
    --------
    >>> import cudf
    >>> import pandas as pd
    >>> import numpy as np
    >>> data = [10, 20, 30, np.nan]
    >>> pds = pd.Series(data, dtype='float64')
    >>> cudf.Series.from_pandas(pds)
    0    10.0
    1    20.0
    2    30.0
    3    <NA>
    dtype: float64
    >>> cudf.Series.from_pandas(pds, nan_as_null=False)
    0    10.0
    1    20.0
    2    30.0
    3     NaN
    dtype: float64
    """
    # Deprecated entry point: the Series constructor performs the
    # identical conversion.
    warnings.warn(
        "from_pandas is deprecated and will be removed in a future version. "
        "Use the Series constructor instead.",
        FutureWarning,
    )
    if nan_as_null is no_default:
        # pandas-compatible mode preserves NaN; otherwise defer the
        # decision to the constructor (None).
        nan_as_null = (
            False if cudf.get_option("mode.pandas_compatible") else None
        )
    with warnings.catch_warnings():
        # Presumably suppresses FutureWarnings raised inside the
        # constructor so callers see only the deprecation warning above
        # — TODO confirm which warning this targets.
        warnings.simplefilter("ignore", FutureWarning)
        result = cls(s, nan_as_null=nan_as_null)
    return result
@property
@_performance_tracking
def is_unique(self) -> bool:
    """Return boolean if values in the object are unique.

    Returns
    -------
    bool
    """
    # Delegates entirely to the underlying column.
    return self._column.is_unique
@property
@_performance_tracking
def dt(self):
    """
    Accessor object for datetime-like properties of the Series values.

    Examples
    --------
    >>> s = cudf.Series(cudf.date_range(
    ...   start='2001-02-03 12:00:00',
    ...   end='2001-02-03 14:00:00',
    ...   freq='1H'))
    >>> s.dt.hour
    0    12
    1    13
    2    14
    dtype: int16
    >>> s.dt.second
    0    0
    1    0
    2    0
    dtype: int16
    >>> s.dt.day
    0    3
    1    3
    2    3
    dtype: int16

    Returns
    -------
    A Series indexed like the original Series.

    Raises
    ------
    AttributeError if the Series does not contain datetimelike values.
    """
    # numpy dtype kind: "M" == datetime64, "m" == timedelta64.
    kind = self.dtype.kind
    if kind == "M":
        return DatetimeProperties(self)
    if kind == "m":
        return TimedeltaProperties(self)
    raise AttributeError(
        "Can only use .dt accessor with datetimelike values"
    )
@property
@_performance_tracking
def hasnans(self) -> bool:
    """
    Return True if there are any NaNs or nulls.

    Returns
    -------
    out : bool
        If Series has at least one NaN or null value, return True,
        if not return False.

    Examples
    --------
    >>> import cudf
    >>> import numpy as np
    >>> series = cudf.Series([1, 2, np.nan, 3, 4], nan_as_null=False)
    >>> series
    0    1.0
    1    2.0
    2    NaN
    3    3.0
    4    4.0
    dtype: float64
    >>> series.hasnans
    True

    `hasnans` returns `True` for the presence of any `NA` values:

    >>> series = cudf.Series([1, 2, 3, None, 4])
    >>> series
    0       1
    1       2
    2       3
    3    <NA>
    4       4
    dtype: int64
    >>> series.hasnans
    True
    """
    # Unlike ``has_nulls``, floating-point NaNs are counted too.
    return self._column.has_nulls(include_nan=True)
@_performance_tracking
def drop(
    self,
    labels=None,
    axis=0,
    index=None,
    columns=None,
    level=None,
    inplace: bool = False,
    errors: Literal["ignore", "raise"] = "raise",
) -> Self | None:
    """Drop the given index labels from the Series.

    A Series has a single axis, so ``axis=1`` is rejected and any
    ``columns`` argument is silently neutralized.
    """
    if axis == 1:
        raise ValueError("No axis named 1 for object type Series")
    if columns is not None:
        # Series has no columns; normalize to "drop no columns".
        columns = []
    return super().drop(
        labels, axis, index, columns, level, inplace, errors
    )
@_performance_tracking
def to_dict(self, into: type[dict] = dict) -> dict:
    """
    Convert Series to {label -> value} dict or dict-like object.

    Conversion happens on the host: the Series is copied to pandas
    first, then pandas performs the dict construction.

    Parameters
    ----------
    into : class, default dict
        The collections.abc.Mapping subclass to use as the return
        object. Can be the actual class or an empty
        instance of the mapping type you want. If you want a
        collections.defaultdict, you must pass it initialized.

    Returns
    -------
    collections.abc.Mapping
        Key-value representation of Series.

    Examples
    --------
    >>> import cudf
    >>> s = cudf.Series([1, 2, 3, 4])
    >>> s.to_dict()
    {0: 1, 1: 2, 2: 3, 3: 4}
    >>> from collections import OrderedDict, defaultdict
    >>> s.to_dict(OrderedDict)  # doctest: +SKIP
    OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
    >>> dd = defaultdict(list)
    >>> s.to_dict(dd)
    defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4})
    """
    host_series = self.to_pandas()
    return host_series.to_dict(into=into)
@_performance_tracking
def reindex(
    self,
    index=None,
    *,
    axis: Axis | None = None,
    method: str | None = None,
    copy: bool = True,
    level=None,
    fill_value: ScalarLike | None = None,
    limit: int | None = None,
    tolerance=None,
) -> Self:
    """
    Conform Series to new index.

    Parameters
    ----------
    index : Index, Series-convertible, default None
        New labels / index to conform to,
        should be specified using keywords.
    axis: int, default None
        Unused.
    method: Not Supported
    copy : boolean, default True
    level: Not Supported
    fill_value : Value to use for missing values.
        Defaults to ``NA``, but can be any "compatible" value.
    limit: Not Supported
    tolerance: Not Supported

    Returns
    -------
    Series with changed index.

    Examples
    --------
    >>> import cudf
    >>> series = cudf.Series([10, 20, 30, 40], index=['a', 'b', 'c', 'd'])
    >>> series
    a    10
    b    20
    c    30
    d    40
    dtype: int64
    >>> series.reindex(['a', 'b', 'y', 'z'])
    a      10
    b      20
    y    <NA>
    z    <NA>
    dtype: int64

    .. pandas-compat::
        :meth:`pandas.Series.reindex`

        Note: One difference from Pandas is that ``NA`` is used for rows
        that do not match, rather than ``NaN``. One side effect of this is
        that the series retains an integer dtype in cuDF
        where it is cast to float in Pandas.
    """
    if index is None:
        # No target index: conform to the current one (may still copy).
        index = self.index
    if fill_value is None:
        fill_value = cudf.NA
    # The internal reindex needs a hashable column key; unnamed Series
    # use 0. NOTE(review): a falsy name ("" or 0) also collapses to 0
    # here — confirm this is intended.
    name = self.name or 0
    series = self._reindex(
        deep=copy,
        dtypes={name: self.dtype},
        index=index,
        column_names=[name],
        inplace=False,
        fill_value=fill_value,
        level=level,
        method=method,
        limit=limit,
        tolerance=tolerance,
    )
    # Restore the original (possibly None) name after the keyed reindex.
    series.name = self.name
    return series
@_performance_tracking
@docutils.doc_apply(
    doc_reset_index_template.format(
        klass="Series",
        argument="""
        name : object, optional
            The name to use for the column containing the original Series
            values. Uses self.name by default. This argument is ignored when
            ``drop`` is True.""",
        return_type="Series or DataFrame or None",
        return_doc=""" For Series, When drop is False (the default), a DataFrame
            is returned. The newly created columns will come first in the
            DataFrame, followed by the original Series values. When `drop` is
            True, a `Series` is returned. In either case, if ``inplace=True``,
            no value is returned.
""",
        example="""
        >>> series = cudf.Series(['a', 'b', 'c', 'd'], index=[10, 11, 12, 13])
        >>> series
        10    a
        11    b
        12    c
        13    d
        dtype: object
        >>> series.reset_index()
           index  0
        0     10  a
        1     11  b
        2     12  c
        3     13  d
        >>> series.reset_index(drop=True)
        0    a
        1    b
        2    c
        3    d
        dtype: object

        You can also use ``reset_index`` with MultiIndex.

        >>> s2 = cudf.Series(
        ...     range(4), name='foo',
        ...     index=cudf.MultiIndex.from_tuples([
        ...         ('bar', 'one'), ('bar', 'two'),
        ...         ('baz', 'one'), ('baz', 'two')],
        ...         names=['a', 'b']
        ...     ))
        >>> s2
        a    b
        bar  one    0
             two    1
        baz  one    2
             two    3
        Name: foo, dtype: int64
        >>> s2.reset_index(level='a')
               a  foo
        b
        one  bar    0
        two  bar    1
        one  baz    2
        two  baz    3
""",
    )
)
def reset_index(
    self,
    level=None,
    drop: bool = False,
    name=no_default,
    inplace: bool = False,
    allow_duplicates: bool = False,
) -> Self | None:
    # drop=False produces a DataFrame, which cannot replace self.
    if not drop and inplace:
        raise TypeError(
            "Cannot reset_index inplace on a Series to create a DataFrame"
        )
    data, index = self._reset_index(
        level=level, drop=drop, allow_duplicates=allow_duplicates
    )
    if not drop:
        if name is no_default:
            # Values column defaults to the Series name, or 0 if unnamed.
            name = 0 if self.name is None else self.name
        # Rename the values column while keeping it last in the mapping.
        data[name] = data.pop(self.name)
        return self._constructor_expanddim._from_data(
            data, index, attrs=self.attrs
        )
    # For ``name`` behavior, see:
    # https://github.com/pandas-dev/pandas/issues/44575
    # ``name`` has to be ignored when `drop=True`
    return self._mimic_inplace(
        Series._from_data(data, index, self.name, attrs=self.attrs),
        inplace=inplace,
    )
@_performance_tracking
def to_frame(self, name: Hashable = no_default) -> DataFrame:
    """Convert Series into a single-column DataFrame.

    Parameters
    ----------
    name : str, default None
        Name to be used for the column

    Returns
    -------
    DataFrame
        cudf DataFrame

    Examples
    --------
    >>> import cudf
    >>> series = cudf.Series(['a', 'b', 'c', None, 'd'], name='sample', index=[10, 11, 12, 13, 15])
    >>> series
    10       a
    11       b
    12       c
    13    <NA>
    15       d
    Name: sample, dtype: object
    >>> series.to_frame()
       sample
    10      a
    11      b
    12      c
    13   <NA>
    15      d
    """
    frame = self._to_frame(name=name, index=self.index)
    # Propagate user-attached metadata onto the new frame.
    frame._attrs = self.attrs  # type: ignore[has-type]
    return frame
@_performance_tracking
def memory_usage(self, index: bool = True, deep: bool = False) -> int:
    """
    Return the memory usage of the Series.

    Parameters
    ----------
    index : bool, default True
        Specifies whether to include the memory usage of the index.
    deep : bool, default False
        The deep parameter is ignored and is only included for pandas
        compatibility.

    Returns
    -------
    int
        The total memory usage in bytes.

    Examples
    --------
    >>> import cudf
    >>> s = cudf.Series(range(3), index=['a','b','c'])
    >>> s.memory_usage()
    43

    Not including the index gives the size of the rest of the data, which
    is necessarily smaller:

    >>> s.memory_usage(index=False)
    24
    """
    usage = self._column.memory_usage
    if index:
        usage += self.index.memory_usage()
    return usage
@_performance_tracking
def __array_function__(self, func, types, args, kwargs):
    """NumPy NEP-18 dispatch hook.

    Routes ``np.func(series, ...)`` first to a same-named Series method,
    then to the matching cupy function applied to the device arrays.
    Returns ``NotImplemented`` whenever the call cannot be handled
    faithfully, letting NumPy raise its usual TypeError.
    """
    # ``out=`` and foreign types cannot be honored on device data.
    if "out" in kwargs or not all(issubclass(t, Series) for t in types):
        return NotImplemented

    try:
        # Apply a Series method if one exists.
        if cudf_func := getattr(Series, func.__name__, None):
            result = cudf_func(*args, **kwargs)
            if func.__name__ == "unique":
                # NumPy expects a sorted result for `unique`, which is not
                # guaranteed by cudf.Series.unique.
                result = result.sort_values()
            return result

        # Assume that cupy subpackages match numpy and search the
        # corresponding cupy submodule based on the func's __module__.
        numpy_submodule = func.__module__.split(".")[1:]
        cupy_func = cp
        for name in (*numpy_submodule, func.__name__):
            cupy_func = getattr(cupy_func, name, None)

        # Handle case if cupy does not implement the function or just
        # aliases the numpy function.
        if not cupy_func or cupy_func is func:
            return NotImplemented

        # For now just fail on cases with mismatched indices. There is
        # almost certainly no general solution for all array functions.
        index = args[0].index
        if not all(s.index.equals(index) for s in args):
            return NotImplemented
        out = cupy_func(*(s.values for s in args), **kwargs)

        # Return (host) scalar values immediately.
        if not isinstance(out, cp.ndarray):
            return out

        # 0D array (scalar)
        if out.ndim == 0:
            return out.item()
        # 1D array
        elif (
            # Only allow 1D arrays
            ((out.ndim == 1) or (out.ndim == 2 and out.shape[1] == 1))
            # If we have an index, it must be the same length as the
            # output for cupy dispatching to be well-defined.
            and len(index) == len(out)
        ):
            return Series(out, index=index)
    except Exception:
        # The rare instance where a "silent" failure is preferable. Except
        # in the (highly unlikely) case that some other library
        # interoperates with cudf objects, the result will be that numpy
        # raises a TypeError indicating that the operation is not
        # implemented, which is much friendlier than an arbitrary internal
        # cudf error.
        pass
    return NotImplemented
@_performance_tracking
def map(self, arg, na_action: None | Literal["ignore"] = None) -> Self:
    """
    Map values of Series according to input correspondence.

    Used for substituting each value in a Series with another value,
    that may be derived from a function, a ``dict`` or
    a :class:`Series`.

    Parameters
    ----------
    arg : function, collections.abc.Mapping subclass or Series
        Mapping correspondence.
    na_action : {None, 'ignore'}, default None
        If 'ignore', propagate NaN values, without passing them to the
        mapping correspondence.

    Returns
    -------
    Series
        Same index as caller.

    Examples
    --------
    >>> s = cudf.Series(['cat', 'dog', np.nan, 'rabbit'])
    >>> s
    0       cat
    1       dog
    2      <NA>
    3    rabbit
    dtype: object

    ``map`` accepts a ``dict`` or a ``Series``. Values that are not found
    in the ``dict`` are converted to ``NaN``, default values in dicts are
    currently not supported.:

    >>> s.map({'cat': 'kitten', 'dog': 'puppy'})
    0    kitten
    1     puppy
    2      <NA>
    3      <NA>
    dtype: object

    It also accepts numeric functions:

    >>> s = cudf.Series([1, 2, 3, 4, np.nan])
    >>> s.map(lambda x: x ** 2)
    0       1
    1       4
    2       9
    3      16
    4    <NA>
    dtype: int64

    .. pandas-compat::
        :meth:`pandas.Series.map`

        Please note map currently only supports fixed-width numeric
        type functions.
    """
    if not (na_action is None or na_action == "ignore"):
        raise ValueError("na_action must either be 'ignore' or None")
    elif na_action == "ignore":
        raise NotImplementedError(f"{na_action=} is not supported")

    if isinstance(arg, dict):
        if hasattr(arg, "__missing__"):
            raise NotImplementedError(
                "default values in dicts are currently not supported."
            )
        # dict: keys are the lookup values, values the replacements.
        return self._map_via_merge(arg.keys(), arg.values(), len(arg))
    elif isinstance(arg, cudf.Series):
        if not arg.index.is_unique:
            raise ValueError(
                "Reindexing only valid with uniquely valued Index objects"
            )
        # Series: its index supplies the keys, itself the replacements.
        return self._map_via_merge(arg.keys(), arg, len(arg))
    else:
        # Anything else is treated as an elementwise callable.
        return self.apply(arg)

def _map_via_merge(self, keys, values, length: int) -> Series:
    """Replace values via a left join of self against (keys -> values).

    ``orig_order`` preserves the caller's row order across the merge;
    the result keeps this Series' name and index.
    """
    lhs = cudf.DataFrame(
        {"x": self, "orig_order": as_column(range(len(self)))}
    )
    rhs = cudf.DataFrame(
        {
            "x": keys,
            "s": values,
            "bool": as_column(True, length=length, dtype=self.dtype),
        }
    )
    res = lhs.merge(rhs, on="x", how="left").sort_values(by="orig_order")
    result = res["s"]
    result.name = self.name
    result.index = self.index
    return result
def _getitem_preprocessed(
    self,
    spec: indexing_utils.IndexingSpec,
) -> Self | ScalarLike:
    """Get subset of entries given structured data

    Parameters
    ----------
    spec
        Indexing specification

    Returns
    -------
    Subsetted Series or else scalar (if a scalar entry is
    requested)

    Notes
    -----
    This function performs no bounds-checking or massaging of the
    inputs.
    """
    if isinstance(spec, indexing_utils.MapIndexer):
        # Integer gather (fancy indexing).
        result = self._gather(spec.key, keep_index=True)
    elif isinstance(spec, indexing_utils.MaskIndexer):
        # Boolean mask selection.
        result = self._apply_boolean_mask(spec.key, keep_index=True)
    elif isinstance(spec, indexing_utils.SliceIndexer):
        result = self._slice(spec.key)
    elif isinstance(spec, indexing_utils.ScalarIndexer):
        # Single position: return the element itself, not a Series.
        return self._gather(
            spec.key, keep_index=False
        )._column.element_indexing(0)
    elif isinstance(spec, indexing_utils.EmptyIndexer):
        return self._empty_like(keep_index=True)
    else:
        assert_never(spec)
    if isinstance(result.index, cudf.DatetimeIndex):
        # Slicing can preserve a regular frequency; any other selection
        # invalidates it.
        result.index._freq = (
            result.index._get_slice_frequency(spec.key)
            if isinstance(spec, indexing_utils.SliceIndexer)
            else None
        )
    return result
@_performance_tracking
def __getitem__(self, arg):
    """Select by label, falling back to position for slices and, with a
    deprecation warning, for integer keys on non-numeric indexes."""
    if isinstance(arg, slice):
        # Slices are always positional.
        return self.iloc[arg]
    elif is_integer(arg) and not (
        (
            isinstance(self.index.dtype, CategoricalDtype)
            and self.index.dtype.categories.dtype.kind in {"i", "u", "f"}
        )
        or self.index.dtype.kind in {"i", "u", "f"}
        or isinstance(self.index.dtype, IntervalDtype)
    ):
        # Integer key on a non-numeric index: legacy positional lookup,
        # deprecated to match pandas.
        # Do not remove until pandas 3.0 support is added.
        assert PANDAS_LT_300, (
            "Need to drop after pandas-3.0 support is added."
        )
        warnings.warn(
            "Series.__getitem__ treating keys as positions is deprecated "
            "In a future version, integer keys will always be treated as labels "
            "(consistent with DataFrame behavior) To access a value by position, "
            "use `ser.iloc[pos]`",
            FutureWarning,
        )
        return self.iloc[arg]
    else:
        return self.loc[arg]

# pandas-compatible aliases; both iterate (index, value) pairs.
iteritems = SingleColumnFrame.__iter__

items = SingleColumnFrame.__iter__
@_performance_tracking
def __setitem__(self, key, value) -> None:
    """Assign ``value`` by label, or by position when ``key`` is a slice."""
    indexer = self.iloc if isinstance(key, slice) else self.loc
    indexer[key] = value
def __repr__(self) -> str:
    """Render via pandas, trimming long Series to head+tail and patching
    the footer (Name/Length/dtype) to reflect the full cuDF object."""
    _, height = get_terminal_size()
    max_rows = (
        height
        if pd.get_option("display.max_rows") == 0
        else pd.get_option("display.max_rows")
    )
    if max_rows not in (0, None) and len(self) > max_rows:
        # Only materialize the visible head and tail on the host.
        top = self.head(int(max_rows / 2 + 1))
        bottom = self.tail(int(max_rows / 2 + 1))
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", FutureWarning)
            preprocess = cudf.concat([top, bottom])
    else:
        preprocess = self
    if isinstance(preprocess.dtype, CategoricalDtype):
        min_rows = (
            height
            if pd.get_option("display.min_rows") == 0
            else pd.get_option("display.min_rows")
        )
        show_dimensions = pd.get_option("display.show_dimensions")
        preprocess = preprocess.copy(deep=False)
        preprocess.index = preprocess.index._pandas_repr_compatible()
        if preprocess.dtype.categories.dtype.kind == "f":
            # Float categories: go through strings so nulls render as
            # <NA> rather than NaN in the pandas output.
            pd_series = (
                preprocess.astype(CUDF_STRING_DTYPE)
                .to_pandas()
                .astype(
                    dtype=pd.CategoricalDtype(
                        categories=preprocess.dtype.categories.astype(
                            CUDF_STRING_DTYPE
                        ).to_pandas(),
                        ordered=preprocess.dtype.ordered,
                    )
                )
            )
        else:
            pd_series = preprocess.to_pandas()
        output = pd_series.to_string(
            name=self.name,
            dtype=self.dtype,
            min_rows=min_rows,
            max_rows=max_rows,
            length=show_dimensions,
            na_rep=str(cudf.NA),
        )
    else:
        output = repr(preprocess._pandas_repr_compatible().to_pandas())
    lines = output.split("\n")
    if isinstance(preprocess.dtype, CategoricalDtype):
        # Footer line listing the categories; fixed up and re-appended
        # at the end.
        category_memory = lines[-1]
        if preprocess.dtype.categories.dtype.kind == "f":
            category_memory = category_memory.replace("'", "").split(": ")
            category_memory = (
                category_memory[0].replace(
                    "object", preprocess.dtype.categories.dtype.name
                )
                + ": "
                + category_memory[1]
            )
        lines = lines[:-1]
    if len(lines) > 1:
        # Rebuild the footer so Name/Length describe the full Series,
        # not the truncated head+tail preview.
        if lines[-1].startswith("Name: "):
            lines = lines[:-1]
            lines.append("Name: %s" % str(self.name))
            if len(self) > len(preprocess):
                lines[-1] = lines[-1] + ", Length: %d" % len(self)
            lines[-1] = lines[-1] + ", "
        elif lines[-1].startswith("Length: "):
            lines = lines[:-1]
            lines.append("Length: %d" % len(self))
            lines[-1] = lines[-1] + ", "
        else:
            lines = lines[:-1]
            lines[-1] = lines[-1] + "\n"
        lines[-1] = lines[-1] + "dtype: %s" % self.dtype
    else:
        # Empty Series: pandas renders "Series([], dtype: ...)" on one
        # line; swap in our dtype.
        lines = output.split(",")
        lines[-1] = " dtype: %s)" % self.dtype
        return ",".join(lines)
    if isinstance(preprocess._column.dtype, CategoricalDtype):
        lines.append(category_memory)
    return "\n".join(lines)
def _make_operands_and_index_for_binop(
    self,
    other: Any,
    fn: str,
    fill_value: Any = None,
    reflect: bool = False,
    can_reindex: bool = False,
) -> tuple[
    dict[str | None, tuple[ColumnBase, Any, bool, Any]]
    | NotImplementedType,
    Index | None,
    dict[str, Any],
]:
    """Prepare binop operands, aligning indices for Series operands.

    Returns (operands-or-NotImplemented, result index, extra
    ColumnAccessor attributes).
    """
    # Specialize binops to align indices.
    if isinstance(other, Series):
        # Comparisons require identical labels unless reindexing is
        # explicitly permitted.
        if (
            not can_reindex
            and fn in _EQUALITY_OPS
            and not self.index.equals(other.index)
        ):
            raise ValueError(
                "Can only compare identically-labeled Series objects"
            )
        lhs, other = _align_indices([self, other], allow_non_unique=True)
    else:
        lhs = self

    ca_attributes = {}
    # Preserve index level names only when both operand names agree.
    if hasattr(other, "name") and _is_same_name(self.name, other.name):
        ca_attributes["level_names"] = self._data._level_names
    operands = lhs._make_operands_for_binop(other, fill_value, reflect)
    return operands, lhs.index, ca_attributes
# dtype-specific accessor namespaces; documentation is copied from the
# accessor classes themselves via ``copy_docstring``.

@copy_docstring(CategoricalAccessor)  # type: ignore[prop-decorator]
@property
@_performance_tracking
def cat(self):
    return CategoricalAccessor(parent=self)

@copy_docstring(StringMethods)  # type: ignore[prop-decorator]
@property
@_performance_tracking
def str(self):
    return StringMethods(parent=self)

@copy_docstring(ListMethods)  # type: ignore[prop-decorator]
@property
@_performance_tracking
def list(self):
    return ListMethods(parent=self)

@copy_docstring(StructMethods)  # type: ignore[prop-decorator]
@property
@_performance_tracking
def struct(self):
    return StructMethods(parent=self)

@property
@_performance_tracking
def dtypes(self):
    """The dtype of the Series.

    This is an alias for `Series.dtype`.
    """
    return self.dtype
@classmethod
@_performance_tracking
def _concat(cls, objs, axis: Axis = 0, index: bool = True) -> Self:
    """Concatenate Series objects into one Series.

    ``index=True`` concatenates the input indexes; ``index=False``
    yields a default index. Non-categorical dtype mismatches are
    resolved by casting everything to a common dtype.
    """
    # Concatenate index if not provided
    if index is True:
        if isinstance(objs[0].index, cudf.MultiIndex):
            result_index = cudf.MultiIndex._concat([o.index for o in objs])
        else:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", FutureWarning)
                result_index = Index._concat([o.index for o in objs])
    elif index is False:
        result_index = None
    else:
        raise ValueError(f"{index=} must be a bool")

    # Result keeps a name only when all inputs agree on it.
    names = {obj.name for obj in objs}
    if len(names) == 1:
        [name] = names
    else:
        name = None

    if len(objs) > 1:
        dtype_mismatch = False
        for obj in objs[1:]:
            # All-null, empty and categorical inputs don't force a cast.
            if (
                obj.null_count == len(obj)
                or len(obj) == 0
                or isinstance(obj._column.dtype, CategoricalDtype)
                or isinstance(objs[0]._column.dtype, CategoricalDtype)
            ):
                continue

            if (
                not dtype_mismatch
                and (
                    not isinstance(objs[0]._column.dtype, CategoricalDtype)
                    and not isinstance(obj._column.dtype, CategoricalDtype)
                )
                and objs[0].dtype != obj.dtype
            ):
                dtype_mismatch = True

            # Object/non-object mixes cannot be reconciled by casting.
            if is_mixed_with_object_dtype(objs[0], obj):
                raise TypeError(
                    "cudf does not support mixed types, please type-cast "
                    "both series to same dtypes."
                )

        if dtype_mismatch:
            common_dtype = find_common_type([obj.dtype for obj in objs])
            objs = [obj.astype(common_dtype) for obj in objs]

    col = concat_columns([o._column for o in objs])

    if len(objs):
        # Re-attach extension/type metadata lost by the raw column concat.
        col = col._with_type_metadata(objs[0].dtype)

    result = cls._from_column(col, name=name, index=result_index)
    if cudf.get_option("mode.pandas_compatible"):
        if isinstance(result.index, DatetimeIndex):
            # Best-effort frequency inference for the combined index.
            try:
                result.index._freq = result.index.inferred_freq
            except NotImplementedError:
                result.index._freq = None
    return result
@property
@_performance_tracking
def valid_count(self) -> int:
    """Number of non-null values"""
    return len(self) - self._column.null_count

@property
@_performance_tracking
def null_count(self) -> int:
    """Number of null values"""
    return self._column.null_count

@property
@_performance_tracking
def has_nulls(self) -> bool:
    """
    Indicator whether Series contains null values.

    Returns
    -------
    out : bool
        If Series has at least one null value, return True, if not
        return False.

    Examples
    --------
    >>> import cudf
    >>> series = cudf.Series([1, 2, None, 3, 4])
    >>> series
    0       1
    1       2
    2    <NA>
    3       3
    4       4
    dtype: int64
    >>> series.has_nulls
    True
    >>> series.dropna().has_nulls
    False
    """
    # Unlike ``hasnans``, this does not pass include_nan, so presumably
    # floating-point NaNs are not counted — cf. the default of
    # Column.has_nulls.
    return self._column.has_nulls()
@_performance_tracking
def dropna(
    self,
    axis: Axis = 0,
    inplace: bool = False,
    how: Literal["any", "all"] | None = None,
    ignore_index: bool = False,
) -> Self | None:
    """
    Return a Series with null values removed.

    Parameters
    ----------
    axis : {0 or 'index'}, default 0
        There is only one axis to drop values from.
    inplace : bool, default False
        If True, do operation inplace and return None.
    how : str, optional
        Not in use. Kept for compatibility.
    ignore_index : bool, default ``False``
        If ``True``, the resulting axis will be labeled 0, 1, …, n - 1.

    Returns
    -------
    Series
        Series with null entries dropped from it.

    See Also
    --------
    Series.isna : Indicate null values.
    Series.notna : Indicate non-null values.
    Series.fillna : Replace null values.
    cudf.DataFrame.dropna : Drop rows or columns which
        contain null values.
    cudf.Index.dropna : Drop null indices.

    Examples
    --------
    >>> import cudf
    >>> ser = cudf.Series([1, 2, None])
    >>> ser
    0       1
    1       2
    2    <NA>
    dtype: int64

    Drop null values from a Series.

    >>> ser.dropna()
    0    1
    1    2
    dtype: int64

    Keep the Series with valid entries in the same variable.

    >>> ser.dropna(inplace=True)
    >>> ser
    0    1
    1    2
    dtype: int64

    Empty strings are not considered null values.
    `None` is considered a null value.

    >>> ser = cudf.Series(['', None, 'abc'])
    >>> ser
    0
    1    <NA>
    2     abc
    dtype: object
    >>> ser.dropna()
    0
    2    abc
    dtype: object
    """
    if axis not in (0, "index"):
        raise ValueError(
            "Series.dropna supports only one axis to drop values from"
        )

    result = super().dropna(axis=axis)

    if ignore_index:
        # Discard original labels in favor of a fresh RangeIndex.
        result.index = RangeIndex(len(result))

    return self._mimic_inplace(result, inplace=inplace)
@_performance_tracking
def drop_duplicates(
    self,
    keep: Literal["first", "last", False] = "first",
    inplace: bool = False,
    ignore_index: bool = False,
) -> Self | None:
    """
    Return Series with duplicate values removed.

    Parameters
    ----------
    keep : {'first', 'last', ``False``}, default 'first'
        Method to handle dropping duplicates:

        - 'first' : Drop duplicates except for the first occurrence.
        - 'last' : Drop duplicates except for the last occurrence.
        - ``False`` : Drop all duplicates.

    inplace : bool, default ``False``
        If ``True``, performs operation inplace and returns None.

    Returns
    -------
    Series or None
        Series with duplicates dropped or None if ``inplace=True``.

    Examples
    --------
    >>> s = cudf.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],
    ...               name='animal')
    >>> s
    0      lama
    1       cow
    2      lama
    3    beetle
    4      lama
    5     hippo
    Name: animal, dtype: object

    With the `keep` parameter, the selection behavior of duplicated
    values can be changed. The value 'first' keeps the first
    occurrence for each set of duplicated entries.
    The default value of keep is 'first'. Note that order of
    the rows being returned is not guaranteed
    to be sorted.

    >>> s.drop_duplicates()
    0      lama
    1       cow
    3    beetle
    5     hippo
    Name: animal, dtype: object

    The value 'last' for parameter `keep` keeps the last occurrence
    for each set of duplicated entries.

    >>> s.drop_duplicates(keep='last')
    1       cow
    3    beetle
    4      lama
    5     hippo
    Name: animal, dtype: object

    The value `False` for parameter `keep` discards all sets
    of duplicated entries. Setting the value of 'inplace' to
    `True` performs the operation inplace and returns `None`.

    >>> s.drop_duplicates(keep=False, inplace=True)
    >>> s
    1       cow
    3    beetle
    5     hippo
    Name: animal, dtype: object
    """
    # Deduplication itself lives in the base class; this wrapper only
    # adds inplace semantics.
    result = super().drop_duplicates(keep=keep, ignore_index=ignore_index)

    return self._mimic_inplace(result, inplace=inplace)
@_performance_tracking
def fillna(
    self,
    value: None | ScalarLike | Series = None,
    method: Literal["ffill", "bfill", "pad", "backfill"] | None = None,
    axis: Axis | None = None,
    inplace: bool = False,
    limit: int | None = None,
) -> Self | None:
    """Replace nulls with ``value``, or propagate neighbors via ``method``.

    Series-like and mapping values are aligned to this Series' index
    before the base-class fill is applied.
    """
    if isinstance(value, (pd.Series, Mapping)):
        # Normalize host-side inputs so index alignment below works.
        value = Series(value)
    if isinstance(value, cudf.Series):
        if not self.index.equals(value.index):
            value = value.reindex(self.index)
        # Base class expects a {column_name: column} mapping for
        # columnar fill values.
        value = {self.name: value._column}
    return super().fillna(
        value=value, method=method, axis=axis, inplace=inplace, limit=limit
    )
def between(
    self,
    left,
    right,
    inclusive: Literal["both", "neither", "left", "right"] = "both",
) -> Self:
    """
    Return boolean Series equivalent to left <= series <= right.

    This function returns a boolean vector containing `True` wherever the
    corresponding Series element is between the boundary values `left` and
    `right`. NA values are treated as `False`.

    Parameters
    ----------
    left : scalar or list-like
        Left boundary.
    right : scalar or list-like
        Right boundary.
    inclusive : {"both", "neither", "left", "right"}
        Include boundaries. Whether to set each bound as closed or open.

    Returns
    -------
    Series
        Series representing whether each element is between left and
        right (inclusive).

    See Also
    --------
    Series.gt : Greater than of series and other.
    Series.lt : Less than of series and other.

    Notes
    -----
    This function is equivalent to ``(left <= ser) & (ser <= right)``

    Examples
    --------
    >>> import cudf
    >>> s = cudf.Series([2, 0, 4, 8, None])

    Boundary values are included by default:

    >>> s.between(1, 4)
    0     True
    1    False
    2     True
    3    False
    4     <NA>
    dtype: bool

    With `inclusive` set to ``"neither"`` boundary values are excluded:

    >>> s.between(1, 4, inclusive="neither")
    0     True
    1    False
    2    False
    3    False
    4     <NA>
    dtype: bool
    """
    # Validate first so an invalid ``inclusive`` fails before any
    # comparison work is done (matching the original behavior).
    if inclusive not in ("both", "left", "right", "neither"):
        raise ValueError(
            "Inclusive has to be either string of 'both', "
            "'left', 'right', or 'neither'."
        )
    lower = left if is_scalar(left) else as_column(left)
    upper = right if is_scalar(right) else as_column(right)
    # Lower bound is closed for "both"/"left", open otherwise.
    if inclusive in ("both", "left"):
        lmask = self._column >= lower
    else:
        lmask = self._column > lower
    # Upper bound is closed for "both"/"right", open otherwise.
    if inclusive in ("both", "right"):
        rmask = self._column <= upper
    else:
        rmask = self._column < upper
    return self._from_column(
        lmask & rmask, name=self.name, index=self.index, attrs=self.attrs
    )
def _wrap_bool_reduction(self, result):
    """Coerce a plain-bool reduction result to ``np.bool_`` when running
    in pandas-compatible mode on a non-Arrow dtype; pass through
    everything else unchanged."""
    if (
        cudf.get_option("mode.pandas_compatible")
        and isinstance(result, bool)
        and not isinstance(self.dtype, pd.ArrowDtype)
    ):
        return np.bool_(result)
    return result

@_performance_tracking
def all(
    self,
    axis: Axis = 0,
    bool_only: bool | None = None,
    skipna: bool = True,
    **kwargs,
) -> bool | np.bool_:
    """Return whether all elements are truthy (nulls skipped when
    ``skipna``). ``bool_only`` is accepted only for pandas parity."""
    if bool_only not in (None, True):
        raise NotImplementedError(
            "The bool_only parameter is not supported for Series."
        )
    return self._wrap_bool_reduction(super().all(axis, skipna, **kwargs))

@_performance_tracking
def any(
    self,
    axis: Axis = 0,
    bool_only: bool | None = None,
    skipna: bool = True,
    **kwargs,
) -> bool | np.bool_:
    """Return whether any element is truthy (nulls skipped when
    ``skipna``). ``bool_only`` is accepted only for pandas parity."""
    if bool_only not in (None, True):
        raise NotImplementedError(
            "The bool_only parameter is not supported for Series."
        )
    return self._wrap_bool_reduction(super().any(axis, skipna, **kwargs))
@_performance_tracking
def to_pandas(
    self,
    *,
    index: bool = True,
    nullable: bool = False,
    arrow_type: bool = False,
) -> pd.Series:
    """
    Convert to a pandas Series.

    Parameters
    ----------
    index : Boolean, Default True
        If ``index`` is ``True``, converts the index of cudf.Series
        and sets it to the pandas.Series. If ``index`` is ``False``,
        no index conversion is performed and pandas.Series will assign
        a default index.
    nullable : Boolean, Default False
        If ``nullable`` is ``True``, the resulting series will be
        having a corresponding nullable Pandas dtype.
        If there is no corresponding nullable Pandas dtype present,
        the resulting dtype will be a regular pandas dtype.
        If ``nullable`` is ``False``, the resulting series will
        either convert null values to ``np.nan`` or ``None``
        depending on the dtype.
    arrow_type : bool, Default False
        Return the Series with a ``pandas.ArrowDtype``

    Returns
    -------
    out : pandas Series

    Notes
    -----
    nullable and arrow_type cannot both be set to ``True``

    Examples
    --------
    >>> import cudf
    >>> ser = cudf.Series([-3, 2, 0])
    >>> pds = ser.to_pandas()
    >>> pds
    0   -3
    1    2
    2    0
    dtype: int64
    >>> type(pds)
    <class 'pandas.core.series.Series'>

    ``nullable=True`` converts the result to pandas nullable types:

    >>> ser = cudf.Series([10, 20, None, 30])
    >>> ser
    0      10
    1      20
    2    <NA>
    3      30
    dtype: int64
    >>> ser.to_pandas(nullable=True)
    0      10
    1      20
    2    <NA>
    3      30
    dtype: Int64
    >>> ser.to_pandas(nullable=False)
    0    10.0
    1    20.0
    2     NaN
    3    30.0
    dtype: float64

    ``arrow_type=True`` converts the result to ``pandas.ArrowDtype``:

    >>> ser.to_pandas(arrow_type=True)
    0      10
    1      20
    2    <NA>
    3      30
    dtype: int64[pyarrow]
    """
    # ``index`` is reused: bool flag in, pandas Index (or None) out.
    if index is True:
        index = self.index.to_pandas()
    else:
        # None lets pandas assign a default RangeIndex.
        index = None  # type: ignore[assignment]
    res = pd.Series(
        self._column.to_pandas(nullable=nullable, arrow_type=arrow_type),
        index=index,
        name=self.name,
    )
    # Propagate user-attached metadata.
    res.attrs = self.attrs
    return res
@property
@_performance_tracking
def data(self):
    """The gpu buffer for the data

    Returns
    -------
    out : The GPU buffer of the Series.

    Examples
    --------
    >>> import cudf
    >>> series = cudf.Series([1, 2, 3, 4])
    >>> series
    0    1
    1    2
    2    3
    3    4
    dtype: int64
    >>> np.array(series.data.memoryview())
    array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
           0, 0, 4, 0, 0, 0, 0, 0, 0, 0], dtype=uint8)
    """
    # Deprecated accessor; kept only until callers migrate to the
    # pylibcudf path named in the warning below.
    warnings.warn(
        "Series.data is deprecated and will be removed in a future version. "
        "Use Series.to_pylibcudf()[0].data() instead.",
        FutureWarning,
    )
    return self._column.data
@_performance_tracking
def astype(
    self,
    dtype: Dtype | dict[Hashable, Dtype],
    copy: bool | None = None,
    errors: Literal["raise", "ignore"] = "raise",
) -> Self:
    """Cast the Series to ``dtype``.

    ``dtype`` may also be a single-entry mapping keyed by this Series'
    name, mirroring the pandas API. The actual cast is performed by the
    base class on a normalized {name: dtype} mapping.
    """
    if copy is None:
        copy = True
    if cudf.get_option("mode.pandas_compatible"):
        # pandas rejects extension-dtype *classes* (as opposed to
        # instances); mirror that error here.
        if inspect.isclass(dtype) and issubclass(
            dtype, pd.api.extensions.ExtensionDtype
        ):
            msg = (
                f"Expected an instance of {dtype.__name__}, "
                "but got the class instead. Try instantiating 'dtype'."
            )
            raise TypeError(msg)
    if is_dict_like(dtype):
        # A mapping may only target this Series' own name.
        if len(dtype) > 1 or self.name not in dtype:  # type: ignore[arg-type,operator]
            raise KeyError(
                "Only the Series name can be used for the key in Series "
                "dtype mappings."
            )
        dtype = {self.name: cudf.dtype(dtype[self.name])}
    else:
        dtype = {self.name: cudf.dtype(dtype)}
    return super().astype(dtype, copy, errors)
@overload
def sort_index(
self,
axis: Axis = ...,
level=...,
ascending: bool | Iterable[bool] = ...,
inplace: Literal[False] = ...,
kind: str = ..., # type: ignore[valid-type]
na_position: Literal["first", "last"] = ...,
sort_remaining: bool = ...,
ignore_index: bool = ...,
key=...,
) -> Self: ...
@overload
def sort_index(
self,
axis: Axis = ...,
level=...,
ascending: bool | Iterable[bool] = ...,
inplace: Literal[True] = ...,
kind: str = ..., # type: ignore[valid-type]
na_position: Literal["first", "last"] = ...,
sort_remaining: bool = ...,
ignore_index: bool = ...,
key=...,
) -> None: ...
@_performance_tracking
def sort_index(
self,
axis: Axis = 0,
level=None,
ascending: bool | Iterable[bool] = True,
inplace: bool = False,
kind: str = "quicksort", # type: ignore[valid-type]
na_position: Literal["first", "last"] = "last",
sort_remaining: bool = True,
ignore_index: bool = False,
key=None,
) -> Self | None:
if axis not in (0, "index"):
raise ValueError("Only axis=0 is valid for Series.")
return super().sort_index( # type: ignore[call-overload]
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
kind=kind,
na_position=na_position,
sort_remaining=sort_remaining,
ignore_index=ignore_index,
key=key,
)
@_performance_tracking
def sort_values(
self,
axis: Axis = 0,
ascending: bool | Iterable[bool] = True,
inplace: bool = False,
kind: str = "quicksort", # type: ignore[valid-type]
na_position: Literal["first", "last"] = "last",
ignore_index: bool = False,
key=None,
) -> Self | None:
"""Sort by the values along either axis.
Parameters
----------
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of the
by.
na_position : {'first', 'last'}, default 'last'
'first' puts nulls at the beginning, 'last' puts nulls at the end
ignore_index : bool, default False
If True, index will not be sorted.
key : callable, optional
Apply the key function to the values
before sorting. This is similar to the ``key`` argument in the
builtin ``sorted`` function, with the notable difference that
this ``key`` function should be *vectorized*. It should expect a
``Series`` and return a Series with the same shape as the input.
It will be applied to each column in `by` independently.
Currently not supported.
Returns
-------
Series : Series with sorted values.
Examples
--------
>>> import cudf
>>> s = cudf.Series([1, 5, 2, 4, 3])
>>> s.sort_values()
0 1
2 2
4 3
3 4
1 5
dtype: int64
.. pandas-compat::
:meth:`pandas.Series.sort_values`
* Support axis='index' only.
* The inplace and kind argument is currently unsupported
"""
return super().sort_values(
by=self.name,
axis=axis,
ascending=ascending,
inplace=inplace,
kind=kind,
na_position=na_position,
ignore_index=ignore_index,
key=key,
)
@_performance_tracking
def nlargest(
self, n: int = 5, keep: Literal["first", "last"] = "first"
) -> Self:
"""Returns a new Series of the *n* largest element.
Parameters
----------
n : int, default 5
Return this many descending sorted values.
keep : {'first', 'last'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
Examples
--------
>>> import cudf
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Malta": 434000, "Maldives": 434000,
... "Brunei": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Montserrat": 5200}
>>> series = cudf.Series(countries_population)
>>> series
Italy 59000000
France 65000000
Malta 434000
Maldives 434000
Brunei 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Montserrat 5200
dtype: int64
>>> series.nlargest()
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
>>> series.nlargest(3)
France 65000000
Italy 59000000
Malta 434000
dtype: int64
>>> series.nlargest(3, keep='last')
France 65000000
Italy 59000000
Brunei 434000
dtype: int64
"""
return self._n_largest_or_smallest(True, n, [self.name], keep)
@_performance_tracking
def nsmallest(
self, n: int = 5, keep: Literal["first", "last"] = "first"
) -> Self:
"""
Returns a new Series of the *n* smallest element.
Parameters
----------
n : int, default 5
Return this many ascending sorted values.
keep : {'first', 'last'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
Returns
-------
Series
The `n` smallest values in the Series, sorted in increasing order.
Examples
--------
>>> import cudf
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Brunei": 434000, "Malta": 434000,
... "Maldives": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Montserrat": 5200}
>>> s = cudf.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Brunei 434000
Malta 434000
Maldives 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Montserrat 5200
dtype: int64
The `n` smallest elements where ``n=5`` by default.
>>> s.nsmallest()
Montserrat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
Iceland 337000
dtype: int64
The `n` smallest elements where ``n=3``. Default `keep` value is
'first' so Nauru and Tuvalu will be kept.
>>> s.nsmallest(3)
Montserrat 5200
Nauru 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` and keeping the last
duplicates. Anguilla and Tuvalu will be kept since they are the last
with value 11300 based on the index order.
>>> s.nsmallest(3, keep='last')
Montserrat 5200
Anguilla 11300
Tuvalu 11300
dtype: int64
"""
return self._n_largest_or_smallest(False, n, [self.name], keep)
@_performance_tracking
def argsort(
self,
axis=0,
kind="quicksort",
order=None,
ascending=True,
na_position="last",
) -> Self:
col = as_column(
super().argsort(
axis=axis,
kind=kind,
order=order,
ascending=ascending,
na_position=na_position,
)
)
return self._from_data_like_self(
self._data._from_columns_like_self([col])
)
@_performance_tracking
def replace(
self,
to_replace=None,
value=no_default,
inplace: bool = False,
limit=None,
regex: bool = False,
method=no_default,
) -> Self | None:
if is_dict_like(to_replace) and value not in {None, no_default}:
raise ValueError(
"Series.replace cannot use dict-like to_replace and non-None "
"value"
)
return super().replace(
to_replace,
value,
inplace=inplace,
limit=limit,
regex=regex,
method=method,
)
@_performance_tracking
def update(self, other):
"""
Modify Series in place using values from passed Series.
Uses non-NA values from passed Series to make updates. Aligns
on index.
Parameters
----------
other : Series, or object coercible into Series
Examples
--------
>>> import cudf
>>> s = cudf.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.update(cudf.Series([4, 5, 6]))
>>> s
0 4
1 5
2 6
dtype: int64
>>> s = cudf.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
>>> s.update(cudf.Series(['d', 'e'], index=[0, 2]))
>>> s
0 d
1 b
2 e
dtype: object
>>> s = cudf.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.update(cudf.Series([4, 5, 6, 7, 8]))
>>> s
0 4
1 5
2 6
dtype: int64
If ``other`` contains NaNs the corresponding values are not updated
in the original Series.
>>> s = cudf.Series([1.0, 2.0, 3.0])
>>> s
0 1.0
1 2.0
2 3.0
dtype: float64
>>> s.update(cudf.Series([4.0, np.nan, 6.0], nan_as_null=False))
>>> s
0 4.0
1 2.0
2 6.0
dtype: float64
``other`` can also be a non-Series object type
that is coercible into a Series
>>> s = cudf.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.update([4, np.nan, 6])
>>> s
0 4
1 2
2 6
dtype: int64
>>> s = cudf.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.update({1: 9})
>>> s
0 1
1 9
2 3
dtype: int64
"""
if not isinstance(other, cudf.Series):
other = cudf.Series(other)
if not self.index.equals(other.index):
other = other.reindex(index=self.index)
mask = other.notna()
self.mask(mask, other, inplace=True)
# UDF related
@_performance_tracking
def apply(
self,
func,
convert_dtype=True,
args=(),
by_row: Literal[False, "compat"] = "compat",
**kwargs,
):
"""
Apply a scalar function to the values of a Series.
Similar to ``pandas.Series.apply``.
``apply`` relies on Numba to JIT compile ``func``.
Thus the allowed operations within ``func`` are limited to `those
supported by the CUDA Python Numba target
<https://numba.readthedocs.io/en/stable/cuda/cudapysupported.html>`__.
For more information, see the `cuDF guide to user defined functions
<https://docs.rapids.ai/api/cudf/stable/user_guide/guide-to-udfs.html>`__.
Some string functions and methods are supported. Refer to the guide
to UDFs for details.
Parameters
----------
func : function
Scalar Python function to apply.
convert_dtype : bool, default True
In cuDF, this parameter is always True. Because
cuDF does not support arbitrary object dtypes,
the result will always be the common type as determined
by numba based on the function logic and argument types.
See examples for details.
args : tuple
Positional arguments passed to func after the series value.
by_row : False or "compat", default "compat"
If ``"compat"`` and func is a callable, func will be passed each element of
the Series, like ``Series.map``. If func is a list or dict of
callables, will first try to translate each func into pandas methods. If
that doesn't work, will try call to apply again with ``by_row="compat"``
and if that fails, will call apply again with ``by_row=False``
(backward compatible).
If False, the func will be passed the whole Series at once.
``by_row`` has no effect when ``func`` is a string.
Currently not implemented.
**kwargs
Not supported
Returns
-------
result : Series
The mask and index are preserved.
Notes
-----
UDFs are cached in memory to avoid recompilation. The first
call to the UDF will incur compilation overhead. `func` may
call nested functions that are decorated with the decorator
`numba.cuda.jit(device=True)`, otherwise numba will raise a
typing error.
Examples
--------
Apply a basic function to a series:
>>> sr = cudf.Series([1,2,3])
>>> def f(x):
... return x + 1
>>> sr.apply(f)
0 2
1 3
2 4
dtype: int64
Apply a basic function to a series with nulls:
>>> sr = cudf.Series([1,cudf.NA,3])
>>> def f(x):
... return x + 1
>>> sr.apply(f)
0 2
1 <NA>
2 4
dtype: int64
Use a function that does something conditionally,
based on if the value is or is not null:
>>> sr = cudf.Series([1,cudf.NA,3])
>>> def f(x):
... if x is cudf.NA:
... return 42
... else:
... return x - 1
>>> sr.apply(f)
0 0
1 42
2 2
dtype: int64
Results will be upcast to the common dtype required
as derived from the UDFs logic. Note that this means
the common type will be returned even if such data
is passed that would not result in any values of that
dtype:
>>> sr = cudf.Series([1,cudf.NA,3])
>>> def f(x):
... return x + 1.5
>>> sr.apply(f)
0 2.5
1 <NA>
2 4.5
dtype: float64
UDFs manipulating string data are allowed, as long as
they neither modify strings in place nor create new strings.
For example, the following UDF is allowed:
>>> def f(st):
... if len(st) == 0:
... return -1
... elif st.startswith('a'):
... return 1
... elif 'example' in st:
... return 2
... else:
... return 3
...
>>> sr = cudf.Series(['', 'abc', 'some_example'])
>>> sr.apply(f) # doctest: +SKIP
0 -1
1 1
2 2
dtype: int64
However, the following UDF is not allowed since it includes an
operation that requires the creation of a new string: a call to the
``upper`` method. Methods that are not supported in this manner
will raise an ``AttributeError``.
>>> def f(st):
... new = st.upper()
... return 'ABC' in new
...
>>> sr.apply(f) # doctest: +SKIP
For a complete list of supported functions and methods that may be
used to manipulate string data, see the UDF guide,
<https://docs.rapids.ai/api/cudf/stable/user_guide/guide-to-udfs.html>
"""
if convert_dtype is not True:
raise ValueError("Series.apply only supports convert_dtype=True")
elif by_row != "compat":
raise NotImplementedError("by_row is currently not supported.")
result = self._apply(func, SeriesApplyKernel, *args, **kwargs)
result.name = self.name
return result
#
# Stats
#
@_performance_tracking
def count(self):
"""
Return number of non-NA/null observations in the Series
Returns
-------
int
Number of non-null values in the Series.
Examples
--------
>>> import cudf
>>> ser = cudf.Series([1, 5, 2, 4, 3])
>>> ser.count()
5
.. pandas-compat::
:meth:`pandas.Series.count`
Parameters currently not supported is `level`.
"""
valid_count = self.valid_count
if cudf.get_option("mode.pandas_compatible"):
return valid_count - self._column.nan_count
return valid_count
@_performance_tracking
def mode(self, dropna=True):
"""
Return the mode(s) of the dataset.
Always returns Series even if only one value is returned.
Parameters
----------
dropna : bool, default True
Don't consider counts of NA/NaN/NaT.
Returns
-------
Series
Modes of the Series in sorted order.
Examples
--------
>>> import cudf
>>> series = cudf.Series([7, 6, 5, 4, 3, 2, 1])
>>> series
0 7
1 6
2 5
3 4
4 3
5 2
6 1
dtype: int64
>>> series.mode()
0 1
1 2
2 3
3 4
4 5
5 6
6 7
dtype: int64
We can include ``<NA>`` values in mode by
passing ``dropna=False``.
>>> series = cudf.Series([7, 4, 3, 3, 7, None, None])
>>> series
0 7
1 4
2 3
3 3
4 7
5 <NA>
6 <NA>
dtype: int64
>>> series.mode()
0 3
1 7
dtype: int64
>>> series.mode(dropna=False)
0 3
1 7
2 <NA>
dtype: int64
"""
val_counts = self.value_counts(ascending=False, dropna=dropna)
if len(val_counts) > 0:
val_counts = val_counts[val_counts == val_counts.iloc[0]]
return Series._from_column(
val_counts.index.sort_values()._column,
name=self.name,
attrs=self.attrs,
)
@_performance_tracking
def round(self, decimals=0, how="half_even"):
if not is_integer(decimals):
raise ValueError(
f"decimals must be an int, got {type(decimals).__name__}"
)
decimals = int(decimals)
return super().round(decimals, how)
@_performance_tracking
def cov(self, other, min_periods=None, ddof: int | None = None):
"""
Compute covariance with Series, excluding missing values.
Parameters
----------
other : Series
Series with which to compute the covariance.
Returns
-------
float
Covariance between Series and other normalized by N-1
(unbiased estimator).
Examples
--------
>>> import cudf
>>> ser1 = cudf.Series([0.9, 0.13, 0.62])
>>> ser2 = cudf.Series([0.12, 0.26, 0.51])
>>> ser1.cov(ser2)
-0.015750000000000004
.. pandas-compat::
:meth:`pandas.Series.cov`
`min_periods` parameter is not yet supported.
"""
if min_periods is not None:
raise NotImplementedError(
"min_periods parameter is not implemented yet"
)
if ddof is not None:
raise NotImplementedError("ddof parameter is not implemented yet")
if self.empty or other.empty:
return _get_nan_for_dtype(self.dtype)
lhs = self.nans_to_nulls().dropna()
rhs = other.nans_to_nulls().dropna()
lhs, rhs = _align_indices([lhs, rhs], how="inner")
try:
return lhs._column.cov(rhs._column)
except AttributeError:
raise TypeError(
f"cannot perform covariance with types {self.dtype}, "
f"{other.dtype}"
)
@_performance_tracking
def duplicated(self, keep="first"):
"""
Indicate duplicate Series values.
Duplicated values are indicated as ``True`` values in the resulting
Series. Either all duplicates, all except the first or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
Method to handle dropping duplicates:
- ``'first'`` : Mark duplicates as ``True`` except for the first
occurrence.
- ``'last'`` : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Returns
-------
Series[bool]
Series indicating whether each value has occurred in the
preceding values.
See Also
--------
Index.duplicated : Equivalent method on cudf.Index.
DataFrame.duplicated : Equivalent method on cudf.DataFrame.
Series.drop_duplicates : Remove duplicate values from Series.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set on False and all others on True:
>>> import cudf
>>> animals = cudf.Series(['lama', 'cow', 'lama', 'beetle', 'lama'])
>>> animals.duplicated()
0 False
1 False
2 True
3 False
4 True
dtype: bool
which is equivalent to
>>> animals.duplicated(keep='first')
0 False
1 False
2 True
3 False
4 True
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
is set on False and all others on True:
>>> animals.duplicated(keep='last')
0 True
1 False
2 True
3 False
4 False
dtype: bool
By setting keep on ``False``, all duplicates are True:
>>> animals.duplicated(keep=False)
0 True
1 False
2 True
3 False
4 True
dtype: bool
"""
return super().duplicated(keep=keep)
@_performance_tracking
def corr(self, other, method="pearson", min_periods=None):
"""Calculates the sample correlation between two Series,
excluding missing values.
Parameters
----------
other : Series
Series with which to compute the correlation.
method : {'pearson', 'spearman'}, default 'pearson'
Method used to compute correlation:
- pearson : Standard correlation coefficient
- spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations needed to have a valid result.
Examples
--------
>>> import cudf
>>> ser1 = cudf.Series([0.9, 0.13, 0.62])
>>> ser2 = cudf.Series([0.12, 0.26, 0.51])
>>> ser1.corr(ser2, method="pearson")
-0.20454263717316126
>>> ser1.corr(ser2, method="spearman")
-0.5
"""
if method not in {"pearson", "spearman"}:
raise ValueError(f"Unknown method {method}")
if min_periods is not None:
raise NotImplementedError("Unsupported argument 'min_periods'")
if self.empty or other.empty:
return _get_nan_for_dtype(self.dtype)
lhs = self.nans_to_nulls().dropna()
rhs = other.nans_to_nulls().dropna()
lhs, rhs = _align_indices([lhs, rhs], how="inner")
if method == "spearman":
lhs = lhs.rank()
rhs = rhs.rank()
try:
return lhs._column.corr(rhs._column)
except AttributeError:
raise TypeError(
f"cannot perform corr with types {self.dtype}, {other.dtype}"
)
@_performance_tracking
def autocorr(self, lag=1):
"""Compute the lag-N autocorrelation. This method computes the Pearson
correlation between the Series and its shifted self.
Parameters
----------
lag : int, default 1
Number of lags to apply before performing autocorrelation.
Returns
-------
result : float
The Pearson correlation between self and self.shift(lag).
Examples
--------
>>> import cudf
>>> s = cudf.Series([0.25, 0.5, 0.2, -0.05, 0.17])
>>> s.autocorr()
0.1438853844...
>>> s.autocorr(lag=2)
-0.9647548490...
"""
return self.corr(self.shift(lag))
@_performance_tracking
def isin(self, values):
"""Check whether values are contained in Series.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a TypeError. Instead, turn a single string into a list
of one element.
Returns
-------
result : Series
Series of booleans indicating if each element is in values.
Raises
------
TypeError
If values is a string
Examples
--------
>>> import cudf
>>> s = cudf.Series(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'], name='animal')
>>> s.isin(['cow', 'lama'])
0 True
1 True
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
0 True
1 False
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
Strings and integers are distinct and are therefore not comparable:
>>> cudf.Series([1]).isin(['1'])
0 False
dtype: bool
>>> cudf.Series([1.1]).isin(['1.1'])
0 False
dtype: bool
"""
# Even though only list-like objects are supposed to be passed, only
# scalars throw errors. Other types (like dicts) just transparently
# return False (see the implementation of ColumnBase.isin).
if is_scalar(values):
raise TypeError(
"only list-like objects are allowed to be passed "
f"to isin(), you passed a [{type(values).__name__}]"
)
return Series._from_column(
self._column.isin(values),
name=self.name,
index=self.index,
attrs=self.attrs,
)
@_performance_tracking
def unique(self) -> cp.ndarray | Self:
"""
Returns unique values of this Series.
Returns
-------
Series
A series with only the unique values.
Examples
--------
>>> import cudf
>>> series = cudf.Series(['a', 'a', 'b', None, 'b', None, 'c'])
>>> series
0 a
1 a
2 b
3 <NA>
4 b
5 <NA>
6 c
dtype: object
>>> series.unique()
0 a
1 b
2 <NA>
3 c
dtype: object
"""
res = self._column.unique()
if cudf.get_option("mode.pandas_compatible"):
if is_pandas_nullable_extension_dtype(self.dtype):
raise NotImplementedError(
"cudf does not support ExtensionArrays"
)
elif self.dtype.kind in "mM":
raise NotImplementedError(
"cuDF does not implement DatetimeArray or TimedeltaArray"
)
return res.values
return Series._from_column(res, name=self.name, attrs=self.attrs)
    @_performance_tracking
    def value_counts(
        self,
        normalize=False,
        sort=True,
        ascending=False,
        bins=None,
        dropna=True,
    ):
        """Return a Series containing counts of unique values.

        The resulting object will be in descending order so that
        the first element is the most frequently-occurring element.
        Excludes NA values by default.

        Parameters
        ----------
        normalize : bool, default False
            If True then the object returned will contain
            the relative frequencies of the unique values.
        sort : bool, default True
            Sort by frequencies.
        ascending : bool, default False
            Sort in ascending order.
        bins : int, optional
            Rather than count values, group them into half-open bins,
            only works with numeric data.
        dropna : bool, default True
            Don't include counts of NaN and None.

        Returns
        -------
        result : Series containing counts of unique values.

        See Also
        --------
        Series.count
            Number of non-NA elements in a Series.
        cudf.DataFrame.count
            Number of non-NA elements in a DataFrame.

        Examples
        --------
        >>> import cudf
        >>> sr = cudf.Series([1.0, 2.0, 2.0, 3.0, 3.0, 3.0, None])
        >>> sr.value_counts()
        3.0    3
        2.0    2
        1.0    1
        Name: count, dtype: int64

        The order of the counts can be changed by passing ``ascending=True``:

        >>> sr.value_counts(ascending=True)
        1.0    1
        2.0    2
        3.0    3
        Name: count, dtype: int64

        With ``normalize`` set to True, returns the relative frequency
        by dividing all values by the sum of values.

        >>> sr.value_counts(normalize=True)
        3.0    0.500000
        2.0    0.333333
        1.0    0.166667
        Name: proportion, dtype: float64

        To include ``NA`` value counts, pass ``dropna=False``:

        >>> sr = cudf.Series([1.0, 2.0, 2.0, 3.0, None, 3.0, 3.0, None])
        >>> sr.value_counts(dropna=False)
        3.0     3
        2.0     2
        <NA>    2
        1.0     1
        Name: count, dtype: int64

        >>> s = cudf.Series([3, 1, 2, 3, 4, np.nan])
        >>> s.value_counts(bins=3)
        (2.0, 3.0]      2
        (0.996, 2.0]    2
        (3.0, 4.0]      1
        Name: count, dtype: int64
        """
        # Binned counting: bucket the values first, then count per bin.
        if bins is not None:
            series_bins = cudf.cut(self, bins, include_lowest=True)
        result_name = "proportion" if normalize else "count"
        # All-null Series with dropna=True: return an empty int64 result
        # that still carries the correct index dtype and names.
        if dropna and self.null_count == len(self):
            return Series(
                [],
                dtype=np.int64,
                name=result_name,
                index=cudf.Index([], dtype=self.dtype, name=self.name),
            )
        if bins is not None:
            res = self.groupby(series_bins, dropna=dropna).count(dropna=dropna)
            res = res[res.index.notna()]
        else:
            res = self.groupby(self, dropna=dropna).count(dropna=dropna)
            if dropna:
                res = res[res.index.notna()]
        if isinstance(self.dtype, CategoricalDtype) and len(res) != len(
            self.dtype.categories
        ):
            # For categorical dtypes: When there exists
            # categories in dtypes and they are missing in the
            # column, `value_counts` will have to return
            # their occurrences as 0.
            # TODO: Remove this workaround once `observed`
            # parameter support is added to `groupby`
            res = res.reindex(self.dtype.categories).fillna(0)
            res.index = res.index.astype(self.dtype)
        res.index.name = self.name
        if sort:
            res = res.sort_values(ascending=ascending)
        if normalize:
            res = res / float(res._column.sum())
        # Pandas returns an IntervalIndex as the index of res
        # this condition makes sure we do too if bins is given
        if bins is not None and len(res) == len(res.index.categories):
            struct_col = res.index._column._get_decategorized_column()
            interval_col = struct_col._with_type_metadata(
                res.index.dtype.categories.dtype
            )
            res.index = cudf.IntervalIndex._from_column(
                interval_col, name=res.index.name
            )
        res.name = result_name
        return res
@_performance_tracking
def quantile(
self, q=0.5, interpolation="linear", exact=True, quant_index=True
):
"""
Return values at the given quantile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points i and j:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
exact : boolean
Whether to use approximate or exact quantile algorithm.
quant_index : boolean
Whether to use the list of quantiles as index.
Returns
-------
float or Series
If ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles, otherwise
a float will be returned.
Examples
--------
>>> import cudf
>>> series = cudf.Series([1, 2, 3, 4])
>>> series
0 1
1 2
2 3
3 4
dtype: int64
>>> series.quantile(0.5)
2.5
>>> series.quantile([0.25, 0.5, 0.75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64
"""
return_scalar = is_scalar(q)
if return_scalar:
np_array_q = np.asarray([float(q)])
else:
try:
np_array_q = np.asarray(q)
except TypeError:
try:
np_array_q = cp.asarray(q).get()
except TypeError:
raise TypeError(
f"q must be a scalar or array-like, got {type(q)}"
)
result = self._column.quantile(
np_array_q, interpolation, exact, return_scalar=return_scalar
)
if return_scalar:
return result
return Series._from_column(
result,
name=self.name,
index=cudf.Index(np_array_q) if quant_index else None,
attrs=self.attrs,
)
@docutils.doc_describe()
@_performance_tracking
def describe(
self,
percentiles=None,
include=None,
exclude=None,
) -> Self:
"""{docstring}"""
if cudf.get_option("mode.pandas_compatible") and not (
is_dtype_obj_numeric(self.dtype) and self.dtype.kind != "b"
):
raise NotImplementedError(
"cudf.Series.describe is not implemented in "
"pandas compatibility mode."
)
if percentiles is not None:
if not all(0 <= x <= 1 for x in percentiles):
raise ValueError(
"All percentiles must be between 0 and 1, inclusive."
)
# describe always includes 50th percentile
percentiles = list(percentiles)
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.sort(percentiles)
else:
# pandas defaults
percentiles = np.array([0.25, 0.5, 0.75])
dtype: Dtype | None = "str"
if self.dtype.kind == "b":
data = _describe_categorical(self)
elif is_dtype_obj_numeric(self.dtype):
data = _describe_numeric(self, percentiles)
if isinstance(self.dtype, pd.ArrowDtype):
dtype = pd.ArrowDtype(pa.float64())
elif is_pandas_nullable_extension_dtype(self.dtype):
dtype = pd.Float64Dtype()
else:
dtype = None
elif self.dtype.kind == "m":
data = _describe_timedelta(self, percentiles)
elif self.dtype.kind == "M":
data = _describe_timestamp(self, percentiles)
else:
data = _describe_categorical(self)
res = Series(
data=data.values(),
index=data.keys(),
dtype=dtype,
name=self.name,
)
res._attrs = self.attrs
return res
@_performance_tracking
def digitize(self, bins: np.ndarray, right: bool = False) -> Self:
"""Return the indices of the bins to which each value belongs.
Notes
-----
Monotonicity of bins is assumed and not checked.
Parameters
----------
bins : np.array
1-D monotonically, increasing array with same type as this series.
right : bool
Indicates whether interval contains the right or left bin edge.
Returns
-------
A new Series containing the indices.
Examples
--------
>>> import cudf
>>> s = cudf.Series([0.2, 6.4, 3.0, 1.6])
>>> bins = cudf.Series([0.0, 1.0, 2.5, 4.0, 10.0])
>>> inds = s.digitize(bins)
>>> inds
0 1
1 4
2 3
3 2
dtype: int32
"""
return type(self)._from_column(
self._column.digitize(bins, right),
name=self.name,
attrs=self.attrs,
)
@_performance_tracking
def diff(self, periods=1):
"""First discrete difference of element.
Calculates the difference of a Series element compared with another
element in the Series (default is element in previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference,
accepts negative values.
Returns
-------
Series
First differences of the Series.
Examples
--------
>>> import cudf
>>> series = cudf.Series([1, 1, 2, 3, 5, 8])
>>> series
0 1
1 1
2 2
3 3
4 5
5 8
dtype: int64
Difference with previous row
>>> series.diff()
0 <NA>
1 0
2 1
3 1
4 2
5 3
dtype: int64
Difference with 3rd previous row
>>> series.diff(periods=3)
0 <NA>
1 <NA>
2 <NA>
3 2
4 4
5 6
dtype: int64
Difference with following row
>>> series.diff(periods=-1)
0 0
1 -1
2 -1
3 -2
4 -3
5 <NA>
dtype: int64
"""
if not is_integer(periods):
if not (isinstance(periods, float) and periods.is_integer()):
raise ValueError("periods must be an integer")
periods = int(periods)
return self - self.shift(periods=periods)
    @_performance_tracking
    @docutils.doc_apply(
        groupby_doc_template.format(  # type: ignore[has-type]
            ret=textwrap.dedent(
                """
                Returns
                -------
                SeriesGroupBy
                    Returns a SeriesGroupBy object that contains
                    information about the groups.
                """
            )
        )
    )
    def groupby(
        self,
        by=None,
        axis=0,
        level=None,
        as_index=True,
        sort=no_default,
        group_keys=False,
        observed=True,
        dropna=True,
    ):
        # Thin pass-through to the shared frame-level groupby; the public
        # documentation comes from groupby_doc_template via the decorator.
        return super().groupby(
            by,
            axis,
            level,
            as_index,
            sort,
            group_keys,
            observed,
            dropna,
        )
@_performance_tracking
def rename(
self,
index=None,
axis=None,
copy: bool = True,
inplace: bool = False,
level=None,
errors: Literal["ignore", "raise"] = "ignore",
):
"""
Alter Series name
Change Series.name with a scalar value
Parameters
----------
index : Scalar, optional
Scalar to alter the Series.name attribute
axis : {0 or 'index'}
Unused. Parameter needed for compatibility with DataFrame.
copy : boolean, default True
Also copy underlying data
inplace : bool, default False
Whether to return a new Series. If True the value of copy is ignored.
Currently not supported.
level : int or level name, default None
In case of MultiIndex, only rename labels in the specified level.
Currently not supported.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise `KeyError` when a `dict-like mapper` or
`index` contains labels that are not present in the index being transformed.
If 'ignore', existing keys will be renamed and extra keys will be ignored.
Currently not supported.
Returns
-------
Series
Examples
--------
>>> import cudf
>>> series = cudf.Series([10, 20, 30])
>>> series
0 10
1 20
2 30
dtype: int64
>>> series.name
>>> renamed_series = series.rename('numeric_series')
>>> renamed_series
0 10
1 20
2 30
Name: numeric_series, dtype: int64
>>> renamed_series.name
'numeric_series'
.. pandas-compat::
:meth:`pandas.Series.rename`
- Supports scalar values only for changing name attribute
"""
if inplace is not False:
raise NotImplementedError("inplace is currently not supported.")
if level is not None:
raise NotImplementedError("level is currently not supported.")
if errors != "ignore":
raise NotImplementedError("errors is currently not supported.")
if not is_scalar(index):
raise NotImplementedError(
".rename does not currently support relabeling the index."
)
out_data = self._data.copy(deep=copy)
return Series._from_data(
out_data, self.index, name=index, attrs=self.attrs
)
@_performance_tracking
def add_prefix(self, prefix, axis=None):
if axis is not None:
raise NotImplementedError("axis is currently not implemented.")
return Series._from_data(
# TODO: Change to deep=False when copy-on-write is default
data=self._data.copy(deep=True),
index=prefix + self.index.astype(str),
attrs=self.attrs,
)
@_performance_tracking
def add_suffix(self, suffix, axis=None):
    """Return a new Series whose index labels are suffixed with *suffix*.

    Index labels are cast to ``str`` before concatenation; values are
    unchanged. ``axis`` exists for pandas compatibility only.
    """
    if axis is not None:
        raise NotImplementedError("axis is currently not implemented.")
    # TODO: Change to deep=False when copy-on-write is default
    copied_data = self._data.copy(deep=True)
    relabeled = self.index.astype(str) + suffix
    return Series._from_data(
        data=copied_data, index=relabeled, attrs=self.attrs
    )
@_performance_tracking
def keys(self):
    """
    Return alias for index.

    Returns
    -------
    Index
        Index of the Series.

    Examples
    --------
    >>> import cudf
    >>> cudf.Series([10, 11, 12]).keys()
    RangeIndex(start=0, stop=3, step=1)
    >>> cudf.Series([1, 2, 3], index=['a', 'b', 'c']).keys()
    Index(['a', 'b', 'c'], dtype='object')
    """
    return self.index
@_performance_tracking
def explode(self, ignore_index=False):
    """
    Transform each element of a list-like to a row, replicating index
    values.

    Parameters
    ----------
    ignore_index : bool, default False
        If True, the resulting index will be labeled 0, 1, …, n - 1.

    Returns
    -------
    Series

    Examples
    --------
    >>> import cudf
    >>> s = cudf.Series([[1, 2, 3], [], None, [4, 5]])
    >>> s.explode()
    0       1
    0       2
    0       3
    1    <NA>
    2    <NA>
    3       4
    3       5
    dtype: int64
    """
    # All of the heavy lifting lives in the shared IndexedFrame helper;
    # a Series only needs to pass its own column name.
    return super()._explode(self.name, ignore_index)
@_performance_tracking
def pct_change(
    self,
    periods=1,
    fill_method=no_default,
    limit=no_default,
    freq=None,
    **kwargs,
):
    """
    Calculates the percent change between sequential elements
    in the Series.

    Parameters
    ----------
    periods : int, default 1
        Periods to shift for forming percent change.
    fill_method : str, default 'ffill'
        How to handle NAs before computing percent changes.

        .. deprecated:: 24.04
            All options of `fill_method` are deprecated
            except `fill_method=None`.
    limit : int, optional
        The number of consecutive NAs to fill before stopping.
        Not yet implemented.

        .. deprecated:: 24.04
            `limit` is deprecated.
    freq : str, optional
        Increment to use from time series API.
        Not yet implemented.
    **kwargs
        Additional keyword arguments are passed into
        `Series.shift`.

    Returns
    -------
    Series
    """
    # limit and freq are unimplemented and rejected up front; only
    # fill_method is actually honored below.
    if limit is not no_default:
        raise NotImplementedError("limit parameter not supported yet.")
    if freq is not None:
        raise NotImplementedError("freq parameter not supported yet.")
    # The elif chains off the freq check; equivalent to a plain `if`
    # here because the freq branch always raises.
    elif fill_method not in {
        no_default,
        None,
        "ffill",
        "pad",
        "bfill",
        "backfill",
    }:
        raise ValueError(
            "fill_method must be one of None, 'ffill', 'pad', "
            "'bfill', or 'backfill'."
        )
    # pandas-style deprecation path: warn whenever a non-default
    # fill_method was passed explicitly.  The `limit is not no_default`
    # arm is unreachable here (limit was rejected above); it is kept to
    # mirror pandas' scaffolding until pandas-3.0 support is added.
    if fill_method not in (no_default, None) or limit is not no_default:
        # Do not remove until pandas 3.0 support is added.
        assert PANDAS_LT_300, (
            "Need to drop after pandas-3.0 support is added."
        )
        warnings.warn(
            "The 'fill_method' and 'limit' keywords in "
            f"{type(self).__name__}.pct_change are deprecated and will be "
            "removed in a future version. Either fill in any non-leading "
            "NA values prior to calling pct_change or specify "
            "'fill_method=None' to not fill NA values.",
            FutureWarning,
        )
    # Resolve defaults only after the deprecation warning has fired.
    if fill_method is no_default:
        fill_method = "ffill"
    if limit is no_default:
        limit = None
    # NOTE(review): warnings around fillna are suppressed — presumably
    # to hide fillna's own deprecation warning, since the user-facing
    # warning above already covers it; confirm.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        data = self.fillna(method=fill_method, limit=limit)
    diff = data.diff(periods=periods)
    change = diff / data.shift(periods=periods, freq=freq, **kwargs)
    return change
@_performance_tracking
def where(
    self, cond, other=None, inplace: bool = False, axis=None, level=None
) -> Self | None:
    """Replace entries where *cond* is False with *other*.

    ``axis`` and ``level`` are accepted for pandas compatibility but
    not supported; *other* must be a scalar or 1-D. Returns a new
    Series, or ``None`` when ``inplace=True``.
    """
    # Guard clauses for the unsupported pandas-compat parameters.
    if axis is not None:
        raise NotImplementedError("axis is not supported.")
    if level is not None:
        raise NotImplementedError("level is not supported.")
    if getattr(other, "ndim", 1) > 1:
        raise NotImplementedError(
            "Only 1 dimensional other is currently supported"
        )
    cond = as_column(cond)
    if len(self) != len(cond):
        raise ValueError(
            f"cond must be the same length as self ({len(self)})"
        )
    # Scalars are passed through; everything else becomes a column.
    replacement = other if is_scalar(other) else as_column(other)
    result_col = self._column.where(cond, replacement, inplace)
    result = self._from_column(
        result_col,
        index=self.index,
        name=self.name,
        attrs=self.attrs,
    )
    return self._mimic_inplace(result, inplace=inplace)
@_performance_tracking
def to_pylibcudf(self, copy=False) -> tuple[plc.Column, dict]:
    """
    Convert this Series to a pylibcudf.Column.

    Parameters
    ----------
    copy : bool
        Whether or not to generate a new copy of the underlying device
        data. Only ``False`` is supported.

    Returns
    -------
    pylibcudf.Column
        A new pylibcudf.Column referencing the same data.
    dict
        Metadata dict holding the Series name and index.

    Notes
    -----
    User requests to convert to pylibcudf must assume that the
    data may be modified afterwards.
    """
    if copy:
        raise NotImplementedError("copy=True is not supported")
    # mode="write" hands out a mutable view of the same device buffers.
    plc_column = self._column.to_pylibcudf(mode="write")
    meta = {"name": self.name, "index": self.index}
    return plc_column, meta
@classmethod
@_performance_tracking
def from_pylibcudf(
    cls, col: plc.Column, metadata: dict | None = None
) -> Self:
    """
    Create a Series from a pylibcudf.Column.

    Parameters
    ----------
    col : pylibcudf.Column
        The input Column.
    metadata : dict | None
        Optional metadata; may contain only the keys ``"name"`` and/or
        ``"index"``.

    Returns
    -------
    Series
        A new Series wrapping the same device data.

    Notes
    -----
    The returned Series directly accesses the data and mask buffers of
    the pylibcudf Column, so it is not tied to the lifetime of the
    original pylibcudf.Column object.
    """
    name = None
    index = None
    if metadata is not None:
        well_formed = (
            isinstance(metadata, dict)
            and 1 <= len(metadata) <= 2
            and set(metadata).issubset({"name", "index"})
        )
        if not well_formed:
            raise ValueError(
                "Metadata dict must only contain name or index"
            )
        name = metadata.get("name")
        index = metadata.get("index")
    # data_ptr_exposed=True marks the buffers as externally visible so
    # cudf will not assume exclusive ownership of them.
    wrapped = ColumnBase.from_pylibcudf(col, data_ptr_exposed=True)
    return cls._from_column(wrapped, name=name, index=index)
def make_binop_func(op):
    """Build a Series-flavored wrapper around the Frame binop *op*.

    The wrapper reorders parameters to the pandas Series signature
    ``(other, level=None, fill_value=None, axis=0)`` and rejects any
    axis other than 0 before delegating to the IndexedFrame
    implementation.
    """
    base_impl = getattr(IndexedFrame, op)

    @functools.wraps(base_impl)
    def wrapper(self, other, level=None, fill_value=None, axis=0):
        if axis != 0:
            raise NotImplementedError("Only axis=0 supported at this time.")
        return base_impl(self, other, axis, level, fill_value)

    # functools.wraps sets __wrapped__ to base_impl, and CPython derives
    # a function's signature by following __wrapped__ until it finds a
    # __signature__ attribute.  Pin __signature__ here so introspection
    # reports the wrapper's own parameter list, not base_impl's.
    wrapper.__signature__ = inspect.signature(
        lambda self, other, level=None, fill_value=None, axis=0: None
    )
    return wrapper
# Wrap all Frame binop functions with the expected API for Series.
# Each name below becomes a Series method with the pandas-style
# signature (other, level=None, fill_value=None, axis=0) produced by
# make_binop_func.
for binop in (
    "add",
    "radd",
    "subtract",
    "sub",
    "rsub",
    "multiply",
    "mul",
    "rmul",
    "mod",
    "rmod",
    "pow",
    "rpow",
    "floordiv",
    "rfloordiv",
    "truediv",
    "div",
    "divide",
    "rtruediv",
    "rdiv",
    "eq",
    "ne",
    "lt",
    "le",
    "gt",
    "ge",
):
    setattr(Series, binop, make_binop_func(binop))
|
Series
|
python
|
boto__boto3
|
tests/integration/test_dynamodb.py
|
{
"start": 2088,
"end": 2460
}
|
class ____(BaseDynamoDBTest):
def test_put_get_item(self):
self.table.put_item(Item=self.item_data)
self.addCleanup(self.table.delete_item, Key={'MyHashKey': 'mykey'})
response = self.table.get_item(
Key={'MyHashKey': 'mykey'}, ConsistentRead=True
)
self.assertEqual(response['Item'], self.item_data)
|
TestDynamoDBTypes
|
python
|
etianen__django-reversion
|
tests/test_app/tests/test_commands.py
|
{
"start": 5227,
"end": 6055
}
|
class ____(TestModelMixin, TestBase):
databases = {"default", "mysql", "postgres"}
def testDeleteRevisionsDb(self):
with reversion.create_revision(using="postgres"):
TestModel.objects.create()
self.callCommand("deleterevisions", using="postgres")
self.assertNoRevision(using="postgres")
def testDeleteRevisionsDbMySql(self):
with reversion.create_revision(using="mysql"):
TestModel.objects.create()
self.callCommand("deleterevisions", using="mysql")
self.assertNoRevision(using="mysql")
def testDeleteRevisionsDbNoMatch(self):
with reversion.create_revision():
obj = TestModel.objects.create()
self.callCommand("deleterevisions", using="postgres")
self.assertSingleRevision((obj,))
|
DeleteRevisionsDbTest
|
python
|
pydantic__pydantic
|
tests/test_forward_ref.py
|
{
"start": 36014,
"end": 36042
}
|
class ____(DC1):
b: 'A'
|
DC2
|
python
|
pallets__quart
|
src/quart/routing.py
|
{
"start": 1089,
"end": 2752
}
|
class ____(Map):
def bind_to_request(
self,
request: BaseRequestWebsocket,
subdomain: str | None,
server_name: str | None,
) -> MapAdapter:
host: str
if server_name is None:
host = request.host.lower()
else:
host = server_name.lower()
host = _normalise_host(request.scheme, host)
if subdomain is None and not self.host_matching:
request_host_parts = _normalise_host(
request.scheme, request.host.lower()
).split(".")
config_host_parts = host.split(".")
offset = -len(config_host_parts)
if request_host_parts[offset:] != config_host_parts:
warnings.warn(
f"Current server name '{request.host}' doesn't match configured"
f" server name '{host}'",
stacklevel=2,
)
subdomain = "<invalid>"
else:
subdomain = ".".join(filter(None, request_host_parts[:offset]))
return super().bind(
host,
request.root_path,
subdomain,
request.scheme,
request.method,
request.path,
request.query_string.decode(),
)
def _normalise_host(scheme: str, host: str) -> str:
# It is not common to write port 80 or 443 for a hostname,
# so strip it if present.
if scheme in {"http", "ws"} and host.endswith(":80"):
return host[:-3]
elif scheme in {"https", "wss"} and host.endswith(":443"):
return host[:-4]
else:
return host
|
QuartMap
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/remote_representation/external.py
|
{
"start": 42393,
"end": 45098
}
|
class ____:
def __init__(self, partition_set_snap: PartitionSetSnap, handle: RepositoryHandle):
self._partition_set_snap = check.inst_param(
partition_set_snap, "partition_set_snap", PartitionSetSnap
)
self._handle = PartitionSetHandle(
partition_set_name=partition_set_snap.name,
repository_handle=check.inst_param(handle, "handle", RepositoryHandle),
)
@property
def name(self) -> str:
return self._partition_set_snap.name
@property
def op_selection(self) -> Optional[Sequence[str]]:
return self._partition_set_snap.op_selection
@property
def mode(self) -> Optional[str]:
return self._partition_set_snap.mode
@property
def job_name(self) -> str:
return self._partition_set_snap.job_name
@property
def backfill_policy(self) -> Optional[BackfillPolicy]:
return self._partition_set_snap.backfill_policy
@property
def repository_handle(self) -> RepositoryHandle:
return self._handle.repository_handle
def get_remote_origin(self) -> RemotePartitionSetOrigin:
return self._handle.get_remote_origin()
def get_remote_origin_id(self) -> str:
return self.get_remote_origin().get_id()
def has_partition_name_data(self) -> bool:
# Partition sets from older versions of Dagster as well as partition sets using
# a DynamicPartitionsDefinition require calling out to user code to compute the partition
# names
return self._partition_set_snap.partitions is not None
def has_partitions_definition(self) -> bool:
# Partition sets from older versions of Dagster as well as partition sets using
# a DynamicPartitionsDefinition require calling out to user code to get the
# partitions definition
return self._partition_set_snap.partitions is not None
def get_partitions_definition(self) -> PartitionsDefinition:
partitions_data = self._partition_set_snap.partitions
if partitions_data is None:
check.failed(
"Partition set does not have partition data, cannot get partitions definition"
)
return partitions_data.get_partitions_definition()
def get_partition_names(self, instance: DagsterInstance) -> Sequence[str]:
partitions = self._partition_set_snap.partitions
if partitions is None:
check.failed(
"Partition set does not have partition data, cannot get partitions definition"
)
return self.get_partitions_definition().get_partition_keys(
dynamic_partitions_store=instance
)
|
RemotePartitionSet
|
python
|
huggingface__transformers
|
src/transformers/models/glpn/image_processing_glpn.py
|
{
"start": 1518,
"end": 1886
}
|
class ____(ImagesKwargs, total=False):
"""
size_divisor (`int`, *optional*, defaults to 32):
When `do_resize` is `True`, images are resized so their height and width are rounded down to the closest
multiple of `size_divisor`.
"""
size_divisor: int
resample: PILImageResampling
@requires(backends=("vision",))
|
GLPNImageProcessorKwargs
|
python
|
facebook__pyre-check
|
scripts/tests/analyze_leaks_test.py
|
{
"start": 763,
"end": 32088
}
|
class ____(unittest.TestCase):
def test_load_pysa_call_graph_input_format(self) -> None:
json_call_graph: JSON = {
"my_module.my_function": [
"something_that.my_function_calls",
"builtins.print",
"my_module.my_function",
],
"something_that.my_function_calls": ["int.__str__"],
}
call_graph = PysaCallGraphInputFormat(json_call_graph)
result = call_graph.call_graph
self.assertEqual(len(result), 2)
self.assertSetEqual(result["something_that.my_function_calls"], {"int.__str__"})
self.assertSetEqual(
result["my_module.my_function"],
{"something_that.my_function_calls", "builtins.print"},
)
def test_load_pyre_call_graph_input_format(self) -> None:
json_call_graph: JSON = {
"my_module.my_function": [
{
"keys_we_dont_need": [1, 2, 3],
"target": "something_that.my_function_calls",
},
{"target": "builtins.print"},
{"direct_target": "my_module.my_function"},
],
"something_that.my_function_calls": [{"direct_target": "int.__str__"}],
}
call_graph = PyreCallGraphInputFormat(json_call_graph)
result = call_graph.call_graph
self.assertEqual(len(result), 2)
self.assertSetEqual(result["something_that.my_function_calls"], {"int.__str__"})
self.assertSetEqual(
result["my_module.my_function"],
{"something_that.my_function_calls", "builtins.print"},
)
def test_load_pyre_call_graph_input_format_with_response(self) -> None:
json_call_graph: JSON = {
"response": {
"my_module.my_function": [
{
"keys_we_dont_need": [1, 2, 3],
"target": "something_that.my_function_calls",
},
{"target": "builtins.print"},
{"direct_target": "my_module.my_function"},
],
"something_that.my_function_calls": [{"direct_target": "int.__str__"}],
}
}
call_graph = PyreCallGraphInputFormat(json_call_graph)
result = call_graph.call_graph
self.assertEqual(len(result), 2)
self.assertSetEqual(result["something_that.my_function_calls"], {"int.__str__"})
self.assertSetEqual(
result["my_module.my_function"],
{"something_that.my_function_calls", "builtins.print"},
)
def test_load_dynamic_call_graph_input_format(self) -> None:
json_call_graph: JSON = {
"my_module:my_function": [
"something.that:my_module.my_function.calls",
"something_else.that:my_module.<locals>.my_function.<locals>.calls",
],
"something.that:my_module.my_function.calls": [],
"something_else.that:my_module.<locals>.my_function.<locals>.calls": [
"another.function:with.<locals>.in_it"
],
"incorrectly.formatted_qualifier": [
"incorrectly.formatted_qualifier",
"another.incorrectly.formatted",
],
}
call_graph = DynamicCallGraphInputFormat(json_call_graph)
result = call_graph.call_graph
expected_call_graph = {
"my_module.my_function": {
"something.that.my_module.my_function.calls",
"something_else.that.my_module.my_function.calls",
},
"something.that.my_module.my_function.calls": set(),
"something_else.that.my_module.my_function.calls": {
"another.function.with.in_it"
},
"incorrectly.formatted_qualifier": {"another.incorrectly.formatted"},
}
self.assertEqual(result, expected_call_graph)
def test_union_call_graph(self) -> None:
call_graph_from_source_a: Dict[str, Set[str]] = {
"parent.function.one": {
"child_function.one",
"child_function.two",
},
"child_function.one": {"child_function.two"},
}
union_call_graph = UnionCallGraphFormat()
union_call_graph.union_call_graph(call_graph_from_source_a)
result = union_call_graph.call_graph
expected_call_graph: Dict[str, Set[str]] = call_graph_from_source_a
self.assertEqual(result, expected_call_graph)
call_graph_from_source_b: Dict[str, Set[str]] = {
"parent.function.one": {"child_function.four"},
"child_function.two": {"child_function.three"},
"child_function.invalidformat-": {"child_function.four"},
}
union_call_graph.union_call_graph(call_graph_from_source_b)
result = union_call_graph.call_graph
expected_call_graph = {
"parent.function.one": {
"child_function.one",
"child_function.two",
"child_function.four",
},
"child_function.one": {"child_function.two"},
"child_function.two": {"child_function.three"},
"child_function.invalidformat-": {"child_function.four"},
}
self.assertEqual(result, expected_call_graph)
def test_load_call_graph_bad_root(self) -> None:
call_graph: JSON = ["1234"]
with self.assertRaises(ValueError):
PyreCallGraphInputFormat(call_graph)
with self.assertRaises(ValueError):
PysaCallGraphInputFormat(call_graph)
def test_load_call_graph_bad_callers(self) -> None:
call_graph: JSON = {"caller": 1234}
with self.assertRaises(ValueError):
PyreCallGraphInputFormat(call_graph)
with self.assertRaises(ValueError):
PysaCallGraphInputFormat(call_graph)
def test_load_call_graph_bad_callees(self) -> None:
call_graph: JSON = {"caller": [1, 2, 3]}
with self.assertRaises(ValueError):
PyreCallGraphInputFormat(call_graph)
with self.assertRaises(ValueError):
PysaCallGraphInputFormat(call_graph)
def test_load_call_graph_bad_callees_dict_keys(self) -> None:
call_graph: JSON = {"caller": {"callee": 123}}
with self.assertRaises(ValueError):
PyreCallGraphInputFormat(call_graph)
with self.assertRaises(ValueError):
PysaCallGraphInputFormat(call_graph)
def test_load_call_graph_bad_callees_dict_target(self) -> None:
call_graph: JSON = {"caller": {"target": 123}}
with self.assertRaises(ValueError):
PyreCallGraphInputFormat(call_graph)
with self.assertRaises(ValueError):
PysaCallGraphInputFormat(call_graph)
def test_load_call_graph_bad_callees_dict_direct_target(self) -> None:
call_graph: JSON = {"caller": {"direct_target": 123}}
with self.assertRaises(ValueError):
PyreCallGraphInputFormat(call_graph)
with self.assertRaises(ValueError):
PysaCallGraphInputFormat(call_graph)
def test_create_dependency_graph(self) -> None:
call_graph_json: JSON = {
"parent.function.one": [
"child_function.one",
"child_function.two",
"child_function.three",
],
"parent.function.two": [
"child_function.one",
"child_function.two",
],
"child_function.one": ["child_function.two"],
}
input_format = PysaCallGraphInputFormat(call_graph_json)
expected_dependency_graph = {
"child_function.one": {
"parent.function.one",
"parent.function.two",
},
"child_function.two": {
"parent.function.one",
"parent.function.two",
"child_function.one",
},
"child_function.three": {
"parent.function.one",
},
}
actual_dependency_graph = DependencyGraph(input_format, Entrypoints([], set()))
self.assertSetEqual(
set(actual_dependency_graph.dependency_graph),
set(expected_dependency_graph),
)
for callee, expected_callers in expected_dependency_graph.items():
with self.subTest(f"Callee: {callee}"):
actual_callers = actual_dependency_graph.dependency_graph[callee]
self.assertSetEqual(actual_callers, expected_callers)
def test_find_trace_to_parent_simple_path(self) -> None:
input_format = PysaCallGraphInputFormat(
{
"function.start": ["function.one"],
"function.one": ["function.two", "unrelated.call"],
"function.two": ["function.three"],
"function.three": ["print"],
"unrelated.call": ["int"],
},
)
entrypoints = Entrypoints(["function.start"], input_format.get_keys())
dependency_graph = DependencyGraph(input_format, entrypoints)
trace = dependency_graph.find_shortest_trace_to_entrypoint("print")
self.assertIsNotNone(trace)
self.assertListEqual(
trace,
[
"print",
"function.three",
"function.two",
"function.one",
"function.start",
],
)
def test_find_trace_to_parent_no_path(self) -> None:
input_format = PysaCallGraphInputFormat(
{
"function.start": ["function.one"],
"function.one": ["function.two"],
"function.two": ["function.three"],
"function.three": ["print"],
},
)
entrypoints = Entrypoints(["function.one"], input_format.get_keys())
dependency_graph = DependencyGraph(input_format, entrypoints)
trace = dependency_graph.find_shortest_trace_to_entrypoint(
"this_function_does_not_exist",
)
self.assertIsNotNone(trace)
self.assertListEqual(trace, [])
def test_find_trace_to_parent_multi_path(self) -> None:
input_format = PysaCallGraphInputFormat(
{
"function.start": ["function.one"],
"function.one": ["function.two_a", "function.two_b"],
"function.two_a": ["function.three"],
"function.two_b": ["function.two_b.extra_call"],
"function.two_b.extra_call": ["function.three"],
"function.three": ["print"],
},
)
entrypoints = Entrypoints(["function.start"], input_format.get_keys())
dependency_graph = DependencyGraph(input_format, entrypoints)
trace = dependency_graph.find_shortest_trace_to_entrypoint("print")
self.assertIsNotNone(trace)
self.assertListEqual(
trace,
[
"print",
"function.three",
"function.two_a",
"function.one",
"function.start",
],
)
def test_find_trace_to_parent_incomplete_call_graph(self) -> None:
# this function tests a call graph whose keys are not fully represented
# in values and vice-versa
input_format = PysaCallGraphInputFormat(
{
"function.start": ["function.one"],
"function.one": ["function.two", "unrelated.call_1"],
"function.two": ["function.three"],
"function.three": ["print"],
"unrelated.call_2": ["int"],
},
)
entrypoints = Entrypoints(["function.start"], input_format.get_keys())
dependency_graph = DependencyGraph(input_format, entrypoints)
trace = dependency_graph.find_shortest_trace_to_entrypoint("print")
self.assertIsNotNone(trace)
self.assertListEqual(
trace,
[
"print",
"function.three",
"function.two",
"function.one",
"function.start",
],
)
def test_find_trace_to_parent_cycle_from_bottom(self) -> None:
input_format = PysaCallGraphInputFormat(
{
"function.start": ["function.one"],
"function.one": ["function.two", "unrelated.call_1"],
"function.two": ["function.three"],
"function.three": ["function.one", "print"],
"unrelated.call_2": ["int"],
},
)
entrypoints = Entrypoints(["function.start"], input_format.get_keys())
dependency_graph = DependencyGraph(input_format, entrypoints)
trace = dependency_graph.find_shortest_trace_to_entrypoint("print")
self.assertIsNotNone(trace)
self.assertListEqual(
trace,
[
"print",
"function.three",
"function.two",
"function.one",
"function.start",
],
)
def test_find_trace_to_parent_cycle_from_top(self) -> None:
input_format = PysaCallGraphInputFormat(
{
"function.start": ["function.one"],
"function.one": ["function.two", "unrelated.call_1"],
"function.two": ["function.three"],
"function.three": ["function.one", "print"],
"unrelated.call_2": ["int"],
},
)
entrypoints = Entrypoints(["function.start"], input_format.get_keys())
dependency_graph = DependencyGraph(input_format, entrypoints)
trace = dependency_graph.find_shortest_trace_to_entrypoint("function.one")
self.assertIsNotNone(trace)
self.assertListEqual(
trace,
["function.one", "function.start"],
)
def test_find_trace_to_parent_self_call(self) -> None:
input_format = PysaCallGraphInputFormat(
{
"function.start": ["function.one"],
"function.one": ["function.one", "function.two"],
"function.two": ["print"],
},
)
entrypoints = Entrypoints(["function.start"], input_format.get_keys())
dependency_graph = DependencyGraph(input_format, entrypoints)
trace = dependency_graph.find_shortest_trace_to_entrypoint("print")
self.assertIsNotNone(trace)
self.assertListEqual(
trace,
["print", "function.two", "function.one", "function.start"],
)
def test_find_trace_to_parent_start_is_entrypoint(self) -> None:
input_format = PysaCallGraphInputFormat(
{
"function.start": ["function.start"],
},
)
entrypoints = Entrypoints(["function.start"], input_format.get_keys())
dependency_graph = DependencyGraph(input_format, entrypoints)
trace = dependency_graph.find_shortest_trace_to_entrypoint("function.start")
self.assertIsNotNone(trace)
self.assertListEqual(
trace,
["function.start"],
)
def test_find_trace_to_parent_multiple_valid_entrypoints(self) -> None:
input_format = PysaCallGraphInputFormat(
{
"function.start": ["function.one"],
"function.one": ["function.two", "unrelated.call_1"],
"function.two": ["function.three"],
"function.three": ["function.one", "print"],
"unrelated.call_2": ["int"],
},
)
entrypoints = Entrypoints(
["function.start", "function.one"], input_format.get_keys()
)
dependency_graph = DependencyGraph(
input_format,
entrypoints,
)
trace = dependency_graph.find_shortest_trace_to_entrypoint("print")
self.assertIsNotNone(trace)
self.assertListEqual(
trace,
["print", "function.three", "function.two", "function.one"],
)
def test_validate_entrypoints_file_happy_path(self) -> None:
entrypoints_list: JSON = ["my.entrypoint.one", "doesnt.exist"]
input_format = PysaCallGraphInputFormat({"my.entrypoint.one": ["print"]})
entrypoints = Entrypoints(entrypoints_list, input_format.get_keys())
self.assertSetEqual(entrypoints.entrypoints, {"my.entrypoint.one"})
def test_validate_entrypoints_file_bad_root(self) -> None:
entrypoints_list: JSON = {"not_a_list": True}
with self.assertRaises(ValueError):
validate_json_list(entrypoints_list, "ENTRYPOINTS_FILE", "top-level")
def test_validate_entrypoints_file_bad_list_elements(self) -> None:
entrypoints_list: JSON = [True, 1]
with self.assertRaises(ValueError):
validate_json_list(entrypoints_list, "ENTRYPOINTS_FILE", "top-level")
def test_get_transitive_callees_empty(self) -> None:
entrypoints_list: JSON = []
input_format = PysaCallGraphInputFormat({"f1": ["f2", "f3"], "f2": ["f1"]})
entrypoints = Entrypoints(entrypoints_list, input_format.get_keys())
call_graph = CallGraph(input_format, entrypoints)
callees = call_graph.get_transitive_callees_and_traces()
self.assertEqual(callees, {})
def test_get_transitive_callees_f1(self) -> None:
entrypoints_list: JSON = ["f1"]
input_format = PysaCallGraphInputFormat(
{"f1": ["f2", "f3"], "f2": ["f1"], "f3": ["f3"]}
)
entrypoints = Entrypoints(entrypoints_list, input_format.get_keys())
call_graph = CallGraph(input_format, entrypoints)
callees = call_graph.get_transitive_callees_and_traces()
self.assertEqual(
callees, {"f1": ["f1"], "f2": ["f1", "f2"], "f3": ["f1", "f3"]}
)
def test_get_transitive_callees_f2(self) -> None:
entrypoints_list: JSON = ["f2"]
input_format = PysaCallGraphInputFormat(
{"f1": ["f2", "f3"], "f2": ["f1"], "f3": ["f3"]}
)
entrypoints = Entrypoints(entrypoints_list, input_format.get_keys())
call_graph = CallGraph(input_format, entrypoints)
callees = call_graph.get_transitive_callees_and_traces()
self.assertEqual(
callees, {"f1": ["f2", "f1"], "f2": ["f2"], "f3": ["f2", "f1", "f3"]}
)
def test_get_transitive_callees_f3(self) -> None:
entrypoints_list: JSON = ["f3"]
input_format = PysaCallGraphInputFormat(
{"f1": ["f2", "f3"], "f2": ["f1"], "f3": ["f3"]}
)
entrypoints = Entrypoints(entrypoints_list, input_format.get_keys())
call_graph = CallGraph(input_format, entrypoints)
callees = call_graph.get_transitive_callees_and_traces()
self.assertEqual(callees, {"f3": ["f3"]})
def test_get_transitive_callees_multiple(self) -> None:
entrypoints_list: JSON = ["f1", "f4"]
input_format = PysaCallGraphInputFormat(
{
"f1": ["f2", "f3"],
"f2": ["f1"],
"f3": ["f3"],
"f4": ["f5"],
"f5": ["print"],
"f6": [],
},
)
entrypoints = Entrypoints(entrypoints_list, input_format.get_keys())
call_graph = CallGraph(
input_format,
entrypoints,
)
callees = call_graph.get_transitive_callees_and_traces()
self.assertEqual(
callees,
{
"f1": ["f1"],
"f2": ["f1", "f2"],
"f3": ["f1", "f3"],
"f4": ["f4"],
"f5": ["f4", "f5"],
"print": ["f4", "f5", "print"],
},
)
def test_is_valid_callee(self) -> None:
self.assertTrue(is_valid_callee("f1"))
self.assertTrue(is_valid_callee("f1.f2.f3"))
self.assertFalse(is_valid_callee("11"))
self.assertFalse(is_valid_callee("-f1.f2"))
self.assertFalse(is_valid_callee("f1#f2"))
def test_prepare_issues_for_query(self) -> None:
callees = ["f1", "f2", "f3", "f1#f2", "f1.f2.f3", "11", "-f1.f2"]
valid_callees, invalid_callees = partition_valid_invalid_callees(callees)
self.assertListEqual(valid_callees, ["f1", "f2", "f3", "f1.f2.f3"])
self.assertListEqual(invalid_callees, ["f1#f2", "11", "-f1.f2"])
result_query = prepare_issues_for_query(valid_callees)
expected_query = "global_leaks(f1, f2, f3, f1.f2.f3)"
self.assertEqual(result_query, expected_query)
def test_collect_pyre_query_results(self) -> None:
example_pyre_stdout = {
"response": {
"query_errors": [
"we failed to find your callable",
"we failed to find your callable 2",
],
"global_leaks": [
{
"error_msg": "found an error for you",
"location": "your_location",
},
{
"error_msg": "found an error for you2",
"location": "your_location2",
"define": "my_func_with_trace",
},
],
"not_expected": 1,
},
}
results = collect_pyre_query_results(example_pyre_stdout, ["11"])
expected_results = LeakAnalysisResult(
global_leaks=[
{
"error_msg": "found an error for you",
"location": "your_location",
},
{
"error_msg": "found an error for you2",
"location": "your_location2",
"define": "my_func_with_trace",
},
],
query_errors=[
"we failed to find your callable",
"we failed to find your callable 2",
],
script_errors=[
LeakAnalysisScriptError(
error_message="Given callee is invalid",
bad_value="11",
)
],
)
self.assertEqual(results, expected_results)
def test_collect_pyre_query_results_non_json(self) -> None:
example_pyre_response = """
this is not a valid response
"""
with self.assertRaises(RuntimeError):
collect_pyre_query_results(example_pyre_response, [])
def test_collect_pyre_query_results_not_top_level_dict(self) -> None:
example_pyre_response = ["this is a list"]
with self.assertRaises(RuntimeError):
collect_pyre_query_results(example_pyre_response, [])
def test_collect_pyre_query_results_no_response_present(self) -> None:
example_pyre_response = {"not a response": 1}
with self.assertRaises(RuntimeError):
collect_pyre_query_results(example_pyre_response, [])
def test_collect_pyre_query_results_response_not_a_list(self) -> None:
example_pyre_response = {"response": 1}
with self.assertRaises(RuntimeError):
collect_pyre_query_results(example_pyre_response, [])
def test_collect_pyre_query_results_response_not_a_dict(
self,
) -> None:
example_pyre_response = {"response": [123]}
with self.assertRaises(RuntimeError):
collect_pyre_query_results(example_pyre_response, [])
def test_collect_pyre_query_results_response_no_nested_error_or_response(
self,
) -> None:
response_body: JSON = {"not_error": 123, "not_response": 456}
example_pyre_response = {"response": response_body}
results = collect_pyre_query_results(example_pyre_response, [])
expected_results = LeakAnalysisResult(
global_leaks=[],
query_errors=[],
script_errors=[
LeakAnalysisScriptError(
error_message="Expected `global_leaks` key to be present in response",
bad_value=response_body,
),
LeakAnalysisScriptError(
error_message="Expected `query_errors` key to be present in response",
bad_value=response_body,
),
],
)
self.assertEqual(results, expected_results)
def test_collect_pyre_query_results_response_wrong_global_leak_and_error_types(
self,
) -> None:
global_leaks: JSON = {"a": 1}
errors: JSON = 2
example_pyre_response = {
"response": {"global_leaks": global_leaks, "query_errors": errors}
}
results = collect_pyre_query_results(example_pyre_response, [])
expected_results = LeakAnalysisResult(
global_leaks=[],
query_errors=[],
script_errors=[
LeakAnalysisScriptError(
error_message="Expected `global_leaks` to be a list of error JSON objects",
bad_value=global_leaks,
),
LeakAnalysisScriptError(
error_message="Expected `query_errors` to be a list of error JSON objects",
bad_value=errors,
),
],
)
self.assertEqual(results, expected_results)
def test_attach_trace_to_query_results(self) -> None:
pyre_results = LeakAnalysisResult(
global_leaks=[
{
"error_msg": "found an error for you",
"location": "your_location",
},
{
"error_msg": "found an error for you2",
"location": "your_location2",
"define": "my_func_with_trace",
},
{
"error_msg": "found an error for you3",
"location": "your_location3",
"define": "my_func_without_trace",
},
],
query_errors=[
"we failed to find your callable",
"we failed to find your callable 2",
],
script_errors=[],
)
expected = LeakAnalysisResult(
global_leaks=cast(
List[Dict[str, JSON]],
[
{
"error_msg": "found an error for you",
"location": "your_location",
},
{
"error_msg": "found an error for you2",
"location": "your_location2",
"define": "my_func_with_trace",
"trace": ["func_1", "my_func_with_trace"],
},
{
"error_msg": "found an error for you3",
"location": "your_location3",
"define": "my_func_without_trace",
},
],
),
query_errors=[
"we failed to find your callable",
"we failed to find your callable 2",
],
script_errors=[
LeakAnalysisScriptError(
error_message="Key `define` not present in global leak result, skipping trace",
bad_value={
"error_msg": "found an error for you",
"location": "your_location",
},
),
LeakAnalysisScriptError(
error_message="Define not known in analyzed callables, skipping trace",
bad_value={
"error_msg": "found an error for you3",
"location": "your_location3",
"define": "my_func_without_trace",
},
),
],
)
callables_and_traces = {
"my_func_with_trace": ["func_1", "my_func_with_trace"],
}
self.assertNotEqual(pyre_results, expected)
attach_trace_to_query_results(pyre_results, callables_and_traces)
self.assertEqual(pyre_results, expected)
def assert_format_qualifier(self, input: str, expected: str) -> None:
self.assertEqual(DynamicCallGraphInputFormat.format_qualifier(input), expected)
def test_dynamic_call_graph_input_format_format_qualifier_1(self) -> None:
self.assert_format_qualifier(
"this_is.a_normal_qualifier",
"this_is.a_normal_qualifier",
)
def test_dynamic_call_graph_input_format_format_qualifier_2(self) -> None:
self.assert_format_qualifier(
"this.is_a.qualifier:with.an_included.path",
"this.is_a.qualifier.with.an_included.path",
)
def test_dynamic_call_graph_input_format_format_qualifier_3(self) -> None:
self.assert_format_qualifier(
"this_qualifier_is_probably_broken_but_its_ok",
"this_qualifier_is_probably_broken_but_its_ok",
)
def test_dynamic_call_graph_input_format_format_qualifier_4(self) -> None:
self.assert_format_qualifier(
"this_is.<locals>.a_normal_qualifier",
"this_is.a_normal_qualifier",
)
def test_dynamic_call_graph_input_format_format_qualifier_5(self) -> None:
self.assert_format_qualifier(
"this.is_a.qualifier:with.<locals>.an_included.path",
"this.is_a.qualifier.with.an_included.path",
)
def test_dynamic_call_graph_input_format_format_qualifier_6(self) -> None:
self.assert_format_qualifier(
"this.is:a.<locals>.qualifier.<locals>.with.<locals>.and_included.<locals>.path",
"this.is.a.qualifier.with.and_included.path",
)
def test_dynamic_call_graph_input_format_get_keys_extracts_caller(self) -> None:
input_format = DynamicCallGraphInputFormat(
{
"my_module:my_function": [
"something.that:my_module.my_function.calls",
"something_else.that:my_module.<locals>.my_function.<locals>.calls",
],
"something.that:my_module.my_function.calls": [],
"something_else.that:my_module.<locals>.my_function.<locals>.calls": [
"another.function:with.<locals>.in_it"
],
"incorrectly.formatted_qualifier": [
"incorrectly.formatted_qualifier",
"another.incorrectly.formatted",
],
}
)
expected = {
"my_module.my_function",
"something.that.my_module.my_function.calls",
"something_else.that.my_module.my_function.calls",
"incorrectly.formatted_qualifier",
}
self.assertEqual(input_format.get_keys(), expected)
|
AnalyzeIssueTraceTest
|
python
|
docker__docker-py
|
docker/models/images.py
|
{
"start": 4183,
"end": 6472
}
|
class ____(Model):
"""
Image metadata stored on the registry, including available platforms.
"""
def __init__(self, image_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.image_name = image_name
@property
def id(self):
"""
The ID of the object.
"""
return self.attrs['Descriptor']['digest']
@property
def short_id(self):
"""
The ID of the image truncated to 12 characters, plus the ``sha256:``
prefix.
"""
return self.id[:19]
def pull(self, platform=None):
"""
Pull the image digest.
Args:
platform (str): The platform to pull the image for.
Default: ``None``
Returns:
(:py:class:`Image`): A reference to the pulled image.
"""
repository, _ = parse_repository_tag(self.image_name)
return self.collection.pull(repository, tag=self.id, platform=platform)
def has_platform(self, platform):
"""
Check whether the given platform identifier is available for this
digest.
Args:
platform (str or dict): A string using the ``os[/arch[/variant]]``
format, or a platform dictionary.
Returns:
(bool): ``True`` if the platform is recognized as available,
``False`` otherwise.
Raises:
:py:class:`docker.errors.InvalidArgument`
If the platform argument is not a valid descriptor.
"""
if platform and not isinstance(platform, dict):
parts = platform.split('/')
if len(parts) > 3 or len(parts) < 1:
raise InvalidArgument(
f'"{platform}" is not a valid platform descriptor'
)
platform = {'os': parts[0]}
if len(parts) > 2:
platform['variant'] = parts[2]
if len(parts) > 1:
platform['architecture'] = parts[1]
return normalize_platform(
platform, self.client.version()
) in self.attrs['Platforms']
def reload(self):
self.attrs = self.client.api.inspect_distribution(self.image_name)
reload.__doc__ = Model.reload.__doc__
|
RegistryData
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/subscript4.py
|
{
"start": 233,
"end": 924
}
|
class ____(NamedTuple):
first: int
second: str
recorder_pair: Recorder[tuple[int, str]] = Recorder()
pair = IntStrPair(1, "value")
result1 = recorder_pair[*pair]
reveal_type(result1, expected_text="tuple[int, str]")
recorder_order: Recorder[tuple[int, str]] = Recorder()
tail_value: str = "tail"
result2 = recorder_order[*OneInt(2), tail_value]
reveal_type(result2, expected_text="tuple[int, str]")
recorder_multi: Recorder[tuple[int, *tuple[int | str, ...]]] = Recorder()
values1: list[int] = []
values2: list[str] = []
first_value: int = 0
result3 = recorder_multi[first_value, *values1, *values2]
reveal_type(result3, expected_text="tuple[int, *tuple[int | str, ...]]")
|
IntStrPair
|
python
|
Farama-Foundation__Gymnasium
|
tests/envs/registration/utils_envs.py
|
{
"start": 374,
"end": 808
}
|
class ____(gym.Env):
"""Environment that does not have human-rendering."""
observation_space = gym.spaces.Box(low=-1, high=1, shape=(1,))
action_space = gym.spaces.Box(low=-1, high=1, shape=(1,))
metadata = {"render_modes": ["rgb_array"], "render_fps": 4}
def __init__(self, render_mode: list[str] = None):
assert render_mode in self.metadata["render_modes"]
self.render_mode = render_mode
|
NoHuman
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/postgresql/hstore.py
|
{
"start": 645,
"end": 7511
}
|
class ____(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
"""Represent the PostgreSQL HSTORE type.
The :class:`.HSTORE` type stores dictionaries containing strings, e.g.::
data_table = Table(
"data_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", HSTORE),
)
with engine.connect() as conn:
conn.execute(
data_table.insert(), data={"key1": "value1", "key2": "value2"}
)
:class:`.HSTORE` provides for a wide range of operations, including:
* Index operations::
data_table.c.data["some key"] == "some value"
* Containment operations::
data_table.c.data.has_key("some key")
data_table.c.data.has_all(["one", "two", "three"])
* Concatenation::
data_table.c.data + {"k1": "v1"}
For a full list of special methods see
:class:`.HSTORE.comparator_factory`.
.. container:: topic
**Detecting Changes in HSTORE columns when using the ORM**
For usage with the SQLAlchemy ORM, it may be desirable to combine the
usage of :class:`.HSTORE` with :class:`.MutableDict` dictionary now
part of the :mod:`sqlalchemy.ext.mutable` extension. This extension
will allow "in-place" changes to the dictionary, e.g. addition of new
keys or replacement/removal of existing keys to/from the current
dictionary, to produce events which will be detected by the unit of
work::
from sqlalchemy.ext.mutable import MutableDict
class MyClass(Base):
__tablename__ = "data_table"
id = Column(Integer, primary_key=True)
data = Column(MutableDict.as_mutable(HSTORE))
my_object = session.query(MyClass).one()
# in-place mutation, requires Mutable extension
# in order for the ORM to detect
my_object.data["some_key"] = "some value"
session.commit()
When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM
will not be alerted to any changes to the contents of an existing
dictionary, unless that dictionary value is re-assigned to the
HSTORE-attribute itself, thus generating a change event.
.. seealso::
:class:`.hstore` - render the PostgreSQL ``hstore()`` function.
""" # noqa: E501
__visit_name__ = "HSTORE"
hashable = False
text_type = sqltypes.Text()
operator_classes = (
OperatorClass.BASE
| OperatorClass.CONTAINS
| OperatorClass.INDEXABLE
| OperatorClass.CONCATENABLE
)
def __init__(self, text_type=None):
"""Construct a new :class:`.HSTORE`.
:param text_type: the type that should be used for indexed values.
Defaults to :class:`_types.Text`.
"""
if text_type is not None:
self.text_type = text_type
class Comparator(
sqltypes.Indexable.Comparator, sqltypes.Concatenable.Comparator
):
"""Define comparison operations for :class:`.HSTORE`."""
def has_key(self, other):
"""Boolean expression. Test for presence of a key. Note that the
key may be a SQLA expression.
"""
return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
def has_all(self, other):
"""Boolean expression. Test for presence of all keys in jsonb"""
return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
def has_any(self, other):
"""Boolean expression. Test for presence of any key in jsonb"""
return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
def contains(self, other, **kwargs):
"""Boolean expression. Test if keys (or array) are a superset
of/contained the keys of the argument jsonb expression.
kwargs may be ignored by this operator but are required for API
conformance.
"""
return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other):
"""Boolean expression. Test if keys are a proper subset of the
keys of the argument jsonb expression.
"""
return self.operate(
CONTAINED_BY, other, result_type=sqltypes.Boolean
)
def _setup_getitem(self, index):
return GETITEM, index, self.type.text_type
def defined(self, key):
"""Boolean expression. Test for presence of a non-NULL value for
the key. Note that the key may be a SQLA expression.
"""
return _HStoreDefinedFunction(self.expr, key)
def delete(self, key):
"""HStore expression. Returns the contents of this hstore with the
given key deleted. Note that the key may be a SQLA expression.
"""
if isinstance(key, dict):
key = _serialize_hstore(key)
return _HStoreDeleteFunction(self.expr, key)
def slice(self, array):
"""HStore expression. Returns a subset of an hstore defined by
array of keys.
"""
return _HStoreSliceFunction(self.expr, array)
def keys(self):
"""Text array expression. Returns array of keys."""
return _HStoreKeysFunction(self.expr)
def vals(self):
"""Text array expression. Returns array of values."""
return _HStoreValsFunction(self.expr)
def array(self):
"""Text array expression. Returns array of alternating keys and
values.
"""
return _HStoreArrayFunction(self.expr)
def matrix(self):
"""Text array expression. Returns array of [key, value] pairs."""
return _HStoreMatrixFunction(self.expr)
comparator_factory = Comparator
def bind_processor(self, dialect):
# note that dialect-specific types like that of psycopg and
# psycopg2 will override this method to allow driver-level conversion
# instead, see _PsycopgHStore
def process(value):
if isinstance(value, dict):
return _serialize_hstore(value)
else:
return value
return process
def result_processor(self, dialect, coltype):
# note that dialect-specific types like that of psycopg and
# psycopg2 will override this method to allow driver-level conversion
# instead, see _PsycopgHStore
def process(value):
if value is not None:
return _parse_hstore(value)
else:
return value
return process
|
HSTORE
|
python
|
pydantic__pydantic
|
tests/mypy/outputs/mypy-default_ini/plugin_success_baseConfig.py
|
{
"start": 757,
"end": 1098
}
|
class ____(BaseModel, from_attributes=True):
x: float
y: str
class NotConfig:
frozen = True
kwargs_model = KwargsModel(x=1, y='y')
KwargsModel(x=1, y='y', z='z')
# MYPY: error: Unexpected keyword argument "z" for "KwargsModel" [call-arg]
kwargs_model.x = 2
kwargs_model.model_validate(kwargs_model.__dict__)
|
KwargsModel
|
python
|
agronholm__apscheduler
|
src/apscheduler/triggers/cron/__init__.py
|
{
"start": 574,
"end": 10181
}
|
class ____(Trigger):
"""
Triggers when current time matches all specified time constraints, similarly to how
the UNIX cron scheduler works.
:param year: 4-digit year
:param month: month (1-12)
:param day: day of the (1-31)
:param week: ISO week (1-53)
:param day_of_week: number or name of weekday (0-7 or sun,mon,tue,wed,thu,fri,sat,
sun)
:param hour: hour (0-23)
:param minute: minute (0-59)
:param second: second (0-59)
:param start_time: earliest possible date/time to trigger on (defaults to current
time)
:param end_time: latest possible date/time to trigger on
:param timezone: time zone to use for the date/time calculations
(defaults to the local timezone)
.. note:: The first weekday is always **monday**.
"""
FIELDS_MAP: ClassVar[list[tuple[str, type[BaseField]]]] = [
("year", BaseField),
("month", MonthField),
("day", DayOfMonthField),
("week", WeekField),
("day_of_week", DayOfWeekField),
("hour", BaseField),
("minute", BaseField),
("second", BaseField),
]
year: int | str | None = None
month: int | str | None = None
day: int | str | None = None
week: int | str | None = None
day_of_week: int | str | None = None
hour: int | str | None = None
minute: int | str | None = None
second: int | str | None = None
start_time: datetime = attrs.field(
converter=as_datetime,
validator=instance_of(datetime),
factory=datetime.now,
)
end_time: datetime | None = attrs.field(
converter=as_datetime,
validator=optional(instance_of(datetime)),
default=None,
)
timezone: tzinfo = attrs.field(
converter=as_timezone, validator=instance_of(tzinfo), factory=get_localzone
)
_fields: list[BaseField] = attrs.field(init=False, eq=False, factory=list)
_last_fire_time: datetime | None = attrs.field(
converter=as_aware_datetime, init=False, eq=False, default=None
)
def __attrs_post_init__(self) -> None:
self.start_time = self._to_trigger_timezone(self.start_time, "start_time")
self.end_time = self._to_trigger_timezone(self.end_time, "end_time")
self._set_fields(
[
self.year,
self.month,
self.day,
self.week,
self.day_of_week,
self.hour,
self.minute,
self.second,
]
)
def _set_fields(self, values: Sequence[int | str | None]) -> None:
self._fields = []
assigned_values = {
field_name: value
for (field_name, _), value in zip(self.FIELDS_MAP, values)
if value is not None
}
for field_name, field_class in self.FIELDS_MAP:
exprs = assigned_values.pop(field_name, None)
if exprs is None:
exprs = "*" if assigned_values else DEFAULT_VALUES[field_name]
field = field_class(field_name, exprs)
self._fields.append(field)
@classmethod
def from_crontab(
cls,
expr: str,
*,
start_time: datetime | None = None,
end_time: datetime | None = None,
timezone: tzinfo | str = "local",
) -> CronTrigger:
"""
Create a :class:`~CronTrigger` from a standard crontab expression.
See https://en.wikipedia.org/wiki/Cron for more information on the format
accepted here.
:param expr: minute, hour, day of month, month, day of week
:param start_time: earliest possible date/time to trigger on (defaults to current
time)
:param end_time: latest possible date/time to trigger on
:param timezone: time zone to use for the date/time calculations
(defaults to local timezone if omitted)
"""
values = expr.split()
if len(values) != 5:
raise ValueError(f"Wrong number of fields; got {len(values)}, expected 5")
return cls(
minute=values[0],
hour=values[1],
day=values[2],
month=values[3],
day_of_week=values[4],
start_time=start_time or datetime.now(),
end_time=end_time,
timezone=timezone,
)
def _to_trigger_timezone(self, dt: datetime | None, name: str) -> datetime | None:
if dt is None:
return None
if dt.tzinfo is None:
dt = dt.replace(tzinfo=self.timezone)
else:
dt = dt.astimezone(self.timezone)
if not time_exists(dt):
raise ValueError(f"{name}={dt} does not exist")
return dt
def _increment_field_value(
self, dateval: datetime, fieldnum: int
) -> tuple[datetime, int]:
"""
Increments the designated field and resets all less significant fields to their
minimum values.
:return: a tuple containing the new date, and the number of the field that was
actually incremented
"""
values = {}
i = 0
while i < len(self._fields):
field = self._fields[i]
if not field.real:
if i == fieldnum:
fieldnum -= 1
i -= 1
else:
i += 1
continue
if i < fieldnum:
values[field.name] = field.get_value(dateval)
i += 1
elif i > fieldnum:
values[field.name] = field.get_min(dateval)
i += 1
else:
value = field.get_value(dateval)
maxval = field.get_max(dateval)
if value == maxval:
fieldnum -= 1
i -= 1
else:
values[field.name] = value + 1
i += 1
difference = datetime(**values) - dateval.replace(tzinfo=None)
dateval = datetime.fromtimestamp(
dateval.timestamp() + difference.total_seconds(), self.timezone
)
return dateval, fieldnum
def _set_field_value(
self, dateval: datetime, fieldnum: int, new_value: int
) -> datetime:
values = {}
for i, field in enumerate(self._fields):
if field.real:
if i < fieldnum:
values[field.name] = field.get_value(dateval)
elif i > fieldnum:
values[field.name] = field.get_min(dateval)
else:
values[field.name] = new_value
return datetime(**values, tzinfo=self.timezone, fold=dateval.fold)
def next(self) -> datetime | None:
if self._last_fire_time:
next_time = datetime.fromtimestamp(
self._last_fire_time.timestamp() + 1, self.timezone
)
else:
next_time = self.start_time
fieldnum = 0
while 0 <= fieldnum < len(self._fields):
field = self._fields[fieldnum]
curr_value = field.get_value(next_time)
next_value = field.get_next_value(next_time)
if next_value is None:
# No valid value was found
next_time, fieldnum = self._increment_field_value(
next_time, fieldnum - 1
)
elif next_value > curr_value:
# A valid, but higher than the starting value, was found
if field.real:
next_time = self._set_field_value(next_time, fieldnum, next_value)
if time_exists(next_time):
fieldnum += 1
else:
# skip non-existent date
next_time, fieldnum = self._increment_field_value(
next_time, fieldnum
)
else:
next_time, fieldnum = self._increment_field_value(
next_time, fieldnum
)
else:
# A valid value was found, no changes necessary
fieldnum += 1
# Return if the date has rolled past the end date
if self.end_time and next_time > self.end_time:
return None
if fieldnum >= 0:
self._last_fire_time = next_time
return next_time
return None
def __getstate__(self) -> dict[str, Any]:
return {
"version": 1,
"timezone": self.timezone,
"fields": [str(f) for f in self._fields],
"start_time": self.start_time,
"end_time": self.end_time,
"last_fire_time": self._last_fire_time,
}
def __setstate__(self, state: dict[str, Any]) -> None:
require_state_version(self, state, 1)
self.timezone = state["timezone"]
self.start_time = state["start_time"]
self.end_time = state["end_time"]
self._last_fire_time = state["last_fire_time"]
self._set_fields(state["fields"])
def __repr__(self) -> str:
fields = [f"{field.name}={str(field)!r}" for field in self._fields]
fields.append(f"start_time={self.start_time.isoformat()!r}")
if self.end_time:
fields.append(f"end_time={self.end_time.isoformat()!r}")
fields.append(f"timezone={timezone_repr(self.timezone)!r}")
return f"CronTrigger({', '.join(fields)})"
|
CronTrigger
|
python
|
facelessuser__pymdown-extensions
|
tests/test_extensions/test_superfences.py
|
{
"start": 6342,
"end": 7211
}
|
class ____(util.MdCase):
"""Test highlight line wraps."""
extension = ['pymdownx.highlight', 'pymdownx.superfences']
extension_configs = {
'pymdownx.highlight': {
'line_spans': '__my_span',
'linenums_style': 'pymdownx-inline'
}
}
def test_linespans(self):
"""Test wrapping a line in line spans."""
self.check_markdown(
r'''
```python linenums="2"
import test
```
''',
r'''
<div class="highlight"><pre><span></span><code><span id="__my_span-0-2"><span class="linenos" data-linenos="2 "></span><span class="kn">import</span><span class="w"> </span><span class="nn">test</span>
</span></code></pre></div>
''', # noqa: E501
True
)
|
TestHighlightLineWrapsPymdownxInline
|
python
|
MongoEngine__mongoengine
|
mongoengine/queryset/manager.py
|
{
"start": 135,
"end": 2222
}
|
class ____:
"""
The default QuerySet Manager.
Custom QuerySet Manager functions can extend this class and users can
add extra queryset functionality. Any custom manager methods must accept a
:class:`~mongoengine.Document` class as its first argument, and a
:class:`~mongoengine.queryset.QuerySet` as its second argument.
The method function should return a :class:`~mongoengine.queryset.QuerySet`
, probably the same one that was passed in, but modified in some way.
"""
get_queryset = None
default = QuerySet
def __init__(self, queryset_func=None):
if queryset_func:
self.get_queryset = queryset_func
def __get__(self, instance, owner):
"""Descriptor for instantiating a new QuerySet object when
Document.objects is accessed.
"""
if instance is not None:
# Document object being used rather than a document class
return self
# owner is the document that contains the QuerySetManager
queryset_class = owner._meta.get("queryset_class", self.default)
queryset = queryset_class(owner, owner._get_collection())
if self.get_queryset:
arg_count = self.get_queryset.__code__.co_argcount
if arg_count == 1:
queryset = self.get_queryset(queryset)
elif arg_count == 2:
queryset = self.get_queryset(owner, queryset)
else:
queryset = partial(self.get_queryset, owner, queryset)
return queryset
def queryset_manager(func):
"""Decorator that allows you to define custom QuerySet managers on
:class:`~mongoengine.Document` classes. The manager must be a function that
accepts a :class:`~mongoengine.Document` class as its first argument, and a
:class:`~mongoengine.queryset.QuerySet` as its second argument. The method
function should return a :class:`~mongoengine.queryset.QuerySet`, probably
the same one that was passed in, but modified in some way.
"""
return QuerySetManager(func)
|
QuerySetManager
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_header_image08.py
|
{
"start": 315,
"end": 1115
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("header_image08.xlsx")
self.ignore_elements = {
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"]
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write("A1", "Foo")
worksheet.write_comment("B2", "Some text")
worksheet.set_comments_author("John")
worksheet.set_header("&L&G", {"image_left": self.image_dir + "red.jpg"})
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
doocs__leetcode
|
solution/1900-1999/1959.Minimum Total Space Wasted With K Resizing Operations/Solution.py
|
{
"start": 0,
"end": 636
}
|
class ____:
def minSpaceWastedKResizing(self, nums: List[int], k: int) -> int:
k += 1
n = len(nums)
g = [[0] * n for _ in range(n)]
for i in range(n):
s = mx = 0
for j in range(i, n):
s += nums[j]
mx = max(mx, nums[j])
g[i][j] = mx * (j - i + 1) - s
f = [[inf] * (k + 1) for _ in range(n + 1)]
f[0][0] = 0
for i in range(1, n + 1):
for j in range(1, k + 1):
for h in range(i):
f[i][j] = min(f[i][j], f[h][j - 1] + g[h][i - 1])
return f[-1][-1]
|
Solution
|
python
|
tiangolo__fastapi
|
tests/test_dependency_duplicates.py
|
{
"start": 212,
"end": 8577
}
|
class ____(BaseModel):
data: str
def duplicate_dependency(item: Item):
return item
def dependency(item2: Item):
return item2
def sub_duplicate_dependency(
item: Item, sub_item: Item = Depends(duplicate_dependency)
):
return [item, sub_item]
@app.post("/with-duplicates")
async def with_duplicates(item: Item, item2: Item = Depends(duplicate_dependency)):
return [item, item2]
@app.post("/no-duplicates")
async def no_duplicates(item: Item, item2: Item = Depends(dependency)):
return [item, item2]
@app.post("/with-duplicates-sub")
async def no_duplicates_sub(
item: Item, sub_items: List[Item] = Depends(sub_duplicate_dependency)
):
return [item, sub_items]
def test_no_duplicates_invalid():
response = client.post("/no-duplicates", json={"item": {"data": "myitem"}})
assert response.status_code == 422, response.text
assert response.json() == IsDict(
{
"detail": [
{
"type": "missing",
"loc": ["body", "item2"],
"msg": "Field required",
"input": None,
}
]
}
) | IsDict(
# TODO: remove when deprecating Pydantic v1
{
"detail": [
{
"loc": ["body", "item2"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
)
def test_no_duplicates():
response = client.post(
"/no-duplicates",
json={"item": {"data": "myitem"}, "item2": {"data": "myitem2"}},
)
assert response.status_code == 200, response.text
assert response.json() == [{"data": "myitem"}, {"data": "myitem2"}]
def test_duplicates():
response = client.post("/with-duplicates", json={"data": "myitem"})
assert response.status_code == 200, response.text
assert response.json() == [{"data": "myitem"}, {"data": "myitem"}]
def test_sub_duplicates():
response = client.post("/with-duplicates-sub", json={"data": "myitem"})
assert response.status_code == 200, response.text
assert response.json() == [
{"data": "myitem"},
[{"data": "myitem"}, {"data": "myitem"}],
]
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/with-duplicates": {
"post": {
"summary": "With Duplicates",
"operationId": "with_duplicates_with_duplicates_post",
"requestBody": {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Item"}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/no-duplicates": {
"post": {
"summary": "No Duplicates",
"operationId": "no_duplicates_no_duplicates_post",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_no_duplicates_no_duplicates_post"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/with-duplicates-sub": {
"post": {
"summary": "No Duplicates Sub",
"operationId": "no_duplicates_sub_with_duplicates_sub_post",
"requestBody": {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Item"}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_no_duplicates_no_duplicates_post": {
"title": "Body_no_duplicates_no_duplicates_post",
"required": ["item", "item2"],
"type": "object",
"properties": {
"item": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"Item": {
"title": "Item",
"required": ["data"],
"type": "object",
"properties": {"data": {"title": "Data", "type": "string"}},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
|
Item
|
python
|
huggingface__transformers
|
src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
|
{
"start": 31230,
"end": 33393
}
|
class ____(nn.Module):
"""
A class to patchify the time series sequence into different patches
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.sequence_length = config.context_length
self.patch_length = config.patch_length
self.patch_stride = config.patch_stride
if self.sequence_length <= self.patch_length:
raise ValueError(
f"Sequence length ({self.sequence_length}) has to be greater than the patch length ({self.patch_length})"
)
# get the number of patches
self.num_patches = (max(self.sequence_length, self.patch_length) - self.patch_length) // self.patch_stride + 1
new_sequence_length = self.patch_length + self.patch_stride * (self.num_patches - 1)
self.sequence_start = self.sequence_length - new_sequence_length
def forward(self, past_values: torch.Tensor):
"""
Parameters:
past_values (`torch.Tensor` of shape `(batch_size, sequence_length, num_channels)`, *required*):
Input for patchification
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`
"""
sequence_length = past_values.shape[-2]
if sequence_length != self.sequence_length:
raise ValueError(
f"Input sequence length ({sequence_length}) doesn't match model configuration ({self.sequence_length})."
)
# output: [bs x new_sequence_length x num_channels]
output = past_values[:, self.sequence_start :, :]
# output: [bs x num_patches x num_input_channels x patch_length]
output = output.unfold(dimension=-2, size=self.patch_length, step=self.patch_stride)
# output: [bs x num_input_channels x num_patches x patch_length]
output = output.transpose(-2, -3).contiguous()
return output
# Copied from transformers.models.patchtst.modeling_patchtst.PatchTSTMasking with PatchTST->PatchTSMixer
|
PatchTSMixerPatchify
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/commands/sanity/yamllint.py
|
{
"start": 597,
"end": 3424
}
|
class ____(SanitySingleVersion):
"""Sanity test using yamllint."""
@property
def error_code(self) -> t.Optional[str]:
"""Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
return 'ansible-test'
@property
def require_libyaml(self) -> bool:
"""True if the test requires PyYAML to have libyaml support."""
return True
def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
"""Return the given list of test targets, filtered to include only those relevant for the test."""
yaml_targets = [target for target in targets if os.path.splitext(target.path)[1] in ('.yml', '.yaml')]
for plugin_type, plugin_path in sorted(data_context().content.plugin_paths.items()):
if plugin_type == 'module_utils':
continue
yaml_targets.extend([target for target in targets if
os.path.splitext(target.path)[1] == '.py' and
os.path.basename(target.path) != '__init__.py' and
is_subdir(target.path, plugin_path)])
return yaml_targets
def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
settings = self.load_processor(args)
paths = [target.path for target in targets.include]
results = self.test_paths(args, paths, python)
results = settings.process_errors(results, paths)
if results:
return SanityFailure(self.name, messages=results)
return SanitySuccess(self.name)
@staticmethod
def test_paths(args: SanityConfig, paths: list[str], python: PythonConfig) -> list[SanityMessage]:
"""Test the specified paths using the given Python and return the results."""
cmd = [
python.path,
os.path.join(SANITY_ROOT, 'yamllint', 'yamllinter.py'),
]
data = '\n'.join(paths)
display.info(data, verbosity=4)
try:
stdout, stderr = run_command(args, cmd, data=data, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stderr:
raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
if args.explain:
return []
results = json.loads(stdout)['messages']
results = [SanityMessage(
code=r['code'],
message=r['message'],
path=r['path'],
line=int(r['line']),
column=int(r['column']),
level=r['level'],
) for r in results]
return results
|
YamllintTest
|
python
|
celery__celery
|
t/unit/worker/test_components.py
|
{
"start": 520,
"end": 1291
}
|
class ____:
def setup_method(self):
self.w = Mock(name='w')
self.hub = Hub(self.w)
self.w.hub = Mock(name='w.hub')
@patch('celery.worker.components.set_event_loop')
@patch('celery.worker.components.get_event_loop')
def test_create(self, get_event_loop, set_event_loop):
self.hub._patch_thread_primitives = Mock(name='ptp')
assert self.hub.create(self.w) is self.hub
self.hub._patch_thread_primitives.assert_called_with(self.w)
def test_start(self):
self.hub.start(self.w)
def test_stop(self):
self.hub.stop(self.w)
self.w.hub.close.assert_called_with()
def test_terminate(self):
self.hub.terminate(self.w)
self.w.hub.close.assert_called_with()
|
test_Hub
|
python
|
doocs__leetcode
|
solution/0700-0799/0746.Min Cost Climbing Stairs/Solution2.py
|
{
"start": 0,
"end": 250
}
|
class ____:
def minCostClimbingStairs(self, cost: List[int]) -> int:
n = len(cost)
f = [0] * (n + 1)
for i in range(2, n + 1):
f[i] = min(f[i - 2] + cost[i - 2], f[i - 1] + cost[i - 1])
return f[n]
|
Solution
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 433598,
"end": 437378
}
|
class ____(VegaLiteSchema):
"""
GenericUnitSpecEncodingAnyMark schema wrapper.
Base interface for a unit (single-view) specification.
Parameters
----------
mark : dict, :class:`Mark`, :class:`AnyMark`, :class:`BoxPlot`, :class:`MarkDef`, :class:`ErrorBar`, :class:`ErrorBand`, :class:`BoxPlotDef`, :class:`ErrorBarDef`, :class:`ErrorBandDef`, :class:`CompositeMark`, :class:`CompositeMarkDef`, Literal['arc', 'area', 'bar', 'image', 'line', 'point', 'rect', 'rule', 'text', 'tick', 'trail', 'circle', 'square', 'geoshape', 'boxplot', 'errorband', 'errorbar']
A string describing the mark type (one of ``"bar"``, ``"circle"``, ``"square"``,
``"tick"``, ``"line"``, ``"area"``, ``"point"``, ``"rule"``, ``"geoshape"``, and
``"text"``) or a `mark definition object
<https://vega.github.io/vega-lite/docs/mark.html#mark-def>`__.
data : dict, :class:`Data`, :class:`UrlData`, :class:`Generator`, :class:`NamedData`, :class:`DataSource`, :class:`InlineData`, :class:`SphereGenerator`, :class:`SequenceGenerator`, :class:`GraticuleGenerator`, None
An object describing the data source. Set to ``null`` to ignore the parent's data
source. If no data is set, it is derived from the parent.
description : str
Description of this mark for commenting purpose.
encoding : dict, :class:`Encoding`
A key-value mapping between encoding channels and definition of fields.
name : str
Name of the visualization for later reference.
params : Sequence[dict, :class:`SelectionParameter`]
An array of parameters that may either be simple variables, or more complex
selections that map user input to data queries.
projection : dict, :class:`Projection`
An object defining properties of geographic projection, which will be applied to
``shape`` path for ``"geoshape"`` marks and to ``latitude`` and ``"longitude"``
channels for other marks.
title : str, dict, :class:`Text`, Sequence[str], :class:`TitleParams`
Title for the plot.
transform : Sequence[dict, :class:`Transform`, :class:`BinTransform`, :class:`FoldTransform`, :class:`LoessTransform`, :class:`PivotTransform`, :class:`StackTransform`, :class:`ExtentTransform`, :class:`FilterTransform`, :class:`ImputeTransform`, :class:`LookupTransform`, :class:`SampleTransform`, :class:`WindowTransform`, :class:`DensityTransform`, :class:`FlattenTransform`, :class:`QuantileTransform`, :class:`TimeUnitTransform`, :class:`AggregateTransform`, :class:`CalculateTransform`, :class:`RegressionTransform`, :class:`JoinAggregateTransform`]
An array of data transformations such as filter and new field calculation.
"""
_schema = {"$ref": "#/definitions/GenericUnitSpec<Encoding,AnyMark>"}
def __init__(
self,
mark: Optional[SchemaBase | Map | Mark_T | CompositeMark_T] = Undefined,
data: Optional[SchemaBase | ChartDataType | Map | None] = Undefined,
description: Optional[str] = Undefined,
encoding: Optional[SchemaBase | Map] = Undefined,
name: Optional[str] = Undefined,
params: Optional[Sequence[SchemaBase | Map]] = Undefined,
projection: Optional[SchemaBase | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
transform: Optional[Sequence[SchemaBase | Map]] = Undefined,
**kwds,
):
super().__init__(
mark=mark,
data=data,
description=description,
encoding=encoding,
name=name,
params=params,
projection=projection,
title=title,
transform=transform,
**kwds,
)
|
GenericUnitSpecEncodingAnyMark
|
python
|
pytorch__pytorch
|
test/distributed/_composable/test_checkpoint.py
|
{
"start": 1390,
"end": 1702
}
|
class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.l1 = nn.Linear(100, 100)
self.seq = nn.Sequential(
nn.ReLU(),
nn.Linear(100, 100),
nn.ReLU(),
)
def forward(self, x):
return self.seq(self.l1(x))
|
ToyModel
|
python
|
simonw__datasette
|
datasette/utils/asgi.py
|
{
"start": 1209,
"end": 1301
}
|
class ____(Base400):
status = 400
SAMESITE_VALUES = ("strict", "lax", "none")
|
BadRequest
|
python
|
EpistasisLab__tpot
|
tpot/search_spaces/pipelines/union.py
|
{
"start": 1725,
"end": 3854
}
|
class ____(SklearnIndividual):
"""
Takes in a list of search spaces. each space is a list of SearchSpaces.
Will produce a FeatureUnion pipeline. Each step in the pipeline will correspond to the the search space provided in the same index.
The resulting pipeline will be a FeatureUnion of the steps in the pipeline.
"""
def __init__(self, search_spaces : List[SearchSpace], rng=None) -> None:
super().__init__()
self.search_spaces = search_spaces
self.pipeline = []
for space in self.search_spaces:
self.pipeline.append(space.generate(rng))
def mutate(self, rng=None):
rng = np.random.default_rng(rng)
step = rng.choice(self.pipeline)
return step.mutate(rng)
def crossover(self, other, rng=None):
#swap a random step in the pipeline with the corresponding step in the other pipeline
rng = np.random.default_rng(rng)
cx_funcs = [self._crossover_node, self._crossover_swap_node]
rng.shuffle(cx_funcs)
for cx_func in cx_funcs:
if cx_func(other, rng):
return True
return False
def _crossover_swap_node(self, other, rng):
rng = np.random.default_rng(rng)
idx = rng.integers(1,len(self.pipeline))
self.pipeline[idx], other.pipeline[idx] = other.pipeline[idx], self.pipeline[idx]
return True
def _crossover_node(self, other, rng):
rng = np.random.default_rng(rng)
crossover_success = False
for idx in range(len(self.pipeline)):
if rng.random() < 0.5:
if self.pipeline[idx].crossover(other.pipeline[idx], rng):
crossover_success = True
return crossover_success
def export_pipeline(self, **kwargs):
return sklearn.pipeline.make_union(*[step.export_pipeline(**kwargs) for step in self.pipeline])
def unique_id(self):
l = [step.unique_id() for step in self.pipeline]
l = ["FeatureUnion"] + l
return TupleIndex(tuple(l))
|
UnionPipelineIndividual
|
python
|
apache__airflow
|
task-sdk/tests/task_sdk/bases/test_sensor.py
|
{
"start": 25150,
"end": 26530
}
|
class ____:
def test_poke_mode_only_allows_poke_mode(self):
try:
sensor = DummyPokeOnlySensor(task_id="foo", mode="poke", poke_changes_mode=False)
except ValueError:
self.fail("__init__ failed with mode='poke'.")
try:
sensor.poke({})
except ValueError:
self.fail("poke failed without changing mode from 'poke'.")
try:
sensor.change_mode("poke")
except ValueError:
self.fail("class method failed without changing mode from 'poke'.")
def test_poke_mode_only_bad_class_method(self):
sensor = DummyPokeOnlySensor(task_id="foo", mode="poke", poke_changes_mode=False)
with pytest.raises(ValueError, match="Cannot set mode to 'reschedule'. Only 'poke' is acceptable"):
sensor.change_mode("reschedule")
def test_poke_mode_only_bad_init(self):
with pytest.raises(ValueError, match="Cannot set mode to 'reschedule'. Only 'poke' is acceptable"):
DummyPokeOnlySensor(task_id="foo", mode="reschedule", poke_changes_mode=False)
def test_poke_mode_only_bad_poke(self):
sensor = DummyPokeOnlySensor(task_id="foo", mode="poke", poke_changes_mode=True)
with pytest.raises(ValueError, match="Cannot set mode to 'reschedule'. Only 'poke' is acceptable"):
sensor.poke({})
|
TestPokeModeOnly
|
python
|
django__django
|
tests/admin_views/admin.py
|
{
"start": 37615,
"end": 37984
}
|
class ____(admin.ModelAdmin):
list_display = (
"content",
"date",
callable_year,
"model_year",
"modeladmin_year",
"model_year_reversed",
"section",
)
sortable_by = ("date", callable_year)
@admin.display(ordering="date")
def modeladmin_year(self, obj):
return obj.date.year
|
ArticleAdmin6
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/web/server/server_test.py
|
{
"start": 22969,
"end": 24384
}
|
class ____(tornado.testing.AsyncHTTPTestCase):
async def does_script_run_without_error(self):
return True, "test_message"
def setUp(self):
self._old_config = config.get_option("server.scriptHealthCheckEnabled")
config._set_option("server.scriptHealthCheckEnabled", True, "test")
super().setUp()
def tearDown(self):
config._set_option("server.scriptHealthCheckEnabled", self._old_config, "test")
Runtime._instance = None
super().tearDown()
def get_app(self):
server = Server("mock/script/path", is_hello=False)
server._runtime.does_script_run_without_error = (
self.does_script_run_without_error
)
server._runtime._eventloop = self.io_loop.asyncio_loop
return server._create_app()
def test_endpoint(self):
response = self.fetch("/_stcore/script-health-check")
assert response.code == 200
assert response.body == b"test_message"
def test_deprecated_endpoint(self):
response = self.fetch("/script-health-check")
assert response.code == 200
assert response.body == b"test_message"
assert (
response.headers["link"]
== f'<http://127.0.0.1:{self.get_http_port()}/_stcore/script-health-check>; rel="alternate"'
)
assert response.headers["deprecation"] == "True"
|
ScriptCheckEndpointExistsTest
|
python
|
pandas-dev__pandas
|
pandas/tseries/holiday.py
|
{
"start": 13437,
"end": 13659
}
|
class ____(type):
def __new__(cls, clsname: str, bases, attrs):
calendar_class = super().__new__(cls, clsname, bases, attrs)
register(calendar_class)
return calendar_class
|
HolidayCalendarMetaClass
|
python
|
django-compressor__django-compressor
|
compressor/tests/test_offline.py
|
{
"start": 30158,
"end": 30464
}
|
class ____(
SuperMixin, OfflineTestCaseMixin, TestCase
):
"""
Test that templates extending templates using relative paths
(e.g. ./base.html) are evaluated correctly
"""
templates_dir = "test_extends_relative"
expected_hash = "817b5defb197"
|
OfflineCompressExtendsRelativeTestCase
|
python
|
pyca__cryptography
|
tests/hazmat/asn1/test_serialization.py
|
{
"start": 4601,
"end": 6554
}
|
class ____:
def test_fail_generalized_time_precision(self) -> None:
with pytest.raises(
ValueError,
match="decoded GeneralizedTime data has higher precision than "
"supported",
):
asn1.decode_der(
asn1.GeneralizedTime, b"\x18\x1719990101000000.1234567Z"
)
def test_generalized_time(self) -> None:
assert_roundtrips(
[
(
asn1.GeneralizedTime(
datetime.datetime(
2019,
12,
16,
3,
2,
10,
tzinfo=datetime.timezone.utc,
)
),
b"\x18\x0f20191216030210Z",
),
(
asn1.GeneralizedTime(
datetime.datetime(
1999,
1,
1,
0,
0,
0,
microsecond=500000, # half a second
tzinfo=datetime.timezone.utc,
)
),
b"\x18\x1119990101000000.5Z",
),
(
asn1.GeneralizedTime(
datetime.datetime(
2050,
6,
15,
14,
22,
33,
tzinfo=datetime.timezone.utc,
)
),
b"\x18\x0f20500615142233Z",
),
],
)
|
TestGeneralizedTime
|
python
|
pytorch__pytorch
|
torch/testing/_internal/common_quantization.py
|
{
"start": 101486,
"end": 102336
}
|
class ____(nn.Module):
def __init__(
self, dense_dim, dense_out, embedding_dim, top_out_in, top_out_out
) -> None:
super().__init__()
self.dense_mlp = nn.Sequential(
nn.Linear(dense_dim, dense_out),
)
self.top_mlp = nn.Sequential(
nn.Linear(dense_out + embedding_dim, top_out_in),
nn.Linear(top_out_in, top_out_out),
)
def forward(
self,
sparse_feature: torch.Tensor,
dense: torch.Tensor,
) -> torch.Tensor:
dense_feature = self.dense_mlp(dense)
features = torch.cat([dense_feature] + [sparse_feature], dim=1)
out = self.top_mlp(features)
return out
# thin wrapper around embedding bag, because tracing inside nn.Embedding
# bag is not supported at the moment and this is top level
|
DenseTopMLP
|
python
|
numba__numba
|
numba/tests/test_parfors.py
|
{
"start": 90140,
"end": 91868
}
|
class ____(TestParforsBase):
def test_parfor_options(self):
def test_impl(a):
n = a.shape[0]
b = np.ones(n)
c = np.array([ i for i in range(n) ])
b[:n] = a + b * c
for i in prange(n):
c[i] = b[i] * a[i]
return reduce(lambda x,y:x+y, c, 0)
self.check(test_impl, np.ones(10))
args = (numba.float64[:],)
# everything should fuse with default option
self.assertEqual(countParfors(test_impl, args), 1)
# with no fusion
self.assertEqual(countParfors(test_impl, args, fusion=False), 6)
# with no fusion, comprehension
self.assertEqual(countParfors(test_impl, args, fusion=False,
comprehension=False), 5)
#with no fusion, comprehension, setitem
self.assertEqual(countParfors(test_impl, args, fusion=False,
comprehension=False, setitem=False), 4)
# with no fusion, comprehension, prange
self.assertEqual(countParfors(test_impl, args, fusion=False,
comprehension=False, setitem=False, prange=False), 3)
# with no fusion, comprehension, prange, reduction
self.assertEqual(countParfors(test_impl, args, fusion=False,
comprehension=False, setitem=False, prange=False,
reduction=False), 2)
# with no fusion, comprehension, prange, reduction, numpy
self.assertEqual(countParfors(test_impl, args, fusion=False,
comprehension=False, setitem=False, prange=False,
reduction=False, numpy=False), 0)
@skip_parfors_unsupported
|
TestParforsOptions
|
python
|
Textualize__textual
|
src/textual/events.py
|
{
"start": 17158,
"end": 19221
}
|
class ____(MouseEvent, bubble=True):
"""Sent when a widget is clicked.
- [X] Bubbles
- [ ] Verbose
Args:
chain: The number of clicks in the chain. 2 is a double click, 3 is a triple click, etc.
"""
def __init__(
self,
widget: Widget | None,
x: int,
y: int,
delta_x: int,
delta_y: int,
button: int,
shift: bool,
meta: bool,
ctrl: bool,
screen_x: int | None = None,
screen_y: int | None = None,
style: Style | None = None,
chain: int = 1,
) -> None:
super().__init__(
widget,
x,
y,
delta_x,
delta_y,
button,
shift,
meta,
ctrl,
screen_x,
screen_y,
style,
)
self.chain = chain
@classmethod
def from_event(
cls: Type[Self],
widget: Widget,
event: MouseEvent,
chain: int = 1,
) -> Self:
new_event = cls(
widget,
event.x,
event.y,
event.delta_x,
event.delta_y,
event.button,
event.shift,
event.meta,
event.ctrl,
event.screen_x,
event.screen_y,
event._style,
chain=chain,
)
return new_event
def _apply_offset(self, x: int, y: int) -> Self:
return self.__class__(
self.widget,
x=self.x + x,
y=self.y + y,
delta_x=self.delta_x,
delta_y=self.delta_y,
button=self.button,
shift=self.shift,
meta=self.meta,
ctrl=self.ctrl,
screen_x=self.screen_x,
screen_y=self.screen_y,
style=self.style,
chain=self.chain,
)
def __rich_repr__(self) -> rich.repr.Result:
yield from super().__rich_repr__()
yield "chain", self.chain
@rich.repr.auto
|
Click
|
python
|
google__pytype
|
pytype/ast/visitor_test.py
|
{
"start": 1840,
"end": 3760
}
|
class ____(unittest.TestCase):
"""Tests for visitor.BaseVisitor."""
def test_visit_order(self):
module = ast.parse(textwrap.dedent("""
def f():
def g():
def h():
pass
"""))
v = _VisitOrderVisitor(ast)
v.visit(module)
self.assertEqual(v.funcs, ["h", "g", "f"])
def test_visit_replace(self):
module = ast.parse(textwrap.dedent("""
x.upper()
y.upper()
z.upper()
"""))
v = _VisitReplaceVisitor(ast)
v.visit(module)
x = module.body[0].value.func.value
y = module.body[1].value.func.value
z = module.body[2].value.func.value
self.assertIs(x, True)
self.assertIs(y, False)
self.assertIsInstance(z, ast.Name)
def test_generic_visit(self):
module = ast.parse("x = 0")
v = _GenericVisitVisitor(ast)
v.visit(module)
# Module
# |
# Assign
# / \
# Name Constant (Num)
# |
# Store
# The "Num" ast class is deprecated as of Python 3.8 and ast.parse returns
# "Constant" instead.
if sys.hexversion >= 0x03080000:
constant = "Constant"
else:
constant = "Num"
self.assertEqual(v.nodes, ["Store", "Name", constant, "Assign", "Module"])
def test_enter(self):
module = ast.parse(textwrap.dedent("""
x = 0
y = 1
z = 2
"""))
v = _EnterVisitor(ast)
v.visit(module)
self.assertEqual(v.names, ["x", "y", "z"])
def test_leave(self):
module = ast.parse(textwrap.dedent("""
x = 0
y = 1
z = 2
"""))
v = _LeaveVisitor(ast)
v.visit(module)
self.assertFalse(v.names)
def test_custom_ast(self):
custom_ast_module = custom_ast()
module = custom_ast_module.parse("")
v = _GenericVisitVisitor(custom_ast_module)
v.visit(module)
self.assertEqual(v.nodes, ["Thing", "AST"])
if __name__ == "__main__":
unittest.main()
|
BaseVisitorTest
|
python
|
apache__airflow
|
providers/google/tests/unit/google/firebase/hooks/test_firestore.py
|
{
"start": 10184,
"end": 11220
}
|
class ____:
hook: CloudFirestoreHook | None = None
def setup_method(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_no_default_project_id,
):
self.hook = CloudFirestoreHook(gcp_conn_id="test")
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.firebase.hooks.firestore.CloudFirestoreHook.get_conn")
def test_create_build(self, mock_get_conn, mock_project_id):
with pytest.raises(AirflowException) as ctx:
self.hook.export_documents(body={})
assert (
str(ctx.value)
== "The project id must be passed either as keyword project_id parameter or as project_id extra in "
"Google Cloud connection definition. Both are not set!"
)
|
TestCloudFirestoreHookWithoutProjectId
|
python
|
huggingface__transformers
|
src/transformers/models/smolvlm/modeling_smolvlm.py
|
{
"start": 9762,
"end": 10925
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: SmolVLMVisionConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = SmolVLMVisionAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = SmolVLMVisionMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
@auto_docstring
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
**kwargs: Unpack[TransformersKwargs],
) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
**kwargs,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
|
SmolVLMEncoderLayer
|
python
|
doocs__leetcode
|
solution/1000-1099/1004.Max Consecutive Ones III/Solution.py
|
{
"start": 0,
"end": 254
}
|
class ____:
def longestOnes(self, nums: List[int], k: int) -> int:
l = cnt = 0
for x in nums:
cnt += x ^ 1
if cnt > k:
cnt -= nums[l] ^ 1
l += 1
return len(nums) - l
|
Solution
|
python
|
joke2k__faker
|
tests/providers/test_bank.py
|
{
"start": 4887,
"end": 5353
}
|
class ____:
"""Test pl_PL bank provider"""
def test_bban(self, faker, num_samples):
for _ in range(num_samples):
assert re.fullmatch(r"\d{24}", faker.bban())
def test_iban(self, faker, num_samples):
for _ in range(num_samples):
iban = faker.iban()
assert is_valid_iban(iban)
assert iban[:2] == PlPlBankProvider.country_code
assert re.fullmatch(r"\d{2}\d{24}", iban[2:])
|
TestPlPl
|
python
|
sympy__sympy
|
sympy/physics/quantum/cartesian.py
|
{
"start": 7710,
"end": 9092
}
|
class ____(Bra):
"""1D cartesian momentum eigenbra."""
@classmethod
def default_args(self):
return ("px",)
@classmethod
def dual_class(self):
return PxKet
@property
def momentum(self):
"""The momentum of the state."""
return self.label[0]
#-------------------------------------------------------------------------
# Global helper functions
#-------------------------------------------------------------------------
def _enumerate_continuous_1D(*args, **options):
state = args[0]
num_states = args[1]
state_class = state.__class__
index_list = options.pop('index_list', [])
if len(index_list) == 0:
start_index = options.pop('start_index', 1)
index_list = list(range(start_index, start_index + num_states))
enum_states = [0 for i in range(len(index_list))]
for i, ind in enumerate(index_list):
label = state.args[0]
enum_states[i] = state_class(str(label) + "_" + str(ind), **options)
return enum_states
def _lowercase_labels(ops):
if not isinstance(ops, set):
ops = [ops]
return [str(arg.label[0]).lower() for arg in ops]
def _uppercase_labels(ops):
if not isinstance(ops, set):
ops = [ops]
new_args = [str(arg.label[0])[0].upper() +
str(arg.label[0])[1:] for arg in ops]
return new_args
|
PxBra
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-number-of-non-overlapping-substrings.py
|
{
"start": 29,
"end": 1207
}
|
class ____(object):
def maxNumOfSubstrings(self, s):
"""
:type s: str
:rtype: List[str]
"""
def find_right_from_left(s, first, last, left):
right, i = last[ord(s[left])-ord('a')], left
while i <= right:
if first[ord(s[i])-ord('a')] < left:
return -1
right = max(right, last[ord(s[i])-ord('a')])
i += 1
return right
first, last = [float("inf")]*26, [float("-inf")]*26
for i, c in enumerate(s):
first[ord(c)-ord('a')] = min(first[ord(c)-ord('a')], i)
last[ord(c)-ord('a')] = max(last[ord(c)-ord('a')], i)
result = [""]
right = float("inf")
for left, c in enumerate(s):
if left != first[ord(c)-ord('a')]:
continue
new_right = find_right_from_left(s, first, last, left)
if new_right == -1:
continue
if left > right:
result.append("")
right = new_right
result[-1] = s[left:right+1]
return result
# Time: O(n)
# space: O(1)
|
Solution
|
python
|
openai__openai-python
|
src/openai/types/beta/chatkit/thread_list_items_params.py
|
{
"start": 211,
"end": 697
}
|
class ____(TypedDict, total=False):
after: str
"""List items created after this thread item ID.
Defaults to null for the first page.
"""
before: str
"""List items created before this thread item ID.
Defaults to null for the newest results.
"""
limit: int
"""Maximum number of thread items to return. Defaults to 20."""
order: Literal["asc", "desc"]
"""Sort order for results by creation time. Defaults to `desc`."""
|
ThreadListItemsParams
|
python
|
astropy__astropy
|
astropy/modeling/fitting.py
|
{
"start": 70571,
"end": 73385
}
|
class ____(Fitter):
"""
Simplex algorithm and least squares statistic.
Raises
------
`ModelLinearityError`
A linear model is passed to a nonlinear fitter
"""
supported_constraints = Simplex.supported_constraints
def __init__(self):
super().__init__(optimizer=Simplex, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(
self,
model,
x,
y,
z=None,
weights=None,
*,
inplace=False,
**kwargs,
):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
maxiter : int
maximum number of iterations
acc : float
Relative error in approximate solution
equivalencies : list or None, optional, keyword-only
List of *additional* equivalencies that are should be applied in
case x, y and/or z have units. Default is None.
inplace : bool, optional
If `False` (the default), a copy of the model with the fitted
parameters set will be returned. If `True`, the returned model will
be the same instance as the model passed in, and the parameter
values will be changed inplace.
Returns
-------
fitted_model : `~astropy.modeling.FittableModel`
If ``inplace`` is `False` (the default), this is a copy of the
input model with parameters set by the fitter. If ``inplace`` is
`True`, this is the same model as the input model, with parameters
updated to be those set by the fitter.
"""
model_copy = _validate_model(
model,
self._opt_method.supported_constraints,
copy=not inplace,
)
model_copy.sync_constraints = False
farg = _convert_input(x, y, z)
farg = (
model_copy,
weights,
) + farg
init_values, _, _ = model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, init_values, farg, **kwargs
)
fitter_to_model_params(model_copy, fitparams)
model_copy.sync_constraints = True
return model_copy
|
SimplexLSQFitter
|
python
|
pytorch__pytorch
|
torch/_dynamo/decorators.py
|
{
"start": 20335,
"end": 33381
}
|
class ____:
"""
This represents an dimension of a tensor and the corresponding
min and max values it can take. Don't create this
class directly; instead, use :func:`mark_dynamic`.
"""
dim: int
min: int
max: int
@forbid_in_graph
def mark_unbacked(
t: Any,
index: Union[int, list[Any], tuple[Any]],
hint_override: Optional[int] = None,
strict: bool = False,
specialize_on: Optional[list[Any]] = None,
) -> None:
"""
Mark a tensor as having an unbacked dimension. This changes the semantics of operations:
- The size of the specified dimension will always be reported as not equal to zero or one.
- Assertions on this index will be turned into runtime asserts.
- Attempting to get the real value of this dimension will raise an exception.
- In effect, this dimension is treated as data-dependent (its value is unknown).
Args:
t (Any): The tensor to mark as having an unbacked dimension.
index (int or list/tuple of int): The dimension(s) to mark as unbacked. Can be a single integer or a list/tuple of integers.
hint_override (Optional[int], default=None): An optional integer to override the size hint for this dimension.
This is only used by the inductor backend for size hint queries, such as during autotuning.
strict (bool, default=False): If True, an error will be raised if the unbacked dimension is specialized.
By default (strict=False), specialization is allowed and will proceed without error.
specialize_on (Optional[list[Any]], default=None): A list of specialization criteria (e.g., lambdas) for this dimension.
If provided, Dynamo will generate specialized compiled regions for each criterion in addition to a generic trace.
"""
# You could have copied the mark_dynamic behavior but I'm not convinced
# it's what you want
assert not is_traceable_wrapper_subclass(t), "not implemented yet"
if isinstance(index, int):
if strict:
if not hasattr(t, "_dynamo_strict_unbacked_indices"):
t._dynamo_strict_unbacked_indices = set()
t._dynamo_strict_unbacked_indices.add(index)
return
if not hasattr(t, "_specialized_on"):
t._specialize_on = {}
if not hasattr(t, "_dynamo_unbacked_indices"):
t._dynamo_unbacked_indices = set()
if not hasattr(t, "_dynamo_hint_overrides"):
t._dynamo_hint_overrides = {}
if hint_override:
t._dynamo_hint_overrides[index] = hint_override
# FX tracers don't respect @forbid_in_graph and choke on the following error since it passes in proxies:
# TypeError: 'Attribute' object does not support item assignment
if isinstance(t._specialize_on, dict):
t._specialize_on[index] = specialize_on if specialize_on is not None else []
t._dynamo_unbacked_indices.add(index)
return
assert isinstance(index, (list, tuple))
for i in index:
mark_unbacked(t, i)
@forbid_in_graph
def mark_dynamic(
t: Any,
index: Union[int, list[Any], tuple[Any]],
*,
hint_override: Optional[int] = None,
min: Optional[int] = None,
max: Optional[int] = None,
specialize_on: Optional[list[Any]] = None,
) -> None:
"""
Mark a tensor as having a dynamic dim and set corresponding min and max range for the dim.
[Note - on the state of mark_dynamic]
The behavior of having a dynamic dimension on a tensor is governed by a few factors:
1) torch._dynamo.config dynamic_shapes True or False.
a) dynamic_shapes=True - dynamic_shapes must be True for mark_dynamic to work.
a) dynamic_shapes=False - This config will raise an exception when used in conjunction with
mark_dynamic. We will eventually support this.
2) If the dimension is fully constrained - as in, it does not allow more than a single value
in both eager (torch.compile, torch._dynamo.optimize) mode and export mode (torch._dynamo.export),
we will raise an error
3) If the dimension is partially constrained - allowing at least 2 values but not the full unbounded
range of shapes, in eager we will pass it through, but export will raise an error.
4) Attempts to trace this function will explicitly raise. As such, all calls to mark_dynamic must be made
before torch.compile.
5) If hint_override is passed, the hint_override for the specified dimension will replace the provided value
from the first example input as the official size hint.
6) If specialize_on is passed in, we will perform a single generic Dynamo trace followed by
multiple specialized compilations in addition to a single generic compilation. NB: For now we only support
per dimension specialization, or in other words we do not generate a cross product of specializations.
At runtime, we will dispatch to a specialized compiled region if the input matches the specialization criteria.
For example:
mark_dynamic(..., specialize_on=[
lambda x: x == 8,
lambda x: x == 16
])
This approach results in one Dynamo trace and two backend compilations. When the input dimension equals 8 or 16
at runtime, execution will be directed to the specialized compiled region. Performance measurements indicate
2-8x speedups depending on the specific specialization and model architecture.
"""
if is_traceable_wrapper_subclass(t):
# default behavior: mirror mark_dynamic() on all inner tensors with same dim as t
# TODO: Make this configurable via a supported public API
_apply_func_to_inner_tensors_of_same_dim(
mark_dynamic, t, index, min=min, max=max
)
if isinstance(index, int):
if not hasattr(t, "_dynamo_dynamic_indices"):
# pyrefly: ignore [missing-attribute]
t._dynamo_dynamic_indices = set()
# pyrefly: ignore [missing-attribute]
t._dynamo_dynamic_range = set()
# pyrefly: ignore [missing-attribute]
t._dynamo_hint_overrides = {}
if not hasattr(t, "_specialize_on"):
# pyrefly: ignore [missing-attribute]
t._specialize_on = {}
if hint_override:
# pyrefly: ignore [missing-attribute]
t._dynamo_hint_overrides[index] = hint_override
# TODO(voz): Should we bounds check?
# pyrefly: ignore [missing-attribute]
t._dynamo_dynamic_indices.add(index)
t._dynamo_dynamic_range.add(_DimRange(index, min, max)) # type: ignore[arg-type]
# FX tracers don't respect @forbid_in_graph and choke on the following error since it passes in proxies:
# TypeError: 'Attribute' object does not support item assignment
# pyrefly: ignore [missing-attribute]
if isinstance(t._specialize_on, dict):
t._specialize_on[index] = specialize_on if specialize_on is not None else []
return
assert isinstance(index, (list, tuple))
for i in index:
mark_dynamic(t, i, min=min, max=max)
mark_dynamic(t, i, min=min, max=max, specialize_on=specialize_on)
@forbid_in_graph
def maybe_mark_dynamic(t: Any, index: Union[int, list[Any], tuple[Any]]) -> None:
    """
    Mark one or more dimensions of a tensor as dynamic on a best-effort basis.

    Unlike ``mark_dynamic``, no error is raised if the dimension is later
    specialized to a constant during compilation.
    """
    if is_traceable_wrapper_subclass(t):
        # Mirror the marking onto every inner tensor that shares t's dim.
        # TODO: Make this configurable via a supported public API
        _apply_func_to_inner_tensors_of_same_dim(maybe_mark_dynamic, t, index)
    if not isinstance(index, int):
        # A collection of indices: recurse once per entry.
        assert isinstance(index, (list, tuple))
        for dim in index:
            maybe_mark_dynamic(t, dim)
        return
    if not hasattr(t, "_dynamo_weak_dynamic_indices"):
        # pyrefly: ignore [missing-attribute]
        t._dynamo_weak_dynamic_indices = set()
    # TODO(voz): Should we bounds check?
    # pyrefly: ignore [missing-attribute]
    t._dynamo_weak_dynamic_indices.add(index)
def mark_static(
    t: Any, index: Optional[Union[int, list[Any], tuple[Any]]] = None
) -> None:
    """
    Mark a tensor as having a static dim or mark a nn module class as static.

    For tensors
    ===========
    This will prevent us from attempting to compile it dynamically
    when dynamic=True; this can improve trace-time performance.

    This has lower precedence than mark_dynamic.

    Unlike mark_dynamic, this can be done inside a graph, in which case it
    induces specialization on the tensor.

    For nn.Module classes
    =====================
    For static nn.Module classes, TorchDynamo assumes that the module instance
    attributes will not be modified after compilation. This will ensure that
    TorchDynamo keeps integer attributes CONSTANT and not symints.

    From TorchDynamo implementation side, the instances of static-marked
    nn.Module class will be converted to UnspecializedBuiltinNNModuleVariable,
    which have the same properties.

    Note that we still have to guard on the attributes, because different
    instances of the nn.Module can have different values of the attributes. The
    key point here is that the attributes are static.
    """
    if is_compiling():
        # Already inside a compiled region: specialize via comptime
        # instead of tagging attributes on the tensor.
        if index is None:
            for s in t.size():
                comptime.force_static(s)
        else:
            comptime.force_static(t.size(index))
        return

    if is_traceable_wrapper_subclass(t):
        # default behavior: mirror mark_static() on all inner tensors with same dim as t
        # TODO: Make this configurable via a supported public API
        _apply_func_to_inner_tensors_of_same_dim(mark_static, t, index)

    # nn.Module *class* (not instance) path: tag the class itself so its
    # instances are treated as having static attributes.
    # pyrefly: ignore [bad-argument-type]
    if not isinstance(t, torch.Tensor) and issubclass(t, torch.nn.Module):
        # pyrefly: ignore [missing-attribute]
        t._dynamo_marked_static = True
        # pyrefly: ignore [bad-return]
        return t

    if not isinstance(t, torch.Tensor):
        raise TypeError(
            f"mark_static expects a tensor/nn.Module class but received {type(t)}"
        )

    if isinstance(index, int):
        if not hasattr(t, "_dynamo_static_indices"):
            t._dynamo_static_indices = set()  # type: ignore[attr-defined]
        # TODO(voz): Should we bounds check?
        t._dynamo_static_indices.add(index)  # type: ignore[attr-defined]
    elif index is None:
        # No index given: mark every dimension static.
        for i in range(t.dim()):
            mark_static(t, i)
    else:
        assert isinstance(index, (list, tuple))
        for i in index:
            mark_static(t, i)
@forbid_in_graph
def mark_static_address(t: Any, guard: bool = False) -> None:
    """
    Mark an input tensor whose address is constant across calls to the same
    dynamo-compiled function, so cudagraphs can skip the extra allocation for
    it. With ``guard=True`` the data_ptr is guarded and a change triggers a
    full recompile; with ``guard=False`` cudagraphs re-records instead.
    """
    if not isinstance(t, torch.Tensor):
        raise TypeError(f"mark_static_address expects a tensor but received {type(t)}")

    # Record how a data_ptr change should be handled at runtime.
    t._dynamo_static_input_type = "guarded" if guard else "unguarded"  # type: ignore[attr-defined]
# One day, Dynamo will support tracing into einops directly (no allow_in_graph needed)
# Note that PyTorch supports multiple versions of einops, so when that day comes,
# we still need to be really careful about version matches.
def _allow_in_graph_einops() -> None:
    """Allow-list einops ops for Dynamo on einops versions that do not
    register themselves with torchdynamo at import time."""
    import einops

    try:
        # requires einops > 0.6.1, torch >= 2.0
        from einops._torch_specific import ( # type: ignore[attr-defined]  # noqa: F401
            _ops_were_registered_in_torchdynamo,
        )

        # einops > 0.6.1 registered its ops as a side effect of the
        # import above; nothing more to do.
    except ImportError:
        # einops <= 0.6.1: allow-list each op that exists in this version.
        allow_in_graph(einops.rearrange)
        allow_in_graph(einops.reduce)
        # These names were introduced across einops 0.2.0 - 0.6.0.
        for op_name in ("repeat", "einsum", "pack", "unpack"):
            if hasattr(einops, op_name):
                allow_in_graph(getattr(einops, op_name))


# Note: this carefully avoids eagerly import einops.
trace_rules.add_module_init_func("einops", _allow_in_graph_einops)
# Proxy class for torch._dynamo.config patching - so dynamo can identify context managers/decorators
# created by patch_dynamo_config, compared to ones created by a raw torch._dynamo.config.patch.
|
_DimRange
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/paramSpec22.py
|
{
"start": 599,
"end": 848
}
|
class ____(Generic[P, R]):
    """Factory that binds the leading ``int`` argument of a callable.

    ``func`` must take an ``int`` first, followed by parameters ``P``.
    ``create_partial`` fixes that first argument and yields a callable
    accepting only ``P``.
    """

    def __init__(self, func: Callable[Concatenate[int, P], R]):
        self.func = func

    def create_partial(self, first: int) -> Callable[P, R]:
        # Delegates the actual binding to MyPartial (defined elsewhere).
        return MyPartial[P, R](first=first, func=self.func)
|
MyPartialCreator
|
python
|
gevent__gevent
|
src/gevent/_ffi/watcher.py
|
{
"start": 6213,
"end": 14873
}
|
class ____(metaclass=AbstractWatcherType):
    """Abstract base for cffi-backed event-loop watchers.

    Subclasses bind a specific C watcher struct via the ``_watcher_*``
    class attributes below; this base class manages the watcher's
    lifecycle, its callback/args pair, and the cffi handle that keeps
    the Python object alive while the watcher is started.
    """
    _callback = None
    _args = None
    _watcher = None
    # self._handle has a reference to self, keeping it alive.
    # We must keep self._handle alive for ffi.from_handle() to be
    # able to work. We only fill this in when we are started,
    # and when we are stopped we destroy it.
    # NOTE: This is a GC cycle, so we keep it around for as short
    # as possible.
    _handle = None
    @tracemalloc
    def __init__(self, _loop, ref=True, priority=None, args=_NOARGS):
        self.loop = _loop
        self.__init_priority = priority
        self.__init_args = args
        self.__init_ref = ref
        self._watcher_full_init()
    def _watcher_full_init(self):
        # Create the native watcher and apply the options saved by __init__.
        priority = self.__init_priority
        ref = self.__init_ref
        args = self.__init_args
        self._watcher_create(ref)
        if priority is not None:
            self._watcher_ffi_set_priority(priority)
        try:
            self._watcher_ffi_init(args)
        except:
            # Let these be GC'd immediately.
            # If we keep them around to when *we* are gc'd,
            # they're probably invalid, meaning any native calls
            # we do then to close() them are likely to fail
            self._watcher = None
            raise
        self._watcher_ffi_set_init_ref(ref)
    @classmethod
    def _watcher_ffi_close(cls, ffi_watcher):
        # Hook for subclasses to release the native watcher; no-op here.
        pass
    def _watcher_create(self, ref): # pylint:disable=unused-argument
        self._watcher = self._watcher_new()
    def _watcher_new(self):
        return type(self).new(self._watcher_struct_pointer_type) # pylint:disable=no-member
    def _watcher_ffi_set_init_ref(self, ref):
        pass
    def _watcher_ffi_set_priority(self, priority):
        pass
    def _watcher_ffi_init(self, args):
        raise NotImplementedError()
    def _watcher_ffi_start(self):
        raise NotImplementedError()
    def _watcher_ffi_stop(self):
        self._watcher_stop(self.loop.ptr, self._watcher)
    def _watcher_ffi_ref(self):
        raise NotImplementedError()
    def _watcher_ffi_unref(self):
        raise NotImplementedError()
    def _watcher_ffi_start_unref(self):
        # While a watcher is active, we don't keep it
        # referenced. This allows a timer, for example, to be started,
        # and still allow the loop to end if there is nothing
        # else to do. see test__order.TestSleep0 for one example.
        self._watcher_ffi_unref()
    def _watcher_ffi_stop_ref(self):
        self._watcher_ffi_ref()
    # A string identifying the type of libev object we watch, e.g., 'ev_io'
    # This should be a class attribute.
    _watcher_type = None
    # A class attribute that is the callback on the libev object that init's the C struct,
    # e.g., libev.ev_io_init. If None, will be set by _init_subclasses.
    _watcher_init = None
    # A class attribute that is the callback on the libev object that starts the C watcher,
    # e.g., libev.ev_io_start. If None, will be set by _init_subclasses.
    _watcher_start = None
    # A class attribute that is the callback on the libev object that stops the C watcher,
    # e.g., libev.ev_io_stop. If None, will be set by _init_subclasses.
    _watcher_stop = None
    # A cffi ctype object identifying the struct pointer we create.
    # This is a class attribute set based on the _watcher_type
    _watcher_struct_pointer_type = None
    # The attribute of the libev object identifying the custom
    # callback function for this type of watcher. This is a class
    # attribute set based on the _watcher_type in _init_subclasses.
    _watcher_callback = None
    _watcher_is_active = None
    def close(self):
        """Stop the watcher and release the native resources (idempotent)."""
        if self._watcher is None:
            return
        self.stop()
        _watcher = self._watcher
        self._watcher = None
        self._watcher_set_data(_watcher, self._FFI.NULL) # pylint: disable=no-member
        self._watcher_ffi_close(_watcher)
        self.loop = None
    def _watcher_set_data(self, the_watcher, data):
        # This abstraction exists for the sole benefit of
        # libuv.watcher.stat, which "subclasses" uv_handle_t.
        # Can we do something to avoid this extra function call?
        the_watcher.data = data
        return data
    def __enter__(self):
        return self
    def __exit__(self, t, v, tb):
        self.close()
    if ALLOW_WATCHER_DEL:
        def __del__(self):
            if self._watcher:
                tb = get_object_traceback(self)
                tb_msg = ''
                if tb is not None:
                    tb_msg = '\n'.join(tb.format())
                    tb_msg = '\nTraceback:\n' + tb_msg
                warnings.warn("Failed to close watcher %r%s" % (self, tb_msg),
                              ResourceWarning)
                # may fail if __init__ did; will be harmlessly printed
                self.close()
    __in_repr = False
    def __repr__(self):
        basic = "<%s at 0x%x" % (self.__class__.__name__, id(self))
        if self.__in_repr:
            return basic + '>'
        # Running child watchers have been seen to have a
        # recursive repr in ``self.args``, thanks to ``gevent.os.fork_and_watch``
        # passing the watcher as an argument to its callback.
        self.__in_repr = True
        try:
            result = '%s%s' % (basic, self._format())
            if self.pending:
                result += " pending"
            if self.callback is not None:
                fself = getattr(self.callback, '__self__', None)
                if fself is self:
                    result += " callback=<bound method %s of self>" % (self.callback.__name__)
                else:
                    result += " callback=%r" % (self.callback, )
            if self.args is not None:
                result += " args=%r" % (self.args, )
            if self.callback is None and self.args is None:
                result += " stopped"
            result += " watcher=%s" % (self._watcher)
            result += " handle=%s" % (self._watcher_handle)
            result += " ref=%s" % (self.ref)
            return result + ">"
        finally:
            self.__in_repr = False
    @property
    def _watcher_handle(self):
        # The handle stored in the native struct's data field, if created.
        if self._watcher:
            return self._watcher.data
    def _format(self):
        return ''
    @property
    def ref(self):
        raise NotImplementedError()
    def _get_callback(self):
        return self._callback if '_callback' in self.__dict__ else None
    def _set_callback(self, cb):
        if not callable(cb) and cb is not None:
            raise TypeError("Expected callable, not %r" % (cb, ))
        if cb is None:
            if '_callback' in self.__dict__:
                del self._callback
        else:
            self._callback = cb
    callback = property(_get_callback, _set_callback)
    def _get_args(self):
        return self._args
    def _set_args(self, args):
        if not isinstance(args, tuple) and args is not None:
            raise TypeError("args must be a tuple or None")
        if args is None:
            if '_args' in self.__dict__:
                del self._args
        else:
            self._args = args
    args = property(_get_args, _set_args)
    def start(self, callback, *args):
        """Begin watching: store callback/args, pin self in the loop's
        keepalive set, publish the cffi handle, and start the native watcher."""
        if callback is None:
            raise TypeError('callback must be callable, not None')
        self.callback = callback
        self.args = args or _NOARGS
        self.loop._keepaliveset.add(self)
        self._handle = self._watcher_set_data(self._watcher, type(self).new_handle(self)) # pylint:disable=no-member
        self._watcher_ffi_start()
        self._watcher_ffi_start_unref()
    def stop(self):
        """Stop watching and drop the callback, args and handle (idempotent)."""
        if self.callback is None:
            assert self.loop is None or self not in self.loop._keepaliveset
            return
        self.callback = None
        # Only after setting the signal to make this idempotent do
        # we move ahead.
        self._watcher_ffi_stop_ref()
        self._watcher_ffi_stop()
        self.loop._keepaliveset.discard(self)
        self._handle = None
        self._watcher_set_data(self._watcher, self._FFI.NULL) # pylint:disable=no-member
        self.args = None
    def _get_priority(self):
        return None
    @not_while_active
    def _set_priority(self, priority):
        pass
    priority = property(_get_priority, _set_priority)
    @property
    def active(self):
        # True while the native watcher reports itself as active.
        if self._watcher is not None and self._watcher_is_active(self._watcher):
            return True
        return False
    @property
    def pending(self):
        return False
|
watcher
|
python
|
pyca__cryptography
|
src/cryptography/hazmat/bindings/openssl/binding.py
|
{
"start": 1763,
"end": 4084
}
|
class ____:
    """
    OpenSSL API wrapper.
    """
    # The cffi lib object; populated lazily by _ensure_ffi_initialized().
    lib: typing.ClassVar[typing.Any] = None
    ffi = _openssl.ffi
    _lib_loaded = False
    # Serializes first-time initialization across threads.
    _init_lock = threading.Lock()
    def __init__(self) -> None:
        self._ensure_ffi_initialized()
    @classmethod
    def _ensure_ffi_initialized(cls) -> None:
        # Build the conditional-symbol wrapper exactly once, under the lock.
        with cls._init_lock:
            if not cls._lib_loaded:
                cls.lib = build_conditional_library(
                    _openssl.lib, CONDITIONAL_NAMES
                )
                cls._lib_loaded = True
    @classmethod
    def init_static_locks(cls) -> None:
        # Public entry point; it only ensures the ffi layer is initialized.
        cls._ensure_ffi_initialized()
def _verify_package_version(version: str) -> None:
    """Raise ImportError if the Python package version does not match the
    version compiled into the loaded shared object."""
    # Occasionally we run into situations where the version of the Python
    # package does not match the version of the shared object that is loaded.
    # This may occur in environments where multiple versions of cryptography
    # are installed and available in the python path. To avoid errors cropping
    # up later this code checks that the currently imported package and the
    # shared object that were loaded have the same version and raise an
    # ImportError if they do not
    so_package_version = _openssl.ffi.string(
        _openssl.lib.CRYPTOGRAPHY_PACKAGE_VERSION
    )
    if version.encode("ascii") != so_package_version:
        raise ImportError(
            "The version of cryptography does not match the loaded "
            "shared object. This can happen if you have multiple copies of "
            "cryptography installed in your Python path. Please try creating "
            "a new virtual environment to resolve this issue. "
            f"Loaded python version: {version}, "
            f"shared object version: {so_package_version}"
        )
# Sanity check: the compiled bindings must report the same OpenSSL version
# number as the linked library.
_openssl_assert(
    _openssl.lib.OpenSSL_version_num() == openssl.openssl_version(),
)
# Refuse to import if the package and shared-object versions diverge.
_verify_package_version(cryptography.__version__)
Binding.init_static_locks()
if (
    sys.platform == "win32"
    and os.environ.get("PROCESSOR_ARCHITEW6432") is not None
):
    # 32-bit Python on a 64-bit Windows: functional, but warn about speed.
    warnings.warn(
        "You are using cryptography on a 32-bit Python on a 64-bit Windows "
        "Operating System. Cryptography will be significantly faster if you "
        "switch to using a 64-bit Python.",
        UserWarning,
        stacklevel=2,
    )
|
Binding
|
python
|
imageio__imageio
|
imageio/plugins/_tifffile.py
|
{
"start": 172095,
"end": 176347
}
|
class ____(object):
    """Series of TIFF pages with compatible shape and data type.

    Attributes
    ----------
    pages : list of TiffPage
        Sequence of TiffPages in series.
    dtype : numpy.dtype
        Data type (native byte order) of the image array in series.
    shape : tuple
        Dimensions of the image array in series.
    axes : str
        Labels of axes in shape. See TiffPage.axes.
    offset : int or None
        Position of image data in file if memory-mappable, else None.

    """
    def __init__(
        self,
        pages,
        shape,
        dtype,
        axes,
        parent=None,
        name=None,
        transform=None,
        stype=None,
        truncated=False,
    ):
        """Initialize instance."""
        self.index = 0
        self._pages = pages # might contain only first of contiguous pages
        self.shape = tuple(shape)
        self.axes = "".join(axes)
        self.dtype = numpy.dtype(dtype)
        self.stype = stype if stype else ""
        self.name = name if name else ""
        self.transform = transform
        if parent:
            self.parent = parent
        elif pages:
            self.parent = pages[0].parent
        else:
            self.parent = None
        # A single non-truncated page may stand in for a run of contiguous
        # pages; derive the virtual page count from the shape ratio.
        if len(pages) == 1 and not truncated:
            self._len = int(product(self.shape) // product(pages[0].shape))
        else:
            self._len = len(pages)
    def asarray(self, out=None):
        """Return image data from series of TIFF pages as numpy array."""
        if self.parent:
            result = self.parent.asarray(series=self, out=out)
            if self.transform is not None:
                result = self.transform(result)
            return result
    @lazyattr
    def offset(self):
        """Return offset to series data in file, if any."""
        if not self._pages:
            return
        # Every page must be final and stored contiguously with its
        # predecessor for the series to be memory-mappable.
        pos = 0
        for page in self._pages:
            if page is None:
                return
            if not page.is_final:
                return
            if not pos:
                pos = page.is_contiguous[0] + page.is_contiguous[1]
                continue
            if pos != page.is_contiguous[0]:
                return
            pos += page.is_contiguous[1]
        page = self._pages[0]
        offset = page.is_contiguous[0]
        if (page.is_imagej or page.is_shaped) and len(self._pages) == 1:
            # truncated files
            return offset
        # The contiguous byte count must match the advertised series size.
        if pos == offset + product(self.shape) * self.dtype.itemsize:
            return offset
    @property
    def ndim(self):
        """Return number of array dimensions."""
        return len(self.shape)
    @property
    def size(self):
        """Return number of elements in array."""
        return int(product(self.shape))
    @property
    def pages(self):
        """Return sequence of all pages in series."""
        # a workaround to keep the old interface working
        return self
    def __len__(self):
        """Return number of TiffPages in series."""
        return self._len
    def __getitem__(self, key):
        """Return specified TiffPage."""
        # Virtual pages: only the first of a contiguous run is stored, so
        # look the rest up through the parent file's page list.
        if len(self._pages) == 1 and 0 < key < self._len:
            index = self._pages[0].index
            return self.parent.pages[index + key]
        return self._pages[key]
    def __iter__(self):
        """Return iterator over TiffPages in series."""
        if len(self._pages) == self._len:
            for page in self._pages:
                yield page
        else:
            pages = self.parent.pages
            index = self._pages[0].index
            for i in range(self._len):
                yield pages[index + i]
    def __str__(self):
        """Return string with information about series."""
        s = " ".join(
            s
            for s in (
                snipstr("'%s'" % self.name, 20) if self.name else "",
                "x".join(str(i) for i in self.shape),
                str(self.dtype),
                self.axes,
                self.stype,
                "%i Pages" % len(self.pages),
                ("Offset=%i" % self.offset) if self.offset else "",
            )
            if s
        )
        return "TiffPageSeries %i %s" % (self.index, s)
|
TiffPageSeries
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/math_ops/reduction_ops_test.py
|
{
"start": 18107,
"end": 22661
}
|
class ____(BaseReductionTest):
    """Tests math_ops.reduce_mean against a NumPy reference across ranks,
    dtypes and reduction axes."""
    def _tf_reduce(self, x, reduction_axes, keepdims):
        return math_ops.reduce_mean(x, reduction_axes, keepdims)
    def _np_reduce(self, x, reduction_axes, keepdims):
        # NumPy reference implementation used by the _compare* helpers.
        if isinstance(reduction_axes, list) or isinstance(reduction_axes,
                                                          np.ndarray):
            reduction_axes = tuple(reduction_axes)
        elif isinstance(reduction_axes, numbers.Integral):
            reduction_axes = (reduction_axes,)
        if reduction_axes is None:
            count = np.prod(x.shape)
        else:
            count = np.prod([x.shape[ax] for ax in reduction_axes])
        # np.mean automatically converts integer inputs to float, while TensorFlow's
        # reduce_mean does not. For integer inputs, we emulate TensorFlow's behavior
        # using np.sum and truncating division.
        np_sum = np.sum(x, axis=reduction_axes, keepdims=keepdims)
        if np.issubdtype(x.dtype, np.integer):
            return np_sum // count
        return np_sum / count
    def testAxesType(self):
        for dtype in [dtypes.int64, dtypes.int32]:
            with self.cached_session():
                v = math_ops.reduce_mean([0, 0], constant_op.constant(0, dtype=dtype))
                tf_v = self.evaluate(v)
            self.assertAllEqual(tf_v, 0)
    @test_util.run_deprecated_v1
    def testInfinity(self):
        for dtype in [np.float32, np.float64]:
            for special_value_x in [-np.inf, np.inf]:
                for special_value_y in [-np.inf, np.inf]:
                    np_arr = np.array([special_value_x, special_value_y]).astype(dtype)
                    self._compareAll(np_arr, None)
    @test_util.run_deprecated_v1
    def testInt32(self):
        for rank in range(1, _MAX_RANK + 1):
            np_arr = self._makeIncremental((2,) * rank, dtypes.int32)
            self._compareAllAxes(np_arr)
    @test_util.run_deprecated_v1
    def testUint8(self):
        for rank in range(1, _MAX_RANK + 1):
            np_arr = self._makeRandom((2,) * rank, dtypes.uint8)
            self._compareAllAxes(np_arr)
    # This tests the issue reported in b/145030710.
    @test_util.run_deprecated_v1
    def testSizeOverflowUint8(self):
        np_arr = self._makeRandom((2**8,), dtypes.uint8)
        self._compareAllAxes(np_arr)
    @test_util.run_deprecated_v1
    def testSizeOverflowInt8(self):
        np_arr = self._makeRandom((2**7,), dtypes.int8)
        self._compareAllAxes(np_arr)
    @test_util.run_deprecated_v1
    def testSizeOverflowUint16(self):
        np_arr = self._makeRandom((2**16,), dtypes.uint16)
        self._compareAllAxes(np_arr)
    @test_util.run_deprecated_v1
    def testSizeOverflowInt16(self):
        np_arr = self._makeRandom((2**15,), dtypes.int16)
        self._compareAllAxes(np_arr)
    @test_util.run_deprecated_v1
    def testFloat32(self):
        for rank in range(1, _MAX_RANK + 1):
            np_arr = self._makeIncremental((2,) * rank, dtypes.float32)
            self._compareAllAxes(np_arr)
    @test_util.run_deprecated_v1
    def testBFloat16(self):
        for rank in range(1, _MAX_RANK + 1):
            np_arr = self._makeIncremental((2,) * rank, dtypes.bfloat16)
            self._compareAllAxes(np_arr)
    @test_util.run_deprecated_v1
    def testFloat64(self):
        for rank in range(1, _MAX_RANK + 1):
            np_arr = self._makeIncremental((2,) * rank, dtypes.float64)
            self._compareAllAxes(np_arr)
    @test_util.run_deprecated_v1
    def testComplex64(self):
        for rank in range(1, _MAX_RANK + 1):
            np_arr = self._makeIncremental((2,) * rank, dtypes.complex64)
            self._compareAllAxes(np_arr)
    @test_util.run_deprecated_v1
    def testComplex128(self):
        for rank in range(1, _MAX_RANK + 1):
            np_arr = self._makeIncremental((2,) * rank, dtypes.complex128)
            self._compareAllAxes(np_arr)
    @test_util.run_deprecated_v1
    def testGradient(self):
        s = [2, 3, 4, 2]
        for dtype in [dtypes.float32, dtypes.float64]:
            x = self._makeIncremental(s, dtype)
            self._compareGradientAxes(x, rtol=1e-3, atol=1e-3)
    @test_util.run_deprecated_v1
    def testEmptyGradients(self):
        with self.session():
            x = array_ops.zeros([0, 3])
            y = math_ops.reduce_mean(x, [1])
            error = gradient_checker.compute_gradient_error(x, [0, 3], y, [0])
            self.assertEqual(error, 0)
    @test_util.run_deprecated_v1
    def testDegenerate(self):
        # Mean over an empty axis divides by zero and must yield NaN.
        with self.session():
            for dtype in (dtypes.bfloat16, dtypes.float16, dtypes.float32,
                          dtypes.float64):
                # A large number is needed to get Eigen to die
                x = array_ops.zeros((0, 9938), dtype=dtype)
                y = math_ops.reduce_mean(x, [0]).eval()
                self.assertEqual(y.shape, (9938,))
                self.assertTrue(np.all(np.isnan(y)))
|
MeanReductionTest
|
python
|
sqlalchemy__sqlalchemy
|
examples/extending_query/temporal_range.py
|
{
"start": 539,
"end": 3754
}
|
class ____:
    """Mixin that identifies a class as having a timestamp column"""
    timestamp = Column(
        DateTime,
        # evaluated per row at insert time; produces a UTC-aware datetime
        default=partial(datetime.datetime.now, datetime.timezone.utc),
        nullable=False,
    )
def temporal_range(range_lower, range_upper):
    """Return a loader-criteria option restricting every HasTemporal entity
    in a query to rows whose timestamp lies in [range_lower, range_upper]
    (SQL BETWEEN, inclusive). include_aliases=True applies the filter to
    aliased entities as well."""
    return orm.with_loader_criteria(
        HasTemporal,
        lambda cls: cls.timestamp.between(range_lower, range_upper),
        include_aliases=True,
    )
if __name__ == "__main__":
Base = declarative_base()
class Parent(HasTemporal, Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True)
children = relationship("Child")
class Child(HasTemporal, Base):
__tablename__ = "child"
id = Column(Integer, primary_key=True)
parent_id = Column(Integer, ForeignKey("parent.id"), nullable=False)
engine = create_engine("sqlite://", echo=True)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
sess = Session()
c1, c2, c3, c4, c5 = [
Child(timestamp=datetime.datetime(2009, 10, 15, 12, 00, 00)),
Child(timestamp=datetime.datetime(2009, 10, 17, 12, 00, 00)),
Child(timestamp=datetime.datetime(2009, 10, 20, 12, 00, 00)),
Child(timestamp=datetime.datetime(2009, 10, 12, 12, 00, 00)),
Child(timestamp=datetime.datetime(2009, 10, 17, 12, 00, 00)),
]
p1 = Parent(
timestamp=datetime.datetime(2009, 10, 15, 12, 00, 00),
children=[c1, c2, c3],
)
p2 = Parent(
timestamp=datetime.datetime(2009, 10, 17, 12, 00, 00),
children=[c4, c5],
)
sess.add_all([p1, p2])
sess.commit()
# use populate_existing() to ensure the range option takes
# place for elements already in the identity map
parents = (
sess.query(Parent)
.populate_existing()
.options(
temporal_range(
datetime.datetime(2009, 10, 16, 12, 00, 00),
datetime.datetime(2009, 10, 18, 12, 00, 00),
)
)
.all()
)
assert parents[0] == p2
assert parents[0].children == [c5]
sess.expire_all()
# try it with eager load
parents = (
sess.query(Parent)
.options(
temporal_range(
datetime.datetime(2009, 10, 16, 12, 00, 00),
datetime.datetime(2009, 10, 18, 12, 00, 00),
)
)
.options(selectinload(Parent.children))
.all()
)
assert parents[0] == p2
assert parents[0].children == [c5]
sess.expire_all()
# illustrate a 2.0 style query
print("------------------")
parents = (
sess.execute(
select(Parent)
.execution_options(populate_existing=True)
.options(
temporal_range(
datetime.datetime(2009, 10, 15, 11, 00, 00),
datetime.datetime(2009, 10, 18, 12, 00, 00),
)
)
.join(Parent.children)
.filter(Child.id == 2)
)
.scalars()
.all()
)
assert parents[0] == p1
print("-------------------")
assert parents[0].children == [c1, c2]
|
HasTemporal
|
python
|
redis__redis-py
|
tests/test_maint_notifications.py
|
{
"start": 2713,
"end": 9353
}
|
class ____:
    """Test the NodeMovingNotification class."""
    # NOTE: equality and hash are keyed on (id, new_node_host, new_node_port);
    # the TTL is deliberately excluded, as the tests below verify.
    def test_init(self):
        """Test NodeMovingNotification initialization."""
        with patch("time.monotonic", return_value=1000):
            notification = NodeMovingNotification(
                id=1, new_node_host="localhost", new_node_port=6379, ttl=10
            )
        assert notification.id == 1
        assert notification.new_node_host == "localhost"
        assert notification.new_node_port == 6379
        assert notification.ttl == 10
        assert notification.creation_time == 1000
    def test_repr(self):
        """Test NodeMovingNotification string representation."""
        with patch("time.monotonic", return_value=1000):
            notification = NodeMovingNotification(
                id=1, new_node_host="localhost", new_node_port=6379, ttl=10
            )
        with patch("time.monotonic", return_value=1005):  # 5 seconds later
            repr_str = repr(notification)
        assert "NodeMovingNotification" in repr_str
        assert "id=1" in repr_str
        assert "new_node_host='localhost'" in repr_str
        assert "new_node_port=6379" in repr_str
        assert "ttl=10" in repr_str
        assert "remaining=5.0s" in repr_str
        assert "expired=False" in repr_str
    def test_equality_none_id_none_port(self):
        """Test equality for notifications with same id and host and port - None."""
        notification1 = NodeMovingNotification(
            id=1, new_node_host=None, new_node_port=None, ttl=10
        )
        notification2 = NodeMovingNotification(
            id=1, new_node_host=None, new_node_port=None, ttl=20
        )  # Different TTL
        assert notification1 == notification2
    def test_equality_same_id_host_port(self):
        """Test equality for notifications with same id, host, and port."""
        notification1 = NodeMovingNotification(
            id=1, new_node_host="localhost", new_node_port=6379, ttl=10
        )
        notification2 = NodeMovingNotification(
            id=1, new_node_host="localhost", new_node_port=6379, ttl=20
        )  # Different TTL
        assert notification1 == notification2
    def test_equality_same_id_different_host(self):
        """Test inequality for notifications with same id but different host."""
        notification1 = NodeMovingNotification(
            id=1, new_node_host="host1", new_node_port=6379, ttl=10
        )
        notification2 = NodeMovingNotification(
            id=1, new_node_host="host2", new_node_port=6379, ttl=10
        )
        assert notification1 != notification2
    def test_equality_same_id_different_port(self):
        """Test inequality for notifications with same id but different port."""
        notification1 = NodeMovingNotification(
            id=1, new_node_host="localhost", new_node_port=6379, ttl=10
        )
        notification2 = NodeMovingNotification(
            id=1, new_node_host="localhost", new_node_port=6380, ttl=10
        )
        assert notification1 != notification2
    def test_equality_different_id(self):
        """Test inequality for notifications with different id."""
        notification1 = NodeMovingNotification(
            id=1, new_node_host="localhost", new_node_port=6379, ttl=10
        )
        notification2 = NodeMovingNotification(
            id=2, new_node_host="localhost", new_node_port=6379, ttl=10
        )
        assert notification1 != notification2
    def test_equality_different_type(self):
        """Test inequality for notifications of different types."""
        notification1 = NodeMovingNotification(
            id=1, new_node_host="localhost", new_node_port=6379, ttl=10
        )
        notification2 = NodeMigratingNotification(id=1, ttl=10)
        assert notification1 != notification2
    def test_hash_same_id_host_port(self):
        """Test hash consistency for notifications with same id, host, and port."""
        notification1 = NodeMovingNotification(
            id=1, new_node_host="localhost", new_node_port=6379, ttl=10
        )
        notification2 = NodeMovingNotification(
            id=1, new_node_host="localhost", new_node_port=6379, ttl=20
        )  # Different TTL
        assert hash(notification1) == hash(notification2)
    def test_hash_different_host(self):
        """Test hash difference for notifications with different host."""
        notification1 = NodeMovingNotification(
            id=1, new_node_host="host1", new_node_port=6379, ttl=10
        )
        notification2 = NodeMovingNotification(
            id=1, new_node_host="host2", new_node_port=6379, ttl=10
        )
        assert hash(notification1) != hash(notification2)
    def test_hash_different_port(self):
        """Test hash difference for notifications with different port."""
        notification1 = NodeMovingNotification(
            id=1, new_node_host="localhost", new_node_port=6379, ttl=10
        )
        notification2 = NodeMovingNotification(
            id=1, new_node_host="localhost", new_node_port=6380, ttl=10
        )
        assert hash(notification1) != hash(notification2)
    def test_hash_different_id(self):
        """Test hash difference for notifications with different id."""
        notification1 = NodeMovingNotification(
            id=1, new_node_host="localhost", new_node_port=6379, ttl=10
        )
        notification2 = NodeMovingNotification(
            id=2, new_node_host="localhost", new_node_port=6379, ttl=10
        )
        assert hash(notification1) != hash(notification2)
    def test_set_functionality(self):
        """Test that notifications can be used in sets correctly."""
        notification1 = NodeMovingNotification(
            id=1, new_node_host="localhost", new_node_port=6379, ttl=10
        )
        notification2 = NodeMovingNotification(
            id=1, new_node_host="localhost", new_node_port=6379, ttl=20
        )  # Same id, host, port - should be considered the same
        notification3 = NodeMovingNotification(
            id=1, new_node_host="host2", new_node_port=6380, ttl=10
        )  # Same id but different host/port - should be different
        notification4 = NodeMovingNotification(
            id=2, new_node_host="localhost", new_node_port=6379, ttl=10
        )  # Different id - should be different
        notification_set = {notification1, notification2, notification3, notification4}
        assert (
            len(notification_set) == 3
        )  # notification1 and notification2 should be considered the same
|
TestNodeMovingNotification
|
python
|
spack__spack
|
lib/spack/spack/test/oci/mock_registry.py
|
{
"start": 689,
"end": 2028
}
|
class ____:
    """Minimal request router for the mock OCI registry.

    Incoming requests pass through registered middleware first (which may
    rewrite them or short-circuit with a response, e.g. for authentication),
    then are matched against (method, path-regex) routes in registration
    order; the first full match wins.
    """

    def __init__(self) -> None:
        self.routes: List[Tuple[str, Pattern, Callable]] = []
        self.middleware: List[Callable[[Request], Request]] = []

    def handle(self, req: Request) -> MockHTTPResponse:
        """Run middleware, then dispatch to the first matching route."""
        parsed = urllib.parse.urlparse(req.full_url)

        # Middleware may transform the request, or abort early by raising
        # MiddlewareError carrying the response to return.
        try:
            for mw in self.middleware:
                req = mw(req)
        except MiddlewareError as err:
            return err.response

        wanted = req.get_method()
        for method, pattern, endpoint in self.routes:
            if method == wanted:
                m = re.fullmatch(pattern, parsed.path)
                if m:
                    # Named regex groups become keyword arguments.
                    return endpoint(req, **m.groupdict())

        return MockHTTPResponse(404, "Not found")

    def register(self, method, path: str, handler: Callable):
        """Attach *handler* to *method* requests whose path matches *path*."""
        compiled = re.compile(path)
        self.routes.append((method, compiled, handler))

    def add_middleware(self, handler: Callable[[Request], Request]):
        """Append a middleware callable; middleware runs in insertion order."""
        self.middleware.append(handler)
|
Router
|
python
|
numba__numba
|
numba/tests/test_linalg.py
|
{
"start": 21365,
"end": 25484
}
|
class ____(TestCase):
"""
The sample matrix code TestLinalgBase.specific_sample_matrix()
is a bit involved, this class tests it works as intended.
"""
def test_specific_sample_matrix(self):
# add a default test to the ctor, it never runs so doesn't matter
inst = TestLinalgBase('specific_sample_matrix')
sizes = [(7, 1), (11, 5), (5, 11), (3, 3), (1, 7)]
# test loop
for size, dtype, order in product(sizes, inst.dtypes, 'FC'):
m, n = size
minmn = min(m, n)
# test default full rank
A = inst.specific_sample_matrix(size, dtype, order)
self.assertEqual(A.shape, size)
self.assertEqual(np.linalg.matrix_rank(A), minmn)
# test reduced rank if a reduction is possible
if minmn > 1:
rank = minmn - 1
A = inst.specific_sample_matrix(size, dtype, order, rank=rank)
self.assertEqual(A.shape, size)
self.assertEqual(np.linalg.matrix_rank(A), rank)
resolution = 5 * np.finfo(dtype).resolution
# test default condition
A = inst.specific_sample_matrix(size, dtype, order)
self.assertEqual(A.shape, size)
np.testing.assert_allclose(np.linalg.cond(A),
1.,
rtol=resolution,
atol=resolution)
# test specified condition if matrix is > 1D
if minmn > 1:
condition = 10.
A = inst.specific_sample_matrix(
size, dtype, order, condition=condition)
self.assertEqual(A.shape, size)
np.testing.assert_allclose(np.linalg.cond(A),
10.,
rtol=resolution,
atol=resolution)
# check errors are raised appropriately
def check_error(args, msg, err=ValueError):
with self.assertRaises(err) as raises:
inst.specific_sample_matrix(*args)
self.assertIn(msg, str(raises.exception))
# check the checker runs ok
with self.assertRaises(AssertionError) as raises:
msg = "blank"
check_error(((2, 3), np.float64, 'F'), msg, err=ValueError)
# check invalid inputs...
# bad size
msg = "size must be a length 2 tuple."
check_error(((1,), np.float64, 'F'), msg, err=ValueError)
# bad order
msg = "order must be one of 'F' or 'C'."
check_error(((2, 3), np.float64, 'z'), msg, err=ValueError)
# bad type
msg = "dtype must be a numpy floating point type."
check_error(((2, 3), np.int32, 'F'), msg, err=ValueError)
# specifying both rank and condition
msg = "Only one of rank or condition can be specified."
check_error(((2, 3), np.float64, 'F', 1, 1), msg, err=ValueError)
# specifying negative condition
msg = "Condition number must be >=1."
check_error(((2, 3), np.float64, 'F', None, -1), msg, err=ValueError)
# specifying negative matrix dimension
msg = "Negative dimensions given for matrix shape."
check_error(((2, -3), np.float64, 'F'), msg, err=ValueError)
# specifying negative rank
msg = "Rank must be greater than zero."
check_error(((2, 3), np.float64, 'F', -1), msg, err=ValueError)
# specifying a rank greater than maximum rank
msg = "Rank given greater than full rank."
check_error(((2, 3), np.float64, 'F', 4), msg, err=ValueError)
# specifying a condition number for a vector
msg = "Condition number was specified for a vector (always 1.)."
check_error(((1, 3), np.float64, 'F', None, 10), msg, err=ValueError)
# specifying a non integer rank
msg = "Rank must an integer."
check_error(((2, 3), np.float64, 'F', 1.5), msg, err=ValueError)
|
TestTestLinalgBase
|
python
|
huggingface__transformers
|
src/transformers/models/patchtst/modeling_patchtst.py
|
{
"start": 34445,
"end": 35122
}
|
class ____(ModelOutput):
r"""
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
MSE loss.
prediction_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction outputs of the time series modeling heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`PatchTSTForRegression`].
"""
)
|
PatchTSTForPretrainingOutput
|
python
|
google__pytype
|
pytype/pyc/opcodes_test.py
|
{
"start": 4565,
"end": 5929
}
|
class ____(unittest.TestCase):
"""Tests for opcodes._get_exception_bitmask."""
def assertBitmask(self, *, offset_to_op, exc_ranges, expected_bitmask):
bitmask = bin(opcodes._get_exception_bitmask(offset_to_op, exc_ranges))
self.assertEqual(bitmask, expected_bitmask)
def test_one_exception_range(self):
self.assertBitmask(
offset_to_op={1: None, 5: None, 8: None, 13: None},
exc_ranges={4: 10},
expected_bitmask='0b11111110000',
)
def test_multiple_exception_ranges(self):
self.assertBitmask(
offset_to_op={1: None, 3: None, 5: None, 7: None, 9: None},
exc_ranges={1: 4, 7: 9},
expected_bitmask='0b1110011110',
)
def test_length_one_range(self):
self.assertBitmask(
offset_to_op={0: None, 3: None, 6: None, 7: None, 12: None},
exc_ranges={0: 0, 6: 6, 7: 7, 12: 12},
expected_bitmask='0b1000011000001',
)
def test_overlapping_ranges(self):
self.assertBitmask(
offset_to_op={1: None, 5: None, 8: None, 13: None},
exc_ranges={1: 5, 4: 9},
expected_bitmask='0b1111111110',
)
def test_no_exception(self):
self.assertBitmask(
offset_to_op={1: None, 5: None, 8: None, 13: None},
exc_ranges={},
expected_bitmask='0b0',
)
if __name__ == '__main__':
unittest.main()
|
ExceptionBitmaskTest
|
python
|
django-mptt__django-mptt
|
tests/myapp/models.py
|
{
"start": 3562,
"end": 3711
}
|
class ____(MPTTModel):
parent = TreeForeignKey(
"self", null=True, blank=True, related_name="children", on_delete=models.CASCADE
)
|
Tree
|
python
|
sphinx-doc__sphinx
|
sphinx/transforms/__init__.py
|
{
"start": 14462,
"end": 15164
}
|
class ____(SphinxTransform):
"""Sort glossaries that have the ``sorted`` flag."""
# This must be done after i18n, therefore not right
# away in the glossary directive.
default_priority = 500
def apply(self, **kwargs: Any) -> None:
for glossary in self.document.findall(addnodes.glossary):
if glossary['sorted']:
definition_list = cast('nodes.definition_list', glossary[0])
definition_list[:] = sorted(
definition_list,
key=lambda item: unicodedata.normalize(
'NFD', cast('nodes.term', item)[0].astext().lower()
),
)
|
GlossarySorter
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_vendor/rich/spinner.py
|
{
"start": 309,
"end": 4364
}
|
class ____:
"""A spinner animation.
Args:
name (str): Name of spinner (run python -m rich.spinner).
text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
style (StyleType, optional): Style for spinner animation. Defaults to None.
speed (float, optional): Speed factor for animation. Defaults to 1.0.
Raises:
KeyError: If name isn't one of the supported spinner animations.
"""
def __init__(
self,
name: str,
text: "RenderableType" = "",
*,
style: Optional["StyleType"] = None,
speed: float = 1.0,
) -> None:
try:
spinner = SPINNERS[name]
except KeyError:
raise KeyError(f"no spinner called {name!r}")
self.text: "Union[RenderableType, Text]" = (
Text.from_markup(text) if isinstance(text, str) else text
)
self.name = name
self.frames = cast(List[str], spinner["frames"])[:]
self.interval = cast(float, spinner["interval"])
self.start_time: Optional[float] = None
self.style = style
self.speed = speed
self.frame_no_offset: float = 0.0
self._update_speed = 0.0
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
yield self.render(console.get_time())
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> Measurement:
text = self.render(0)
return Measurement.get(console, options, text)
def render(self, time: float) -> "RenderableType":
"""Render the spinner for a given time.
Args:
time (float): Time in seconds.
Returns:
RenderableType: A renderable containing animation frame.
"""
if self.start_time is None:
self.start_time = time
frame_no = ((time - self.start_time) * self.speed) / (
self.interval / 1000.0
) + self.frame_no_offset
frame = Text(
self.frames[int(frame_no) % len(self.frames)], style=self.style or ""
)
if self._update_speed:
self.frame_no_offset = frame_no
self.start_time = time
self.speed = self._update_speed
self._update_speed = 0.0
if not self.text:
return frame
elif isinstance(self.text, (str, Text)):
return Text.assemble(frame, " ", self.text)
else:
table = Table.grid(padding=1)
table.add_row(frame, self.text)
return table
def update(
self,
*,
text: "RenderableType" = "",
style: Optional["StyleType"] = None,
speed: Optional[float] = None,
) -> None:
"""Updates attributes of a spinner after it has been started.
Args:
text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
style (StyleType, optional): Style for spinner animation. Defaults to None.
speed (float, optional): Speed factor for animation. Defaults to None.
"""
if text:
self.text = Text.from_markup(text) if isinstance(text, str) else text
if style:
self.style = style
if speed:
self._update_speed = speed
if __name__ == "__main__": # pragma: no cover
from time import sleep
from .columns import Columns
from .panel import Panel
from .live import Live
all_spinners = Columns(
[
Spinner(spinner_name, text=Text(repr(spinner_name), style="green"))
for spinner_name in sorted(SPINNERS.keys())
],
column_first=True,
expand=True,
)
with Live(
Panel(all_spinners, title="Spinners", border_style="blue"),
refresh_per_second=20,
) as live:
while True:
sleep(0.1)
|
Spinner
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.