language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | PyCQA__pylint | tests/functional/u/undefined/undefined_variable_py30.py | {
"start": 1799,
"end": 1898
} | class ____(metaclass=ab.ABCMeta): # [undefined-variable]
""" Notice the `ab` module. """
| SecondBad |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/common.py | {
"start": 4006,
"end": 4703
} | class ____(BaseModel):
"""
Serializer for individual bulk action responses.
Represents the outcome of a single bulk operation (create, update, or delete).
The response includes a list of successful keys and any errors encountered during the operation.
This structure helps users understand which key actions succeeded and which failed.
"""
success: list[str] = Field(
default=[], description="A list of unique id/key representing successful operations."
)
errors: list[dict[str, Any]] = Field(
default=[],
description="A list of errors encountered during the operation, each containing details about the issue.",
)
| BulkActionResponse |
python | kamyu104__LeetCode-Solutions | Python/minimum-number-of-primes-to-sum-to-target.py | {
"start": 139,
"end": 761
} | class ____(object):
def minNumberOfPrimes(self, n, m):
"""
:type n: int
:type m: int
:rtype: int
"""
is_prime = [True]*(n+1)
cnt = 0
dp = [float("inf")]*(n+1)
dp[0] = 0
for i in xrange(2, n+1):
if not is_prime[i]:
continue
for j in xrange(i+i, n+1, i):
is_prime[j] = False
for j in xrange(i, n+1):
dp[j] = min(dp[j], dp[j-i]+1)
cnt += 1
if cnt == m:
break
return dp[n] if dp[n] != float("inf") else -1
| Solution |
python | run-llama__llama_index | llama-index-core/llama_index/core/vector_stores/types.py | {
"start": 7579,
"end": 9485
} | class ____(Protocol):
"""Abstract vector store protocol."""
stores_text: bool
is_embedding_query: bool = True
@property
def client(self) -> Any:
"""Get client."""
...
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""Add nodes with embedding to vector store."""
...
async def async_add(
self,
nodes: List[BaseNode],
**kwargs: Any,
) -> List[str]:
"""
Asynchronously add nodes with embedding to vector store.
NOTE: this is not implemented for all vector stores. If not implemented,
it will just call add synchronously.
"""
return self.add(nodes)
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id."""
...
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
NOTE: this is not implemented for all vector stores. If not implemented,
it will just call delete synchronously.
"""
self.delete(ref_doc_id, **delete_kwargs)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store."""
...
async def aquery(
self, query: VectorStoreQuery, **kwargs: Any
) -> VectorStoreQueryResult:
"""
Asynchronously query vector store.
NOTE: this is not implemented for all vector stores. If not implemented,
it will just call query synchronously.
"""
return self.query(query, **kwargs)
def persist(
self, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
) -> None:
return None
# TODO: Temp copy of VectorStore for pydantic, can't mix with runtime_checkable
| VectorStore |
python | google__flatbuffers | tests/MyGame/Example/ArrayStruct.py | {
"start": 252,
"end": 3712
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def SizeOf(cls) -> int:
return 160
# ArrayStruct
def Init(self, buf: bytes, pos: int):
self._tab = flatbuffers.table.Table(buf, pos)
# ArrayStruct
def A(self): return self._tab.Get(flatbuffers.number_types.Float32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(0))
# ArrayStruct
def B(self, j = None):
if j is None:
return [self._tab.Get(flatbuffers.number_types.Int32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(4 + i * 4)) for i in range(self.BLength())]
elif j >= 0 and j < self.BLength():
return self._tab.Get(flatbuffers.number_types.Int32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(4 + j * 4))
else:
return None
# ArrayStruct
def BAsNumpy(self):
return self._tab.GetArrayAsNumpy(flatbuffers.number_types.Int32Flags, self._tab.Pos + 4, self.BLength())
# ArrayStruct
def BLength(self) -> int:
return 15
# ArrayStruct
def BIsNone(self) -> bool:
return False
# ArrayStruct
def C(self): return self._tab.Get(flatbuffers.number_types.Int8Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(64))
# ArrayStruct
def D(self, i: int) -> NestedStruct:
obj = NestedStruct()
obj.Init(self._tab.Bytes, self._tab.Pos + 72 + i * 32)
return obj
# ArrayStruct
def DLength(self) -> int:
return 2
# ArrayStruct
def DIsNone(self) -> bool:
return False
# ArrayStruct
def E(self): return self._tab.Get(flatbuffers.number_types.Int32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(136))
# ArrayStruct
def F(self, j = None):
if j is None:
return [self._tab.Get(flatbuffers.number_types.Int64Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(144 + i * 8)) for i in range(self.FLength())]
elif j >= 0 and j < self.FLength():
return self._tab.Get(flatbuffers.number_types.Int64Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(144 + j * 8))
else:
return None
# ArrayStruct
def FAsNumpy(self):
return self._tab.GetArrayAsNumpy(flatbuffers.number_types.Int64Flags, self._tab.Pos + 144, self.FLength())
# ArrayStruct
def FLength(self) -> int:
return 2
# ArrayStruct
def FIsNone(self) -> bool:
return False
def CreateArrayStruct(builder, a, b, c, d_a, d_b, d_c, d_d, e, f):
builder.Prep(8, 160)
for _idx0 in range(2 , 0, -1):
builder.PrependInt64(f[_idx0-1])
builder.Pad(4)
builder.PrependInt32(e)
for _idx0 in range(2 , 0, -1):
builder.Prep(8, 32)
for _idx1 in range(2 , 0, -1):
builder.PrependInt64(d_d[_idx0-1][_idx1-1])
builder.Pad(5)
for _idx1 in range(2 , 0, -1):
builder.PrependInt8(d_c[_idx0-1][_idx1-1])
builder.PrependInt8(d_b[_idx0-1])
for _idx1 in range(2 , 0, -1):
builder.PrependInt32(d_a[_idx0-1][_idx1-1])
builder.Pad(7)
builder.PrependInt8(c)
for _idx0 in range(15 , 0, -1):
builder.PrependInt32(b[_idx0-1])
builder.PrependFloat32(a)
return builder.Offset()
import MyGame.Example.NestedStruct
try:
from typing import List
except:
pass
| ArrayStruct |
python | kamyu104__LeetCode-Solutions | Python/strange-printer.py | {
"start": 33,
"end": 661
} | class ____(object):
def strangePrinter(self, s):
"""
:type s: str
:rtype: int
"""
def dp(s, i, j, lookup):
if i > j:
return 0
if (i, j) not in lookup:
lookup[(i, j)] = dp(s, i, j-1, lookup) + 1
for k in xrange(i, j):
if s[k] == s[j]:
lookup[(i, j)] = min(lookup[(i, j)], \
dp(s, i, k, lookup) + dp(s, k+1, j-1, lookup))
return lookup[(i, j)]
lookup = {}
return dp(s, 0, len(s)-1, lookup)
| Solution |
python | milvus-io__pymilvus | pymilvus/grpc_gen/milvus_pb2_grpc.py | {
"start": 195375,
"end": 196494
} | class ____(object):
"""Missing associated documentation comment in .proto file."""
def RegisterLink(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ProxyServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'RegisterLink': grpc.unary_unary_rpc_method_handler(
servicer.RegisterLink,
request_deserializer=milvus__pb2.RegisterLinkRequest.FromString,
response_serializer=milvus__pb2.RegisterLinkResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'milvus.proto.milvus.ProxyService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
server.add_registered_method_handlers('milvus.proto.milvus.ProxyService', rpc_method_handlers)
# This class is part of an EXPERIMENTAL API.
| ProxyServiceServicer |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_qt.py | {
"start": 45982,
"end": 46161
} | class ____(_Backend):
backend_version = __version__
FigureCanvas = FigureCanvasQT
FigureManager = FigureManagerQT
mainloop = FigureManagerQT.start_main_loop
| _BackendQT |
python | Textualize__textual | tests/suggester/test_suggester.py | {
"start": 288,
"end": 3211
} | class ____(DOMNode):
def __init__(self, log_list: list[tuple[str, str]]) -> None:
self.log_list = log_list
def post_message(self, message: SuggestionReady):
# We hijack post_message so we can intercept messages without creating a full app.
self.log_list.append((message.suggestion, message.value))
async def test_cache_on():
log = []
class MySuggester(Suggester):
async def get_suggestion(self, value: str):
log.append(value)
return value
suggester = MySuggester(use_cache=True)
await suggester._get_suggestion(DOMNode(), "hello")
assert log == ["hello"]
await suggester._get_suggestion(DOMNode(), "hello")
assert log == ["hello"]
async def test_cache_off():
log = []
class MySuggester(Suggester):
async def get_suggestion(self, value: str):
log.append(value)
return value
suggester = MySuggester(use_cache=False)
await suggester._get_suggestion(DOMNode(), "hello")
assert log == ["hello"]
await suggester._get_suggestion(DOMNode(), "hello")
assert log == ["hello", "hello"]
async def test_suggestion_ready_message():
log = []
suggester = FillSuggester()
await suggester._get_suggestion(LogListNode(log), "hello")
assert log == [("helloxxxxx", "hello")]
await suggester._get_suggestion(LogListNode(log), "world")
assert log == [("helloxxxxx", "hello"), ("worldxxxxx", "world")]
async def test_no_message_if_no_suggestion():
log = []
suggester = FillSuggester()
await suggester._get_suggestion(LogListNode(log), "this is a longer string")
assert log == []
async def test_suggestion_ready_message_on_cache_hit():
log = []
suggester = FillSuggester(use_cache=True)
await suggester._get_suggestion(LogListNode(log), "hello")
assert log == [("helloxxxxx", "hello")]
await suggester._get_suggestion(LogListNode(log), "hello")
assert log == [("helloxxxxx", "hello"), ("helloxxxxx", "hello")]
@pytest.mark.parametrize(
"value",
[
"hello",
"HELLO",
"HeLlO",
"Hello",
"hELLO",
],
)
async def test_case_insensitive_suggestions(value):
class MySuggester(Suggester):
async def get_suggestion(self, value: str):
assert "hello" == value
suggester = MySuggester(use_cache=False, case_sensitive=False)
await suggester._get_suggestion(DOMNode(), value)
async def test_case_insensitive_cache_hits():
count = 0
class MySuggester(Suggester):
async def get_suggestion(self, value: str):
nonlocal count
count += 1
return value + "abc"
suggester = MySuggester(use_cache=True, case_sensitive=False)
hellos = ["hello", "HELLO", "HeLlO", "Hello", "hELLO"]
for hello in hellos:
await suggester._get_suggestion(DOMNode(), hello)
assert count == 1
| LogListNode |
python | jmcnamara__XlsxWriter | xlsxwriter/exceptions.py | {
"start": 713,
"end": 831
} | class ____(XlsxInputError):
"""Worksheet name is too long or contains restricted characters."""
| InvalidWorksheetName |
python | sympy__sympy | sympy/physics/wigner.py | {
"start": 29565,
"end": 37900
} | class ____(Function):
def doit(self, **hints):
if all(obj.is_number for obj in self.args):
return wigner_3j(*self.args)
else:
return self
def dot_rot_grad_Ynm(j, p, l, m, theta, phi):
r"""
Returns dot product of rotational gradients of spherical harmonics.
Explanation
===========
This function returns the right hand side of the following expression:
.. math ::
\vec{R}Y{_j^{p}} \cdot \vec{R}Y{_l^{m}} = (-1)^{m+p}
\sum\limits_{k=|l-j|}^{l+j}Y{_k^{m+p}} * \alpha_{l,m,j,p,k} *
\frac{1}{2} (k^2-j^2-l^2+k-j-l)
Arguments
=========
j, p, l, m .... indices in spherical harmonics (expressions or integers)
theta, phi .... angle arguments in spherical harmonics
Example
=======
>>> from sympy import symbols
>>> from sympy.physics.wigner import dot_rot_grad_Ynm
>>> theta, phi = symbols("theta phi")
>>> dot_rot_grad_Ynm(3, 2, 2, 0, theta, phi).doit()
3*sqrt(55)*Ynm(5, 2, theta, phi)/(11*sqrt(pi))
"""
j = sympify(j)
p = sympify(p)
l = sympify(l)
m = sympify(m)
theta = sympify(theta)
phi = sympify(phi)
k = Dummy("k")
def alpha(l,m,j,p,k):
return sqrt((2*l+1)*(2*j+1)*(2*k+1)/(4*pi)) * \
Wigner3j(j, l, k, S.Zero, S.Zero, S.Zero) * \
Wigner3j(j, l, k, p, m, -m-p)
return (S.NegativeOne)**(m+p) * Sum(Ynm(k, m+p, theta, phi) * alpha(l,m,j,p,k) / 2 \
*(k**2-j**2-l**2+k-j-l), (k, abs(l-j), l+j))
def wigner_d_small(J, beta):
"""Return the small Wigner d matrix for angular momentum J.
Explanation
===========
J : An integer, half-integer, or SymPy symbol for the total angular
momentum of the angular momentum space being rotated.
beta : A real number representing the Euler angle of rotation about
the so-called line of nodes. See [Edmonds74]_.
Returns
=======
A matrix representing the corresponding Euler angle rotation( in the basis
of eigenvectors of `J_z`).
.. math ::
\\mathcal{d}_{\\beta} = \\exp\\big( \\frac{i\\beta}{\\hbar} J_y\\big)
such that
.. math ::
d^{(J)}_{m',m}(\\beta) = \\mathtt{wigner\\_d\\_small(J,beta)[J-mprime,J-m]}
The components are calculated using the general form [Edmonds74]_,
equation 4.1.15.
Examples
========
>>> from sympy import Integer, symbols, pi, pprint
>>> from sympy.physics.wigner import wigner_d_small
>>> half = 1/Integer(2)
>>> beta = symbols("beta", real=True)
>>> pprint(wigner_d_small(half, beta), use_unicode=True)
β‘ βΞ²β βΞ²ββ€
β’cosβββ sinββββ₯
β’ β2β β2β β₯
β’ β₯
β’ βΞ²β βΞ²ββ₯
β’-sinβββ cosββββ₯
β£ β2β β2β β¦
>>> pprint(wigner_d_small(2*half, beta), use_unicode=True)
β‘ 2βΞ²β βΞ²β βΞ²β 2βΞ²β β€
β’ cos βββ β2β
sinββββ
cosβββ sin βββ β₯
β’ β2β β2β β2β β2β β₯
β’ β₯
β’ βΞ²β βΞ²β 2βΞ²β 2βΞ²β βΞ²β βΞ²ββ₯
β’-β2β
sinββββ
cosβββ - sin βββ + cos βββ β2β
sinββββ
cosββββ₯
β’ β2β β2β β2β β2β β2β β2β β₯
β’ β₯
β’ 2βΞ²β βΞ²β βΞ²β 2βΞ²β β₯
β’ sin βββ -β2β
sinββββ
cosβββ cos βββ β₯
β£ β2β β2β β2β β2β β¦
From table 4 in [Edmonds74]_
>>> pprint(wigner_d_small(half, beta).subs({beta:pi/2}), use_unicode=True)
β‘ β2 β2β€
β’ ββ βββ₯
β’ 2 2 β₯
β’ β₯
β’-β2 β2β₯
β’ββββ βββ₯
β£ 2 2 β¦
>>> pprint(wigner_d_small(2*half, beta).subs({beta:pi/2}),
... use_unicode=True)
β‘ β2 β€
β’1/2 ββ 1/2β₯
β’ 2 β₯
β’ β₯
β’-β2 β2 β₯
β’ββββ 0 ββ β₯
β’ 2 2 β₯
β’ β₯
β’ -β2 β₯
β’1/2 ββββ 1/2β₯
β£ 2 β¦
>>> pprint(wigner_d_small(3*half, beta).subs({beta:pi/2}),
... use_unicode=True)
β‘ β2 β6 β6 β2β€
β’ ββ ββ ββ βββ₯
β’ 4 4 4 4 β₯
β’ β₯
β’-β6 -β2 β2 β6β₯
β’ββββ ββββ ββ βββ₯
β’ 4 4 4 4 β₯
β’ β₯
β’ β6 -β2 -β2 β6β₯
β’ ββ ββββ ββββ βββ₯
β’ 4 4 4 4 β₯
β’ β₯
β’-β2 β6 -β6 β2β₯
β’ββββ ββ ββββ βββ₯
β£ 4 4 4 4 β¦
>>> pprint(wigner_d_small(4*half, beta).subs({beta:pi/2}),
... use_unicode=True)
β‘ β6 β€
β’1/4 1/2 ββ 1/2 1/4β₯
β’ 4 β₯
β’ β₯
β’-1/2 -1/2 0 1/2 1/2β₯
β’ β₯
β’ β6 β6 β₯
β’ ββ 0 -1/2 0 ββ β₯
β’ 4 4 β₯
β’ β₯
β’-1/2 1/2 0 -1/2 1/2β₯
β’ β₯
β’ β6 β₯
β’1/4 -1/2 ββ -1/2 1/4β₯
β£ 4 β¦
"""
M = [J-i for i in range(2*J+1)]
d = zeros(2*J+1)
# Mi corresponds to Edmonds' $m'$, and Mj to $m$.
for i, Mi in enumerate(M):
for j, Mj in enumerate(M):
# We get the maximum and minimum value of sigma.
sigmamax = min([J-Mi, J-Mj])
sigmamin = max([0, -Mi-Mj])
dij = sqrt(binomial(2*J, J+Mj) /
binomial(2*J, J+Mi))
terms = [(-1)**(J-Mi-s) *
binomial(J+Mj, J-Mi-s) *
binomial(J-Mj, s) *
cos(beta/2)**(2*s+Mi+Mj) *
sin(beta/2)**(2*J-2*s-Mj-Mi)
for s in range(sigmamin, sigmamax+1)]
d[i, j] = dij*Add(*terms)
return ImmutableMatrix(d)
def wigner_d(J, alpha, beta, gamma):
"""Return the Wigner D matrix for angular momentum J.
Explanation
===========
J :
An integer, half-integer, or SymPy symbol for the total angular
momentum of the angular momentum space being rotated.
alpha, beta, gamma - Real numbers representing the Euler.
Angles of rotation about the so-called figure axis, line of nodes,
and vertical. See [Edmonds74]_, however note that the symbols alpha
and gamma are swapped in this implementation.
Returns
=======
A matrix representing the corresponding Euler angle rotation (in the basis
of eigenvectors of `J_z`).
.. math ::
\\mathcal{D}_{\\alpha \\beta \\gamma} =
\\exp\\big( \\frac{i\\alpha}{\\hbar} J_z\\big)
\\exp\\big( \\frac{i\\beta}{\\hbar} J_y\\big)
\\exp\\big( \\frac{i\\gamma}{\\hbar} J_z\\big)
such that
.. math ::
\\mathcal{D}^{(J)}_{m',m}(\\alpha, \\beta, \\gamma) =
\\mathtt{wigner_d(J, alpha, beta, gamma)[J-mprime,J-m]}
The components are calculated using the general form [Edmonds74]_,
equation 4.1.12, however note that the angles alpha and gamma are swapped
in this implementation.
Examples
========
The simplest possible example:
>>> from sympy.physics.wigner import wigner_d
>>> from sympy import Integer, symbols, pprint
>>> half = 1/Integer(2)
>>> alpha, beta, gamma = symbols("alpha, beta, gamma", real=True)
>>> pprint(wigner_d(half, alpha, beta, gamma), use_unicode=True)
β‘ β
β
Ξ± β
β
Ξ³ β
β
Ξ± -β
β
Ξ³ β€
β’ βββ βββ βββ βββββ β₯
β’ 2 2 βΞ²β 2 2 βΞ²β β₯
β’ β― β
β― β
cosβββ β― β
β― β
sinβββ β₯
β’ β2β β2β β₯
β’ β₯
β’ -β
β
Ξ± β
β
Ξ³ -β
β
Ξ± -β
β
Ξ³ β₯
β’ βββββ βββ βββββ βββββ β₯
β’ 2 2 βΞ²β 2 2 βΞ²ββ₯
β’-β― β
β― β
sinβββ β― β
β― β
cosββββ₯
β£ β2β β2β β¦
"""
d = wigner_d_small(J, beta)
M = [J-i for i in range(2*J+1)]
# Mi corresponds to Edmonds' $m'$, and Mj to $m$.
D = [[exp(I*Mi*alpha)*d[i, j]*exp(I*Mj*gamma)
for j, Mj in enumerate(M)] for i, Mi in enumerate(M)]
return ImmutableMatrix(D)
| Wigner3j |
python | django-guardian__django-guardian | guardian/testapp/tests/conf.py | {
"start": 897,
"end": 2611
} | class ____:
"""
Acts as either a decorator, or a context manager. If it's a decorator it
takes a function and returns a wrapped function. If it's a contextmanager
it's used with the `with` statement. In either event entering/exiting
are called before and after, respectively, the function/block is executed.
"""
def __init__(self, **kwargs):
self.options = kwargs
self.wrapped = settings._wrapped
def __enter__(self):
self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def __call__(self, test_func):
from django.test import TransactionTestCase
if isinstance(test_func, type) and issubclass(test_func, TransactionTestCase):
original_pre_setup = test_func._pre_setup
original_post_teardown = test_func._post_teardown
def _pre_setup(innerself):
self.enable()
original_pre_setup(innerself)
def _post_teardown(innerself):
original_post_teardown(innerself)
self.disable()
test_func._pre_setup = _pre_setup
test_func._post_teardown = _post_teardown
return test_func
else:
@wraps(test_func)
def inner(*args, **kwargs):
with self:
return test_func(*args, **kwargs)
return inner
def enable(self):
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
settings._wrapped = override
def disable(self):
settings._wrapped = self.wrapped
| override_settings |
python | django-mptt__django-mptt | tests/myapp/models.py | {
"start": 6662,
"end": 6881
} | class ____(MPTTModel):
parent = TreeForeignKey(
"self", null=True, blank=True, related_name="children", on_delete=models.CASCADE
)
class Meta:
swappable = "MPTT_SWAPPABLE_MODEL"
| SwappableModel |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 116171,
"end": 116425
} | class ____(OpAlignPartitions):
_parameters = ["frame", "other", "op", "axis", "level", "fill_value"]
@staticmethod
def _op(frame, op, other, *args, **kwargs):
return MethodOperator(op, frame, other, *args, **kwargs)
| MethodOperatorAlign |
python | pallets__jinja | tests/test_ext.py | {
"start": 6047,
"end": 7132
} | class ____(Extension):
def filter_stream(self, stream):
for token in stream:
if token.type == "data":
yield from self.interpolate(token)
else:
yield token
def interpolate(self, token):
pos = 0
end = len(token.value)
lineno = token.lineno
while True:
match = _gettext_re.search(token.value, pos)
if match is None:
break
value = token.value[pos : match.start()]
if value:
yield Token(lineno, "data", value)
lineno += count_newlines(token.value)
yield Token(lineno, "variable_begin", None)
yield Token(lineno, "name", "gettext")
yield Token(lineno, "lparen", None)
yield Token(lineno, "string", match.group(1))
yield Token(lineno, "rparen", None)
yield Token(lineno, "variable_end", None)
pos = match.end()
if pos < end:
yield Token(lineno, "data", token.value[pos:])
| StreamFilterExtension |
python | google__pytype | pytype/tests/test_match2.py | {
"start": 13789,
"end": 15488
} | class ____(test_base.BaseTest):
"""Tests for matching types."""
# Forked into py2 and py3 versions
def test_callable(self):
ty = self.Infer("""
import tokenize
def f():
pass
x = tokenize.generate_tokens(f)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Generator
import tokenize
def f() -> NoneType: ...
x = ... # type: Generator[tokenize.TokenInfo, None, None]
""",
)
def test_callable_against_generic(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import TypeVar, Callable, Generic, Iterable, Iterator
A = TypeVar("A")
N = TypeVar("N")
class Foo(Generic[A]):
def __init__(self, c: Callable[[], N]):
self = Foo[N]
x = ... # type: Iterator[int]
""",
)
self.Check(
"""
import foo
foo.Foo(foo.x.__next__)
""",
pythonpath=[d.path],
)
def test_empty(self):
ty = self.Infer("""
a = []
b = ["%d" % i for i in a]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List
a = ... # type: List[nothing]
b = ... # type: List[str]
""",
)
def test_bound_against_callable(self):
ty = self.Infer("""
import io
import tokenize
x = tokenize.generate_tokens(io.StringIO("").readline)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Generator
import io
import tokenize
x = ... # type: Generator[tokenize.TokenInfo, None, None]
""",
)
| MatchTestPy3 |
python | django__django | tests/logging_tests/tests.py | {
"start": 3016,
"end": 4353
} | class ____:
def assertLogRecord(
self,
logger_cm,
msg,
levelno,
status_code,
request=None,
exc_class=None,
):
self.assertEqual(
records_len := len(logger_cm.records),
1,
f"Wrong number of calls for {logger_cm=} in {levelno=} (expected 1, got "
f"{records_len}).",
)
record = logger_cm.records[0]
self.assertEqual(record.getMessage(), msg)
self.assertEqual(record.levelno, levelno)
self.assertEqual(record.status_code, status_code)
if request is not None:
self.assertEqual(record.request, request)
if exc_class:
self.assertIsNotNone(record.exc_info)
self.assertEqual(record.exc_info[0], exc_class)
return record
def assertLogsRequest(
self, url, level, msg, status_code, logger="django.request", exc_class=None
):
with self.assertLogs(logger, level) as cm:
try:
self.client.get(url)
except views.UncaughtException:
pass
self.assertLogRecord(
cm, msg, getattr(logging, level), status_code, exc_class=exc_class
)
@override_settings(DEBUG=True, ROOT_URLCONF="logging_tests.urls")
| LoggingAssertionMixin |
python | PrefectHQ__prefect | tests/server/orchestration/test_core_policy.py | {
"start": 21669,
"end": 26616
} | class ____:
async def test_can_manual_retry_with_arbitrary_state_name(
self,
session,
initialize_orchestration,
):
manual_retry_policy = [HandleFlowTerminalStateTransitions]
initial_state_type = states.StateType.FAILED
proposed_state_type = states.StateType.SCHEDULED
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
)
ctx.proposed_state.name = "FooBar"
ctx.run.run_count = 2
ctx.run.deployment_id = uuid4()
ctx.run_settings.retries = 1
async with contextlib.AsyncExitStack() as stack:
for rule in manual_retry_policy:
ctx = await stack.enter_async_context(rule(ctx, *intended_transition))
assert ctx.response_status == SetStateStatus.ACCEPT
assert ctx.run.run_count == 2
async def test_cannot_manual_retry_without_deployment(
self,
session,
initialize_orchestration,
):
manual_retry_policy = [HandleFlowTerminalStateTransitions]
initial_state_type = states.StateType.FAILED
proposed_state_type = states.StateType.SCHEDULED
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
flow_retries=1,
)
ctx.proposed_state.name = "AwaitingRetry"
ctx.run.run_count = 2
async with contextlib.AsyncExitStack() as stack:
for rule in manual_retry_policy:
ctx = await stack.enter_async_context(rule(ctx, *intended_transition))
assert ctx.response_status == SetStateStatus.ABORT
assert ctx.run.run_count == 2
async def test_manual_retrying_works_even_when_exceeding_max_retries(
self,
session,
initialize_orchestration,
):
manual_retry_policy = [HandleFlowTerminalStateTransitions]
initial_state_type = states.StateType.FAILED
proposed_state_type = states.StateType.SCHEDULED
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
flow_retries=1,
)
ctx.proposed_state.name = "AwaitingRetry"
ctx.run.deployment_id = uuid4()
ctx.run.run_count = 2
async with contextlib.AsyncExitStack() as stack:
for rule in manual_retry_policy:
ctx = await stack.enter_async_context(rule(ctx, *intended_transition))
assert ctx.response_status == SetStateStatus.ACCEPT
assert ctx.run.run_count == 2
async def test_manual_retrying_bypasses_terminal_state_protection(
self,
session,
initialize_orchestration,
):
manual_retry_policy = [HandleFlowTerminalStateTransitions]
initial_state_type = states.StateType.FAILED
proposed_state_type = states.StateType.SCHEDULED
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
flow_retries=10,
)
ctx.proposed_state.name = "AwaitingRetry"
ctx.run.deployment_id = uuid4()
ctx.run.run_count = 3
async with contextlib.AsyncExitStack() as stack:
for rule in manual_retry_policy:
ctx = await stack.enter_async_context(rule(ctx, *intended_transition))
assert ctx.response_status == SetStateStatus.ACCEPT
assert ctx.run.run_count == 3
@pytest.mark.parametrize(
"proposed_state_type",
[states.StateType.SCHEDULED, states.StateType.FAILED],
)
async def test_manual_retry_updates_retry_type(
self,
session,
initialize_orchestration,
proposed_state_type,
):
manual_retry_policy = [HandleFlowTerminalStateTransitions]
initial_state_type = states.StateType.FAILED
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
"flow",
*intended_transition,
)
ctx.proposed_state.name = "AwaitingRetry"
ctx.run.deployment_id = uuid4()
ctx.run.run_count = 2
async with contextlib.AsyncExitStack() as stack:
for rule in manual_retry_policy:
ctx = await stack.enter_async_context(rule(ctx, *intended_transition))
if proposed_state_type == states.StateType.SCHEDULED:
assert ctx.run.empirical_policy.retry_type == "reschedule"
else:
assert ctx.run.empirical_policy.retry_type is None
| TestManualFlowRetries |
python | patrick-kidger__equinox | equinox/_module/_module.py | {
"start": 2629,
"end": 2995
} | class ____(Exception):
pass
def _is_array_like(x: object, /) -> None:
if is_array_like(x):
raise _JaxTransformException
_MSG_JAX_XFM_FUNC: Final = """
Possibly assigning a JAX-transformed callable as an attribute on
{0}.{1}. This will not have any of its parameters updated.
For example, the following code is buggy:
```python
| _JaxTransformException |
python | pypa__pipenv | pipenv/vendor/click/core.py | {
"start": 76398,
"end": 92552
} | class ____:
r"""A parameter to a command comes in two versions: they are either
:class:`Option`\s or :class:`Argument`\s. Other subclasses are currently
not supported by design as some of the internals for parsing are
intentionally not finalized.
Some settings are supported by both options and arguments.
:param param_decls: the parameter declarations for this option or
argument. This is a list of flags or argument
names.
:param type: the type that should be used. Either a :class:`ParamType`
or a Python type. The latter is converted into the former
automatically if supported.
:param required: controls if this is optional or not.
:param default: the default value if omitted. This can also be a callable,
in which case it's invoked when the default is needed
without any arguments.
:param callback: A function to further process or validate the value
after type conversion. It is called as ``f(ctx, param, value)``
and must return the value. It is called for all sources,
including prompts.
:param nargs: the number of arguments to match. If not ``1`` the return
value is a tuple instead of single value. The default for
nargs is ``1`` (except if the type is a tuple, then it's
the arity of the tuple). If ``nargs=-1``, all remaining
parameters are collected.
:param metavar: how the value is represented in the help page.
:param expose_value: if this is `True` then the value is passed onwards
to the command callback and stored on the context,
otherwise it's skipped.
:param is_eager: eager values are processed before non eager ones. This
should not be set for arguments or it will inverse the
order of processing.
:param envvar: a string or list of strings that are environment variables
that should be checked.
:param shell_complete: A function that returns custom shell
completions. Used instead of the param's type completion if
given. Takes ``ctx, param, incomplete`` and must return a list
of :class:`~click.shell_completion.CompletionItem` or a list of
strings.
.. versionchanged:: 8.0
``process_value`` validates required parameters and bounded
``nargs``, and invokes the parameter callback before returning
the value. This allows the callback to validate prompts.
``full_process_value`` is removed.
.. versionchanged:: 8.0
``autocompletion`` is renamed to ``shell_complete`` and has new
semantics described above. The old name is deprecated and will
be removed in 8.1, until then it will be wrapped to match the
new requirements.
.. versionchanged:: 8.0
For ``multiple=True, nargs>1``, the default must be a list of
tuples.
.. versionchanged:: 8.0
Setting a default is no longer required for ``nargs>1``, it will
default to ``None``. ``multiple=True`` or ``nargs=-1`` will
default to ``()``.
.. versionchanged:: 7.1
Empty environment variables are ignored rather than taking the
empty string value. This makes it possible for scripts to clear
variables if they can't unset them.
.. versionchanged:: 2.0
Changed signature for parameter callback to also be passed the
parameter. The old callback format will still work, but it will
raise a warning to give you a chance to migrate the code easier.
"""
param_type_name = "parameter"
def __init__(
self,
param_decls: t.Optional[t.Sequence[str]] = None,
type: t.Optional[t.Union[types.ParamType, t.Any]] = None,
required: bool = False,
default: t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]] = None,
callback: t.Optional[t.Callable[[Context, "Parameter", t.Any], t.Any]] = None,
nargs: t.Optional[int] = None,
multiple: bool = False,
metavar: t.Optional[str] = None,
expose_value: bool = True,
is_eager: bool = False,
envvar: t.Optional[t.Union[str, t.Sequence[str]]] = None,
shell_complete: t.Optional[
t.Callable[
[Context, "Parameter", str],
t.Union[t.List["CompletionItem"], t.List[str]],
]
] = None,
) -> None:
self.name: t.Optional[str]
self.opts: t.List[str]
self.secondary_opts: t.List[str]
self.name, self.opts, self.secondary_opts = self._parse_decls(
param_decls or (), expose_value
)
self.type: types.ParamType = types.convert_type(type, default)
# Default nargs to what the type tells us if we have that
# information available.
if nargs is None:
if self.type.is_composite:
nargs = self.type.arity
else:
nargs = 1
self.required = required
self.callback = callback
self.nargs = nargs
self.multiple = multiple
self.expose_value = expose_value
self.default = default
self.is_eager = is_eager
self.metavar = metavar
self.envvar = envvar
self._custom_shell_complete = shell_complete
if __debug__:
if self.type.is_composite and nargs != self.type.arity:
raise ValueError(
f"'nargs' must be {self.type.arity} (or None) for"
f" type {self.type!r}, but it was {nargs}."
)
# Skip no default or callable default.
check_default = default if not callable(default) else None
if check_default is not None:
if multiple:
try:
# Only check the first value against nargs.
check_default = next(_check_iter(check_default), None)
except TypeError:
raise ValueError(
"'default' must be a list when 'multiple' is true."
) from None
# Can be None for multiple with empty default.
if nargs != 1 and check_default is not None:
try:
_check_iter(check_default)
except TypeError:
if multiple:
message = (
"'default' must be a list of lists when 'multiple' is"
" true and 'nargs' != 1."
)
else:
message = "'default' must be a list when 'nargs' != 1."
raise ValueError(message) from None
if nargs > 1 and len(check_default) != nargs:
subject = "item length" if multiple else "length"
raise ValueError(
f"'default' {subject} must match nargs={nargs}."
)
def to_info_dict(self) -> t.Dict[str, t.Any]:
"""Gather information that could be useful for a tool generating
user-facing documentation.
Use :meth:`click.Context.to_info_dict` to traverse the entire
CLI structure.
.. versionadded:: 8.0
"""
return {
"name": self.name,
"param_type_name": self.param_type_name,
"opts": self.opts,
"secondary_opts": self.secondary_opts,
"type": self.type.to_info_dict(),
"required": self.required,
"nargs": self.nargs,
"multiple": self.multiple,
"default": self.default,
"envvar": self.envvar,
}
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.name}>"
def _parse_decls(
self, decls: t.Sequence[str], expose_value: bool
) -> t.Tuple[t.Optional[str], t.List[str], t.List[str]]:
raise NotImplementedError()
@property
def human_readable_name(self) -> str:
"""Returns the human readable name of this parameter. This is the
same as the name for options, but the metavar for arguments.
"""
return self.name # type: ignore
def make_metavar(self) -> str:
if self.metavar is not None:
return self.metavar
metavar = self.type.get_metavar(self)
if metavar is None:
metavar = self.type.name.upper()
if self.nargs != 1:
metavar += "..."
return metavar
@t.overload
def get_default(
self, ctx: Context, call: "te.Literal[True]" = True
) -> t.Optional[t.Any]:
...
@t.overload
def get_default(
self, ctx: Context, call: bool = ...
) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]:
...
def get_default(
self, ctx: Context, call: bool = True
) -> t.Optional[t.Union[t.Any, t.Callable[[], t.Any]]]:
"""Get the default for the parameter. Tries
:meth:`Context.lookup_default` first, then the local default.
:param ctx: Current context.
:param call: If the default is a callable, call it. Disable to
return the callable instead.
.. versionchanged:: 8.0.2
Type casting is no longer performed when getting a default.
.. versionchanged:: 8.0.1
Type casting can fail in resilient parsing mode. Invalid
defaults will not prevent showing help text.
.. versionchanged:: 8.0
Looks at ``ctx.default_map`` first.
.. versionchanged:: 8.0
Added the ``call`` parameter.
"""
value = ctx.lookup_default(self.name, call=False) # type: ignore
if value is None:
value = self.default
if call and callable(value):
value = value()
return value
def add_to_parser(self, parser: OptionParser, ctx: Context) -> None:
raise NotImplementedError()
def consume_value(
self, ctx: Context, opts: t.Mapping[str, t.Any]
) -> t.Tuple[t.Any, ParameterSource]:
value = opts.get(self.name) # type: ignore
source = ParameterSource.COMMANDLINE
if value is None:
value = self.value_from_envvar(ctx)
source = ParameterSource.ENVIRONMENT
if value is None:
value = ctx.lookup_default(self.name) # type: ignore
source = ParameterSource.DEFAULT_MAP
if value is None:
value = self.get_default(ctx)
source = ParameterSource.DEFAULT
return value, source
def type_cast_value(self, ctx: Context, value: t.Any) -> t.Any:
"""Convert and validate a value against the option's
:attr:`type`, :attr:`multiple`, and :attr:`nargs`.
"""
if value is None:
return () if self.multiple or self.nargs == -1 else None
def check_iter(value: t.Any) -> t.Iterator[t.Any]:
try:
return _check_iter(value)
except TypeError:
# This should only happen when passing in args manually,
# the parser should construct an iterable when parsing
# the command line.
raise BadParameter(
_("Value must be an iterable."), ctx=ctx, param=self
) from None
if self.nargs == 1 or self.type.is_composite:
def convert(value: t.Any) -> t.Any:
return self.type(value, param=self, ctx=ctx)
elif self.nargs == -1:
def convert(value: t.Any) -> t.Any: # t.Tuple[t.Any, ...]
return tuple(self.type(x, self, ctx) for x in check_iter(value))
else: # nargs > 1
def convert(value: t.Any) -> t.Any: # t.Tuple[t.Any, ...]
value = tuple(check_iter(value))
if len(value) != self.nargs:
raise BadParameter(
ngettext(
"Takes {nargs} values but 1 was given.",
"Takes {nargs} values but {len} were given.",
len(value),
).format(nargs=self.nargs, len=len(value)),
ctx=ctx,
param=self,
)
return tuple(self.type(x, self, ctx) for x in value)
if self.multiple:
return tuple(convert(x) for x in check_iter(value))
return convert(value)
def value_is_missing(self, value: t.Any) -> bool:
if value is None:
return True
if (self.nargs != 1 or self.multiple) and value == ():
return True
return False
def process_value(self, ctx: Context, value: t.Any) -> t.Any:
value = self.type_cast_value(ctx, value)
if self.required and self.value_is_missing(value):
raise MissingParameter(ctx=ctx, param=self)
if self.callback is not None:
value = self.callback(ctx, self, value)
return value
def resolve_envvar_value(self, ctx: Context) -> t.Optional[str]:
if self.envvar is None:
return None
if isinstance(self.envvar, str):
rv = os.environ.get(self.envvar)
if rv:
return rv
else:
for envvar in self.envvar:
rv = os.environ.get(envvar)
if rv:
return rv
return None
def value_from_envvar(self, ctx: Context) -> t.Optional[t.Any]:
rv: t.Optional[t.Any] = self.resolve_envvar_value(ctx)
if rv is not None and self.nargs != 1:
rv = self.type.split_envvar_value(rv)
return rv
def handle_parse_result(
self, ctx: Context, opts: t.Mapping[str, t.Any], args: t.List[str]
) -> t.Tuple[t.Any, t.List[str]]:
with augment_usage_errors(ctx, param=self):
value, source = self.consume_value(ctx, opts)
ctx.set_parameter_source(self.name, source) # type: ignore
try:
value = self.process_value(ctx, value)
except Exception:
if not ctx.resilient_parsing:
raise
value = None
if self.expose_value:
ctx.params[self.name] = value # type: ignore
return value, args
def get_help_record(self, ctx: Context) -> t.Optional[t.Tuple[str, str]]:
pass
def get_usage_pieces(self, ctx: Context) -> t.List[str]:
return []
def get_error_hint(self, ctx: Context) -> str:
"""Get a stringified version of the param for use in error messages to
indicate which param caused the error.
"""
hint_list = self.opts or [self.human_readable_name]
return " / ".join(f"'{x}'" for x in hint_list)
def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]:
"""Return a list of completions for the incomplete value. If a
``shell_complete`` function was given during init, it is used.
Otherwise, the :attr:`type`
:meth:`~click.types.ParamType.shell_complete` function is used.
:param ctx: Invocation context for this command.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
if self._custom_shell_complete is not None:
results = self._custom_shell_complete(ctx, self, incomplete)
if results and isinstance(results[0], str):
from pipenv.vendor.click.shell_completion import CompletionItem
results = [CompletionItem(c) for c in results]
return t.cast(t.List["CompletionItem"], results)
return self.type.shell_complete(ctx, self, incomplete)
| Parameter |
python | mlflow__mlflow | dev/clint/src/clint/rules/log_model_artifact_path.py | {
"start": 177,
"end": 1800
} | class ____(Rule):
def _message(self) -> str:
return "`artifact_path` parameter of `log_model` is deprecated. Use `name` instead."
@staticmethod
def check(node: ast.Call, index: "SymbolIndex") -> bool:
"""
Returns True if the call looks like `mlflow.<flavor>.log_model(...)` and
the `artifact_path` argument is specified.
"""
parts = resolve_expr(node.func)
if not parts or len(parts) != 3:
return False
first, second, third = parts
if not (first == "mlflow" and third == "log_model"):
return False
# TODO: Remove this once spark flavor supports logging models as logged model artifacts
if second == "spark":
return False
function_name = f"{first}.{second}.log_model"
artifact_path_idx = LogModelArtifactPath._find_artifact_path_index(index, function_name)
if artifact_path_idx is None:
return False
if len(node.args) > artifact_path_idx:
return True
else:
return any(kw.arg and kw.arg == "artifact_path" for kw in node.keywords)
@staticmethod
def _find_artifact_path_index(index: "SymbolIndex", function_name: str) -> int | None:
"""
Finds the index of the `artifact_path` argument in the function signature of `log_model`
using the SymbolIndex.
"""
if f := index.resolve(function_name):
try:
return f.all_args.index("artifact_path")
except ValueError:
return None
return None
| LogModelArtifactPath |
python | euske__pdfminer | pdfminer/utils.py | {
"start": 7362,
"end": 9666
} | class ____:
def __init__(self, bbox, gridsize=50):
self._seq = [] # preserve the object order.
self._objs = set()
self._grid = {}
self.gridsize = gridsize
(self.x0, self.y0, self.x1, self.y1) = bbox
return
def __repr__(self):
return ('<Plane objs=%r>' % list(self))
def __iter__(self):
return ( obj for obj in self._seq if obj in self._objs )
def __len__(self):
return len(self._objs)
def __contains__(self, obj):
return obj in self._objs
def _getrange(self, bbox):
(x0, y0, x1, y1) = bbox
if (x1 <= self.x0 or self.x1 <= x0 or
y1 <= self.y0 or self.y1 <= y0): return
x0 = max(self.x0, x0)
y0 = max(self.y0, y0)
x1 = min(self.x1, x1)
y1 = min(self.y1, y1)
for y in drange(y0, y1, self.gridsize):
for x in drange(x0, x1, self.gridsize):
yield (x, y)
return
# extend(objs)
def extend(self, objs):
for obj in objs:
self.add(obj)
return
# add(obj): place an object.
def add(self, obj):
for k in self._getrange((obj.x0, obj.y0, obj.x1, obj.y1)):
if k not in self._grid:
r = []
self._grid[k] = r
else:
r = self._grid[k]
r.append(obj)
self._seq.append(obj)
self._objs.add(obj)
return
# remove(obj): displace an object.
def remove(self, obj):
for k in self._getrange((obj.x0, obj.y0, obj.x1, obj.y1)):
try:
self._grid[k].remove(obj)
except (KeyError, ValueError):
pass
self._objs.remove(obj)
return
# find(): finds objects that are in a certain area.
def find(self, bbox):
(x0, y0, x1, y1) = bbox
done = set()
for k in self._getrange(bbox):
if k not in self._grid:
continue
for obj in self._grid[k]:
if obj in done:
continue
done.add(obj)
if (obj.x1 <= x0 or x1 <= obj.x0 or
obj.y1 <= y0 or y1 <= obj.y0):
continue
yield obj
return
| Plane |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 714161,
"end": 714703
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("body", "key", "name", "resource_path", "url")
body = sgqlc.types.Field(String, graphql_name="body")
key = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="key")
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
resource_path = sgqlc.types.Field(URI, graphql_name="resourcePath")
url = sgqlc.types.Field(URI, graphql_name="url")
| CodeOfConduct |
python | openai__openai-python | src/openai/types/beta/code_interpreter_tool.py | {
"start": 196,
"end": 333
} | class ____(BaseModel):
type: Literal["code_interpreter"]
"""The type of tool being defined: `code_interpreter`"""
| CodeInterpreterTool |
python | sqlalchemy__sqlalchemy | test/sql/test_type_expressions.py | {
"start": 13181,
"end": 15887
} | class ____:
@testing.requires.insertmanyvalues
def test_insertmanyvalues_returning(self, connection):
tt = self.tables.test_table
result = connection.execute(
tt.insert().returning(tt.c["x", "y"]),
[
{"x": "X1", "y": "Y1"},
{"x": "X2", "y": "Y2"},
{"x": "X3", "y": "Y3"},
],
)
eq_(
result.all(),
[("X1", "Y1"), ("X2", "Y2"), ("X3", "Y3")],
)
def test_round_trip(self, connection):
connection.execute(
self.tables.test_table.insert(),
[
{"x": "X1", "y": "Y1"},
{"x": "X2", "y": "Y2"},
{"x": "X3", "y": "Y3"},
],
)
# test insert coercion alone
eq_(
connection.exec_driver_sql(
"select * from test_table order by y"
).fetchall(),
[("X1", "y1"), ("X2", "y2"), ("X3", "y3")],
)
# conversion back to upper
eq_(
connection.execute(
select(self.tables.test_table).order_by(
self.tables.test_table.c.y
)
).fetchall(),
[("X1", "Y1"), ("X2", "Y2"), ("X3", "Y3")],
)
def test_targeting_no_labels(self, connection):
connection.execute(
self.tables.test_table.insert(), {"x": "X1", "y": "Y1"}
)
row = connection.execute(select(self.tables.test_table)).first()
eq_(row._mapping[self.tables.test_table.c.y], "Y1")
def test_targeting_by_string(self, connection):
connection.execute(
self.tables.test_table.insert(), {"x": "X1", "y": "Y1"}
)
row = connection.execute(select(self.tables.test_table)).first()
eq_(row._mapping["y"], "Y1")
def test_targeting_apply_labels(self, connection):
connection.execute(
self.tables.test_table.insert(), {"x": "X1", "y": "Y1"}
)
row = connection.execute(
select(self.tables.test_table).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
)
).first()
eq_(row._mapping[self.tables.test_table.c.y], "Y1")
def test_targeting_individual_labels(self, connection):
connection.execute(
self.tables.test_table.insert(), {"x": "X1", "y": "Y1"}
)
row = connection.execute(
select(
self.tables.test_table.c.x.label("xbar"),
self.tables.test_table.c.y.label("ybar"),
)
).first()
eq_(row._mapping[self.tables.test_table.c.y], "Y1")
| RoundTripTestBase |
python | ray-project__ray | doc/source/ray-core/doc_code/streaming_generator.py | {
"start": 1188,
"end": 4172
} | class ____:
def f(self):
for i in range(5):
yield i
actor = Actor.remote()
for ref in actor.f.remote():
print(ray.get(ref))
actor = AsyncActor.remote()
for ref in actor.f.remote():
print(ray.get(ref))
actor = ThreadedActor.remote()
for ref in actor.f.remote():
print(ray.get(ref))
# __streaming_generator_actor_model_end__
# __streaming_generator_asyncio_start__
import asyncio
@ray.remote
def task():
for i in range(5):
time.sleep(1)
yield i
async def main():
async for ref in task.remote():
print(await ref)
asyncio.run(main())
# __streaming_generator_asyncio_end__
# __streaming_generator_gc_start__
@ray.remote
def task():
for i in range(5):
time.sleep(1)
yield i
gen = task.remote()
ref1 = next(gen)
del gen
# __streaming_generator_gc_end__
# __streaming_generator_concurrency_asyncio_start__
import asyncio
@ray.remote
def task():
for i in range(5):
time.sleep(1)
yield i
async def async_task():
async for ref in task.remote():
print(await ref)
async def main():
t1 = async_task()
t2 = async_task()
await asyncio.gather(t1, t2)
asyncio.run(main())
# __streaming_generator_concurrency_asyncio_end__
# __streaming_generator_wait_simple_start__
@ray.remote
def task():
for i in range(5):
time.sleep(5)
yield i
gen = task.remote()
# Because it takes 5 seconds to make the first yield,
# with 0 timeout, the generator is unready.
ready, unready = ray.wait([gen], timeout=0)
print("timeout 0, nothing is ready.")
print(ready)
assert len(ready) == 0
assert len(unready) == 1
# Without a timeout argument, ray.wait waits until the given argument
# is ready. When a next item is ready, it returns.
ready, unready = ray.wait([gen])
print("Wait for 5 seconds. The next item is ready.")
assert len(ready) == 1
assert len(unready) == 0
next(gen)
# Because the second yield hasn't happened yet,
ready, unready = ray.wait([gen], timeout=0)
print("Wait for 0 seconds. The next item is not ready.")
print(ready, unready)
assert len(ready) == 0
assert len(unready) == 1
# __streaming_generator_wait_simple_end__
# __streaming_generator_wait_complex_start__
from ray._raylet import ObjectRefGenerator
@ray.remote
def generator_task():
for i in range(5):
time.sleep(5)
yield i
@ray.remote
def regular_task():
for i in range(5):
time.sleep(5)
return
gen = [generator_task.remote()]
ref = [regular_task.remote()]
ready, unready = [], [*gen, *ref]
result = []
while unready:
ready, unready = ray.wait(unready)
for r in ready:
if isinstance(r, ObjectRefGenerator):
try:
ref = next(r)
result.append(ray.get(ref))
except StopIteration:
pass
else:
unready.append(r)
else:
result.append(ray.get(r))
# __streaming_generator_wait_complex_end__
| ThreadedActor |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zoho-crm/source_zoho_crm/streams.py | {
"start": 707,
"end": 2656
} | class ____(HttpStream, ABC):
primary_key: str = "id"
module: ModuleMeta = None
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
if response.status_code in EMPTY_BODY_STATUSES:
return None
pagination = response.json()["info"]
if not pagination["more_records"]:
return None
return {"page": pagination["page"] + 1}
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
return next_page_token or {}
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
data = [] if response.status_code in EMPTY_BODY_STATUSES else response.json()["data"]
yield from data
def path(self, *args, **kwargs) -> str:
return f"/crm/v2/{self.module.api_name}"
def get_json_schema(self) -> Optional[Dict[Any, Any]]:
try:
return asdict(self.module.schema)
except IncompleteMetaDataException:
# to build a schema for a stream, a sequence of requests is made:
# one `/settings/modules` which introduces a list of modules,
# one `/settings/modules/{module_name}` per module and
# one `/settings/fields?module={module_name}` per module.
# Any of former two can result in 204 and empty body what blocks us
# from generating stream schema and, therefore, a stream.
self.logger.warning(
f"Could not retrieve fields Metadata for module {self.module.api_name}. " f"This stream will not be available for syncs."
)
return None
except UnknownDataTypeException as exc:
self.logger.warning(f"Unknown data type in module {self.module.api_name}, skipping. Details: {exc}")
raise
| ZohoCrmStream |
python | kamyu104__LeetCode-Solutions | Python/counting-elements.py | {
"start": 272,
"end": 672
} | class ____(object):
def countElements(self, arr):
"""
:type arr: List[int]
:rtype: int
"""
arr.sort()
result, l = 0, 1
for i in xrange(len(arr)-1):
if arr[i] == arr[i+1]:
l += 1
continue
if arr[i]+1 == arr[i+1]:
result += l
l = 1
return result
| Solution |
python | facebook__pyre-check | client/language_server/protocol.py | {
"start": 7829,
"end": 8141
} | class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
start: PyrePosition
end: PyrePosition
def to_lsp_range(self) -> "LspRange":
return LspRange(
start=self.start.to_lsp_position(),
end=self.end.to_lsp_position(),
)
@dataclasses.dataclass(frozen=True)
| PyreRange |
python | pytorch__pytorch | torch/fx/experimental/unification/multipledispatch/dispatcher.py | {
"start": 573,
"end": 3147
} | class ____(NotImplementedError):
"""A NotImplementedError for multiple dispatch"""
def ambiguity_warn(dispatcher, ambiguities):
"""Raise warning when ambiguity is detected
Parameters
----------
dispatcher : Dispatcher
The dispatcher on which the ambiguity was detected
ambiguities : set
Set of type signature pairs that are ambiguous within this dispatcher
See Also:
Dispatcher.add
warning_text
"""
warn(warning_text(dispatcher.name, ambiguities), AmbiguityWarning)
@deprecated(
"`halt_ordering` is deprecated, you can safely remove this call.",
category=FutureWarning,
)
def halt_ordering():
"""Deprecated interface to temporarily disable ordering."""
@deprecated(
"`restart_ordering` is deprecated, if you would like to eagerly order the dispatchers, "
"you should call the `reorder()` method on each dispatcher.",
category=FutureWarning,
)
def restart_ordering(on_ambiguity=ambiguity_warn):
"""Deprecated interface to temporarily resume ordering."""
def variadic_signature_matches_iter(types, full_signature):
"""Check if a set of input types matches a variadic signature.
Notes
-----
The algorithm is as follows:
Initialize the current signature to the first in the sequence
For each type in `types`:
If the current signature is variadic
If the type matches the signature
yield True
Else
Try to get the next signature
If no signatures are left we can't possibly have a match
so yield False
Else
yield True if the type matches the current signature
Get the next signature
"""
sigiter = iter(full_signature)
sig = next(sigiter)
for typ in types:
matches = issubclass(typ, sig)
yield matches
if not isvariadic(sig):
# we're not matching a variadic argument, so move to the next
# element in the signature
sig = next(sigiter)
else:
try:
sig = next(sigiter)
except StopIteration:
assert isvariadic(sig)
yield True
else:
# We have signature items left over, so all of our arguments
# haven't matched
yield False
def variadic_signature_matches(types, full_signature):
# No arguments always matches a variadic signature
assert full_signature
return all(variadic_signature_matches_iter(types, full_signature))
| MDNotImplementedError |
python | pytorch__pytorch | torch/distributed/flight_recorder/components/types.py | {
"start": 1190,
"end": 2156
} | class ____(Enum):
"""
Enum representing the possible states of matching for collective operations.
- FULLY_MATCHED: Indicates that all aspects of the collective operations match.
- COLLECTIVE_TYPE_MISMATCH: The types of the collective operations differ.
- SIZE_OR_SYNTAX_MISMATCH: There is a mismatch in input/output sizes or violation of collective syntax.
- COLLECTIVE_STATE_MISMATCH:
The states of the collective not same, such as one finished while another just started or scheduled.
- COLLECTIVE_DTYPE_MISMATCH: The data types of the collective input/output differ.
- UNDECIDED:
The match status is ambiguous or cannot be determined, e.g., we might need to check all ranks for alltoall_base.
"""
FULLY_MATCHED = auto()
COLLECTIVE_TYPE_MISMATCH = auto()
SIZE_OR_SYNTAX_MISMATCH = auto()
COLLECTIVE_STATE_MISMATCH = auto()
COLLECTIVE_DTYPE_MISMATCH = auto()
UNDECIDED = auto()
| MatchState |
python | huggingface__transformers | src/transformers/integrations/mxfp4.py | {
"start": 3730,
"end": 4824
} | class ____(ConversionOps):
def __init__(self, hf_quantizer):
self.hf_quantizer = hf_quantizer
def convert(
self,
input_dict: dict[str, torch.Tensor],
model: Optional[torch.nn.Module] = None,
full_layer_name: str | None = None,
missing_keys=None,
**kwargs,
) -> dict[str, torch.Tensor]:
param_data = {}
if "_blocks" in input_dict.keys():
if isinstance(input_dict["_blocks"], list):
param_data["_blocks"] = input_dict["_blocks"][0]
else:
param_data["_blocks"] = input_dict["_blocks"]
if "_scales" in input_dict.keys():
if isinstance(input_dict["_scales"], list):
param_data["_scales"] = input_dict["_scales"][0]
else:
param_data["_scales"] = input_dict["_scales"]
# Here we are dequantizing the weights
dequantized = dequantize_convertops(param_data["_blocks"], param_data["_scales"], param_data["_blocks"].device)
return {full_layer_name: dequantized}
| Mxfp4Dequantize |
python | numba__numba | numba/core/pythonapi.py | {
"start": 2692,
"end": 3084
} | class ____(object):
"""
Encapsulate the result of converting a Python object to a native value,
recording whether the conversion was successful and how to cleanup.
"""
def __init__(self, value, is_error=None, cleanup=None):
self.value = value
self.is_error = is_error if is_error is not None else cgutils.false_bit
self.cleanup = cleanup
| NativeValue |
python | pytransitions__transitions | transitions/extensions/nesting.py | {
"start": 2423,
"end": 3668
} | class ____(object):
"""A wrapper to enable transitions' convenience function to_<state> for nested states.
This allows to call model.to_A.s1.C() in case a custom separator has been chosen."""
def __init__(self, func):
"""
Args:
func: Function to be called at the end of the path.
path: If path is an empty string, assign function
"""
self._func = func
def add(self, func, path):
"""Assigns a `FunctionWrapper` as an attribute named like the next segment of the substates
path.
Args:
func (callable): Function to be called at the end of the path.
path (list of strings): Remaining segment of the substate path.
"""
if not path:
self._func = func
else:
name = path[0]
if name[0].isdigit():
name = 's' + name
if hasattr(self, name):
getattr(self, name).add(func, path[1:])
else:
assert not path[1:], "nested path should be empty"
setattr(self, name, FunctionWrapper(func))
def __call__(self, *args, **kwargs):
return self._func(*args, **kwargs)
| FunctionWrapper |
python | Pylons__pyramid | docs/tutorials/wiki2/src/authentication/tutorial/models/user.py | {
"start": 137,
"end": 885
} | class ____(Base):
""" The SQLAlchemy declarative model class for a User object. """
__tablename__ = 'users'
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str] = mapped_column(unique=True)
role: Mapped[str]
password_hash: Mapped[Optional[str]]
def set_password(self, pw):
pwhash = bcrypt.hashpw(pw.encode('utf8'), bcrypt.gensalt())
self.password_hash = pwhash.decode('utf8')
def check_password(self, pw):
if self.password_hash is not None:
expected_hash = self.password_hash.encode('utf8')
return bcrypt.checkpw(pw.encode('utf8'), expected_hash)
return False
created_pages: Mapped[List['Page']] = relationship(back_populates='creator')
| User |
python | crytic__slither | slither/slithir/tmp_operations/argument.py | {
"start": 257,
"end": 1201
} | class ____(Operation):
def __init__(self, argument: Expression) -> None:
super().__init__()
self._argument = argument
self._type = ArgumentType.CALL
self._callid: Optional[str] = None
@property
def argument(self) -> Expression:
return self._argument
@property
def call_id(self) -> Optional[str]:
return self._callid
@call_id.setter
def call_id(self, c: str) -> None:
self._callid = c
@property
def read(self) -> List[Expression]:
return [self.argument]
def set_type(self, t: ArgumentType) -> None:
assert isinstance(t, ArgumentType)
self._type = t
def get_type(self) -> ArgumentType:
return self._type
def __str__(self) -> str:
call_id = "none"
if self.call_id:
call_id = f"(id ({self.call_id}))"
return f"ARG_{self._type.name} {str(self._argument)} {call_id}"
| Argument |
python | huggingface__transformers | src/transformers/data/datasets/glue.py | {
"start": 2164,
"end": 2239
} | class ____(Enum):
train = "train"
dev = "dev"
test = "test"
| Split |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_assorted_poly.py | {
"start": 65535,
"end": 67387
} | class ____(fixtures.DeclarativeMappedTest):
"""Test [ticket:2419]'s test case."""
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
class B(Base):
__tablename__ = "b"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
ds = relationship("D")
es = relationship("E")
class C(A):
__tablename__ = "c"
id = Column(Integer, ForeignKey("a.id"), primary_key=True)
b_id = Column(Integer, ForeignKey("b.id"))
b = relationship("B", primaryjoin=b_id == B.id)
class D(Base):
__tablename__ = "d"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
b_id = Column(Integer, ForeignKey("b.id"))
class E(Base):
__tablename__ = "e"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
b_id = Column(Integer, ForeignKey("b.id"))
@testing.fails_on(
["oracle", "mssql"],
"Oracle / SQL server engines can't handle this, "
"not clear if there's an expression-level bug on our "
"end though",
)
def test_join_w_eager_w_any(self):
B, C, D = (self.classes.B, self.classes.C, self.classes.D)
s = fixture_session()
b = B(ds=[D()])
s.add_all([C(b=b)])
s.commit()
q = s.query(B, B.ds.any(D.id == 1)).options(joinedload(B.es))
q = q.join(C, C.b_id == B.id)
q = q.limit(5)
eq_(q.all(), [(b, True)])
| Ticket2419Test |
python | google__jax | jax/_src/interpreters/partial_eval.py | {
"start": 32321,
"end": 54012
} | class ____(NamedTuple):
eqn_id: Any
in_tracers: Sequence[JaxprTracer]
out_tracer_refs: Sequence[ref[JaxprTracer]]
out_avals: Sequence[core.AbstractValue]
primitive: Primitive
params: dict[str, Any]
effects: core.Effects
source_info: source_info_util.SourceInfo
ctx: JaxprEqnContext
def new_eqn_recipe(trace: JaxprTrace,
in_tracers: Sequence[JaxprTracer],
out_tracers: Sequence[JaxprTracer],
primitive: Primitive,
params: dict[str, Any],
effects: core.Effects,
source_info: source_info_util.SourceInfo,
ctx: JaxprEqnContext | None = None) -> JaxprEqnRecipe:
# TODO(necula): move these checks to core.check_jaxpr, and call in more places
if primitive.call_primitive or primitive.map_primitive:
assert "call_jaxpr" in params
assert ("donated_invars" not in params or
len(params["donated_invars"]) == len(params["call_jaxpr"].invars))
if primitive.map_primitive:
assert ("in_axes" in params and
len(params["in_axes"]) == len(params["call_jaxpr"].invars))
assert ("donated_invars" in params and
len(params["donated_invars"]) == len(params["call_jaxpr"].invars))
out_avals = [t.aval for t in out_tracers]
ctx = ctx or JaxprEqnContext(
config.compute_on_context_manager.value,
config.threefry_partitionable.value,
xla_metadata_lib.current_xla_metadata(),
)
return JaxprEqnRecipe(next(trace.counter), tuple(in_tracers), map(ref, out_tracers),
out_avals, primitive, params, effects, source_info,
ctx)
def recipe_to_eqn(getvar: Callable[[JaxprTracer], Atom],
recipe: JaxprEqnRecipe) -> core.JaxprEqn:
(_, in_tracers, out_tracer_refs, out_avals, prim, params, eff, src,
ctx) = recipe
invars = [getvar(t) for t in in_tracers]
out_tracers = [t_ref() for t_ref in out_tracer_refs]
outvars = [DropVar(a) if t is None else getvar(t)
for a, t in zip(out_avals, out_tracers)]
return new_jaxpr_eqn(invars, outvars, prim, params, eff, src, ctx)
def tracers_to_jaxpr(
in_tracers: Sequence[JaxprTracer],
out_tracers: Sequence[JaxprTracer],
effect_handles: Sequence[Any],
debug_info: core.DebugInfo,
) -> tuple[Jaxpr, tuple[Any, ...], tuple[Any, ...]]:
"""Constructs Jaxpr given tracers for inputs and outputs.
Params:
in_tracers: the tracers that were created for the function inputs
out_tracers: the tracers that were output by the function.
debug_info: the debug info for the function.
Returns: a triple of a `Jaxpr`, a list of constant values corresponding to
the `constvars` in the returned Jaxps, and a list of environment values.
The vars for the environment values have been prepended to the Jaxpr's
`invars`.
"""
gensym = core.gensym()
t_to_var: dict[TracerId, Var] = {}
consts: dict[Var, Any] = {}
env: dict[Var, JaxprTracer] = {}
constid_to_var: dict[ConstId, Var] = {} # for deduplication
def get_atom(t: JaxprTracer) -> Atom:
return t.recipe if type(t.recipe) is Literal else t_to_var[id(t)]
def newvar(t: JaxprTracer | None) -> Var:
assert t is not None
var = gensym(type_substitute(t.aval))
var_ = t_to_var.setdefault(id(t), var)
assert var is var_
return var
def type_substitute(aval: AbstractValue) -> AbstractValue:
if isinstance(aval, DShapedArray):
# Replace any Tracers in aval.shape with Vars or Literal values
shape = [get_atom(d) if type(d) is JaxprTracer else d for d in aval.shape]
shape = [d.val if type(d) is Literal else d for d in shape]
aval = aval.update(shape=tuple(shape))
return aval
processed_eqn_ids = set()
eqns: list[core.JaxprEqn] = []
reachable = toposort
tracers = reachable((*in_tracers, *out_tracers, *effect_handles))
def sort_key(t):
r = t.recipe
return r.eqn_id if isinstance(r, JaxprEqnRecipe) else -1
tracers = sorted(tracers, key=sort_key)
for t in tracers:
r = t.recipe
if isinstance(r, JaxprEqnRecipe):
# TODO broadcast_in_dim can create a new tracer, not present in parents
if r.eqn_id not in processed_eqn_ids:
in_atoms = map(get_atom, r.in_tracers)
outvars = [DropVar(type_substitute(a)) if rf() is None else newvar(rf())
for a, rf in zip(r.out_avals, r.out_tracer_refs)]
eqns.append(new_jaxpr_eqn(in_atoms, outvars, r.primitive, r.params,
r.effects, r.source_info, r.ctx))
processed_eqn_ids.add(r.eqn_id)
elif isinstance(r, LambdaBinding):
if not any(t is in_tracer for in_tracer in in_tracers):
raise core.escaped_tracer_error(t, f"Tracer not in input tracers: {t}")
newvar(t)
elif isinstance(r, ConstVar):
var = constid_to_var.get(id(r.val))
if var is None:
var = constid_to_var[id(r.val)] = newvar(t)
consts[var] = r.val
t_to_var[id(t)] = var
elif isinstance(r, FreeVar):
env[newvar(t)] = r.val
elif isinstance(r, Literal):
pass
elif r is None:
assert False
else:
raise TypeError(r)
env_vars, env_vals = unzip2(env.items())
invars = [*env_vars, *map(get_atom, in_tracers)]
const_vars, const_vals = unzip2(consts.items())
outvars = map(get_atom, out_tracers) # type: ignore[arg-type]
jaxpr_effects = make_jaxpr_effects(const_vars, invars, outvars, eqns)
jaxpr = Jaxpr(const_vars, invars, # type: ignore[arg-type]
outvars, eqns, jaxpr_effects,
debug_info)
config.enable_checks.value and core.check_jaxpr(jaxpr)
# del getvar # needed to avoid cyclic-reference closure, apparently!
return jaxpr, const_vals, env_vals
@weakref_lru_cache
def move_envvars(jaxpr: Jaxpr, which: tuple[bool, ...]) -> Jaxpr:
  """Move the constvars selected by `which` to the front of the invars."""
  kept_consts = [v for sel, v in zip(which, jaxpr.constvars) if not sel]
  moved_to_inputs = [v for sel, v in zip(which, jaxpr.constvars) if sel]
  return jaxpr.replace(constvars=kept_consts,
                       invars=moved_to_inputs + list(jaxpr.invars))
@weakref_lru_cache
def convert_constvars_jaxpr(jaxpr: Jaxpr) -> Jaxpr:
  """Moves the constvars to the start of invars."""
  if config.enable_checks.value:
    core.check_jaxpr(jaxpr)
  old_names = jaxpr.debug_info.arg_names
  # The lifted constvars get empty argument names, prepended to the originals.
  new_names = None if old_names is None else (
      ("",) * len(jaxpr.constvars) + tuple(old_names))
  lifted = jaxpr.replace(
      constvars=(),
      invars=jaxpr.constvars + jaxpr.invars,
      debug_info=jaxpr.debug_info._replace(arg_names=new_names))
  if config.enable_checks.value:
    core.check_jaxpr(lifted)
  return lifted
@weakref_lru_cache
def convert_invars_to_constvars(jaxpr: Jaxpr, n: int) -> Jaxpr:
  """Move n invars to constvars. Like an inverse of convert_constvars_jaxpr."""
  if n == 0:
    # A plain `return jaxpr` would create a cache reference cycle.
    return jaxpr.replace()
  if config.enable_checks.value:
    core.check_jaxpr(jaxpr)
  dbg = jaxpr.debug_info
  if dbg.arg_names is not None:
    # Drop the argument names of the invars being converted.
    dbg = dbg._replace(arg_names=dbg.arg_names[n:])
  lowered = jaxpr.replace(constvars=tuple(jaxpr.invars[:n]),
                          invars=list(jaxpr.invars[n:]),
                          debug_info=dbg)
  if config.enable_checks.value:
    core.check_jaxpr(lowered)
  return lowered
def convert_envvars_to_constvars(jaxpr: Jaxpr, num_env_vars: int) -> Jaxpr:
  """Append the leading `num_env_vars` invars to the jaxpr's constvars."""
  for eff in jaxpr.effects:
    if isinstance(eff, effects.JaxprInputEffect):
      raise NotImplementedError
  if config.enable_checks.value:
    core.check_jaxpr(jaxpr)
  env_vars = list(jaxpr.invars[:num_env_vars])
  remaining_invars = list(jaxpr.invars[num_env_vars:])
  converted = jaxpr.replace(constvars=jaxpr.constvars + env_vars,
                            invars=remaining_invars)
  if config.enable_checks.value:
    core.check_jaxpr(converted)
  return converted
def partial_eval_jaxpr_nounits(
    jaxpr: ClosedJaxpr, unknowns: Sequence[bool],
    instantiate: bool | Sequence[bool],
) -> tuple[ClosedJaxpr, ClosedJaxpr, list[bool], list[AbstractValue]]:
  """Unzip a jaxpr in two by data dependence into 'known' and 'unknown' parts.

  That is, given a jaxpr and a sequence of booleans indicating which jaxpr
  inputs (i.e. invars) are considered unknown, produce two jaxprs, a list of
  booleans representing which of the original jaxpr's outputs are unknown (i.e.
  have a data dependence on an unknown input), and a list of abstract values
  representing residuals (part of the first jaxpr's output and the second
  jaxpr's input). The two jaxprs result from partitioning the original jaxpr's
  first-order primitive applications based on whether all the inputs to the
  application are known (in which case the application is represented in the
  'known' jaxpr and its result is considered known) or whether any inputs to the
  application are unknown (in which case the application is represented in the
  'unknown' jaxpr and its result is considered unknown). Higher-order primitives
  are recursively unzipped in two.

  The `instantiate` argument can be used to ensure some outputs are lifted into
  the 'unknown' jaxpr.

  For example, given an input jaxpr:

    { lambda ; a:f32[] b:f32[]. let
        c:f32[] = cos a
        d:f32[] = sin a
        e:f32[] = neg d
        f:f32[] = mul e b
      in (c, f) }

  then applying this function with `unknowns=[False, True]` and
  `instantiate=False` produces as an output triple:

    # jaxpr_known
    { lambda ; a:f32[]. let
       b:f32[] = cos a
       c:f32[] = sin a
       d:f32[] = neg c
     in (b, d) }

    # jaxpr_unknown
    { lambda ; a:f32[] b:f32[]. let c:f32[] = mul b a in (c,) }

    # out_unknowns
    [False, True]

  Notice in particular that the first output (jaxpr_known) contains all the
  primitive applications which do not have a data dependence on an unknown
  input. Also notice the input and output types: the input type of the first
  jaxpr produced represents the type of the known inputs of the original jaxpr,
  and the output type of the second jaxpr produced represents the type of the
  unknown outputs of the original jaxpr.

  In the above example, the output of jaxpr_known named `d` is a _residual_
  output, and corresponds to the input named `a` in jaxpr_unknown. In general,
  jaxpr_known will produce extra outputs (at the end of its output list)
  corresponding to intermediate values of the original jaxpr which must be
  passed to jaxpr_unknown (as leading inputs).
  """
  if isinstance(instantiate, list):
    instantiate = tuple(instantiate)
  # Forwarding info (the final result of the worker) is not exposed here.
  jaxpr_known, jaxpr_unknown, out_unknowns, res_avals, _fwds = \
      _partial_eval_jaxpr_nounits(jaxpr, tuple(unknowns), instantiate, False)
  return jaxpr_known, jaxpr_unknown, out_unknowns, res_avals
def partial_eval_jaxpr_nounits_fwd(
    jaxpr: ClosedJaxpr, unknowns: Sequence[bool],
    instantiate: bool | Sequence[bool],
    fwd: bool | Sequence[bool] = True,
) -> tuple[ClosedJaxpr, ClosedJaxpr, list[bool], list[AbstractValue], list[int | None]]:
  """Like `partial_eval_jaxpr_nounits`, but also reports residual forwarding.

  The extra final result has one entry per residual: None when the residual is
  a computed intermediate, or an index identifying the known value it forwards
  from. `fwd` can disable forwarding globally (False) or per input (a sequence
  of bools).
  """
  if isinstance(instantiate, list):
    instantiate = tuple(instantiate)
  if isinstance(fwd, list):
    fwd = tuple(fwd)
  return _partial_eval_jaxpr_nounits(jaxpr, tuple(unknowns), instantiate, fwd)
@weakref_lru_cache
def _partial_eval_jaxpr_nounits(
    jaxpr: ClosedJaxpr, in_unknowns: Sequence[bool],
    instantiate: bool | Sequence[bool], fwd: bool | Sequence[bool]):
  """Cached worker shared by partial_eval_jaxpr_nounits{,_fwd}."""
  f = lu.wrap_init(core.jaxpr_as_fun(jaxpr), debug_info=jaxpr.jaxpr.debug_info)
  cell = []  # one-shot mailbox: `fun` stashes results it cannot return directly
  def fun(*known_vals_in):
    """Partially evaluate `jaxpr` given only the known input values."""
    # Reassemble the full input pvals in original order: unknown slots carry
    # just their aval, known slots consume the concrete values in sequence.
    known_vals_in_ = iter(known_vals_in)
    unknown_avals = (a for a, uk in zip(jaxpr.in_avals, in_unknowns) if uk)
    in_pvals = [PartialVal.unknown(next(unknown_avals)) if uk
                else PartialVal.known(next(known_vals_in_)) for uk in in_unknowns]
    # Both iterators must be exactly exhausted.
    assert next(known_vals_in_, None) is next(unknown_avals, None) is None
    jaxpr_unknown_, (fwds, out_pvals, residuals, ()) = trace_to_subjaxpr_nounits_fwd(
        f, TraceTag(), jaxpr.jaxpr.debug_info, instantiate).call_wrapped(in_pvals)
    jaxpr_unknown = convert_constvars_jaxpr(jaxpr_unknown_)
    out_unknowns = [not pval.is_known() for pval in out_pvals]
    if type(fwd) is bool and not fwd:
      # Forwarding disabled entirely: materialize each forwarded known input
      # back into the residual list and report no forwarding at all.
      residuals_ = iter(residuals)
      residuals = [next(residuals_) if f is None else known_vals_in[f]
                   for f in fwds]
      assert next(residuals_, None) is None
      fwds = [None] * len(fwds)
    else:
      if type(fwd) is tuple:
        # Per-input mask: keep a forwarded entry only where the mask allows;
        # otherwise materialize the known value as a residual. Note
        # list.append returns None, so disallowed/computed slots become None.
        fwd_ = [f for f, uk in zip(fwd, in_unknowns) if not uk]
        residuals_, residuals = iter(residuals), []
        fwds = [residuals.append(next(residuals_)) if f is None else
                residuals.append(known_vals_in[f]) if not fwd_[f] else
                f for f in fwds]
      # Rebase forwarding indices to also cover the closed-over consts.
      fwds, residuals = _include_consts_in_fwds(jaxpr.consts, fwds, residuals)
    res_avals = [core.get_aval(r) for r in residuals]
    cell.append((out_unknowns, jaxpr_unknown, res_avals, fwds))
    # The known jaxpr outputs the known outputs followed by the residuals.
    known_vals_out = [pval.get_known() for pval in out_pvals if pval.is_known()]
    return [*known_vals_out, *residuals]
  # Trace `fun` on just the known inputs to build the known jaxpr.
  known_avals = [a for a, uk in zip(jaxpr.in_aval_qdds, in_unknowns) if not uk]
  jaxpr_known, _, consts_known = trace_to_jaxpr_dynamic(
      lu.wrap_init(fun, debug_info=f.debug_info.with_unknown_names()),
      known_avals)
  # `fun` ran exactly once during tracing, so the mailbox has one entry.
  (out_unknowns, jaxpr_unknown, res_avals, fwds), = cell  # pytype: disable=bad-unpacking
  if config.enable_checks.value:
    core.check_jaxpr(jaxpr_known)
    core.check_jaxpr(jaxpr_unknown)
  closed_jaxpr_known = ClosedJaxpr(jaxpr_known, consts_known)
  closed_jaxpr_unknown = ClosedJaxpr(jaxpr_unknown, ())
  return closed_jaxpr_known, closed_jaxpr_unknown, out_unknowns, res_avals, fwds
def _include_consts_in_fwds(consts, fwds, residuals):
  """Rebase forwarding indices so they count `consts` before the known inputs.

  On entry, each entry of `fwds` is either None (the corresponding residual is
  a computed value) or an index into the caller's known input values. On exit,
  indices refer to positions in the concatenation (*consts, *known inputs), and
  residuals that are (by object identity) one of `consts` are removed from
  `residuals` and reported as forwarded instead.
  """
  if all(f is None for f in fwds):
    return fwds, residuals  # nothing forwarded, nothing to rebase
  # One distinct stand-in object per possible forwarded-input index.
  dummys = [object() for _ in range(max(f for f in fwds if f is not None) + 1)]
  residuals_ = iter(residuals)
  # Re-expand to one entry per fwd slot: computed residuals stay, forwarded
  # slots get their index's stand-in.
  residuals = [next(residuals_) if f is None else dummys[f] for f in fwds]
  assert next(residuals_, None) is None
  # Map object identity -> position in (*consts, *dummys).
  idxs = {id(x): i for i, x in enumerate((*consts, *dummys))}
  fwds = [idxs.get(id(r)) for r in residuals]
  # Anything found in the map (a const or a stand-in) is forwarded, not kept.
  residuals = [r for r in residuals if id(r) not in idxs]
  return fwds, residuals
def partial_eval_jaxpr_custom(
    jaxpr: Jaxpr,
    in_unknowns: Sequence[bool],
    in_inst: bool | Sequence[bool],
    ensure_out_unknowns: bool | Sequence[bool],
    ensure_out_inst: bool | Sequence[bool],
    saveable: Callable[..., RematCases_],
) -> tuple[Jaxpr, Jaxpr, list[bool], list[bool], int]:
  """Stateless variant of `partial_eval_jaxpr_stateful`.

  Raises:
    ValueError: if the jaxpr produces residual refs (i.e. it is stateful).
  """
  results = partial_eval_jaxpr_stateful(
      jaxpr, in_unknowns, in_inst, ensure_out_unknowns, ensure_out_inst,
      saveable)
  num_res_ref = results[-1]
  if num_res_ref:
    raise ValueError("Cannot use `partial_eval_jaxpr_custom` with stateful jaxprs.")
  return tuple(results[:-1])  # type: ignore
def partial_eval_jaxpr_stateful(
    jaxpr: Jaxpr,
    in_unknowns: Sequence[bool],
    in_inst: bool | Sequence[bool],
    ensure_out_unknowns: bool | Sequence[bool],
    ensure_out_inst: bool | Sequence[bool],
    saveable: Callable[..., RematCases_] | None,
) -> tuple[Jaxpr, Jaxpr, list[bool], list[bool], int, int]:
  """Partially evaluate `jaxpr`, allowing residual refs.

  Normalizes scalar-bool arguments by broadcasting them over the invars or
  outvars, defaults `saveable` to `everything_saveable`, and dispatches to the
  cached worker. Returns (jaxpr_known, jaxpr_staged, out_unknowns, out_inst,
  num_res, num_res_ref).
  """
  def broadcast(flag, length):
    # A bare bool applies to every position; sequences are passed through
    # (as tuples, so they are hashable for the cache).
    return (flag,) * length if type(flag) is bool else tuple(flag)
  return _partial_eval_jaxpr_custom_cached(
      jaxpr,
      tuple(in_unknowns),
      broadcast(in_inst, len(jaxpr.invars)),
      broadcast(ensure_out_unknowns, len(jaxpr.outvars)),
      broadcast(ensure_out_inst, len(jaxpr.outvars)),
      everything_saveable if saveable is None else saveable)
everything_saveable = lambda *_, **__: True
@weakref_lru_cache
def _partial_eval_jaxpr_custom_cached(
    jaxpr: Jaxpr,
    in_unknowns: tuple[bool, ...],
    in_inst: tuple[bool, ...],
    ensure_out_unknowns: tuple[bool, ...],
    ensure_out_inst: tuple[bool, ...],
    saveable: Callable[..., RematCases_],
) -> tuple[Jaxpr, Jaxpr, list[bool], list[bool], int, int]:
  """Worker for partial_eval_jaxpr_stateful, cached on its (hashable) args.

  Walks the equations once, classifying each as known (kept in the 'known'
  jaxpr) or staged (deferred to the 'staged' jaxpr), tracking two flags per
  Var in `env`: (unknown, instantiated). Returns the known jaxpr, the staged
  jaxpr, the per-output unknown/instantiated flags, and the counts of residual
  values and of non-input residual refs.
  """
  # env maps each Var to (unknown, instantiated) flags.
  env: dict[Var, tuple[bool, bool]] = {}
  residuals: OrderedSet[Var] = OrderedSet()
  residual_refs: OrderedSet[Var] = OrderedSet()
  def read(x: Atom) -> tuple[bool, bool]:
    # Literals are always known and instantiated.
    if type(x) is Var:
      return env[x]
    return (False, True)
  def write(unk: bool, inst: bool, v: Var) -> None:
    # (unknown, not instantiated) is not a representable state.
    assert (unk, inst) != (True, False)
    env[v] = (unk, inst)
  def ensure_instantiated(inst: bool, x: Atom) -> Atom:
    # A known-but-uninstantiated Var needed by the staged jaxpr becomes a
    # residual: the known jaxpr must output it.
    if type(x) is Var and not inst:
      residuals.add(x)
    return x
  def has_effects(effects) -> bool:
    # NamedAxisEffects do not count as real effects here.
    return bool({e for e in effects if not isinstance(e, core.NamedAxisEffect)})
  known_eqns, staged_eqns = [], []
  foreach(write, in_unknowns, in_inst, jaxpr.invars)
  foreach(partial(write, False, True), jaxpr.constvars)
  for eqn in jaxpr.eqns:
    unks_in, inst_in = unzip2(map(read, eqn.invars))
    # Higher-order primitives may register a custom partial-eval rule.
    rule = partial_eval_jaxpr_custom_rules.get(eqn.primitive)
    if rule:
      eqn1, eqn2, unks_out, inst_out, res = rule(saveable, unks_in, inst_in, eqn)
      eqn1 and known_eqns.append(eqn1); eqn2 and staged_eqns.append(eqn2)  # type: ignore
      for r in res:
        if isinstance(r.aval, AbstractRef):
          residual_refs.add(r)
        else:
          residuals.add(r)
      foreach(write, unks_out, inst_out, eqn.outvars)
    elif any(unks_in):
      # Any unknown input makes the whole application unknown: stage it.
      inputs = map(ensure_instantiated, inst_in, eqn.invars)
      staged_eqns.append(eqn.replace(invars=inputs))
      foreach(partial(write, True, True), eqn.outvars)
    else:
      known_eqns.append(eqn)
      # If it's an effectful primitive, always run it now rather than staging it.
      policy = ensure_enum(saveable(
          eqn.primitive, *[x.aval for x in eqn.invars], **eqn.params))
      if has_effects(eqn.effects) or isinstance(policy, SaveableType):
        foreach(partial(write, False, False), eqn.outvars)
      elif isinstance(policy, Offloadable):
        # Offload: copy the outputs to the policy's destination memory in the
        # known jaxpr, then reload them in the staged jaxpr.
        # TODO(slebedev): This is a legit error which requires a BUILD fix.
        from jax._src.dispatch import device_put_p, ArrayCopySemantics  # type: ignore
        resvars = [Var(v.aval.update(memory_space=core.mem_kind_to_space(policy.dst)))
                   for v in eqn.outvars]
        offload_eqn = core.JaxprEqn(
            eqn.outvars, resvars, device_put_p,
            dict(
                devices=(core.mem_kind_to_space(policy.dst),) * len(eqn.outvars),
                srcs=(None,),
                copy_semantics=(ArrayCopySemantics.ALWAYS_COPY,),
            ),
            set(), source_info_util.new_source_info(),
            JaxprEqnContext(None, False))
        known_eqns.append(offload_eqn)
        # resvars are known and available in the backward jaxpr.
        foreach(partial(write, False, True), resvars)
        assert all(o.aval.memory_space == core.mem_kind_to_space(policy.src)  # type: ignore
                   for o in eqn.outvars)
        # The offloaded copies (not the originals) are the residuals.
        residuals.update(resvars)
        reload_eqn = core.JaxprEqn(
            resvars, eqn.outvars, device_put_p,
            dict(
                devices=(core.mem_kind_to_space(policy.src),) * len(resvars),
                srcs=(None,),
                copy_semantics=(ArrayCopySemantics.ALWAYS_COPY,)
            ),
            set(), source_info_util.new_source_info(),
            JaxprEqnContext(None, False))
        staged_eqns.append(reload_eqn)
        # outvars are known and available in the backward jaxpr.
        foreach(partial(write, False, True), eqn.outvars)
      else:
        # Recompute: run it now AND stage a copy for rematerialization.
        assert isinstance(policy, RecomputeType)
        inputs = map(ensure_instantiated, inst_in, eqn.invars)
        staged_eqns.append(eqn.replace(invars=inputs))
        foreach(partial(write, False, True), eqn.outvars)
  unzipped = unzip2(map(read, jaxpr.outvars))
  out_unknowns, out_inst = list(unzipped[0]), list(unzipped[1])
  assert all(type(v) is Var for v in residuals), residuals
  # Honor the caller's requests to force outputs unknown/instantiated.
  for x, inst, ensure_inst in zip(jaxpr.outvars, out_inst, ensure_out_inst):
    if ensure_inst: ensure_instantiated(inst, x)
  # NOTE(review): assumes `map` is the list-returning safe_map — consistent
  # with the declared list[bool] return components; confirm against imports.
  out_unknowns = map(op.or_, out_unknowns, ensure_out_unknowns)
  out_inst = map(op.or_, out_inst, ensure_out_inst)
  # Assemble the known jaxpr: known inputs (plus non-input residual refs) in,
  # known outputs followed by residuals out.
  ins_known, _ = partition_list(in_unknowns, jaxpr.invars)
  outs_known, _ = partition_list(out_unknowns, jaxpr.outvars)
  ref_res_is_input = [r in ins_known for r in residual_refs]
  non_input_res_refs, _ = partition_list(ref_res_is_input, list(residual_refs))
  ins_known_and_ref_res = [*ins_known, *non_input_res_refs]
  known_outvars = [*outs_known, *residuals]
  known_effects = make_jaxpr_effects(jaxpr.constvars, ins_known_and_ref_res,
                                     known_outvars, known_eqns)
  # TODO(mattjj,necula): debug info should be updated here
  jaxpr_known = jaxpr.replace(
      invars=ins_known_and_ref_res, outvars=known_outvars,
      eqns=known_eqns, effects=known_effects,
      debug_info=jaxpr.debug_info.with_unknown_names())
  config.enable_checks.value and core.check_jaxpr(jaxpr_known)
  # Assemble the staged jaxpr: residuals, residual refs, and instantiated
  # inputs in; instantiated outputs out.
  _, ins_staged = partition_list(in_inst, jaxpr.invars)
  _, outs_staged = partition_list(out_inst, jaxpr.outvars)
  staged_invars = [*residuals, *non_input_res_refs, *ins_staged]
  staged_effects = make_jaxpr_effects(jaxpr.constvars, staged_invars,
                                      outs_staged, staged_eqns)
  # TODO(mattjj,necula): debug info should be updated here
  jaxpr_staged = jaxpr.replace(
      invars=staged_invars, outvars=outs_staged, eqns=staged_eqns,
      effects=staged_effects,
      debug_info=jaxpr.debug_info.with_unknown_names())
  config.enable_checks.value and core.check_jaxpr(jaxpr_staged)
  return (jaxpr_known, jaxpr_staged, out_unknowns, out_inst, len(residuals),
          len(non_input_res_refs))
MemoryKind = str
| JaxprEqnRecipe |
python | pandas-dev__pandas | pandas/core/_numba/extensions.py | {
"start": 1937,
"end": 2671
} | class ____(types.Type):
"""
The type class for Index objects.
"""
def __init__(self, dtype, layout, pyclass: any) -> None:
self.pyclass = pyclass
name = f"index({dtype}, {layout})"
self.dtype = dtype
self.layout = layout
super().__init__(name)
@property
def key(self):
return self.pyclass, self.dtype, self.layout
@property
def as_array(self):
return types.Array(self.dtype, 1, self.layout)
def copy(self, dtype=None, ndim: int = 1, layout=None) -> Self:
assert ndim == 1
if dtype is None:
dtype = self.dtype
layout = layout or self.layout
return type(self)(dtype, layout, self.pyclass)
| IndexType |
python | walkccc__LeetCode | solutions/705. Design HashSet/705.py | {
"start": 0,
"end": 264
} | class ____:
def __init__(self):
self.set = [False] * 1000001
def add(self, key: int) -> None:
self.set[key] = True
def remove(self, key: int) -> None:
self.set[key] = False
def contains(self, key: int) -> bool:
return self.set[key]
| MyHashSet |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_index_returned.py | {
"start": 411,
"end": 517
} | class ____(type):
def __index__(cls):
return 1
@six.add_metaclass(IndexMetaclass)
| IndexMetaclass |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/ir.py | {
"start": 70738,
"end": 75507
} | class ____(IR):
"""A conditional inner join of two dataframes on a predicate."""
class Predicate:
"""Serializable wrapper for a predicate expression."""
predicate: expr.Expr
ast: plc.expressions.Expression
def __init__(self, predicate: expr.Expr):
self.predicate = predicate
stream = get_cuda_stream()
ast_result = to_ast(predicate, stream=stream)
stream.synchronize()
if ast_result is None:
raise NotImplementedError(
f"Conditional join with predicate {predicate}"
) # pragma: no cover; polars never delivers expressions we can't handle
self.ast = ast_result
def __reduce__(self) -> tuple[Any, ...]:
"""Pickle a Predicate object."""
return (type(self), (self.predicate,))
__slots__ = ("ast_predicate", "options", "predicate")
_non_child = ("schema", "predicate", "options")
predicate: expr.Expr
"""Expression predicate to join on"""
options: tuple[
tuple[
str,
polars._expr_nodes.Operator | Iterable[polars._expr_nodes.Operator],
]
| None,
bool,
Zlice | None,
str,
bool,
Literal["none", "left", "right", "left_right", "right_left"],
]
"""
tuple of options:
- predicates: tuple of ir join type (eg. ie_join) and (In)Equality conditions
- nulls_equal: do nulls compare equal?
- slice: optional slice to perform after joining.
- suffix: string suffix for right columns if names match
- coalesce: should key columns be coalesced (only makes sense for outer joins)
- maintain_order: which DataFrame row order to preserve, if any
"""
def __init__(
self, schema: Schema, predicate: expr.Expr, options: tuple, left: IR, right: IR
) -> None:
self.schema = schema
predicate = _strip_predicate_casts(predicate)
self.predicate = predicate
# options[0] is a tuple[str, Operator, ...]
# The Operator class can't be pickled, but we don't use it anyway so
# just throw that away
if options[0] is not None:
options = (None, *options[1:])
self.options = options
self.children = (left, right)
predicate_wrapper = self.Predicate(predicate)
_, nulls_equal, zlice, suffix, coalesce, maintain_order = self.options
# Preconditions from polars
assert not nulls_equal
assert not coalesce
assert maintain_order == "none"
self._non_child_args = (predicate_wrapper, options)
@classmethod
@log_do_evaluate
@nvtx_annotate_cudf_polars(message="ConditionalJoin")
def do_evaluate(
cls,
predicate_wrapper: Predicate,
options: tuple,
left: DataFrame,
right: DataFrame,
*,
context: IRExecutionContext,
) -> DataFrame:
"""Evaluate and return a dataframe."""
stream = get_joined_cuda_stream(
context.get_cuda_stream,
upstreams=(
left.stream,
right.stream,
),
)
left_casts, right_casts = _collect_decimal_binop_casts(
predicate_wrapper.predicate
)
_, _, zlice, suffix, _, _ = options
lg, rg = plc.join.conditional_inner_join(
_apply_casts(left, left_casts).table,
_apply_casts(right, right_casts).table,
predicate_wrapper.ast,
stream=stream,
)
left_result = DataFrame.from_table(
plc.copying.gather(
left.table, lg, plc.copying.OutOfBoundsPolicy.DONT_CHECK, stream=stream
),
left.column_names,
left.dtypes,
stream=stream,
)
right_result = DataFrame.from_table(
plc.copying.gather(
right.table, rg, plc.copying.OutOfBoundsPolicy.DONT_CHECK, stream=stream
),
right.column_names,
right.dtypes,
stream=stream,
)
right_result = right_result.rename_columns(
{
name: f"{name}{suffix}"
for name in right.column_names
if name in left.column_names_set
}
)
result = left_result.with_columns(right_result.columns, stream=stream)
# Join the original streams back into the result stream to ensure that the
# deallocations (on the original streams) happen after the result is ready
join_cuda_streams(
downstreams=(left.stream, right.stream), upstreams=(result.stream,)
)
return result.slice(zlice)
| ConditionalJoin |
python | langchain-ai__langchain | libs/langchain_v1/langchain/agents/middleware/_redaction.py | {
"start": 602,
"end": 9918
} | class ____(Exception):
"""Raised when configured to block on detected sensitive values."""
def __init__(self, pii_type: str, matches: Sequence[PIIMatch]) -> None:
"""Initialize the exception with match context.
Args:
pii_type: Name of the detected sensitive type.
matches: All matches that were detected for that type.
"""
self.pii_type = pii_type
self.matches = list(matches)
count = len(matches)
msg = f"Detected {count} instance(s) of {pii_type} in text content"
super().__init__(msg)
Detector = Callable[[str], list[PIIMatch]]
"""Callable signature for detectors that locate sensitive values."""
def detect_email(content: str) -> list[PIIMatch]:
"""Detect email addresses in content."""
pattern = r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b"
return [
PIIMatch(
type="email",
value=match.group(),
start=match.start(),
end=match.end(),
)
for match in re.finditer(pattern, content)
]
def detect_credit_card(content: str) -> list[PIIMatch]:
"""Detect credit card numbers in content using Luhn validation."""
pattern = r"\b\d{4}[\s-]?\d{4}[\s-]?\d{4}[\s-]?\d{4}\b"
matches = []
for match in re.finditer(pattern, content):
card_number = match.group()
if _passes_luhn(card_number):
matches.append(
PIIMatch(
type="credit_card",
value=card_number,
start=match.start(),
end=match.end(),
)
)
return matches
def detect_ip(content: str) -> list[PIIMatch]:
"""Detect IPv4 or IPv6 addresses in content."""
matches: list[PIIMatch] = []
ipv4_pattern = r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b"
for match in re.finditer(ipv4_pattern, content):
ip_candidate = match.group()
try:
ipaddress.ip_address(ip_candidate)
except ValueError:
continue
matches.append(
PIIMatch(
type="ip",
value=ip_candidate,
start=match.start(),
end=match.end(),
)
)
return matches
def detect_mac_address(content: str) -> list[PIIMatch]:
"""Detect MAC addresses in content."""
pattern = r"\b([0-9A-Fa-f]{2}[:-]){5}[0-9A-Fa-f]{2}\b"
return [
PIIMatch(
type="mac_address",
value=match.group(),
start=match.start(),
end=match.end(),
)
for match in re.finditer(pattern, content)
]
def detect_url(content: str) -> list[PIIMatch]:
"""Detect URLs in content using regex and stdlib validation."""
matches: list[PIIMatch] = []
# Pattern 1: URLs with scheme (http:// or https://)
scheme_pattern = r"https?://[^\s<>\"{}|\\^`\[\]]+"
for match in re.finditer(scheme_pattern, content):
url = match.group()
result = urlparse(url)
if result.scheme in ("http", "https") and result.netloc:
matches.append(
PIIMatch(
type="url",
value=url,
start=match.start(),
end=match.end(),
)
)
# Pattern 2: URLs without scheme (www.example.com or example.com/path)
# More conservative to avoid false positives
bare_pattern = (
r"\b(?:www\.)?[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?"
r"(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?:/[^\s]*)?"
)
for match in re.finditer(bare_pattern, content):
start, end = match.start(), match.end()
# Skip if already matched with scheme
if any(m["start"] <= start < m["end"] or m["start"] < end <= m["end"] for m in matches):
continue
url = match.group()
# Only accept if it has a path or starts with www
# This reduces false positives like "example.com" in prose
if "/" in url or url.startswith("www."):
# Add scheme for validation (required for urlparse to work correctly)
test_url = f"http://{url}"
result = urlparse(test_url)
if result.netloc and "." in result.netloc:
matches.append(
PIIMatch(
type="url",
value=url,
start=start,
end=end,
)
)
return matches
BUILTIN_DETECTORS: dict[str, Detector] = {
"email": detect_email,
"credit_card": detect_credit_card,
"ip": detect_ip,
"mac_address": detect_mac_address,
"url": detect_url,
}
"""Registry of built-in detectors keyed by type name."""
def _passes_luhn(card_number: str) -> bool:
"""Validate credit card number using the Luhn checksum."""
digits = [int(d) for d in card_number if d.isdigit()]
if not 13 <= len(digits) <= 19:
return False
checksum = 0
for index, digit in enumerate(reversed(digits)):
value = digit
if index % 2 == 1:
value *= 2
if value > 9:
value -= 9
checksum += value
return checksum % 10 == 0
def _apply_redact_strategy(content: str, matches: list[PIIMatch]) -> str:
result = content
for match in sorted(matches, key=lambda item: item["start"], reverse=True):
replacement = f"[REDACTED_{match['type'].upper()}]"
result = result[: match["start"]] + replacement + result[match["end"] :]
return result
def _apply_mask_strategy(content: str, matches: list[PIIMatch]) -> str:
result = content
for match in sorted(matches, key=lambda item: item["start"], reverse=True):
value = match["value"]
pii_type = match["type"]
if pii_type == "email":
parts = value.split("@")
if len(parts) == 2:
domain_parts = parts[1].split(".")
masked = (
f"{parts[0]}@****.{domain_parts[-1]}"
if len(domain_parts) >= 2
else f"{parts[0]}@****"
)
else:
masked = "****"
elif pii_type == "credit_card":
digits_only = "".join(c for c in value if c.isdigit())
separator = "-" if "-" in value else " " if " " in value else ""
if separator:
masked = f"****{separator}****{separator}****{separator}{digits_only[-4:]}"
else:
masked = f"************{digits_only[-4:]}"
elif pii_type == "ip":
octets = value.split(".")
masked = f"*.*.*.{octets[-1]}" if len(octets) == 4 else "****"
elif pii_type == "mac_address":
separator = ":" if ":" in value else "-"
masked = (
f"**{separator}**{separator}**{separator}**{separator}**{separator}{value[-2:]}"
)
elif pii_type == "url":
masked = "[MASKED_URL]"
else:
masked = f"****{value[-4:]}" if len(value) > 4 else "****"
result = result[: match["start"]] + masked + result[match["end"] :]
return result
def _apply_hash_strategy(content: str, matches: list[PIIMatch]) -> str:
result = content
for match in sorted(matches, key=lambda item: item["start"], reverse=True):
digest = hashlib.sha256(match["value"].encode()).hexdigest()[:8]
replacement = f"<{match['type']}_hash:{digest}>"
result = result[: match["start"]] + replacement + result[match["end"] :]
return result
def apply_strategy(
content: str,
matches: list[PIIMatch],
strategy: RedactionStrategy,
) -> str:
"""Apply the configured strategy to matches within content."""
if not matches:
return content
if strategy == "redact":
return _apply_redact_strategy(content, matches)
if strategy == "mask":
return _apply_mask_strategy(content, matches)
if strategy == "hash":
return _apply_hash_strategy(content, matches)
if strategy == "block":
raise PIIDetectionError(matches[0]["type"], matches)
msg = f"Unknown redaction strategy: {strategy}"
raise ValueError(msg)
def resolve_detector(pii_type: str, detector: Detector | str | None) -> Detector:
"""Return a callable detector for the given configuration."""
if detector is None:
if pii_type not in BUILTIN_DETECTORS:
msg = (
f"Unknown PII type: {pii_type}. "
f"Must be one of {list(BUILTIN_DETECTORS.keys())} or provide a custom detector."
)
raise ValueError(msg)
return BUILTIN_DETECTORS[pii_type]
if isinstance(detector, str):
pattern = re.compile(detector)
def regex_detector(content: str) -> list[PIIMatch]:
return [
PIIMatch(
type=pii_type,
value=match.group(),
start=match.start(),
end=match.end(),
)
for match in pattern.finditer(content)
]
return regex_detector
return detector
@dataclass(frozen=True)
| PIIDetectionError |
python | davidhalter__jedi | test/completion/ordering.py | {
"start": 1037,
"end": 1646
} | class ____(object):
a = ""
a = 3
#? int()
a
a = list()
def __init__(self):
self.b = ""
def before(self):
self.b = 3
# TODO should this be so? include entries after cursor?
#? int() str() list
self.b
self.b = list
self.a = 1
#? str() int()
self.a
#? ['after']
self.after
self.c = 3
#? int()
self.c
def after(self):
self.a = ''
c = set()
#? list()
A.a
a = A()
#? ['after']
a.after
#? []
a.upper
#? []
a.append
#? []
a.real
#? str() int()
a.a
a = 3
| A |
python | wntrblm__nox | tests/test_sessions.py | {
"start": 3344,
"end": 39618
} | class ____:
def make_session_and_runner(
self,
) -> tuple[nox.sessions.Session, nox.sessions.SessionRunner]:
func = mock.Mock(spec=["python"], python="3.7")
runner = nox.sessions.SessionRunner(
name="test",
signatures=["test"],
func=func,
global_config=_options.options.namespace(
posargs=[],
error_on_external_run=False,
install_only=False,
invoked_from=os.getcwd(),
),
manifest=mock.create_autospec(nox.manifest.Manifest),
)
runner.venv = make_fake_env(bin_paths=["/no/bin/for/you"])
assert isinstance(runner.venv, nox.virtualenv.VirtualEnv)
return nox.sessions.Session(runner=runner), runner
def test_create_tmp(self) -> None:
session, runner = self.make_session_and_runner()
with tempfile.TemporaryDirectory() as root:
runner.global_config.envdir = root
tmpdir = session.create_tmp()
assert session.env["TMPDIR"] == os.path.abspath(tmpdir)
assert tmpdir.startswith(root)
def test_create_tmp_twice(self) -> None:
session, runner = self.make_session_and_runner()
with tempfile.TemporaryDirectory() as root:
runner.global_config.envdir = root
assert runner.venv
runner.venv.bin = bin # type: ignore[misc, assignment]
session.create_tmp()
tmpdir = session.create_tmp()
assert session.env["TMPDIR"] == os.path.abspath(tmpdir)
assert tmpdir.startswith(root)
def test_properties(self) -> None:
session, runner = self.make_session_and_runner()
with tempfile.TemporaryDirectory() as root:
runner.global_config.envdir = root
assert session.name is runner.friendly_name
assert runner.venv
assert session.env is runner.venv.env
assert session.posargs == runner.global_config.posargs
assert session.virtualenv is runner.venv
assert runner.venv.bin_paths
assert session.bin_paths is runner.venv.bin_paths
assert session.bin is runner.venv.bin_paths[0]
assert session.python is runner.func.python
assert session.invoked_from is runner.global_config.invoked_from
assert session.cache_dir == Path(runner.global_config.envdir).joinpath(
".cache"
)
def test_no_bin_paths(self) -> None:
session, runner = self.make_session_and_runner()
assert runner.venv
runner.venv.bin_paths = None # type: ignore[misc]
with pytest.raises(
ValueError, match=r"^The environment does not have a bin directory\.$"
):
session.bin # noqa: B018
assert session.bin_paths is None
def test_virtualenv_as_none(self) -> None:
session, runner = self.make_session_and_runner()
runner.venv = None
with pytest.raises(ValueError, match="virtualenv"):
_ = session.virtualenv
assert session.venv_backend == "none"
def test_virtualenv_directory(self) -> None:
session, runner = self.make_session_and_runner()
with tempfile.TemporaryDirectory() as root:
runner.global_config.envdir = root
assert session.env_dir == Path(runner.envdir)
def test_interactive(self) -> None:
session, _runner = self.make_session_and_runner()
with mock.patch("nox.sessions.sys.stdin.isatty") as m_isatty:
m_isatty.return_value = True
assert session.interactive is True
m_isatty.return_value = False
assert session.interactive is False
def test_explicit_non_interactive(self) -> None:
session, runner = self.make_session_and_runner()
with mock.patch("nox.sessions.sys.stdin.isatty") as m_isatty:
m_isatty.return_value = True
runner.global_config.non_interactive = True
assert session.interactive is False
def test_chdir(self, tmp_path: Path) -> None:
cdbby = tmp_path / "cdbby"
cdbby.mkdir()
current_cwd = os.getcwd()
session, _ = self.make_session_and_runner()
session.chdir(str(cdbby))
assert cdbby.samefile(".")
os.chdir(current_cwd)
def test_chdir_ctx(self, tmp_path: Path) -> None:
cdbby = tmp_path / "cdbby"
cdbby.mkdir()
current_cwd = Path.cwd().resolve()
session, _ = self.make_session_and_runner()
with session.chdir(cdbby):
assert cdbby.samefile(".")
assert current_cwd.samefile(".")
os.chdir(current_cwd)
def test_invoked_from(self, tmp_path: Path) -> None:
cdbby = tmp_path / "cdbby"
cdbby.mkdir()
current_cwd = Path.cwd().resolve()
session, _ = self.make_session_and_runner()
session.chdir(cdbby)
assert current_cwd.samefile(session.invoked_from)
os.chdir(current_cwd)
def test_chdir_pathlib(self, tmp_path: Path) -> None:
cdbby = tmp_path / "cdbby"
cdbby.mkdir()
current_cwd = Path.cwd().resolve()
session, _ = self.make_session_and_runner()
session.chdir(cdbby)
assert cdbby.samefile(".")
os.chdir(current_cwd)
def test_run_bad_args(self) -> None:
session, _ = self.make_session_and_runner()
with pytest.raises(ValueError, match="arg"):
session.run()
def test_run_with_func(self) -> None:
session, _ = self.make_session_and_runner()
assert session.run(operator.add, 1, 2) == 3 # type: ignore[arg-type]
def test_run_with_func_error(self) -> None:
session, _ = self.make_session_and_runner()
def raise_value_error() -> NoReturn:
msg = "meep"
raise ValueError(msg)
with pytest.raises(nox.command.CommandFailed):
assert session.run(raise_value_error) # type: ignore[arg-type]
    def test_run_install_only(self, caplog: pytest.LogCaptureFixture) -> None:
        """With --install-only, run() is skipped and a notice is logged."""
        caplog.set_level(logging.INFO)
        session, runner = self.make_session_and_runner()
        runner.global_config.install_only = True
        with mock.patch.object(nox.command, "run") as run:
            # run() returns None instead of executing the command.
            assert session.run("spam", "eggs") is None
        run.assert_not_called()
        assert "install-only" in caplog.text
    def test_run_install_only_should_install(self) -> None:
        """--install-only still performs install() calls while skipping run()."""
        session, runner = self.make_session_and_runner()
        runner.global_config.install_only = True
        with mock.patch.object(nox.command, "run") as run:
            session.install("spam")
            session.run("spam", "eggs")
        env = dict(os.environ)
        # NOTE(review): "/no/bin/for/you" is presumably the fake venv's bin
        # dir that gets prepended to PATH — confirm against make_fake_env.
        env["PATH"] = os.pathsep.join(["/no/bin/for/you", env["PATH"]])
        # Only the pip install command should have been executed.
        run.assert_called_once_with(
            ("python", "-m", "pip", "install", "spam"),
            **run_with_defaults(paths=mock.ANY, silent=True, env=env, external="error"),
        )
def test_run_success(self) -> None:
session, _ = self.make_session_and_runner()
result = session.run(sys.executable, "-c", "print(123)")
assert result
def test_run_error(self) -> None:
session, _ = self.make_session_and_runner()
with pytest.raises(nox.command.CommandFailed):
session.run(sys.executable, "-c", "import sys; sys.exit(1)")
    def test_run_install_script(self) -> None:
        """install_and_run_script() installs script deps, then runs the script."""
        session, _ = self.make_session_and_runner()
        with mock.patch.object(nox.command, "run") as run:
            session.install_and_run_script(DIR / "resources/pep721example1.py")
        # Two invocations: first installs the script's declared dependencies
        # (which include "rich"), then the script itself is executed.
        assert len(run.call_args_list) == 2
        assert "rich" in run.call_args_list[0][0][0]
        assert DIR / "resources/pep721example1.py" in run.call_args_list[1][0][0]
    def test_run_overly_env(self) -> None:
        """run(env=...) overlays the venv env: values override, None removes."""
        session, runner = self.make_session_and_runner()
        assert runner.venv
        runner.venv.env["A"] = "1"
        runner.venv.env["B"] = "2"
        runner.venv.env["C"] = "4"
        result = session.run(
            sys.executable,
            "-c",
            'import os; print(os.environ["A"], os.environ["B"], os.environ.get("C", "5"))',
            env={"B": "3", "C": None},
            silent=True,
        )
        assert result
        # A passes through unchanged, B is overridden to 3, and C is removed
        # entirely (the child falls back to "5").
        assert result.strip() == "1 3 5"
    def test_by_default_all_invocation_env_vars_are_passed(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """By default, outer-process and venv env vars both reach the child."""
        monkeypatch.setenv("I_SHOULD_BE_INCLUDED", "happy")
        session, runner = self.make_session_and_runner()
        assert runner.venv
        runner.venv.env["I_SHOULD_BE_INCLUDED_TOO"] = "happier"
        runner.venv.env["EVERYONE_SHOULD_BE_INCLUDED_TOO"] = "happiest"
        result = session.run(
            sys.executable,
            "-c",
            "import os; print(os.environ)",
            silent=True,
        )
        assert result
        assert "happy" in result
        assert "happier" in result
        assert "happiest" in result
    def test_no_included_invocation_env_vars_are_passed(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """include_outer_env=False drops outer env vars but keeps env=... ones."""
        monkeypatch.setenv("I_SHOULD_NOT_BE_INCLUDED", "sad")
        monkeypatch.setenv("AND_NEITHER_SHOULD_I", "unhappy")
        session, runner = self.make_session_and_runner()
        assert runner.venv
        result = session.run(
            sys.executable,
            "-c",
            "import os; print(os.environ)",
            env={"I_SHOULD_BE_INCLUDED": "happy"},
            include_outer_env=False,
            silent=True,
        )
        assert result
        assert "sad" not in result
        assert "unhappy" not in result
        assert "happy" in result
    def test_no_included_invocation_env_vars_are_passed_empty(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """include_outer_env=False hides outer env vars even with no env=."""
        monkeypatch.setenv("I_SHOULD_NOT_BE_INCLUDED", "sad")
        monkeypatch.setenv("AND_NEITHER_SHOULD_I", "unhappy")
        session, _runner = self.make_session_and_runner()
        result = session.run(
            sys.executable,
            "-c",
            "import os; print(os.environ)",
            include_outer_env=False,
            silent=True,
        )
        assert result
        assert "sad" not in result
        assert "unhappy" not in result
    def test_run_external_not_a_virtualenv(self) -> None:
        """Passthrough (non-virtualenv) sessions always allow external programs."""
        # Non-virtualenv sessions should always allow external programs.
        session, runner = self.make_session_and_runner()
        runner.venv = nox.virtualenv.PassthroughEnv()
        with mock.patch("nox.command.run", autospec=True) as run:
            session.run(sys.executable, "--version")
        # The command must have been invoked with external=True.
        run.assert_called_once_with(
            (sys.executable, "--version"),
            **run_with_defaults(external=True, env=mock.ANY),
        )
    def test_run_external_condaenv(self) -> None:
        """`conda` is always permitted as an external command in conda sessions."""
        # condaenv sessions should always allow conda.
        session, runner = self.make_session_and_runner()
        runner.venv = mock.create_autospec(nox.virtualenv.CondaEnv)
        assert runner.venv
        runner.venv.allowed_globals = ("conda",)  # type: ignore[misc]
        runner.venv.env = {}
        runner.venv.bin_paths = ["/path/to/env/bin"]  # type: ignore[misc]
        runner.venv.create.return_value = True  # type: ignore[union-attr]
        with mock.patch("nox.command.run", autospec=True) as run:
            session.run("conda", "--version")
        run.assert_called_once_with(
            ("conda", "--version"),
            **run_with_defaults(
                external=True, env=mock.ANY, paths=["/path/to/env/bin"]
            ),
        )
def test_run_external_with_error_on_external_run(self) -> None:
session, runner = self.make_session_and_runner()
runner.global_config.error_on_external_run = True
with pytest.raises(nox.command.CommandFailed, match="External"):
session.run(sys.executable, "--version")
    def test_run_external_with_error_on_external_run_condaenv(self) -> None:
        """error_on_external_run also applies inside conda sessions."""
        session, runner = self.make_session_and_runner()
        runner.venv = mock.create_autospec(nox.virtualenv.CondaEnv)
        assert runner.venv
        runner.venv.env = {}
        runner.venv.bin_paths = ["/path/to/env/bin"]  # type: ignore[misc]
        runner.global_config.error_on_external_run = True
        with pytest.raises(nox.command.CommandFailed, match="External"):
            session.run(sys.executable, "--version")
def test_run_install_bad_args(self) -> None:
session, _ = self.make_session_and_runner()
with pytest.raises(
ValueError, match="At least one argument required to run_install"
):
session.run_install()
    def test_run_no_install_passthrough(self) -> None:
        """With --no-install, install calls on a passthrough env are no-ops."""
        session, runner = self.make_session_and_runner()
        runner.venv = nox.virtualenv.PassthroughEnv()
        runner.global_config.no_install = True
        # Neither call should raise even though there is no real venv.
        session.install("numpy")
        session.conda_install("numpy")
def test_run_no_conda_install(self) -> None:
session, _runner = self.make_session_and_runner()
with pytest.raises(TypeError, match="A session without a conda"):
session.conda_install("numpy")
def test_run_install_success(self) -> None:
session, _ = self.make_session_and_runner()
assert session.run_install(operator.add, 1300, 37) == 1337 # type: ignore[arg-type]
def test_run_install_install_only(self) -> None:
session, runner = self.make_session_and_runner()
runner.global_config.install_only = True
assert session.run_install(operator.add, 23, 19) == 42 # type: ignore[arg-type]
    @pytest.mark.parametrize(
        (
            "interrupt_timeout_setting",
            "terminate_timeout_setting",
            "interrupt_timeout_expected",
            "terminate_timeout_expected",
        ),
        [
            ("default", "default", 0.3, 0.2),
            (None, None, None, None),
            (0, 0, 0, 0),
            (1, 2, 1, 2),
        ],
    )
    def test_run_shutdown_process_timeouts(
        self,
        interrupt_timeout_setting: Literal["default"] | int | None,
        terminate_timeout_setting: Literal["default"] | int | None,
        interrupt_timeout_expected: float | None,
        terminate_timeout_expected: float | None,
    ) -> None:
        """Interrupt/terminate timeouts are forwarded to shutdown_process.

        A "default" setting means the keyword is omitted from the run()
        call; that row's expected values (0.3 / 0.2) are therefore the
        built-in defaults.
        """
        session, runner = self.make_session_and_runner()
        runner.venv = nox.virtualenv.PassthroughEnv()
        subp_popen_instance = mock.Mock()
        # Simulate the user hitting Ctrl-C while the process is running.
        subp_popen_instance.communicate.side_effect = KeyboardInterrupt()
        with (
            mock.patch("nox.popen.shutdown_process", autospec=True) as shutdown_process,
            mock.patch(
                "nox.popen.subprocess.Popen",
                new=mock.Mock(return_value=subp_popen_instance),
            ),
        ):
            shutdown_process.return_value = ("", "")
            timeout_kwargs: dict[str, None | float] = {}
            if interrupt_timeout_setting != "default":
                timeout_kwargs["interrupt_timeout"] = interrupt_timeout_setting
            if terminate_timeout_setting != "default":
                timeout_kwargs["terminate_timeout"] = terminate_timeout_setting
            with pytest.raises(KeyboardInterrupt):
                session.run(sys.executable, "--version", **timeout_kwargs)  # type: ignore[arg-type]
            shutdown_process.assert_called_once_with(
                proc=mock.ANY,
                interrupt_timeout=interrupt_timeout_expected,
                terminate_timeout=terminate_timeout_expected,
            )
    @pytest.mark.parametrize(
        ("no_install", "reused", "run_called"),
        [
            (True, True, False),
            (True, False, True),
            (False, True, True),
            (False, False, True),
        ],
    )
    @pytest.mark.parametrize("run_install_func", ["run_always", "run_install"])
    def test_run_install_no_install(
        self, no_install: bool, reused: bool, run_called: bool, run_install_func: str
    ) -> None:
        """run_install()/run_always() are skipped only when --no-install is
        set AND the virtualenv was reused; any other combination runs."""
        session, runner = self.make_session_and_runner()
        runner.global_config.no_install = no_install
        assert runner.venv
        runner.venv._reused = reused
        with mock.patch.object(nox.command, "run") as run:
            run_install = getattr(session, run_install_func)
            run_install("python", "-m", "pip", "install", "requests")
        assert run.called is run_called
    def test_conda_install_bad_args(self) -> None:
        """conda_install() without any package arguments is rejected."""
        session, runner = self.make_session_and_runner()
        runner.venv = mock.create_autospec(nox.virtualenv.CondaEnv)
        assert runner.venv
        runner.venv.location = "dummy"
        with pytest.raises(ValueError, match="arg"):
            session.conda_install()
    @pytest.mark.skipif(not sys.platform.startswith("win32"), reason="Only on Windows")
    def test_conda_install_bad_args_odd_nb_double_quotes(self) -> None:
        """On Windows, a spec with an odd number of double quotes is rejected."""
        session, runner = self.make_session_and_runner()
        runner.venv = mock.create_autospec(nox.virtualenv.CondaEnv)
        assert runner.venv
        runner.venv.location = "./not/a/location"
        with pytest.raises(ValueError, match="odd number of quotes"):
            session.conda_install('a"a')
    @pytest.mark.skipif(not sys.platform.startswith("win32"), reason="Only on Windows")
    def test_conda_install_bad_args_cannot_escape(self) -> None:
        """On Windows, a spec with characters that cannot be escaped is rejected."""
        session, runner = self.make_session_and_runner()
        runner.venv = mock.create_autospec(nox.virtualenv.CondaEnv)
        assert runner.venv
        runner.venv.location = "./not/a/location"
        with pytest.raises(ValueError, match="Cannot escape"):
            session.conda_install('a"o"<a')
def test_conda_install_not_a_condaenv(self) -> None:
session, runner = self.make_session_and_runner()
runner.venv = None
with pytest.raises(TypeError, match="conda environment"):
session.conda_install()
    @pytest.mark.parametrize(
        "auto_offline", [False, True], ids="auto_offline={}".format
    )
    @pytest.mark.parametrize("offline", [False, True], ids="offline={}".format)
    @pytest.mark.parametrize("conda", ["conda", "mamba"], ids=str)
    @pytest.mark.parametrize(
        "channel",
        ["", "conda-forge", ["conda-forge", "bioconda"]],
        ids=["default", "conda-forge", "bioconda"],
    )
    def test_conda_install(
        self, auto_offline: bool, offline: bool, conda: str, channel: str | list[str]
    ) -> None:
        """conda_install() builds the right conda/mamba command line,
        covering offline auto-detection and single/multiple channels."""
        runner = nox.sessions.SessionRunner(
            name="test",
            signatures=["test"],
            func=mock.sentinel.func,
            global_config=_options.options.namespace(posargs=[]),
            manifest=mock.create_autospec(nox.manifest.Manifest),
        )
        runner.venv = mock.create_autospec(nox.virtualenv.CondaEnv)
        assert runner.venv
        runner.venv.location = "/path/to/conda/env"
        runner.venv.env = {}
        runner.venv.is_offline = lambda: offline  # type: ignore[union-attr]
        runner.venv.conda_cmd = conda  # type: ignore[union-attr]
        class SessionNoSlots(nox.sessions.Session):
            pass
        session = SessionNoSlots(runner=runner)
        with mock.patch.object(session, "_run", autospec=True) as run:
            # --offline is expected only when auto-detection is enabled AND
            # the environment actually reports being offline.
            args = ["--offline"] if auto_offline and offline else []
            if channel and isinstance(channel, str):
                args.append(f"--channel={channel}")
            else:
                args += [f"--channel={c}" for c in channel]
            session.conda_install(
                "requests<99", "urllib3", auto_offline=auto_offline, channel=channel
            )
            run.assert_called_once_with(
                conda,
                "install",
                "--yes",
                *args,
                "--prefix",
                "/path/to/conda/env",
                # On Windows the version constraint gets double-quoted.
                '"requests<99"' if sys.platform.startswith("win32") else "requests<99",
                "urllib3",
                **_run_with_defaults(silent=True, external="error"),
            )
    @pytest.mark.parametrize(
        ("no_install", "reused", "run_called"),
        [
            (True, True, False),
            (True, False, True),
            (False, True, True),
            (False, False, True),
        ],
    )
    def test_conda_venv_reused_with_no_install(
        self, no_install: bool, reused: bool, run_called: bool
    ) -> None:
        """conda_install() is skipped only when --no-install is set AND the
        conda environment was reused; otherwise it runs."""
        session, runner = self.make_session_and_runner()
        runner.venv = mock.create_autospec(nox.virtualenv.CondaEnv)
        assert runner.venv
        runner.venv.location = "/path/to/conda/env"
        runner.venv.env = {}
        runner.venv.is_offline = lambda: True  # type: ignore[union-attr]
        runner.venv.conda_cmd = "conda"  # type: ignore[union-attr]
        runner.global_config.no_install = no_install
        runner.venv._reused = reused
        with mock.patch.object(nox.command, "run") as run:
            session.conda_install("baked beans", "eggs", "spam")
        assert run.called is run_called
    @pytest.mark.parametrize(
        "version_constraint",
        ["no", "yes", "already_dbl_quoted"],
        ids="version_constraint={}".format,
    )
    def test_conda_install_non_default_kwargs(self, version_constraint: str) -> None:
        """conda_install() forwards kwargs (silent=False) and double-quotes
        version constraints on Windows unless already quoted."""
        runner = nox.sessions.SessionRunner(
            name="test",
            signatures=["test"],
            func=mock.sentinel.func,
            global_config=_options.options.namespace(posargs=[]),
            manifest=mock.create_autospec(nox.manifest.Manifest),
        )
        runner.venv = mock.create_autospec(nox.virtualenv.CondaEnv)
        assert runner.venv
        runner.venv.location = "/path/to/conda/env"
        runner.venv.env = {}
        runner.venv.is_offline = lambda: False  # type: ignore[union-attr]
        runner.venv.conda_cmd = "conda"  # type: ignore[union-attr]
        class SessionNoSlots(nox.sessions.Session):
            pass
        session = SessionNoSlots(runner=runner)
        # Work out both what we pass to conda_install and what we expect it
        # to forward to the underlying command.
        if version_constraint == "no":
            pkg_requirement = passed_arg = "urllib3"
        elif version_constraint == "yes" and not sys.platform.startswith("win32"):
            pkg_requirement = passed_arg = "urllib3<1.25"
        elif version_constraint == "yes" and sys.platform.startswith("win32"):
            pkg_requirement = "urllib3<1.25"
            passed_arg = f'"{pkg_requirement}"'
        elif version_constraint == "already_dbl_quoted":
            pkg_requirement = passed_arg = '"urllib3<1.25"'
        else:
            raise ValueError(version_constraint)
        with mock.patch.object(session, "_run", autospec=True) as run:
            session.conda_install("requests", pkg_requirement, silent=False)
            run.assert_called_once_with(
                "conda",
                "install",
                "--yes",
                "--prefix",
                "/path/to/conda/env",
                "requests",
                # this will be double quoted if unquoted constraint is present
                passed_arg,
                **_run_with_defaults(silent=False, external="error"),
            )
def test_install_bad_args_no_arg(self) -> None:
session, _ = self.make_session_and_runner()
with pytest.raises(ValueError, match="arg"):
session.install()
def test_install_not_a_virtualenv(self) -> None:
session, runner = self.make_session_and_runner()
runner.venv = None
with pytest.raises(TypeError, match="virtualenv"):
session.install()
    def test_install(self) -> None:
        """install() invokes `python -m pip install` for the venv backend."""
        runner = nox.sessions.SessionRunner(
            name="test",
            signatures=["test"],
            func=mock.sentinel.func,
            global_config=_options.options.namespace(posargs=[]),
            manifest=mock.create_autospec(nox.manifest.Manifest),
        )
        runner.venv = make_fake_env()
        assert isinstance(runner.venv, nox.virtualenv.VirtualEnv)
        class SessionNoSlots(nox.sessions.Session):
            pass
        session = SessionNoSlots(runner=runner)
        assert session.venv_backend == "venv"
        with mock.patch.object(session, "_run", autospec=True) as run:
            session.install("requests", "urllib3")
            run.assert_called_once_with(
                "python",
                "-m",
                "pip",
                "install",
                "requests",
                "urllib3",
                **_run_with_defaults(silent=True, external="error"),
            )
    def test_install_non_default_kwargs(self) -> None:
        """Keyword arguments such as silent=False are forwarded to pip."""
        runner = nox.sessions.SessionRunner(
            name="test",
            signatures=["test"],
            func=mock.sentinel.func,
            global_config=_options.options.namespace(posargs=[]),
            manifest=mock.create_autospec(nox.manifest.Manifest),
        )
        runner.venv = make_fake_env()
        assert isinstance(runner.venv, nox.virtualenv.VirtualEnv)
        class SessionNoSlots(nox.sessions.Session):
            pass
        session = SessionNoSlots(runner=runner)
        with mock.patch.object(session, "_run", autospec=True) as run:
            session.install("requests", "urllib3", silent=False)
            run.assert_called_once_with(
                "python",
                "-m",
                "pip",
                "install",
                "requests",
                "urllib3",
                **_run_with_defaults(silent=False, external="error"),
            )
    def test_install_no_venv_failure(self) -> None:
        """install() into a passthrough (global) environment is refused."""
        runner = nox.sessions.SessionRunner(
            name="test",
            signatures=["test"],
            func=mock.sentinel.func,
            global_config=_options.options.namespace(posargs=[]),
            manifest=mock.create_autospec(nox.manifest.Manifest),
        )
        runner.venv = mock.create_autospec(nox.virtualenv.PassthroughEnv)
        assert runner.venv
        runner.venv.env = {}
        class SessionNoSlots(nox.sessions.Session):
            pass
        session = SessionNoSlots(runner=runner)
        with pytest.raises(
            ValueError,
            match=(
                r"use of session\.install\(\) is no longer allowed since"
                r" it would modify the global Python environment"
            ),
        ):
            session.install("requests", "urllib3")
    @pytest.mark.parametrize(
        ("verbose", "expected_silent"),
        [
            (True, False),
            (False, True),
        ],
    )
    def test_install_verbose(self, verbose: bool, expected_silent: bool) -> None:
        """--verbose disables the default silencing of pip output."""
        runner = nox.sessions.SessionRunner(
            name="test",
            signatures=["test"],
            func=mock.sentinel.func,
            global_config=_options.options.namespace(posargs=[], verbose=verbose),
            manifest=mock.create_autospec(nox.manifest.Manifest),
        )
        runner.venv = make_fake_env()
        class SessionNoSlots(nox.sessions.Session):
            pass
        session = SessionNoSlots(runner=runner)
        with mock.patch.object(session, "_run", autospec=True) as run:
            session.install("requests", "urllib3")
            run.assert_called_once_with(
                "python",
                "-m",
                "pip",
                "install",
                "requests",
                "urllib3",
                **_run_with_defaults(silent=expected_silent, external="error"),
            )
    @pytest.mark.parametrize(
        ("verbose", "expected_silent"),
        [
            (True, False),
            (False, True),
        ],
    )
    def test_conda_install_verbose(self, verbose: bool, expected_silent: bool) -> None:
        """--verbose disables the default silencing of conda output."""
        runner = nox.sessions.SessionRunner(
            name="test",
            signatures=["test"],
            func=mock.sentinel.func,
            global_config=_options.options.namespace(posargs=[], verbose=verbose),
            manifest=mock.create_autospec(nox.manifest.Manifest),
        )
        runner.venv = mock.create_autospec(nox.virtualenv.CondaEnv)
        assert runner.venv
        runner.venv.location = "/path/to/conda/env"
        runner.venv.env = {}
        runner.venv.is_offline = lambda: False  # type: ignore[union-attr]
        runner.venv.conda_cmd = "conda"  # type: ignore[union-attr]
        class SessionNoSlots(nox.sessions.Session):
            pass
        session = SessionNoSlots(runner=runner)
        with mock.patch.object(session, "_run", autospec=True) as run:
            session.conda_install("requests", "urllib3")
            run.assert_called_once_with(
                "conda",
                "install",
                "--yes",
                "--prefix",
                "/path/to/conda/env",
                "requests",
                "urllib3",
                **_run_with_defaults(silent=expected_silent, external="error"),
            )
    def test_notify(self) -> None:
        """notify() forwards the target session name and posargs to the manifest."""
        session, runner = self.make_session_and_runner()
        session.notify("other")
        runner.manifest.notify.assert_called_once_with("other", None)  # type: ignore[attr-defined]
        session.notify("other", posargs=["--an-arg"])
        runner.manifest.notify.assert_called_with("other", ["--an-arg"])  # type: ignore[attr-defined]
    def test_posargs_are_not_shared_between_sessions(
        self, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Mutating session.posargs in one session must not leak into another."""
        registry: dict[str, nox._decorators.Func] = {}
        monkeypatch.setattr("nox.registry._REGISTRY", registry)
        @nox.session(venv_backend="none")
        def test(session: nox.Session) -> None:
            # Mutate posargs in place; "lint" below must not see this.
            session.posargs.extend(["-x"])
        @nox.session(venv_backend="none")
        def lint(session: nox.Session) -> None:
            if "-x" in session.posargs:
                msg = "invalid option: -x"
                raise RuntimeError(msg)
        config = _options.options.namespace(posargs=[], envdir=".nox")
        manifest = nox.manifest.Manifest(registry, config)
        assert manifest["test"].execute()
        assert manifest["lint"].execute()
def test_log(self, caplog: pytest.LogCaptureFixture) -> None:
caplog.set_level(logging.INFO)
session, _ = self.make_session_and_runner()
session.log("meep")
assert "meep" in caplog.text
def test_warn(self, caplog: pytest.LogCaptureFixture) -> None:
caplog.set_level(logging.WARNING)
session, _ = self.make_session_and_runner()
session.warn("meep")
assert "meep" in caplog.text
def test_debug(self, caplog: pytest.LogCaptureFixture) -> None:
caplog.set_level(logging.DEBUG)
session, _ = self.make_session_and_runner()
session.debug("meep")
assert "meep" in caplog.text
def test_error(self, caplog: pytest.LogCaptureFixture) -> None:
caplog.set_level(logging.ERROR)
session, _ = self.make_session_and_runner()
with pytest.raises(nox.sessions._SessionQuit, match="meep"):
session.error("meep")
def test_error_no_log(self) -> None:
session, _ = self.make_session_and_runner()
with pytest.raises(nox.sessions._SessionQuit):
session.error()
def test_skip_no_log(self) -> None:
session, _ = self.make_session_and_runner()
with pytest.raises(nox.sessions._SessionSkip):
session.skip()
    @pytest.mark.parametrize(
        ("no_install", "reused", "run_called"),
        [
            (True, True, False),
            (True, False, True),
            (False, True, True),
            (False, False, True),
        ],
    )
    def test_session_venv_reused_with_no_install(
        self, no_install: bool, reused: bool, run_called: bool
    ) -> None:
        """install() is skipped only when --no-install is set AND the
        virtualenv was reused; any other combination still installs."""
        session, runner = self.make_session_and_runner()
        runner.global_config.no_install = no_install
        assert runner.venv
        runner.venv._reused = reused
        with mock.patch.object(nox.command, "run") as run:
            session.install("eggs", "spam")
        assert run.called is run_called
    def test_install_uv(self) -> None:
        """install() invokes `uv pip install` when the uv backend is active."""
        runner = nox.sessions.SessionRunner(
            name="test",
            signatures=["test"],
            func=mock.sentinel.func,
            global_config=_options.options.namespace(posargs=[]),
            manifest=mock.create_autospec(nox.manifest.Manifest),
        )
        runner.venv = make_fake_env(venv_backend="uv")
        assert isinstance(runner.venv, nox.virtualenv.VirtualEnv)
        assert runner.venv.venv_backend == "uv"
        class SessionNoSlots(nox.sessions.Session):
            pass
        session = SessionNoSlots(runner=runner)
        with mock.patch.object(session, "_run", autospec=True) as run:
            session.install("requests", "urllib3", silent=False)
            run.assert_called_once_with(
                "uv",
                "pip",
                "install",
                "requests",
                "urllib3",
                **_run_with_defaults(silent=False, external="error"),
            )
    def test_install_uv_command(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """The uv binary is resolved correctly in three scenarios: nox's own
        uv, a user-invoked uvx, and a uv installed inside the session venv."""
        runner = nox.sessions.SessionRunner(
            name="test",
            signatures=["test"],
            func=mock.sentinel.func,
            global_config=_options.options.namespace(posargs=[]),
            manifest=mock.create_autospec(nox.manifest.Manifest),
        )
        runner.venv = make_fake_env(venv_backend="uv")
        assert isinstance(runner.venv, nox.virtualenv.VirtualEnv)
        assert runner.venv.venv_backend == "uv"
        class SessionNoSlots(nox.sessions.Session):
            pass
        session = SessionNoSlots(runner=runner)
        # Pretend nox found uv at /some/uv and nothing is on PATH.
        monkeypatch.setattr(nox.virtualenv, "UV", "/some/uv")
        monkeypatch.setattr(shutil, "which", lambda x, path=None: None)  # noqa: ARG005
        with mock.patch.object(nox.command, "run", autospec=True) as run:
            session.install("requests", "urllib3", silent=False)
            run.assert_called_once()
            ((call_args,), _) = run.call_args
            assert call_args == (
                "/some/uv",
                "pip",
                "install",
                "requests",
                "urllib3",
            )
        # User runs uvx
        with mock.patch.object(nox.command, "run", autospec=True) as run:
            session.run("uvx", "cowsay")
            run.assert_called_once()
            ((call_args,), _) = run.call_args
            assert call_args == (
                "/some/uvx",
                "cowsay",
            )
        # user installs uv in the session venv
        monkeypatch.setattr(
            shutil, "which", lambda x, path="": path + "/uv" if x == "uv" else None
        )
        with mock.patch.object(nox.command, "run", autospec=True) as run:
            session.install("requests", "urllib3", silent=False)
            run.assert_called_once()
            ((call_args,), _) = run.call_args
            assert call_args == (
                "uv",
                "pip",
                "install",
                "requests",
                "urllib3",
            )
def test___slots__(self) -> None:
session, _ = self.make_session_and_runner()
with pytest.raises(AttributeError):
session.foo = "bar" # type: ignore[attr-defined]
with pytest.raises(AttributeError):
session.quux # type: ignore[attr-defined] # noqa: B018
def test___dict__(self) -> None:
session, _ = self.make_session_and_runner()
expected = {name: getattr(session, name) for name in session.__slots__}
assert session.__dict__ == expected
def test_first_arg_list(self) -> None:
session, _ = self.make_session_and_runner()
with pytest.raises(
ValueError,
match=re.escape("First argument to `session.run` is a list. Did you mean"),
):
session.run(["ls", "-al"]) # type: ignore[arg-type]
| TestSession |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/function7.py | {
"start": 497,
"end": 588
} | class ____(Protocol):
def write(self, a: str, /, b: str) -> object:
pass
| _Writer2 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/utils.py | {
"start": 682,
"end": 2151
} | class ____:
"""Holds the errors classification and messaging scenarios."""
def __new__(self, stream: str) -> Mapping[str, Any]:
return {
401: ErrorResolution(
response_action=ResponseAction.IGNORE,
failure_type=FailureType.config_error,
error_message=f"Stream `{stream}`. Failed to access the Shopify store with provided API token. Verify your API token is valid.",
),
402: ErrorResolution(
response_action=ResponseAction.IGNORE,
failure_type=FailureType.config_error,
error_message=f"Stream `{stream}`. The shop's plan does not have access to this feature. Please upgrade your plan to be able to access this stream.",
),
403: ErrorResolution(
response_action=ResponseAction.IGNORE,
failure_type=FailureType.config_error,
error_message=f"Stream `{stream}`. Unable to access Shopify endpoint for {stream}. Check that you have the appropriate access scopes to read data from this endpoint.",
),
404: ErrorResolution(
response_action=ResponseAction.IGNORE,
failure_type=FailureType.config_error,
error_message=f"Stream `{stream}`. Not available or missing.",
),
# extend the mapping with more handable errors, if needed.
}
| ShopifyNonRetryableErrors |
python | has2k1__plotnine | plotnine/scales/scale_color.py | {
"start": 14820,
"end": 14893
} | class ____(scale_color_desaturate):
pass
@alias
| scale_colour_desaturate |
python | falconry__falcon | falcon/asgi/reader.py | {
"start": 1005,
"end": 12595
} | class ____:
"""File-like input object wrapping asynchronous iterator over bytes.
This class implements coroutine functions for asynchronous reading or
iteration, but otherwise provides an interface similar to that defined by
:class:`io.IOBase`.
"""
__slots__ = [
'_buffer',
'_buffer_len',
'_buffer_pos',
'_chunk_size',
'_consumed',
'_exhausted',
'_iteration_started',
'_max_join_size',
'_source',
]
_buffer: bytes
_buffer_len: int
_buffer_pos: int
_chunk_size: int
_consumed: int
_exhausted: bool
_iteration_started: bool
_max_join_size: int
_source: AsyncIterator[bytes]
def __init__(
self,
source: AsyncReadableIO | AsyncIterator[bytes],
chunk_size: int | None = None,
):
self._source = self._iter_normalized(source)
self._chunk_size = chunk_size or DEFAULT_CHUNK_SIZE
self._max_join_size = self._chunk_size * _MAX_JOIN_CHUNKS
self._buffer = b''
self._buffer_len = 0
self._buffer_pos = 0
self._consumed = 0
self._exhausted = False
self._iteration_started = False
async def _iter_normalized(
self, source: AsyncReadableIO | AsyncIterator[bytes]
) -> AsyncIterator[bytes]:
chunk = b''
chunk_size = self._chunk_size
async for item in source:
chunk_len = len(chunk)
if chunk_len >= chunk_size:
self._consumed += chunk_len
yield chunk
chunk = item
continue
chunk += item
if chunk:
self._consumed += len(chunk)
yield chunk
self._exhausted = True
async def _iter_with_buffer(self, size_hint: int = 0) -> AsyncIterator[bytes]:
if self._buffer_len > self._buffer_pos:
if 0 < size_hint < self._buffer_len - self._buffer_pos:
buffer_pos = self._buffer_pos
self._buffer_pos += size_hint
yield self._buffer[buffer_pos : self._buffer_pos]
buffer_pos = self._buffer_pos
self._buffer_pos = self._buffer_len
yield self._buffer[buffer_pos : self._buffer_len]
async for chunk in self._source:
yield chunk
async def _iter_delimited(
self, delimiter: bytes, size_hint: int = 0
) -> AsyncIterator[bytes]:
delimiter_len_1 = len(delimiter) - 1
if not 0 <= delimiter_len_1 < self._chunk_size:
raise ValueError('delimiter length must be within [1, chunk_size]')
if self._buffer_len > self._buffer_pos:
pos = self._buffer.find(delimiter, self._buffer_pos)
if pos == 0:
return
if pos > 0:
if 0 < size_hint < pos - self._buffer_pos:
buffer_pos = self._buffer_pos
self._buffer_pos += size_hint
yield self._buffer[buffer_pos : self._buffer_pos]
buffer_pos = self._buffer_pos
self._buffer_pos = pos
yield self._buffer[buffer_pos:pos]
return
if 0 < size_hint < (self._buffer_len - self._buffer_pos - delimiter_len_1):
buffer_pos = self._buffer_pos
self._buffer_pos += size_hint
yield self._buffer[buffer_pos : self._buffer_pos]
if self._buffer_pos > 0:
self._trim_buffer()
async for chunk in self._source:
offset = self._buffer_len - delimiter_len_1
if offset > 0:
fragment = self._buffer[offset:] + chunk[:delimiter_len_1]
pos = fragment.find(delimiter)
if pos < 0:
output = self._buffer
self._buffer = chunk
self._buffer_len = len(chunk)
yield output
else:
self._buffer += chunk
self._buffer_len += len(chunk)
self._buffer_pos = offset + pos
# PERF(vytas): local1 + local2 was faster than self._attr.
# TODO(vytas): Verify this on 3.12+.
yield self._buffer[: offset + pos]
return
elif self._buffer:
self._buffer += chunk
self._buffer_len += len(chunk)
else:
self._buffer = chunk
self._buffer_len = len(chunk)
pos = self._buffer.find(delimiter)
if pos >= 0: # pragma: no py39,py310 cover
if pos > 0:
self._buffer_pos = pos
yield self._buffer[:pos]
return
yield self._buffer
async def _consume_delimiter(self, delimiter: bytes) -> None:
delimiter_len = len(delimiter)
if await self.peek(delimiter_len) != delimiter:
raise DelimiterError('expected delimiter missing')
self._buffer_pos += delimiter_len
def _prepend_buffer(self, chunk: bytes) -> None:
if self._buffer_len > self._buffer_pos:
self._buffer = chunk + self._buffer[self._buffer_pos :]
self._buffer_len = len(self._buffer)
else:
self._buffer = chunk
self._buffer_len = len(chunk)
self._buffer_pos = 0
def _trim_buffer(self) -> None:
self._buffer = self._buffer[self._buffer_pos :]
self._buffer_len -= self._buffer_pos
self._buffer_pos = 0
async def _read_from(
self, source: AsyncIterator[bytes], size: int | None = -1
) -> bytes:
if size == -1 or size is None:
result_bytes = io.BytesIO()
async for chunk in source:
result_bytes.write(chunk)
return result_bytes.getvalue()
if size <= 0:
return b''
remaining = size
if size <= self._max_join_size:
result: list[bytes] = []
async for chunk in source:
chunk_len = len(chunk)
if remaining < chunk_len:
result.append(chunk[:remaining])
self._prepend_buffer(chunk[remaining:])
break
result.append(chunk)
remaining -= chunk_len
if remaining == 0: # pragma: no py39,py310 cover
break
# PERF(vytas) Don't join unless necessary.
return (
result[0] if len(result) == 1 else b''.join(result)
) # pragma: no py314 cover
# NOTE(vytas): size > self._max_join_size
result_bytes = io.BytesIO()
async for chunk in source:
chunk_len = len(chunk)
if remaining < chunk_len:
result_bytes.write(chunk[:remaining])
self._prepend_buffer(chunk[remaining:])
break
result_bytes.write(chunk)
remaining -= chunk_len
if remaining == 0: # pragma: no py39,py310 cover
break
return result_bytes.getvalue()
def delimit(self, delimiter: bytes) -> BufferedReader: # TODO: should se self
return type(self)(self._iter_delimited(delimiter), chunk_size=self._chunk_size)
# -------------------------------------------------------------------------
# Asynchronous IO interface.
# -------------------------------------------------------------------------
def __aiter__(self) -> AsyncIterator[bytes]:
if self._iteration_started:
raise OperationNotAllowed('This stream is already being iterated over.')
self._iteration_started = True
# PERF(vytas): Do not wrap unless needed.
if self._buffer_len > self._buffer_pos:
return self._iter_with_buffer()
return self._source
async def exhaust(self) -> None:
await self.pipe()
async def peek(self, size: int = -1) -> bytes:
if size < 0 or size > self._chunk_size:
size = self._chunk_size
if self._buffer_pos > 0:
self._trim_buffer()
if self._buffer_len < size:
async for chunk in self._source:
self._buffer += chunk
self._buffer_len = len(self._buffer)
if self._buffer_len >= size: # pragma: no py39,py310 cover
break
return self._buffer[:size] # pragma: no py314 cover
async def pipe(self, destination: AsyncWritableIO | None = None) -> None:
async for chunk in self._iter_with_buffer():
if destination is not None:
await destination.write(chunk)
async def pipe_until(
self,
delimiter: bytes,
destination: AsyncWritableIO | None = None,
consume_delimiter: bool = False,
) -> None:
async for chunk in self._iter_delimited(delimiter):
if destination is not None:
await destination.write(chunk)
if consume_delimiter:
await self._consume_delimiter(delimiter)
async def read(self, size: int | None = -1) -> bytes:
return await self._read_from(self._iter_with_buffer(size_hint=size or 0), size)
async def readall(self) -> bytes:
"""Read and return all remaining data in the request body.
Warning:
Only use this method when you can be certain that you have
enough free memory for the entire request body, and that you
have configured your web server to limit request bodies to a
reasonable size (to guard against malicious requests).
Returns:
bytes: The request body data, or ``b''`` if the body is empty or
has already been consumed.
"""
return await self._read_from(self._iter_with_buffer())
async def read_until(
self, delimiter: bytes, size: int = -1, consume_delimiter: bool = False
) -> bytes:
result = await self._read_from(
self._iter_delimited(delimiter, size_hint=size or 0), size
)
if consume_delimiter:
await self._consume_delimiter(delimiter)
return result
# -------------------------------------------------------------------------
# These methods are included to improve compatibility with Python's
# standard "file-like" IO interface.
# -------------------------------------------------------------------------
# TODO(vytas): Implement the same close() machinery as in asgi.stream?
# def close(self):
# pass
@property
def eof(self) -> bool:
"""Whether the stream is at EOF."""
return self._exhausted and self._buffer_len == self._buffer_pos
def fileno(self) -> NoReturn:
"""Raise an instance of OSError since a file descriptor is not used."""
raise OSError('This IO object does not use a file descriptor')
def isatty(self) -> bool:
"""Return ``False`` always."""
return False
def readable(self) -> bool:
"""Return ``True`` always."""
return True
def seekable(self) -> bool:
"""Return ``False`` always."""
return False
def writable(self) -> bool:
"""Return ``False`` always."""
return False
def tell(self) -> int:
"""Return the number of bytes read from the stream so far."""
return self._consumed - (self._buffer_len - self._buffer_pos)
| BufferedReader |
python | pytorch__pytorch | test/profiler/test_profiler.py | {
"start": 2592,
"end": 5745
} | class ____(TestCase):
def test_mem_leak(self):
"""Checks that there's no memory leak when using profiler with CUDA"""
t = torch.rand(1, 1).cuda()
p = psutil.Process()
last_rss = collections.deque(maxlen=5)
for _ in range(10):
with _profile(use_cuda=True):
for _ in range(1024):
t = torch.mm(t, t)
gc.collect()
torch.cuda.empty_cache()
last_rss.append(p.memory_info().rss)
# with CUDA events leaking the increase in memory was ~7 MB between
# profiler invocations above
is_increasing = all(
last_rss[idx] > last_rss[idx - 1] for idx in range(1, len(last_rss))
)
max_diff = -1
for idx in range(1, len(last_rss)):
max_diff = max(max_diff, last_rss[idx] - last_rss[idx - 1])
self.assertTrue(
not (is_increasing and max_diff > 100 * 1024),
msg=f"memory usage is increasing, {str(last_rss)}",
)
def test_custom_module_input_op_ids(self):
class MyFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
(x,) = ctx.saved_tensors
return x
def custom_layer(input_ten):
return MyFunc.apply(input_ten)
# Only testing that emit_nvtx runs when
# record_shapes option is enabled.
with torch.autograd.profiler.emit_nvtx(record_shapes=True) as prof:
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
z = x + y
s = custom_layer(z)
q = s.sum()
q.backward()
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
def test_cudagraph_profiling_workaround(self):
import subprocess
# repro taken from #75504
# Launch in a separate process to catch hanging/illegal memory errors
# and to make sure CUPTI isn't already initialized.
p = subprocess.check_call(
[
sys.executable,
"-c",
"""
import os
import torch
from torch.profiler import ProfilerActivity, profile
def add_one(in_: torch.Tensor):
return in_ + 1
sample_arg = torch.zeros(10, device="cuda").requires_grad_(True)
# add this before cuda graphs are created
torch.profiler._utils._init_for_cuda_graphs()
add_one_graphed = torch.cuda.graphs.make_graphed_callables(add_one, sample_args=(sample_arg,))
zeros = torch.zeros(10, device="cuda")
out = add_one_graphed(zeros)
assert out[0] == 1
with profile(activities=[ProfilerActivity.CPU]):
add_one_graphed(zeros)
with profile(activities=[ProfilerActivity.CUDA]):
add_one_graphed(zeros)
""",
],
universal_newlines=True,
timeout=60,
)
# ^ this will throw an exception if the script fails.
@unittest.skipIf(not torch.profiler.itt.is_available(), "ITT is required")
| TestProfilerCUDA |
python | pandas-dev__pandas | pandas/tests/arithmetic/test_datetime64.py | {
"start": 59902,
"end": 64300
} | class ____:
# TODO: box + de-duplicate
def test_dt64_overflow_masking(self, box_with_array):
# GH#25317
left = Series([Timestamp("1969-12-31")], dtype="M8[ns]")
right = Series([NaT])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
expected = TimedeltaIndex([NaT], dtype="m8[ns]")
expected = tm.box_expected(expected, box_with_array)
result = left - right
tm.assert_equal(result, expected)
def test_dt64_series_arith_overflow(self):
# GH#12534, fixed by GH#19024
dt = Timestamp("1700-01-31")
td = Timedelta("20000 Days")
dti = date_range("1949-09-30", freq="100YE", periods=4, unit="ns")
ser = Series(dti)
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
ser - dt
with pytest.raises(OverflowError, match=msg):
dt - ser
with pytest.raises(OverflowError, match=msg):
ser + td
with pytest.raises(OverflowError, match=msg):
td + ser
ser.iloc[-1] = NaT
expected = Series(
["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]"
)
res = ser + td
tm.assert_series_equal(res, expected)
res = td + ser
tm.assert_series_equal(res, expected)
ser.iloc[1:] = NaT
expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]")
res = ser - dt
tm.assert_series_equal(res, expected)
res = dt - ser
tm.assert_series_equal(res, -expected)
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(["2021-12-28 17:19", Timestamp.max]).as_unit("ns")
dtimin = pd.to_datetime(["2021-12-28 17:19", Timestamp.min]).as_unit("ns")
tsneg = Timestamp("1950-01-01").as_unit("ns")
ts_neg_variants = [
tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype("datetime64[ns]"),
tsneg.to_datetime64().astype("datetime64[D]"),
]
tspos = Timestamp("1980-01-01").as_unit("ns")
ts_pos_variants = [
tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype("datetime64[ns]"),
tspos.to_datetime64().astype("datetime64[D]"),
]
msg = "Overflow in int64 addition"
for variant in ts_neg_variants:
with pytest.raises(OverflowError, match=msg):
dtimax - variant
expected = Timestamp.max._value - tspos._value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1]._value == expected
expected = Timestamp.min._value - tsneg._value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1]._value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError, match=msg):
dtimin - variant
def test_datetimeindex_sub_datetimeindex_overflow(self):
# GH#22492, GH#22508
dtimax = pd.to_datetime(["2021-12-28 17:19", Timestamp.max]).as_unit("ns")
dtimin = pd.to_datetime(["2021-12-28 17:19", Timestamp.min]).as_unit("ns")
ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"]).as_unit("ns")
ts_pos = pd.to_datetime(["1980-01-01", "1980-01-01"]).as_unit("ns")
# General tests
expected = Timestamp.max._value - ts_pos[1]._value
result = dtimax - ts_pos
assert result[1]._value == expected
expected = Timestamp.min._value - ts_neg[1]._value
result = dtimin - ts_neg
assert result[1]._value == expected
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
dtimax - ts_neg
with pytest.raises(OverflowError, match=msg):
dtimin - ts_pos
# Edge cases
tmin = pd.to_datetime([Timestamp.min])
t1 = tmin + Timedelta.max + Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
t1 - tmin
tmax = pd.to_datetime([Timestamp.max])
t2 = tmax + Timedelta.min - Timedelta("1us")
with pytest.raises(OverflowError, match=msg):
tmax - t2
| TestDatetime64OverflowHandling |
python | doocs__leetcode | solution/1600-1699/1648.Sell Diminishing-Valued Colored Balls/Solution.py | {
"start": 0,
"end": 914
} | class ____:
def maxProfit(self, inventory: List[int], orders: int) -> int:
inventory.sort(reverse=True)
mod = 10**9 + 7
ans = i = 0
n = len(inventory)
while orders > 0:
while i < n and inventory[i] >= inventory[0]:
i += 1
nxt = 0
if i < n:
nxt = inventory[i]
cnt = i
x = inventory[0] - nxt
tot = cnt * x
if tot > orders:
decr = orders // cnt
a1, an = inventory[0] - decr + 1, inventory[0]
ans += (a1 + an) * decr // 2 * cnt
ans += (inventory[0] - decr) * (orders % cnt)
else:
a1, an = nxt + 1, inventory[0]
ans += (a1 + an) * x // 2 * cnt
inventory[0] = nxt
orders -= tot
ans %= mod
return ans
| Solution |
python | google__jax | jax/_src/source_info_util.py | {
"start": 9920,
"end": 10455
} | class ____(contextlib.ContextDecorator):
__slots__ = ['name', 'prev']
def __init__(self, name: str):
self.name = name
def __enter__(self):
self.prev = prev = _source_info_context.context
name_stack = prev.name_stack.transform(self.name)
_source_info_context.context = prev.replace(name_stack=name_stack)
return name_stack
def __exit__(self, exc_type, exc_value, traceback):
_source_info_context.context = self.prev
transform_name_stack = TransformNameStackContextManager
| TransformNameStackContextManager |
python | getsentry__sentry | tests/sentry/workflow_engine/processors/test_data_condition_group.py | {
"start": 7371,
"end": 9539
} | class ____(TestEvaluationConditionCase):
def setUp(self) -> None:
super().setUp()
self.data_condition_group.logic_type = DataConditionGroup.Type.ANY_SHORT_CIRCUIT
def test_evaluate_data_conditions__passes_all(self) -> None:
assert evaluate_data_conditions(
self.get_conditions_to_evaluate(10), self.data_condition_group.logic_type
) == ProcessedDataConditionGroup(
logic_result=TriggerResult.TRUE,
condition_results=[
ProcessedDataCondition(
logic_result=TriggerResult.TRUE,
condition=self.data_condition,
result=DetectorPriorityLevel.HIGH,
)
],
)
def test_evaluate_data_conditions__passes_one(self) -> None:
result = evaluate_data_conditions(
self.get_conditions_to_evaluate(4),
self.data_condition_group.logic_type,
)
expected_result = ProcessedDataConditionGroup(
logic_result=TriggerResult.TRUE,
condition_results=[
ProcessedDataCondition(
logic_result=TriggerResult.TRUE,
condition=self.data_condition_two,
result=DetectorPriorityLevel.LOW,
)
],
)
assert result == expected_result
def test_evaluate_data_conditions__fails_all(self) -> None:
result = evaluate_data_conditions(
self.get_conditions_to_evaluate(1),
self.data_condition_group.logic_type,
)
expected_result = ProcessedDataConditionGroup(
logic_result=TriggerResult.FALSE,
condition_results=[],
)
assert result == expected_result
def test_evaluate_data_conditions__passes_without_conditions(self) -> None:
expected_result = ProcessedDataConditionGroup(
logic_result=TriggerResult.TRUE,
condition_results=[],
)
result = evaluate_data_conditions([], self.data_condition_group.logic_type)
assert result == expected_result
| TestEvaluateConditionGroupTypeAnyShortCircuit |
python | apache__airflow | providers/papermill/src/airflow/providers/papermill/hooks/kernel.py | {
"start": 1368,
"end": 1582
} | class ____:
"""Class to represent kernel connection object."""
ip: str
shell_port: int
iopub_port: int
stdin_port: int
control_port: int
hb_port: int
session_key: str
| KernelConnection |
python | allegroai__clearml | clearml/backend_api/services/v2_9/queues.py | {
"start": 6757,
"end": 12711
} | class ____(NonStrictDataModel):
"""
:param id: Queue id
:type id: str
:param name: Queue name
:type name: str
:param user: Associated user id
:type user: str
:param company: Company id
:type company: str
:param created: Queue creation time
:type created: datetime.datetime
:param tags: User-defined tags
:type tags: Sequence[str]
:param system_tags: System tags. This field is reserved for system use, please
don't use it.
:type system_tags: Sequence[str]
:param entries: List of ordered queue entries
:type entries: Sequence[Entry]
"""
_schema = {
"properties": {
"company": {"description": "Company id", "type": ["string", "null"]},
"created": {
"description": "Queue creation time",
"format": "date-time",
"type": ["string", "null"],
},
"entries": {
"description": "List of ordered queue entries",
"items": {"$ref": "#/definitions/entry"},
"type": ["array", "null"],
},
"id": {"description": "Queue id", "type": ["string", "null"]},
"name": {"description": "Queue name", "type": ["string", "null"]},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"user": {"description": "Associated user id", "type": ["string", "null"]},
},
"type": "object",
}
def __init__(
self,
id: Optional[str] = None,
name: Optional[str] = None,
user: Optional[str] = None,
company: Optional[str] = None,
created: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
entries: Optional[List[Any]] = None,
**kwargs: Any
) -> None:
super(Queue, self).__init__(**kwargs)
self.id = id
self.name = name
self.user = user
self.company = company
self.created = created
self.tags = tags
self.system_tags = system_tags
self.entries = entries
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("user")
def user(self) -> Optional[str]:
return self._property_user
@user.setter
def user(self, value: Optional[str]) -> None:
if value is None:
self._property_user = None
return
self.assert_isinstance(value, "user", six.string_types)
self._property_user = value
@schema_property("company")
def company(self) -> Optional[str]:
return self._property_company
@company.setter
def company(self, value: Optional[str]) -> None:
if value is None:
self._property_company = None
return
self.assert_isinstance(value, "company", six.string_types)
self._property_company = value
@schema_property("created")
def created(self) -> Optional[str]:
return self._property_created
@created.setter
def created(self, value: Optional[str]) -> None:
if value is None:
self._property_created = None
return
self.assert_isinstance(value, "created", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_created = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("entries")
def entries(self) -> Optional[List[Any]]:
return self._property_entries
@entries.setter
def entries(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_entries = None
return
self.assert_isinstance(value, "entries", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [Entry.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "entries", Entry, is_array=True)
self._property_entries = value
| Queue |
python | fabric__fabric | fabric/exceptions.py | {
"start": 172,
"end": 562
} | class ____(Exception):
"""
Lightweight exception wrapper for `.GroupResult` when one contains errors.
.. versionadded:: 2.0
"""
def __init__(self, result):
#: The `.GroupResult` object which would have been returned, had there
#: been no errors. See its docstring (and that of `.Group`) for
#: details.
self.result = result
| GroupException |
python | dagster-io__dagster | python_modules/dagster-webserver/dagster_webserver_tests/webserver/conftest.py | {
"start": 536,
"end": 1432
} | class ____(DagsterWebserver):
def test_req_ctx_endpoint(self, request: Request):
with self.request_context(request) as ctx:
# instantiate cached property with backref
_ = ctx.instance_queryer
return JSONResponse({"name": ctx.__class__.__name__})
def build_routes(self):
return [
Route("/test_request_context", self.test_req_ctx_endpoint),
*super().build_routes(),
]
@pytest.fixture(scope="session")
def test_client(instance):
process_context = WorkspaceProcessContext(
instance=instance,
version=__version__,
read_only=False,
workspace_load_target=workspace_opts_to_load_target(
WorkspaceOpts(empty_workspace=True),
),
)
app = TestDagsterWebserver(process_context).create_asgi_app(debug=True)
return TestClient(app)
| TestDagsterWebserver |
python | explosion__spaCy | spacy/errors.py | {
"start": 47,
"end": 1559
} | class ____(type):
def __getattribute__(self, code):
msg = super().__getattribute__(code)
if code.startswith("__"): # python system attributes like __class__
return msg
else:
return "[{code}] {msg}".format(code=code, msg=msg)
def setup_default_warnings():
# ignore certain numpy warnings
filter_warning("ignore", error_msg="numpy.dtype size changed") # noqa
filter_warning("ignore", error_msg="numpy.ufunc size changed") # noqa
# warn about entity_ruler, span_ruler & matcher having no patterns only once
for pipe in ["matcher", "entity_ruler", "span_ruler"]:
filter_warning("once", error_msg=Warnings.W036.format(name=pipe))
# warn once about lemmatizer without required POS
filter_warning("once", error_msg=Warnings.W108)
# floret vector table cannot be modified
filter_warning("once", error_msg="[W114]")
def filter_warning(
action: Literal["default", "error", "ignore", "always", "module", "once"],
error_msg: str,
):
"""Customize how spaCy should handle a certain warning.
error_msg (str): e.g. "W006", or a full error message
action (str): "default", "error", "ignore", "always", "module" or "once"
"""
warnings.filterwarnings(action, message=_escape_warning_msg(error_msg))
def _escape_warning_msg(msg):
"""To filter with warnings.filterwarnings, the [] brackets need to be escaped"""
return msg.replace("[", "\\[").replace("]", "\\]")
# fmt: off
| ErrorsWithCodes |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/assets/job/asset_job.py | {
"start": 8315,
"end": 30964
} | class ____(AssetGraph):
"""An AssetGraph that is scoped to a particular job."""
def __init__(
self,
asset_nodes_by_key: Mapping[AssetKey, AssetNode],
assets_defs_by_check_key: Mapping[AssetCheckKey, AssetsDefinition],
source_asset_graph: AssetGraph,
):
super().__init__(asset_nodes_by_key, assets_defs_by_check_key)
self._source_asset_graph = source_asset_graph
@property
def source_asset_graph(self) -> AssetGraph:
"""The source AssetGraph from which this job-scoped graph was created."""
return self._source_asset_graph
def get_asset_graph_for_job(
parent_asset_graph: AssetGraph,
selection: AssetSelection,
allow_different_partitions_defs: bool = False,
) -> AssetGraph:
"""Subset an AssetGraph to create an AssetGraph representing an asset job.
The provided selection must satisfy certain constraints to comprise a valid asset job:
- The selected keys must be a subset of the existing executable asset keys.
- The selected keys must have at most one non-null partitions definition.
The returned AssetGraph will contain only the selected keys within executable AssetsDefinitions.
Any unselected dependencies will be included as unexecutable AssetsDefinitions.
"""
from dagster._core.definitions.external_asset import (
create_unexecutable_external_asset_from_assets_def,
)
selected_keys = selection.resolve(parent_asset_graph)
invalid_keys = selected_keys - parent_asset_graph.executable_asset_keys
if invalid_keys:
raise DagsterInvalidDefinitionError(
"Selected keys must be a subset of existing executable asset keys."
f" Invalid selected keys: {invalid_keys}",
)
_infer_and_validate_common_partitions_def(
parent_asset_graph,
selected_keys,
allow_different_partitions_defs=allow_different_partitions_defs,
)
selected_check_keys = selection.resolve_checks(parent_asset_graph)
# _subset_assets_defs returns two lists of Assetsfinitions-- those included and those
# excluded by the selection. These collections retain their original execution type. We need
# to convert the excluded assets to unexecutable external assets.
executable_assets_defs, excluded_assets_defs = _subset_assets_defs(
parent_asset_graph.assets_defs, selected_keys, selected_check_keys
)
# Ideally we would include only the logical dependencies of our executable asset keys in our job
# asset graph. These could be obtained by calling `AssetsDefinition.dependency_keys` for each
# executable assets def.
#
# However, this is insufficient due to the way multi-asset subsetting works. Our execution
# machinery needs the AssetNodes for any input or output asset of a multi-asset that is touched
# by our selection, regardless of whether these assets are in our selection or their
# dependencies. Thus for now we retrieve all of these keys with `node_keys_by_{input,output}_name`.
# This is something we should probably fix in the future by appropriately adjusting multi-asset
# subsets.
other_keys = {
*(k for ad in executable_assets_defs for k in ad.node_keys_by_input_name.values()),
*(k for ad in executable_assets_defs for k in ad.node_keys_by_output_name.values()),
} - selected_keys
other_assets_defs, _ = _subset_assets_defs(
excluded_assets_defs, other_keys, None, allow_extraneous_asset_keys=True
)
unexecutable_assets_defs = [
create_unexecutable_external_asset_from_assets_def(ad) for ad in other_assets_defs
]
asset_nodes_by_key, assets_defs_by_check_key = JobScopedAssetGraph.key_mappings_from_assets(
[*executable_assets_defs, *unexecutable_assets_defs]
)
return JobScopedAssetGraph(asset_nodes_by_key, assets_defs_by_check_key, parent_asset_graph)
def _subset_assets_defs(
assets: Iterable["AssetsDefinition"],
selected_asset_keys: AbstractSet[AssetKey],
selected_asset_check_keys: Optional[AbstractSet[AssetCheckKey]],
allow_extraneous_asset_keys: bool = False,
) -> tuple[
Sequence["AssetsDefinition"],
Sequence["AssetsDefinition"],
]:
"""Given a list of asset key selection queries, generate a set of AssetsDefinition objects
representing the included/excluded definitions.
"""
included_assets: set[AssetsDefinition] = set()
excluded_assets: set[AssetsDefinition] = set()
# Do not match any assets with no keys
for asset in set(a for a in assets if a.has_keys or a.has_check_keys):
# intersection
selected_subset = selected_asset_keys & asset.keys
# if specific checks were selected, only include those
if selected_asset_check_keys is not None:
selected_check_subset = selected_asset_check_keys & asset.check_keys
# if no checks were selected, filter to checks that target selected assets
else:
selected_check_subset = {
key for key in asset.check_keys if key.asset_key in selected_asset_keys
}
# all assets in this def are selected
if selected_subset == asset.keys and selected_check_subset == asset.check_keys:
included_assets.add(asset)
# no assets in this def are selected
elif len(selected_subset) == 0 and len(selected_check_subset) == 0:
excluded_assets.add(asset)
elif asset.can_subset:
# subset of the asset that we want
subset_asset = asset.subset_for(selected_asset_keys, selected_check_subset)
included_assets.add(subset_asset)
# subset of the asset that we don't want
excluded_assets.add(
asset.subset_for(
selected_asset_keys=asset.keys - subset_asset.keys,
selected_asset_check_keys=(asset.check_keys - subset_asset.check_keys),
)
)
# If the AssetsDefinition is not subsettable, include the whole definition without
# subsetting, even though some keys are not present in our selection.
elif allow_extraneous_asset_keys:
included_assets.add(asset)
else:
raise DagsterInvalidSubsetError(
f"When building job, the AssetsDefinition '{asset.node_def.name}' "
f"contains asset keys {sorted(list(asset.keys))} and check keys "
f"{sorted(list(asset.check_keys))}, but "
f"attempted to select only assets {sorted(list(selected_subset))} and checks "
f"{sorted(list(selected_check_subset))}. "
"This AssetsDefinition does not support subsetting. Please select all "
"asset and check keys produced by this asset.\n\nIf using an AssetSelection, you may "
"use required_multi_asset_neighbors() to select any remaining assets, for "
"example:\nAssetSelection.assets('my_asset').required_multi_asset_neighbors()"
)
return (
list(included_assets),
list(excluded_assets),
)
def _infer_and_validate_common_partitions_def(
asset_graph: AssetGraph,
asset_keys: Iterable[AssetKey],
allow_different_partitions_defs: bool,
required_partitions_def: Optional[PartitionsDefinition] = None,
) -> Optional[PartitionsDefinition]:
keys_by_partitions_def = defaultdict(set)
for key in asset_keys:
partitions_def = asset_graph.get(key).partitions_def
if partitions_def is not None:
if required_partitions_def is not None and partitions_def != required_partitions_def:
raise DagsterInvalidDefinitionError(
f"Executable asset {key} has a different partitions definition than"
f" the one specified for the job. Specifed partitions definition: {required_partitions_def}."
f" Asset partitions definition: {partitions_def}."
)
keys_by_partitions_def[partitions_def].add(key)
if len(keys_by_partitions_def) == 1:
return next(iter(keys_by_partitions_def.keys()))
else:
if len(keys_by_partitions_def) > 1 and not allow_different_partitions_defs:
keys_by_partitions_def_str = "\n".join(
f"{partitions_def}: {asset_keys}"
for partitions_def, asset_keys in keys_by_partitions_def.items()
)
raise DagsterInvalidDefinitionError(
f"Selected assets must have the same partitions definitions, but the"
f" selected assets have different partitions definitions: \n{keys_by_partitions_def_str}"
)
return None
def _get_blocking_asset_check_output_handles_by_asset_key(
assets_defs_by_node_handle: Mapping[NodeHandle, AssetsDefinition],
) -> Mapping[AssetKey, Sequence[NodeOutputHandle]]:
"""For each asset key, returns the set of node output handles that correspond to asset check
specs that should block the execution of downstream assets if they fail.
"""
check_specs_by_node_output_handle: Mapping[NodeOutputHandle, AssetCheckSpec] = {}
for node_handle, assets_def in assets_defs_by_node_handle.items():
for output_name, check_spec in assets_def.check_specs_by_output_name.items():
check_specs_by_node_output_handle[
NodeOutputHandle(node_handle=node_handle, output_name=output_name)
] = check_spec
blocking_asset_check_output_handles_by_asset_key: dict[AssetKey, set[NodeOutputHandle]] = (
defaultdict(set)
)
for node_output_handle, check_spec in check_specs_by_node_output_handle.items():
if check_spec.blocking:
blocking_asset_check_output_handles_by_asset_key[check_spec.asset_key].add(
node_output_handle
)
return {
asset_key: sorted(
blocking_asset_check_output_handles_by_asset_key[asset_key],
key=lambda node_output_handle: node_output_handle.output_name,
)
for asset_key in blocking_asset_check_output_handles_by_asset_key
}
def build_node_deps(
asset_graph: AssetGraph,
) -> tuple[
DependencyMapping[NodeInvocation],
Mapping[NodeHandle, AssetsDefinition],
]:
# sort so that nodes get a consistent name
assets_defs = sorted(asset_graph.assets_defs, key=lambda ad: (sorted(ak for ak in ad.keys)))
# if the same graph/op is used in multiple assets_definitions, their invocations must have
# different names. we keep track of definitions that share a name and add a suffix to their
# invocations to solve this issue
collisions: dict[str, int] = {}
assets_defs_by_node_handle: dict[NodeHandle, AssetsDefinition] = {}
node_alias_and_output_by_asset_key: dict[AssetKey, tuple[str, str]] = {}
for assets_def in (ad for ad in assets_defs if ad.is_executable):
node_name = assets_def.node_def.name
if collisions.get(node_name):
collisions[node_name] += 1
node_alias = f"{node_name}_{collisions[node_name]}"
else:
collisions[node_name] = 1
node_alias = node_name
# unique handle for each AssetsDefinition
assets_defs_by_node_handle[NodeHandle(node_alias, parent=None)] = assets_def
for output_name, key in assets_def.keys_by_output_name.items():
node_alias_and_output_by_asset_key[key] = (node_alias, output_name)
blocking_asset_check_output_handles_by_asset_key = (
_get_blocking_asset_check_output_handles_by_asset_key(
assets_defs_by_node_handle,
)
)
deps: dict[NodeInvocation, dict[str, IDependencyDefinition]] = {}
for node_handle, assets_def in assets_defs_by_node_handle.items():
# the key that we'll use to reference the node inside this AssetsDefinition
node_def_name = assets_def.node_def.name
alias = node_handle.name if node_handle.name != node_def_name else None
node_key = NodeInvocation(node_def_name, alias=alias)
deps[node_key] = {}
# For check-only nodes, we treat additional_deps as execution dependencies regardless
# of if these checks are blocking or not. For other nodes, we do not treat additional_deps
# on checks as execution dependencies.
#
# The precise reason for this is unknown, but this behavior must be preserved for
# backwards compatibility for now.
execution_dep_keys: set[AssetKey] = {
# include the deps of all assets in this assets def
*(
dep.asset_key
for key in assets_def.keys
for dep in assets_def.get_asset_spec(key).deps
),
# include the primary dep of all checks in this assets def
# if they are not targeting a key in this assets def
*(
spec.asset_key
for spec in assets_def.check_specs
if spec.asset_key not in assets_def.keys
),
}
if has_only_asset_checks(assets_def):
# include the additional deps of all checks in this assets def
execution_dep_keys |= {
dep.asset_key
for spec in assets_def.check_specs
for dep in spec.additional_deps
if dep.asset_key not in assets_def.keys
}
inputs_map = {
input_name: node_key
for input_name, node_key in assets_def.node_keys_by_input_name.items()
if node_key in execution_dep_keys
}
# connect each input of this AssetsDefinition to the proper upstream node
for input_name, upstream_asset_key in inputs_map.items():
# ignore self-deps
if upstream_asset_key in assets_def.keys:
continue
# if this assets def itself performs checks on an upstream key, exempt it from being
# blocked on other checks
if upstream_asset_key not in {ck.asset_key for ck in assets_def.check_keys}:
blocking_asset_check_output_handles = (
blocking_asset_check_output_handles_by_asset_key.get(upstream_asset_key, [])
)
asset_check_deps = [
DependencyDefinition(
node_output_handle.node_handle.name,
node_output_handle.output_name,
)
for node_output_handle in blocking_asset_check_output_handles or []
]
else:
blocking_asset_check_output_handles = []
asset_check_deps = []
if upstream_asset_key in node_alias_and_output_by_asset_key:
upstream_node_alias, upstream_output_name = node_alias_and_output_by_asset_key[
upstream_asset_key
]
asset_dep_def = DependencyDefinition(upstream_node_alias, upstream_output_name)
if blocking_asset_check_output_handles:
deps[node_key][input_name] = BlockingAssetChecksDependencyDefinition(
asset_check_dependencies=asset_check_deps, other_dependency=asset_dep_def
)
else:
deps[node_key][input_name] = asset_dep_def
elif asset_check_deps:
deps[node_key][input_name] = BlockingAssetChecksDependencyDefinition(
asset_check_dependencies=asset_check_deps, other_dependency=None
)
return deps, assets_defs_by_node_handle
def _has_cycles(
deps: DependencyMapping[NodeInvocation],
) -> bool:
"""Detect if there are cycles in a dependency dictionary."""
try:
node_deps: dict[str, set[str]] = {}
for upstream_node, downstream_deps in deps.items():
# handle either NodeInvocation or str
node_name = upstream_node.alias or upstream_node.name
node_deps[node_name] = set()
for dep in downstream_deps.values():
if isinstance(dep, DependencyDefinition):
node_deps[node_name].add(dep.node)
elif isinstance(dep, BlockingAssetChecksDependencyDefinition):
for subdep in dep.get_node_dependencies():
node_deps[node_name].add(subdep.node)
else:
check.failed(f"Unexpected dependency type {type(dep)}.")
# make sure that there is a valid topological sorting of these node dependencies
list(toposort(node_deps))
return False
# only try to resolve cycles if we have a cycle
except CircularDependencyError:
return True
def _attempt_resolve_node_cycles(asset_graph: AssetGraph) -> AssetGraph:
"""DFS starting at root nodes to color the asset dependency graph. Each time you leave your
current AssetsDefinition, the color increments.
At the end of this process, we'll have a coloring for the asset graph such that any asset which
is downstream of another asset via a different AssetsDefinition will be guaranteed to have
a different (greater) color.
Once we have our coloring, if any AssetsDefinition contains assets with different colors,
we split that AssetsDefinition into a subset for each individual color.
This ensures that no asset that shares a node with another asset will be downstream of
that asset via a different node (i.e. there will be no cycles).
"""
# color for each asset
colors: dict[AssetKey, int] = {}
# recursively color an asset and all of its downstream assets
def _dfs(key: AssetKey, cur_color: int):
node = asset_graph.get(key)
colors[key] = cur_color
# in an external asset, treat all downstream as if they're in the same node
cur_node_asset_keys = node.assets_def.keys if node.is_materializable else node.child_keys
for child_key in node.child_keys:
# if the downstream asset is in the current node,keep the same color
new_color = cur_color if child_key in cur_node_asset_keys else cur_color + 1
# if current color of the downstream asset is less than the new color, re-do dfs
if colors.get(child_key, -1) < new_color:
_dfs(child_key, new_color)
# dfs for each root node; will throw an error if there are key-level cycles
root_keys = asset_graph.toposorted_asset_keys_by_level[0]
for key in root_keys:
_dfs(key, 0)
color_mapping_by_assets_defs: dict[AssetsDefinition, Any] = defaultdict(
lambda: defaultdict(set)
)
for key, color in colors.items():
node = asset_graph.get(key)
color_mapping_by_assets_defs[node.assets_def][color].add(key)
subsetted_assets_defs: list[AssetsDefinition] = []
for assets_def, color_mapping in color_mapping_by_assets_defs.items():
if assets_def.is_external or len(color_mapping) == 1 or not assets_def.can_subset:
subsetted_assets_defs.append(assets_def)
else:
for asset_keys in color_mapping.values():
subsetted_assets_defs.append(
assets_def.subset_for(asset_keys, selected_asset_check_keys=None)
)
# We didn't color asset checks, so add any that are in their own node.
assets_defs_with_only_checks = [
ad for ad in asset_graph.assets_defs if has_only_asset_checks(ad)
]
asset_nodes_by_key, assets_defs_by_check_key = JobScopedAssetGraph.key_mappings_from_assets(
subsetted_assets_defs + assets_defs_with_only_checks
)
return JobScopedAssetGraph(asset_nodes_by_key, assets_defs_by_check_key, asset_graph)
def _ensure_resources_dont_conflict(
asset_graph: AssetGraph,
resource_defs: Mapping[str, ResourceDefinition],
) -> None:
"""Ensures that resources between assets, source assets, and provided resource dictionary do not conflict."""
resource_defs_from_assets = {}
for asset in asset_graph.assets_defs:
for resource_key, resource_def in asset.resource_defs.items():
if resource_key not in resource_defs_from_assets:
resource_defs_from_assets[resource_key] = resource_def
if resource_defs_from_assets[resource_key] != resource_def:
raise DagsterInvalidDefinitionError(
f"Conflicting versions of resource with key '{resource_key}' "
"were provided to different assets. When constructing a "
"job, all resource definitions provided to assets must "
"match by reference equality for a given key."
)
for resource_key, resource_def in resource_defs.items():
if (
resource_key != DEFAULT_IO_MANAGER_KEY
and resource_key in resource_defs_from_assets
and resource_defs_from_assets[resource_key] != resource_def
):
raise DagsterInvalidDefinitionError(
f"resource with key '{resource_key}' provided to job "
"conflicts with resource provided to assets. When constructing a "
"job, all resource definitions provided must "
"match by reference equality for a given key."
)
def check_resources_satisfy_requirements(
asset_graph: AssetGraph,
resource_defs: Mapping[str, ResourceDefinition],
) -> None:
"""Ensures that between the provided resources on an asset and the resource_defs mapping, that all resource requirements are satisfied.
Note that resources provided on assets cannot satisfy resource requirements provided on other assets.
"""
_ensure_resources_dont_conflict(asset_graph, resource_defs)
for assets_def in asset_graph.assets_defs:
ensure_requirements_satisfied(
merge_dicts(resource_defs, assets_def.resource_defs),
list(assets_def.get_resource_requirements()),
)
def get_all_resource_defs(
asset_graph: AssetGraph,
resource_defs: Mapping[str, ResourceDefinition],
) -> Mapping[str, ResourceDefinition]:
# Ensures that no resource keys conflict, and each asset has its resource requirements satisfied.
check_resources_satisfy_requirements(asset_graph, resource_defs)
all_resource_defs = dict(resource_defs)
for assets_def in asset_graph.assets_defs:
all_resource_defs = merge_dicts(all_resource_defs, assets_def.resource_defs)
return all_resource_defs
| JobScopedAssetGraph |
python | PyCQA__pylint | pylint/extensions/eq_without_hash.py | {
"start": 606,
"end": 1470
} | class ____(checkers.BaseChecker):
name = "eq-without-hash"
msgs = {
"W1641": (
"Implementing __eq__ without also implementing __hash__",
"eq-without-hash",
"Used when a class implements __eq__ but not __hash__. Objects get "
"None as their default __hash__ implementation if they also implement __eq__.",
),
}
@utils.only_required_for_messages("eq-without-hash")
def visit_classdef(self, node: nodes.ClassDef) -> None:
locals_and_methods = set(node.locals).union(x.name for x in node.mymethods())
if "__eq__" in locals_and_methods and "__hash__" not in locals_and_methods:
self.add_message("eq-without-hash", node=node, confidence=interfaces.HIGH)
def register(linter: PyLinter) -> None:
linter.register_checker(EqWithoutHash(linter))
| EqWithoutHash |
python | walkccc__LeetCode | solutions/2113. Elements in Array After Removing and Replacing Elements/2113.py | {
"start": 0,
"end": 459
} | class ____:
def elementInNums(
self,
nums: list[int],
queries: list[list[int]],
) -> list[int]:
n = len(nums)
def f(time: int, index: int) -> int:
if time < n: # [0, 1, 2] -> [1, 2] -> [2]
index += time
return -1 if index >= n else nums[index]
else: # [] -> [0] -> [0, 1]
return -1 if index >= time - n else nums[index]
return [f(time % (2 * n), index) for time, index in queries]
| Solution |
python | sqlalchemy__sqlalchemy | examples/syntax_extensions/qualify.py | {
"start": 669,
"end": 2403
} | class ____(SyntaxExtension, ClauseElement):
"""Define the QUALIFY class."""
predicate: ColumnElement[bool]
"""A single column expression that is the predicate within the QUALIFY."""
_traverse_internals = [
("predicate", visitors.InternalTraversal.dp_clauseelement)
]
"""This structure defines how SQLAlchemy can do a deep traverse of internal
contents of this structure. This is mostly used for cache key generation.
If the traversal is not written yet, the ``inherit_cache=False`` class
level attribute may be used to skip caching for the construct.
"""
def __init__(self, predicate: ColumnExpressionArgument):
self.predicate = coercions.expect(
roles.WhereHavingRole, predicate, apply_propagate_attrs=self
)
def apply_to_select(self, select_stmt: Select) -> None:
"""Called when the :meth:`.Select.ext` method is called.
The extension should apply itself to the :class:`.Select`, typically
using :meth:`.HasStatementExtensions.apply_syntax_extension_point`,
which receives a callable that receives a list of current elements to
be concatenated together and then returns a new list of elements to be
concatenated together in the final structure. The
:meth:`.SyntaxExtension.append_replacing_same_type` callable is
usually used for this.
"""
select_stmt.apply_syntax_extension_point(
self.append_replacing_same_type, "post_criteria"
)
@compiles(Qualify)
def _compile_qualify(element, compiler, **kw):
"""a compiles extension that delivers the SQL text for Qualify"""
return f"QUALIFY {compiler.process(element.predicate, **kw)}"
| Qualify |
python | google__jax | jax/_src/pallas/core.py | {
"start": 9896,
"end": 10227
} | class ____:
"""Use to index an array using an elementwise start index."""
block_size: int
padding: tuple[int, int] = (0, 0)
def __str__(self):
if self.padding == (0, 0):
return f"Element({self.block_size})"
return f"Element({self.block_size}, padding={self.padding})"
@dataclasses.dataclass(frozen=True)
| Element |
python | pandas-dev__pandas | pandas/plotting/_matplotlib/converter.py | {
"start": 12075,
"end": 30846
} | class ____(mdates.DateLocator):
UNIT = 1.0 / (24 * 3600 * 1000)
def __init__(self, tz) -> None:
mdates.DateLocator.__init__(self, tz)
self._interval = 1.0
def _get_unit(self):
return self.get_unit_generic(-1)
@staticmethod
def get_unit_generic(freq):
unit = mdates.RRuleLocator.get_unit_generic(freq)
if unit < 0:
return MilliSecondLocator.UNIT
return unit
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
# We need to cap at the endpoints of valid datetime
nmax, nmin = mdates.date2num((dmax, dmin))
num = (nmax - nmin) * 86400 * 1000
max_millis_ticks = 6
for interval in [1, 10, 50, 100, 200, 500]:
if num <= interval * (max_millis_ticks - 1):
self._interval = interval
break
# We went through the whole loop without breaking, default to 1
self._interval = 1000.0
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
"MillisecondLocator estimated to generate "
f"{estimate:d} ticks from {dmin} to {dmax}: exceeds Locator.MAXTICKS"
f"* 2 ({self.MAXTICKS * 2:d}) "
)
interval = self._get_interval()
freq = f"{interval}ms"
tz = self.tz.tzname(None)
st = dmin.replace(tzinfo=None)
ed = dmax.replace(tzinfo=None)
all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).astype(object)
try:
if len(all_dates) > 0:
locs = self.raise_if_exceeds(mdates.date2num(all_dates))
return locs
except Exception: # pragma: no cover
pass
lims = mdates.date2num([dmin, dmax])
return lims
def _get_interval(self):
return self._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
# We need to cap at the endpoints of valid datetime
dmin, dmax = self.datalim_to_dt()
vmin = mdates.date2num(dmin)
vmax = mdates.date2num(dmax)
return self.nonsingular(vmin, vmax)
# Fixed frequency dynamic tick locators and formatters
# -------------------------------------------------------------------------
# --- Locators ---
# -------------------------------------------------------------------------
def _get_default_annual_spacing(nyears) -> tuple[int, int]:
"""
Returns a default spacing between consecutive ticks for annual data.
"""
if nyears < 11:
(min_spacing, maj_spacing) = (1, 1)
elif nyears < 20:
(min_spacing, maj_spacing) = (1, 2)
elif nyears < 50:
(min_spacing, maj_spacing) = (1, 5)
elif nyears < 100:
(min_spacing, maj_spacing) = (5, 10)
elif nyears < 200:
(min_spacing, maj_spacing) = (5, 25)
elif nyears < 600:
(min_spacing, maj_spacing) = (10, 50)
else:
factor = nyears // 1000 + 1
(min_spacing, maj_spacing) = (factor * 20, factor * 100)
return (min_spacing, maj_spacing)
def _period_break(dates: PeriodIndex, period: str) -> npt.NDArray[np.intp]:
"""
Returns the indices where the given period changes.
Parameters
----------
dates : PeriodIndex
Array of intervals to monitor.
period : str
Name of the period to monitor.
"""
mask = _period_break_mask(dates, period)
return np.nonzero(mask)[0]
def _period_break_mask(dates: PeriodIndex, period: str) -> npt.NDArray[np.bool_]:
current = getattr(dates, period)
previous = getattr(dates - 1 * dates.freq, period)
return current != previous
def has_level_label(label_flags: npt.NDArray[np.intp], vmin: float) -> bool:
"""
Returns true if the ``label_flags`` indicate there is at least one label
for this level.
if the minimum view limit is not an exact integer, then the first tick
label won't be shown, so we must adjust for that.
"""
if label_flags.size == 0 or (
label_flags.size == 1 and label_flags[0] == 0 and vmin % 1 > 0.0
):
return False
else:
return True
def _get_periods_per_ymd(freq: BaseOffset) -> tuple[int, int, int]:
# error: "BaseOffset" has no attribute "_period_dtype_code"
dtype_code = freq._period_dtype_code # type: ignore[attr-defined]
freq_group = FreqGroup.from_period_dtype_code(dtype_code)
ppd = -1 # placeholder for above-day freqs
if dtype_code >= FreqGroup.FR_HR.value: # pyright: ignore[reportAttributeAccessIssue]
# error: "BaseOffset" has no attribute "_creso"
ppd = periods_per_day(freq._creso) # type: ignore[attr-defined]
ppm = 28 * ppd
ppy = 365 * ppd
elif freq_group == FreqGroup.FR_BUS:
ppm = 19
ppy = 261
elif freq_group == FreqGroup.FR_DAY:
ppm = 28
ppy = 365
elif freq_group == FreqGroup.FR_WK:
ppm = 3
ppy = 52
elif freq_group == FreqGroup.FR_MTH:
ppm = 1
ppy = 12
elif freq_group == FreqGroup.FR_QTR:
ppm = -1 # placerholder
ppy = 4
elif freq_group == FreqGroup.FR_ANN:
ppm = -1 # placeholder
ppy = 1
else:
raise NotImplementedError(f"Unsupported frequency: {dtype_code}")
return ppd, ppm, ppy
@functools.cache
def _daily_finder(vmin: float, vmax: float, freq: BaseOffset) -> np.ndarray:
# error: "BaseOffset" has no attribute "_period_dtype_code"
dtype_code = freq._period_dtype_code # type: ignore[attr-defined]
periodsperday, periodspermonth, periodsperyear = _get_periods_per_ymd(freq)
# save this for later usage
vmin_orig = vmin
(vmin, vmax) = (int(vmin), int(vmax))
span = vmax - vmin + 1
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "Period with BDay freq is deprecated", category=FutureWarning
)
warnings.filterwarnings(
"ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning
)
dates_ = period_range(
start=Period(ordinal=vmin, freq=freq),
end=Period(ordinal=vmax, freq=freq),
freq=freq,
)
# Initialize the output
info = np.zeros(
span, dtype=[("val", np.int64), ("maj", bool), ("min", bool), ("fmt", "|S20")]
)
info["val"][:] = dates_.asi8
info["fmt"][:] = ""
info["maj"][[0, -1]] = True
# .. and set some shortcuts
info_maj = info["maj"]
info_min = info["min"]
info_fmt = info["fmt"]
def first_label(label_flags):
if (label_flags[0] == 0) and (label_flags.size > 1) and ((vmin_orig % 1) > 0.0):
return label_flags[1]
else:
return label_flags[0]
# Case 1. Less than a month
if span <= periodspermonth:
day_start = _period_break(dates_, "day")
month_start = _period_break(dates_, "month")
year_start = _period_break(dates_, "year")
def _hour_finder(label_interval: int, force_year_start: bool) -> None:
target = dates_.hour
mask = _period_break_mask(dates_, "hour")
info_maj[day_start] = True
info_min[mask & (target % label_interval == 0)] = True
info_fmt[mask & (target % label_interval == 0)] = "%H:%M"
info_fmt[day_start] = "%H:%M\n%d-%b"
info_fmt[year_start] = "%H:%M\n%d-%b\n%Y"
if force_year_start and not has_level_label(year_start, vmin_orig):
info_fmt[first_label(day_start)] = "%H:%M\n%d-%b\n%Y"
def _minute_finder(label_interval: int) -> None:
target = dates_.minute
hour_start = _period_break(dates_, "hour")
mask = _period_break_mask(dates_, "minute")
info_maj[hour_start] = True
info_min[mask & (target % label_interval == 0)] = True
info_fmt[mask & (target % label_interval == 0)] = "%H:%M"
info_fmt[day_start] = "%H:%M\n%d-%b"
info_fmt[year_start] = "%H:%M\n%d-%b\n%Y"
def _second_finder(label_interval: int) -> None:
target = dates_.second
minute_start = _period_break(dates_, "minute")
mask = _period_break_mask(dates_, "second")
info_maj[minute_start] = True
info_min[mask & (target % label_interval == 0)] = True
info_fmt[mask & (target % label_interval == 0)] = "%H:%M:%S"
info_fmt[day_start] = "%H:%M:%S\n%d-%b"
info_fmt[year_start] = "%H:%M:%S\n%d-%b\n%Y"
if span < periodsperday / 12000:
_second_finder(1)
elif span < periodsperday / 6000:
_second_finder(2)
elif span < periodsperday / 2400:
_second_finder(5)
elif span < periodsperday / 1200:
_second_finder(10)
elif span < periodsperday / 800:
_second_finder(15)
elif span < periodsperday / 400:
_second_finder(30)
elif span < periodsperday / 150:
_minute_finder(1)
elif span < periodsperday / 70:
_minute_finder(2)
elif span < periodsperday / 24:
_minute_finder(5)
elif span < periodsperday / 12:
_minute_finder(15)
elif span < periodsperday / 6:
_minute_finder(30)
elif span < periodsperday / 2.5:
_hour_finder(1, False)
elif span < periodsperday / 1.5:
_hour_finder(2, False)
elif span < periodsperday * 1.25:
_hour_finder(3, False)
elif span < periodsperday * 2.5:
_hour_finder(6, True)
elif span < periodsperday * 4:
_hour_finder(12, True)
else:
info_maj[month_start] = True
info_min[day_start] = True
info_fmt[day_start] = "%d"
info_fmt[month_start] = "%d\n%b"
info_fmt[year_start] = "%d\n%b\n%Y"
if not has_level_label(year_start, vmin_orig):
if not has_level_label(month_start, vmin_orig):
info_fmt[first_label(day_start)] = "%d\n%b\n%Y"
else:
info_fmt[first_label(month_start)] = "%d\n%b\n%Y"
# Case 2. Less than three months
elif span <= periodsperyear // 4:
month_start = _period_break(dates_, "month")
info_maj[month_start] = True
if dtype_code < FreqGroup.FR_HR.value: # pyright: ignore[reportAttributeAccessIssue]
info["min"] = True
else:
day_start = _period_break(dates_, "day")
info["min"][day_start] = True
week_start = _period_break(dates_, "week")
year_start = _period_break(dates_, "year")
info_fmt[week_start] = "%d"
info_fmt[month_start] = "\n\n%b"
info_fmt[year_start] = "\n\n%b\n%Y"
if not has_level_label(year_start, vmin_orig):
if not has_level_label(month_start, vmin_orig):
info_fmt[first_label(week_start)] = "\n\n%b\n%Y"
else:
info_fmt[first_label(month_start)] = "\n\n%b\n%Y"
# Case 3. Less than 14 months ...............
elif span <= 1.15 * periodsperyear:
year_start = _period_break(dates_, "year")
month_start = _period_break(dates_, "month")
week_start = _period_break(dates_, "week")
info_maj[month_start] = True
info_min[week_start] = True
info_min[year_start] = False
info_min[month_start] = False
info_fmt[month_start] = "%b"
info_fmt[year_start] = "%b\n%Y"
if not has_level_label(year_start, vmin_orig):
info_fmt[first_label(month_start)] = "%b\n%Y"
# Case 4. Less than 2.5 years ...............
elif span <= 2.5 * periodsperyear:
year_start = _period_break(dates_, "year")
quarter_start = _period_break(dates_, "quarter")
month_start = _period_break(dates_, "month")
info_maj[quarter_start] = True
info_min[month_start] = True
info_fmt[quarter_start] = "%b"
info_fmt[year_start] = "%b\n%Y"
# Case 4. Less than 4 years .................
elif span <= 4 * periodsperyear:
year_start = _period_break(dates_, "year")
month_start = _period_break(dates_, "month")
info_maj[year_start] = True
info_min[month_start] = True
info_min[year_start] = False
month_break = dates_[month_start].month
jan_or_jul = month_start[(month_break == 1) | (month_break == 7)]
info_fmt[jan_or_jul] = "%b"
info_fmt[year_start] = "%b\n%Y"
# Case 5. Less than 11 years ................
elif span <= 11 * periodsperyear:
year_start = _period_break(dates_, "year")
quarter_start = _period_break(dates_, "quarter")
info_maj[year_start] = True
info_min[quarter_start] = True
info_min[year_start] = False
info_fmt[year_start] = "%Y"
# Case 6. More than 12 years ................
else:
year_start = _period_break(dates_, "year")
year_break = dates_[year_start].year
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
major_idx = year_start[(year_break % maj_anndef == 0)]
info_maj[major_idx] = True
minor_idx = year_start[(year_break % min_anndef == 0)]
info_min[minor_idx] = True
info_fmt[major_idx] = "%Y"
return info
@functools.cache
def _monthly_finder(vmin: float, vmax: float, freq: BaseOffset) -> np.ndarray:
_, _, periodsperyear = _get_periods_per_ymd(freq)
vmin_orig = vmin
(vmin, vmax) = (int(vmin), int(vmax))
span = vmax - vmin + 1
# Initialize the output
info = np.zeros(
span, dtype=[("val", int), ("maj", bool), ("min", bool), ("fmt", "|S8")]
)
info["val"] = np.arange(vmin, vmax + 1)
dates_ = info["val"]
info["fmt"] = ""
year_start = (dates_ % 12 == 0).nonzero()[0]
info_maj = info["maj"]
info_fmt = info["fmt"]
if span <= 1.15 * periodsperyear:
info_maj[year_start] = True
info["min"] = True
info_fmt[:] = "%b"
info_fmt[year_start] = "%b\n%Y"
if not has_level_label(year_start, vmin_orig):
if dates_.size > 1:
idx = 1
else:
idx = 0
info_fmt[idx] = "%b\n%Y"
elif span <= 2.5 * periodsperyear:
quarter_start = (dates_ % 3 == 0).nonzero()
info_maj[year_start] = True
# TODO: Check the following : is it really info['fmt'] ?
# 2023-09-15 this is reached in test_finder_monthly
info["fmt"][quarter_start] = True
info["min"] = True
info_fmt[quarter_start] = "%b"
info_fmt[year_start] = "%b\n%Y"
elif span <= 4 * periodsperyear:
info_maj[year_start] = True
info["min"] = True
jan_or_jul = (dates_ % 12 == 0) | (dates_ % 12 == 6)
info_fmt[jan_or_jul] = "%b"
info_fmt[year_start] = "%b\n%Y"
elif span <= 11 * periodsperyear:
quarter_start = (dates_ % 3 == 0).nonzero()
info_maj[year_start] = True
info["min"][quarter_start] = True
info_fmt[year_start] = "%Y"
else:
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
years = dates_[year_start] // 12 + 1
major_idx = year_start[(years % maj_anndef == 0)]
info_maj[major_idx] = True
info["min"][year_start[(years % min_anndef == 0)]] = True
info_fmt[major_idx] = "%Y"
return info
@functools.cache
def _quarterly_finder(vmin: float, vmax: float, freq: BaseOffset) -> np.ndarray:
_, _, periodsperyear = _get_periods_per_ymd(freq)
vmin_orig = vmin
(vmin, vmax) = (int(vmin), int(vmax))
span = vmax - vmin + 1
info = np.zeros(
span, dtype=[("val", int), ("maj", bool), ("min", bool), ("fmt", "|S8")]
)
info["val"] = np.arange(vmin, vmax + 1)
info["fmt"] = ""
dates_ = info["val"]
info_maj = info["maj"]
info_fmt = info["fmt"]
year_start = (dates_ % 4 == 0).nonzero()[0]
if span <= 3.5 * periodsperyear:
info_maj[year_start] = True
info["min"] = True
info_fmt[:] = "Q%q"
info_fmt[year_start] = "Q%q\n%F"
if not has_level_label(year_start, vmin_orig):
if dates_.size > 1:
idx = 1
else:
idx = 0
info_fmt[idx] = "Q%q\n%F"
elif span <= 11 * periodsperyear:
info_maj[year_start] = True
info["min"] = True
info_fmt[year_start] = "%F"
else:
# https://github.com/pandas-dev/pandas/pull/47602
years = dates_[year_start] // 4 + 1970
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
major_idx = year_start[(years % maj_anndef == 0)]
info_maj[major_idx] = True
info["min"][year_start[(years % min_anndef == 0)]] = True
info_fmt[major_idx] = "%F"
return info
@functools.cache
def _annual_finder(vmin: float, vmax: float, freq: BaseOffset) -> np.ndarray:
# Note: small difference here vs other finders in adding 1 to vmax
(vmin, vmax) = (int(vmin), int(vmax + 1))
span = vmax - vmin + 1
info = np.zeros(
span, dtype=[("val", int), ("maj", bool), ("min", bool), ("fmt", "|S8")]
)
info["val"] = np.arange(vmin, vmax + 1)
info["fmt"] = ""
dates_ = info["val"]
(min_anndef, maj_anndef) = _get_default_annual_spacing(span)
major_idx = dates_ % maj_anndef == 0
minor_idx = dates_ % min_anndef == 0
info["maj"][major_idx] = True
info["min"][minor_idx] = True
info["fmt"][major_idx] = "%Y"
return info
def get_finder(freq: BaseOffset):
# error: "BaseOffset" has no attribute "_period_dtype_code"
dtype_code = freq._period_dtype_code # type: ignore[attr-defined]
fgroup = FreqGroup.from_period_dtype_code(dtype_code)
if fgroup == FreqGroup.FR_ANN:
return _annual_finder
elif fgroup == FreqGroup.FR_QTR:
return _quarterly_finder
elif fgroup == FreqGroup.FR_MTH:
return _monthly_finder
elif (dtype_code >= FreqGroup.FR_BUS.value) or fgroup == FreqGroup.FR_WK: # pyright: ignore[reportAttributeAccessIssue]
return _daily_finder
else: # pragma: no cover
raise NotImplementedError(f"Unsupported frequency: {dtype_code}")
| MilliSecondLocator |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/base.py | {
"start": 54870,
"end": 55332
} | class ____(NamedTuple):
columns: Optional[Sequence[Column[Any]]] = None
is_explicit: bool = False
is_autoinc: bool = False
default_characterization: _SentinelDefaultCharacterization = (
_SentinelDefaultCharacterization.NONE
)
_COLKEY = TypeVar("_COLKEY", Union[None, str], str)
_COL_co = TypeVar("_COL_co", bound="ColumnElement[Any]", covariant=True)
_COL = TypeVar("_COL", bound="ColumnElement[Any]")
| _SentinelColumnCharacterization |
python | huggingface__transformers | src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py | {
"start": 50506,
"end": 58903
} | class ____(Phi4MultimodalAudioPreTrainedModel):
def __init__(self, config: Phi4MultimodalAudioConfig):
super().__init__(config)
self.config = config
self.encoder_embedding = Phi4MultimodalAudioMeanVarianceNormLayer(config)
self.embed = Phi4MultimodalAudioNemoConvSubsampling(config)
self.relative_attention_bias_layer = Phi4MultimodalAudioRelativeAttentionBias(config)
self.encoders = nn.ModuleList(
[Phi4MultimodalAudioConformerEncoderLayer(config) for _ in range(config.num_blocks)]
)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def _streaming_mask(self, seq_len, batch_size, chunk_size, left_chunk):
# Create mask matrix for streaming
# S stores start index. if chunksize is 18, s is [0,18,36,....]
chunk_start_idx = np.arange(0, seq_len, chunk_size)
# avoid randomness when run evaluation or decoding
if self.training and np.random.rand() > 0.5:
# Either first or last chunk is not complete.
# If only the last one is not complete, EOS is not effective
chunk_start_idx = seq_len - chunk_start_idx
chunk_start_idx = chunk_start_idx[::-1]
chunk_start_idx = chunk_start_idx[:-1]
chunk_start_idx = np.insert(chunk_start_idx, 0, 0)
enc_streaming_mask = (
adaptive_enc_mask(seq_len, chunk_start_idx, left_window=left_chunk)
.unsqueeze(0)
.expand([batch_size, -1, -1])
)
return enc_streaming_mask
def forward_embeddings(self, hidden_states, masks):
"""Forwarding the inputs through the top embedding layers"""
seq_len = math.ceil(hidden_states.shape[1] / self.config.time_reduction)
if seq_len <= 0:
raise ValueError(
f"The sequence length after time reduction is invalid: {seq_len}. Your input feature is too short."
)
batch_size = hidden_states.shape[0]
enc_streaming_mask = self._streaming_mask(seq_len, batch_size, self.config.chunk_size, self.config.left_chunk)
enc_streaming_mask = enc_streaming_mask.to(hidden_states.device)
hidden_states, masks = self.embed(hidden_states, masks)
streaming_mask = enc_streaming_mask
if streaming_mask is not None and masks is not None:
hs_mask = masks & streaming_mask
elif masks is not None:
hs_mask = masks
else:
hs_mask = streaming_mask
return hidden_states, hs_mask, masks
def calculate_hs_mask(self, hidden_states, device, mask):
max_audio_length = hidden_states.shape[1]
batch_size = hidden_states.shape[0]
enc_streaming_mask = self._streaming_mask(
max_audio_length, batch_size, self.config.chunk_size, self.config.left_chunk
)
enc_streaming_mask = enc_streaming_mask.to(device)
if mask is None:
return enc_streaming_mask
feature_lens = mask.sum(1)
padding_length = feature_lens
pad_mask = torch.arange(0, max_audio_length, device=device).expand(
padding_length.size(0), -1
) < padding_length.unsqueeze(1)
pad_mask = pad_mask.unsqueeze(1)
pad_mask = pad_mask & enc_streaming_mask
return pad_mask
def forward(self, hidden_states: torch.Tensor, mask: Optional[torch.Tensor]):
hidden_states = self.encoder_embedding(hidden_states)
hidden_states, hs_mask, mask = self.forward_embeddings(hidden_states, mask)
unfolded = False
bs, seq_len, _ = hidden_states.shape
max_seq_len = 500 # maximum position for absolute positional encoding
if seq_len > max_seq_len:
# audio sequence is longer than max_seq_len, unfold it into chunks of max_seq_len
unfolded = True
# the unfold op will drop residual frames, pad it to the multiple of max_seq_len
if seq_len % max_seq_len > 0:
chunk_pad_size = max_seq_len - (seq_len % max_seq_len)
else:
chunk_pad_size = 0
if chunk_pad_size > 0:
hidden_states_pad = F.pad(hidden_states, (0, 0, 0, chunk_pad_size), "constant", 0)
hidden_states = hidden_states_pad.to(hidden_states.device)
hidden_states = unfold_tensor(hidden_states, max_seq_len)
masks_unfold = None
if mask is not None:
# revise hs_mask here because the previous calculated hs_mask did not consider extra pad
subsampled_pad_mask = mask.squeeze(1) # [bz, subsampled_unmask_seq_len]
extra_padded_subsamlped_pad_mask = F.pad(
subsampled_pad_mask, (0, chunk_pad_size), "constant", False
) # extra padding to the pad mask
extra_padded_subsamlped_pad_mask = extra_padded_subsamlped_pad_mask.unsqueeze(-1).float()
masks_unfold = unfold_tensor(
extra_padded_subsamlped_pad_mask, max_seq_len
) # unfold the pad mask like we did to the input tensor
masks_unfold = masks_unfold.squeeze(-1).bool() # unfold op does not support bool tensor
hs_mask = self.calculate_hs_mask(
hidden_states, hidden_states.device, masks_unfold
) # calculate hs_mask based on the unfolded pad mask
relative_attention_bias = self.relative_attention_bias_layer(hidden_states)
attention_mask = hs_mask.unsqueeze(1) + relative_attention_bias
for layer in self.encoders:
hidden_states = layer(hidden_states, attention_mask)
if unfolded:
embed_dim = hidden_states.shape[-1]
hidden_states = hidden_states.reshape(bs, -1, embed_dim)
# if we ever padded before unfolding, we need to remove the padding
if chunk_pad_size > 0:
hidden_states = hidden_states[:, :-chunk_pad_size, :]
return hidden_states
def unfold_tensor(tensor, max_seq_len):
"""
For a given tensor with shape of (N, T, D), if sequence length T is longer than max_seq_len,
this function unfold it to a (NT', max_seq_len, D) where T' is T // max_seq_len.
Args:
tensor: N, T, D
"""
_, _, D = tensor.shape
tensor = tensor.transpose(-1, -2)
# N x D x 1 x T => N x (D x max_seq_len) x T'
tensor = F.unfold(tensor[..., None, :], kernel_size=(1, max_seq_len), stride=(1, max_seq_len))
new_bsz, _, slen = tensor.shape
tensor = tensor.view(new_bsz, -1, max_seq_len, slen)
tensor = tensor.permute(0, 3, 2, 1)
tensor = tensor.view(-1, max_seq_len, D).contiguous()
return tensor
def adaptive_enc_mask(x_len, chunk_start_idx, left_window=0, right_window=0):
"""
The function is very important for Transformer Transducer Streaming mode
Args:
xs_len (int): sequence length
chunk_start_idx (list): first idx of each chunk, such as [0,18,36,48]. It also supports adaptive chunk size [0,10,15,45]
left_window (int): how many left chunks can be seen
right_window (int): how many right chunks can be seen. It is used for chunk overlap model.
Returns:
mask (torch.Tensor): a mask tensor for streaming model
"""
chunk_start_idx = torch.Tensor(chunk_start_idx).long()
start_pad = torch.nn.functional.pad(
chunk_start_idx, (1, 0)
) # append 0 to the beginning, so it becomes [0, 0, 18, 36, 48]
end_pad = torch.nn.functional.pad(
chunk_start_idx, (0, 1), value=x_len
) # append x_len to the end, so it becomes [0,18,36,48, x_len]
seq_range = torch.arange(0, x_len).unsqueeze(-1)
idx = ((seq_range < end_pad) & (seq_range >= start_pad)).nonzero()[:, 1]
seq_range_expand = torch.arange(0, x_len).unsqueeze(0).expand(x_len, -1)
idx_left = idx - left_window
idx_left[idx_left < 0] = 0
boundary_left = start_pad[idx_left]
mask_left = seq_range_expand >= boundary_left.unsqueeze(-1)
idx_right = idx + right_window
idx_right[idx_right > len(chunk_start_idx)] = len(chunk_start_idx)
boundary_right = end_pad[idx_right]
mask_right = seq_range_expand < boundary_right.unsqueeze(-1)
return mask_left & mask_right
| Phi4MultimodalAudioModel |
python | ansible__ansible | lib/ansible/_internal/_wrapt.py | {
"start": 13913,
"end": 14151
class ____(ObjectProxy):
    # Proxy around a callable: delegates __call__ to the wrapped object.
    def __call__(*args, **kwargs):
        # NOTE(review): deliberately declared WITHOUT an explicit `self`
        # parameter — presumably (matching upstream wrapt) so that a keyword
        # argument literally named "self" can be forwarded to the wrapped
        # callable without colliding; confirm against wrapt upstream.
        def _unpack_self(self, *args):
            # Split the bound instance off the front of the positional args.
            return self, args

        self, args = _unpack_self(*args)

        # Delegate, preserving all remaining positional and keyword arguments.
        return self.__wrapped__(*args, **kwargs)
| CallableObjectProxy |
python | huggingface__transformers | src/transformers/models/switch_transformers/modular_switch_transformers.py | {
"start": 29896,
"end": 33250
class ____(SwitchTransformersPreTrainedModel):
    """Bare Switch Transformers encoder-decoder model (no LM head).

    A single token-embedding table (``self.shared``) is used by both the
    encoder and the decoder; the tying is declared in ``_tied_weights_keys``.
    """

    # Encoder and decoder embedding tables are tied to the shared table.
    _tied_weights_keys = {
        "encoder.embed_tokens.weight": "shared.weight",
        "decoder.embed_tokens.weight": "shared.weight",
    }

    def __init__(self, config: SwitchTransformersConfig):
        super().__init__(config)
        self.shared = nn.Embedding(config.vocab_size, config.d_model)

        # Encoder stack: not a decoder, no KV cache, and encoder/decoder
        # weight tying is disabled on the per-stack config copy.
        encoder_config = copy.deepcopy(config)
        encoder_config.is_decoder = False
        encoder_config.use_cache = False
        encoder_config.tie_encoder_decoder = False
        self.encoder = SwitchTransformersStack(encoder_config)

        # Decoder stack (its own deep-copied config).
        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        decoder_config.tie_encoder_decoder = False
        self.decoder = SwitchTransformersStack(decoder_config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        """Return the shared input embedding table."""
        return self.shared

    def set_input_embeddings(self, new_embeddings):
        """Replace the embedding table on the model, encoder and decoder."""
        self.shared = new_embeddings
        self.encoder.set_input_embeddings(new_embeddings)
        self.decoder.set_input_embeddings(new_embeddings)

    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        decoder_inputs_embeds: Optional[torch.Tensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple[torch.FloatTensor], Seq2SeqMoEModelOutput]:
        # Run the encoder only when precomputed encoder outputs are not given
        # (e.g. during incremental decoding the caller passes them in).
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs
            )

        hidden_states = encoder_outputs[0]

        # Decode, cross-attending over the encoder's last hidden states.
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            past_key_values=past_key_values,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            cache_position=cache_position,
            **kwargs,
        )

        # Merge encoder and decoder outputs (including per-layer MoE router
        # logits) into a single Seq2SeqMoEModelOutput.
        return Seq2SeqMoEModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            decoder_router_logits=decoder_outputs.router_logits,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
            encoder_router_logits=encoder_outputs.router_logits,
        )
@auto_docstring(
custom_intro="""
SWITCH_TRANSFORMERS Model with a `language modeling` head on top.
"""
)
| SwitchTransformersModel |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 953592,
"end": 957027
} | class ____(Predicate):
"""
FieldLTEPredicate schema wrapper.
Parameters
----------
field : str, :class:`FieldName`
Field to be tested.
lte : str, dict, float, :class:`ExprRef`, :class:`DateTime`
The value that the field should be less than or equals to.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit for the field to be tested.
"""
_schema = {"$ref": "#/definitions/FieldLTEPredicate"}
def __init__(
self,
field: Optional[str | SchemaBase] = Undefined,
lte: Optional[
str | float | Temporal | Parameter | SchemaBase | Map
] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
**kwds,
):
super().__init__(field=field, lte=lte, timeUnit=timeUnit, **kwds)
| FieldLTEPredicate |
python | ray-project__ray | python/ray/tune/search/variant_generator.py | {
"start": 17305,
"end": 17420
} | class ____(Exception):
def __init__(self, msg: str):
Exception.__init__(self, msg)
| RecursiveDependencyError |
python | scipy__scipy | scipy/sparse/linalg/_interface.py | {
"start": 24564,
"end": 25717
class ____(LinearOperator):
    """Linear operator representing ``alpha * A`` for a scalar ``alpha``."""

    def __init__(self, A, alpha):
        # Validate the operand types up front.
        if not isinstance(A, LinearOperator):
            raise ValueError('LinearOperator expected as A')
        if not np.isscalar(alpha):
            raise ValueError('scalar expected as alpha')
        # Collapse nested scalings beta*(alpha*A) into a single prefactor
        # (beta*alpha)*A instead of stacking proxy objects.
        if isinstance(A, _ScaledLinearOperator):
            A, alpha_original = A.args
            # Avoid in-place multiplication so that we don't accidentally mutate
            # the original prefactor.
            alpha = alpha * alpha_original
        # Result dtype accounts for both A's dtype and the scalar's type.
        dtype = _get_dtype([A], [type(alpha)])
        super().__init__(dtype, A.shape)
        self.args = (A, alpha)

    # Note: args[1] is alpha (a scalar), so use `*` below, not `@`
    def _matvec(self, x):
        return self.args[1] * self.args[0].matvec(x)

    def _rmatvec(self, x):
        # Adjoint action scales by the complex conjugate of alpha.
        return np.conj(self.args[1]) * self.args[0].rmatvec(x)

    def _rmatmat(self, x):
        return np.conj(self.args[1]) * self.args[0].rmatmat(x)

    def _matmat(self, x):
        return self.args[1] * self.args[0].matmat(x)

    def _adjoint(self):
        # (alpha*A)^H == conj(alpha) * A^H
        A, alpha = self.args
        return A.H * np.conj(alpha)
| _ScaledLinearOperator |
python | altair-viz__altair | tests/utils/test_schemapi.py | {
"start": 2634,
"end": 2759
class ____(_TestSchema):
    # Schema node validated against the "StringMapping" definition that lives
    # inside the shared root schema (MySchema._schema).
    _schema = {"$ref": "#/definitions/StringMapping"}
    _rootschema = MySchema._schema
| StringMapping |
python | PrefectHQ__prefect | src/prefect/input/run_input.py | {
"start": 1008,
"end": 1590
} | class ____(RunInput):
number: int
@flow
async def sender_flow(receiver_flow_run_id: UUID):
logger = get_run_logger()
the_number = random.randint(1, 100)
await NumberData(number=the_number).send_to(receiver_flow_run_id)
receiver = NumberData.receive(flow_run_id=receiver_flow_run_id)
squared = await receiver.next()
logger.info(f"{the_number} squared is {squared.number}")
```
Receiver flow:
```python
import random
from uuid import UUID
from prefect import flow
from prefect.logging import get_run_logger
from prefect.input import RunInput
| NumberData |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/executors/ecs/test_utils.py | {
"start": 4681,
"end": 9232
class ____:
    """Test EcsExecutorTask class."""

    def test_ecs_executor_task_creation(self):
        """Test EcsExecutorTask object creation."""
        task_arn = "arn:aws:ecs:us-east-1:123456789012:task/test-task"
        last_status = "RUNNING"
        desired_status = "RUNNING"
        containers = [{"name": "container1", "exit_code": 0}]
        started_at = datetime.datetime.now()
        stopped_reason = None
        external_executor_id = "test-executor-id"
        task = EcsExecutorTask(
            task_arn=task_arn,
            last_status=last_status,
            desired_status=desired_status,
            containers=containers,
            started_at=started_at,
            stopped_reason=stopped_reason,
            external_executor_id=external_executor_id,
        )
        # Every constructor argument must round-trip to an attribute.
        assert task.task_arn == task_arn
        assert task.last_status == last_status
        assert task.desired_status == desired_status
        assert task.containers == containers
        assert task.started_at == started_at
        assert task.stopped_reason == stopped_reason
        assert task.external_executor_id == external_executor_id

    def test_get_task_state_running(self):
        """Test get_task_state returns RUNNING when last_status is RUNNING."""
        task = EcsExecutorTask(
            task_arn="arn:aws:ecs:us-east-1:123456789012:task/test-task",
            last_status="RUNNING",
            desired_status="RUNNING",
            containers=[{"name": "container1", "exit_code": 0}],
        )
        assert task.get_task_state() == State.RUNNING

    def test_get_task_state_queued(self):
        """Test get_task_state returns QUEUED when desired_status is RUNNING but last_status is not RUNNING."""
        task = EcsExecutorTask(
            task_arn="arn:aws:ecs:us-east-1:123456789012:task/test-task",
            last_status="PENDING",
            desired_status="RUNNING",
            containers=[{"name": "container1", "exit_code": 0}],
        )
        assert task.get_task_state() == State.QUEUED

    def test_get_task_state_removed_timeout(self):
        """Test get_task_state returns REMOVED when task timed out."""
        # started_at=None with a STOPPED status is treated as a timed-out task.
        task = EcsExecutorTask(
            task_arn="arn:aws:ecs:us-east-1:123456789012:task/test-task",
            last_status="STOPPED",
            desired_status="STOPPED",
            containers=[{"name": "container1", "exit_code": 0}],
            started_at=None,
        )
        assert task.get_task_state() == State.REMOVED

    def test_get_task_state_running_not_finished(self):
        """Test get_task_state returns RUNNING when task is not finished."""
        task = EcsExecutorTask(
            task_arn="arn:aws:ecs:us-east-1:123456789012:task/test-task",
            last_status="RUNNING",
            desired_status="RUNNING",
            containers=[{"name": "container1"}],  # No exit_code
        )
        assert task.get_task_state() == State.RUNNING

    def test_get_task_state_success(self):
        """Test get_task_state returns SUCCESS when all containers succeeded."""
        task = EcsExecutorTask(
            task_arn="arn:aws:ecs:us-east-1:123456789012:task/test-task",
            last_status="STOPPED",
            desired_status="STOPPED",
            containers=[
                {"name": "container1", "exit_code": 0},
                {"name": "container2", "exit_code": 0},
            ],
            started_at=datetime.datetime.now(),
        )
        assert task.get_task_state() == State.SUCCESS

    def test_get_task_state_failed(self):
        """Test get_task_state returns FAILED when at least one container failed."""
        # A single non-zero exit code is enough to fail the whole task.
        task = EcsExecutorTask(
            task_arn="arn:aws:ecs:us-east-1:123456789012:task/test-task",
            last_status="STOPPED",
            desired_status="STOPPED",
            containers=[
                {"name": "container1", "exit_code": 0},
                {"name": "container2", "exit_code": 1},
            ],
            started_at=datetime.datetime.now(),
        )
        assert task.get_task_state() == State.FAILED

    def test_repr(self):
        """Test __repr__ method."""
        task = EcsExecutorTask(
            task_arn="arn:aws:ecs:us-east-1:123456789012:task/test-task",
            last_status="RUNNING",
            desired_status="RUNNING",
            containers=[{"name": "container1", "exit_code": 0}],
        )
        expected = "(arn:aws:ecs:us-east-1:123456789012:task/test-task, RUNNING->RUNNING, running)"
        assert repr(task) == expected
| TestEcsExecutorTask |
python | Pylons__pyramid | src/pyramid/httpexceptions.py | {
"start": 19454,
"end": 19929
class ____(_HTTPMove):
    """
    subclass of :class:`~_HTTPMove`

    This indicates that the requested resource resides permanently
    under a different URI and that the request method must not be
    changed.

    code: 308, title: Permanent Redirect
    """

    # 308 is the method-preserving counterpart of 301 (RFC 7538).
    code = 308
    title = 'Permanent Redirect'
############################################################
# 4xx client error
############################################################
| HTTPPermanentRedirect |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_project_forms.py | {
"start": 23392,
"end": 26128
class ____(TestCase):
    """Prevalidation of project-import forms for users inside an organization.

    Exercises three membership levels — organization owner, admin-team member
    and readonly-team member — against both the automatic and manual forms.
    """

    def setUp(self):
        # One user per access level, each with a connected GitHub account.
        self.user_owner = get(User)
        self.social_owner = get(
            SocialAccount, user=self.user_owner, provider=GitHubProvider.id
        )

        self.user_admin = get(User)
        self.social_admin = get(
            SocialAccount, user=self.user_admin, provider=GitHubProvider.id
        )

        self.user_readonly = get(User)
        self.social_readonly = get(
            SocialAccount, user=self.user_readonly, provider=GitHubProvider.id
        )

        # Organization owned by user_owner, with one admin team and one
        # readonly team.
        self.organization = get(
            Organization,
            owners=[self.user_owner],
            projects=[],
        )
        self.team_admin = get(
            Team,
            access="admin",
            organization=self.organization,
            members=[self.user_admin],
        )
        self.team_readonly = get(
            Team,
            access="readonly",
            organization=self.organization,
            members=[self.user_readonly],
        )

    def test_form_prevalidation_readonly_user(self):
        """Readonly members may not create projects: both forms must fail."""
        form_auto = ProjectAutomaticForm(user=self.user_readonly)
        form_manual = ProjectManualForm(user=self.user_readonly)

        # Test validation errors directly
        self.assertRaises(RichValidationError, form_auto.clean_prevalidation)
        self.assertRaises(RichValidationError, form_manual.clean_prevalidation)

        # Test downstream
        self.assertFalse(form_auto.is_valid())
        self.assertEqual(form_auto.errors, {NON_FIELD_ERRORS: mock.ANY})
        self.assertFalse(form_manual.is_valid())
        self.assertEqual(form_manual.errors, {NON_FIELD_ERRORS: mock.ANY})

    def test_form_prevalidation_admin_user(self):
        """Admin-team members pass prevalidation on both forms."""
        form_auto = ProjectAutomaticForm(user=self.user_admin)
        form_manual = ProjectManualForm(user=self.user_admin)

        # Test validation errors directly
        form_auto.clean_prevalidation()
        form_manual.clean_prevalidation()

        # Test downstream
        self.assertTrue(form_auto.is_valid())
        self.assertEqual(form_auto.errors, {})
        self.assertTrue(form_manual.is_valid())
        self.assertEqual(form_manual.errors, {})

    def test_form_prevalidation_owner_user(self):
        """Organization owners pass prevalidation on both forms."""
        form_auto = ProjectAutomaticForm(user=self.user_owner)
        form_manual = ProjectManualForm(user=self.user_owner)

        # Test validation errors directly
        form_auto.clean_prevalidation()
        form_manual.clean_prevalidation()

        # Test downstream
        self.assertTrue(form_auto.is_valid())
        self.assertEqual(form_auto.errors, {})
        self.assertTrue(form_manual.is_valid())
        self.assertEqual(form_manual.errors, {})
| TestProjectPrevalidationFormsWithOrganizations |
python | ray-project__ray | rllib/algorithms/impala/impala.py | {
"start": 2434,
"end": 23052
} | class ____(AlgorithmConfig):
"""Defines a configuration class from which an Impala can be built.
.. testcode::
from ray.rllib.algorithms.impala import IMPALAConfig
config = (
IMPALAConfig()
.environment("CartPole-v1")
.env_runners(num_env_runners=1)
.training(lr=0.0003, train_batch_size_per_learner=512)
.learners(num_learners=1)
)
# Build a Algorithm object from the config and run 1 training iteration.
algo = config.build()
algo.train()
del algo
.. testcode::
from ray.rllib.algorithms.impala import IMPALAConfig
from ray import tune
config = (
IMPALAConfig()
.environment("CartPole-v1")
.env_runners(num_env_runners=1)
.training(lr=tune.grid_search([0.0001, 0.0002]), grad_clip=20.0)
.learners(num_learners=1)
)
# Run with tune.
tune.Tuner(
"IMPALA",
param_space=config,
run_config=tune.RunConfig(stop={"training_iteration": 1}),
).fit()
"""
def __init__(self, algo_class=None):
"""Initializes a IMPALAConfig instance."""
self.exploration_config = { # @OldAPIstack
# The Exploration class to use. In the simplest case, this is the name
# (str) of any class present in the `rllib.utils.exploration` package.
# You can also provide the python class directly or the full location
# of your class (e.g. "ray.rllib.utils.exploration.epsilon_greedy.
# EpsilonGreedy").
"type": "StochasticSampling",
# Add constructor kwargs here (if any).
}
super().__init__(algo_class=algo_class or IMPALA)
# fmt: off
# __sphinx_doc_begin__
# IMPALA specific settings:
self.vtrace = True
self.vtrace_clip_rho_threshold = 1.0
self.vtrace_clip_pg_rho_threshold = 1.0
self.learner_queue_size = 3
self.timeout_s_sampler_manager = 0.0
self.timeout_s_aggregator_manager = 0.0
self.broadcast_interval = 1
self.num_gpu_loader_threads = 8
self.grad_clip = 40.0
# Note: Only when using enable_rl_module_and_learner=True can the clipping mode
# be configured by the user. On the old API stack, RLlib will always clip by
# global_norm, no matter the value of `grad_clip_by`.
self.grad_clip_by = "global_norm"
self.vf_loss_coeff = 0.5
self.entropy_coeff = 0.01
# Override some of AlgorithmConfig's default values with IMPALA-specific values.
self.num_learners = 1
self.num_aggregator_actors_per_learner = 0
self.rollout_fragment_length = 50
self.train_batch_size = 500 # @OldAPIstack
self.num_env_runners = 2
self.lr = 0.0005
self.min_time_s_per_iteration = 10
# __sphinx_doc_end__
# fmt: on
# IMPALA takes care of its own EnvRunner (weights, connector, metrics) synching.
self._dont_auto_sync_env_runner_states = True
# `.debugging()`
self._env_runners_only = False
self._skip_learners = False
self.lr_schedule = None # @OldAPIStack
self.entropy_coeff_schedule = None # @OldAPIStack
self.num_multi_gpu_tower_stacks = 1 # @OldAPIstack
self.minibatch_buffer_size = 1 # @OldAPIstack
self.replay_proportion = 0.0 # @OldAPIstack
self.replay_buffer_num_slots = 0 # @OldAPIstack
self.learner_queue_timeout = 300 # @OldAPIstack
self.opt_type = "adam" # @OldAPIstack
self.decay = 0.99 # @OldAPIstack
self.momentum = 0.0 # @OldAPIstack
self.epsilon = 0.1 # @OldAPIstack
self._separate_vf_optimizer = False # @OldAPIstack
self._lr_vf = 0.0005 # @OldAPIstack
self.num_gpus = 1 # @OldAPIstack
self._tf_policy_handles_more_than_one_loss = True # @OldAPIstack
# Deprecated settings.
self.num_aggregation_workers = DEPRECATED_VALUE
self.max_requests_in_flight_per_aggregator_worker = DEPRECATED_VALUE
@override(AlgorithmConfig)
def training(
self,
*,
vtrace: Optional[bool] = NotProvided,
vtrace_clip_rho_threshold: Optional[float] = NotProvided,
vtrace_clip_pg_rho_threshold: Optional[float] = NotProvided,
num_gpu_loader_threads: Optional[int] = NotProvided,
num_multi_gpu_tower_stacks: Optional[int] = NotProvided,
minibatch_buffer_size: Optional[int] = NotProvided,
replay_proportion: Optional[float] = NotProvided,
replay_buffer_num_slots: Optional[int] = NotProvided,
learner_queue_size: Optional[int] = NotProvided,
learner_queue_timeout: Optional[float] = NotProvided,
timeout_s_sampler_manager: Optional[float] = NotProvided,
timeout_s_aggregator_manager: Optional[float] = NotProvided,
broadcast_interval: Optional[int] = NotProvided,
grad_clip: Optional[float] = NotProvided,
opt_type: Optional[str] = NotProvided,
lr_schedule: Optional[List[List[Union[int, float]]]] = NotProvided,
decay: Optional[float] = NotProvided,
momentum: Optional[float] = NotProvided,
epsilon: Optional[float] = NotProvided,
vf_loss_coeff: Optional[float] = NotProvided,
entropy_coeff: Optional[LearningRateOrSchedule] = NotProvided,
entropy_coeff_schedule: Optional[List[List[Union[int, float]]]] = NotProvided,
_separate_vf_optimizer: Optional[bool] = NotProvided,
_lr_vf: Optional[float] = NotProvided,
# Deprecated args.
num_aggregation_workers=DEPRECATED_VALUE,
max_requests_in_flight_per_aggregator_worker=DEPRECATED_VALUE,
**kwargs,
) -> Self:
"""Sets the training related configuration.
Args:
vtrace: V-trace params (see vtrace_tf/torch.py).
vtrace_clip_rho_threshold:
vtrace_clip_pg_rho_threshold:
num_gpu_loader_threads: The number of GPU-loader threads (per Learner
worker), used to load incoming (CPU) batches to the GPU, if applicable.
The incoming batches are produced by each Learner's LearnerConnector
pipeline. After loading the batches on the GPU, the threads place them
on yet another queue for the Learner thread (only one per Learner
worker) to pick up and perform `forward_train/loss` computations.
num_multi_gpu_tower_stacks: For each stack of multi-GPU towers, how many
slots should we reserve for parallel data loading? Set this to >1 to
load data into GPUs in parallel. This will increase GPU memory usage
proportionally with the number of stacks.
Example:
2 GPUs and `num_multi_gpu_tower_stacks=3`:
- One tower stack consists of 2 GPUs, each with a copy of the
model/graph.
- Each of the stacks will create 3 slots for batch data on each of its
GPUs, increasing memory requirements on each GPU by 3x.
- This enables us to preload data into these stacks while another stack
is performing gradient calculations.
minibatch_buffer_size: How many train batches should be retained for
minibatching. This conf only has an effect if `num_epochs > 1`.
replay_proportion: Set >0 to enable experience replay. Saved samples will
be replayed with a p:1 proportion to new data samples.
replay_buffer_num_slots: Number of sample batches to store for replay.
The number of transitions saved total will be
(replay_buffer_num_slots * rollout_fragment_length).
learner_queue_size: Max queue size for train batches feeding into the
learner.
learner_queue_timeout: Wait for train batches to be available in minibatch
buffer queue this many seconds. This may need to be increased e.g. when
training with a slow environment.
timeout_s_sampler_manager: The timeout for waiting for sampling results
for workers -- typically if this is too low, the manager won't be able
to retrieve ready sampling results.
timeout_s_aggregator_manager: The timeout for waiting for replay worker
results -- typically if this is too low, the manager won't be able to
retrieve ready replay requests.
broadcast_interval: Number of training step calls before weights are
broadcasted to rollout workers that are sampled during any iteration.
grad_clip: If specified, clip the global norm of gradients by this amount.
opt_type: Either "adam" or "rmsprop".
lr_schedule: Learning rate schedule. In the format of
[[timestep, lr-value], [timestep, lr-value], ...]
Intermediary timesteps will be assigned to interpolated learning rate
values. A schedule should normally start from timestep 0.
decay: Decay setting for the RMSProp optimizer, in case `opt_type=rmsprop`.
momentum: Momentum setting for the RMSProp optimizer, in case
`opt_type=rmsprop`.
epsilon: Epsilon setting for the RMSProp optimizer, in case
`opt_type=rmsprop`.
vf_loss_coeff: Coefficient for the value function term in the loss function.
entropy_coeff: Coefficient for the entropy regularizer term in the loss
function.
entropy_coeff_schedule: Decay schedule for the entropy regularizer.
_separate_vf_optimizer: Set this to true to have two separate optimizers
optimize the policy-and value networks. Only supported for some
algorithms (APPO, IMPALA) on the old API stack.
_lr_vf: If _separate_vf_optimizer is True, define separate learning rate
for the value network.
Returns:
This updated AlgorithmConfig object.
"""
if num_aggregation_workers != DEPRECATED_VALUE:
deprecation_warning(
old="config.training(num_aggregation_workers=..)",
help="Aggregator workers are no longer supported on the old API "
"stack! To use aggregation (and GPU pre-loading) on the new API "
"stack, activate the new API stack, then set "
"`config.learners(num_aggregator_actors_per_learner=..)`. Good "
"choices are normally 1 or 2, but this depends on your overall "
"setup, especially your `EnvRunner` throughput.",
error=True,
)
if max_requests_in_flight_per_aggregator_worker != DEPRECATED_VALUE:
deprecation_warning(
old="config.training(max_requests_in_flight_per_aggregator_worker=..)",
help="Aggregator workers are no longer supported on the old API "
"stack! To use aggregation (and GPU pre-loading) on the new API "
"stack, activate the new API stack and THEN set "
"`config.learners(max_requests_in_flight_per_aggregator_actor=..)"
"`.",
error=True,
)
# Pass kwargs onto super's `training()` method.
super().training(**kwargs)
if vtrace is not NotProvided:
self.vtrace = vtrace
if vtrace_clip_rho_threshold is not NotProvided:
self.vtrace_clip_rho_threshold = vtrace_clip_rho_threshold
if vtrace_clip_pg_rho_threshold is not NotProvided:
self.vtrace_clip_pg_rho_threshold = vtrace_clip_pg_rho_threshold
if num_gpu_loader_threads is not NotProvided:
self.num_gpu_loader_threads = num_gpu_loader_threads
if num_multi_gpu_tower_stacks is not NotProvided:
self.num_multi_gpu_tower_stacks = num_multi_gpu_tower_stacks
if minibatch_buffer_size is not NotProvided:
self.minibatch_buffer_size = minibatch_buffer_size
if replay_proportion is not NotProvided:
self.replay_proportion = replay_proportion
if replay_buffer_num_slots is not NotProvided:
self.replay_buffer_num_slots = replay_buffer_num_slots
if learner_queue_size is not NotProvided:
self.learner_queue_size = learner_queue_size
if learner_queue_timeout is not NotProvided:
self.learner_queue_timeout = learner_queue_timeout
if broadcast_interval is not NotProvided:
self.broadcast_interval = broadcast_interval
if timeout_s_sampler_manager is not NotProvided:
self.timeout_s_sampler_manager = timeout_s_sampler_manager
if timeout_s_aggregator_manager is not NotProvided:
self.timeout_s_aggregator_manager = timeout_s_aggregator_manager
if grad_clip is not NotProvided:
self.grad_clip = grad_clip
if opt_type is not NotProvided:
self.opt_type = opt_type
if lr_schedule is not NotProvided:
self.lr_schedule = lr_schedule
if decay is not NotProvided:
self.decay = decay
if momentum is not NotProvided:
self.momentum = momentum
if epsilon is not NotProvided:
self.epsilon = epsilon
if vf_loss_coeff is not NotProvided:
self.vf_loss_coeff = vf_loss_coeff
if entropy_coeff is not NotProvided:
self.entropy_coeff = entropy_coeff
if entropy_coeff_schedule is not NotProvided:
self.entropy_coeff_schedule = entropy_coeff_schedule
if _separate_vf_optimizer is not NotProvided:
self._separate_vf_optimizer = _separate_vf_optimizer
if _lr_vf is not NotProvided:
self._lr_vf = _lr_vf
return self
def debugging(
self,
*,
_env_runners_only: Optional[bool] = NotProvided,
_skip_learners: Optional[bool] = NotProvided,
**kwargs,
) -> Self:
"""Sets the debugging related configuration.
Args:
_env_runners_only: If True, only run (remote) EnvRunner requests, discard
their episode/training data, but log their metrics results. Aggregator-
and Learner actors won't be used.
_skip_learners: If True, no `update` requests are sent to the LearnerGroup
and Learner actors. Only EnvRunners and aggregator actors (if
applicable) are used.
"""
super().debugging(**kwargs)
if _env_runners_only is not NotProvided:
self._env_runners_only = _env_runners_only
if _skip_learners is not NotProvided:
self._skip_learners = _skip_learners
return self
@override(AlgorithmConfig)
def validate(self) -> None:
# Call the super class' validation method first.
super().validate()
# IMPALA and APPO need vtrace (A3C Policies no longer exist).
if not self.vtrace:
self._value_error(
"IMPALA and APPO do NOT support vtrace=False anymore! Set "
"`config.training(vtrace=True)`."
)
# New API stack checks.
if self.enable_env_runner_and_connector_v2:
# Does NOT support aggregation workers yet or a mixin replay buffer.
if self.replay_ratio != 0.0:
self._value_error(
"The new API stack in combination with the new EnvRunner API "
"does NOT support a mixin replay buffer yet for "
f"{self} (set `config.replay_proportion` to 0.0)!"
)
# `lr_schedule` checking.
if self.lr_schedule is not None:
self._value_error(
"`lr_schedule` is deprecated and must be None! Use the "
"`lr` setting to setup a schedule."
)
# Entropy coeff schedule checking.
if self.entropy_coeff_schedule is not None:
self._value_error(
"`entropy_coeff_schedule` is deprecated and must be None! Use the "
"`entropy_coeff` setting to setup a schedule."
)
Scheduler.validate(
fixed_value_or_schedule=self.entropy_coeff,
setting_name="entropy_coeff",
description="entropy coefficient",
)
if self.minibatch_size is not None and not (
(self.minibatch_size % self.rollout_fragment_length == 0)
and self.minibatch_size <= self.total_train_batch_size
):
self._value_error(
f"`minibatch_size` ({self.minibatch_size}) must either be None "
"or a multiple of `rollout_fragment_length` "
f"({self.rollout_fragment_length}) while at the same time smaller "
"than or equal to `total_train_batch_size` "
f"({self.total_train_batch_size})!"
)
# Old API stack checks.
else:
if isinstance(self.entropy_coeff, float) and self.entropy_coeff < 0.0:
self._value_error("`entropy_coeff` must be >= 0.0")
# If two separate optimizers/loss terms used for tf, must also set
# `_tf_policy_handles_more_than_one_loss` to True.
if (
self.framework_str in ["tf", "tf2"]
and self._separate_vf_optimizer is True
and self._tf_policy_handles_more_than_one_loss is False
):
self._value_error(
"`_tf_policy_handles_more_than_one_loss` must be set to True, for "
"TFPolicy to support more than one loss term/optimizer! Try setting "
"config.training(_tf_policy_handles_more_than_one_loss=True)."
)
@property
def replay_ratio(self) -> float:
"""Returns replay ratio (between 0.0 and 1.0) based off self.replay_proportion.
Formula: ratio = 1 / proportion
"""
return (1 / self.replay_proportion) if self.replay_proportion > 0 else 0.0
@override(AlgorithmConfig)
def get_default_learner_class(self):
if self.framework_str == "torch":
from ray.rllib.algorithms.impala.torch.impala_torch_learner import (
IMPALATorchLearner,
)
return IMPALATorchLearner
elif self.framework_str in ["tf2", "tf"]:
raise ValueError(
"TensorFlow is no longer supported on the new API stack! "
"Use `framework='torch'`."
)
else:
raise ValueError(
f"The framework {self.framework_str} is not supported. "
"Use `framework='torch'`."
)
@override(AlgorithmConfig)
def get_default_rl_module_spec(self) -> RLModuleSpec:
if self.framework_str == "torch":
from ray.rllib.algorithms.ppo.torch.default_ppo_torch_rl_module import (
DefaultPPOTorchRLModule,
)
return RLModuleSpec(module_class=DefaultPPOTorchRLModule)
else:
raise ValueError(
f"The framework {self.framework_str} is not supported. "
"Use either 'torch' or 'tf2'."
)
@override(AlgorithmConfig)
def build_learner_connector(
self,
input_observation_space,
input_action_space,
device=None,
):
connector = super().build_learner_connector(
input_observation_space,
input_action_space,
device,
)
if self.add_default_connectors_to_learner_pipeline:
# Extend all episodes by one artificial timestep to allow the value function
# net to compute the bootstrap values (and add a mask to the batch to know,
# which slots to mask out).
connector.prepend(AddOneTsToEpisodesAndTruncate())
# Remove the NumpyToTensor connector if we have the GPULoaderThreads.
if self.num_aggregator_actors_per_learner > 0:
connector.remove(NumpyToTensor)
return connector
ImpalaConfig = IMPALAConfig
| IMPALAConfig |
python | huggingface__transformers | src/transformers/models/conditional_detr/modeling_conditional_detr.py | {
"start": 3521,
"end": 4655
class ____(Seq2SeqModelOutput):
    r"""
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
        Sequence of hidden-states at the output of the last layer of the decoder of the model.
    intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, sequence_length, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):
        Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a
        layernorm.
    reference_points (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, 2 (anchor points))`):
        Reference points (reference points of each layer of the decoder).
    """

    # Per-layer decoder activations; only populated when auxiliary losses are
    # enabled (see the docstring above).
    intermediate_hidden_states: Optional[torch.FloatTensor] = None
    # Per-layer decoder reference (anchor) points.
    reference_points: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`ConditionalDetrForObjectDetection`].
"""
)
# Copied from transformers.models.detr.modeling_detr.DetrObjectDetectionOutput with Detr->ConditionalDetr
| ConditionalDetrModelOutput |
python | getsentry__sentry | src/sentry/integrations/example/integration.py | {
"start": 8489,
"end": 8546
} | class ____(ExampleIntegration):
pass
| AliasedIntegration |
python | walkccc__LeetCode | solutions/1610. Maximum Number of Visible Points/1610.py | {
"start": 0,
"end": 619
} | class ____:
def visiblePoints(
self,
points: list[list[int]],
angle: int,
location: list[int],
) -> int:
posX, posY = location
maxVisible = 0
same = 0
A = []
for x, y in points:
if x == posX and y == posY:
same += 1
else:
A.append(math.atan2(y - posY, x - posX))
A.sort()
A = A + [a + 2.0 * math.pi for a in A]
angleInRadians = math.pi * (angle / 180)
l = 0
for r in range(len(A)):
while A[r] - A[l] > angleInRadians:
l += 1
maxVisible = max(maxVisible, r - l + 1)
return maxVisible + same
| Solution |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/test/steps/python_connectors.py | {
"start": 1230,
"end": 8129
} | class ____(Step, ABC):
"""An abstract class to run pytest tests and evaluate success or failure according to pytest logs."""
context: ConnectorTestContext
PYTEST_INI_FILE_NAME = "pytest.ini"
PYPROJECT_FILE_NAME = "pyproject.toml"
common_test_dependencies: List[str] = []
skipped_exit_code = 5
bind_to_docker_host = False
accept_extra_params = True
@property
def default_params(self) -> STEP_PARAMS:
"""Default pytest options.
Returns:
dict: The default pytest options.
"""
return super().default_params | {
"-s": [], # Disable capturing stdout/stderr in pytest
}
@property
@abstractmethod
def test_directory_name(self) -> str:
raise NotImplementedError("test_directory_name must be implemented in the child class.")
@property
def extra_dependencies_names(self) -> Sequence[str]:
if self.context.connector.is_using_poetry:
return ("dev",)
return ("dev", "tests")
async def _run(self, connector_under_test: Container) -> StepResult:
"""Run all pytest tests declared in the test directory of the connector code.
Args:
connector_under_test (Container): The connector under test container.
Returns:
StepResult: Failure or success of the unit tests with stdout and stdout.
"""
if not await self.check_if_tests_are_available(self.test_directory_name):
return self.skip(f"No {self.test_directory_name} directory found in the connector.")
test_config_file_name, test_config_file = await self.get_config_file_name_and_file()
test_environment = await self.install_testing_environment(
connector_under_test, test_config_file_name, test_config_file, self.extra_dependencies_names
)
pytest_command = self.get_pytest_command(test_config_file_name)
if self.bind_to_docker_host:
test_environment = await pipelines.dagger.actions.system.docker.with_bound_docker_host(self.context, test_environment)
test_execution = test_environment.with_exec(pytest_command)
return await self.get_step_result(test_execution)
def get_pytest_command(self, test_config_file_name: str) -> List[str]:
"""Get the pytest command to run.
Returns:
List[str]: The pytest command to run.
"""
cmd = ["pytest", self.test_directory_name, "-c", test_config_file_name] + self.params_as_cli_options
if self.context.connector.is_using_poetry:
return ["poetry", "run"] + cmd
return cmd
async def check_if_tests_are_available(self, test_directory_name: str) -> bool:
"""Check if the tests are available in the connector directory.
Returns:
bool: True if the tests are available.
"""
connector_dir = await self.context.get_connector_dir()
connector_dir_entries = await connector_dir.entries()
return test_directory_name in connector_dir_entries
async def get_config_file_name_and_file(self) -> Tuple[str, File]:
"""Get the config file name and file to use for pytest.
The order of priority is:
- pytest.ini file in the connector directory
- pyproject.toml file in the connector directory
- pyproject.toml file in the repository directory
Returns:
Tuple[str, File]: The config file name and file to use for pytest.
"""
connector_dir = await self.context.get_connector_dir()
connector_dir_entries = await connector_dir.entries()
if self.PYTEST_INI_FILE_NAME in connector_dir_entries:
config_file_name = self.PYTEST_INI_FILE_NAME
test_config = (await self.context.get_connector_dir(include=[self.PYTEST_INI_FILE_NAME])).file(self.PYTEST_INI_FILE_NAME)
self.logger.info(f"Found {self.PYTEST_INI_FILE_NAME}, using it for testing.")
elif self.PYPROJECT_FILE_NAME in connector_dir_entries:
config_file_name = self.PYPROJECT_FILE_NAME
test_config = (await self.context.get_connector_dir(include=[self.PYPROJECT_FILE_NAME])).file(self.PYPROJECT_FILE_NAME)
self.logger.info(f"Found {self.PYPROJECT_FILE_NAME} at connector level, using it for testing.")
else:
config_file_name = f"global_{self.PYPROJECT_FILE_NAME}"
test_config = (await self.context.get_repo_dir(include=[self.PYPROJECT_FILE_NAME])).file(self.PYPROJECT_FILE_NAME)
self.logger.info(f"Found {self.PYPROJECT_FILE_NAME} at repo level, using it for testing.")
return config_file_name, test_config
async def install_testing_environment(
self,
built_connector_container: Container,
test_config_file_name: str,
test_config_file: File,
extra_dependencies_names: Sequence[str],
) -> Container:
"""Install the connector with the extra dependencies in /test_environment.
Args:
extra_dependencies_names (List[str]): Extra dependencies to install.
Returns:
Container: The container with the test environment installed.
"""
user = await BuildConnectorImages.get_image_user(built_connector_container)
secret_mounting_function = await secrets.mounted_connector_secrets(self.context, "secrets", self.secrets, owner=user)
container_with_test_deps = (
# Install the connector python package in /test_environment with the extra dependencies
await pipelines.dagger.actions.python.common.with_python_connector_installed(
self.context,
# Reset the entrypoint to run non airbyte commands
built_connector_container.with_entrypoint([]),
str(self.context.connector.code_directory),
user,
additional_dependency_groups=extra_dependencies_names,
)
)
if self.common_test_dependencies:
container_with_test_deps = container_with_test_deps.with_user("root").with_exec(
["pip", "install"] + self.common_test_dependencies
)
container_with_test_deps = (
container_with_test_deps
# Mount the test config file
.with_mounted_file(test_config_file_name, test_config_file, owner=user)
# Mount the secrets
.with_(secret_mounting_function)
.with_env_variable("PYTHONPATH", ".")
# Make sure all files that were created or mounted under /airbyte are owned by the user
.with_user("root")
.with_exec(["chown", "-R", f"{user}:{user}", "/airbyte"])
.with_user(user)
)
await raise_if_not_user(container_with_test_deps, user)
return container_with_test_deps
| PytestStep |
python | kamyu104__LeetCode-Solutions | Python/count-k-reducible-numbers-less-than-n.py | {
"start": 828,
"end": 1931
} | class ____(object):
def countKReducibleNumbers(self, s, k):
"""
:type s: str
:type k: int
:rtype: int
"""
MOD = 10**9+7
fact, inv, inv_fact = [[1]*2 for _ in xrange(3)]
def nCr(n, k):
while len(inv) <= n: # lazy initialization
fact.append(fact[-1]*len(inv) % MOD)
inv.append(inv[MOD%len(inv)]*(MOD-MOD//len(inv)) % MOD) # https://cp-algorithms.com/algebra/module-inverse.html
inv_fact.append(inv_fact[-1]*inv[-1] % MOD)
return (fact[n]*inv_fact[n-k] % MOD) * inv_fact[k] % MOD
def popcount(x):
return bin(x).count('1')
while len(s)-1 >= len(cnt): # cached
cnt.append(cnt[popcount(len(cnt))]+1)
result = curr = 0
for i in xrange(len(s)):
if s[i] != '1':
continue
for c in xrange((len(s)-(i+1))+1):
if cnt[curr+c] < k:
result = (result+nCr(len(s)-(i+1), c))%MOD
curr += 1
return (result-1)%MOD
| Solution2 |
python | PrefectHQ__prefect | tests/cli/test_work_pool.py | {
"start": 17265,
"end": 17675
} | class ____:
async def test_pause(self, prefect_client, work_pool):
assert work_pool.is_paused is False
res = await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool pause {work_pool.name}",
)
assert res.exit_code == 0
client_res = await prefect_client.read_work_pool(work_pool.name)
assert client_res.is_paused is True
| TestPause |
python | spyder-ide__spyder | spyder/api/widgets/status.py | {
"start": 8297,
"end": 9982
} | class ____(StatusBarWidget):
"""
Base class for status bar widgets that update based on timers.
"""
def __init__(self, parent=None):
"""Base class for status bar widgets that update based on timers."""
self.timer = None # Needs to come before parent call
super().__init__(parent)
self._interval = 2000
# Widget setup
fm = self.label_value.fontMetrics()
self.label_value.setMinimumWidth(fm.width('000%'))
# Setup
self.timer = QTimer(self)
self.timer.timeout.connect(self.update_status)
self.timer.start(self._interval)
# ---- Qt methods
# -------------------------------------------------------------------------
def closeEvent(self, event):
self.timer.stop()
super().closeEvent(event)
def setVisible(self, value):
"""Stop timer if widget is not visible."""
if self.timer is not None:
if value:
self.timer.start(self._interval)
else:
self.timer.stop()
super().setVisible(value)
# ---- Public API
# -------------------------------------------------------------------------
def update_status(self):
"""Update status label widget, if widget is visible."""
if self.isVisible():
self.label_value.setText(self.get_value())
def set_interval(self, interval):
"""Set timer interval (ms)."""
self._interval = interval
if self.timer is not None:
self.timer.setInterval(interval)
def get_value(self):
"""Return formatted text value."""
raise NotImplementedError
| BaseTimerStatus |
python | joke2k__faker | tests/providers/test_phone_number.py | {
"start": 6660,
"end": 11876
} | class ____:
"""Test en_PH phone number provider methods"""
@classmethod
def setup_class(cls):
cls.mobile_number_pattern: Pattern = re.compile(r"^(?:0|\+63)(\d+)-\d{3}-\d{4}$")
cls.area2_landline_number_pattern: Pattern = re.compile(r"^(?:0|\+63)2-(\d{4})-\d{4}")
cls.non_area2_landline_number_pattern: Pattern = re.compile(r"^(?:0|\+63)(\d{2})-(\d{3})-\d{4}")
cls.globe_mobile_number_prefixes = EnPhPhoneNumberProvider.globe_mobile_number_prefixes
cls.smart_mobile_number_prefixes = EnPhPhoneNumberProvider.smart_mobile_number_prefixes
cls.sun_mobile_number_prefixes = EnPhPhoneNumberProvider.sun_mobile_number_prefixes
cls.mobile_number_prefixes = (
cls.globe_mobile_number_prefixes + cls.smart_mobile_number_prefixes + cls.sun_mobile_number_prefixes
)
cls.bayantel_landline_identifiers = EnPhPhoneNumberProvider.bayantel_landline_identifiers
cls.misc_landline_identifiers = EnPhPhoneNumberProvider.misc_landline_identifiers
cls.non_area2_landline_area_codes = EnPhPhoneNumberProvider.non_area2_landline_area_codes
def test_globe_mobile_number(self, faker, num_samples):
for _ in range(num_samples):
number = faker.globe_mobile_number()
match = self.mobile_number_pattern.match(number)
assert match and match.group(1) in self.globe_mobile_number_prefixes
def test_smart_mobile_number(self, faker, num_samples):
for _ in range(num_samples):
number = faker.smart_mobile_number()
match = self.mobile_number_pattern.match(number)
assert match and match.group(1) in self.smart_mobile_number_prefixes
def test_sun_mobile_number(self, faker, num_samples):
for _ in range(num_samples):
number = faker.sun_mobile_number()
match = self.mobile_number_pattern.match(number)
assert match and match.group(1) in self.sun_mobile_number_prefixes
def test_mobile_number(self, faker, num_samples):
for _ in range(num_samples):
number = faker.mobile_number()
match = self.mobile_number_pattern.match(number)
assert match and match.group(1) in self.mobile_number_prefixes
def test_globe_area2_landline_number(self, faker, num_samples):
for _ in range(num_samples):
number = faker.globe_area2_landline_number()
match = self.area2_landline_number_pattern.match(number)
assert match and match.group(1).startswith("7")
def test_pldt_area2_landline_number(self, faker, num_samples):
for _ in range(num_samples):
number = faker.pldt_area2_landline_number()
match = self.area2_landline_number_pattern.match(number)
assert match and match.group(1).startswith("8")
def test_bayantel_area2_landline_number(self, faker, num_samples):
for _ in range(num_samples):
number = faker.bayantel_area2_landline_number()
match = self.area2_landline_number_pattern.match(number)
assert match and match.group(1) in self.bayantel_landline_identifiers
def test_misc_area2_landline_number(self, faker, num_samples):
for _ in range(num_samples):
number = faker.misc_area2_landline_number()
match = self.area2_landline_number_pattern.match(number)
assert match and match.group(1) in self.misc_landline_identifiers
def test_area2_landline_number(self, faker, num_samples):
for _ in range(num_samples):
number = faker.area2_landline_number()
match = self.area2_landline_number_pattern.match(number)
assert match and any(
[
match.group(1).startswith("7"),
match.group(1).startswith("8"),
match.group(1) in self.bayantel_landline_identifiers,
match.group(1) in self.misc_landline_identifiers,
]
)
def test_non_area2_landline_number(self, faker, num_samples):
for _ in range(num_samples):
number = faker.non_area2_landline_number()
match = self.non_area2_landline_number_pattern.match(number)
assert match and match.group(1) in self.non_area2_landline_area_codes
def test_landline_number(self, faker, num_samples):
for _ in range(num_samples):
number = faker.landline_number()
area2_match = self.area2_landline_number_pattern.match(number)
non_area2_match = self.non_area2_landline_number_pattern.match(number)
assert area2_match or non_area2_match
if area2_match:
assert any(
[
area2_match.group(1).startswith("7"),
area2_match.group(1).startswith("8"),
area2_match.group(1) in self.bayantel_landline_identifiers,
area2_match.group(1) in self.misc_landline_identifiers,
]
)
elif non_area2_match:
assert non_area2_match.group(1) in self.non_area2_landline_area_codes
| TestEnPh |
python | spyder-ide__spyder | spyder/api/widgets/menus.py | {
"start": 17530,
"end": 18387
} | class ____(SpyderMenu):
"""
Options menu for PluginMainWidget.
"""
def render(self):
"""Render the menu's bottom section as expected."""
if self._dirty:
self.clear()
self._add_missing_actions()
bottom = OptionsMenuSections.Bottom
actions = []
for section in self._sections:
for (sec, action) in self._actions:
if sec == section and sec != bottom:
actions.append(action)
actions.append(MENU_SEPARATOR)
# Add bottom actions
for (sec, action) in self._actions:
if sec == bottom:
actions.append(action)
add_actions(self, actions)
self._set_icons()
self._dirty = False
| PluginMainWidgetOptionsMenu |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-upstage/llama_index/readers/upstage/document_parse.py | {
"start": 2557,
"end": 14643
} | class ____(BaseReader):
"""
Upstage Document Parse Reader.
To use, you should have the environment variable `UPSTAGE_API_KEY`
set with your API key or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from llama_index.readers.file import UpstageDocumentParseReader
reader = UpstageDocumentParseReader()
docs = reader.load_data("path/to/file.pdf")
"""
def __init__(
self,
api_key: Optional[str] = None,
base_url: str = DOCUMENT_PARSE_BASE_URL,
model: str = DOCUMENT_PARSE_DEFAULT_MODEL,
split: SplitType = "none",
ocr: OCR = "auto",
output_format: OutputFormat = "html",
coordinates: bool = True,
base64_encoding: List[Category] = [],
):
"""
Initializes an instance of the Upstage Document Parse Reader class.
Args:
api_key (str, optional): The API key for accessing the Upstage API.
Defaults to None, in which case it will be
fetched from the environment variable
`UPSTAGE_API_KEY`.
base_url (str, optional): The base URL for accessing the Upstage API.
split (SplitType, optional): The type of splitting to be applied.
Defaults to "none" (no splitting).
model (str): The model to be used for the document parse.
Defaults to "document-parse".
ocr (OCRMode, optional): Extract text from images in the document using OCR.
If the value is "force", OCR is used to extract
text from an image. If the value is "auto", text is
extracted from a PDF. (An error will occur if the
value is "auto" and the input is NOT in PDF format)
output_format (OutputFormat, optional): Format of the inference results.
coordinates (bool, optional): Whether to include the coordinates of the
OCR in the output.
base64_encoding (List[Category], optional): The category of the elements to
be encoded in base64.
"""
self.api_key = get_from_param_or_env(
"UPSTAGE_API_KEY", api_key, "UPSTAGE_API_KEY"
)
self.base_url = base_url
self.model = model
self.split = split
self.ocr = ocr
self.output_format = output_format
self.coordinates = coordinates
self.base64_encoding = base64_encoding
def _get_response(
self,
files: Dict,
) -> List:
"""
Sends a POST request to the API endpoint with the provided files and
returns the response.
Args:
files (dict): A dictionary containing the files to be sent in the request.
Returns:
dict: The JSON response from the API.
Raises:
ValueError: If there is an error in the API call.
"""
try:
headers = {
"Authorization": f"Bearer {self.api_key}",
}
response = requests.post(
self.base_url,
headers=headers,
files=files,
data={
"ocr": self.ocr,
"model": self.model,
"output_formats": f"['{self.output_format}']",
"coordinates": self.coordinates,
"base64_encoding": f"{self.base64_encoding}",
},
)
response.raise_for_status()
return response.json().get("elements", [])
except requests.HTTPError as e:
raise ValueError(f"HTTP error: {e.response.text}")
except requests.RequestException as e:
# Handle any request-related exceptions
raise ValueError(f"Failed to send request: {e}")
except json.JSONDecodeError as e:
# Handle JSON decode errors
raise ValueError(f"Failed to decode JSON response: {e}")
except Exception as e:
# Handle any other exceptions
raise ValueError(f"An error occurred: {e}")
def _split_and_request(
self,
full_docs: fitzDocument,
start_page: int,
num_pages: int,
) -> List:
"""
Splits the full pdf document into partial pages and sends a request to the
server.
Args:
full_docs (str): The full document to be split and requested.
start_page (int): The starting page number for splitting the document.
num_pages (int, optional): The number of pages to split the document
into.
Defaults to DEFAULT_NUMBER_OF_PAGE.
Returns:
response: The response from the server.
"""
with fitz.open() as chunk_pdf:
chunk_pdf.insert_pdf(
full_docs,
from_page=start_page,
to_page=start_page + num_pages - 1,
)
pdf_bytes = chunk_pdf.write()
with io.BytesIO(pdf_bytes) as buffer:
return self._get_response({"document": buffer})
def _element_document(self, element: Dict) -> Document:
"""
Converts an elements into a Document object.
Args:
element (Dict): The element to be converted into a Document object.
Returns:
Document: A Document object representing the element with its content
and metadata.
"""
extra_info = {
"page": element["page"],
"id": element["id"],
"output_format": self.output_format,
"split": self.split,
"category": element.get("category"),
}
if element.get("coordinates") is not None:
extra_info["coordinates"] = element.get("coordinates")
if element.get("base64_encoding") is not None:
extra_info["base64_encoding"] = element.get("base64_encoding")
return Document(
text=(parse_output(element, self.output_format)), extra_info=extra_info
)
def _page_document(self, elements: List) -> List[Document]:
"""
Combines elements with the same page number into a single Document object.
Args:
elements (List): A list of elements containing page numbers.
Returns:
List[Document]: A list of Document objects, each representing a page
with its content and metadata.
"""
_docs = []
pages = sorted({x["page"] for x in elements})
page_group = [
[element for element in elements if element["page"] == x] for x in pages
]
for group in page_group:
page_content = " ".join(
[parse_output(element, self.output_format) for element in group]
)
coordinates = [
element.get("coordinates")
for element in group
if element.get("coordinates") is not None
]
base64_encodings = [
element.get("base64_encoding")
for element in group
if element.get("base64_encoding") is not None
]
extra_info = {
"page": group[0]["page"],
"output_format": self.output_format,
"split": self.split,
}
if coordinates:
extra_info["coordinates"] = coordinates
if base64_encodings:
extra_info["base64_encodings"] = base64_encodings
_docs.append(
Document(
text=page_content.strip(),
extra_info=extra_info,
)
)
return _docs
def lazy_load_data(
self,
file_path: Union[str, Path, List[str], List[Path]],
) -> Iterable[Document]:
"""
Load data from a file or list of files lazily.
Args:
file_path (Union[str, Path, List[str], List[Path]]): The path or list of paths to the file(s) to load.
Returns:
List[Document]: A list of Document objects containing the loaded data.
Raises:
ValueError: If an invalid split type is provided or if file_path is required.
"""
# Check if the file path is a list of paths
if isinstance(file_path, list):
for path in file_path:
docs = self.load_data(path)
yield from docs
else:
num_pages = DEFAULT_NUMBER_OF_PAGE
if not file_path:
raise ValueError("file_path is required.")
validate_file_path(file_path)
full_docs = fitz.open(file_path)
number_of_pages = full_docs.page_count
if self.split == "none":
if full_docs.is_pdf:
result = ""
start_page = 0
for _ in range(number_of_pages):
if start_page >= number_of_pages:
break
elements = self._split_and_request(
full_docs, start_page, num_pages
)
for element in elements:
result += parse_output(element, self.output_format)
start_page += num_pages
else:
with open(file_path, "rb") as f:
elements = self._get_response({"document": f})
result = ""
for element in elements:
result += parse_output(element, self.output_format)
yield Document(
text=result,
extra_info={
"total_pages": number_of_pages,
"type": self.output_format,
"split": self.split,
},
)
elif self.split == "element":
if full_docs.is_pdf:
start_page = 0
for _ in range(number_of_pages):
if start_page >= number_of_pages:
break
elements = self._split_and_request(
full_docs, start_page, num_pages
)
for element in elements:
yield self._element_document(element)
start_page += num_pages
else:
with open(file_path, "rb") as f:
elements = self._get_response({"document": f})
for element in elements:
yield self._element_document(element)
elif self.split == "page":
if full_docs.is_pdf:
start_page = 0
for _ in range(number_of_pages):
if start_page >= number_of_pages:
break
elements = self._split_and_request(
full_docs, start_page, num_pages
)
yield from self._page_document(elements)
start_page += num_pages
else:
with open(file_path, "rb") as f:
elements = self._get_response({"document": f})
yield from self._page_document(elements)
else:
raise ValueError(f"Invalid split type: {self.split}")
| UpstageDocumentParseReader |
python | google__jax | tests/mosaic/gpu_test.py | {
"start": 153338,
"end": 154403
} | class ____(TestCase, jtu.JaxTestCase):
def test_profiler(self):
def body(ctx, input, result, scratch):
del scratch
with ctx.named_region("load"):
reg = mgpu.FragmentedArray.load_strided(input)
with ctx.named_region("store"):
reg.store_untiled(result)
dtype = jnp.bfloat16
shape = (128, 128)
jax_shape = jax.ShapeDtypeStruct(shape, dtype)
with tempfile.TemporaryDirectory() as tmpdir:
kernel = mgpu.as_gpu_kernel(
body,
grid=(1, 1, 1),
block=(128, 1, 1),
in_shape=(jax_shape),
out_shape=jax_shape,
smem_scratch_shape=[],
prof_spec=profiler.ProfilerSpec(1024, dump_path=tmpdir),
)
param = self.prng.uniform(-1, 1, shape).astype(dtype)
self.assertArraysEqual(kernel(param), param)
[name] = os.listdir(tmpdir)
with open(os.path.join(tmpdir, name)) as f:
data = f.read()
self.assertEqual(data.count('"name": "load"'), 2)
self.assertEqual(data.count('"name": "store"'), 2)
| ProfilerTest |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/tabular/base.py | {
"start": 314,
"end": 1659
} | class ____(BaseReader):
"""
CSV parser.
Args:
concat_rows (bool): whether to concatenate all rows into one document.
If set to False, a Document will be created for each row.
True by default.
"""
def __init__(self, *args: Any, concat_rows: bool = True, **kwargs: Any) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
self._concat_rows = concat_rows
def load_data(
self, file: Path, extra_info: Optional[Dict] = None
) -> List[Document]:
"""
Parse file.
Returns:
Union[str, List[str]]: a string or a List of strings.
"""
try:
import csv
except ImportError:
raise ImportError("csv module is required to read CSV files.")
text_list = []
with open(file) as fp:
csv_reader = csv.reader(fp)
for row in csv_reader:
text_list.append(", ".join(row))
metadata = {"filename": file.name, "extension": file.suffix}
if extra_info:
metadata = {**metadata, **extra_info}
if self._concat_rows:
return [Document(text="\n".join(text_list), metadata=metadata)]
else:
return [Document(text=text, metadata=metadata) for text in text_list]
| CSVReader |
python | numpy__numpy | numpy/testing/_private/utils.py | {
"start": 79206,
"end": 98732
} | class ____:
"""
Context manager and decorator doing much the same as
``warnings.catch_warnings``.
However, it also provides a filter mechanism to work around
https://bugs.python.org/issue4180.
This bug causes Python before 3.4 to not reliably show warnings again
after they have been ignored once (even within catch_warnings). It
means that no "ignore" filter can be used easily, since following
tests might need to see the warning. Additionally it allows easier
specificity for testing warnings and can be nested.
.. deprecated:: 2.4
This is deprecated. Use `warnings.filterwarnings` or
``pytest.filterwarnings`` instead.
Parameters
----------
forwarding_rule : str, optional
One of "always", "once", "module", or "location". Analogous to
the usual warnings module filter mode, it is useful to reduce
noise mostly on the outmost level. Unsuppressed and unrecorded
warnings will be forwarded based on this rule. Defaults to "always".
"location" is equivalent to the warnings "default", match by exact
location the warning warning originated from.
Notes
-----
Filters added inside the context manager will be discarded again
when leaving it. Upon entering all filters defined outside a
context will be applied automatically.
When a recording filter is added, matching warnings are stored in the
``log`` attribute as well as in the list returned by ``record``.
If filters are added and the ``module`` keyword is given, the
warning registry of this module will additionally be cleared when
applying it, entering the context, or exiting it. This could cause
warnings to appear a second time after leaving the context if they
were configured to be printed once (default) and were already
printed before the context was entered.
Nesting this context manager will work as expected when the
forwarding rule is "always" (default). Unfiltered and unrecorded
warnings will be passed out and be matched by the outer level.
On the outmost level they will be printed (or caught by another
warnings context). The forwarding rule argument can modify this
behaviour.
Like ``catch_warnings`` this context manager is not threadsafe.
Examples
--------
With a context manager::
with np.testing.suppress_warnings() as sup:
sup.filter(DeprecationWarning, "Some text")
sup.filter(module=np.ma.core)
log = sup.record(FutureWarning, "Does this occur?")
command_giving_warnings()
# The FutureWarning was given once, the filtered warnings were
# ignored. All other warnings abide outside settings (may be
# printed/error)
assert_(len(log) == 1)
assert_(len(sup.log) == 1) # also stored in log attribute
Or as a decorator::
sup = np.testing.suppress_warnings()
sup.filter(module=np.ma.core) # module must match exactly
@sup
def some_function():
# do something which causes a warning in np.ma.core
pass
"""
def __init__(self, forwarding_rule="always", _warn=True):
if _warn:
warnings.warn(
"NumPy warning suppression and assertion utilities are deprecated. "
"Use warnings.catch_warnings, warnings.filterwarnings, pytest.warns, "
"or pytest.filterwarnings instead. (Deprecated NumPy 2.4)",
DeprecationWarning, stacklevel=2)
self._entered = False
# Suppressions are either instance or defined inside one with block:
self._suppressions = []
if forwarding_rule not in {"always", "module", "once", "location"}:
raise ValueError("unsupported forwarding rule.")
self._forwarding_rule = forwarding_rule
def _clear_registries(self):
if hasattr(warnings, "_filters_mutated"):
# clearing the registry should not be necessary on new pythons,
# instead the filters should be mutated.
warnings._filters_mutated()
return
# Simply clear the registry, this should normally be harmless,
# note that on new pythons it would be invalidated anyway.
for module in self._tmp_modules:
if hasattr(module, "__warningregistry__"):
module.__warningregistry__.clear()
def _filter(self, category=Warning, message="", module=None, record=False):
if record:
record = [] # The log where to store warnings
else:
record = None
if self._entered:
if module is None:
warnings.filterwarnings(
"always", category=category, message=message)
else:
module_regex = module.__name__.replace('.', r'\.') + '$'
warnings.filterwarnings(
"always", category=category, message=message,
module=module_regex)
self._tmp_modules.add(module)
self._clear_registries()
self._tmp_suppressions.append(
(category, message, re.compile(message, re.I), module, record))
else:
self._suppressions.append(
(category, message, re.compile(message, re.I), module, record))
return record
def filter(self, category=Warning, message="", module=None):
"""
Add a new suppressing filter or apply it if the state is entered.
Parameters
----------
category : class, optional
Warning class to filter
message : string, optional
Regular expression matching the warning message.
module : module, optional
Module to filter for. Note that the module (and its file)
must match exactly and cannot be a submodule. This may make
it unreliable for external modules.
Notes
-----
When added within a context, filters are only added inside
the context and will be forgotten when the context is exited.
"""
self._filter(category=category, message=message, module=module,
record=False)
def record(self, category=Warning, message="", module=None):
"""
Append a new recording filter or apply it if the state is entered.
All warnings matching will be appended to the ``log`` attribute.
Parameters
----------
category : class, optional
Warning class to filter
message : string, optional
Regular expression matching the warning message.
module : module, optional
Module to filter for. Note that the module (and its file)
must match exactly and cannot be a submodule. This may make
it unreliable for external modules.
Returns
-------
log : list
A list which will be filled with all matched warnings.
Notes
-----
When added within a context, filters are only added inside
the context and will be forgotten when the context is exited.
"""
return self._filter(category=category, message=message, module=module,
record=True)
def __enter__(self):
if self._entered:
raise RuntimeError("cannot enter suppress_warnings twice.")
self._orig_show = warnings.showwarning
self._filters = warnings.filters
warnings.filters = self._filters[:]
self._entered = True
self._tmp_suppressions = []
self._tmp_modules = set()
self._forwarded = set()
self.log = [] # reset global log (no need to keep same list)
for cat, mess, _, mod, log in self._suppressions:
if log is not None:
del log[:] # clear the log
if mod is None:
warnings.filterwarnings(
"always", category=cat, message=mess)
else:
module_regex = mod.__name__.replace('.', r'\.') + '$'
warnings.filterwarnings(
"always", category=cat, message=mess,
module=module_regex)
self._tmp_modules.add(mod)
warnings.showwarning = self._showwarning
self._clear_registries()
return self
def __exit__(self, *exc_info):
warnings.showwarning = self._orig_show
warnings.filters = self._filters
self._clear_registries()
self._entered = False
del self._orig_show
del self._filters
def _showwarning(self, message, category, filename, lineno,
*args, use_warnmsg=None, **kwargs):
for cat, _, pattern, mod, rec in (
self._suppressions + self._tmp_suppressions)[::-1]:
if (issubclass(category, cat) and
pattern.match(message.args[0]) is not None):
if mod is None:
# Message and category match, either recorded or ignored
if rec is not None:
msg = WarningMessage(message, category, filename,
lineno, **kwargs)
self.log.append(msg)
rec.append(msg)
return
# Use startswith, because warnings strips the c or o from
# .pyc/.pyo files.
elif mod.__file__.startswith(filename):
# The message and module (filename) match
if rec is not None:
msg = WarningMessage(message, category, filename,
lineno, **kwargs)
self.log.append(msg)
rec.append(msg)
return
# There is no filter in place, so pass to the outside handler
# unless we should only pass it once
if self._forwarding_rule == "always":
if use_warnmsg is None:
self._orig_show(message, category, filename, lineno,
*args, **kwargs)
else:
self._orig_showmsg(use_warnmsg)
return
if self._forwarding_rule == "once":
signature = (message.args, category)
elif self._forwarding_rule == "module":
signature = (message.args, category, filename)
elif self._forwarding_rule == "location":
signature = (message.args, category, filename, lineno)
if signature in self._forwarded:
return
self._forwarded.add(signature)
if use_warnmsg is None:
self._orig_show(message, category, filename, lineno, *args,
**kwargs)
else:
self._orig_showmsg(use_warnmsg)
def __call__(self, func):
"""
Function decorator to apply certain suppressions to a whole
function.
"""
@wraps(func)
def new_func(*args, **kwargs):
with self:
return func(*args, **kwargs)
return new_func
@contextlib.contextmanager
def _assert_no_gc_cycles_context(name=None):
__tracebackhide__ = True # Hide traceback for py.test
# not meaningful to test if there is no refcounting
if not HAS_REFCOUNT:
yield
return
assert_(gc.isenabled())
gc.disable()
gc_debug = gc.get_debug()
try:
for i in range(100):
if gc.collect() == 0:
break
else:
raise RuntimeError(
"Unable to fully collect garbage - perhaps a __del__ method "
"is creating more reference cycles?")
gc.set_debug(gc.DEBUG_SAVEALL)
yield
# gc.collect returns the number of unreachable objects in cycles that
# were found -- we are checking that no cycles were created in the context
n_objects_in_cycles = gc.collect()
objects_in_cycles = gc.garbage[:]
finally:
del gc.garbage[:]
gc.set_debug(gc_debug)
gc.enable()
if n_objects_in_cycles:
name_str = f' when calling {name}' if name is not None else ''
raise AssertionError(
"Reference cycles were found{}: {} objects were collected, "
"of which {} are shown below:{}"
.format(
name_str,
n_objects_in_cycles,
len(objects_in_cycles),
''.join(
"\n {} object with id={}:\n {}".format(
type(o).__name__,
id(o),
pprint.pformat(o).replace('\n', '\n ')
) for o in objects_in_cycles
)
)
)
def assert_no_gc_cycles(*args, **kwargs):
"""
Fail if the given callable produces any reference cycles.
If called with all arguments omitted, may be used as a context manager::
with assert_no_gc_cycles():
do_something()
Parameters
----------
func : callable
The callable to test.
\\*args : Arguments
Arguments passed to `func`.
\\*\\*kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
Nothing. The result is deliberately discarded to ensure that all cycles
are found.
"""
if not args:
return _assert_no_gc_cycles_context()
func = args[0]
args = args[1:]
with _assert_no_gc_cycles_context(name=func.__name__):
func(*args, **kwargs)
def break_cycles():
"""
Break reference cycles by calling gc.collect
Objects can call other objects' methods (for instance, another object's
__del__) inside their own __del__. On PyPy, the interpreter only runs
between calls to gc.collect, so multiple calls are needed to completely
release all cycles.
"""
gc.collect()
if IS_PYPY:
# a few more, just to make sure all the finalizers are called
gc.collect()
gc.collect()
gc.collect()
gc.collect()
def requires_memory(free_bytes):
"""Decorator to skip a test if not enough memory is available"""
import pytest
def decorator(func):
@wraps(func)
def wrapper(*a, **kw):
msg = check_free_memory(free_bytes)
if msg is not None:
pytest.skip(msg)
try:
return func(*a, **kw)
except MemoryError:
# Probably ran out of memory regardless: don't regard as failure
pytest.xfail("MemoryError raised")
return wrapper
return decorator
def check_free_memory(free_bytes):
"""
Check whether `free_bytes` amount of memory is currently free.
Returns: None if enough memory available, otherwise error message
"""
env_var = 'NPY_AVAILABLE_MEM'
env_value = os.environ.get(env_var)
if env_value is not None:
try:
mem_free = _parse_size(env_value)
except ValueError as exc:
raise ValueError(f'Invalid environment variable {env_var}: {exc}')
msg = (f'{free_bytes / 1e9} GB memory required, but environment variable '
f'NPY_AVAILABLE_MEM={env_value} set')
else:
mem_free = _get_mem_available()
if mem_free is None:
msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM "
"environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run "
"the test.")
mem_free = -1
else:
free_bytes_gb = free_bytes / 1e9
mem_free_gb = mem_free / 1e9
msg = f'{free_bytes_gb} GB memory required, but {mem_free_gb} GB available'
return msg if mem_free < free_bytes else None
def _parse_size(size_str):
"""Convert memory size strings ('12 GB' etc.) to float"""
suffixes = {'': 1, 'b': 1,
'k': 1000, 'm': 1000**2, 'g': 1000**3, 't': 1000**4,
'kb': 1000, 'mb': 1000**2, 'gb': 1000**3, 'tb': 1000**4,
'kib': 1024, 'mib': 1024**2, 'gib': 1024**3, 'tib': 1024**4}
pipe_suffixes = "|".join(suffixes.keys())
size_re = re.compile(fr'^\s*(\d+|\d+\.\d+)\s*({pipe_suffixes})\s*$', re.I)
m = size_re.match(size_str.lower())
if not m or m.group(2) not in suffixes:
raise ValueError(f'value {size_str!r} not a valid size')
return int(float(m.group(1)) * suffixes[m.group(2)])
def _get_mem_available():
"""Return available memory in bytes, or None if unknown."""
try:
import psutil
return psutil.virtual_memory().available
except (ImportError, AttributeError):
pass
if sys.platform.startswith('linux'):
info = {}
with open('/proc/meminfo') as f:
for line in f:
p = line.split()
info[p[0].strip(':').lower()] = int(p[1]) * 1024
if 'memavailable' in info:
# Linux >= 3.14
return info['memavailable']
else:
return info['memfree'] + info['cached']
return None
def _no_tracing(func):
"""
Decorator to temporarily turn off tracing for the duration of a test.
Needed in tests that check refcounting, otherwise the tracing itself
influences the refcounts
"""
if not hasattr(sys, 'gettrace'):
return func
else:
@wraps(func)
def wrapper(*args, **kwargs):
original_trace = sys.gettrace()
try:
sys.settrace(None)
return func(*args, **kwargs)
finally:
sys.settrace(original_trace)
return wrapper
def _get_glibc_version():
try:
ver = os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1]
except Exception:
ver = '0.0'
return ver
_glibcver = _get_glibc_version()
_glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x)
def run_threaded(func, max_workers=8, pass_count=False,
pass_barrier=False, outer_iterations=1,
prepare_args=None):
"""Runs a function many times in parallel"""
for _ in range(outer_iterations):
with (concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
as tpe):
if prepare_args is None:
args = []
else:
args = prepare_args()
if pass_barrier:
barrier = threading.Barrier(max_workers)
args.append(barrier)
if pass_count:
all_args = [(func, i, *args) for i in range(max_workers)]
else:
all_args = [(func, *args) for i in range(max_workers)]
try:
futures = []
for arg in all_args:
futures.append(tpe.submit(*arg))
except RuntimeError as e:
import pytest
pytest.skip(f"Spawning {max_workers} threads failed with "
f"error {e!r} (likely due to resource limits on the "
"system running the tests)")
finally:
if len(futures) < max_workers and pass_barrier:
barrier.abort()
for f in futures:
f.result()
| suppress_warnings |
python | numba__numba | numba/core/typing/arraydecl.py | {
"start": 26287,
"end": 31495
} | class ____(ArrayAttribute):
key = types.NestedArray
def _expand_integer(ty):
"""
If *ty* is an integer, expand it to a machine int (like Numpy).
"""
if isinstance(ty, types.Integer):
if ty.signed:
return max(types.intp, ty)
else:
return max(types.uintp, ty)
elif isinstance(ty, types.Boolean):
return types.intp
else:
return ty
def generic_homog(self, args, kws):
if args:
raise NumbaAssertionError("args not supported")
if kws:
raise NumbaAssertionError("kws not supported")
return signature(self.this.dtype, recvr=self.this)
def generic_expand(self, args, kws):
assert not args
assert not kws
return signature(_expand_integer(self.this.dtype), recvr=self.this)
def sum_expand(self, args, kws):
"""
sum can be called with or without an axis parameter, and with or without
a dtype parameter
"""
pysig = None
if 'axis' in kws and 'dtype' not in kws:
def sum_stub(axis):
pass
pysig = utils.pysignature(sum_stub)
# rewrite args
args = list(args) + [kws['axis']]
elif 'dtype' in kws and 'axis' not in kws:
def sum_stub(dtype):
pass
pysig = utils.pysignature(sum_stub)
# rewrite args
args = list(args) + [kws['dtype']]
elif 'dtype' in kws and 'axis' in kws:
def sum_stub(axis, dtype):
pass
pysig = utils.pysignature(sum_stub)
# rewrite args
args = list(args) + [kws['axis'], kws['dtype']]
args_len = len(args)
assert args_len <= 2
if args_len == 0:
# No axis or dtype parameter so the return type of the summation is a scalar
# of the type of the array.
out = signature(_expand_integer(self.this.dtype), *args,
recvr=self.this)
elif args_len == 1 and 'dtype' not in kws:
# There is an axis parameter, either arg or kwarg
if self.this.ndim == 1:
# 1d reduces to a scalar
return_type = _expand_integer(self.this.dtype)
else:
# the return type of this summation is an array of dimension one
# less than the input array.
return_type = types.Array(dtype=_expand_integer(self.this.dtype),
ndim=self.this.ndim-1, layout='C')
out = signature(return_type, *args, recvr=self.this)
elif args_len == 1 and 'dtype' in kws:
# No axis parameter so the return type of the summation is a scalar
# of the dtype parameter.
from .npydecl import parse_dtype
dtype, = args
dtype = parse_dtype(dtype)
out = signature(dtype, *args, recvr=self.this)
elif args_len == 2:
# There is an axis and dtype parameter, either arg or kwarg
from .npydecl import parse_dtype
dtype = parse_dtype(args[1])
return_type = dtype
if self.this.ndim != 1:
# 1d reduces to a scalar, 2d and above reduce dim by 1
# the return type of this summation is an array of dimension one
# less than the input array.
return_type = types.Array(dtype=return_type,
ndim=self.this.ndim-1, layout='C')
out = signature(return_type, *args, recvr=self.this)
else:
pass
return out.replace(pysig=pysig)
def generic_expand_cumulative(self, args, kws):
if args:
raise NumbaAssertionError("args unsupported")
if kws:
raise NumbaAssertionError("kwargs unsupported")
assert isinstance(self.this, types.Array)
return_type = types.Array(dtype=_expand_integer(self.this.dtype),
ndim=1, layout='C')
return signature(return_type, recvr=self.this)
def generic_hetero_real(self, args, kws):
assert not args
assert not kws
if isinstance(self.this.dtype, (types.Integer, types.Boolean)):
return signature(types.float64, recvr=self.this)
return signature(self.this.dtype, recvr=self.this)
def generic_hetero_always_real(self, args, kws):
assert not args
assert not kws
if isinstance(self.this.dtype, (types.Integer, types.Boolean)):
return signature(types.float64, recvr=self.this)
if isinstance(self.this.dtype, types.Complex):
return signature(self.this.dtype.underlying_float, recvr=self.this)
return signature(self.this.dtype, recvr=self.this)
def generic_index(self, args, kws):
assert not args
assert not kws
return signature(types.intp, recvr=self.this)
def install_array_method(name, generic, prefer_literal=True):
my_attr = {"key": "array." + name, "generic": generic,
"prefer_literal": prefer_literal}
temp_class = type("Array_" + name, (AbstractTemplate,), my_attr)
def array_attribute_attachment(self, ary):
return types.BoundFunction(temp_class, ary)
setattr(ArrayAttribute, "resolve_" + name, array_attribute_attachment)
# Functions that return a machine-width type, to avoid overflows
install_array_method("sum", sum_expand, prefer_literal=True)
@infer_global(operator.eq)
| NestedArrayAttribute |
python | modin-project__modin | modin/core/computation/scope.py | {
"start": 3509,
"end": 11172
} | class ____:
"""
Object to hold scope, with a few bells to deal with some custom syntax
and contexts added by pandas.
Parameters
----------
level : int
global_dict : dict or None, optional, default None
local_dict : dict or Scope or None, optional, default None
resolvers : list-like or None, optional, default None
target : object
Attributes
----------
level : int
scope : DeepChainMap
target : object
temps : dict
"""
__slots__ = ["level", "scope", "target", "resolvers", "temps"]
level: int
scope: DeepChainMap
resolvers: DeepChainMap
temps: dict
def __init__(
self, level: int, global_dict=None, local_dict=None, resolvers=(), target=None
) -> None:
self.level = level + 1
# shallow copy because we don't want to keep filling this up with what
# was there before if there are multiple calls to Scope/_ensure_scope
self.scope = DeepChainMap(DEFAULT_GLOBALS.copy())
self.target = target
if isinstance(local_dict, Scope):
self.scope.update(local_dict.scope)
if local_dict.target is not None:
self.target = local_dict.target
self._update(local_dict.level)
frame = sys._getframe(self.level)
try:
# shallow copy here because we don't want to replace what's in
# scope when we align terms (alignment accesses the underlying
# numpy array of pandas objects)
scope_global = self.scope.new_child(
(global_dict if global_dict is not None else frame.f_globals).copy()
)
self.scope = DeepChainMap(scope_global)
if not isinstance(local_dict, Scope):
scope_local = self.scope.new_child(
(local_dict if local_dict is not None else frame.f_locals).copy()
)
self.scope = DeepChainMap(scope_local)
finally:
del frame
# assumes that resolvers are going from outermost scope to inner
if isinstance(local_dict, Scope):
resolvers += tuple(local_dict.resolvers.maps)
self.resolvers = DeepChainMap(*resolvers)
self.temps = {}
def __repr__(self) -> str:
scope_keys = _get_pretty_string(list(self.scope.keys()))
res_keys = _get_pretty_string(list(self.resolvers.keys()))
return f"{type(self).__name__}(scope={scope_keys}, resolvers={res_keys})"
@property
def has_resolvers(self) -> bool:
"""
Return whether we have any extra scope.
For example, DataFrames pass Their columns as resolvers during calls to
``DataFrame.eval()`` and ``DataFrame.query()``.
Returns
-------
hr : bool
"""
return bool(len(self.resolvers))
def resolve(self, key: str, is_local: bool):
"""
Resolve a variable name in a possibly local context.
Parameters
----------
key : str
A variable name
is_local : bool
Flag indicating whether the variable is local or not (prefixed with
the '@' symbol)
Returns
-------
value : object
The value of a particular variable
"""
try:
# only look for locals in outer scope
if is_local:
return self.scope[key]
# not a local variable so check in resolvers if we have them
if self.has_resolvers:
return self.resolvers[key]
# if we're here that means that we have no locals and we also have
# no resolvers
assert not is_local and not self.has_resolvers
return self.scope[key]
except KeyError:
try:
# last ditch effort we look in temporaries
# these are created when parsing indexing expressions
# e.g., df[df > 0]
return self.temps[key]
except KeyError as err:
raise UndefinedVariableError(key, is_local) from err
def swapkey(self, old_key: str, new_key: str, new_value=None) -> None:
"""
Replace a variable name, with a potentially new value.
Parameters
----------
old_key : str
Current variable name to replace
new_key : str
New variable name to replace `old_key` with
new_value : object
Value to be replaced along with the possible renaming
"""
if self.has_resolvers:
maps = self.resolvers.maps + self.scope.maps
else:
maps = self.scope.maps
maps.append(self.temps)
for mapping in maps:
if old_key in mapping:
mapping[new_key] = new_value
return
def _get_vars(self, stack, scopes: list[str]) -> None:
"""
Get specifically scoped variables from a list of stack frames.
Parameters
----------
stack : list
A list of stack frames as returned by ``inspect.stack()``
scopes : sequence of strings
A sequence containing valid stack frame attribute names that
evaluate to a dictionary. For example, ('locals', 'globals')
"""
variables = itertools.product(scopes, stack)
for scope, (frame, _, _, _, _, _) in variables:
try:
d = getattr(frame, f"f_{scope}")
self.scope = DeepChainMap(self.scope.new_child(d))
finally:
# won't remove it, but DECREF it
# in Py3 this probably isn't necessary since frame won't be
# scope after the loop
del frame
def _update(self, level: int) -> None:
"""
Update the current scope by going back `level` levels.
Parameters
----------
level : int
"""
sl = level + 1
# add sl frames to the scope starting with the
# most distant and overwriting with more current
# makes sure that we can capture variable scope
stack = inspect.stack()
try:
self._get_vars(stack[:sl], scopes=["locals"])
finally:
# explcitly delete the stack according to the advice here:
# https://docs.python.org/3/library/inspect.html#inspect.Traceback
del stack[:], stack
def add_tmp(self, value) -> str:
"""
Add a temporary variable to the scope.
Parameters
----------
value : object
An arbitrary object to be assigned to a temporary variable.
Returns
-------
str
The name of the temporary variable created.
"""
name = f"{type(value).__name__}_{self.ntemps}_{_raw_hex_id(self)}"
# add to inner most scope
assert name not in self.temps
self.temps[name] = value
assert name in self.temps
# only increment if the variable gets put in the scope
return name
@property
def ntemps(self) -> int:
"""The number of temporary variables in this scope"""
return len(self.temps)
@property
def full_scope(self) -> DeepChainMap:
"""
Return the full scope for use with passing to engines transparently
as a mapping.
Returns
-------
vars : DeepChainMap
All variables in this scope.
"""
maps = [self.temps] + self.resolvers.maps + self.scope.maps
return DeepChainMap(*maps)
| Scope |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1589465,
"end": 1589851
} | class ____(sgqlc.types.Union):
"""Types of memberships that can be restored for an Organization
member.
"""
__schema__ = github_schema
__types__ = (
OrgRestoreMemberMembershipOrganizationAuditEntryData,
OrgRestoreMemberMembershipRepositoryAuditEntryData,
OrgRestoreMemberMembershipTeamAuditEntryData,
)
| OrgRestoreMemberAuditEntryMembership |
python | eventlet__eventlet | eventlet/green/zmq.py | {
"start": 3863,
"end": 7058
} | class ____(__zmq__.Context):
"""Subclass of :class:`zmq.Context`
"""
def socket(self, socket_type):
"""Overridden method to ensure that the green version of socket is used
Behaves the same as :meth:`zmq.Context.socket`, but ensures
that a :class:`Socket` with all of its send and recv methods set to be
non-blocking is returned
"""
if self.closed:
raise ZMQError(ENOTSUP)
return Socket(self, socket_type)
def _wraps(source_fn):
"""A decorator that copies the __name__ and __doc__ from the given
function
"""
def wrapper(dest_fn):
dest_fn.__name__ = source_fn.__name__
dest_fn.__doc__ = source_fn.__doc__
return dest_fn
return wrapper
# Implementation notes: Each socket in 0mq contains a pipe that the
# background IO threads use to communicate with the socket. These
# events are important because they tell the socket when it is able to
# send and when it has messages waiting to be received. The read end
# of the events pipe is the same FD that getsockopt(zmq.FD) returns.
#
# Events are read from the socket's event pipe only on the thread that
# the 0mq context is associated with, which is the native thread the
# greenthreads are running on, and the only operations that cause the
# events to be read and processed are send(), recv() and
# getsockopt(zmq.EVENTS). This means that after doing any of these
# three operations, the ability of the socket to send or receive a
# message without blocking may have changed, but after the events are
# read the FD is no longer readable so the hub may not signal our
# listener.
#
# If we understand that after calling send() a message might be ready
# to be received and that after calling recv() a message might be able
# to be sent, what should we do next? There are two approaches:
#
# 1. Always wake the other thread if there is one waiting. This
# wakeup may be spurious because the socket might not actually be
# ready for a send() or recv(). However, if a thread is in a
# tight-loop successfully calling send() or recv() then the wakeups
# are naturally batched and there's very little cost added to each
# send/recv call.
#
# or
#
# 2. Call getsockopt(zmq.EVENTS) and explicitly check if the other
# thread should be woken up. This avoids spurious wake-ups but may
# add overhead because getsockopt will cause all events to be
# processed, whereas send and recv throttle processing
# events. Admittedly, all of the events will need to be processed
# eventually, but it is likely faster to batch the processing.
#
# Which approach is better? I have no idea.
#
# TODO:
# - Support MessageTrackers and make MessageTracker.wait green
_Socket = __zmq__.Socket
_Socket_recv = _Socket.recv
_Socket_send = _Socket.send
_Socket_send_multipart = _Socket.send_multipart
_Socket_recv_multipart = _Socket.recv_multipart
_Socket_send_string = _Socket.send_string
_Socket_recv_string = _Socket.recv_string
_Socket_send_pyobj = _Socket.send_pyobj
_Socket_recv_pyobj = _Socket.recv_pyobj
_Socket_send_json = _Socket.send_json
_Socket_recv_json = _Socket.recv_json
_Socket_getsockopt = _Socket.getsockopt
| Context |
python | anthropics__anthropic-sdk-python | src/anthropic/resources/beta/skills/skills.py | {
"start": 1501,
"end": 11897
} | class ____(SyncAPIResource):
@cached_property
def versions(self) -> Versions:
return Versions(self._client)
@cached_property
def with_raw_response(self) -> SkillsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
"""
return SkillsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> SkillsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
"""
return SkillsWithStreamingResponse(self)
def create(
self,
*,
display_title: Optional[str] | Omit = omit,
files: Optional[SequenceNotStr[FileTypes]] | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SkillCreateResponse:
"""
Create Skill
Args:
display_title: Display title for the skill.
This is a human-readable label that is not included in the prompt sent to the
model.
files: Files to upload for the skill.
All files must be in the same top-level directory and must include a SKILL.md
file at the root of that directory.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {
**strip_not_given(
{
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
if is_given(betas)
else not_given
}
),
**(extra_headers or {}),
}
extra_headers = {"anthropic-beta": "skills-2025-10-02", **(extra_headers or {})}
body = deepcopy_minimal(
{
"display_title": display_title,
"files": files,
}
)
extracted_files = extract_files(cast(Mapping[str, object], body), paths=[["files", "<array>"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers["Content-Type"] = "multipart/form-data"
return self._post(
"/v1/skills?beta=true",
body=maybe_transform(body, skill_create_params.SkillCreateParams),
files=extracted_files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=SkillCreateResponse,
)
def retrieve(
self,
skill_id: str,
*,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SkillRetrieveResponse:
"""
Get Skill
Args:
skill_id: Unique identifier for the skill.
The format and length of IDs may change over time.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not skill_id:
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
extra_headers = {
**strip_not_given(
{
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
if is_given(betas)
else not_given
}
),
**(extra_headers or {}),
}
extra_headers = {"anthropic-beta": "skills-2025-10-02", **(extra_headers or {})}
return self._get(
f"/v1/skills/{skill_id}?beta=true",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=SkillRetrieveResponse,
)
def list(
self,
*,
limit: int | Omit = omit,
page: Optional[str] | Omit = omit,
source: Optional[str] | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncPageCursor[SkillListResponse]:
"""
List Skills
Args:
limit: Number of results to return per page.
Maximum value is 100. Defaults to 20.
page: Pagination token for fetching a specific page of results.
Pass the value from a previous response's `next_page` field to get the next page
of results.
source: Filter skills by source.
If provided, only skills from the specified source will be returned:
- `"custom"`: only return user-created skills
- `"anthropic"`: only return Anthropic-created skills
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {
**strip_not_given(
{
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
if is_given(betas)
else not_given
}
),
**(extra_headers or {}),
}
extra_headers = {"anthropic-beta": "skills-2025-10-02", **(extra_headers or {})}
return self._get_api_list(
"/v1/skills?beta=true",
page=SyncPageCursor[SkillListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"limit": limit,
"page": page,
"source": source,
},
skill_list_params.SkillListParams,
),
),
model=SkillListResponse,
)
def delete(
self,
skill_id: str,
*,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SkillDeleteResponse:
"""
Delete Skill
Args:
skill_id: Unique identifier for the skill.
The format and length of IDs may change over time.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not skill_id:
raise ValueError(f"Expected a non-empty value for `skill_id` but received {skill_id!r}")
extra_headers = {
**strip_not_given(
{
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["skills-2025-10-02"]))
if is_given(betas)
else not_given
}
),
**(extra_headers or {}),
}
extra_headers = {"anthropic-beta": "skills-2025-10-02", **(extra_headers or {})}
return self._delete(
f"/v1/skills/{skill_id}?beta=true",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=SkillDeleteResponse,
)
| Skills |
python | wandb__wandb | wandb/apis/public/runs.py | {
"start": 5627,
"end": 16474
} | class ____(SizedPaginator["Run"]):
"""A lazy iterator of `Run` objects associated with a project and optional filter.
Runs are retrieved in pages from the W&B server as needed.
This is generally used indirectly using the `Api.runs` namespace.
Args:
client: (`wandb.apis.public.RetryingClient`) The API client to use
for requests.
entity: (str) The entity (username or team) that owns the project.
project: (str) The name of the project to fetch runs from.
filters: (Optional[Dict[str, Any]]) A dictionary of filters to apply
to the runs query.
order: (str) Order can be `created_at`, `heartbeat_at`, `config.*.value`, or `summary_metrics.*`.
If you prepend order with a + order is ascending (default).
If you prepend order with a - order is descending.
The default order is run.created_at from oldest to newest.
per_page: (int) The number of runs to fetch per request (default is 50).
include_sweeps: (bool) Whether to include sweep information in the
runs. Defaults to True.
Examples:
```python
from wandb.apis.public.runs import Runs
from wandb.apis.public import Api
# Get all runs from a project that satisfy the filters
filters = {"state": "finished", "config.optimizer": "adam"}
runs = Api().runs(
client=api.client,
entity="entity",
project="project_name",
filters=filters,
)
# Iterate over runs and print details
for run in runs:
print(f"Run name: {run.name}")
print(f"Run ID: {run.id}")
print(f"Run URL: {run.url}")
print(f"Run state: {run.state}")
print(f"Run config: {run.config}")
print(f"Run summary: {run.summary}")
print(f"Run history (samples=5): {run.history(samples=5)}")
print("----------")
# Get histories for all runs with specific metrics
histories_df = runs.histories(
samples=100, # Number of samples per run
keys=["loss", "accuracy"], # Metrics to fetch
x_axis="_step", # X-axis metric
format="pandas", # Return as pandas DataFrame
)
```
"""
def __init__(
self,
client: RetryingClient,
entity: str,
project: str,
filters: dict[str, Any] | None = None,
order: str = "+created_at",
per_page: int = 50,
include_sweeps: bool = True,
lazy: bool = True,
api: public.Api | None = None,
):
if not order:
order = "+created_at"
self.QUERY = _create_runs_query(
lazy=lazy,
with_internal_id=_server_provides_internal_id_for_project(client),
with_project_id=_server_provides_project_id_for_run(client),
)
self.entity = entity
self.project = project
self._project_internal_id = None
self.filters = filters or {}
self.order = order
self._sweeps = {}
self._include_sweeps = include_sweeps
self._lazy = lazy
self._api = api
variables = {
"project": self.project,
"entity": self.entity,
"order": self.order,
"filters": json.dumps(self.filters),
}
super().__init__(client, variables, per_page)
@property
def _length(self):
"""Returns the total number of runs.
<!-- lazydoc-ignore: internal -->
"""
if not self.last_response:
self._load_page()
return self.last_response["project"]["runCount"]
@property
def more(self) -> bool:
"""Returns whether there are more runs to fetch.
<!-- lazydoc-ignore: internal -->
"""
if self.last_response:
return bool(
self.last_response["project"]["runs"]["pageInfo"]["hasNextPage"]
)
else:
return True
@property
def cursor(self):
"""Returns the cursor position for pagination of runs results.
<!-- lazydoc-ignore: internal -->
"""
if self.last_response:
return self.last_response["project"]["runs"]["edges"][-1]["cursor"]
else:
return None
def convert_objects(self):
"""Converts GraphQL edges to Runs objects.
<!-- lazydoc-ignore: internal -->
"""
objs = []
if self.last_response is None or self.last_response.get("project") is None:
raise ValueError("Could not find project {}".format(self.project))
for run_response in self.last_response["project"]["runs"]["edges"]:
run = Run(
self.client,
self.entity,
self.project,
run_response["node"]["name"],
run_response["node"],
include_sweeps=self._include_sweeps,
lazy=self._lazy,
api=self._api,
)
objs.append(run)
if self._include_sweeps and run.sweep_name:
if run.sweep_name in self._sweeps:
sweep = self._sweeps[run.sweep_name]
else:
sweep = public.Sweep.get(
self.client,
self.entity,
self.project,
run.sweep_name,
withRuns=False,
)
self._sweeps[run.sweep_name] = sweep
if sweep is None:
continue
run.sweep = sweep
return objs
@normalize_exceptions
def histories(
self,
samples: int = 500,
keys: list[str] | None = None,
x_axis: str = "_step",
format: Literal["default", "pandas", "polars"] = "default",
stream: Literal["default", "system"] = "default",
):
"""Return sampled history metrics for all runs that fit the filters conditions.
Args:
samples: The number of samples to return per run
keys: Only return metrics for specific keys
x_axis: Use this metric as the xAxis defaults to _step
format: Format to return data in, options are "default", "pandas",
"polars"
stream: "default" for metrics, "system" for machine metrics
Returns:
pandas.DataFrame: If `format="pandas"`, returns a `pandas.DataFrame`
of history metrics.
polars.DataFrame: If `format="polars"`, returns a `polars.DataFrame`
of history metrics.
list of dicts: If `format="default"`, returns a list of dicts
containing history metrics with a `run_id` key.
"""
if format not in ("default", "pandas", "polars"):
raise ValueError(
f"Invalid format: {format}. Must be one of 'default', 'pandas', 'polars'"
)
histories = []
if format == "default":
for run in self:
history_data = run.history(
samples=samples,
keys=keys,
x_axis=x_axis,
pandas=False,
stream=stream,
)
if not history_data:
continue
for entry in history_data:
entry["run_id"] = run.id
histories.extend(history_data)
return histories
if format == "pandas":
pd = util.get_module(
"pandas", required="Exporting pandas DataFrame requires pandas"
)
for run in self:
history_data = run.history(
samples=samples,
keys=keys,
x_axis=x_axis,
pandas=False,
stream=stream,
)
if not history_data:
continue
df = pd.DataFrame.from_records(history_data)
df["run_id"] = run.id
histories.append(df)
if not histories:
return pd.DataFrame()
combined_df = pd.concat(histories)
combined_df.reset_index(drop=True, inplace=True)
# sort columns for consistency
combined_df = combined_df[(sorted(combined_df.columns))]
return combined_df
if format == "polars":
pl = util.get_module(
"polars", required="Exporting polars DataFrame requires polars"
)
for run in self:
history_data = run.history(
samples=samples,
keys=keys,
x_axis=x_axis,
pandas=False,
stream=stream,
)
if not history_data:
continue
df = pl.from_records(history_data)
df = df.with_columns(pl.lit(run.id).alias("run_id"))
histories.append(df)
if not histories:
return pl.DataFrame()
combined_df = pl.concat(histories, how="vertical")
# sort columns for consistency
combined_df = combined_df.select(sorted(combined_df.columns))
return combined_df
def __repr__(self):
return f"<Runs {self.entity}/{self.project}>"
def upgrade_to_full(self):
"""Upgrade this Runs collection from lazy to full mode.
This switches to fetching full run data and
upgrades any already-loaded Run objects to have full data.
Uses parallel loading for better performance when upgrading multiple runs.
"""
if not self._lazy:
return # Already in full mode
# Switch to full mode
self._lazy = False
# Regenerate query with full fragment
self.QUERY = _create_runs_query(
lazy=False,
with_internal_id=_server_provides_internal_id_for_project(self.client),
with_project_id=_server_provides_project_id_for_run(self.client),
)
# Upgrade any existing runs that have been loaded - use parallel loading for performance
lazy_runs = [run for run in self.objects if run._lazy]
if lazy_runs:
from concurrent.futures import ThreadPoolExecutor
# Limit workers to avoid overwhelming the server
max_workers = min(len(lazy_runs), 10)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
futures = [executor.submit(run.load_full_data) for run in lazy_runs]
# Wait for all to complete
for future in futures:
future.result()
| Runs |
python | pytorch__pytorch | torch/optim/lbfgs.py | {
"start": 8177,
"end": 20094
} | class ____(Optimizer):
"""Implements L-BFGS algorithm.
Heavily inspired by `minFunc
<https://www.cs.ubc.ca/~schmidtm/Software/minFunc.html>`_.
.. warning::
This optimizer doesn't support per-parameter options and parameter
groups (there can be only one).
.. warning::
Right now all parameters have to be on a single device. This will be
improved in the future.
.. note::
This is a very memory intensive optimizer (it requires additional
``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory
try reducing the history size, or use a different algorithm.
Args:
params (iterable): iterable of parameters to optimize. Parameters must be real.
lr (float, optional): learning rate (default: 1)
max_iter (int, optional): maximal number of iterations per optimization step
(default: 20)
max_eval (int, optional): maximal number of function evaluations per optimization
step (default: max_iter * 1.25).
tolerance_grad (float, optional): termination tolerance on first order optimality
(default: 1e-7).
tolerance_change (float, optional): termination tolerance on function
value/parameter changes (default: 1e-9).
history_size (int, optional): update history size (default: 100).
line_search_fn (str, optional): either 'strong_wolfe' or None (default: None).
"""
def __init__(
self,
params: ParamsT,
lr: Union[float, Tensor] = 1,
max_iter: int = 20,
max_eval: Optional[int] = None,
tolerance_grad: float = 1e-7,
tolerance_change: float = 1e-9,
history_size: int = 100,
line_search_fn: Optional[str] = None,
) -> None:
if isinstance(lr, Tensor) and lr.numel() != 1:
raise ValueError("Tensor lr must be 1-element")
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if max_eval is None:
max_eval = max_iter * 5 // 4
defaults = {
"lr": lr,
"max_iter": max_iter,
"max_eval": max_eval,
"tolerance_grad": tolerance_grad,
"tolerance_change": tolerance_change,
"history_size": history_size,
"line_search_fn": line_search_fn,
}
super().__init__(params, defaults)
if len(self.param_groups) != 1:
raise ValueError(
"LBFGS doesn't support per-parameter options (parameter groups)"
)
self._params = self.param_groups[0]["params"]
self._numel_cache = None
def _numel(self):
if self._numel_cache is None:
# pyrefly: ignore [bad-assignment]
self._numel_cache = sum(
2 * p.numel() if torch.is_complex(p) else p.numel()
for p in self._params
)
return self._numel_cache
def _gather_flat_grad(self):
views = []
for p in self._params:
if p.grad is None:
view = p.new(p.numel()).zero_()
elif p.grad.is_sparse:
view = p.grad.to_dense().view(-1)
else:
view = p.grad.view(-1)
if torch.is_complex(view):
view = torch.view_as_real(view).view(-1)
views.append(view)
return torch.cat(views, 0)
def _add_grad(self, step_size, update) -> None:
offset = 0
for p in self._params:
if torch.is_complex(p):
p = torch.view_as_real(p)
numel = p.numel()
# view as to avoid deprecated pointwise semantics
p.add_(update[offset : offset + numel].view_as(p), alpha=step_size)
offset += numel
if offset != self._numel():
raise AssertionError(f"Expected offset {offset} to equal {self._numel()}")
def _clone_param(self):
return [p.clone(memory_format=torch.contiguous_format) for p in self._params]
def _set_param(self, params_data) -> None:
for p, pdata in zip(self._params, params_data, strict=True):
p.copy_(pdata)
def _directional_evaluate(self, closure, x, t, d):
self._add_grad(t, d)
loss = float(closure())
flat_grad = self._gather_flat_grad()
self._set_param(x)
return loss, flat_grad
@torch.no_grad()
def step(self, closure): # type: ignore[override]
"""Perform a single optimization step.
Args:
closure (Callable): A closure that reevaluates the model
and returns the loss.
"""
if len(self.param_groups) != 1:
raise AssertionError(
f"Expected exactly one param_group, but got {len(self.param_groups)}"
)
# Make sure the closure is always called with grad enabled
closure = torch.enable_grad()(closure)
group = self.param_groups[0]
lr = _to_scalar(group["lr"])
max_iter = group["max_iter"]
max_eval = group["max_eval"]
tolerance_grad = group["tolerance_grad"]
tolerance_change = group["tolerance_change"]
line_search_fn = group["line_search_fn"]
history_size = group["history_size"]
# NOTE: LBFGS has only global state, but we register it as state for
# the first param, because this helps with casting in load_state_dict
state = self.state[self._params[0]]
state.setdefault("func_evals", 0)
state.setdefault("n_iter", 0)
# evaluate initial f(x) and df/dx
orig_loss = closure()
loss = float(orig_loss)
current_evals = 1
state["func_evals"] += 1
flat_grad = self._gather_flat_grad()
opt_cond = flat_grad.abs().max() <= tolerance_grad
# optimal condition
if opt_cond:
return orig_loss
# tensors cached in state (for tracing)
d = state.get("d")
t = state.get("t")
old_dirs = state.get("old_dirs")
old_stps = state.get("old_stps")
ro = state.get("ro")
H_diag = state.get("H_diag")
prev_flat_grad = state.get("prev_flat_grad")
prev_loss = state.get("prev_loss")
n_iter = 0
# optimize for a max of max_iter iterations
while n_iter < max_iter:
# keep track of nb of iterations
n_iter += 1
state["n_iter"] += 1
############################################################
# compute gradient descent direction
############################################################
if state["n_iter"] == 1:
d = flat_grad.neg()
old_dirs = []
old_stps = []
ro = []
H_diag = 1
else:
# do lbfgs update (update memory)
y = flat_grad.sub(prev_flat_grad)
s = d.mul(t)
ys = y.dot(s) # y*s
if ys > 1e-10:
# updating memory
if len(old_dirs) == history_size:
# shift history by one (limited-memory)
old_dirs.pop(0)
old_stps.pop(0)
ro.pop(0)
# store new direction/step
old_dirs.append(y)
old_stps.append(s)
ro.append(1.0 / ys)
# update scale of initial Hessian approximation
H_diag = ys / y.dot(y) # (y*y)
# compute the approximate (L-BFGS) inverse Hessian
# multiplied by the gradient
num_old = len(old_dirs)
if "al" not in state:
state["al"] = [None] * history_size
al = state["al"]
# iteration in L-BFGS loop collapsed to use just one buffer
q = flat_grad.neg()
for i in range(num_old - 1, -1, -1):
al[i] = old_stps[i].dot(q) * ro[i]
q.add_(old_dirs[i], alpha=-al[i])
# multiply by initial Hessian
# r/d is the final direction
d = r = torch.mul(q, H_diag)
for i in range(num_old):
be_i = old_dirs[i].dot(r) * ro[i]
r.add_(old_stps[i], alpha=al[i] - be_i)
if prev_flat_grad is None:
prev_flat_grad = flat_grad.clone(memory_format=torch.contiguous_format)
else:
prev_flat_grad.copy_(flat_grad)
prev_loss = loss
############################################################
# compute step length
############################################################
# reset initial guess for step size
if state["n_iter"] == 1:
t = min(1.0, 1.0 / flat_grad.abs().sum()) * lr
else:
t = lr
# directional derivative
gtd = flat_grad.dot(d) # g * d
# directional derivative is below tolerance
if gtd > -tolerance_change:
break
# optional line search: user function
ls_func_evals = 0
if line_search_fn is not None:
# perform line search, using user function
if line_search_fn != "strong_wolfe":
raise RuntimeError("only 'strong_wolfe' is supported")
else:
x_init = self._clone_param()
def obj_func(x, t, d):
return self._directional_evaluate(closure, x, t, d)
loss, flat_grad, t, ls_func_evals = _strong_wolfe(
obj_func,
x_init,
t,
d,
loss,
flat_grad,
gtd,
max_ls=max_eval - current_evals,
)
self._add_grad(t, d)
opt_cond = flat_grad.abs().max() <= tolerance_grad
else:
# no line search, simply move with fixed-step
self._add_grad(t, d)
if n_iter != max_iter:
# re-evaluate function only if not in last iteration
# the reason we do this: in a stochastic setting,
# no use to re-evaluate that function here
with torch.enable_grad():
loss = closure()
loss = float(loss)
flat_grad = self._gather_flat_grad()
opt_cond = flat_grad.abs().max() <= tolerance_grad
ls_func_evals = 1
# update func eval
current_evals += ls_func_evals
state["func_evals"] += ls_func_evals
############################################################
# check conditions
############################################################
if n_iter == max_iter:
break
if current_evals >= max_eval:
break
# optimal condition
if opt_cond:
break
# lack of progress
if d.mul(t).abs().max() <= tolerance_change:
break
if abs(loss - prev_loss) < tolerance_change:
break
state["d"] = d
state["t"] = t
state["old_dirs"] = old_dirs
state["old_stps"] = old_stps
state["ro"] = ro
state["H_diag"] = H_diag
state["prev_flat_grad"] = prev_flat_grad
state["prev_loss"] = prev_loss
return orig_loss
| LBFGS |
python | econchick__interrogate | tests/functional/sample/partial.py | {
"start": 1533,
"end": 1613
} | class ____:
def method_bar(self):
class InnerBar:
pass
| Bar |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.