language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | python-openxml__python-docx | src/docx/oxml/simpletypes.py | {
"start": 2173,
"end": 2484
} | class ____(BaseSimpleType):
@classmethod
def convert_from_xml(cls, str_value: str) -> str:
return str_value
@classmethod
def convert_to_xml(cls, value: str) -> str:
return value
@classmethod
def validate(cls, value: str):
cls.validate_string(value)
| BaseStringType |
python | redis__redis-py | redis/commands/search/querystring.py | {
"start": 2607,
"end": 2879
} | class ____(Value):
def __init__(self, lon, lat, radius, unit="km"):
self.lon = lon
self.lat = lat
self.radius = radius
self.unit = unit
def to_string(self):
return f"[{self.lon} {self.lat} {self.radius} {self.unit}]"
| GeoValue |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/backfill.py | {
"start": 4238,
"end": 6721
} | class ____(graphene.Enum):
REQUESTED = "REQUESTED"
COMPLETED = "COMPLETED"
FAILED = "FAILED"
CANCELED = "CANCELED"
CANCELING = "CANCELING"
COMPLETED_SUCCESS = "COMPLETED_SUCCESS"
COMPLETED_FAILED = "COMPLETED_FAILED"
FAILING = "FAILING"
class Meta:
name = "BulkActionStatus"
def to_dagster_run_status(self) -> GrapheneRunStatus:
"""Maps bulk action status to a run status for use with the RunsFeedEntry interface."""
# the pyright ignores are required because GrapheneBulkActionStatus.STATUS and GrapheneRunStatus.STATUS
# are interpreted as a Literal string during static analysis, but it is actually an Enum value
if self.args[0] == GrapheneBulkActionStatus.REQUESTED.value: # pyright: ignore[reportAttributeAccessIssue]
return GrapheneRunStatus.STARTED # pyright: ignore[reportReturnType]
if self.args[0] == GrapheneBulkActionStatus.COMPLETED.value: # pyright: ignore[reportAttributeAccessIssue]
return GrapheneRunStatus.SUCCESS # pyright: ignore[reportReturnType]
if self.args[0] == GrapheneBulkActionStatus.COMPLETED_SUCCESS.value: # pyright: ignore[reportAttributeAccessIssue]
return GrapheneRunStatus.SUCCESS # pyright: ignore[reportReturnType]
if self.args[0] == GrapheneBulkActionStatus.COMPLETED_FAILED.value: # pyright: ignore[reportAttributeAccessIssue]
return GrapheneRunStatus.FAILURE # pyright: ignore[reportReturnType]
if self.args[0] == GrapheneBulkActionStatus.FAILED.value: # pyright: ignore[reportAttributeAccessIssue]
return GrapheneRunStatus.FAILURE # pyright: ignore[reportReturnType]
if self.args[0] == GrapheneBulkActionStatus.CANCELED.value: # pyright: ignore[reportAttributeAccessIssue]
return GrapheneRunStatus.CANCELED # pyright: ignore[reportReturnType]
if self.args[0] == GrapheneBulkActionStatus.CANCELING.value: # pyright: ignore[reportAttributeAccessIssue]
return GrapheneRunStatus.CANCELING # pyright: ignore[reportReturnType]
if self.args[0] == GrapheneBulkActionStatus.FAILING.value: # pyright: ignore[reportAttributeAccessIssue]
return GrapheneRunStatus.FAILURE # pyright: ignore[reportReturnType]
raise DagsterInvariantViolationError(
f"Unable to convert BulkActionStatus {self.args[0]} to a RunStatus. {self.args[0]} is an unknown status."
)
| GrapheneBulkActionStatus |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-nebius/llama_index/llms/nebius/base.py | {
"start": 153,
"end": 1249
} | class ____(OpenAILike):
"""
Nebius AI Studio LLM class.
Examples:
`pip install llama-index-llms-nebius`
```python
from llama_index.llms.nebius import NebiusLLM
# set api key in env or in llm
# import os
# os.environ["NEBIUS_API_KEY"] = "your api key"
llm = NebiusLLM(
model="mistralai/Mixtral-8x7B-Instruct-v0.1", api_key="your_api_key"
)
resp = llm.complete("Who is Paul Graham?")
print(resp)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = DEFAULT_API_BASE,
is_chat_model: bool = True,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("NEBIUS_API_KEY", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "NebiusLLM"
| NebiusLLM |
python | pytorch__pytorch | test/test_mps.py | {
"start": 558186,
"end": 559389
} | class ____(TestCase):
def test_tensor_scalar_binops(self):
# Regression test for https://github.com/pytorch/pytorch/issues/119088
def to_cpu(x):
return x.cpu() if isinstance(x, torch.Tensor) else x
# Allocate tensors on mps
with torch.device("mps"):
inputs = [torch.rand(2, dtype=dtype) for dtype in [torch.float, torch.half, torch.cfloat]]
self.assertTrue(all(x.device.type == "mps" for x in inputs))
# Add scalars
inputs.extend([7, 3.14, 2 + 3j, torch.tensor(4 + 5j, dtype=torch.chalf)])
# Iterate over all permutations of types(int, float, complex, half) and ops (excluding div)
for x, y in itertools.product(inputs, inputs):
for op_name in ["__add__", "__sub__", "__mul__"]:
x_cpu, y_cpu = map(to_cpu, (x, y))
res = getattr(x, op_name)(y)
res_cpu = getattr(x_cpu, op_name)(y_cpu)
self.assertEqual(to_cpu(res), res_cpu, f"{op_name}({x}, {y}) produces different results {res} vs {res_cpu}")
# Copied from `TestCommon` in `test_ops.py`, just enough to duplicate the `test_numpy_ref` for MPS
@skipIfSlowGradcheckEnv
| TestComplex |
python | sympy__sympy | sympy/printing/codeprinter.py | {
"start": 1086,
"end": 1532
} | class ____(NotImplementedError):
"""
Raised if a _print_* method is missing in the Printer.
"""
pass
def _convert_python_lists(arg):
if isinstance(arg, list):
from sympy.codegen.abstract_nodes import List
return List(*(_convert_python_lists(e) for e in arg))
elif isinstance(arg, tuple):
return tuple(_convert_python_lists(e) for e in arg)
else:
return arg
| PrintMethodNotImplementedError |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-qdrant/unit_tests/test_destination.py | {
"start": 319,
"end": 3841
} | class ____(unittest.TestCase):
def setUp(self):
self.config = {
"processing": {"text_fields": ["str_col"], "metadata_fields": [], "chunk_size": 1000},
"embedding": {"mode": "openai", "openai_key": "mykey"},
"indexing": {
"url": "localhost:6333",
"auth_method": {
"mode": "no_auth",
},
"prefer_grpc": False,
"collection": "dummy-collection",
"distance_metric": "dot",
"text_field": "text",
},
}
self.config_model = ConfigModel.parse_obj(self.config)
self.logger = logging.getLogger("airbyte")
@patch("destination_qdrant.destination.QdrantIndexer")
@patch("destination_qdrant.destination.create_from_config")
def test_check(self, MockedEmbedder, MockedQdrantIndexer):
mock_embedder = Mock()
mock_indexer = Mock()
MockedEmbedder.return_value = mock_embedder
MockedQdrantIndexer.return_value = mock_indexer
mock_embedder.check.return_value = None
mock_indexer.check.return_value = None
destination = DestinationQdrant()
result = destination.check(self.logger, self.config)
self.assertEqual(result.status, Status.SUCCEEDED)
mock_embedder.check.assert_called_once()
mock_indexer.check.assert_called_once()
@patch("destination_qdrant.destination.QdrantIndexer")
@patch("destination_qdrant.destination.create_from_config")
def test_check_with_errors(self, MockedEmbedder, MockedQdrantIndexer):
mock_embedder = Mock()
mock_indexer = Mock()
MockedEmbedder.return_value = mock_embedder
MockedQdrantIndexer.return_value = mock_indexer
embedder_error_message = "Embedder Error"
indexer_error_message = "Indexer Error"
mock_embedder.check.return_value = embedder_error_message
mock_indexer.check.return_value = indexer_error_message
destination = DestinationQdrant()
result = destination.check(self.logger, self.config)
self.assertEqual(result.status, Status.FAILED)
self.assertEqual(result.message, f"{embedder_error_message}\n{indexer_error_message}")
mock_embedder.check.assert_called_once()
mock_indexer.check.assert_called_once()
@patch("destination_qdrant.destination.Writer")
@patch("destination_qdrant.destination.QdrantIndexer")
@patch("destination_qdrant.destination.create_from_config")
def test_write(self, MockedEmbedder, MockedQdrantIndexer, MockedWriter):
mock_embedder = Mock()
mock_indexer = Mock()
mock_writer = Mock()
MockedEmbedder.return_value = mock_embedder
MockedQdrantIndexer.return_value = mock_indexer
MockedWriter.return_value = mock_writer
mock_writer.write.return_value = []
configured_catalog = MagicMock()
input_messages = []
destination = DestinationQdrant()
list(destination.write(self.config, configured_catalog, input_messages))
MockedWriter.assert_called_once_with(self.config_model.processing, mock_indexer, mock_embedder, batch_size=256, omit_raw_text=False)
mock_writer.write.assert_called_once_with(configured_catalog, input_messages)
def test_spec(self):
destination = DestinationQdrant()
result = destination.spec()
self.assertIsInstance(result, ConnectorSpecification)
| TestDestinationQdrant |
python | numba__numba | numba/tests/test_unpickle_without_module.py | {
"start": 84,
"end": 1631
} | class ____(unittest.TestCase):
def test_loading_pickle_with_no_module(self):
"""Create a module that uses Numba, import a function from it.
Then delete the module and pickle the function. The function
should load from the pickle without a problem.
Note - This is a simplified version of how Numba might be used
on a distributed system using e.g. dask distributed. With the
pickle being sent to the worker but not the original module.
"""
# Source code for temporary module we will make
source = "\n".join(
[
"from numba import vectorize",
"@vectorize(['float64(float64)'])",
"def inc1(x):",
" return x + 1",
]
)
# Create a temporary directory and add it to path.
modname = "tmp_module"
with tempfile.TemporaryDirectory() as tmp_dir:
sys.path.append(tmp_dir)
# Create tmp_module.py in there with our source code above.
filename = Path(f"{tmp_dir}/{modname}.py")
f = open(filename, "a")
f.write(source)
f.close()
# Import the temporary module before file is deleted
from tmp_module import inc1
# Remove from imported libraries
del sys.modules[modname]
# Pickle function and assert that it loads correctly
pkl = pickle.dumps(inc1)
f = pickle.loads(pkl)
self.assertEqual(f(2), 3)
| TestUnpickleDeletedModule |
python | sympy__sympy | sympy/solvers/ode/systems.py | {
"start": 1559,
"end": 71467
} | class ____(NonlinearError):
"""Raised by linear_ode_to_matrix if the system is nonlinear"""
pass
def _simpsol(soleq):
lhs = soleq.lhs
sol = soleq.rhs
sol = powsimp(sol)
gens = list(sol.atoms(exp))
p = Poly(sol, *gens, expand=False)
gens = [factor_terms(g) for g in gens]
if not gens:
gens = p.gens
syms = [Symbol('C1'), Symbol('C2')]
terms = []
for coeff, monom in zip(p.coeffs(), p.monoms()):
coeff = piecewise_fold(coeff)
if isinstance(coeff, Piecewise):
coeff = Piecewise(*((ratsimp(coef).collect(syms), cond) for coef, cond in coeff.args))
else:
coeff = ratsimp(coeff).collect(syms)
monom = Mul(*(g ** i for g, i in zip(gens, monom)))
terms.append(coeff * monom)
return Eq(lhs, Add(*terms))
def _solsimp(e, t):
no_t, has_t = powsimp(expand_mul(e)).as_independent(t)
no_t = ratsimp(no_t)
has_t = has_t.replace(exp, lambda a: exp(factor_terms(a)))
return no_t + has_t
def simpsol(sol, wrt1, wrt2, doit=True):
"""Simplify solutions from dsolve_system."""
# The parameter sol is the solution as returned by dsolve (list of Eq).
#
# The parameters wrt1 and wrt2 are lists of symbols to be collected for
# with those in wrt1 being collected for first. This allows for collecting
# on any factors involving the independent variable before collecting on
# the integration constants or vice versa using e.g.:
#
# sol = simpsol(sol, [t], [C1, C2]) # t first, constants after
# sol = simpsol(sol, [C1, C2], [t]) # constants first, t after
#
# If doit=True (default) then simpsol will begin by evaluating any
# unevaluated integrals. Since many integrals will appear multiple times
# in the solutions this is done intelligently by computing each integral
# only once.
#
# The strategy is to first perform simple cancellation with factor_terms
# and then multiply out all brackets with expand_mul. This gives an Add
# with many terms.
#
# We split each term into two multiplicative factors dep and coeff where
# all factors that involve wrt1 are in dep and any constant factors are in
# coeff e.g.
# sqrt(2)*C1*exp(t) -> ( exp(t), sqrt(2)*C1 )
#
# The dep factors are simplified using powsimp to combine expanded
# exponential factors e.g.
# exp(a*t)*exp(b*t) -> exp(t*(a+b))
#
# We then collect coefficients for all terms having the same (simplified)
# dep. The coefficients are then simplified using together and ratsimp and
# lastly by recursively applying the same transformation to the
# coefficients to collect on wrt2.
#
# Finally the result is recombined into an Add and signsimp is used to
# normalise any minus signs.
def simprhs(rhs, rep, wrt1, wrt2):
"""Simplify the rhs of an ODE solution"""
if rep:
rhs = rhs.subs(rep)
rhs = factor_terms(rhs)
rhs = simp_coeff_dep(rhs, wrt1, wrt2)
rhs = signsimp(rhs)
return rhs
def simp_coeff_dep(expr, wrt1, wrt2=None):
"""Split rhs into terms, split terms into dep and coeff and collect on dep"""
add_dep_terms = lambda e: e.is_Add and e.has(*wrt1)
expandable = lambda e: e.is_Mul and any(map(add_dep_terms, e.args))
expand_func = lambda e: expand_mul(e, deep=False)
expand_mul_mod = lambda e: e.replace(expandable, expand_func)
terms = Add.make_args(expand_mul_mod(expr))
dc = {}
for term in terms:
coeff, dep = term.as_independent(*wrt1, as_Add=False)
# Collect together the coefficients for terms that have the same
# dependence on wrt1 (after dep is normalised using simpdep).
dep = simpdep(dep, wrt1)
# See if the dependence on t cancels out...
if dep is not S.One:
dep2 = factor_terms(dep)
if not dep2.has(*wrt1):
coeff *= dep2
dep = S.One
if dep not in dc:
dc[dep] = coeff
else:
dc[dep] += coeff
# Apply the method recursively to the coefficients but this time
# collecting on wrt2 rather than wrt2.
termpairs = ((simpcoeff(c, wrt2), d) for d, c in dc.items())
if wrt2 is not None:
termpairs = ((simp_coeff_dep(c, wrt2), d) for c, d in termpairs)
return Add(*(c * d for c, d in termpairs))
def simpdep(term, wrt1):
"""Normalise factors involving t with powsimp and recombine exp"""
def canonicalise(a):
# Using factor_terms here isn't quite right because it leads to things
# like exp(t*(1+t)) that we don't want. We do want to cancel factors
# and pull out a common denominator but ideally the numerator would be
# expressed as a standard form polynomial in t so we expand_mul
# and collect afterwards.
a = factor_terms(a)
num, den = a.as_numer_denom()
num = expand_mul(num)
num = collect(num, wrt1)
return num / den
term = powsimp(term)
rep = {e: exp(canonicalise(e.args[0])) for e in term.atoms(exp)}
term = term.subs(rep)
return term
def simpcoeff(coeff, wrt2):
"""Bring to a common fraction and cancel with ratsimp"""
coeff = together(coeff)
if coeff.is_polynomial():
# Calling ratsimp can be expensive. The main reason is to simplify
# sums of terms with irrational denominators so we limit ourselves
# to the case where the expression is polynomial in any symbols.
# Maybe there's a better approach...
coeff = ratsimp(radsimp(coeff))
# collect on secondary variables first and any remaining symbols after
if wrt2 is not None:
syms = list(wrt2) + list(ordered(coeff.free_symbols - set(wrt2)))
else:
syms = list(ordered(coeff.free_symbols))
coeff = collect(coeff, syms)
coeff = together(coeff)
return coeff
# There are often repeated integrals. Collect unique integrals and
# evaluate each once and then substitute into the final result to replace
# all occurrences in each of the solution equations.
if doit:
integrals = set().union(*(s.atoms(Integral) for s in sol))
rep = {i: factor_terms(i).doit() for i in integrals}
else:
rep = {}
sol = [Eq(s.lhs, simprhs(s.rhs, rep, wrt1, wrt2)) for s in sol]
return sol
def linodesolve_type(A, t, b=None):
r"""
Helper function that determines the type of the system of ODEs for solving with :obj:`sympy.solvers.ode.systems.linodesolve()`
Explanation
===========
This function takes in the coefficient matrix and/or the non-homogeneous term
and returns the type of the equation that can be solved by :obj:`sympy.solvers.ode.systems.linodesolve()`.
If the system is constant coefficient homogeneous, then "type1" is returned
If the system is constant coefficient non-homogeneous, then "type2" is returned
If the system is non-constant coefficient homogeneous, then "type3" is returned
If the system is non-constant coefficient non-homogeneous, then "type4" is returned
If the system has a non-constant coefficient matrix which can be factorized into constant
coefficient matrix, then "type5" or "type6" is returned for when the system is homogeneous or
non-homogeneous respectively.
Note that, if the system of ODEs is of "type3" or "type4", then along with the type,
the commutative antiderivative of the coefficient matrix is also returned.
If the system cannot be solved by :obj:`sympy.solvers.ode.systems.linodesolve()`, then
NotImplementedError is raised.
Parameters
==========
A : Matrix
Coefficient matrix of the system of ODEs
b : Matrix or None
Non-homogeneous term of the system. The default value is None.
If this argument is None, then the system is assumed to be homogeneous.
Examples
========
>>> from sympy import symbols, Matrix
>>> from sympy.solvers.ode.systems import linodesolve_type
>>> t = symbols("t")
>>> A = Matrix([[1, 1], [2, 3]])
>>> b = Matrix([t, 1])
>>> linodesolve_type(A, t)
{'antiderivative': None, 'type_of_equation': 'type1'}
>>> linodesolve_type(A, t, b=b)
{'antiderivative': None, 'type_of_equation': 'type2'}
>>> A_t = Matrix([[1, t], [-t, 1]])
>>> linodesolve_type(A_t, t)
{'antiderivative': Matrix([
[ t, t**2/2],
[-t**2/2, t]]), 'type_of_equation': 'type3'}
>>> linodesolve_type(A_t, t, b=b)
{'antiderivative': Matrix([
[ t, t**2/2],
[-t**2/2, t]]), 'type_of_equation': 'type4'}
>>> A_non_commutative = Matrix([[1, t], [t, -1]])
>>> linodesolve_type(A_non_commutative, t)
Traceback (most recent call last):
...
NotImplementedError:
The system does not have a commutative antiderivative, it cannot be
solved by linodesolve.
Returns
=======
Dict
Raises
======
NotImplementedError
When the coefficient matrix does not have a commutative antiderivative
See Also
========
linodesolve: Function for which linodesolve_type gets the information
"""
match = {}
is_non_constant = not _matrix_is_constant(A, t)
is_non_homogeneous = not (b is None or b.is_zero_matrix)
type = "type{}".format(int("{}{}".format(int(is_non_constant), int(is_non_homogeneous)), 2) + 1)
B = None
match.update({"type_of_equation": type, "antiderivative": B})
if is_non_constant:
B, is_commuting = _is_commutative_anti_derivative(A, t)
if not is_commuting:
raise NotImplementedError(filldedent('''
The system does not have a commutative antiderivative, it cannot be solved
by linodesolve.
'''))
match['antiderivative'] = B
match.update(_first_order_type5_6_subs(A, t, b=b))
return match
def _first_order_type5_6_subs(A, t, b=None):
match = {}
factor_terms = _factor_matrix(A, t)
is_homogeneous = b is None or b.is_zero_matrix
if factor_terms is not None:
t_ = Symbol("{}_".format(t))
F_t = integrate(factor_terms[0], t)
inverse = solveset(Eq(t_, F_t), t)
# Note: A simple way to check if a function is invertible
# or not.
if isinstance(inverse, FiniteSet) and not inverse.has(Piecewise)\
and len(inverse) == 1:
A = factor_terms[1]
if not is_homogeneous:
b = b / factor_terms[0]
b = b.subs(t, list(inverse)[0])
type = "type{}".format(5 + (not is_homogeneous))
match.update({'func_coeff': A, 'tau': F_t,
't_': t_, 'type_of_equation': type, 'rhs': b})
return match
def linear_ode_to_matrix(eqs, funcs, t, order):
r"""
Convert a linear system of ODEs to matrix form
Explanation
===========
Express a system of linear ordinary differential equations as a single
matrix differential equation [1]. For example the system $x' = x + y + 1$
and $y' = x - y$ can be represented as
.. math:: A_1 X' = A_0 X + b
where $A_1$ and $A_0$ are $2 \times 2$ matrices and $b$, $X$ and $X'$ are
$2 \times 1$ matrices with $X = [x, y]^T$.
Higher-order systems are represented with additional matrices e.g. a
second-order system would look like
.. math:: A_2 X'' = A_1 X' + A_0 X + b
Examples
========
>>> from sympy import Function, Symbol, Matrix, Eq
>>> from sympy.solvers.ode.systems import linear_ode_to_matrix
>>> t = Symbol('t')
>>> x = Function('x')
>>> y = Function('y')
We can create a system of linear ODEs like
>>> eqs = [
... Eq(x(t).diff(t), x(t) + y(t) + 1),
... Eq(y(t).diff(t), x(t) - y(t)),
... ]
>>> funcs = [x(t), y(t)]
>>> order = 1 # 1st order system
Now ``linear_ode_to_matrix`` can represent this as a matrix
differential equation.
>>> (A1, A0), b = linear_ode_to_matrix(eqs, funcs, t, order)
>>> A1
Matrix([
[1, 0],
[0, 1]])
>>> A0
Matrix([
[1, 1],
[1, -1]])
>>> b
Matrix([
[1],
[0]])
The original equations can be recovered from these matrices:
>>> eqs_mat = Matrix([eq.lhs - eq.rhs for eq in eqs])
>>> X = Matrix(funcs)
>>> A1 * X.diff(t) - A0 * X - b == eqs_mat
True
If the system of equations has a maximum order greater than the
order of the system specified, a ODEOrderError exception is raised.
>>> eqs = [Eq(x(t).diff(t, 2), x(t).diff(t) + x(t)), Eq(y(t).diff(t), y(t) + x(t))]
>>> linear_ode_to_matrix(eqs, funcs, t, 1)
Traceback (most recent call last):
...
ODEOrderError: Cannot represent system in 1-order form
If the system of equations is nonlinear, then ODENonlinearError is
raised.
>>> eqs = [Eq(x(t).diff(t), x(t) + y(t)), Eq(y(t).diff(t), y(t)**2 + x(t))]
>>> linear_ode_to_matrix(eqs, funcs, t, 1)
Traceback (most recent call last):
...
ODENonlinearError: The system of ODEs is nonlinear.
Parameters
==========
eqs : list of SymPy expressions or equalities
The equations as expressions (assumed equal to zero).
funcs : list of applied functions
The dependent variables of the system of ODEs.
t : symbol
The independent variable.
order : int
The order of the system of ODEs.
Returns
=======
The tuple ``(As, b)`` where ``As`` is a tuple of matrices and ``b`` is the
the matrix representing the rhs of the matrix equation.
Raises
======
ODEOrderError
When the system of ODEs have an order greater than what was specified
ODENonlinearError
When the system of ODEs is nonlinear
See Also
========
linear_eq_to_matrix: for systems of linear algebraic equations.
References
==========
.. [1] https://en.wikipedia.org/wiki/Matrix_differential_equation
"""
from sympy.solvers.solveset import linear_eq_to_matrix
if any(ode_order(eq, func) > order for eq in eqs for func in funcs):
msg = "Cannot represent system in {}-order form"
raise ODEOrderError(msg.format(order))
As = []
for o in range(order, -1, -1):
# Work from the highest derivative down
syms = [func.diff(t, o) for func in funcs]
# Ai is the matrix for X(t).diff(t, o)
# eqs is minus the remainder of the equations.
try:
Ai, b = linear_eq_to_matrix(eqs, syms)
except NonlinearError:
raise ODENonlinearError("The system of ODEs is nonlinear.")
Ai = Ai.applyfunc(expand_mul)
As.append(Ai if o == order else -Ai)
if o:
eqs = [-eq for eq in b]
else:
rhs = b
return As, rhs
def matrix_exp(A, t):
r"""
Matrix exponential $\exp(A*t)$ for the matrix ``A`` and scalar ``t``.
Explanation
===========
This functions returns the $\exp(A*t)$ by doing a simple
matrix multiplication:
.. math:: \exp(A*t) = P * expJ * P^{-1}
where $expJ$ is $\exp(J*t)$. $J$ is the Jordan normal
form of $A$ and $P$ is matrix such that:
.. math:: A = P * J * P^{-1}
The matrix exponential $\exp(A*t)$ appears in the solution of linear
differential equations. For example if $x$ is a vector and $A$ is a matrix
then the initial value problem
.. math:: \frac{dx(t)}{dt} = A \times x(t), x(0) = x0
has the unique solution
.. math:: x(t) = \exp(A t) x0
Examples
========
>>> from sympy import Symbol, Matrix, pprint
>>> from sympy.solvers.ode.systems import matrix_exp
>>> t = Symbol('t')
We will consider a 2x2 matrix for comupting the exponential
>>> A = Matrix([[2, -5], [2, -4]])
>>> pprint(A)
[2 -5]
[ ]
[2 -4]
Now, exp(A*t) is given as follows:
>>> pprint(matrix_exp(A, t))
[ -t -t -t ]
[3*e *sin(t) + e *cos(t) -5*e *sin(t) ]
[ ]
[ -t -t -t ]
[ 2*e *sin(t) - 3*e *sin(t) + e *cos(t)]
Parameters
==========
A : Matrix
The matrix $A$ in the expression $\exp(A*t)$
t : Symbol
The independent variable
See Also
========
matrix_exp_jordan_form: For exponential of Jordan normal form
References
==========
.. [1] https://en.wikipedia.org/wiki/Jordan_normal_form
.. [2] https://en.wikipedia.org/wiki/Matrix_exponential
"""
P, expJ = matrix_exp_jordan_form(A, t)
return P * expJ * P.inv()
def matrix_exp_jordan_form(A, t):
r"""
Matrix exponential $\exp(A*t)$ for the matrix *A* and scalar *t*.
Explanation
===========
Returns the Jordan form of the $\exp(A*t)$ along with the matrix $P$ such that:
.. math::
\exp(A*t) = P * expJ * P^{-1}
Examples
========
>>> from sympy import Matrix, Symbol
>>> from sympy.solvers.ode.systems import matrix_exp, matrix_exp_jordan_form
>>> t = Symbol('t')
We will consider a 2x2 defective matrix. This shows that our method
works even for defective matrices.
>>> A = Matrix([[1, 1], [0, 1]])
It can be observed that this function gives us the Jordan normal form
and the required invertible matrix P.
>>> P, expJ = matrix_exp_jordan_form(A, t)
Here, it is shown that P and expJ returned by this function is correct
as they satisfy the formula: P * expJ * P_inverse = exp(A*t).
>>> P * expJ * P.inv() == matrix_exp(A, t)
True
Parameters
==========
A : Matrix
The matrix $A$ in the expression $\exp(A*t)$
t : Symbol
The independent variable
References
==========
.. [1] https://en.wikipedia.org/wiki/Defective_matrix
.. [2] https://en.wikipedia.org/wiki/Jordan_matrix
.. [3] https://en.wikipedia.org/wiki/Jordan_normal_form
"""
N, M = A.shape
if N != M:
raise ValueError('Needed square matrix but got shape (%s, %s)' % (N, M))
elif A.has(t):
raise ValueError('Matrix A should not depend on t')
def jordan_chains(A):
'''Chains from Jordan normal form analogous to M.eigenvects().
Returns a dict with eignevalues as keys like:
{e1: [[v111,v112,...], [v121, v122,...]], e2:...}
where vijk is the kth vector in the jth chain for eigenvalue i.
'''
P, blocks = A.jordan_cells()
basis = [P[:,i] for i in range(P.shape[1])]
n = 0
chains = {}
for b in blocks:
eigval = b[0, 0]
size = b.shape[0]
if eigval not in chains:
chains[eigval] = []
chains[eigval].append(basis[n:n+size])
n += size
return chains
eigenchains = jordan_chains(A)
# Needed for consistency across Python versions
eigenchains_iter = sorted(eigenchains.items(), key=default_sort_key)
isreal = not A.has(I)
blocks = []
vectors = []
seen_conjugate = set()
for e, chains in eigenchains_iter:
for chain in chains:
n = len(chain)
if isreal and e != e.conjugate() and e.conjugate() in eigenchains:
if e in seen_conjugate:
continue
seen_conjugate.add(e.conjugate())
exprt = exp(re(e) * t)
imrt = im(e) * t
imblock = Matrix([[cos(imrt), sin(imrt)],
[-sin(imrt), cos(imrt)]])
expJblock2 = Matrix(n, n, lambda i,j:
imblock * t**(j-i) / factorial(j-i) if j >= i
else zeros(2, 2))
expJblock = Matrix(2*n, 2*n, lambda i,j: expJblock2[i//2,j//2][i%2,j%2])
blocks.append(exprt * expJblock)
for i in range(n):
vectors.append(re(chain[i]))
vectors.append(im(chain[i]))
else:
vectors.extend(chain)
fun = lambda i,j: t**(j-i)/factorial(j-i) if j >= i else 0
expJblock = Matrix(n, n, fun)
blocks.append(exp(e * t) * expJblock)
expJ = Matrix.diag(*blocks)
P = Matrix(N, N, lambda i,j: vectors[j][i])
return P, expJ
# Note: To add a docstring example with tau
def linodesolve(A, t, b=None, B=None, type="auto", doit=False,
tau=None):
r"""
System of n equations linear first-order differential equations
Explanation
===========
This solver solves the system of ODEs of the following form:
.. math::
X'(t) = A(t) X(t) + b(t)
Here, $A(t)$ is the coefficient matrix, $X(t)$ is the vector of n independent variables,
$b(t)$ is the non-homogeneous term and $X'(t)$ is the derivative of $X(t)$
Depending on the properties of $A(t)$ and $b(t)$, this solver evaluates the solution
differently.
When $A(t)$ is constant coefficient matrix and $b(t)$ is zero vector i.e. system is homogeneous,
the system is "type1". The solution is:
.. math::
X(t) = \exp(A t) C
Here, $C$ is a vector of constants and $A$ is the constant coefficient matrix.
When $A(t)$ is constant coefficient matrix and $b(t)$ is non-zero i.e. system is non-homogeneous,
the system is "type2". The solution is:
.. math::
X(t) = e^{A t} ( \int e^{- A t} b \,dt + C)
When $A(t)$ is coefficient matrix such that its commutative with its antiderivative $B(t)$ and
$b(t)$ is a zero vector i.e. system is homogeneous, the system is "type3". The solution is:
.. math::
X(t) = \exp(B(t)) C
When $A(t)$ is commutative with its antiderivative $B(t)$ and $b(t)$ is non-zero i.e. system is
non-homogeneous, the system is "type4". The solution is:
.. math::
X(t) = e^{B(t)} ( \int e^{-B(t)} b(t) \,dt + C)
When $A(t)$ is a coefficient matrix such that it can be factorized into a scalar and a constant
coefficient matrix:
.. math::
A(t) = f(t) * A
Where $f(t)$ is a scalar expression in the independent variable $t$ and $A$ is a constant matrix,
then we can do the following substitutions:
.. math::
tau = \int f(t) dt, X(t) = Y(tau), b(t) = b(f^{-1}(tau))
Here, the substitution for the non-homogeneous term is done only when its non-zero.
Using these substitutions, our original system becomes:
.. math::
Y'(tau) = A * Y(tau) + b(tau)/f(tau)
The above system can be easily solved using the solution for "type1" or "type2" depending
on the homogeneity of the system. After we get the solution for $Y(tau)$, we substitute the
solution for $tau$ as $t$ to get back $X(t)$
.. math::
X(t) = Y(tau)
Systems of "type5" and "type6" have a commutative antiderivative but we use this solution
because its faster to compute.
The final solution is the general solution for all the four equations since a constant coefficient
matrix is always commutative with its antidervative.
An additional feature of this function is, if someone wants to substitute for value of the independent
variable, they can pass the substitution `tau` and the solution will have the independent variable
substituted with the passed expression(`tau`).
Parameters
==========
A : Matrix
Coefficient matrix of the system of linear first order ODEs.
t : Symbol
Independent variable in the system of ODEs.
b : Matrix or None
Non-homogeneous term in the system of ODEs. If None is passed,
a homogeneous system of ODEs is assumed.
B : Matrix or None
Antiderivative of the coefficient matrix. If the antiderivative
is not passed and the solution requires the term, then the solver
would compute it internally.
type : String
Type of the system of ODEs passed. Depending on the type, the
solution is evaluated. The type values allowed and the corresponding
system it solves are: "type1" for constant coefficient homogeneous
"type2" for constant coefficient non-homogeneous, "type3" for non-constant
coefficient homogeneous, "type4" for non-constant coefficient non-homogeneous,
"type5" and "type6" for non-constant coefficient homogeneous and non-homogeneous
systems respectively where the coefficient matrix can be factorized to a constant
coefficient matrix.
The default value is "auto" which will let the solver decide the correct type of
the system passed.
doit : Boolean
Evaluate the solution if True, default value is False
tau: Expression
Used to substitute for the value of `t` after we get the solution of the system.
Examples
========
To solve the system of ODEs using this function directly, several things must be
done in the right order. Wrong inputs to the function will lead to incorrect results.
>>> from sympy import symbols, Function, Eq
>>> from sympy.solvers.ode.systems import canonical_odes, linear_ode_to_matrix, linodesolve, linodesolve_type
>>> from sympy.solvers.ode.subscheck import checkodesol
>>> f, g = symbols("f, g", cls=Function)
>>> x, a = symbols("x, a")
>>> funcs = [f(x), g(x)]
>>> eqs = [Eq(f(x).diff(x) - f(x), a*g(x) + 1), Eq(g(x).diff(x) + g(x), a*f(x))]
Here, it is important to note that before we derive the coefficient matrix, it is
important to get the system of ODEs into the desired form. For that we will use
:obj:`sympy.solvers.ode.systems.canonical_odes()`.
>>> eqs = canonical_odes(eqs, funcs, x)
>>> eqs
[[Eq(Derivative(f(x), x), a*g(x) + f(x) + 1), Eq(Derivative(g(x), x), a*f(x) - g(x))]]
Now, we will use :obj:`sympy.solvers.ode.systems.linear_ode_to_matrix()` to get the coefficient matrix and the
non-homogeneous term if it is there.
>>> eqs = eqs[0]
>>> (A1, A0), b = linear_ode_to_matrix(eqs, funcs, x, 1)
>>> A = A0
We have the coefficient matrices and the non-homogeneous term ready. Now, we can use
:obj:`sympy.solvers.ode.systems.linodesolve_type()` to get the information for the system of ODEs
to finally pass it to the solver.
>>> system_info = linodesolve_type(A, x, b=b)
>>> sol_vector = linodesolve(A, x, b=b, B=system_info['antiderivative'], type=system_info['type_of_equation'])
Now, we can prove if the solution is correct or not by using :obj:`sympy.solvers.ode.checkodesol()`
>>> sol = [Eq(f, s) for f, s in zip(funcs, sol_vector)]
>>> checkodesol(eqs, sol)
(True, [0, 0])
We can also use the doit method to evaluate the solutions passed by the function.
>>> sol_vector_evaluated = linodesolve(A, x, b=b, type="type2", doit=True)
Now, we will look at a system of ODEs which is non-constant.
>>> eqs = [Eq(f(x).diff(x), f(x) + x*g(x)), Eq(g(x).diff(x), -x*f(x) + g(x))]
The system defined above is already in the desired form, so we do not have to convert it.
>>> (A1, A0), b = linear_ode_to_matrix(eqs, funcs, x, 1)
>>> A = A0
A user can also pass the commutative antiderivative required for type3 and type4 system of ODEs.
Passing an incorrect one will lead to incorrect results. If the coefficient matrix is not commutative
with its antiderivative, then :obj:`sympy.solvers.ode.systems.linodesolve_type()` raises a NotImplementedError.
If it does have a commutative antiderivative, then the function just returns the information about the system.
>>> system_info = linodesolve_type(A, x, b=b)
Now, we can pass the antiderivative as an argument to get the solution. If the system information is not
passed, then the solver will compute the required arguments internally.
>>> sol_vector = linodesolve(A, x, b=b)
Once again, we can verify the solution obtained.
>>> sol = [Eq(f, s) for f, s in zip(funcs, sol_vector)]
>>> checkodesol(eqs, sol)
(True, [0, 0])
Returns
=======
List
Raises
======
ValueError
This error is raised when the coefficient matrix, non-homogeneous term
or the antiderivative, if passed, are not a matrix or
do not have correct dimensions
NonSquareMatrixError
When the coefficient matrix or its antiderivative, if passed is not a
square matrix
NotImplementedError
If the coefficient matrix does not have a commutative antiderivative
See Also
========
linear_ode_to_matrix: Coefficient matrix computation function
canonical_odes: System of ODEs representation change
linodesolve_type: Getting information about systems of ODEs to pass in this solver
"""
if not isinstance(A, MatrixBase):
raise ValueError(filldedent('''\
The coefficients of the system of ODEs should be of type Matrix
'''))
if not A.is_square:
raise NonSquareMatrixError(filldedent('''\
The coefficient matrix must be a square
'''))
if b is not None:
if not isinstance(b, MatrixBase):
raise ValueError(filldedent('''\
The non-homogeneous terms of the system of ODEs should be of type Matrix
'''))
if A.rows != b.rows:
raise ValueError(filldedent('''\
The system of ODEs should have the same number of non-homogeneous terms and the number of
equations
'''))
if B is not None:
if not isinstance(B, MatrixBase):
raise ValueError(filldedent('''\
The antiderivative of coefficients of the system of ODEs should be of type Matrix
'''))
if not B.is_square:
raise NonSquareMatrixError(filldedent('''\
The antiderivative of the coefficient matrix must be a square
'''))
if A.rows != B.rows:
raise ValueError(filldedent('''\
The coefficient matrix and its antiderivative should have same dimensions
'''))
if not any(type == "type{}".format(i) for i in range(1, 7)) and not type == "auto":
raise ValueError(filldedent('''\
The input type should be a valid one
'''))
n = A.rows
# constants = numbered_symbols(prefix='C', cls=Dummy, start=const_idx+1)
Cvect = Matrix([Dummy() for _ in range(n)])
if b is None and any(type == typ for typ in ["type2", "type4", "type6"]):
b = zeros(n, 1)
is_transformed = tau is not None
passed_type = type
if type == "auto":
system_info = linodesolve_type(A, t, b=b)
type = system_info["type_of_equation"]
B = system_info["antiderivative"]
if type in ("type5", "type6"):
is_transformed = True
if passed_type != "auto":
if tau is None:
system_info = _first_order_type5_6_subs(A, t, b=b)
if not system_info:
raise ValueError(filldedent('''
The system passed isn't {}.
'''.format(type)))
tau = system_info['tau']
t = system_info['t_']
A = system_info['A']
b = system_info['b']
intx_wrtt = lambda x: Integral(x, t) if x else 0
if type in ("type1", "type2", "type5", "type6"):
P, J = matrix_exp_jordan_form(A, t)
P = simplify(P)
if type in ("type1", "type5"):
sol_vector = P * (J * Cvect)
else:
Jinv = J.subs(t, -t)
sol_vector = P * J * ((Jinv * P.inv() * b).applyfunc(intx_wrtt) + Cvect)
else:
if B is None:
B, _ = _is_commutative_anti_derivative(A, t)
if type == "type3":
sol_vector = B.exp() * Cvect
else:
sol_vector = B.exp() * (((-B).exp() * b).applyfunc(intx_wrtt) + Cvect)
if is_transformed:
sol_vector = sol_vector.subs(t, tau)
gens = sol_vector.atoms(exp)
if type != "type1":
sol_vector = [expand_mul(s) for s in sol_vector]
sol_vector = [collect(s, ordered(gens), exact=True) for s in sol_vector]
if doit:
sol_vector = [s.doit() for s in sol_vector]
return sol_vector
def _matrix_is_constant(M, t):
    """Checks if the matrix M is independent of t or not."""
    # An entry depends on t exactly when the t-dependent part of its
    # additive decomposition is non-zero.
    for entry in M:
        if entry.as_independent(t, as_Add=True)[1] != 0:
            return False
    return True
def canonical_odes(eqs, funcs, t):
    r"""
    Function that solves for highest order derivatives in a system
    Explanation
    ===========
    This function inputs a system of ODEs and based on the system,
    the dependent variables and their highest order, returns the system
    in the following form:
    .. math::
        X'(t) = A(t) X(t) + b(t)
    Here, $X(t)$ is the vector of dependent variables of lower order, $A(t)$ is
    the coefficient matrix, $b(t)$ is the non-homogeneous term and $X'(t)$ is the
    vector of dependent variables in their respective highest order. We use the term
    canonical form to imply the system of ODEs which is of the above form.
    If the system passed has a non-linear term with multiple solutions, then a list of
    systems is returned in its canonical form.
    Parameters
    ==========
    eqs : List
        List of the ODEs
    funcs : List
        List of dependent variables
    t : Symbol
        Independent variable
    Examples
    ========
    >>> from sympy import symbols, Function, Eq, Derivative
    >>> from sympy.solvers.ode.systems import canonical_odes
    >>> f, g = symbols("f g", cls=Function)
    >>> x, y = symbols("x y")
    >>> funcs = [f(x), g(x)]
    >>> eqs = [Eq(f(x).diff(x) - 7*f(x), 12*g(x)), Eq(g(x).diff(x) + g(x), 20*f(x))]
    >>> canonical_eqs = canonical_odes(eqs, funcs, x)
    >>> canonical_eqs
    [[Eq(Derivative(f(x), x), 7*f(x) + 12*g(x)), Eq(Derivative(g(x), x), 20*f(x) - g(x))]]
    >>> system = [Eq(Derivative(f(x), x)**2 - 2*Derivative(f(x), x) + 1, 4), Eq(-y*f(x) + Derivative(g(x), x), 0)]
    >>> canonical_system = canonical_odes(system, funcs, x)
    >>> canonical_system
    [[Eq(Derivative(f(x), x), -1), Eq(Derivative(g(x), x), y*f(x))], [Eq(Derivative(f(x), x), 3), Eq(Derivative(g(x), x), y*f(x))]]
    Returns
    =======
    List
    """
    from sympy.solvers.solvers import solve
    # Highest derivative order of each dependent variable in the system.
    order = _get_func_order(eqs, funcs)
    # Solve algebraically for the highest order derivatives.  With dict=True
    # each solution branch is a dict, so a system that is non-linear in the
    # highest derivatives may yield several canonical systems.
    canon_eqs = solve(eqs, *[func.diff(t, order[func]) for func in funcs], dict=True)
    systems = []
    for eq in canon_eqs:
        # Re-assemble each branch as explicit ODEs: f^(n)(t) = rhs.
        system = [Eq(func.diff(t, order[func]), eq[func.diff(t, order[func])]) for func in funcs]
        systems.append(system)
    return systems
def _is_commutative_anti_derivative(A, t):
    r"""
    Helper function for determining if the Matrix passed is commutative with its antiderivative
    Explanation
    ===========
    This function checks if the Matrix $A$ passed is commutative with its antiderivative with respect
    to the independent variable $t$.
    .. math::
        B(t) = \int A(t) dt
    The function outputs two values, first one being the antiderivative $B(t)$, second one being a
    boolean value, if True, then the matrix $A(t)$ passed is commutative with $B(t)$, else the matrix
    passed isn't commutative with $B(t)$.
    Parameters
    ==========
    A : Matrix
        The matrix which has to be checked
    t : Symbol
        Independent variable
    Examples
    ========
    >>> from sympy import symbols, Matrix
    >>> from sympy.solvers.ode.systems import _is_commutative_anti_derivative
    >>> t = symbols("t")
    >>> A = Matrix([[1, t], [-t, 1]])
    >>> B, is_commuting = _is_commutative_anti_derivative(A, t)
    >>> is_commuting
    True
    Returns
    =======
    Matrix, Boolean
    """
    B = integrate(A, t)
    # A commutes with B iff the commutator B*A - A*B simplifies to zero.
    is_commuting = (B*A - A*B).applyfunc(expand).applyfunc(factor_terms).is_zero_matrix
    # is_zero_matrix is fuzzy: None means "could not decide", which is
    # treated conservatively as not commuting.
    is_commuting = False if is_commuting is None else is_commuting
    return B, is_commuting
def _factor_matrix(A, t):
    """Try to factor ``A`` as ``scalar * A_const`` with ``A_const`` constant in *t*.

    Returns the pair ``(scalar, A_const)`` on success, otherwise ``None``.
    """
    # Pick the t-dependent part of the first entry that actually depends on t.
    scalar = None
    for entry in A:
        candidate = entry.as_independent(t)[1]
        if candidate.has(t):
            scalar = candidate
            break
    if scalar is None:
        return None
    factored = (A/scalar).applyfunc(ratsimp)
    if not _matrix_is_constant(factored, t):
        return None
    return scalar, factored
def _is_second_order_type2(A, t):
    """Check whether a second order system ``X'' = A(t)*X`` is "type2".

    The system is type2 when ``A(t)`` factors as ``A_const / g(t)**2`` for a
    quadratic ``g(t) = a1*t**2 + b1*t + c1``.  Returns ``(is_type2, g(t))``;
    when the check fails, the second return value may be an intermediate
    expression and should be ignored by the caller.
    """
    term = _factor_matrix(A, t)
    is_type2 = False
    if term is not None:
        term = 1/term[0]
        is_type2 = term.is_polynomial()
    if is_type2:
        poly = Poly(term.expand(), t)
        monoms = poly.monoms()
        # A perfect square of a (possibly degenerate) quadratic has leading
        # degree 2 or 4.
        if monoms[0][0] in (2, 4):
            cs = _get_poly_coeffs(poly, 4)
            a, b, c, d, e = cs
            # Candidate square root g(t) = a1*t**2 + b1*t + c1 built from the
            # even-degree coefficients ...
            a1 = powdenest(sqrt(a), force=True)
            c1 = powdenest(sqrt(e), force=True)
            b1 = powdenest(sqrt(c - 2*a1*c1), force=True)
            # ... and verified against the odd-degree coefficients.
            is_type2 = (b == 2*a1*b1) and (d == 2*b1*c1)
            term = a1*t**2 + b1*t + c1
        else:
            is_type2 = False
    return is_type2, term
def _get_poly_coeffs(poly, order):
    """Return coefficients of *poly* as ``[c_order, ..., c_1, c_0]``.

    Degrees absent from the polynomial contribute 0.
    """
    by_degree = dict(zip((m[0] for m in poly.monoms()), poly.coeffs()))
    return [by_degree.get(deg, 0) for deg in range(order, -1, -1)]
def _match_second_order_type(A1, A0, t, b=None):
    r"""
    Works only for second order system in its canonical form.
    Type 0: Constant coefficient matrix, can be simply solved by
            introducing dummy variables.
    Type 1: When the substitution: $U = t*X' - X$ works for reducing
            the second order system to first order system.
    Type 2: When the system is of the form: $poly * X'' = A*X$ where
            $poly$ is square of a quadratic polynomial with respect to
            *t* and $A$ is a constant coefficient matrix.
    """
    match = {"type_of_equation": "type0"}
    n = A1.shape[0]
    if _matrix_is_constant(A1, t) and _matrix_is_constant(A0, t):
        return match
    # type1 requires A1 == -A0*t, i.e. X'' = A0*(t*X' - X) form.
    if (A1 + A0*t).applyfunc(expand_mul).is_zero_matrix:
        match.update({"type_of_equation": "type1", "A1": A1})
    elif A1.is_zero_matrix and (b is None or b.is_zero_matrix):
        is_type2, term = _is_second_order_type2(A0, t)
        if is_type2:
            # NOTE: b here intentionally rebinds the parameter name with the
            # quadratic's linear coefficient (the parameter is no longer used).
            a, b, c = _get_poly_coeffs(Poly(term, t), 2)
            # Constant matrix and substitution data for the transformed system.
            A = (A0*(term**2).expand()).applyfunc(ratsimp) + (b**2/4 - a*c)*eye(n, n)
            tau = integrate(1/term, t)
            t_ = Symbol("{}_".format(t))
            match.update({"type_of_equation": "type2", "A0": A,
                          "g(t)": sqrt(term), "tau": tau, "is_transformed": True,
                          "t_": t_})
    return match
def _second_order_subs_type1(A, b, funcs, t):
    r"""
    For a linear, second order system of ODEs, a particular substitution.
    A system of the below form can be reduced to a linear first order system of
    ODEs:
    .. math::
        X'' = A(t) * (t*X' - X) + b(t)
    By substituting:
    .. math::  U = t*X' - X
    To get the system:
    .. math::  U' = t*(A(t)*U + b(t))
    Where $U$ is the vector of dependent variables, $X$ is the vector of dependent
    variables in `funcs` and $X'$ is the first order derivative of $X$ with respect to
    $t$. It may or may not reduce the system into linear first order system of ODEs.
    Then a check is made to determine if the system passed can be reduced or not, if
    this substitution works, then the system is reduced and its solved for the new
    substitution. After we get the solution for $U$:
    .. math::  U = a(t)
    We substitute and return the reduced system:
    .. math::
        a(t) = t*X' - X
    Parameters
    ==========
    A: Matrix
        Coefficient matrix($A(t)*t$) of the second order system of this form.
    b: Matrix
        Non-homogeneous term($b(t)$) of the system of ODEs.
    funcs: List
        List of dependent variables
    t: Symbol
        Independent variable of the system of ODEs.
    Returns
    =======
    List
    """
    # The substitution vector U = t*X' - X.
    U = Matrix([t*func.diff(t) - func for func in funcs])
    # Solve the reduced first order system for U.
    sol = linodesolve(A, t, t*b)
    reduced_eqs = [Eq(u, s) for s, u in zip(sol, U)]
    # Solve t*X' - X = a(t) for the highest derivatives to obtain a
    # canonical first order system in the original funcs.
    reduced_eqs = canonical_odes(reduced_eqs, funcs, t)[0]
    return reduced_eqs
def _second_order_subs_type2(A, funcs, t_):
    r"""
    Returns a second order system based on the coefficient matrix passed.
    Explanation
    ===========
    This function returns a system of second order ODE of the following form:
    .. math::
        X'' = A * X
    Here, $X$ is the vector of dependent variables, but a bit modified, $A$ is the
    coefficient matrix passed.
    Along with returning the second order system, this function also returns the new
    dependent variables with the new independent variable `t_` passed.
    Parameters
    ==========
    A: Matrix
        Coefficient matrix of the system
    funcs: List
        List of old dependent variables
    t_: Symbol
        New independent variable
    Returns
    =======
    List, List
    """
    func_names = [func.func.__name__ for func in funcs]
    # Fresh dummy dependent variables in the new independent variable t_.
    new_funcs = [Function(Dummy("{}_".format(name)))(t_) for name in func_names]
    rhss = A * Matrix(new_funcs)
    new_eqs = [Eq(func.diff(t_, 2), rhs) for func, rhs in zip(new_funcs, rhss)]
    return new_eqs, new_funcs
def _is_euler_system(As, t):
    """A system is Cauchy-Euler when ``t**i * A_i`` is constant for every
    coefficient matrix ``A_i`` in *As* (indexed by derivative order ``i``)."""
    for power, coeff_matrix in enumerate(As):
        if not _matrix_is_constant((coeff_matrix*t**power).applyfunc(ratsimp), t):
            return False
    return True
def _classify_linear_system(eqs, funcs, t, is_canon=False):
    r"""
    Returns a dictionary with details of the eqs if the system passed is linear
    and can be classified by this function else returns None
    Explanation
    ===========
    This function takes the eqs, converts it into a form Ax = b where x is a vector of terms
    containing dependent variables and their derivatives till their maximum order. If it is
    possible to convert eqs into Ax = b, then all the equations in eqs are linear otherwise
    they are non-linear.
    To check if the equations are constant coefficient, we need to check if all the terms in
    A obtained above are constant or not.
    To check if the equations are homogeneous or not, we need to check if b is a zero matrix
    or not.
    Parameters
    ==========
    eqs: List
        List of ODEs
    funcs: List
        List of dependent variables
    t: Symbol
        Independent variable of the equations in eqs
    is_canon: Boolean
        If True, then this function will not try to get the
        system in canonical form. Default value is False
    Returns
    =======
    match = {
        'no_of_equation': len(eqs),
        'eq': eqs,
        'func': funcs,
        'order': order,
        'is_linear': is_linear,
        'is_constant': is_constant,
        'is_homogeneous': is_homogeneous,
    }
    Dict or list of Dicts or None
    Dict with values for keys:
        1. no_of_equation: Number of equations
        2. eq: The set of equations
        3. func: List of dependent variables
        4. order: A dictionary that gives the order of the
           dependent variable in eqs
        5. is_linear: Boolean value indicating if the set of
           equations are linear or not.
        6. is_constant: Boolean value indicating if the set of
           equations have constant coefficients or not.
        7. is_homogeneous: Boolean value indicating if the set of
           equations are homogeneous or not.
        8. commutative_antiderivative: Antiderivative of the coefficient
           matrix if the coefficient matrix is non-constant
           and commutative with its antiderivative. This key
           may or may not exist.
        9. is_general: Boolean value indicating if the system of ODEs is
           solvable using one of the general case solvers or not.
        10. rhs: rhs of the non-homogeneous system of ODEs in Matrix form. This
            key may or may not exist.
        11. is_higher_order: True if the system passed has an order greater than 1.
            This key may or may not exist.
        12. is_second_order: True if the system passed is a second order ODE. This
            key may or may not exist.
    This Dict is the answer returned if the eqs are linear and constant
    coefficient. Otherwise, None is returned.
    """
    # Error for i == 0 can be added but isn't for now
    # Check for len(funcs) == len(eqs)
    if len(funcs) != len(eqs):
        raise ValueError("Number of functions given is not equal to the number of equations %s" % funcs)
    # ValueError when functions have more than one arguments
    for func in funcs:
        if len(func.args) != 1:
            raise ValueError("dsolve() and classify_sysode() work with "
                             "functions of one variable only, not %s" % func)
    # Getting the func_dict and order using the helper
    # function
    order = _get_func_order(eqs, funcs)
    system_order = max(order[func] for func in funcs)
    is_higher_order = system_order > 1
    is_second_order = system_order == 2 and all(order[func] == 2 for func in funcs)
    # Not adding the check if the len(func.args) for
    # every func in funcs is 1
    # Linearity check
    try:
        canon_eqs = canonical_odes(eqs, funcs, t) if not is_canon else [eqs]
        if len(canon_eqs) == 1:
            As, b = linear_ode_to_matrix(canon_eqs[0], funcs, t, system_order)
        else:
            # Multiple canonical branches: the caller must recurse on each
            # branch separately; hand them back untouched.
            match = {
                'is_implicit': True,
                'canon_eqs': canon_eqs
            }
            return match
    # When the system of ODEs is non-linear, an ODENonlinearError is raised.
    # This function catches the error and None is returned.
    except ODENonlinearError:
        return None
    is_linear = True
    # Homogeneous check
    is_homogeneous = True if b.is_zero_matrix else False
    # Is general key is used to identify if the system of ODEs can be solved by
    # one of the general case solvers or not.
    match = {
        'no_of_equation': len(eqs),
        'eq': eqs,
        'func': funcs,
        'order': order,
        'is_linear': is_linear,
        'is_homogeneous': is_homogeneous,
        'is_general': True
    }
    if not is_homogeneous:
        match['rhs'] = b
    is_constant = all(_matrix_is_constant(A_, t) for A_ in As)
    # The match['is_linear'] check will be added in the future when this
    # function becomes ready to deal with non-linear systems of ODEs
    if not is_higher_order:
        # First order: classify via linodesolve_type (type1-type6).
        A = As[1]
        match['func_coeff'] = A
        # Constant coefficient check
        is_constant = _matrix_is_constant(A, t)
        match['is_constant'] = is_constant
        try:
            system_info = linodesolve_type(A, t, b=b)
        except NotImplementedError:
            return None
        match.update(system_info)
        antiderivative = match.pop("antiderivative")
        if not is_constant:
            match['commutative_antiderivative'] = antiderivative
        return match
    else:
        match['type_of_equation'] = "type0"
        if is_second_order:
            A1, A0 = As[1:]
            match_second_order = _match_second_order_type(A1, A0, t)
            match.update(match_second_order)
            match['is_second_order'] = True
        # If system is constant, then no need to check if its in euler
        # form or not. It will be easier and faster to directly proceed
        # to solve it.
        if match['type_of_equation'] == "type0" and not is_constant:
            is_euler = _is_euler_system(As, t)
            if is_euler:
                t_ = Symbol('{}_'.format(t))
                match.update({'is_transformed': True, 'type_of_equation': 'type1',
                              't_': t_})
            else:
                is_jordan = lambda M: M == Matrix.jordan_block(M.shape[0], M[0, 0])
                terms = _factor_matrix(As[-1], t)
                if all(A.is_zero_matrix for A in As[1:-1]) and terms is not None and not is_jordan(terms[1]):
                    P, J = terms[1].jordan_form()
                    match.update({'type_of_equation': 'type2', 'J': J,
                                  'f(t)': terms[0], 'P': P, 'is_transformed': True})
        if match['type_of_equation'] != 'type0' and is_second_order:
            match.pop('is_second_order', None)
        match['is_higher_order'] = is_higher_order
        return match
def _preprocess_eqs(eqs):
    """Normalize each input equation to an ``Equality``.

    Any element of *eqs* that is not already an ``Equality`` is interpreted
    as an expression equal to zero and wrapped as ``Eq(expr, 0)``.
    """
    # Comprehension replaces the manual append loop (same order, same result).
    return [eq if isinstance(eq, Equality) else Eq(eq, 0) for eq in eqs]
def _eqs2dict(eqs, funcs):
    """Build the dependency map of a canonical first order system.

    Returns two dicts keyed by the dependent variable on each lhs: the set of
    other dependent variables (from *funcs*) appearing on its rhs, and the
    original equation itself.
    """
    dep_map = {}
    orig_map = {}
    func_set = set(funcs)
    for eq in eqs:
        # Each lhs is expected to contain exactly one applied function.
        (lhs_func,) = eq.lhs.atoms(AppliedUndef)
        dep_map[lhs_func] = (eq.rhs.atoms(AppliedUndef) - {lhs_func}) & func_set
        orig_map[lhs_func] = eq
    return dep_map, orig_map
def _dict2graph(d):
    """Convert an adjacency dict into the (nodes, edges) pair expected by the
    graph-component utilities."""
    edge_list = []
    for src, dests in d.items():
        for dest in dests:
            edge_list.append((src, dest))
    return (list(d), edge_list)
def _is_type1(scc, t):
    """Return True when *scc* (a pair of equations and functions) is a first
    order, constant coefficient, homogeneous linear system."""
    eqs, funcs = scc
    try:
        (A1, A0), b = linear_ode_to_matrix(eqs, funcs, t, 1)
    except (ODENonlinearError, ODEOrderError):
        # Not linear or not first order: definitely not type1.
        return False
    return bool(_matrix_is_constant(A0, t) and b.is_zero_matrix)
def _combine_type1_subsystems(subsystem, funcs, t):
    # Merge strongly connected components that are "type1" (first order,
    # constant coefficient, homogeneous): when a later type1 block references
    # the functions of an earlier one, solving them together as one larger
    # system avoids sequential back-substitution.
    # NOTE(review): funcs here is the list of scc function groups (see
    # _component_division), so funcs[i] is a collection of functions.
    indices = [i for i, sys in enumerate(zip(subsystem, funcs)) if _is_type1(sys, t)]
    remove = set()
    for ip, i in enumerate(indices):
        for j in indices[ip+1:]:
            # If subsystem j uses any function solved by subsystem i, fold
            # i's equations into j and drop the standalone copy of i.
            if any(eq2.has(funcs[i]) for eq2 in subsystem[j]):
                subsystem[j] = subsystem[i] + subsystem[j]
                remove.add(i)
    subsystem = [sys for i, sys in enumerate(subsystem) if i not in remove]
    return subsystem
def _component_division(eqs, funcs, t):
    """Divide the system into weakly connected components, each further split
    into strongly connected components of the dependency graph."""
    # Assuming that each eq in eqs is in canonical form,
    # that is, [f(x).diff(x) = .., g(x).diff(x) = .., etc]
    # and that the system passed is in its first order
    eqsmap, eqsorig = _eqs2dict(eqs, funcs)
    subsystems = []
    for cc in connected_components(_dict2graph(eqsmap)):
        # Restrict the dependency graph to this weakly connected component ...
        eqsmap_c = {f: eqsmap[f] for f in cc}
        sccs = strongly_connected_components(_dict2graph(eqsmap_c))
        # ... and split it into strongly connected components, which can be
        # solved one after another with back-substitution.
        subsystem = [[eqsorig[f] for f in scc] for scc in sccs]
        subsystem = _combine_type1_subsystems(subsystem, sccs, t)
        subsystems.append(subsystem)
    return subsystems
# Returns: List of equations
def _linear_ode_solver(match):
    """Solve a first order linear system described by a classification match
    dict, returning a list of Eq(func, solution)."""
    funcs = match['func']
    # Prefer the transformed independent variable when a substitution was made.
    indep = match.get('t_', match['t'])
    coeff_matrix = match['func_coeff']
    nonhomog = match.get('rhs', None)
    tau = match.get('tau', None)
    # Present only for non-constant systems with a commutative antiderivative;
    # None makes linodesolve compute it when needed.
    antideriv = match.get('commutative_antiderivative', None)
    sol_vector = linodesolve(coeff_matrix, indep, b=nonhomog, B=antideriv,
                             type=match['type_of_equation'], tau=tau)
    return [Eq(func, expr) for func, expr in zip(funcs, sol_vector)]
def _select_equations(eqs, funcs, key=lambda x: x):
    """For each func, pick the equation whose lhs equals ``key(func)`` and
    return it rewritten as ``Eq(func, rhs)``, preserving the order of *funcs*."""
    rhs_by_lhs = {eq.lhs: eq.rhs for eq in eqs}
    selected = []
    for func in funcs:
        selected.append(Eq(func, rhs_by_lhs[key(func)]))
    return selected
def _higher_order_ode_solver(match):
    """Solve a higher order (or second order) linear system.

    The system is reduced to first order, solved, and then any transformation
    applied during the reduction (Euler substitution, type2 scaling, Jordan
    form change of basis) is undone on the solution.
    """
    eqs = match["eq"]
    funcs = match["func"]
    t = match["t"]
    sysorder = match['order']
    type = match.get('type_of_equation', "type0")
    is_second_order = match.get('is_second_order', False)
    is_transformed = match.get('is_transformed', False)
    is_euler = is_transformed and type == "type1"
    is_higher_order_type2 = is_transformed and type == "type2" and 'P' in match
    if is_second_order:
        new_eqs, new_funcs = _second_order_to_first_order(eqs, funcs, t,
                             A1=match.get("A1", None), A0=match.get("A0", None),
                             b=match.get("rhs", None), type=type,
                             t_=match.get("t_", None))
    else:
        new_eqs, new_funcs = _higher_order_to_first_order(eqs, sysorder, t, funcs=funcs,
                             type=type, J=match.get('J', None),
                             f_t=match.get('f(t)', None),
                             P=match.get('P', None), b=match.get('rhs', None))
    # After a transformation the reduced system lives in the new variable t_.
    if is_transformed:
        t = match.get('t_', t)
    if not is_higher_order_type2:
        new_eqs = _select_equations(new_eqs, [f.diff(t) for f in new_funcs])
    sol = None
    # NotImplementedError may be raised when the system may be actually
    # solvable if it can be just divided into sub-systems
    try:
        if not is_higher_order_type2:
            sol = _strong_component_solver(new_eqs, new_funcs, t)
    except NotImplementedError:
        sol = None
    # Dividing the system only when it becomes essential
    if sol is None:
        try:
            sol = _component_solver(new_eqs, new_funcs, t)
        except NotImplementedError:
            sol = None
    if sol is None:
        return sol
    is_second_order_type2 = is_second_order and type == "type2"
    # The reduction introduced dummies named "<func>_0" (or "<func>__0" when a
    # transformation was also applied); map them back to the original funcs.
    underscores = '__' if is_transformed else '_'
    sol = _select_equations(sol, funcs,
          key=lambda x: Function(Dummy('{}{}0'.format(x.func.__name__, underscores)))(t))
    if match.get("is_transformed", False):
        if is_second_order_type2:
            # Undo X(t) = g(t) * Y(tau(t)).
            g_t = match["g(t)"]
            tau = match["tau"]
            sol = [Eq(s.lhs, s.rhs.subs(t, tau) * g_t) for s in sol]
        elif is_euler:
            # Undo the Cauchy-Euler substitution t = exp(t_).
            t = match['t']
            tau = match['t_']
            sol = [s.subs(tau, log(t)) for s in sol]
        elif is_higher_order_type2:
            # Undo the Jordan-form change of basis X = P * Y.
            P = match['P']
            sol_vector = P * Matrix([s.rhs for s in sol])
            sol = [Eq(f, s) for f, s in zip(funcs, sol_vector)]
    return sol
# Returns: List of equations or None
# If None is returned by this solver, then the system
# of ODEs cannot be solved directly by dsolve_system.
def _strong_component_solver(eqs, funcs, t):
    """Try to solve a (sub)system as a whole.

    Returns a list of equations on success, or None when the system cannot be
    solved directly by the linear solvers here (the caller may then divide it
    into components).
    """
    from sympy.solvers.ode.ode import dsolve, constant_renumber
    match = _classify_linear_system(eqs, funcs, t, is_canon=True)
    sol = None
    # Assuming that we can't get an implicit system
    # since we are already canonical equations from
    # dsolve_system
    if match:
        match['t'] = t
        if match.get('is_higher_order', False):
            sol = _higher_order_ode_solver(match)
        elif match.get('is_linear', False):
            sol = _linear_ode_solver(match)
        # Note: For now, only linear systems are handled by this function
        # hence, the match condition is added. This can be removed later.
        if sol is None and len(eqs) == 1:
            # Single equation fallback: delegate to dsolve and renumber its
            # integration constants with dummies to avoid clashes.
            sol = dsolve(eqs[0], func=funcs[0])
            variables = Tuple(eqs[0]).free_symbols
            new_constants = [Dummy() for _ in range(ode_order(eqs[0], funcs[0]))]
            sol = constant_renumber(sol, variables=variables, newconstants=new_constants)
            sol = [sol]
    # To add non-linear case here in future
    return sol
def _get_funcs_from_canon(eqs):
    """Extract the dependent variables from canonical equations, whose lhs is
    ``Derivative(f(t), t)`` so the function is the derivative's first arg."""
    funcs = []
    for canon_eq in eqs:
        funcs.append(canon_eq.lhs.args[0])
    return funcs
# Returns: List of Equations(a solution)
def _weak_component_solver(wcc, t):
    """Solve one weakly connected component, falling back to scc-by-scc
    solving with sequential back-substitution when needed."""
    # We will divide the systems into sccs
    # only when the wcc cannot be solved as
    # a whole
    eqs = []
    for scc in wcc:
        eqs += scc
    funcs = _get_funcs_from_canon(eqs)
    sol = _strong_component_solver(eqs, funcs, t)
    if sol:
        return sol
    sol = []
    for scc in wcc:
        eqs = scc
        funcs = _get_funcs_from_canon(eqs)
        # Substituting solutions for the dependent
        # variables solved in previous SCC, if any solved.
        comp_eqs = [eq.subs({s.lhs: s.rhs for s in sol}) for eq in eqs]
        scc_sol = _strong_component_solver(comp_eqs, funcs, t)
        if scc_sol is None:
            raise NotImplementedError(filldedent('''
                The system of ODEs passed cannot be solved by dsolve_system.
            '''))
        # scc_sol: List of equations
        # scc_sol is a solution
        sol += scc_sol
    return sol
# Returns: List of Equations(a solution)
def _component_solver(eqs, funcs, t):
    """Solve the system by dividing it into its dependency-graph components
    and concatenating the component solutions (a list of equations)."""
    solution = []
    for weak_component in _component_division(eqs, funcs, t):
        # Each call returns a list of Eq objects for that component.
        solution.extend(_weak_component_solver(weak_component, t))
    return solution
def _second_order_to_first_order(eqs, funcs, t, type="auto", A1=None,
                                 A0=None, b=None, t_=None):
    r"""
    Expects the system to be in second order and in canonical form
    Explanation
    ===========
    Reduces a second order system into a first order one depending on the type of second
    order system.
    1. "type0": If this is passed, then the system will be reduced to first order by
                introducing dummy variables.
    2. "type1": If this is passed, then a particular substitution will be used to reduce
                the system into first order.
    3. "type2": If this is passed, then the system will be transformed with new dependent
                variables and independent variables. This transformation is a part of solving
                the corresponding system of ODEs.
    `A1` and `A0` are the coefficient matrices from the system and it is assumed that the
    second order system has the form given below:
    .. math::
        A2 * X'' = A1 * X' + A0 * X + b
    Here, $A2$ is the coefficient matrix for the vector $X''$ and $b$ is the non-homogeneous
    term.
    Default value for `b` is None but if `A1` and `A0` are passed and `b` is not passed, then the
    system will be assumed homogeneous.
    """
    is_a1 = A1 is None
    is_a0 = A0 is None
    # Recompute the coefficient matrices only when the requested type needs
    # one that the caller did not supply.
    if (type == "type1" and is_a1) or (type == "type2" and is_a0)\
        or (type == "auto" and (is_a1 or is_a0)):
        (A2, A1, A0), b = linear_ode_to_matrix(eqs, funcs, t, 2)
        if not A2.is_Identity:
            raise ValueError(filldedent('''
                The system must be in its canonical form.
            '''))
    if type == "auto":
        match = _match_second_order_type(A1, A0, t)
        type = match["type_of_equation"]
        A1 = match.get("A1", None)
        A0 = match.get("A0", None)
    sys_order = dict.fromkeys(funcs, 2)
    if type == "type1":
        if b is None:
            b = zeros(len(eqs))
        eqs = _second_order_subs_type1(A1, b, funcs, t)
        # The substitution U = t*X' - X leaves a first order system.
        sys_order = dict.fromkeys(funcs, 1)
    if type == "type2":
        if t_ is None:
            t_ = Symbol("{}_".format(t))
        t = t_
        eqs, funcs = _second_order_subs_type2(A0, funcs, t_)
        sys_order = dict.fromkeys(funcs, 2)
    return _higher_order_to_first_order(eqs, sys_order, t, funcs=funcs)
def _higher_order_type2_to_sub_systems(J, f_t, funcs, t, max_order, b=None, P=None):
    """Build the decoupled sub-systems for a higher order type2 system.

    For ``X^(n) = f(t)*A*X + b`` with ``A = P*J*P**-1`` (J in Jordan form),
    returns the system ``Y^(n) = f(t)*J*Y + P**-1*b`` in canonical form
    together with the new dummy dependent variables Y.
    """
    # Note: To add a test for this ValueError
    if J is None or f_t is None or not _matrix_is_constant(J, t):
        raise ValueError(filldedent('''
            Correctly input for args 'A' and 'f_t' for Linear, Higher Order,
            Type 2
        '''))
    # P is required to transform a non-homogeneous term into the new basis.
    if P is None and b is not None and not b.is_zero_matrix:
        raise ValueError(filldedent('''
            Provide the keyword 'P' for matrix P in A = P * J * P-1.
        '''))
    new_funcs = Matrix([Function(Dummy('{}__0'.format(f.func.__name__)))(t) for f in funcs])
    new_eqs = new_funcs.diff(t, max_order) - f_t * J * new_funcs
    if b is not None and not b.is_zero_matrix:
        new_eqs -= P.inv() * b
    # Solve for the highest derivatives to put the sub-systems in canonical form.
    new_eqs = canonical_odes(new_eqs, new_funcs, t)[0]
    return new_eqs, new_funcs
def _higher_order_to_first_order(eqs, sys_order, t, funcs=None, type="type0", **kwargs):
    """Reduce a higher order linear system to an equivalent first order one.

    ``type0`` introduces dummy variables for the lower derivatives; ``type1``
    applies the Cauchy-Euler substitution ``t = exp(t_)`` and recurses;
    ``type2`` delegates to the Jordan-form sub-system builder.  Returns the
    reduced equations and the new dependent variables (except for type2,
    which returns what its helper returns).
    """
    if funcs is None:
        funcs = sys_order.keys()
    # Standard Cauchy Euler system
    if type == "type1":
        t_ = Symbol('{}_'.format(t))
        new_funcs = [Function(Dummy('{}_'.format(f.func.__name__)))(t_) for f in funcs]
        max_order = max(sys_order[func] for func in funcs)
        subs_dict = dict(zip(funcs, new_funcs))
        subs_dict[t] = exp(t_)
        free_function = Function(Dummy())
        def _get_coeffs_from_subs_expression(expr):
            # Recursively read off {derivative_order: coefficient} from the
            # expansion of D^o[g(log(t_))] * t_**o.
            if isinstance(expr, Subs):
                free_symbol = expr.args[1][0]
                term = expr.args[0]
                return {ode_order(term, free_symbol): 1}
            if isinstance(expr, Mul):
                coeff = expr.args[0]
                order = list(_get_coeffs_from_subs_expression(expr.args[1]).keys())[0]
                return {order: coeff}
            if isinstance(expr, Add):
                coeffs = {}
                for arg in expr.args:
                    if isinstance(arg, Mul):
                        coeffs.update(_get_coeffs_from_subs_expression(arg))
                    else:
                        order = list(_get_coeffs_from_subs_expression(arg).keys())[0]
                        coeffs[order] = 1
                return coeffs
        for o in range(1, max_order + 1):
            # Express t**o * d^o/dt^o in terms of derivatives with respect to
            # the new variable t_ via the chain rule on g(log(t_)).
            expr = free_function(log(t_)).diff(t_, o)*t_**o
            coeff_dict = _get_coeffs_from_subs_expression(expr)
            coeffs = [coeff_dict[order] if order in coeff_dict else 0 for order in range(o + 1)]
            expr_to_subs = sum(free_function(t_).diff(t_, i) * c for i, c in
                               enumerate(coeffs)) / t**o
            subs_dict.update({f.diff(t, o): expr_to_subs.subs(free_function(t_), nf)
                              for f, nf in zip(funcs, new_funcs)})
        new_eqs = [eq.subs(subs_dict) for eq in eqs]
        new_sys_order = {nf: sys_order[f] for f, nf in zip(funcs, new_funcs)}
        new_eqs = canonical_odes(new_eqs, new_funcs, t_)[0]
        return _higher_order_to_first_order(new_eqs, new_sys_order, t_, funcs=new_funcs)
    # Systems of the form: X(n)(t) = f(t)*A*X + b
    # where X(n)(t) is the nth derivative of the vector of dependent variables
    # with respect to the independent variable and A is a constant matrix.
    if type == "type2":
        J = kwargs.get('J', None)
        f_t = kwargs.get('f_t', None)
        b = kwargs.get('b', None)
        P = kwargs.get('P', None)
        max_order = max(sys_order[func] for func in funcs)
        return _higher_order_type2_to_sub_systems(J, f_t, funcs, t, max_order, P=P, b=b)
        # Note: To be changed to this after doit option is disabled for default cases
        # new_sysorder = _get_func_order(new_eqs, new_funcs)
        #
        # return _higher_order_to_first_order(new_eqs, new_sysorder, t, funcs=new_funcs)
    # type0: introduce a chain of dummies f_0, f_1, ... with f_i' = f_(i+1).
    new_funcs = []
    for prev_func in funcs:
        func_name = prev_func.func.__name__
        func = Function(Dummy('{}_0'.format(func_name)))(t)
        new_funcs.append(func)
        subs_dict = {prev_func: func}
        new_eqs = []
        for i in range(1, sys_order[prev_func]):
            new_func = Function(Dummy('{}_{}'.format(func_name, i)))(t)
            subs_dict[prev_func.diff(t, i)] = new_func
            new_funcs.append(new_func)
            prev_f = subs_dict[prev_func.diff(t, i-1)]
            new_eq = Eq(prev_f.diff(t), new_func)
            new_eqs.append(new_eq)
        eqs = [eq.subs(subs_dict) for eq in eqs] + new_eqs
    return eqs, new_funcs
def dsolve_system(eqs, funcs=None, t=None, ics=None, doit=False, simplify=True):
r"""
Solves any(supported) system of Ordinary Differential Equations
Explanation
===========
This function takes a system of ODEs as an input, determines if the
it is solvable by this function, and returns the solution if found any.
This function can handle:
1. Linear, First Order, Constant coefficient homogeneous system of ODEs
2. Linear, First Order, Constant coefficient non-homogeneous system of ODEs
3. Linear, First Order, non-constant coefficient homogeneous system of ODEs
4. Linear, First Order, non-constant coefficient non-homogeneous system of ODEs
5. Any implicit system which can be divided into system of ODEs which is of the above 4 forms
6. Any higher order linear system of ODEs that can be reduced to one of the 5 forms of systems described above.
The types of systems described above are not limited by the number of equations, i.e. this
function can solve the above types irrespective of the number of equations in the system passed.
But, the bigger the system, the more time it will take to solve the system.
This function returns a list of solutions. Each solution is a list of equations where LHS is
the dependent variable and RHS is an expression in terms of the independent variable.
Among the non constant coefficient types, not all the systems are solvable by this function. Only
those which have either a coefficient matrix with a commutative antiderivative or those systems which
may be divided further so that the divided systems may have coefficient matrix with commutative antiderivative.
Parameters
==========
eqs : List
system of ODEs to be solved
funcs : List or None
List of dependent variables that make up the system of ODEs
t : Symbol or None
Independent variable in the system of ODEs
ics : Dict or None
Set of initial boundary/conditions for the system of ODEs
doit : Boolean
Evaluate the solutions if True. Default value is True. Can be
set to false if the integral evaluation takes too much time and/or
is not required.
simplify: Boolean
Simplify the solutions for the systems. Default value is True.
Can be set to false if simplification takes too much time and/or
is not required.
Examples
========
>>> from sympy import symbols, Eq, Function
>>> from sympy.solvers.ode.systems import dsolve_system
>>> f, g = symbols("f g", cls=Function)
>>> x = symbols("x")
>>> eqs = [Eq(f(x).diff(x), g(x)), Eq(g(x).diff(x), f(x))]
>>> dsolve_system(eqs)
[[Eq(f(x), -C1*exp(-x) + C2*exp(x)), Eq(g(x), C1*exp(-x) + C2*exp(x))]]
You can also pass the initial conditions for the system of ODEs:
>>> dsolve_system(eqs, ics={f(0): 1, g(0): 0})
[[Eq(f(x), exp(x)/2 + exp(-x)/2), Eq(g(x), exp(x)/2 - exp(-x)/2)]]
Optionally, you can pass the dependent variables and the independent
variable for which the system is to be solved:
>>> funcs = [f(x), g(x)]
>>> dsolve_system(eqs, funcs=funcs, t=x)
[[Eq(f(x), -C1*exp(-x) + C2*exp(x)), Eq(g(x), C1*exp(-x) + C2*exp(x))]]
Lets look at an implicit system of ODEs:
>>> eqs = [Eq(f(x).diff(x)**2, g(x)**2), Eq(g(x).diff(x), g(x))]
>>> dsolve_system(eqs)
[[Eq(f(x), C1 - C2*exp(x)), Eq(g(x), C2*exp(x))], [Eq(f(x), C1 + C2*exp(x)), Eq(g(x), C2*exp(x))]]
Returns
=======
List of List of Equations
Raises
======
NotImplementedError
When the system of ODEs is not solvable by this function.
ValueError
When the parameters passed are not in the required form.
"""
from sympy.solvers.ode.ode import solve_ics, _extract_funcs, constant_renumber
if not iterable(eqs):
raise ValueError(filldedent('''
List of equations should be passed. The input is not valid.
'''))
eqs = _preprocess_eqs(eqs)
if funcs is not None and not isinstance(funcs, list):
raise ValueError(filldedent('''
Input to the funcs should be a list of functions.
'''))
if funcs is None:
funcs = _extract_funcs(eqs)
if any(len(func.args) != 1 for func in funcs):
raise ValueError(filldedent('''
dsolve_system can solve a system of ODEs with only one independent
variable.
'''))
if len(eqs) != len(funcs):
raise ValueError(filldedent('''
Number of equations and number of functions do not match
'''))
if t is not None and not isinstance(t, Symbol):
raise ValueError(filldedent('''
The independent variable must be of type Symbol
'''))
if t is None:
t = list(list(eqs[0].atoms(Derivative))[0].atoms(Symbol))[0]
sols = []
canon_eqs = canonical_odes(eqs, funcs, t)
for canon_eq in canon_eqs:
try:
sol = _strong_component_solver(canon_eq, funcs, t)
except NotImplementedError:
sol = None
if sol is None:
sol = _component_solver(canon_eq, funcs, t)
sols.append(sol)
if sols:
final_sols = []
variables = Tuple(*eqs).free_symbols
for sol in sols:
sol = _select_equations(sol, funcs)
sol = constant_renumber(sol, variables=variables)
if ics:
constants = Tuple(*sol).free_symbols - variables
solved_constants = solve_ics(sol, funcs, constants, ics)
sol = [s.subs(solved_constants) for s in sol]
if simplify:
constants = Tuple(*sol).free_symbols - variables
sol = simpsol(sol, [t], constants, doit=doit)
final_sols.append(sol)
sols = final_sols
return sols
| ODENonlinearError |
python | numba__llvmlite | llvmlite/tests/test_binding.py | {
"start": 87677,
"end": 88505
} | class ____(BaseTest):
def test_lock_callbacks(self):
events = []
def acq():
events.append('acq')
def rel():
events.append('rel')
# register callback
llvm.ffi.register_lock_callback(acq, rel)
# Check: events are initially empty
self.assertFalse(events)
# Call LLVM functions
llvm.create_new_module_pass_manager()
# Check: there must be at least one acq and one rel
self.assertIn("acq", events)
self.assertIn("rel", events)
# unregister callback
llvm.ffi.unregister_lock_callback(acq, rel)
# Check: removing non-existent callbacks will trigger a ValueError
with self.assertRaises(ValueError):
llvm.ffi.unregister_lock_callback(acq, rel)
| TestLLVMLockCallbacks |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_database_backend.py | {
"start": 22890,
"end": 24996
} | class ____(ExampleDatabase):
def save(self, key: bytes, value: bytes) -> None: ...
def fetch(self, key: bytes) -> Iterable[bytes]: ...
def delete(self, key: bytes, value: bytes) -> None: ...
def test_warns_when_listening_not_supported():
db = DoesNotSupportListening()
listener = lambda event: event
with pytest.warns(
HypothesisWarning, match="does not support listening for changes"
):
db.add_listener(listener)
with pytest.warns(
HypothesisWarning, match="does not support stopping listening for changes"
):
db.remove_listener(listener)
def test_readonly_listener():
db = ReadOnlyDatabase(InMemoryExampleDatabase())
def listener(event):
raise AssertionError("ReadOnlyDatabase never fires change events")
db.add_listener(listener)
db.save(b"a", b"a")
db.remove_listener(listener)
db.save(b"b", b"b")
@skipif_threading
def test_metakeys_move_into_existing_key(tmp_path):
db = DirectoryBasedExampleDatabase(tmp_path)
db.save(b"k1", b"v1")
db.save(b"k1", b"v2")
db.save(b"k2", b"v3")
assert set(db.fetch(db._metakeys_name)) == {b"k1", b"k2"}
db.move(b"k1", b"k2", b"v2")
assert set(db.fetch(db._metakeys_name)) == {b"k1", b"k2"}
@skipif_threading
def test_metakeys_move_into_nonexistent_key(tmp_path):
db = DirectoryBasedExampleDatabase(tmp_path)
db.save(b"k1", b"v1")
assert set(db.fetch(db._metakeys_name)) == {b"k1"}
db.move(b"k1", b"k2", b"v1")
assert set(db.fetch(db._metakeys_name)) == {b"k1", b"k2"}
@skipif_threading
def test_metakeys(tmp_path):
db = DirectoryBasedExampleDatabase(tmp_path)
db.save(b"k1", b"v1")
assert set(db.fetch(db._metakeys_name)) == {b"k1"}
db.save(b"k1", b"v2")
assert set(db.fetch(db._metakeys_name)) == {b"k1"}
# deleting all the values from a key removes that metakey
db.delete(b"k1", b"v1")
db.delete(b"k1", b"v2")
assert set(db.fetch(db._metakeys_name)) == set()
db.save(b"k2", b"v1")
assert set(db.fetch(db._metakeys_name)) == {b"k2"}
| DoesNotSupportListening |
python | langchain-ai__langchain | libs/langchain/langchain_classic/indexes/vectorstore.py | {
"start": 836,
"end": 7042
} | class ____(BaseModel):
"""Wrapper around a `VectorStore` for easy access."""
vectorstore: VectorStore
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
def query(
self,
question: str,
llm: BaseLanguageModel | None = None,
retriever_kwargs: dict[str, Any] | None = None,
**kwargs: Any,
) -> str:
"""Query the `VectorStore` using the provided LLM.
Args:
question: The question or prompt to query.
llm: The language model to use. Must not be `None`.
retriever_kwargs: Optional keyword arguments for the retriever.
**kwargs: Additional keyword arguments forwarded to the chain.
Returns:
The result string from the RetrievalQA chain.
"""
if llm is None:
msg = (
"This API has been changed to require an LLM. "
"Please provide an llm to use for querying the vectorstore.\n"
"For example,\n"
"from langchain_openai import OpenAI\n"
"model = OpenAI(temperature=0)"
)
raise NotImplementedError(msg)
retriever_kwargs = retriever_kwargs or {}
chain = RetrievalQA.from_chain_type(
llm,
retriever=self.vectorstore.as_retriever(**retriever_kwargs),
**kwargs,
)
return chain.invoke({chain.input_key: question})[chain.output_key]
async def aquery(
self,
question: str,
llm: BaseLanguageModel | None = None,
retriever_kwargs: dict[str, Any] | None = None,
**kwargs: Any,
) -> str:
"""Asynchronously query the `VectorStore` using the provided LLM.
Args:
question: The question or prompt to query.
llm: The language model to use. Must not be `None`.
retriever_kwargs: Optional keyword arguments for the retriever.
**kwargs: Additional keyword arguments forwarded to the chain.
Returns:
The asynchronous result string from the RetrievalQA chain.
"""
if llm is None:
msg = (
"This API has been changed to require an LLM. "
"Please provide an llm to use for querying the vectorstore.\n"
"For example,\n"
"from langchain_openai import OpenAI\n"
"model = OpenAI(temperature=0)"
)
raise NotImplementedError(msg)
retriever_kwargs = retriever_kwargs or {}
chain = RetrievalQA.from_chain_type(
llm,
retriever=self.vectorstore.as_retriever(**retriever_kwargs),
**kwargs,
)
return (await chain.ainvoke({chain.input_key: question}))[chain.output_key]
def query_with_sources(
self,
question: str,
llm: BaseLanguageModel | None = None,
retriever_kwargs: dict[str, Any] | None = None,
**kwargs: Any,
) -> dict:
"""Query the `VectorStore` and retrieve the answer along with sources.
Args:
question: The question or prompt to query.
llm: The language model to use. Must not be `None`.
retriever_kwargs: Optional keyword arguments for the retriever.
**kwargs: Additional keyword arguments forwarded to the chain.
Returns:
`dict` containing the answer and source documents.
"""
if llm is None:
msg = (
"This API has been changed to require an LLM. "
"Please provide an llm to use for querying the vectorstore.\n"
"For example,\n"
"from langchain_openai import OpenAI\n"
"model = OpenAI(temperature=0)"
)
raise NotImplementedError(msg)
retriever_kwargs = retriever_kwargs or {}
chain = RetrievalQAWithSourcesChain.from_chain_type(
llm,
retriever=self.vectorstore.as_retriever(**retriever_kwargs),
**kwargs,
)
return chain.invoke({chain.question_key: question})
async def aquery_with_sources(
self,
question: str,
llm: BaseLanguageModel | None = None,
retriever_kwargs: dict[str, Any] | None = None,
**kwargs: Any,
) -> dict:
"""Asynchronously query the `VectorStore` and retrieve the answer and sources.
Args:
question: The question or prompt to query.
llm: The language model to use. Must not be `None`.
retriever_kwargs: Optional keyword arguments for the retriever.
**kwargs: Additional keyword arguments forwarded to the chain.
Returns:
`dict` containing the answer and source documents.
"""
if llm is None:
msg = (
"This API has been changed to require an LLM. "
"Please provide an llm to use for querying the vectorstore.\n"
"For example,\n"
"from langchain_openai import OpenAI\n"
"model = OpenAI(temperature=0)"
)
raise NotImplementedError(msg)
retriever_kwargs = retriever_kwargs or {}
chain = RetrievalQAWithSourcesChain.from_chain_type(
llm,
retriever=self.vectorstore.as_retriever(**retriever_kwargs),
**kwargs,
)
return await chain.ainvoke({chain.question_key: question})
def _get_in_memory_vectorstore() -> type[VectorStore]:
"""Get the `InMemoryVectorStore`."""
import warnings
try:
from langchain_community.vectorstores.inmemory import InMemoryVectorStore
except ImportError as e:
msg = "Please install langchain-community to use the InMemoryVectorStore."
raise ImportError(msg) from e
warnings.warn(
"Using InMemoryVectorStore as the default vectorstore."
"This memory store won't persist data. You should explicitly"
"specify a VectorStore when using VectorstoreIndexCreator",
stacklevel=3,
)
return InMemoryVectorStore
| VectorStoreIndexWrapper |
python | sqlalchemy__sqlalchemy | test/dialect/sqlite/test_reflection.py | {
"start": 46305,
"end": 48669
} | class ____(fixtures.TablesTest):
__only_on__ = "sqlite"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"sqliteatable",
metadata,
Column("id", Integer, primary_key=True),
Column("other", String(42)),
sqlite_autoincrement=True,
)
view = "CREATE VIEW sqliteview AS SELECT * FROM sqliteatable"
event.listen(metadata, "after_create", DDL(view))
event.listen(metadata, "before_drop", DDL("DROP VIEW sqliteview"))
def test_get_table_names(self, connection):
insp = inspect(connection)
res = insp.get_table_names(sqlite_include_internal=True)
eq_(res, ["sqlite_sequence", "sqliteatable"])
res = insp.get_table_names()
eq_(res, ["sqliteatable"])
meta = MetaData()
meta.reflect(connection)
eq_(len(meta.tables), 1)
eq_(set(meta.tables), {"sqliteatable"})
def test_get_view_names(self, connection):
insp = inspect(connection)
res = insp.get_view_names(sqlite_include_internal=True)
eq_(res, ["sqliteview"])
res = insp.get_view_names()
eq_(res, ["sqliteview"])
def test_get_temp_table_names(self, connection, metadata):
Table(
"sqlitetemptable",
metadata,
Column("id", Integer, primary_key=True),
Column("other", String(42)),
sqlite_autoincrement=True,
prefixes=["TEMPORARY"],
).create(connection)
insp = inspect(connection)
res = insp.get_temp_table_names(sqlite_include_internal=True)
eq_(res, ["sqlite_sequence", "sqlitetemptable"])
res = insp.get_temp_table_names()
eq_(res, ["sqlitetemptable"])
def test_get_temp_view_names(self, connection):
view = (
"CREATE TEMPORARY VIEW sqlitetempview AS "
"SELECT * FROM sqliteatable"
)
connection.exec_driver_sql(view)
insp = inspect(connection)
try:
res = insp.get_temp_view_names(sqlite_include_internal=True)
eq_(res, ["sqlitetempview"])
res = insp.get_temp_view_names()
eq_(res, ["sqlitetempview"])
finally:
connection.exec_driver_sql("DROP VIEW sqlitetempview")
| ReflectInternalSchemaTables |
python | django__django | tests/serializers/models/data.py | {
"start": 5353,
"end": 5457
} | class ____(models.Model):
data = models.GenericIPAddressField(primary_key=True)
| GenericIPAddressPKData |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/typevar.py | {
"start": 272,
"end": 433
} | class ____:
def __init__(self, tainted: str) -> None:
self.tainted: str = tainted
def issue(foo: T) -> T:
_test_sink(foo.tainted)
return foo
| Foo |
python | django__django | tests/postgres_tests/test_ranges.py | {
"start": 14954,
"end": 20455
} | class ____(PostgreSQLTestCase):
def test_date_range(self):
objs = [
RangeLookupsModel.objects.create(date="2015-01-01"),
RangeLookupsModel.objects.create(date="2015-05-05"),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(
date__contained_by=DateRange("2015-01-01", "2015-05-04")
),
[objs[0]],
)
def test_date_range_datetime_field(self):
objs = [
RangeLookupsModel.objects.create(timestamp="2015-01-01"),
RangeLookupsModel.objects.create(timestamp="2015-05-05"),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(
timestamp__date__contained_by=DateRange("2015-01-01", "2015-05-04")
),
[objs[0]],
)
def test_datetime_range(self):
objs = [
RangeLookupsModel.objects.create(timestamp="2015-01-01T09:00:00"),
RangeLookupsModel.objects.create(timestamp="2015-05-05T17:00:00"),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(
timestamp__contained_by=DateTimeTZRange(
"2015-01-01T09:00", "2015-05-04T23:55"
)
),
[objs[0]],
)
def test_small_integer_field_contained_by(self):
objs = [
RangeLookupsModel.objects.create(small_integer=8),
RangeLookupsModel.objects.create(small_integer=4),
RangeLookupsModel.objects.create(small_integer=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(
small_integer__contained_by=NumericRange(4, 6)
),
[objs[1]],
)
def test_integer_range(self):
objs = [
RangeLookupsModel.objects.create(integer=5),
RangeLookupsModel.objects.create(integer=99),
RangeLookupsModel.objects.create(integer=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(integer__contained_by=NumericRange(1, 98)),
[objs[0]],
)
def test_biginteger_range(self):
objs = [
RangeLookupsModel.objects.create(big_integer=5),
RangeLookupsModel.objects.create(big_integer=99),
RangeLookupsModel.objects.create(big_integer=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(
big_integer__contained_by=NumericRange(1, 98)
),
[objs[0]],
)
def test_decimal_field_contained_by(self):
objs = [
RangeLookupsModel.objects.create(decimal_field=Decimal("1.33")),
RangeLookupsModel.objects.create(decimal_field=Decimal("2.88")),
RangeLookupsModel.objects.create(decimal_field=Decimal("99.17")),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(
decimal_field__contained_by=NumericRange(
Decimal("1.89"), Decimal("7.91")
),
),
[objs[1]],
)
def test_float_range(self):
objs = [
RangeLookupsModel.objects.create(float=5),
RangeLookupsModel.objects.create(float=99),
RangeLookupsModel.objects.create(float=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(float__contained_by=NumericRange(1, 98)),
[objs[0]],
)
def test_small_auto_field_contained_by(self):
objs = SmallAutoFieldModel.objects.bulk_create(
[SmallAutoFieldModel() for i in range(1, 5)]
)
self.assertSequenceEqual(
SmallAutoFieldModel.objects.filter(
id__contained_by=NumericRange(objs[1].pk, objs[3].pk),
),
objs[1:3],
)
def test_auto_field_contained_by(self):
objs = RangeLookupsModel.objects.bulk_create(
[RangeLookupsModel() for i in range(1, 5)]
)
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(
id__contained_by=NumericRange(objs[1].pk, objs[3].pk),
),
objs[1:3],
)
def test_big_auto_field_contained_by(self):
objs = BigAutoFieldModel.objects.bulk_create(
[BigAutoFieldModel() for i in range(1, 5)]
)
self.assertSequenceEqual(
BigAutoFieldModel.objects.filter(
id__contained_by=NumericRange(objs[1].pk, objs[3].pk),
),
objs[1:3],
)
def test_f_ranges(self):
parent = RangesModel.objects.create(decimals=NumericRange(0, 10))
objs = [
RangeLookupsModel.objects.create(float=5, parent=parent),
RangeLookupsModel.objects.create(float=99, parent=parent),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(float__contained_by=F("parent__decimals")),
[objs[0]],
)
def test_exclude(self):
objs = [
RangeLookupsModel.objects.create(float=5),
RangeLookupsModel.objects.create(float=99),
RangeLookupsModel.objects.create(float=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.exclude(float__contained_by=NumericRange(0, 100)),
[objs[2]],
)
| TestQueryingWithRanges |
python | sympy__sympy | sympy/stats/drv_types.py | {
"start": 12393,
"end": 14464
} | class ____(SingleDiscreteDistribution):
_argnames = ('lamda',)
set = S.Naturals0
@staticmethod
def check(lamda):
_value_check(lamda > 0, "Lambda must be positive")
def pdf(self, k):
return self.lamda**k / factorial(k) * exp(-self.lamda)
def _characteristic_function(self, t):
return exp(self.lamda * (exp(I*t) - 1))
def _moment_generating_function(self, t):
return exp(self.lamda * (exp(t) - 1))
def expectation(self, expr, var, evaluate=True, **kwargs):
if evaluate:
if expr == var:
return self.lamda
if (
isinstance(expr, FallingFactorial)
and expr.args[1].is_integer
and expr.args[1].is_positive
and expr.args[0] == var
):
return self.lamda ** expr.args[1]
return super().expectation(expr, var, evaluate, **kwargs)
def Poisson(name, lamda):
r"""
Create a discrete random variable with a Poisson distribution.
Explanation
===========
The density of the Poisson distribution is given by
.. math::
f(k) := \frac{\lambda^{k} e^{- \lambda}}{k!}
Parameters
==========
lamda : Positive number, a rate
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Poisson, density, E, variance
>>> from sympy import Symbol, simplify
>>> rate = Symbol("lambda", positive=True)
>>> z = Symbol("z")
>>> X = Poisson("x", rate)
>>> density(X)(z)
lambda**z*exp(-lambda)/factorial(z)
>>> E(X)
lambda
>>> simplify(variance(X))
lambda
References
==========
.. [1] https://en.wikipedia.org/wiki/Poisson_distribution
.. [2] https://mathworld.wolfram.com/PoissonDistribution.html
"""
return rv(name, PoissonDistribution, lamda)
# -----------------------------------------------------------------------------
# Skellam distribution --------------------------------------------------------
| PoissonDistribution |
python | pytransitions__transitions | transitions/extensions/states.py | {
"start": 6387,
"end": 9723
} | class ____(State):
"""The Retry mix-in sets a limit on the number of times a state may be
re-entered from itself.
The first time a state is entered it does not count as a retry. Thus with
`retries=3` the state can be entered four times before it fails.
When the retry limit is exceeded, the state is not entered and instead the
`on_failure` callback is invoked on the model. For example,
Retry(retries=3, on_failure='to_failed')
transitions the model directly to the 'failed' state, if the machine has
automatic transitions enabled (the default).
Attributes:
retries (int): Number of retries to allow before failing.
on_failure (str): Function to invoke on the model when the retry limit
is exceeded.
"""
def __init__(self, *args, **kwargs):
"""
Args:
**kwargs: If kwargs contains `retries`, then limit the number of times
the state may be re-entered from itself. The argument `on_failure`,
which is the function to invoke on the model when the retry limit
is exceeded, must also be provided.
"""
self.retries = kwargs.pop('retries', 0)
self.on_failure = kwargs.pop('on_failure', None)
self.retry_counts = Counter()
if self.retries > 0 and self.on_failure is None:
raise AttributeError("Retry state requires 'on_failure' when "
"'retries' is set.")
super(Retry, self).__init__(*args, **kwargs)
def enter(self, event_data):
k = id(event_data.model)
# If we are entering from a different state, then this is our first try;
# reset the retry counter.
if event_data.transition.source != self.name:
_LOGGER.debug('%sRetry limit for state %s reset (came from %s)',
event_data.machine.name, self.name,
event_data.transition.source)
self.retry_counts[k] = 0
# If we have tried too many times, invoke our failure callback instead
if self.retry_counts[k] > self.retries > 0:
_LOGGER.info('%sRetry count for state %s exceeded limit (%i)',
event_data.machine.name, self.name, self.retries)
event_data.machine.callback(self.on_failure, event_data)
return
# Otherwise, increment the retry count and continue per normal
_LOGGER.debug('%sRetry count for state %s is now %i',
event_data.machine.name, self.name, self.retry_counts[k])
self.retry_counts.update((k,))
super(Retry, self).enter(event_data)
def add_state_features(*args):
"""State feature decorator. Should be used in conjunction with a custom Machine class."""
def _class_decorator(cls):
class CustomState(type('CustomState', args, {}), cls.state_cls):
"""The decorated State. It is based on the State class used by the decorated Machine."""
method_list = sum([c.dynamic_methods for c in inspect.getmro(CustomState) if hasattr(c, 'dynamic_methods')], [])
CustomState.dynamic_methods = list(set(method_list))
cls.state_cls = CustomState
return cls
return _class_decorator
| Retry |
python | pydata__xarray | xarray/core/dataset_variables.py | {
"start": 348,
"end": 2054
} | class ____(Mapping[Any, "DataArray"]):
__slots__ = ("_dataset",)
def __init__(self, dataset: "Dataset"):
self._dataset = dataset
def __iter__(self) -> Iterator[Hashable]:
return (
key
for key in self._dataset._variables
if key not in self._dataset._coord_names
)
def __len__(self) -> int:
length = len(self._dataset._variables) - len(self._dataset._coord_names)
assert length >= 0, "something is wrong with Dataset._coord_names"
return length
def __contains__(self, key: Hashable) -> bool:
return key in self._dataset._variables and key not in self._dataset._coord_names
def __getitem__(self, key: Hashable) -> "DataArray":
if key not in self._dataset._coord_names:
return self._dataset[key]
raise KeyError(key)
def __repr__(self) -> str:
return formatting.data_vars_repr(self)
@property
def variables(self) -> Mapping[Hashable, Variable]:
all_variables = self._dataset.variables
return Frozen({k: all_variables[k] for k in self})
@property
def dtypes(self) -> Frozen[Hashable, np.dtype]:
"""Mapping from data variable names to dtypes.
Cannot be modified directly, but is updated when adding new variables.
See Also
--------
Dataset.dtype
"""
return self._dataset.dtypes
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython."""
return [
key
for key in self._dataset._ipython_key_completions_()
if key not in self._dataset._coord_names
]
| DataVariables |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 288547,
"end": 289196
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of RerequestCheckSuite"""
__schema__ = github_schema
__field_names__ = ("repository_id", "check_suite_id", "client_mutation_id")
repository_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="repositoryId")
"""The Node ID of the repository."""
check_suite_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="checkSuiteId")
"""The Node ID of the check suite."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| RerequestCheckSuiteInput |
python | kubernetes-client__python | kubernetes/client/models/v1_volume_attachment_spec.py | {
"start": 383,
"end": 5813
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'attacher': 'str',
'node_name': 'str',
'source': 'V1VolumeAttachmentSource'
}
attribute_map = {
'attacher': 'attacher',
'node_name': 'nodeName',
'source': 'source'
}
def __init__(self, attacher=None, node_name=None, source=None, local_vars_configuration=None): # noqa: E501
"""V1VolumeAttachmentSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._attacher = None
self._node_name = None
self._source = None
self.discriminator = None
self.attacher = attacher
self.node_name = node_name
self.source = source
@property
def attacher(self):
"""Gets the attacher of this V1VolumeAttachmentSpec. # noqa: E501
attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName(). # noqa: E501
:return: The attacher of this V1VolumeAttachmentSpec. # noqa: E501
:rtype: str
"""
return self._attacher
@attacher.setter
def attacher(self, attacher):
"""Sets the attacher of this V1VolumeAttachmentSpec.
attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName(). # noqa: E501
:param attacher: The attacher of this V1VolumeAttachmentSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and attacher is None: # noqa: E501
raise ValueError("Invalid value for `attacher`, must not be `None`") # noqa: E501
self._attacher = attacher
@property
def node_name(self):
"""Gets the node_name of this V1VolumeAttachmentSpec. # noqa: E501
nodeName represents the node that the volume should be attached to. # noqa: E501
:return: The node_name of this V1VolumeAttachmentSpec. # noqa: E501
:rtype: str
"""
return self._node_name
@node_name.setter
def node_name(self, node_name):
"""Sets the node_name of this V1VolumeAttachmentSpec.
nodeName represents the node that the volume should be attached to. # noqa: E501
:param node_name: The node_name of this V1VolumeAttachmentSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and node_name is None: # noqa: E501
raise ValueError("Invalid value for `node_name`, must not be `None`") # noqa: E501
self._node_name = node_name
@property
def source(self):
"""Gets the source of this V1VolumeAttachmentSpec. # noqa: E501
:return: The source of this V1VolumeAttachmentSpec. # noqa: E501
:rtype: V1VolumeAttachmentSource
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this V1VolumeAttachmentSpec.
:param source: The source of this V1VolumeAttachmentSpec. # noqa: E501
:type: V1VolumeAttachmentSource
"""
if self.local_vars_configuration.client_side_validation and source is None: # noqa: E501
raise ValueError("Invalid value for `source`, must not be `None`") # noqa: E501
self._source = source
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1VolumeAttachmentSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1VolumeAttachmentSpec):
return True
return self.to_dict() != other.to_dict()
| V1VolumeAttachmentSpec |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_permissions.py | {
"start": 1821,
"end": 1990
} | class ____:
@check_permission("fake_other_permission")
async def mutate(self, graphene_info: ResolveInfo, **_kwargs):
pass
| FakeOtherPermissionMutationAsync |
python | huggingface__transformers | tests/models/bridgetower/test_image_processing_bridgetower.py | {
"start": 1221,
"end": 3580
} | class ____:
def __init__(
self,
parent,
do_resize: bool = True,
size: dict[str, int] | None = None,
size_divisor: int = 32,
do_rescale: bool = True,
rescale_factor: int | float = 1 / 255,
do_normalize: bool = True,
do_center_crop: bool = True,
image_mean: float | list[float] | None = [0.48145466, 0.4578275, 0.40821073],
image_std: float | list[float] | None = [0.26862954, 0.26130258, 0.27577711],
do_pad: bool = True,
batch_size=7,
min_resolution=30,
max_resolution=400,
num_channels=3,
):
self.parent = parent
self.do_resize = do_resize
self.size = size if size is not None else {"shortest_edge": 288}
self.size_divisor = size_divisor
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.do_center_crop = do_center_crop
self.image_mean = image_mean
self.image_std = image_std
self.do_pad = do_pad
self.batch_size = batch_size
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def get_expected_values(self, image_inputs, batched=False):
return self.size["shortest_edge"], self.size["shortest_edge"]
def expected_output_image_shape(self, images):
height, width = self.get_expected_values(images, batched=True)
return self.num_channels, height, width
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
| BridgeTowerImageProcessingTester |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/attributes.py | {
"start": 4254,
"end": 17004
} | class ____(
_DeclarativeMapped[_T_co],
SQLORMExpression[_T_co],
interfaces.InspectionAttr,
interfaces.PropComparator[_T_co],
roles.JoinTargetRole,
roles.OnClauseRole,
sql_base.Immutable,
cache_key.SlotsMemoizedHasCacheKey,
util.MemoizedSlots,
EventTarget,
):
"""Base class for :term:`descriptor` objects that intercept
attribute events on behalf of a :class:`.MapperProperty`
object. The actual :class:`.MapperProperty` is accessible
via the :attr:`.QueryableAttribute.property`
attribute.
.. seealso::
:class:`.InstrumentedAttribute`
:class:`.MapperProperty`
:attr:`_orm.Mapper.all_orm_descriptors`
:attr:`_orm.Mapper.attrs`
"""
__slots__ = (
"class_",
"key",
"impl",
"comparator",
"property",
"parent",
"expression",
"_of_type",
"_extra_criteria",
"_slots_dispatch",
"_propagate_attrs",
"_doc",
)
is_attribute = True
dispatch: dispatcher[QueryableAttribute[_T_co]]
class_: _ExternalEntityType[Any]
key: str
parententity: _InternalEntityType[Any]
impl: _AttributeImpl
comparator: interfaces.PropComparator[_T_co]
_of_type: Optional[_InternalEntityType[Any]]
_extra_criteria: Tuple[ColumnElement[bool], ...]
_doc: Optional[str]
# PropComparator has a __visit_name__ to participate within
# traversals. Disambiguate the attribute vs. a comparator.
__visit_name__ = "orm_instrumented_attribute"
def __init__(
self,
class_: _ExternalEntityType[_O],
key: str,
parententity: _InternalEntityType[_O],
comparator: interfaces.PropComparator[_T_co],
impl: Optional[_AttributeImpl] = None,
of_type: Optional[_InternalEntityType[Any]] = None,
extra_criteria: Tuple[ColumnElement[bool], ...] = (),
):
self.class_ = class_
self.key = key
self._parententity = self.parent = parententity
# this attribute is non-None after mappers are set up, however in the
# interim class manager setup, there's a check for None to see if it
# needs to be populated, so we assign None here leaving the attribute
# in a temporarily not-type-correct state
self.impl = impl # type: ignore
assert comparator is not None
self.comparator = comparator
self._of_type = of_type
self._extra_criteria = extra_criteria
self._doc = None
manager = opt_manager_of_class(class_)
# manager is None in the case of AliasedClass
if manager:
# propagate existing event listeners from
# immediate superclass
for base in manager._bases:
if key in base:
self.dispatch._update(base[key].dispatch)
if base[key].dispatch._active_history:
self.dispatch._active_history = True # type: ignore
_cache_key_traversal = [
("key", visitors.ExtendedInternalTraversal.dp_string),
("_parententity", visitors.ExtendedInternalTraversal.dp_multi),
("_of_type", visitors.ExtendedInternalTraversal.dp_multi),
("_extra_criteria", visitors.InternalTraversal.dp_clauseelement_list),
]
def __reduce__(self) -> Any:
# this method is only used in terms of the
# sqlalchemy.ext.serializer extension
return (
_queryable_attribute_unreduce,
(
self.key,
self._parententity.mapper.class_,
self._parententity,
self._parententity.entity,
),
)
@property
def _impl_uses_objects(self) -> bool:
return self.impl.uses_objects
def get_history(
self, instance: Any, passive: PassiveFlag = PASSIVE_OFF
) -> History:
return self.impl.get_history(
instance_state(instance), instance_dict(instance), passive
)
@property
def info(self) -> _InfoType:
"""Return the 'info' dictionary for the underlying SQL element.
The behavior here is as follows:
* If the attribute is a column-mapped property, i.e.
:class:`.ColumnProperty`, which is mapped directly
to a schema-level :class:`_schema.Column` object, this attribute
will return the :attr:`.SchemaItem.info` dictionary associated
with the core-level :class:`_schema.Column` object.
* If the attribute is a :class:`.ColumnProperty` but is mapped to
any other kind of SQL expression other than a
:class:`_schema.Column`,
the attribute will refer to the :attr:`.MapperProperty.info`
dictionary associated directly with the :class:`.ColumnProperty`,
assuming the SQL expression itself does not have its own ``.info``
attribute (which should be the case, unless a user-defined SQL
construct has defined one).
* If the attribute refers to any other kind of
:class:`.MapperProperty`, including :class:`.Relationship`,
the attribute will refer to the :attr:`.MapperProperty.info`
dictionary associated with that :class:`.MapperProperty`.
* To access the :attr:`.MapperProperty.info` dictionary of the
:class:`.MapperProperty` unconditionally, including for a
:class:`.ColumnProperty` that's associated directly with a
:class:`_schema.Column`, the attribute can be referred to using
:attr:`.QueryableAttribute.property` attribute, as
``MyClass.someattribute.property.info``.
.. seealso::
:attr:`.SchemaItem.info`
:attr:`.MapperProperty.info`
"""
return self.comparator.info
parent: _InternalEntityType[Any]
"""Return an inspection instance representing the parent.
This will be either an instance of :class:`_orm.Mapper`
or :class:`.AliasedInsp`, depending upon the nature
of the parent entity which this attribute is associated
with.
"""
expression: ColumnElement[_T_co]
"""The SQL expression object represented by this
:class:`.QueryableAttribute`.
This will typically be an instance of a :class:`_sql.ColumnElement`
subclass representing a column expression.
"""
def _memoized_attr_expression(self) -> ColumnElement[_T]:
annotations: _AnnotationDict
# applies only to Proxy() as used by hybrid.
# currently is an exception to typing rather than feeding through
# non-string keys.
# ideally Proxy() would have a separate set of methods to deal
# with this case.
entity_namespace = self._entity_namespace
assert isinstance(entity_namespace, HasCacheKey)
if self.key is _UNKNOWN_ATTR_KEY:
annotations = {"entity_namespace": entity_namespace}
else:
annotations = {
"proxy_key": self.key,
"proxy_owner": self._parententity,
"entity_namespace": entity_namespace,
}
ce = self.comparator.__clause_element__()
try:
if TYPE_CHECKING:
assert isinstance(ce, ColumnElement)
anno = ce._annotate
except AttributeError as ae:
raise exc.InvalidRequestError(
'When interpreting attribute "%s" as a SQL expression, '
"expected __clause_element__() to return "
"a ClauseElement object, got: %r" % (self, ce)
) from ae
else:
return anno(annotations)
def _memoized_attr__propagate_attrs(self) -> _PropagateAttrsType:
# this suits the case in coercions where we don't actually
# call ``__clause_element__()`` but still need to get
# resolved._propagate_attrs. See #6558.
return util.immutabledict(
{
"compile_state_plugin": "orm",
"plugin_subject": self._parentmapper,
}
)
@property
def _entity_namespace(self) -> _InternalEntityType[Any]:
return self._parententity
@property
def _annotations(self) -> _AnnotationDict:
return self.__clause_element__()._annotations
def __clause_element__(self) -> ColumnElement[_T_co]:
return self.expression
@property
def _from_objects(self) -> List[FromClause]:
return self.expression._from_objects
def _bulk_update_tuples(
self, value: Any
) -> Sequence[Tuple[_DMLColumnArgument, Any]]:
"""Return setter tuples for a bulk UPDATE."""
return self.comparator._bulk_update_tuples(value)
def _bulk_dml_setter(self, key: str) -> Optional[Callable[..., Any]]:
"""return a callable that will process a bulk INSERT value"""
return self.comparator._bulk_dml_setter(key)
def adapt_to_entity(self, adapt_to_entity: AliasedInsp[Any]) -> Self:
assert not self._of_type
return self.__class__(
adapt_to_entity.entity,
self.key,
impl=self.impl,
comparator=self.comparator.adapt_to_entity(adapt_to_entity),
parententity=adapt_to_entity,
)
def of_type(self, entity: _EntityType[_T]) -> QueryableAttribute[_T]:
return QueryableAttribute(
self.class_,
self.key,
self._parententity,
impl=self.impl,
comparator=self.comparator.of_type(entity),
of_type=inspection.inspect(entity),
extra_criteria=self._extra_criteria,
)
def and_(
self, *clauses: _ColumnExpressionArgument[bool]
) -> QueryableAttribute[bool]:
if TYPE_CHECKING:
assert isinstance(self.comparator, RelationshipProperty.Comparator)
exprs = tuple(
coercions.expect(roles.WhereHavingRole, clause)
for clause in util.coerce_generator_arg(clauses)
)
return QueryableAttribute(
self.class_,
self.key,
self._parententity,
impl=self.impl,
comparator=self.comparator.and_(*exprs),
of_type=self._of_type,
extra_criteria=self._extra_criteria + exprs,
)
def _clone(self, **kw: Any) -> QueryableAttribute[_T]:
return QueryableAttribute(
self.class_,
self.key,
self._parententity,
impl=self.impl,
comparator=self.comparator,
of_type=self._of_type,
extra_criteria=self._extra_criteria,
)
def label(self, name: Optional[str]) -> Label[_T_co]:
return self.__clause_element__().label(name)
def operate(
self, op: OperatorType, *other: Any, **kwargs: Any
) -> ColumnElement[Any]:
return op(self.comparator, *other, **kwargs) # type: ignore[no-any-return] # noqa: E501
def reverse_operate(
self, op: OperatorType, other: Any, **kwargs: Any
) -> ColumnElement[Any]:
return op(other, self.comparator, **kwargs) # type: ignore[no-any-return] # noqa: E501
def hasparent(
self, state: InstanceState[Any], optimistic: bool = False
) -> bool:
return self.impl.hasparent(state, optimistic=optimistic) is not False
def _column_strategy_attrs(self) -> Sequence[QueryableAttribute[Any]]:
return (self,)
def __getattr__(self, key: str) -> Any:
try:
return util.MemoizedSlots.__getattr__(self, key)
except AttributeError:
pass
try:
return getattr(self.comparator, key)
except AttributeError as err:
raise AttributeError(
"Neither %r object nor %r object associated with %s "
"has an attribute %r"
% (
type(self).__name__,
type(self.comparator).__name__,
self,
key,
)
) from err
def __str__(self) -> str:
return f"{self.class_.__name__}.{self.key}"
def _memoized_attr_property(self) -> Optional[MapperProperty[Any]]:
return self.comparator.property
def _queryable_attribute_unreduce(
key: str,
mapped_class: Type[_O],
parententity: _InternalEntityType[_O],
entity: _ExternalEntityType[Any],
) -> Any:
# this method is only used in terms of the
# sqlalchemy.ext.serializer extension
if insp_is_aliased_class(parententity):
return entity._get_from_serialized(key, mapped_class, parententity)
else:
return getattr(entity, key)
| QueryableAttribute |
python | encode__django-rest-framework | tests/test_multitable_inheritance.py | {
"start": 157,
"end": 243
} | class ____(RESTFrameworkModel):
name1 = models.CharField(max_length=100)
| ParentModel |
python | apache__airflow | airflow-core/tests/unit/cli/commands/test_legacy_commands.py | {
"start": 1063,
"end": 1961
} | class ____:
@classmethod
def setup_class(cls):
cls.parser = cli_parser.get_parser()
def test_should_display_value(self, stderr_capture):
with pytest.raises(SystemExit) as ctx, stderr_capture as temp_stderr:
config_command.get_value(self.parser.parse_args(["webserver"]))
assert ctx.value.code == 2
assert (
"Command `airflow webserver` has been removed. "
"Please use `airflow api-server`" in temp_stderr.getvalue().strip()
)
def test_check_legacy_command(self):
mock_action = MagicMock()
mock_action._prog_prefix = "airflow"
with pytest.raises(
ArgumentError,
match="argument : Command `airflow webserver` has been removed. Please use `airflow api-server`",
):
check_legacy_command(mock_action, "webserver")
| TestCliDeprecatedCommandsValue |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 93465,
"end": 94210
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, api_key: str, start_date: Optional[str] = None):
r"""Airbyte Source for Close Com.
Documentation can be found at https://docs.airbyte.com/integrations/sources/close-com
Args:
name (str): The name of the destination.
api_key (str): Close.com API key (usually starts with 'api\\_'; find yours here).
start_date (Optional[str]): The start date to sync data. Leave blank for full sync. Format: YYYY-MM-DD.
"""
self.api_key = check.str_param(api_key, "api_key")
self.start_date = check.opt_str_param(start_date, "start_date")
super().__init__("Close Com", name)
| CloseComSource |
python | openai__openai-python | examples/realtime/push_to_talk_app.py | {
"start": 1700,
"end": 2051
} | class ____(Static):
"""A widget that shows the current audio recording status."""
is_recording = reactive(False)
@override
def render(self) -> str:
status = (
"🔴 Recording... (Press K to stop)" if self.is_recording else "⚪ Press K to start recording (Q to quit)"
)
return status
| AudioStatusIndicator |
python | donnemartin__interactive-coding-challenges | graphs_trees/graph/test_graph.py | {
"start": 18,
"end": 2840
} | class ____(unittest.TestCase):
def create_graph(self):
graph = Graph()
for key in range(0, 6):
graph.add_node(key)
return graph
def test_graph(self):
graph = self.create_graph()
graph.add_edge(0, 1, weight=5)
graph.add_edge(0, 5, weight=2)
graph.add_edge(1, 2, weight=3)
graph.add_edge(2, 3, weight=4)
graph.add_edge(3, 4, weight=5)
graph.add_edge(3, 5, weight=6)
graph.add_edge(4, 0, weight=7)
graph.add_edge(5, 4, weight=8)
graph.add_edge(5, 2, weight=9)
self.assertEqual(graph.nodes[0].adj_weights[graph.nodes[1].key], 5)
self.assertEqual(graph.nodes[0].adj_weights[graph.nodes[5].key], 2)
self.assertEqual(graph.nodes[1].adj_weights[graph.nodes[2].key], 3)
self.assertEqual(graph.nodes[2].adj_weights[graph.nodes[3].key], 4)
self.assertEqual(graph.nodes[3].adj_weights[graph.nodes[4].key], 5)
self.assertEqual(graph.nodes[3].adj_weights[graph.nodes[5].key], 6)
self.assertEqual(graph.nodes[4].adj_weights[graph.nodes[0].key], 7)
self.assertEqual(graph.nodes[5].adj_weights[graph.nodes[4].key], 8)
self.assertEqual(graph.nodes[5].adj_weights[graph.nodes[2].key], 9)
self.assertEqual(graph.nodes[0].incoming_edges, 1)
self.assertEqual(graph.nodes[1].incoming_edges, 1)
self.assertEqual(graph.nodes[2].incoming_edges, 2)
self.assertEqual(graph.nodes[3].incoming_edges, 1)
self.assertEqual(graph.nodes[4].incoming_edges, 2)
self.assertEqual(graph.nodes[5].incoming_edges, 2)
graph.nodes[0].remove_neighbor(graph.nodes[1])
self.assertEqual(graph.nodes[1].incoming_edges, 0)
graph.nodes[3].remove_neighbor(graph.nodes[4])
self.assertEqual(graph.nodes[4].incoming_edges, 1)
self.assertEqual(graph.nodes[0] < graph.nodes[1], True)
print('Success: test_graph')
def test_graph_undirected(self):
graph = self.create_graph()
graph.add_undirected_edge(0, 1, weight=5)
graph.add_undirected_edge(0, 5, weight=2)
graph.add_undirected_edge(1, 2, weight=3)
self.assertEqual(graph.nodes[0].adj_weights[graph.nodes[1].key], 5)
self.assertEqual(graph.nodes[1].adj_weights[graph.nodes[0].key], 5)
self.assertEqual(graph.nodes[0].adj_weights[graph.nodes[5].key], 2)
self.assertEqual(graph.nodes[5].adj_weights[graph.nodes[0].key], 2)
self.assertEqual(graph.nodes[1].adj_weights[graph.nodes[2].key], 3)
self.assertEqual(graph.nodes[2].adj_weights[graph.nodes[1].key], 3)
print('Success: test_graph_undirected')
def main():
test = TestGraph()
test.test_graph()
test.test_graph_undirected()
if __name__ == '__main__':
main()
| TestGraph |
python | ray-project__ray | ci/ray_ci/windows_builder_container.py | {
"start": 79,
"end": 1089
} | class ____(WindowsContainer):
def __init__(
self,
python_version: str,
upload: bool,
) -> None:
super().__init__(
"windowsbuild",
volumes=[
f"{os.path.abspath(os.environ.get('RAYCI_CHECKOUT_DIR'))}:{WORKDIR}",
],
)
self.python_version = python_version
self.upload = upload
def run(self) -> None:
cmds = [
"powershell ci/pipeline/fix-windows-container-networking.ps1",
# fix symlink issue across windows and linux
"git config --global core.symlinks true",
"git config --global core.autocrlf false",
"git clone . ray",
"cd ray",
# build wheel
f"export BUILD_ONE_PYTHON_ONLY={self.python_version}",
"./python/build-wheel-windows.sh",
]
if self.upload:
cmds += ["./ci/build/copy_build_artifacts.sh"]
self.run_script(cmds)
| WindowsBuilderContainer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solverHigherOrder5.py | {
"start": 5143,
"end": 5577
} | class ____(Protocol):
def __call__(self, a: T, b: T) -> T: ...
def func4(a: T, b: T) -> T:
return a
def test_12(p: Proto1) -> Proto1:
return p(func4, func4)
reveal_type(
identity((identity, identity)),
expected_text="tuple[(x: T(1)@identity) -> T(1)@identity, (x: T(2)@identity) -> T(2)@identity]",
)
reveal_type(
identity([identity]),
expected_text="list[(x: T(1)@identity) -> T(1)@identity]",
)
| Proto1 |
python | getsentry__sentry | src/sentry/web/frontend/debug/debug_resolved_in_release_email.py | {
"start": 464,
"end": 684
} | class ____(ActivityMailDebugView):
def get_activity(self, request: HttpRequest, event):
return {"type": ActivityType.SET_RESOLVED_IN_RELEASE.value, "data": {"version": ""}}
| DebugResolvedInReleaseUpcomingEmailView |
python | numpy__numpy | numpy/_typing/_dtype_like.py | {
"start": 1130,
"end": 3864
} | class ____(Protocol[_DTypeT_co]):
@property
def __numpy_dtype__(self, /) -> _DTypeT_co: ...
_SupportsDType: TypeAlias = _HasDType[_DTypeT] | _HasNumPyDType[_DTypeT]
# A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic`
_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]]
# Would create a dtype[np.void]
_VoidDTypeLike: TypeAlias = (
# If a tuple, then it can be either:
# - (flexible_dtype, itemsize)
# - (fixed_dtype, shape)
# - (base_dtype, new_dtype)
# But because `_DTypeLikeNested = Any`, the first two cases are redundant
# tuple[_DTypeLikeNested, int] | tuple[_DTypeLikeNested, _ShapeLike] |
tuple[_DTypeLikeNested, _DTypeLikeNested]
# [(field_name, field_dtype, field_shape), ...]
# The type here is quite broad because NumPy accepts quite a wide
# range of inputs inside the list; see the tests for some examples.
| list[Any]
# {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ..., 'itemsize': ...}
| _DTypeDict
)
# Aliases for commonly used dtype-like objects.
# Note that the precision of `np.number` subclasses is ignored herein.
_DTypeLikeBool: TypeAlias = type[bool] | _DTypeLike[np.bool] | _BoolCodes
_DTypeLikeInt: TypeAlias = (
type[int] | _DTypeLike[np.signedinteger] | _SignedIntegerCodes
)
_DTypeLikeUInt: TypeAlias = _DTypeLike[np.unsignedinteger] | _UnsignedIntegerCodes
_DTypeLikeFloat: TypeAlias = type[float] | _DTypeLike[np.floating] | _FloatingCodes
_DTypeLikeComplex: TypeAlias = (
type[complex] | _DTypeLike[np.complexfloating] | _ComplexFloatingCodes
)
_DTypeLikeComplex_co: TypeAlias = (
type[complex] | _DTypeLike[np.bool | np.number] | _BoolCodes | _NumberCodes
)
_DTypeLikeDT64: TypeAlias = _DTypeLike[np.timedelta64] | _TD64Codes
_DTypeLikeTD64: TypeAlias = _DTypeLike[np.datetime64] | _DT64Codes
_DTypeLikeBytes: TypeAlias = type[bytes] | _DTypeLike[np.bytes_] | _BytesCodes
_DTypeLikeStr: TypeAlias = type[str] | _DTypeLike[np.str_] | _StrCodes
_DTypeLikeVoid: TypeAlias = (
type[memoryview] | _DTypeLike[np.void] | _VoidDTypeLike | _VoidCodes
)
_DTypeLikeObject: TypeAlias = type[object] | _DTypeLike[np.object_] | _ObjectCodes
# Anything that can be coerced into numpy.dtype.
# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
DTypeLike: TypeAlias = _DTypeLike[Any] | _VoidDTypeLike | str
# NOTE: while it is possible to provide the dtype as a dict of
# dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`),
# this syntax is officially discouraged and
# therefore not included in the type-union defining `DTypeLike`.
#
# See https://github.com/numpy/numpy/issues/16891 for more details.
| _HasNumPyDType |
python | celery__celery | celery/worker/heartbeat.py | {
"start": 328,
"end": 2107
} | class ____:
"""Timer sending heartbeats at regular intervals.
Arguments:
timer (kombu.asynchronous.timer.Timer): Timer to use.
eventer (celery.events.EventDispatcher): Event dispatcher
to use.
interval (float): Time in seconds between sending
heartbeats. Default is 2 seconds.
"""
def __init__(self, timer, eventer, interval=None):
self.timer = timer
self.eventer = eventer
self.interval = float(interval or 2.0)
self.tref = None
# Make event dispatcher start/stop us when enabled/disabled.
self.eventer.on_enabled.add(self.start)
self.eventer.on_disabled.add(self.stop)
# Only send heartbeat_sent signal if it has receivers.
self._send_sent_signal = (
heartbeat_sent.send if heartbeat_sent.receivers else None)
def _send(self, event, retry=True):
if self._send_sent_signal is not None:
self._send_sent_signal(sender=self)
return self.eventer.send(event, freq=self.interval,
active=len(active_requests),
processed=all_total_count[0],
loadavg=load_average(),
retry=retry,
**SOFTWARE_INFO)
def start(self):
if self.eventer.enabled:
self._send('worker-online')
self.tref = self.timer.call_repeatedly(
self.interval, self._send, ('worker-heartbeat',),
)
def stop(self):
if self.tref is not None:
self.timer.cancel(self.tref)
self.tref = None
if self.eventer.enabled:
self._send('worker-offline', retry=False)
| Heart |
python | realpython__materials | django-diary/source_code_step_5/entries/views.py | {
"start": 348,
"end": 482
} | class ____(CreateView):
model = Entry
fields = ["title", "content"]
success_url = reverse_lazy("entry-list")
| EntryCreateView |
python | pallets__jinja | src/jinja2/nodes.py | {
"start": 32542,
"end": 32593
} | class ____(Stmt):
"""Continue a loop."""
| Continue |
python | TheAlgorithms__Python | graphs/graph_adjacency_matrix.py | {
"start": 7275,
"end": 22114
} | class ____(unittest.TestCase):
def __assert_graph_edge_exists_check(
self,
undirected_graph: GraphAdjacencyMatrix,
directed_graph: GraphAdjacencyMatrix,
edge: list[int],
) -> None:
assert undirected_graph.contains_edge(edge[0], edge[1])
assert undirected_graph.contains_edge(edge[1], edge[0])
assert directed_graph.contains_edge(edge[0], edge[1])
def __assert_graph_edge_does_not_exist_check(
self,
undirected_graph: GraphAdjacencyMatrix,
directed_graph: GraphAdjacencyMatrix,
edge: list[int],
) -> None:
assert not undirected_graph.contains_edge(edge[0], edge[1])
assert not undirected_graph.contains_edge(edge[1], edge[0])
assert not directed_graph.contains_edge(edge[0], edge[1])
def __assert_graph_vertex_exists_check(
self,
undirected_graph: GraphAdjacencyMatrix,
directed_graph: GraphAdjacencyMatrix,
vertex: int,
) -> None:
assert undirected_graph.contains_vertex(vertex)
assert directed_graph.contains_vertex(vertex)
def __assert_graph_vertex_does_not_exist_check(
self,
undirected_graph: GraphAdjacencyMatrix,
directed_graph: GraphAdjacencyMatrix,
vertex: int,
) -> None:
assert not undirected_graph.contains_vertex(vertex)
assert not directed_graph.contains_vertex(vertex)
def __generate_random_edges(
self, vertices: list[int], edge_pick_count: int
) -> list[list[int]]:
assert edge_pick_count <= len(vertices)
random_source_vertices: list[int] = random.sample(
vertices[0 : int(len(vertices) / 2)], edge_pick_count
)
random_destination_vertices: list[int] = random.sample(
vertices[int(len(vertices) / 2) :], edge_pick_count
)
random_edges: list[list[int]] = []
for source in random_source_vertices:
for dest in random_destination_vertices:
random_edges.append([source, dest])
return random_edges
def __generate_graphs(
self, vertex_count: int, min_val: int, max_val: int, edge_pick_count: int
) -> tuple[GraphAdjacencyMatrix, GraphAdjacencyMatrix, list[int], list[list[int]]]:
if max_val - min_val + 1 < vertex_count:
raise ValueError(
"Will result in duplicate vertices. Either increase "
"range between min_val and max_val or decrease vertex count"
)
# generate graph input
random_vertices: list[int] = random.sample(
range(min_val, max_val + 1), vertex_count
)
random_edges: list[list[int]] = self.__generate_random_edges(
random_vertices, edge_pick_count
)
# build graphs
undirected_graph = GraphAdjacencyMatrix(
vertices=random_vertices, edges=random_edges, directed=False
)
directed_graph = GraphAdjacencyMatrix(
vertices=random_vertices, edges=random_edges, directed=True
)
return undirected_graph, directed_graph, random_vertices, random_edges
def test_init_check(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
# test graph initialization with vertices and edges
for num in random_vertices:
self.__assert_graph_vertex_exists_check(
undirected_graph, directed_graph, num
)
for edge in random_edges:
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, edge
)
assert not undirected_graph.directed
assert directed_graph.directed
def test_contains_vertex(self) -> None:
random_vertices: list[int] = random.sample(range(101), 20)
# Build graphs WITHOUT edges
undirected_graph = GraphAdjacencyMatrix(
vertices=random_vertices, edges=[], directed=False
)
directed_graph = GraphAdjacencyMatrix(
vertices=random_vertices, edges=[], directed=True
)
# Test contains_vertex
for num in range(101):
assert (num in random_vertices) == undirected_graph.contains_vertex(num)
assert (num in random_vertices) == directed_graph.contains_vertex(num)
def test_add_vertices(self) -> None:
random_vertices: list[int] = random.sample(range(101), 20)
# build empty graphs
undirected_graph: GraphAdjacencyMatrix = GraphAdjacencyMatrix(
vertices=[], edges=[], directed=False
)
directed_graph: GraphAdjacencyMatrix = GraphAdjacencyMatrix(
vertices=[], edges=[], directed=True
)
# run add_vertex
for num in random_vertices:
undirected_graph.add_vertex(num)
for num in random_vertices:
directed_graph.add_vertex(num)
# test add_vertex worked
for num in random_vertices:
self.__assert_graph_vertex_exists_check(
undirected_graph, directed_graph, num
)
def test_remove_vertices(self) -> None:
random_vertices: list[int] = random.sample(range(101), 20)
# build graphs WITHOUT edges
undirected_graph = GraphAdjacencyMatrix(
vertices=random_vertices, edges=[], directed=False
)
directed_graph = GraphAdjacencyMatrix(
vertices=random_vertices, edges=[], directed=True
)
# test remove_vertex worked
for num in random_vertices:
self.__assert_graph_vertex_exists_check(
undirected_graph, directed_graph, num
)
undirected_graph.remove_vertex(num)
directed_graph.remove_vertex(num)
self.__assert_graph_vertex_does_not_exist_check(
undirected_graph, directed_graph, num
)
def test_add_and_remove_vertices_repeatedly(self) -> None:
random_vertices1: list[int] = random.sample(range(51), 20)
random_vertices2: list[int] = random.sample(range(51, 101), 20)
# build graphs WITHOUT edges
undirected_graph = GraphAdjacencyMatrix(
vertices=random_vertices1, edges=[], directed=False
)
directed_graph = GraphAdjacencyMatrix(
vertices=random_vertices1, edges=[], directed=True
)
# test adding and removing vertices
for i, _ in enumerate(random_vertices1):
undirected_graph.add_vertex(random_vertices2[i])
directed_graph.add_vertex(random_vertices2[i])
self.__assert_graph_vertex_exists_check(
undirected_graph, directed_graph, random_vertices2[i]
)
undirected_graph.remove_vertex(random_vertices1[i])
directed_graph.remove_vertex(random_vertices1[i])
self.__assert_graph_vertex_does_not_exist_check(
undirected_graph, directed_graph, random_vertices1[i]
)
# remove all vertices
for i, _ in enumerate(random_vertices1):
undirected_graph.remove_vertex(random_vertices2[i])
directed_graph.remove_vertex(random_vertices2[i])
self.__assert_graph_vertex_does_not_exist_check(
undirected_graph, directed_graph, random_vertices2[i]
)
def test_contains_edge(self) -> None:
# generate graphs and graph input
vertex_count = 20
(
undirected_graph,
directed_graph,
random_vertices,
random_edges,
) = self.__generate_graphs(vertex_count, 0, 100, 4)
# generate all possible edges for testing
all_possible_edges: list[list[int]] = []
for i in range(vertex_count - 1):
for j in range(i + 1, vertex_count):
all_possible_edges.append([random_vertices[i], random_vertices[j]])
all_possible_edges.append([random_vertices[j], random_vertices[i]])
# test contains_edge function
for edge in all_possible_edges:
if edge in random_edges:
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, edge
)
elif [edge[1], edge[0]] in random_edges:
# since this edge exists for undirected but the reverse may
# not exist for directed
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, [edge[1], edge[0]]
)
else:
self.__assert_graph_edge_does_not_exist_check(
undirected_graph, directed_graph, edge
)
def test_add_edge(self) -> None:
# generate graph input
random_vertices: list[int] = random.sample(range(101), 15)
random_edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
# build graphs WITHOUT edges
undirected_graph = GraphAdjacencyMatrix(
vertices=random_vertices, edges=[], directed=False
)
directed_graph = GraphAdjacencyMatrix(
vertices=random_vertices, edges=[], directed=True
)
# run and test add_edge
for edge in random_edges:
undirected_graph.add_edge(edge[0], edge[1])
directed_graph.add_edge(edge[0], edge[1])
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, edge
)
def test_remove_edge(self) -> None:
# generate graph input and graphs
(
undirected_graph,
directed_graph,
_random_vertices,
random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
# run and test remove_edge
for edge in random_edges:
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, edge
)
undirected_graph.remove_edge(edge[0], edge[1])
directed_graph.remove_edge(edge[0], edge[1])
self.__assert_graph_edge_does_not_exist_check(
undirected_graph, directed_graph, edge
)
def test_add_and_remove_edges_repeatedly(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
# make some more edge options!
more_random_edges: list[list[int]] = []
while len(more_random_edges) != len(random_edges):
edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
for edge in edges:
if len(more_random_edges) == len(random_edges):
break
elif edge not in more_random_edges and edge not in random_edges:
more_random_edges.append(edge)
for i, _ in enumerate(random_edges):
undirected_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1])
directed_graph.add_edge(more_random_edges[i][0], more_random_edges[i][1])
self.__assert_graph_edge_exists_check(
undirected_graph, directed_graph, more_random_edges[i]
)
undirected_graph.remove_edge(random_edges[i][0], random_edges[i][1])
directed_graph.remove_edge(random_edges[i][0], random_edges[i][1])
self.__assert_graph_edge_does_not_exist_check(
undirected_graph, directed_graph, random_edges[i]
)
def test_add_vertex_exception_check(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
_random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
for vertex in random_vertices:
with pytest.raises(ValueError):
undirected_graph.add_vertex(vertex)
with pytest.raises(ValueError):
directed_graph.add_vertex(vertex)
def test_remove_vertex_exception_check(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
_random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
for i in range(101):
if i not in random_vertices:
with pytest.raises(ValueError):
undirected_graph.remove_vertex(i)
with pytest.raises(ValueError):
directed_graph.remove_vertex(i)
def test_add_edge_exception_check(self) -> None:
(
undirected_graph,
directed_graph,
_random_vertices,
random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
for edge in random_edges:
with pytest.raises(ValueError):
undirected_graph.add_edge(edge[0], edge[1])
with pytest.raises(ValueError):
directed_graph.add_edge(edge[0], edge[1])
def test_remove_edge_exception_check(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
more_random_edges: list[list[int]] = []
while len(more_random_edges) != len(random_edges):
edges: list[list[int]] = self.__generate_random_edges(random_vertices, 4)
for edge in edges:
if len(more_random_edges) == len(random_edges):
break
elif edge not in more_random_edges and edge not in random_edges:
more_random_edges.append(edge)
for edge in more_random_edges:
with pytest.raises(ValueError):
undirected_graph.remove_edge(edge[0], edge[1])
with pytest.raises(ValueError):
directed_graph.remove_edge(edge[0], edge[1])
def test_contains_edge_exception_check(self) -> None:
(
undirected_graph,
directed_graph,
random_vertices,
_random_edges,
) = self.__generate_graphs(20, 0, 100, 4)
for vertex in random_vertices:
with pytest.raises(ValueError):
undirected_graph.contains_edge(vertex, 102)
with pytest.raises(ValueError):
directed_graph.contains_edge(vertex, 102)
with pytest.raises(ValueError):
undirected_graph.contains_edge(103, 102)
with pytest.raises(ValueError):
directed_graph.contains_edge(103, 102)
if __name__ == "__main__":
unittest.main()
| TestGraphMatrix |
python | doocs__leetcode | lcci/08.05.Recursive Mulitply/Solution.py | {
"start": 0,
"end": 221
} | class ____:
def multiply(self, A: int, B: int) -> int:
if B == 1:
return A
if B & 1:
return (self.multiply(A, B >> 1) << 1) + A
return self.multiply(A, B >> 1) << 1
| Solution |
python | aio-libs__aiohttp | examples/logging_middleware.py | {
"start": 1591,
"end": 5728
} | class ____:
"""Test server for logging middleware demo."""
async def handle_hello(self, request: web.Request) -> web.Response:
"""Simple hello endpoint."""
name = request.match_info.get("name", "World")
return web.json_response({"message": f"Hello, {name}!"})
async def handle_slow(self, request: web.Request) -> web.Response:
"""Endpoint that simulates slow response."""
delay = float(request.match_info.get("delay", 1))
await asyncio.sleep(delay)
return web.json_response({"message": "Slow response completed", "delay": delay})
async def handle_error(self, request: web.Request) -> web.Response:
"""Endpoint that returns an error."""
status = int(request.match_info.get("status", 500))
return web.Response(status=status, text=f"Error response with status {status}")
async def handle_json_data(self, request: web.Request) -> web.Response:
"""Endpoint that echoes JSON data."""
try:
data = await request.json()
return web.json_response({"echo": data, "received_at": time.time()})
except json.JSONDecodeError:
return web.json_response({"error": "Invalid JSON"}, status=400)
async def run_test_server() -> web.AppRunner:
"""Run a simple test server."""
app = web.Application()
server = TestServer()
app.router.add_get("/hello", server.handle_hello)
app.router.add_get("/hello/{name}", server.handle_hello)
app.router.add_get("/slow/{delay}", server.handle_slow)
app.router.add_get("/error/{status}", server.handle_error)
app.router.add_post("/echo", server.handle_json_data)
runner = web.AppRunner(app)
await runner.setup()
site = web.TCPSite(runner, "localhost", 8080)
await site.start()
return runner
async def run_tests() -> None:
"""Run all the middleware tests."""
# Create logging middleware
logging_middleware = LoggingMiddleware()
# Use middleware in session
async with ClientSession(middlewares=(logging_middleware,)) as session:
# Test 1: Simple GET request
print("\n=== Test 1: Simple GET request ===")
async with session.get("http://localhost:8080/hello") as resp:
data = await resp.json()
print(f"Response: {data}")
# Test 2: GET with parameter
print("\n=== Test 2: GET with parameter ===")
async with session.get("http://localhost:8080/hello/Alice") as resp:
data = await resp.json()
print(f"Response: {data}")
# Test 3: Slow request
print("\n=== Test 3: Slow request (2 seconds) ===")
async with session.get("http://localhost:8080/slow/2") as resp:
data = await resp.json()
print(f"Response: {data}")
# Test 4: Error response
print("\n=== Test 4: Error response ===")
async with session.get("http://localhost:8080/error/404") as resp:
text = await resp.text()
print(f"Response: {text}")
# Test 5: POST with JSON data
print("\n=== Test 5: POST with JSON data ===")
payload = {"name": "Bob", "age": 30, "city": "New York"}
async with session.post("http://localhost:8080/echo", json=payload) as resp:
data = await resp.json()
print(f"Response: {data}")
# Test 6: Multiple concurrent requests
print("\n=== Test 6: Multiple concurrent requests ===")
coros: list[Coroutine[Any, Any, ClientResponse]] = []
for i in range(3):
coro = session.get(f"http://localhost:8080/hello/User{i}")
coros.append(coro)
responses = await asyncio.gather(*coros)
for i, resp in enumerate(responses):
async with resp:
data = await resp.json()
print(f"Concurrent request {i}: {data}")
async def main() -> None:
# Start test server
server = await run_test_server()
try:
await run_tests()
finally:
# Cleanup server
await server.cleanup()
if __name__ == "__main__":
asyncio.run(main())
| TestServer |
python | tensorflow__tensorflow | tensorflow/python/keras/engine/base_layer_v1.py | {
"start": 96367,
"end": 97688
} | class ____(
collections.namedtuple('KerasHistory',
['layer', 'node_index', 'tensor_index'])):
"""Tracks the Layer call that created a Tensor, for Keras Graph Networks.
During construction of Keras Graph Networks, this metadata is added to
each Tensor produced as the output of a Layer, starting with an
`InputLayer`. This allows Keras to track how each Tensor was produced, and
this information is later retraced by the `keras.engine.Network` class to
reconstruct the Keras Graph Network.
Attributes:
layer: The Layer that produced the Tensor.
node_index: The specific call to the Layer that produced this Tensor. Layers
can be called multiple times in order to share weights. A new node is
created every time a Tensor is called.
tensor_index: The output index for this Tensor. Always zero if the Layer
that produced this Tensor only has one output. Nested structures of
Tensors are deterministically assigned an index via `nest.flatten`.
"""
# Added to maintain memory and performance characteristics of `namedtuple`
# while subclassing.
__slots__ = ()
# Avoid breaking users who directly import this symbol from this file.
# TODO(fchollet): remove this.
InputSpec = input_spec.InputSpec # pylint:disable=invalid-name
| KerasHistory |
python | kamyu104__LeetCode-Solutions | Python/check-if-there-is-a-valid-partition-for-the-array.py | {
"start": 34,
"end": 591
} | class ____(object):
def validPartition(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
dp = [False]*4
dp[0] = True
for i in xrange(len(nums)):
dp[(i+1)%4] = False
if i-1 >= 0 and nums[i] == nums[i-1]:
dp[(i+1)%4] |= dp[((i+1)-2)%4]
if i-2 >= 0 and (nums[i] == nums[i-1] == nums[i-2] or
nums[i] == nums[i-1]+1 == nums[i-2]+2):
dp[(i+1)%4] |= dp[((i+1)-3)%4]
return dp[len(nums)%4]
| Solution |
python | Netflix__metaflow | metaflow/_vendor/yaml/constructor.py | {
"start": 27622,
"end": 28580
} | class ____(FullConstructor):
def find_python_module(self, name, mark):
return super(UnsafeConstructor, self).find_python_module(name, mark, unsafe=True)
def find_python_name(self, name, mark):
return super(UnsafeConstructor, self).find_python_name(name, mark, unsafe=True)
def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
return super(UnsafeConstructor, self).make_python_instance(
suffix, node, args, kwds, newobj, unsafe=True)
def set_python_instance_state(self, instance, state):
return super(UnsafeConstructor, self).set_python_instance_state(
instance, state, unsafe=True)
UnsafeConstructor.add_multi_constructor(
'tag:yaml.org,2002:python/object/apply:',
UnsafeConstructor.construct_python_object_apply)
# Constructor is same as UnsafeConstructor. Need to leave this in place in case
# people have extended it directly.
| UnsafeConstructor |
python | doocs__leetcode | lcof2/剑指 Offer II 044. 二叉树每层的最大值/Solution.py | {
"start": 192,
"end": 689
} | class ____:
def largestValues(self, root: TreeNode) -> List[int]:
if root is None:
return []
q = deque([root])
ans = []
while q:
t = -inf
for _ in range(len(q)):
node = q.popleft()
t = max(t, node.val)
if node.left:
q.append(node.left)
if node.right:
q.append(node.right)
ans.append(t)
return ans
| Solution |
python | walkccc__LeetCode | solutions/336. Palindrome Pairs/336.py | {
"start": 0,
"end": 569
} | class ____:
def palindromePairs(self, words: list[str]) -> list[list[int]]:
ans = []
dict = {word[::-1]: i for i, word in enumerate(words)}
for i, word in enumerate(words):
if "" in dict and dict[""] != i and word == word[::-1]:
ans.append([i, dict[""]])
for j in range(1, len(word) + 1):
l = word[:j]
r = word[j:]
if l in dict and dict[l] != i and r == r[::-1]:
ans.append([i, dict[l]])
if r in dict and dict[r] != i and l == l[::-1]:
ans.append([dict[r], i])
return ans
| Solution |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 125506,
"end": 126007
} | class ____(TestCase):
def test_encode(self):
iterable = (int(str(n)[0]) for n in count(800))
actual = mi.take(4, mi.run_length.encode(iterable))
expected = [(8, 100), (9, 100), (1, 1000), (2, 1000)]
self.assertEqual(actual, expected)
def test_decode(self):
iterable = [('d', 4), ('c', 3), ('b', 2), ('a', 1)]
actual = ''.join(mi.run_length.decode(iterable))
expected = 'ddddcccbba'
self.assertEqual(actual, expected)
| RunLengthTest |
python | sqlalchemy__sqlalchemy | examples/versioned_rows/versioned_update_old_row.py | {
"start": 3780,
"end": 4688
} | class ____(VersionedStartEnd, Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True)
start = Column(DateTime, primary_key=True)
end = Column(DateTime, primary_key=True)
data = Column(String)
child_n = Column(Integer)
child = relationship(
"Child",
primaryjoin=("Child.id == foreign(Parent.child_n)"),
# note the primaryjoin can also be:
#
# "and_(Child.id == foreign(Parent.child_n), "
# "func.now().between(Child.start, Child.end))"
#
# however the before_compile() above will take care of this for us in
# all cases except for joinedload. You *can* use the above primaryjoin
# as well, it just means the criteria will be present twice for most
# parent->child load operations
#
uselist=False,
backref=backref("parent", uselist=False),
)
| Parent |
python | pypa__warehouse | tests/unit/search/test_tasks.py | {
"start": 16078,
"end": 21179
} | class ____:
def test_reindex_fails_when_raising(self, db_request, monkeypatch):
docs = pretend.stub()
task = pretend.stub()
db_request.registry.settings = {"celery.scheduler_url": "redis://redis:6379/0"}
def project_docs(db, project_name=None):
return docs
monkeypatch.setattr(warehouse.search.tasks, "_project_docs", project_docs)
es_client = FakeESClient()
db_request.registry.update(
{"opensearch.client": es_client, "opensearch.index": "warehouse"}
)
class TestError(Exception):
pass
def parallel_bulk(client, iterable, index=None):
assert client is es_client
assert iterable is docs
raise TestError
monkeypatch.setattr(warehouse.search.tasks, "parallel_bulk", parallel_bulk)
monkeypatch.setattr(warehouse.search.tasks, "SearchLock", NotLock)
with pytest.raises(TestError):
reindex_project(task, db_request, "foo")
assert es_client.indices.put_settings.calls == []
def test_unindex_fails_when_raising(self, db_request, monkeypatch):
task = pretend.stub()
db_request.registry.settings = {"celery.scheduler_url": "redis://redis:6379/0"}
class TestError(Exception):
pass
es_client = FakeESClient()
es_client.delete = pretend.raiser(TestError)
monkeypatch.setattr(warehouse.search.tasks, "SearchLock", NotLock)
db_request.registry.update(
{"opensearch.client": es_client, "opensearch.index": "warehouse"}
)
with pytest.raises(TestError):
unindex_project(task, db_request, "foo")
def test_unindex_accepts_defeat(self, db_request, monkeypatch):
task = pretend.stub()
db_request.registry.settings = {"celery.scheduler_url": "redis://redis:6379/0"}
es_client = FakeESClient()
es_client.delete = pretend.call_recorder(
pretend.raiser(opensearchpy.exceptions.NotFoundError)
)
monkeypatch.setattr(warehouse.search.tasks, "SearchLock", NotLock)
db_request.registry.update(
{"opensearch.client": es_client, "opensearch.index": "warehouse"}
)
unindex_project(task, db_request, "foo")
assert es_client.delete.calls == [pretend.call(index="warehouse", id="foo")]
def test_unindex_retry_on_lock(self, db_request, monkeypatch):
task = pretend.stub(
retry=pretend.call_recorder(pretend.raiser(celery.exceptions.Retry))
)
db_request.registry.settings = {"celery.scheduler_url": "redis://redis:6379/0"}
le = redis.exceptions.LockError("Failed to acquire lock")
monkeypatch.setattr(SearchLock, "acquire", pretend.raiser(le))
with pytest.raises(celery.exceptions.Retry):
unindex_project(task, db_request, "foo")
assert task.retry.calls == [pretend.call(countdown=60, exc=le)]
def test_reindex_retry_on_lock(self, db_request, monkeypatch):
task = pretend.stub(
retry=pretend.call_recorder(pretend.raiser(celery.exceptions.Retry))
)
db_request.registry.settings = {"celery.scheduler_url": "redis://redis:6379/0"}
le = redis.exceptions.LockError("Failed to acquire lock")
monkeypatch.setattr(SearchLock, "acquire", pretend.raiser(le))
with pytest.raises(celery.exceptions.Retry):
reindex_project(task, db_request, "foo")
assert task.retry.calls == [pretend.call(countdown=60, exc=le)]
def test_successfully_indexes(self, db_request, monkeypatch):
docs = pretend.stub()
task = pretend.stub()
db_request.registry.settings = {"celery.scheduler_url": "redis://redis:6379/0"}
def project_docs(db, project_name=None):
return docs
monkeypatch.setattr(warehouse.search.tasks, "_project_docs", project_docs)
es_client = FakeESClient()
es_client.indices.indices["warehouse-aaaaaaaaaa"] = None
es_client.indices.aliases["warehouse"] = ["warehouse-aaaaaaaaaa"]
db_engine = pretend.stub()
db_request.registry.update(
{
"opensearch.client": es_client,
"opensearch.index": "warehouse",
"opensearch.shards": 42,
"sqlalchemy.engine": db_engine,
}
)
parallel_bulk = pretend.call_recorder(
lambda client, iterable, index=None: [None]
)
monkeypatch.setattr(warehouse.search.tasks, "parallel_bulk", parallel_bulk)
monkeypatch.setattr(warehouse.search.tasks, "SearchLock", NotLock)
reindex_project(task, db_request, "foo")
assert parallel_bulk.calls == [pretend.call(es_client, docs, index="warehouse")]
assert es_client.indices.create.calls == []
assert es_client.indices.delete.calls == []
assert es_client.indices.aliases == {"warehouse": ["warehouse-aaaaaaaaaa"]}
assert es_client.indices.put_settings.calls == []
| TestPartialReindex |
python | pytorch__pytorch | torch/_dynamo/variables/misc.py | {
"start": 75816,
"end": 76625
} | class ____(VariableTracker):
"""random.Random"""
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
def call_function(self, tx: "InstructionTranslator", args, kwargs):
if len(args) > 1 or kwargs:
unimplemented(
gb_type="random.Random() with improper arguments",
context=f"args: {args}, kwargs: {kwargs}",
explanation="random.Random() with > 1 arg or with kwargs is not supported.",
hints=[
*graph_break_hints.USER_ERROR,
],
)
seed = variables.ConstantVariable.create(None) if len(args) == 0 else args[0]
return RandomVariable(
seed=seed, mutation_type=variables.base.ValueMutationNew()
)
| RandomClassVariable |
python | pandas-dev__pandas | doc/source/conf.py | {
"start": 17251,
"end": 19315
} | class ____(Documenter):
"""
Specialized Documenter subclass for objects on accessor level (methods,
attributes).
"""
# This is the simple straightforward version
# modname is None, base the last elements (eg 'hour')
# and path the part before (eg 'Series.dt')
# def resolve_name(self, modname, parents, path, base):
# modname = 'pandas'
# mod_cls = path.rstrip('.')
# mod_cls = mod_cls.split('.')
#
# return modname, mod_cls + [base]
def resolve_name(self, modname, parents, path, base):
if modname is None:
if path:
mod_cls = path.rstrip(".")
else:
mod_cls = None
# if documenting a class-level object without path,
# there must be a current class, either from a parent
# auto directive ...
mod_cls = self.env.temp_data.get("autodoc:class")
# ... or from a class directive
if mod_cls is None:
mod_cls = self.env.temp_data.get("py:class")
# ... if still None, there's no way to know
if mod_cls is None:
return None, []
# HACK: this is added in comparison to ClassLevelDocumenter
# mod_cls still exists of class.accessor, so an extra
# rpartition is needed
modname, _, accessor = mod_cls.rpartition(".")
modname, _, cls = modname.rpartition(".")
parents = [cls, accessor]
# if the module name is still missing, get it like above
if not modname:
modname = self.env.temp_data.get("autodoc:module")
if not modname:
if sphinx.__version__ > "1.3":
modname = self.env.ref_context.get("py:module")
else:
modname = self.env.temp_data.get("py:module")
# ... else, it stays None, which means invalid
return modname, parents + [base]
| AccessorLevelDocumenter |
python | gevent__gevent | src/gevent/tests/test__pywsgi.py | {
"start": 58794,
"end": 59950
} | class ____(TestCase):
_leak_wsgi_input = None
_leak_environ = None
def tearDown(self):
TestCase.tearDown(self)
self._leak_wsgi_input = None
self._leak_environ = None
def application(self, environ, start_response):
pi = environ["PATH_INFO"]
self._leak_wsgi_input = environ["wsgi.input"]
self._leak_environ = environ
if pi == "/leak-frame":
environ["_leak"] = sys._getframe(0)
text = b"foobar"
start_response('200 OK', [('Content-Length', str(len(text))), ('Content-Type', 'text/plain')])
return [text]
def test_connection_close_leak_simple(self):
with self.makefile() as fd:
fd.write(b"GET / HTTP/1.0\r\nConnection: close\r\n\r\n")
d = fd.read()
self.assertTrue(d.startswith(b"HTTP/1.1 200 OK"), d)
def test_connection_close_leak_frame(self):
with self.makefile() as fd:
fd.write(b"GET /leak-frame HTTP/1.0\r\nConnection: close\r\n\r\n")
d = fd.read()
self.assertTrue(d.startswith(b"HTTP/1.1 200 OK"), d)
self._leak_environ.pop('_leak')
| TestLeakInput |
python | facebook__pyre-check | client/command_arguments.py | {
"start": 3637,
"end": 6837
} | class ____:
changed_files_path: Optional[str] = None
debug: bool = False
enable_memory_profiling: bool = False
enable_profiling: bool = False
flavor: PyreFlavor = PyreFlavor.CLASSIC
load_initial_state_from: Optional[str] = None
_log_identifier: Optional[str] = None
logging_sections: Optional[str] = None
no_saved_state: bool = False
no_watchman: bool = False
noninteractive: bool = False
save_initial_state_to: Optional[str] = None
sequential: bool = False
show_error_traces: bool = False
store_type_check_resolution: bool = False
terminal: bool = False
wait_on_initialization: bool = False
skip_initial_type_check: bool = False
use_lazy_module_tracking: bool = False
analyze_external_sources: bool = False
number_of_buck_threads: Optional[int] = None
@staticmethod
def create(
command_argument: CommandArguments,
flavor: PyreFlavor = PyreFlavor.CLASSIC,
no_watchman: bool = False,
store_type_check_resolution: bool = False,
wait_on_initialization: bool = False,
terminal: bool = False,
skip_initial_type_check: bool = False,
use_lazy_module_tracking: bool = False,
analyze_external_sources: bool = False,
number_of_buck_threads: Optional[int] = None,
) -> StartArguments:
return StartArguments(
changed_files_path=command_argument.changed_files_path,
debug=command_argument.debug,
enable_memory_profiling=command_argument.enable_memory_profiling,
enable_profiling=command_argument.enable_profiling,
flavor=flavor,
load_initial_state_from=command_argument.load_initial_state_from,
_log_identifier=command_argument.log_identifier,
logging_sections=command_argument.logging_sections,
no_saved_state=command_argument.no_saved_state,
no_watchman=no_watchman,
noninteractive=command_argument.noninteractive,
save_initial_state_to=command_argument.save_initial_state_to,
sequential=command_argument.sequential,
show_error_traces=command_argument.show_error_traces,
store_type_check_resolution=store_type_check_resolution,
terminal=terminal,
wait_on_initialization=wait_on_initialization,
skip_initial_type_check=skip_initial_type_check,
use_lazy_module_tracking=use_lazy_module_tracking,
analyze_external_sources=analyze_external_sources,
number_of_buck_threads=number_of_buck_threads,
)
def get_log_identifier(self) -> str:
"""
If a log identifier was manually set (this is usually done specifically
to isolate telemetry, e.g. when running a performance experiment), we
use that.
Otherwise, we use the flavor. This keeps telemetry from various kinds
of language servers separate so that our metrics can distinguish them.
"""
if self._log_identifier is not None:
return self._log_identifier
else:
return self.flavor.value
@dataclass(frozen=True)
| StartArguments |
python | cython__cython | Cython/Compiler/Tests/TestCode.py | {
"start": 139,
"end": 2290
} | class ____(TestCase):
def _test_indentations(self, chunk, expected):
for indentation in range(16):
expected_indented = textwrap.indent(expected, ' ' * indentation)
for line in expected_indented.splitlines():
# Validate before the comparison that empty lines got stripped also by textwrap.indent().
self.assertTrue(line == '' or line.strip(), repr(line))
with self.subTest(indentation=indentation):
result = _indent_chunk(chunk, indentation_length=indentation)
self.assertEqual(expected_indented, result)
def test_indent_empty(self):
self._test_indentations('', '')
def test_indent_empty_lines(self):
self._test_indentations('\n', '\n')
self._test_indentations('\n'*2, '\n'*2)
self._test_indentations('\n'*3, '\n'*3)
self._test_indentations(' \n'*2, '\n'*2)
self._test_indentations('\n \n \n \n', '\n'*4)
def test_indent_one_line(self):
self._test_indentations('abc', 'abc')
def test_indent_chunk(self):
chunk = """
x = 1
if x == 2:
print("False")
else:
print("True")
"""
expected = """
x = 1
if x == 2:
print("False")
else:
print("True")
"""
self._test_indentations(chunk, expected)
def test_indent_empty_line(self):
chunk = """
x = 1
if x == 2:
print("False")
else:
print("True")
"""
expected = """
x = 1
if x == 2:
print("False")
else:
print("True")
"""
self._test_indentations(chunk, expected)
def test_indent_empty_line_unclean(self):
lines = """
x = 1
if x == 2:
print("False")
else:
print("True")
""".splitlines(keepends=True)
lines[2] = ' \n'
chunk = ''.join(lines)
expected = """
x = 1
if x == 2:
print("False")
else:
print("True")
"""
self._test_indentations(chunk, expected)
| TestIndent |
python | pypa__hatch | tests/backend/metadata/test_core.py | {
"start": 12164,
"end": 13547
} | class ____:
def test_dynamic(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"description": 9000, "dynamic": ["description"]}})
with pytest.raises(
ValueError,
match=(
"Metadata field `description` cannot be both statically defined and listed in field `project.dynamic`"
),
):
_ = metadata.core.description
def test_not_string(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"description": 9000}})
with pytest.raises(TypeError, match="Field `project.description` must be a string"):
_ = metadata.core.description
def test_default(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {}})
assert metadata.core.description == metadata.core.description == ""
def test_custom(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"description": "foo"}})
assert metadata.core.description == metadata.core.description == "foo"
def test_normaliza(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"description": "\nfirst line.\r\nsecond line"}})
assert metadata.core.description == metadata.core.description == " first line. second line"
| TestDescription |
python | celery__celery | t/unit/app/test_app.py | {
"start": 1434,
"end": 1586
} | class ____:
LEAVE_FOR_WORK = True
MOMENT_TO_STOP = True
CALL_ME_BACK = 123456789
WANT_ME_TO = False
UNDERSTAND_ME = True
| ObjectConfig2 |
python | huggingface__transformers | src/transformers/models/sam2_video/modular_sam2_video.py | {
"start": 27861,
"end": 43365
} | class ____(Sam2Processor):
r"""
Constructs a SAM2 processor which wraps a SAM2 image processor and an 2D points & Bounding boxes processor into a
single processor.
[`Sam2VideoProcessor`] offers all the functionalities of [`Sam2ImageProcessorFast`] and [`Sam2VideoProcessor`]. See the docstring of
[`~Sam2ImageProcessorFast.__call__`] and [`~Sam2VideoProcessor.__call__`] for more information.
Args:
image_processor (`Sam2ImageProcessorFast`):
An instance of [`Sam2ImageProcessorFast`].
video_processor (`Sam2VideoVideoProcessor`):
An instance of [`Sam2VideoVideoProcessor`].
target_size (`int`, *optional*):
The target size (target_size, target_size) to which the image will be resized.
point_pad_value (`int`, *optional*, defaults to -10):
The value used for padding input points.
"""
def __init__(
self, image_processor, video_processor, target_size: Optional[int] = None, point_pad_value: int = -10, **kwargs
):
ProcessorMixin.__init__(self, image_processor, video_processor, **kwargs)
self.point_pad_value = point_pad_value
self.target_size = target_size if target_size is not None else self.image_processor.size["height"]
def init_video_session(
self,
video: Optional[VideoInput] = None,
inference_device: Union[str, "torch.device"] = "cpu",
inference_state_device: Optional[Union[str, "torch.device"]] = None,
processing_device: Optional[Union[str, "torch.device"]] = None,
video_storage_device: Optional[Union[str, "torch.device"]] = None,
max_vision_features_cache_size: int = 1,
dtype: torch.dtype = torch.float32,
):
"""
Initializes a video session for inference.
If a video is provided (async inference), the video will be processed and stored on the `video_storage_device`.
Args:
video (`VideoInput`, *optional*):
The video to process. No need to provide when streaming.
inference_device (`str` or `torch.device`, *optional*, defaults to "cpu"):
The device to use for inference.
inference_state_device (`str` or `torch.device`, *optional*):
The device to store the inference state on.
processing_device (`str` or `torch.device`, *optional*):
The device to use for video processing.
video_storage_device (`str` or `torch.device`, *optional*):
The device to store the processed video frames on.
max_vision_features_cache_size (`int`, *optional*, defaults to 1):
The maximum number of vision features to cache.
dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
The torch dtype to use for the whole session.
"""
video_storage_device = video_storage_device if video_storage_device is not None else inference_device
inference_state_device = inference_state_device if inference_state_device is not None else inference_device
processing_device = processing_device if processing_device is not None else inference_device
pixel_values_video = None
video_height = None
video_width = None
if video is not None:
processed_video = self.video_processor(videos=video, device=processing_device, return_tensors="pt")
pixel_values_video = processed_video.pixel_values_videos[0]
video_height = processed_video.original_sizes[0][0]
video_width = processed_video.original_sizes[0][1]
inference_session = Sam2VideoInferenceSession(
video=pixel_values_video,
video_height=video_height,
video_width=video_width,
inference_device=inference_device,
video_storage_device=video_storage_device,
inference_state_device=inference_state_device,
dtype=dtype,
max_vision_features_cache_size=max_vision_features_cache_size,
)
return inference_session
def add_inputs_to_inference_session(
self,
inference_session: Sam2VideoInferenceSession,
frame_idx: int,
obj_ids: Union[list[int], int],
input_points: Optional[Union[list[list[list[list[float]]]], torch.Tensor]] = None,
input_labels: Optional[Union[list[list[list[int]]], torch.Tensor]] = None,
input_boxes: Optional[Union[list[list[list[float]]], torch.Tensor]] = None,
input_masks: Optional[Union[np.ndarray, torch.Tensor, list[np.ndarray], list[torch.Tensor]]] = None,
original_size: Optional[tuple[int, int]] = None,
clear_old_inputs: bool = True,
) -> Sam2VideoInferenceSession:
"""
Process new points, boxes, or masks for a video frame and add them to the inference session.
Args:
inference_session (`Sam2VideoInferenceSession`):
The inference session for the video.
frame_idx (`int`):
The index of the frame to process.
obj_ids (`list[int]` or `int`):
The object ID(s) to associate with the points or box.
These can be any integers and can be reused later on to specify an object.
input_points (`list[list[list[list[float]]]]`, `torch.Tensor`, *optional*):
The points to add to the frame.
input_labels (`list[list[list[int]]]`, `torch.Tensor`, *optional*):
The labels for the points.
input_boxes (`list[list[list[float]]]`, `torch.Tensor`, *optional*):
The bounding boxes to add to the frame.
input_masks (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, or `list[torch.Tensor]`, *optional*):
The mask(s) to add to the frame.
original_size (`tuple[int, int]`, *optional*):
The original size of the video. Provide when streaming.
clear_old_inputs (`bool`, *optional*, defaults to `True`):
Whether to clear old inputs for the object.
"""
if isinstance(obj_ids, int):
obj_ids = [obj_ids]
# Validate inputs
if (input_points is not None) != (input_labels is not None):
raise ValueError("points and labels must be provided together")
if input_points is None and input_boxes is None and input_masks is None:
raise ValueError("at least one of points, boxes, or masks must be provided as input")
if input_masks is not None and (input_points is not None or input_boxes is not None):
raise ValueError("masks cannot be provided together with points or boxes")
if input_masks is not None:
return self.process_new_mask_for_video_frame(inference_session, frame_idx, obj_ids, input_masks)
else:
return self.process_new_points_or_boxes_for_video_frame(
inference_session,
frame_idx,
obj_ids,
input_points,
input_labels,
input_boxes,
original_size,
clear_old_inputs,
)
def process_new_points_or_boxes_for_video_frame(
self,
inference_session: Sam2VideoInferenceSession,
frame_idx: int,
obj_ids: list[int],
input_points: Optional[Union[list[list[list[list[float]]]], torch.Tensor]] = None,
input_labels: Optional[Union[list[list[list[int]]], torch.Tensor]] = None,
input_boxes: Optional[Union[list[list[list[float]]], torch.Tensor]] = None,
original_size: Optional[tuple[int, int]] = None,
clear_old_inputs: bool = True,
) -> Sam2VideoInferenceSession:
"""
Process new points or boxes for a video frame and add them to the inference session.
Args:
inference_session (`Sam2VideoInferenceSession`):
The inference session for the video.
frame_idx (`int`):
The index of the frame to process.
obj_ids (`list[int]`):
The object ID(s) to associate with the points or box.
These can be any integers and can be reused later on to specify an object.
input_points (`list[list[list[list[float]]]]`, `torch.Tensor`, *optional*):
The points to add to the frame.
input_labels (`list[list[list[int]]]`, `torch.Tensor`, *optional*):
The labels for the points.
input_boxes (`list[list[list[float]]]`, `torch.Tensor`, *optional*):
The bounding boxes to add to the frame.
original_size (`tuple[int, int]`, *optional*):
The original size of the video. Provide when streaming.
clear_old_inputs (`bool`, *optional*, defaults to `True`):
Whether to clear old inputs for the object.
"""
if original_size is not None:
inference_session.video_height = original_size[0]
inference_session.video_width = original_size[1]
elif inference_session.video_height is None or inference_session.video_width is None:
raise ValueError("original_size must be provided when adding points or boxes on a first streamed frame")
original_sizes = [[inference_session.video_height, inference_session.video_width]]
encoded_inputs = self(
input_points=input_points,
input_labels=input_labels,
input_boxes=input_boxes,
original_sizes=original_sizes,
return_tensors="pt",
)
input_points = encoded_inputs.get("input_points", None)
input_labels = encoded_inputs.get("input_labels", None)
input_boxes = encoded_inputs.get("input_boxes", None)
if input_points is not None:
if input_points.shape[1] != len(obj_ids):
raise ValueError(
f"Number of object ids ({len(obj_ids)}) does not match number of points ({input_points.shape[1]})"
)
else:
input_points = torch.zeros(1, len(obj_ids), 0, 2, dtype=torch.float32)
if input_labels is not None:
if input_labels.shape[1] != len(obj_ids):
raise ValueError(
f"Number of object ids ({len(obj_ids)}) does not match number of labels ({input_labels.shape[1]})"
)
else:
input_labels = torch.zeros(1, len(obj_ids), 0, dtype=torch.int32)
if input_boxes is not None:
if input_boxes.shape[1] != len(obj_ids):
raise ValueError(
f"Number of object ids ({len(obj_ids)}) does not match number of boxes ({input_boxes.shape[1]})"
)
if input_boxes is not None:
if not clear_old_inputs:
raise ValueError(
"cannot add box without clearing old points, since "
"box prompt must be provided before any point prompt "
"(please use clear_old_points=True instead)"
)
box_coords = input_boxes.reshape(1, -1, 2, 2)
box_labels = torch.tensor([2, 3], dtype=torch.int32).repeat(1, box_coords.shape[1], 1)
input_points = torch.cat([box_coords, input_points], dim=2)
input_labels = torch.cat([box_labels, input_labels], dim=2)
for obj_id, idx in zip(obj_ids, range(len(obj_ids))):
obj_idx = inference_session.obj_id_to_idx(obj_id)
input_points_for_obj = input_points[:, idx, :, :].unsqueeze(1)
input_labels_for_obj = input_labels[:, idx, :].unsqueeze(1)
# Handle existing points
if not clear_old_inputs:
existing_points = inference_session.point_inputs_per_obj[obj_idx].get(frame_idx, None)
if existing_points is not None:
# Concatenate with existing points
input_points_for_obj = torch.cat(
[existing_points["point_coords"].to(input_points_for_obj.device), input_points_for_obj], dim=2
)
input_labels_for_obj = torch.cat(
[existing_points["point_labels"].to(input_labels_for_obj.device), input_labels_for_obj], dim=2
)
point_inputs = {
"point_coords": input_points_for_obj,
"point_labels": input_labels_for_obj,
}
inference_session.add_point_inputs(obj_idx, frame_idx, point_inputs)
inference_session.remove_mask_inputs(obj_idx, frame_idx) # Clear any mask inputs
inference_session.obj_with_new_inputs = obj_ids
def process_new_mask_for_video_frame(
self,
inference_session: Sam2VideoInferenceSession,
frame_idx: int,
obj_ids: list[int],
input_masks: Union[np.ndarray, torch.Tensor, list[np.ndarray], list[torch.Tensor]],
):
"""
Add new mask to a frame and add them to the inference session.
Args:
inference_session (`Sam2VideoInferenceSession`):
The inference session for the video.
frame_idx (`int`):
The index of the frame to process.
obj_ids (`list[int]`):
The object ID(s) to associate with the mask.
These can be any integers and can be reused later on to specify an object.
input_masks (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, or `list[torch.Tensor]`):
The mask(s) to add to the frame.
"""
if not isinstance(input_masks, list):
input_masks = [input_masks]
if len(input_masks) != len(obj_ids):
raise ValueError(
f"Number of object ids ({len(obj_ids)}) does not match number of masks ({len(input_masks)})"
)
for obj_id, mask in zip(obj_ids, input_masks):
obj_idx = inference_session.obj_id_to_idx(obj_id)
device = inference_session.inference_device
# Process mask
if not isinstance(mask, torch.Tensor):
mask = torch.tensor(mask, dtype=torch.bool)
nb_dim = mask.dim()
if nb_dim > 4 or nb_dim < 2:
raise ValueError(f"Mask has an unsupported number of dimensions: {nb_dim}")
for i in range(4 - nb_dim):
mask = mask.unsqueeze(0)
mask_H, mask_W = mask.shape[-2:]
mask_inputs_orig = mask.to(device)
mask_inputs_orig = mask_inputs_orig.float().to(device)
# Resize mask if needed
if mask_H != self.target_size or mask_W != self.target_size:
mask_inputs = torch.nn.functional.interpolate(
mask_inputs_orig,
size=(self.target_size, self.target_size),
align_corners=False,
mode="bilinear",
antialias=True,
)
mask_inputs = (mask_inputs >= 0.5).float()
else:
mask_inputs = mask_inputs_orig
inference_session.add_mask_inputs(obj_idx, frame_idx, mask_inputs)
inference_session.remove_point_inputs(obj_idx, frame_idx) # Clear any point inputs
inference_session.obj_with_new_inputs = obj_ids
| Sam2VideoProcessor |
python | doocs__leetcode | solution/1600-1699/1690.Stone Game VII/Solution2.py | {
"start": 0,
"end": 419
} | class ____:
def stoneGameVII(self, stones: List[int]) -> int:
s = list(accumulate(stones, initial=0))
n = len(stones)
f = [[0] * n for _ in range(n)]
for i in range(n - 2, -1, -1):
for j in range(i + 1, n):
a = s[j + 1] - s[i + 1] - f[i + 1][j]
b = s[j] - s[i] - f[i][j - 1]
f[i][j] = max(a, b)
return f[0][-1]
| Solution |
python | Pylons__pyramid | tests/test_encode.py | {
"start": 2072,
"end": 2842
} | class ____(unittest.TestCase):
def _callFUT(self, val, safe=''):
from pyramid.encode import url_quote
return url_quote(val, safe)
def test_it_bytes(self):
la = b'La/Pe\xc3\xb1a'
result = self._callFUT(la)
self.assertEqual(result, 'La%2FPe%C3%B1a')
def test_it_native(self):
la = text_(b'La/Pe\xc3\xb1a', 'utf-8')
result = self._callFUT(la)
self.assertEqual(result, 'La%2FPe%C3%B1a')
def test_it_with_safe(self):
la = b'La/Pe\xc3\xb1a'
result = self._callFUT(la, '/')
self.assertEqual(result, 'La/Pe%C3%B1a')
def test_it_with_nonstr_nonbinary(self):
la = None
result = self._callFUT(la, '/')
self.assertEqual(result, 'None')
| URLQuoteTests |
python | huggingface__transformers | tests/models/phi/test_modeling_phi.py | {
"start": 1732,
"end": 5879
} | class ____(unittest.TestCase):
def test_model_phi_1_logits(self):
input_ids = {
"input_ids": torch.tensor(
[[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device
)
}
model = PhiForCausalLM.from_pretrained("microsoft/phi-1").to(torch_device)
model.eval()
output = model(**input_ids).logits
EXPECTED_OUTPUT = torch.tensor([[2.2671, 6.7684, -2.0107, -1.2440, -1.5335, -2.3828, 6.9186, 6.4245, 3.1548, 0.9998, 0.0760, 4.4653, 4.9857, 4.2956, 1.2308, -1.4178, 0.1361, 0.5191, -0.5699, -2.2201, -3.0750, -3.9600, -4.5936, -3.7394, -2.7777, 6.1874, -0.4148, -1.5684, -0.5967, 0.2395], [1.7004, 4.0383, 0.0546, 0.4530, -0.3619, -0.9021, 1.8355, 1.3587, 1.2406, 2.5775, -0.8834, 5.1910, 4.2565, 4.1406, 3.0752, -0.9099, 1.1595, 0.0264, 0.3243, -1.1803, -1.3945, -2.1406, -3.9939, -1.4438, -2.9546, 3.9204, 1.0851, -1.0598, -1.7819, -0.4827]]).to(torch_device) # fmt: skip
torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4)
def test_model_phi_1_5_logits(self):
input_ids = {
"input_ids": torch.tensor(
[[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device
)
}
model = PhiForCausalLM.from_pretrained("microsoft/phi-1_5").to(torch_device)
model.eval()
output = model(**input_ids).logits
EXPECTED_OUTPUT = torch.tensor([[12.2922, 13.3507, 8.6963, 9.1355, 9.3502, 9.2667, 14.2027, 13.1363, 13.5446, 11.1337, 9.9279, 16.7195, 13.0768, 14.9141, 11.9965, 8.0233, 10.3129, 10.6118, 10.0204, 9.3827, 8.8344, 8.2806, 8.0153, 8.0540, 7.0964, 16.5743, 11.1256, 9.6987, 11.4770, 10.5440], [12.3323, 14.6050, 8.9986, 8.1580, 9.5654, 6.6728, 12.5966, 12.6662, 12.2784, 11.7522, 8.2039, 16.3102, 11.2203, 13.6088, 12.0125, 9.1021, 9.8216, 10.0987, 9.0926, 8.4260, 8.8009, 7.6547, 6.8075, 7.7881, 7.4501, 15.7451, 10.5053, 8.3129, 10.0027, 9.2612]]).to(torch_device) # fmt: skip
torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-4, atol=1e-4)
def test_model_phi_2_logits(self):
input_ids = {
"input_ids": torch.tensor(
[[1212, 318, 281, 1672, 2643, 290, 428, 318, 257, 1332]], dtype=torch.long, device=torch_device
)
}
model = PhiForCausalLM.from_pretrained("microsoft/phi-2").to(torch_device)
model.eval()
output = model(**input_ids).logits
EXPECTED_OUTPUT = torch.tensor([[6.4830, 6.1644, 3.4055, 2.2848, 5.4654, 2.8360, 5.5975, 5.5391, 7.3101, 4.2498, 2.5913, 10.3885, 6.4359, 8.7982, 5.6534, 0.5150, 2.7498, 3.1930, 2.4334, 1.7781, 1.5613, 1.3067, 0.8291, 0.5633, 0.6522, 9.8191, 5.5771, 2.7987, 4.2845, 3.7030], [6.0642, 7.8242, 3.4634, 1.9259, 4.3169, 2.0913, 6.0446, 3.6804, 6.6736, 4.0727, 2.1791, 11.4139, 5.6795, 7.5652, 6.2039, 2.7174, 4.3266, 3.6930, 2.8058, 2.6721, 2.3047, 2.0848, 2.0972, 2.0441, 1.3160, 9.2085, 4.5557, 3.0296, 2.6045, 2.4059]]).to(torch_device) # fmt: skip
torch.testing.assert_close(EXPECTED_OUTPUT, output[0, :2, :30], rtol=1e-3, atol=1e-3)
def test_phi_2_generation(self):
model = PhiForCausalLM.from_pretrained("microsoft/phi-2")
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")
inputs = tokenizer(
"Can you help me write a formal email to a potential business partner proposing a joint venture?",
return_tensors="pt",
return_attention_mask=False,
)
outputs = model.generate(**inputs, max_new_tokens=30)
output_text = tokenizer.batch_decode(outputs)
EXPECTED_OUTPUT = [
"Can you help me write a formal email to a potential business partner proposing a joint venture?\nInput: Company A: ABC Inc.\nCompany B: XYZ Ltd.\nJoint Venture: A new online platform for e-commerce"
]
self.assertListEqual(output_text, EXPECTED_OUTPUT)
| PhiIntegrationTest |
python | ray-project__ray | python/ray/util/iter_metrics.py | {
"start": 136,
"end": 1509
} | class ____:
"""Metrics context object for a local iterator.
This object is accessible by all operators of a local iterator. It can be
used to store and retrieve global execution metrics for the iterator.
It can be accessed by calling LocalIterator.get_metrics(), which is only
allowable inside iterator functions.
Attributes:
counters: dict storing increasing metrics.
timers: dict storing latency timers.
info: dict storing misc metric values.
current_actor: reference to the actor handle that
produced the current iterator output. This is automatically set
for gather_async().
"""
def __init__(self):
self.counters = collections.defaultdict(int)
self.timers = collections.defaultdict(_Timer)
self.info = {}
self.current_actor = None
def save(self):
"""Return a serializable copy of this context."""
return {
"counters": dict(self.counters),
"info": dict(self.info),
"timers": None, # TODO(ekl) consider persisting timers too
}
def restore(self, values):
"""Restores state given the output of save()."""
self.counters.clear()
self.counters.update(values["counters"])
self.timers.clear()
self.info = values["info"]
@Deprecated
| MetricsContext |
python | kubernetes-client__python | kubernetes/base/leaderelection/electionconfig.py | {
"start": 656,
"end": 2176
} | class ____:
# Validate config, exit if an error is detected
def __init__(self, lock, lease_duration, renew_deadline, retry_period, onstarted_leading, onstopped_leading):
self.jitter_factor = 1.2
if lock is None:
sys.exit("lock cannot be None")
self.lock = lock
if lease_duration <= renew_deadline:
sys.exit("lease_duration must be greater than renew_deadline")
if renew_deadline <= self.jitter_factor * retry_period:
sys.exit("renewDeadline must be greater than retry_period*jitter_factor")
if lease_duration < 1:
sys.exit("lease_duration must be greater than one")
if renew_deadline < 1:
sys.exit("renew_deadline must be greater than one")
if retry_period < 1:
sys.exit("retry_period must be greater than one")
self.lease_duration = lease_duration
self.renew_deadline = renew_deadline
self.retry_period = retry_period
if onstarted_leading is None:
sys.exit("callback onstarted_leading cannot be None")
self.onstarted_leading = onstarted_leading
if onstopped_leading is None:
self.onstopped_leading = self.on_stoppedleading_callback
else:
self.onstopped_leading = onstopped_leading
# Default callback for when the current candidate if a leader, stops leading
def on_stoppedleading_callback(self):
logging.info("stopped leading".format(self.lock.identity))
| Config |
python | sympy__sympy | sympy/sets/fancysets.py | {
"start": 46248,
"end": 47627
} | class ____(ComplexRegion):
r"""
Set representing a polar region of the complex plane.
.. math:: Z = \{z \in \mathbb{C} \mid z = r\times (\cos(\theta) + I\sin(\theta)), r \in [\texttt{r}], \theta \in [\texttt{theta}]\}
Examples
========
>>> from sympy import ComplexRegion, Interval, oo, pi, I
>>> rset = Interval(0, oo)
>>> thetaset = Interval(0, pi)
>>> upper_half_plane = ComplexRegion(rset * thetaset, polar=True)
>>> 1 + I in upper_half_plane
True
>>> 1 - I in upper_half_plane
False
See also
========
ComplexRegion
CartesianComplexRegion
Complexes
"""
polar = True
variables = symbols('r, theta', cls=Dummy)
def __new__(cls, sets):
new_sets = []
# sets is Union of ProductSets
if not sets.is_ProductSet:
for k in sets.args:
new_sets.append(k)
# sets is ProductSets
else:
new_sets.append(sets)
# Normalize input theta
for k, v in enumerate(new_sets):
new_sets[k] = ProductSet(v.args[0],
normalize_theta_set(v.args[1]))
sets = Union(*new_sets)
return Set.__new__(cls, sets)
@property
def expr(self):
r, theta = self.variables
return r*(cos(theta) + S.ImaginaryUnit*sin(theta))
| PolarComplexRegion |
python | getsentry__sentry | src/sentry/grouping/strategies/base.py | {
"start": 11533,
"end": 19178
} | class ____:
id: str | None
base: type[StrategyConfiguration] | None = None
strategies: dict[str, Strategy[Any]] = {}
delegates: dict[str, Strategy[Any]] = {}
initial_context: ContextDict = {}
enhancements_base: str | None = DEFAULT_ENHANCEMENTS_BASE
fingerprinting_bases: Sequence[str] | None = DEFAULT_GROUPING_FINGERPRINTING_BASES
def __init__(self, base64_enhancements: str | None = None):
if base64_enhancements is None:
enhancements_config = EnhancementsConfig.from_rules_text("", referrer="strategy_config")
else:
# If the enhancements string has been loaded from an existing event, it may be from an
# obsolete enhancements version, in which case we just use the default enhancements for
# this grouping config
try:
enhancements_config = EnhancementsConfig.from_base64_string(
base64_enhancements, referrer="strategy_config"
)
except InvalidEnhancerConfig:
enhancements_config = ENHANCEMENT_BASES[
self.enhancements_base or DEFAULT_ENHANCEMENTS_BASE
]
self.enhancements = enhancements_config
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.id!r}>"
def iter_strategies(self) -> Iterator[Strategy[Any]]:
"""Iterates over all strategies by highest score to lowest."""
return iter(sorted(self.strategies.values(), key=lambda x: -x.score if x.score else 0))
@classmethod
def as_dict(cls) -> dict[str, Any]:
return {
"id": cls.id,
"base": cls.base.id if cls.base else None,
"strategies": sorted(cls.strategies),
"delegates": sorted(x.id for x in cls.delegates.values()),
}
def create_strategy_configuration_class(
id: str,
strategies: Sequence[str] | None = None,
delegates: Sequence[str] | None = None,
base: type[StrategyConfiguration] | None = None,
initial_context: ContextDict | None = None,
enhancements_base: str | None = None,
fingerprinting_bases: Sequence[str] | None = None,
) -> type[StrategyConfiguration]:
"""Declares a new strategy configuration class.
Values can be inherited from a base configuration. For strategies if there is
a strategy of the same class it's replaced. For delegates if there is a
delegation for the same interface it's replaced.
It's impossible to remove a strategy of a class when a base is declared (same
for delegates).
"""
class NewStrategyConfiguration(StrategyConfiguration):
pass
NewStrategyConfiguration.id = id
NewStrategyConfiguration.base = base
NewStrategyConfiguration.strategies = dict(base.strategies) if base else {}
NewStrategyConfiguration.delegates = dict(base.delegates) if base else {}
NewStrategyConfiguration.initial_context = dict(base.initial_context) if base else {}
NewStrategyConfiguration.enhancements_base = base.enhancements_base if base else None
if base and base.fingerprinting_bases is not None:
NewStrategyConfiguration.fingerprinting_bases = list(base.fingerprinting_bases)
else:
NewStrategyConfiguration.fingerprinting_bases = None
by_class: dict[str, list[str]] = {}
for strategy in NewStrategyConfiguration.strategies.values():
by_class.setdefault(strategy.strategy_class, []).append(strategy.id)
for strategy_id in strategies or {}:
strategy = lookup_strategy(strategy_id)
if strategy.score is None:
raise RuntimeError(f"Unscored strategy {strategy_id} added to {id}")
for old_id in by_class.get(strategy.strategy_class) or ():
NewStrategyConfiguration.strategies.pop(old_id, None)
NewStrategyConfiguration.strategies[strategy_id] = strategy
new_delegates = set()
for strategy_id in delegates or ():
strategy = lookup_strategy(strategy_id)
if strategy.interface_name in new_delegates:
raise RuntimeError(
"duplicate interface match for "
"delegate %r (conflict on %r)" % (id, strategy.interface_name)
)
NewStrategyConfiguration.delegates[strategy.interface_name] = strategy
new_delegates.add(strategy.interface_name)
if initial_context:
NewStrategyConfiguration.initial_context.update(initial_context)
if enhancements_base:
NewStrategyConfiguration.enhancements_base = enhancements_base
if fingerprinting_bases:
NewStrategyConfiguration.fingerprinting_bases = fingerprinting_bases
NewStrategyConfiguration.__name__ = "StrategyConfiguration(%s)" % id
return NewStrategyConfiguration
def produces_variants(
variants: Sequence[str],
) -> Callable[[StrategyFunc[ConcreteInterface]], StrategyFunc[ConcreteInterface]]:
"""
A grouping strategy can either:
- be told by the caller which variant to generate
- determine its own variants
In the latter case, use this decorator to produce variants and eliminate
duplicate hashes.
Syntax::
# call decorated function twice with different variant values
# (returning a new variant dictionary)
#
# Return value is a dictionary of `{"system": ..., "app": ...}`.
@produces_variants(["system", "app"])
# discard app variant if system variant produces the same hash, or if
# the function returned None when invoked with `context['variant'] ==
# 'system'`. The actual logic for discarding is within
# `Component.get_grouping_component_variants`, so hashes are compared
# at the outermost level of the tree.
#
# Return value is a dictionary of `{"!system": ..., "app": ...}`,
# however function is still called with `"system"` as
# `context["variant_name"]`.
@produces_variants(["!system", "app"])
"""
def decorator(
strategy_func: StrategyFunc[ConcreteInterface],
) -> StrategyFunc[ConcreteInterface]:
def inner(*args: Any, **kwargs: Any) -> ComponentsByVariant:
return call_with_variants(strategy_func, variants, *args, **kwargs)
return inner
return decorator
def call_with_variants(
strategy_func: Callable[..., ComponentsByVariant],
variants_to_produce: Sequence[str],
*args: Any,
**kwargs: Any,
) -> ComponentsByVariant:
context = kwargs["context"]
incoming_variant_name = context.get("variant_name")
if incoming_variant_name is not None:
# For the case where the variant is already determined, we act as a delegate strategy. To
# ensure the function can deal with the given value, we assert the variant name is one
# of our own.
#
# Note that this branch is not currently used by any strategies.
assert (
incoming_variant_name in variants_to_produce
or "!" + incoming_variant_name in variants_to_produce
)
return strategy_func(*args, **kwargs)
components_by_variant = {}
for variant_name in variants_to_produce:
with context:
stripped_variant_name = variant_name.lstrip("!")
context["variant_name"] = stripped_variant_name
components_by_stripped_variant = strategy_func(*args, **kwargs)
assert len(components_by_stripped_variant) == 1
component = components_by_stripped_variant[stripped_variant_name]
components_by_variant[variant_name] = component
return components_by_variant
| StrategyConfiguration |
python | walkccc__LeetCode | solutions/3205. Maximum Array Hopping Score I/3205.py | {
"start": 0,
"end": 359
} | class ____:
def maxScore(self, nums: list[int]) -> int:
n = len(nums)
# dp[i] := the maximum score to jump from index i to n - 1
dp = [0] * n
for i in range(n - 1, -1, -1):
for j in range(i + 1, n):
# Jump from i to j, and then jump from j to n - 1.
dp[i] = max(dp[i], (j - i) * nums[j] + dp[j])
return dp[0]
| Solution |
python | django__django | tests/model_formsets_regress/tests.py | {
"start": 472,
"end": 10620
} | class ____(TestCase):
def test_formset_over_to_field(self):
"""
A formset over a ForeignKey with a to_field can be saved.
"""
Form = modelform_factory(User, fields="__all__")
FormSet = inlineformset_factory(User, UserSite, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a form with no data
form = Form()
form_set = FormSet(instance=User())
# Now create a new User and UserSite instance
data = {
"serial": "1",
"username": "apollo13",
"usersite_set-TOTAL_FORMS": "1",
"usersite_set-INITIAL_FORMS": "0",
"usersite_set-MAX_NUM_FORMS": "0",
"usersite_set-0-data": "10",
"usersite_set-0-user": "apollo13",
}
user = User()
form = Form(data)
if form.is_valid():
user = form.save()
else:
self.fail("Errors found on form:%s" % form_set)
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.values()
self.assertEqual(usersite[0]["data"], 10)
self.assertEqual(usersite[0]["user_id"], "apollo13")
else:
self.fail("Errors found on formset:%s" % form_set.errors)
# Now update the UserSite instance
data = {
"usersite_set-TOTAL_FORMS": "1",
"usersite_set-INITIAL_FORMS": "1",
"usersite_set-MAX_NUM_FORMS": "0",
"usersite_set-0-id": str(usersite[0]["id"]),
"usersite_set-0-data": "11",
"usersite_set-0-user": "apollo13",
}
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.values()
self.assertEqual(usersite[0]["data"], 11)
self.assertEqual(usersite[0]["user_id"], "apollo13")
else:
self.fail("Errors found on formset:%s" % form_set.errors)
# Now add a new UserSite instance
data = {
"usersite_set-TOTAL_FORMS": "2",
"usersite_set-INITIAL_FORMS": "1",
"usersite_set-MAX_NUM_FORMS": "0",
"usersite_set-0-id": str(usersite[0]["id"]),
"usersite_set-0-data": "11",
"usersite_set-0-user": "apollo13",
"usersite_set-1-data": "42",
"usersite_set-1-user": "apollo13",
}
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.values().order_by("data")
self.assertEqual(usersite[0]["data"], 11)
self.assertEqual(usersite[0]["user_id"], "apollo13")
self.assertEqual(usersite[1]["data"], 42)
self.assertEqual(usersite[1]["user_id"], "apollo13")
else:
self.fail("Errors found on formset:%s" % form_set.errors)
def test_formset_over_inherited_model(self):
"""
A formset over a ForeignKey with a to_field can be saved.
"""
Form = modelform_factory(Restaurant, fields="__all__")
FormSet = inlineformset_factory(Restaurant, Manager, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a form with no data
form = Form()
form_set = FormSet(instance=Restaurant())
# Now create a new Restaurant and Manager instance
data = {
"name": "Guido's House of Pasta",
"manager_set-TOTAL_FORMS": "1",
"manager_set-INITIAL_FORMS": "0",
"manager_set-MAX_NUM_FORMS": "0",
"manager_set-0-name": "Guido Van Rossum",
}
restaurant = User()
form = Form(data)
if form.is_valid():
restaurant = form.save()
else:
self.fail("Errors found on form:%s" % form_set)
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.values()
self.assertEqual(manager[0]["name"], "Guido Van Rossum")
else:
self.fail("Errors found on formset:%s" % form_set.errors)
# Now update the Manager instance
data = {
"manager_set-TOTAL_FORMS": "1",
"manager_set-INITIAL_FORMS": "1",
"manager_set-MAX_NUM_FORMS": "0",
"manager_set-0-id": str(manager[0]["id"]),
"manager_set-0-name": "Terry Gilliam",
}
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.values()
self.assertEqual(manager[0]["name"], "Terry Gilliam")
else:
self.fail("Errors found on formset:%s" % form_set.errors)
# Now add a new Manager instance
data = {
"manager_set-TOTAL_FORMS": "2",
"manager_set-INITIAL_FORMS": "1",
"manager_set-MAX_NUM_FORMS": "0",
"manager_set-0-id": str(manager[0]["id"]),
"manager_set-0-name": "Terry Gilliam",
"manager_set-1-name": "John Cleese",
}
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.values().order_by("name")
self.assertEqual(manager[0]["name"], "John Cleese")
self.assertEqual(manager[1]["name"], "Terry Gilliam")
else:
self.fail("Errors found on formset:%s" % form_set.errors)
def test_inline_model_with_to_field(self):
"""
#13794 --- An inline model with a to_field of a formset with instance
has working relations.
"""
FormSet = inlineformset_factory(User, UserSite, exclude=("is_superuser",))
user = User.objects.create(username="guido", serial=1337)
UserSite.objects.create(user=user, data=10)
formset = FormSet(instance=user)
# Testing the inline model's relation
self.assertEqual(formset[0].instance.user_id, "guido")
def test_inline_model_with_primary_to_field(self):
"""An inline model with a OneToOneField with to_field & primary key."""
FormSet = inlineformset_factory(
User, UserPreferences, exclude=("is_superuser",)
)
user = User.objects.create(username="guido", serial=1337)
UserPreferences.objects.create(user=user, favorite_number=10)
formset = FormSet(instance=user)
self.assertEqual(formset[0].fields["user"].initial, "guido")
def test_inline_model_with_to_field_to_rel(self):
"""
#13794 --- An inline model with a to_field to a related field of a
formset with instance has working relations.
"""
FormSet = inlineformset_factory(UserProfile, ProfileNetwork, exclude=[])
user = User.objects.create(username="guido", serial=1337, pk=1)
self.assertEqual(user.pk, 1)
profile = UserProfile.objects.create(user=user, about="about", pk=2)
self.assertEqual(profile.pk, 2)
ProfileNetwork.objects.create(profile=profile, network=10, identifier=10)
formset = FormSet(instance=profile)
# Testing the inline model's relation
self.assertEqual(formset[0].instance.profile_id, 1)
def test_formset_with_none_instance(self):
"A formset with instance=None can be created. Regression for #11872"
Form = modelform_factory(User, fields="__all__")
FormSet = inlineformset_factory(User, UserSite, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a formset with an instance of None
Form(instance=None)
FormSet(instance=None)
def test_empty_fields_on_modelformset(self):
"""
No fields passed to modelformset_factory() should result in no fields
on returned forms except for the id (#14119).
"""
UserFormSet = modelformset_factory(User, fields=())
formset = UserFormSet()
for form in formset.forms:
self.assertIn("id", form.fields)
self.assertEqual(len(form.fields), 1)
def test_save_as_new_with_new_inlines(self):
"""
Existing and new inlines are saved with save_as_new.
Regression for #14938.
"""
efnet = Network.objects.create(name="EFNet")
host1 = Host.objects.create(hostname="irc.he.net", network=efnet)
HostFormSet = inlineformset_factory(Network, Host, fields="__all__")
# Add a new host, modify previous host, and save-as-new
data = {
"host_set-TOTAL_FORMS": "2",
"host_set-INITIAL_FORMS": "1",
"host_set-MAX_NUM_FORMS": "0",
"host_set-0-id": str(host1.id),
"host_set-0-hostname": "tranquility.hub.dal.net",
"host_set-1-hostname": "matrix.de.eu.dal.net",
}
# To save a formset as new, it needs a new hub instance
dalnet = Network.objects.create(name="DALnet")
formset = HostFormSet(data, instance=dalnet, save_as_new=True)
self.assertTrue(formset.is_valid())
formset.save()
self.assertQuerySetEqual(
dalnet.host_set.order_by("hostname"),
Host.objects.filter(
hostname__in=[
"matrix.de.eu.dal.net",
"tranquility.hub.dal.net",
]
).order_by("hostname"),
)
def test_initial_data(self):
user = User.objects.create(username="bibi", serial=1)
UserSite.objects.create(user=user, data=7)
FormSet = inlineformset_factory(User, UserSite, extra=2, fields="__all__")
formset = FormSet(instance=user, initial=[{"data": 41}, {"data": 42}])
self.assertEqual(formset.forms[0].initial["data"], 7)
self.assertEqual(formset.extra_forms[0].initial["data"], 41)
self.assertIn('value="42"', formset.extra_forms[1].as_p())
| InlineFormsetTests |
python | python-pillow__Pillow | src/PIL/AvifImagePlugin.py | {
"start": 1809,
"end": 8994
} | class ____(ImageFile.ImageFile):
format = "AVIF"
format_description = "AVIF image"
__frame = -1
def _open(self) -> None:
if not SUPPORTED:
msg = "image file could not be opened because AVIF support not installed"
raise SyntaxError(msg)
if DECODE_CODEC_CHOICE != "auto" and not _avif.decoder_codec_available(
DECODE_CODEC_CHOICE
):
msg = "Invalid opening codec"
raise ValueError(msg)
self._decoder = _avif.AvifDecoder(
self.fp.read(),
DECODE_CODEC_CHOICE,
_get_default_max_threads(),
)
# Get info from decoder
self._size, self.n_frames, self._mode, icc, exif, exif_orientation, xmp = (
self._decoder.get_info()
)
self.is_animated = self.n_frames > 1
if icc:
self.info["icc_profile"] = icc
if xmp:
self.info["xmp"] = xmp
if exif_orientation != 1 or exif:
exif_data = Image.Exif()
if exif:
exif_data.load(exif)
original_orientation = exif_data.get(ExifTags.Base.Orientation, 1)
else:
original_orientation = 1
if exif_orientation != original_orientation:
exif_data[ExifTags.Base.Orientation] = exif_orientation
exif = exif_data.tobytes()
if exif:
self.info["exif"] = exif
self.seek(0)
def seek(self, frame: int) -> None:
if not self._seek_check(frame):
return
# Set tile
self.__frame = frame
self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, 0, self.mode)]
def load(self) -> Image.core.PixelAccess | None:
if self.tile:
# We need to load the image data for this frame
data, timescale, pts_in_timescales, duration_in_timescales = (
self._decoder.get_frame(self.__frame)
)
self.info["timestamp"] = round(1000 * (pts_in_timescales / timescale))
self.info["duration"] = round(1000 * (duration_in_timescales / timescale))
if self.fp and self._exclusive_fp:
self.fp.close()
self.fp = BytesIO(data)
return super().load()
def load_seek(self, pos: int) -> None:
pass
def tell(self) -> int:
return self.__frame
def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
_save(im, fp, filename, save_all=True)
def _save(
im: Image.Image, fp: IO[bytes], filename: str | bytes, save_all: bool = False
) -> None:
info = im.encoderinfo.copy()
if save_all:
append_images = list(info.get("append_images", []))
else:
append_images = []
total = 0
for ims in [im] + append_images:
total += getattr(ims, "n_frames", 1)
quality = info.get("quality", 75)
if not isinstance(quality, int) or quality < 0 or quality > 100:
msg = "Invalid quality setting"
raise ValueError(msg)
duration = info.get("duration", 0)
subsampling = info.get("subsampling", "4:2:0")
speed = info.get("speed", 6)
max_threads = info.get("max_threads", _get_default_max_threads())
codec = info.get("codec", "auto")
if codec != "auto" and not _avif.encoder_codec_available(codec):
msg = "Invalid saving codec"
raise ValueError(msg)
range_ = info.get("range", "full")
tile_rows_log2 = info.get("tile_rows", 0)
tile_cols_log2 = info.get("tile_cols", 0)
alpha_premultiplied = bool(info.get("alpha_premultiplied", False))
autotiling = bool(info.get("autotiling", tile_rows_log2 == tile_cols_log2 == 0))
icc_profile = info.get("icc_profile", im.info.get("icc_profile"))
exif_orientation = 1
if exif := info.get("exif"):
if isinstance(exif, Image.Exif):
exif_data = exif
else:
exif_data = Image.Exif()
exif_data.load(exif)
if ExifTags.Base.Orientation in exif_data:
exif_orientation = exif_data.pop(ExifTags.Base.Orientation)
exif = exif_data.tobytes() if exif_data else b""
elif isinstance(exif, Image.Exif):
exif = exif_data.tobytes()
xmp = info.get("xmp")
if isinstance(xmp, str):
xmp = xmp.encode("utf-8")
advanced = info.get("advanced")
if advanced is not None:
if isinstance(advanced, dict):
advanced = advanced.items()
try:
advanced = tuple(advanced)
except TypeError:
invalid = True
else:
invalid = any(not isinstance(v, tuple) or len(v) != 2 for v in advanced)
if invalid:
msg = (
"advanced codec options must be a dict of key-value string "
"pairs or a series of key-value two-tuples"
)
raise ValueError(msg)
# Setup the AVIF encoder
enc = _avif.AvifEncoder(
im.size,
subsampling,
quality,
speed,
max_threads,
codec,
range_,
tile_rows_log2,
tile_cols_log2,
alpha_premultiplied,
autotiling,
icc_profile or b"",
exif or b"",
exif_orientation,
xmp or b"",
advanced,
)
# Add each frame
frame_idx = 0
frame_duration = 0
cur_idx = im.tell()
is_single_frame = total == 1
try:
for ims in [im] + append_images:
# Get number of frames in this image
nfr = getattr(ims, "n_frames", 1)
for idx in range(nfr):
ims.seek(idx)
# Make sure image mode is supported
frame = ims
rawmode = ims.mode
if ims.mode not in {"RGB", "RGBA"}:
rawmode = "RGBA" if ims.has_transparency_data else "RGB"
frame = ims.convert(rawmode)
# Update frame duration
if isinstance(duration, (list, tuple)):
frame_duration = duration[frame_idx]
else:
frame_duration = duration
# Append the frame to the animation encoder
enc.add(
frame.tobytes("raw", rawmode),
frame_duration,
frame.size,
rawmode,
is_single_frame,
)
# Update frame index
frame_idx += 1
if not save_all:
break
finally:
im.seek(cur_idx)
# Get the final output from the encoder
data = enc.finish()
if data is None:
msg = "cannot write file as AVIF (encoder returned None)"
raise OSError(msg)
fp.write(data)
Image.register_open(AvifImageFile.format, AvifImageFile, _accept)
if SUPPORTED:
Image.register_save(AvifImageFile.format, _save)
Image.register_save_all(AvifImageFile.format, _save_all)
Image.register_extensions(AvifImageFile.format, [".avif", ".avifs"])
Image.register_mime(AvifImageFile.format, "image/avif")
| AvifImageFile |
python | doocs__leetcode | solution/0600-0699/0657.Robot Return to Origin/Solution.py | {
"start": 0,
"end": 371
} | class ____:
def judgeCircle(self, moves: str) -> bool:
x = y = 0
for c in moves:
match c:
case "U":
y += 1
case "D":
y -= 1
case "L":
x -= 1
case "R":
x += 1
return x == 0 and y == 0
| Solution |
python | sphinx-doc__sphinx | tests/test_ext_napoleon/test_ext_napoleon.py | {
"start": 1031,
"end": 1509
} | class ____:
def _private_doc(self) -> None:
"""SampleClass._private_doc.DOCSTRING"""
pass
def _private_undoc(self) -> None:
pass
def __special_doc__(self) -> None: # NoQA: PLW3201
"""SampleClass.__special_doc__.DOCSTRING"""
pass
def __special_undoc__(self) -> None: # NoQA: PLW3201
pass
@simple_decorator
def __decorated_func__(self) -> None: # NoQA: PLW3201
"""Doc"""
pass
| SampleClass |
python | chroma-core__chroma | chromadb/execution/expression/operator.py | {
"start": 32137,
"end": 32359
} | class ____(Rank):
"""Division of two ranks"""
left: Rank
right: Rank
def to_dict(self) -> Dict[str, Any]:
return {"$div": {"left": self.left.to_dict(), "right": self.right.to_dict()}}
@dataclass
| Div |
python | getsentry__sentry | src/sentry/seer/endpoints/seer_rpc.py | {
"start": 6424,
"end": 7179
} | class ____(StandardAuthentication):
"""
Authentication for seer RPC requests.
Requests are sent with an HMAC signed by a shared private key.
"""
token_name = b"rpcsignature"
def accepts_auth(self, auth: list[bytes]) -> bool:
if not auth or len(auth) < 2:
return False
return auth[0].lower() == self.token_name
def authenticate_token(self, request: Request, token: str) -> tuple[Any, Any]:
if not compare_signature(request.path_info, request.body, token):
raise AuthenticationFailed("Invalid signature")
sentry_sdk.get_isolation_scope().set_tag("seer_rpc_auth", True)
return (AnonymousUser(), token)
@internal_region_silo_endpoint
| SeerRpcSignatureAuthentication |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/fractional_avg_pool_op_test.py | {
"start": 1267,
"end": 14703
} | class ____(test.TestCase):
# Random number generate with seed.
_PRNG = np.random.RandomState(341261000)
_SEED = 341261001
def _AvgPoolAlongRows(self, input_matrix, row_seq, overlapping):
"""Perform average pool along row of a 2-D matrix based on row_seq.
Args:
input_matrix: A 2-D matrix.
row_seq: Cumulative pooling sequence along row.
overlapping: Whether or not use overlapping when pooling.
Returns:
A 2-D matrix, with
* num_rows = len(row_seq)-1
* num_cols = input_matrix.num_cols.
"""
output_image = np.zeros(input_matrix.shape[1])
row_max = row_seq[-1]
for i in range(row_seq.shape[0] - 1):
row_start = row_seq[i]
row_end = row_seq[i + 1] + 1 if overlapping else row_seq[i + 1]
row_end = min(row_end, row_max)
output_image = np.vstack((output_image, np.mean(
input_matrix[row_start:row_end, :], axis=0))) # axis 0 is along row
# remove the sentinel row
return output_image[1:, :]
def _AvgPoolAlongCols(self, input_matrix, col_seq, overlapping):
"""Perform average pool along column of a 2-D matrix based on col_seq.
Args:
input_matrix: A 2-D matrix.
col_seq: Cumulative pooling sequence along column.
overlapping: Whether or not use overlapping when pooling.
Returns:
A 2-D matrix, with
* num_rows = input_matrix.num_rows
* num_cols = len(col_seq)-1.
"""
input_matrix = input_matrix.transpose()
output_matrix = self._AvgPoolAlongRows(input_matrix, col_seq, overlapping)
return output_matrix.transpose()
def _GetExpectedFractionalAvgPoolResult(self, input_tensor, row_seq, col_seq,
overlapping):
"""Get expected fractional average pooling result.
row_seq and col_seq together defines the fractional pooling region.
Args:
input_tensor: Original input tensor, assuming it is a 4-D tensor, with
dimension as [batch, height/row, width/column, channels/depth].
row_seq: Cumulative pooling sequence along row.
col_seq: Cumulative pooling sequence along column.
overlapping: Use overlapping when doing pooling.
Returns:
A 4-D tensor that is the result of average pooling on input_tensor based
on pooling region defined by row_seq and col_seq, conditioned on whether
or not overlapping is used.
"""
input_shape = input_tensor.shape
output_shape = (input_shape[0], len(row_seq) - 1, len(col_seq) - 1,
input_shape[3])
output_tensor = np.zeros(shape=output_shape, dtype=input_tensor.dtype)
for batch in range(input_shape[0]):
for channel in range(input_shape[3]):
two_dim_slice = input_tensor[batch, :, :, channel]
tmp = self._AvgPoolAlongRows(two_dim_slice, row_seq, overlapping)
output_tensor[batch, :, :, channel] = self._AvgPoolAlongCols(
tmp, col_seq, overlapping)
return output_tensor
def _ValidateFractionalAvgPoolResult(self, input_tensor, pooling_ratio,
pseudo_random, overlapping):
"""Validate FractionalAvgPool's result against expected.
Expected result is computed given input_tensor, and pooling region defined
by row_seq and col_seq.
Args:
input_tensor: A tensor or numpy ndarray.
pooling_ratio: A list or tuple of length 4, first and last element be 1.
pseudo_random: Use pseudo random method to generate pooling sequence.
overlapping: Use overlapping when pooling.
Returns:
None
"""
with self.cached_session() as sess:
p, r, c = nn_ops.fractional_avg_pool_v2(
input_tensor,
pooling_ratio,
pseudo_random,
overlapping,
seed=self._SEED)
actual, row_seq, col_seq = self.evaluate([p, r, c])
expected = self._GetExpectedFractionalAvgPoolResult(input_tensor, row_seq,
col_seq, overlapping)
self.assertShapeEqual(expected, p)
self.assertAllClose(expected, actual)
def _testVisually(self):
"""Manual test by printing out intermediate result of a small random tensor.
Since _GetExpectedFractionalAvgPoolResult is 'automated', it feels safer to
have a test case that you can see what's happening.
This test will generate a small, random, int 2D matrix, and feed it to
FractionalAvgPool and _GetExpectedFractionalAvgPoolResult.
"""
num_rows = 6
num_cols = 6
tensor_shape = (1, num_rows, num_cols, 1)
pseudo_random = False
for overlapping in True, False:
print("-" * 70)
print("Testing FractionalAvgPool with overlapping = {}".format(
overlapping))
rand_mat = self._PRNG.randint(10, size=tensor_shape)
pooling_ratio = [1, math.sqrt(2), math.sqrt(2), 1]
with self.cached_session() as sess:
p, r, c = nn_ops.fractional_avg_pool_v2(
rand_mat.astype(np.float32),
pooling_ratio,
pseudo_random,
overlapping,
seed=self._SEED)
tensor_output, row_seq, col_seq = self.evaluate([p, r, c])
expected_result = self._GetExpectedFractionalAvgPoolResult(
rand_mat.astype(np.float32), row_seq, col_seq, overlapping)
print("row sequence:")
print(row_seq)
print("column sequence:")
print(col_seq)
print("Input:")
# Print input with pooling region marked.
for i in range(num_rows):
row_to_print = []
for j in range(num_cols):
if j in col_seq:
row_to_print.append("|")
row_to_print.append(str(rand_mat[0, i, j, 0]))
row_to_print.append("|")
if i in row_seq:
print("-" * 2 * len(row_to_print))
print(" ".join(row_to_print))
print("-" * 2 * len(row_to_print))
print("Output from FractionalAvgPool:")
print(tensor_output[0, :, :, 0])
print("Expected result:")
print(expected_result[0, :, :, 0])
def testAllInputOptions(self):
"""Try all possible input options for fractional_avg_pool.
"""
num_batches = 5
num_channels = 3
num_rows = 20
num_cols = 30
for pseudo_random in True, False:
for overlapping in True, False:
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(
rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,
overlapping)
def testIntegerTensorInput(self):
"""Test FractionalAvgPool works fine when input tensor is integer type.
"""
pseudo_random = True
overlapping = True
tensor_shape = (1, 6, 6, 1)
# pyformat: disable
mat = np.array([
[2, 6, 4, 1, 3, 6],
[8, 9, 1, 6, 6, 8],
[3, 9, 8, 2, 5, 6],
[2, 7, 9, 5, 4, 5],
[8, 5, 0, 5, 7, 4],
[4, 4, 5, 9, 7, 2]
])
# pyformat: enable
self._ValidateFractionalAvgPoolResult(mat.reshape(tensor_shape),
[1, math.sqrt(3), math.sqrt(2), 1],
pseudo_random, overlapping)
def testDifferentTensorShapes(self):
"""Test different shapes of input tensor.
Mainly test different combinations of num_rows and num_cols.
"""
pseudo_random = True
overlapping = True
for num_batches in [1, 3]:
for num_channels in [1, 3]:
for num_rows in [10, 20, 50]:
for num_cols in [10, 20, 50]:
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(
rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,
overlapping)
def testLargePoolingRatio(self):
"""Test when pooling ratio is not within [1, 2).
"""
pseudo_random = True
overlapping = True
num_batches = 3
num_channels = 3
num_rows = 30
num_cols = 50
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
for row_ratio in [math.sqrt(11), math.sqrt(37)]:
for col_ratio in [math.sqrt(11), math.sqrt(27)]:
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(rand_mat,
[1, row_ratio, col_ratio, 1],
pseudo_random, overlapping)
def testDivisiblePoolingRatio(self):
"""Test when num of rows/cols can evenly divide pooling ratio.
This is a case regular average pooling can handle. Should be handled by
fractional pooling as well.
"""
pseudo_random = True
overlapping = True
num_batches = 3
num_channels = 3
num_rows = 30
num_cols = 50
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(rand_mat, [1, 2, 2, 1], pseudo_random,
overlapping)
@test_util.run_deprecated_v1
def testDifferentInputTensorShape(self):
"""Runs the operation in one session with different input tensor shapes."""
with self.cached_session() as sess:
input_holder = array_ops.placeholder(dtypes.float32,
[None, None, None, 3])
pooling_ratio = [1, 1.5, 1.5, 1]
pseudo_random = False
overlapping = False
p, r, c = nn_ops.fractional_avg_pool_v2(
input_holder,
pooling_ratio,
pseudo_random,
overlapping,
seed=self._SEED)
# First run.
input_a = np.zeros([3, 32, 32, 3])
actual, row_seq, col_seq = sess.run([p, r, c], {input_holder: input_a})
expected = self._GetExpectedFractionalAvgPoolResult(
input_a, row_seq, col_seq, overlapping)
self.assertSequenceEqual(expected.shape, actual.shape)
# Second run.
input_b = np.zeros([4, 60, 60, 3])
actual, row_seq, col_seq = sess.run([p, r, c], {input_holder: input_b})
expected = self._GetExpectedFractionalAvgPoolResult(
input_b, row_seq, col_seq, overlapping)
self.assertSequenceEqual(expected.shape, actual.shape)
def testNegativeSeqValuesForGradOp(self):
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"Row sequence tensor values must not be negative.*"):
y = nn_ops.gen_nn_ops.fractional_avg_pool_grad(
orig_input_tensor_shape=[2, 2, 2, 2],
out_backprop=[[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11,
12]]]],
row_pooling_sequence=[-10, 1, 2, 3],
col_pooling_sequence=[1, 2, 3, 4],
overlapping=True)
self.evaluate(y)
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"Column sequence tensor values must not be negative.*"):
z = nn_ops.gen_nn_ops.fractional_avg_pool_grad(
orig_input_tensor_shape=[2, 2, 2, 2],
out_backprop=[[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11,
12]]]],
row_pooling_sequence=[10, 1, 2, 3],
col_pooling_sequence=[1, 2, -3, 4],
overlapping=True)
self.evaluate(z)
def testPoolingRatioHasMoreDimThanInput(self):
with self.cached_session() as _:
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"Pooling ratio is higher than input dimension size for dimension 1.*"
):
result = nn_ops.gen_nn_ops.fractional_avg_pool(
value=constant_op.constant(
value=[[[[1, 4, 2, 3]]]], dtype=dtypes.int64),
pooling_ratio=[1.0, 1.44, 1.73, 1.0],
pseudo_random=False,
overlapping=False,
deterministic=False,
seed=0,
seed2=0,
name=None)
self.evaluate(result)
def testPoolingRatioIllegalSmallValue(self):
with self.cached_session() as _:
# Whether turn on `TF2_BEHAVIOR` generates different error messages
with self.assertRaisesRegex(
(errors.InvalidArgumentError, ValueError),
r"(pooling_ratio cannot be smaller than 1, got: .*)|(is negative)"):
result = nn_ops.gen_nn_ops.fractional_avg_pool(
value=np.zeros([3, 30, 30, 3]),
pooling_ratio=[1, -1, 3, 1],
pseudo_random=False,
overlapping=False,
deterministic=False,
seed=0,
seed2=0,
)
self.evaluate(result)
def testPoolingIllegalRatioForBatch(self):
with self.cached_session() as _:
with self.assertRaises(errors.UnimplementedError):
result = nn_ops.gen_nn_ops.fractional_avg_pool(
np.zeros([3, 30, 50, 3]),
[2, 3, 1.5, 1],
True,
True)
self.evaluate(result)
| FractionalAvgTest |
python | getsentry__sentry | src/sentry/backup/findings.py | {
"start": 5044,
"end": 6251
} | class ____(ABC):
"""
A JSON serializable and user-reportable finding for an import/export operation. Don't use this
class directly - inherit from it, set a specific `kind` type, and define your own pretty
printer!
"""
on: InstanceID
# The original `pk` of the model in question, if one is specified in the `InstanceID`.
left_pk: int | None = None
# The post-import `pk` of the model in question, if one is specified in the `InstanceID`.
right_pk: int | None = None
reason: str = ""
def get_finding_name(self) -> str:
return self.__class__.__name__
def _pretty_inner(self) -> str:
"""
Pretty print only the fields on the shared `Finding` portion.
"""
out = f"\n on: {self.on.pretty()}"
if self.left_pk:
out += f",\n left_pk: {self.left_pk}"
if self.right_pk:
out += f",\n right_pk: {self.right_pk}"
if self.reason:
out += f",\n reason: {self.reason}"
return out
@abstractmethod
def pretty(self) -> str:
pass
@abstractmethod
def to_dict(self) -> dict[str, Any]:
pass
@dataclass(frozen=True)
| Finding |
python | pytorch__pytorch | torch/_inductor/compile_worker/subproc_pool.py | {
"start": 3152,
"end": 3463
} | class ____:
"""
Allows a caller to provide a custom pickler for passing data with the
subprocess.
"""
def dumps(self, obj: object) -> bytes:
return pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
def loads(self, data: bytes) -> object:
return pickle.loads(data)
| SubprocPickler |
python | huggingface__transformers | tests/models/cvt/test_modeling_cvt.py | {
"start": 5210,
"end": 8891
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as Cvt does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
pipeline_model_mapping = (
{"image-feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
if is_torch_available()
else {}
)
test_resize_embeddings = False
has_attentions = False
test_torch_exportable = True
def setUp(self):
self.model_tester = CvtModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=CvtConfig,
has_text_modality=False,
hidden_size=37,
common_properties=["hidden_size", "num_channels"],
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="Cvt does not output attentions")
def test_attention_outputs(self):
pass
@unittest.skip(reason="Cvt does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Cvt does not support input and output embeddings")
def test_model_get_set_embeddings(self):
pass
# Larger differences on A10 than T4
def test_batching_equivalence(self, atol=2e-4, rtol=2e-4):
super().test_batching_equivalence(atol=atol, rtol=rtol)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = len(self.model_tester.depth)
self.assertEqual(len(hidden_states), expected_num_layers)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]),
[
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "microsoft/cvt-13"
model = CvtModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
| CvtModelTest |
python | lxml__lxml | src/lxml/html/diff.py | {
"start": 19479,
"end": 20747
} | class ____(str):
""" Represents a diffable token, generally a word that is displayed to
the user. Opening tags are attached to this token when they are
adjacent (pre_tags) and closing tags that follow the word
(post_tags). Some exceptions occur when there are empty tags
adjacent to a word, so there may be close tags in pre_tags, or
open tags in post_tags.
We also keep track of whether the word was originally followed by
whitespace, even though we do not want to treat the word as
equivalent to a similar word that does not have a trailing
space."""
# When this is true, the token will be eliminated from the
# displayed diff if no change has occurred:
hide_when_equal = False
def __new__(cls, text, pre_tags=None, post_tags=None, trailing_whitespace=""):
obj = str.__new__(cls, text)
obj.pre_tags = pre_tags if pre_tags is not None else []
obj.post_tags = post_tags if post_tags is not None else []
obj.trailing_whitespace = trailing_whitespace
return obj
def __repr__(self):
return 'token(%s, %r, %r, %r)' % (
str.__repr__(self), self.pre_tags, self.post_tags, self.trailing_whitespace)
def html(self):
return str(self)
| token |
python | huggingface__transformers | src/transformers/generation/logits_process.py | {
"start": 153487,
"end": 157096
} | class ____(LogitsProcessor):
r"""Special logits processor to handle the generation of the EOS token in Dia.
This is due to the fact that Dia does not allow the generation of EOS in all
channels except the first channel (C0).
Hence, based on the delay pattern, an EOS is forced after the respective delays
in the channels. For example, if the delay pattern is [0, 2, 3, 4]:
s s+1 s+2 s+3 s+4 s+5 ...
| | | | | |
C0: EOS PAD PAD PAD PAD PAD ...
C1: x x EOS PAD PAD PAD ...
C2: x x x EOS PAD PAD ...
C3: x x x x EOS PAD ...
If the first channel generated EOS at step s, channels Cx are forced to generate
theirs at the respective delays (s+2, s+3, s+4). Subsequent padding tokens are
handled by the `EosTokenCriteria` when an EOS has been detected.
<Tip warning={true}>
This logits processor is exclusively compatible with
[Dia](https://huggingface.co/docs/transformers/en/model_doc/dia).
</Tip>
Args:
delay_pattern (`List[int]`):
The delays per channel in the audio codebooks.
eos_token_id (`int`):
The id of *end-of-sequence* token.
max_generation_len (`int`):
The max sequence length that can be generated.
device (`str`, *optional*, defaults to `"cpu"`):
The device to allocate the tensors on.
"""
def __init__(self, delay_pattern: list[int], eos_token_id: int, max_generation_len: int, device: str = "cpu"):
self.num_channels = len(delay_pattern)
# Update during first iteration
self.active_batches = None
self.delay_pattern = torch.tensor(delay_pattern, device=device, dtype=torch.int)[None, :]
self.eos_token_id = eos_token_id
self.max_generation_len = max_generation_len - max(delay_pattern) - 1
self.device = device
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
# Reshape for easier channel indexing [B, C, V]
scores = scores.reshape(-1, self.num_channels, scores.shape[-1])
# Initialize / expand values on first iteration
if self.active_batches is None:
self.delay_pattern = self.delay_pattern.repeat(scores.shape[0], 1)
self.active_batches = torch.zeros(size=(scores.shape[0],), device=self.device, dtype=torch.bool)
# Check if eos has been generated in any batch
channel_generated_eos = torch.argmax(scores, dim=-1)[:, 0] == self.eos_token_id
# Check if max len has been reached
reached_max_len = input_ids.shape[1] == self.max_generation_len
# Update active batches
self.active_batches |= channel_generated_eos
self.active_batches |= reached_max_len
# Find channels that need to force eos
forced_eos_channels = self.active_batches[:, None] & (self.delay_pattern == 0)
# Use indexing to avoid issues on all `False` by having empty tensors in that case
idx_bsz, idx_channel = forced_eos_channels.nonzero(as_tuple=True)
# Force eos if delay is kicking in
scores[idx_bsz, idx_channel, :] = -float("inf")
scores[idx_bsz, idx_channel, self.eos_token_id] = 0.0
# Reshape back to [B * C, V]
scores = scores.reshape(-1, scores.shape[-1])
# Update amount of delay left for each channel
self.delay_pattern -= self.active_batches[:, None].int()
return scores
| DiaEOSDelayPatternLogitsProcessor |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 124291,
"end": 124401
} | class ____:
xlTop10Bottom = 0 # from enum XlTopBottom
xlTop10Top = 1 # from enum XlTopBottom
| TopBottom |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/array_ops_test.py | {
"start": 73689,
"end": 82864
} | class ____(test_util.TensorFlowTestCase):
def testUpperBoundFloatHandCoded(self):
cdf = np.array([0, .2, .5, .6, .8, 1.], dtype=np.float32)
arr = np.array([.04, .99, .53, .58, .31, .01, .79, .8, .21],
dtype=np.float32)
result = np.searchsorted(cdf, arr, side="right")
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
self.assertAllEqual(result, tf_result)
def testUpperBoundFloatRandomNd(self):
dim_size = 7
for d in range(1, 5):
shape = [dim_size] * d
cdf = np.cumsum(
np.random.uniform(size=shape).astype(np.float32), axis=(d - 1))
arr = np.random.uniform(size=shape).astype(np.float32) * dim_size
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
cdf = cdf.reshape([-1, dim_size])
arr = arr.reshape([-1, dim_size])
result = np.zeros(arr.shape, dtype=np.int32)
for i in range(dim_size**(d - 1)):
result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")
result = result.reshape(shape)
self.assertAllEqual(result, tf_result)
def testUpperBoundFloatUneven(self):
batch_size = 7
size_search_array = 1000
size_values = 47
cdf = np.cumsum(
np.random.uniform(size=[batch_size, size_search_array]).astype(
np.float32),
axis=1)
arr = np.random.uniform(size=[batch_size, size_values]).astype(
np.float32) * size_search_array
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
result = np.zeros(arr.shape, dtype=np.int32)
for i in range(batch_size):
result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")
self.assertAllEqual(result, tf_result)
def testLowerBoundFloatHandCoded(self):
cdf = np.array([0, .2, .5, .6, .8, 1.], dtype=np.float32)
arr = np.array([.04, .99, .53, .58, .31, .01, .79, .8, .21],
dtype=np.float32)
result = np.searchsorted(cdf, arr, side="left")
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
self.assertAllEqual(result, tf_result)
def testLowerBoundFloatRandomNd(self):
dim_size = 7
for d in range(1, 5):
shape = [dim_size] * d
cdf = np.cumsum(
np.random.uniform(size=shape).astype(np.float32), axis=(d - 1))
arr = np.random.uniform(size=shape).astype(np.float32) * dim_size
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
cdf = cdf.reshape([-1, dim_size])
arr = arr.reshape([-1, dim_size])
result = np.zeros(arr.shape, dtype=np.int32)
for i in range(dim_size**(d - 1)):
result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")
result = result.reshape(shape)
self.assertAllEqual(result, tf_result)
def testLowerBoundFloatUneven(self):
batch_size = 7
size_search_array = 1000
size_values = 47
cdf = np.cumsum(
np.random.uniform(size=[batch_size, size_search_array]).astype(
np.float32),
axis=1)
arr = np.random.uniform(size=[batch_size, size_values]).astype(
np.float32) * size_search_array
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
result = np.zeros(arr.shape, dtype=np.int32)
for i in range(batch_size):
result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")
self.assertAllEqual(result, tf_result)
def testUpperBoundIntHandCoded(self):
cdf = np.array([0, 20, 50, 60, 80, 100], dtype=np.int64)
arr = np.array([4, 99, 53, 58, 31, 1, 79, 8, 21], dtype=np.int64)
result = np.searchsorted(cdf, arr, side="right")
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
self.assertAllEqual(result, tf_result)
def testUpperBoundIntRandomNd(self):
dim_size = 7
for d in range(1, 5):
shape = [dim_size] * d
cdf = np.cumsum(
np.random.randint(low=0, high=10, size=shape).astype(np.int64),
axis=(d - 1))
arr = np.random.randint(
low=0, high=10 * dim_size, size=shape).astype(np.int64)
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
cdf = cdf.reshape([-1, dim_size])
arr = arr.reshape([-1, dim_size])
result = np.zeros(arr.shape, dtype=np.int32)
for i in range(dim_size**(d - 1)):
result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")
result = result.reshape(shape)
self.assertAllEqual(result, tf_result)
def testUpperBoundIntUneven(self):
batch_size = 7
size_search_array = 1000
size_values = 47
cdf = np.cumsum(
np.random.randint(low=0, high=10,
size=[batch_size,
size_search_array]).astype(np.int64),
axis=1)
arr = np.random.randint(
low=0, high=10 * size_search_array, size=[batch_size,
size_values]).astype(np.int64)
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="right"))
result = np.zeros(arr.shape, dtype=np.int32)
for i in range(batch_size):
result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="right")
self.assertAllEqual(result, tf_result)
def testLowerBoundIntHandCoded(self):
cdf = np.array([0, 20, 50, 60, 80, 100], dtype=np.int64)
arr = np.array([4, 99, 53, 58, 31, 1, 79, 8, 21], dtype=np.int64)
result = np.searchsorted(cdf, arr, side="left")
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
self.assertAllEqual(result, tf_result)
def testLowerBoundIntRandomNd(self):
dim_size = 7
for d in range(1, 5):
shape = [dim_size] * d
cdf = np.cumsum(
np.random.randint(low=0, high=10, size=shape).astype(np.int64),
axis=(d - 1))
arr = np.random.randint(
low=0, high=10 * dim_size, size=shape).astype(np.int64)
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
cdf = cdf.reshape([-1, dim_size])
arr = arr.reshape([-1, dim_size])
result = np.zeros(arr.shape, dtype=np.int32)
for i in range(dim_size**(d - 1)):
result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")
result = result.reshape(shape)
self.assertAllEqual(result, tf_result)
def testLowerBoundIntUneven(self):
batch_size = 7
size_search_array = 1000
size_values = 47
cdf = np.cumsum(
np.random.randint(low=0, high=10,
size=[batch_size,
size_search_array]).astype(np.int64),
axis=1)
arr = np.random.randint(
low=0, high=10 * size_search_array, size=[batch_size,
size_values]).astype(np.int64)
tf_result = self.evaluate(array_ops.searchsorted(cdf, arr, side="left"))
result = np.zeros(arr.shape, dtype=np.int32)
for i in range(batch_size):
result[i, :] = np.searchsorted(cdf[i, :], arr[i, :], side="left")
self.assertAllEqual(result, tf_result)
def testZeroSequenceSize(self):
dtype = dtypes.int32
for side in ("left", "right"):
with self.subTest(side=side):
self.assertAllEqual(
array_ops.searchsorted(
array_ops.ones([2, 0]),
array_ops.ones([2, 3]),
side=side,
out_type=dtype), array_ops.zeros([2, 3], dtype))
def testZeroValueSize(self):
dtype = dtypes.int32
for side in ("left", "right"):
with self.subTest(side=side):
self.assertAllEqual(
array_ops.searchsorted(
array_ops.ones([2, 3]),
array_ops.ones([2, 0]),
side=side,
out_type=dtype), array_ops.zeros([2, 0], dtype))
def testZeroInputSize(self):
dtype = dtypes.int32
for side in ("left", "right"):
with self.subTest(side=side):
self.assertAllEqual(
array_ops.searchsorted(
array_ops.ones([2, 0]),
array_ops.ones([2, 3]),
side=side,
out_type=dtype), array_ops.zeros([2, 3], dtype))
def testInt64(self):
@def_function.function
def g():
x = random_ops.random_normal(shape=[int(1e10)])
y = array_ops.ones(shape=[int(1e10)])
return array_ops.searchsorted(x, y, out_type=dtypes.int64)
_ = g.get_concrete_function()
def testInt64UnspecifiedOutType(self):
@def_function.function
def g():
x = random_ops.random_normal(shape=[int(1e10)])
y = array_ops.ones(shape=[int(1e10)])
return array_ops.searchsorted(x, y)
_ = g.get_concrete_function()
def testInvalidValuesLowerBound(self):
arg_0_tensor = random_ops.random_uniform([3, 3], dtype=dtypes.float32)
arg_0 = array_ops.identity(arg_0_tensor)
arg_1_tensor = random_ops.random_uniform([3], dtype=dtypes.float32)
arg_1 = array_ops.identity(arg_1_tensor)
arg_2 = dtypes.int32
arg_3 = False
with self.assertRaises(Exception):
gen_array_ops.lower_bound(arg_0, arg_1, arg_2, arg_3,)
| SortedSearchTest |
python | conda__conda | conda/core/index.py | {
"start": 1468,
"end": 18590
} | class ____(UserDict):
"""The ``Index`` provides information about available packages from all relevant sources.
There are four types of sources for package information, namely
Channels
represent packages available from standard sources identified with a url, mostly online,
but can also be on a local filesystem using the ``file://`` scheme.
Programatically, channels are represented by :class:`conda.models.channel.Channel`, their data
is fetched using :class:`conda.core.subdir_data.SubdirData`.
For more information see :ref:`concepts-channels`.
Individual packages from channels are usually represented by :class:`conda.models.records.PackageRecord`.
Prefix
represents packages that are already installed. Every :class:`Index` can be associated
with exactly one Prefix, which is the location of one of the conda :ref:`concepts-conda-environments`.
The package information about the installed packages is represented by :class:`conda.core.prefix_data.PrefixData`.
Individual packages from prefixes are usually represented by :class:`conda.models.records.PrefixRecord`.
Package Cache
represents packages that are locally unpacked, but may not be installed in the environment
associated with this index. These are usually packages that have been installed in any environment
of the local conda installation, but may have been removed from all environments by now.
Individual packages from the package are usually represented by :class:`conda.models.records.PackageCacheRecord`.
Virtual Packages
represent properties of the system, not actual conda packages in the normal sense. These are,
for example, system packages that inform the solver about the operating system in use, or
track features that can be used to steer package priority.
Individual virtual packages are represented by special :class:`conda.models.records.PackageRecord`,
see :meth:`conda.models.records.PackageRecord.virtual_package` and
:meth:`conda.models.records.PackageRecord.feature`.
"""
def __init__(
self,
channels: Iterable[str | Channel] = (),
prepend: bool = True,
platform: str | None = None,
subdirs: tuple[str, ...] | None = None,
use_local: bool = False,
use_cache: bool | None = None,
prefix: PathType | PrefixData | None = None,
repodata_fn: str | None = context.repodata_fns[-1],
use_system: bool = False,
) -> None:
"""Initializes a new index with the desired components.
Args:
channels: channels identified by canonical names or URLS or Channel objects;
for more details, see :meth:`conda.models.channel.Channel.from_value`
prepend: if ``True`` (default), add configured channel with higher priority than passed channels;
if ``False``, do *not* add configured channels.
platform: see ``subdirs``.
subdirs: platform and subdirs determine the selection of subdirs in the channels;
if both are ``None``, subdirs is taken from the configuration;
if both are given, ``subdirs`` takes precedence and ``platform`` is ignored;
if only ``platform`` is given, subdirs will be ``(platform, "noarch")``;
if ``subdirs`` is given, subdirs will be ``subdirs``.
use_local: if ``True``, add the special "local" channel for locally built packages with lowest priority.
use_cache: if ``True``, add packages from the package cache.
prefix: associate prefix with this index and add its packages.
repodata_fn: filename of the repodata, default taken from config, almost always "repodata.json".
use_system: if ``True``, add system packages, that is virtual packages defined by plugins, usually used
to make intrinsic information about the system, such as cpu architecture or operating system, available
to the solver.
"""
channels = list(channels)
if use_local:
channels = ["local", *channels]
if prepend:
channels += context.channels
self._channels = IndexedSet(channels)
if subdirs:
if platform:
log.warning("subdirs is %s, ignoring platform %s", subdirs, platform)
else:
subdirs = (platform, "noarch") if platform is not None else context.subdirs
self._subdirs = subdirs
self._repodata_fn = repodata_fn
self.channels = {}
self.expanded_channels = IndexedSet()
for channel in self._channels:
urls = Channel(channel).urls(True, subdirs)
expanded_channels = [Channel(url) for url in urls]
self.channels[channel] = [
SubdirData(expanded_channel, repodata_fn=repodata_fn)
for expanded_channel in expanded_channels
]
self.expanded_channels.update(expanded_channels)
# LAST_CHANNEL_URLS is still used in conda-build and must be maintained for the moment.
LAST_CHANNEL_URLS.clear()
LAST_CHANNEL_URLS.extend(self.expanded_channels)
if prefix is None:
self.prefix_data = None
elif isinstance(prefix, PrefixData):
self.prefix_data = prefix
else:
self.prefix_data = PrefixData(prefix)
self.use_cache = True if use_cache is None and context.offline else use_cache
self.use_system = use_system
@property
def cache_entries(self) -> tuple[PackageCacheRecord, ...]:
"""Contents of the package cache if active.
Returns:
All packages available from the package cache.
"""
try:
return self._cache_entries
except AttributeError:
self.reload(cache=True)
return self._cache_entries
@property
def system_packages(self) -> dict[PackageRecord, PackageRecord]:
"""System packages provided by plugins.
Returns:
Identity mapping of the available system packages in a ``dict``.
"""
try:
return self._system_packages
except AttributeError:
self.reload(system=True)
return self._system_packages
@property
def features(self) -> dict[PackageRecord, PackageRecord]:
"""Active tracking features.
Returns:
Identity mapping of the local tracking features in a ``dict``.
"""
try:
return self._features
except AttributeError:
self.reload(features=True)
return self._features
def reload(
self,
*,
prefix: bool = False,
cache: bool = False,
features: bool = False,
system: bool = False,
) -> None:
"""Reload one or more of the index components.
Can be used to refresh the index with new information, for example after a new
package has been installed into the index.
Args:
prefix: if ``True``, reload the prefix data.
cache: if ``True``, reload the package cache.
features: if ``True``, reload the tracking features.
system: if ``True``, reload the system packages.
"""
has_data = hasattr(self, "_data")
if prefix:
if self.prefix_data:
self.prefix_data.reload()
if has_data:
self._supplement_index_dict_with_prefix()
if cache:
self._cache_entries = PackageCacheData.get_all_extracted_entries()
if has_data:
self._supplement_index_dict_with_cache()
if features:
self._features = {
(rec := PackageRecord.feature(track_feature)): rec
for track_feature in context.track_features
}
if has_data:
self._data.update(self.features)
if system:
self._system_packages = {
package: package
for package in context.plugin_manager.get_virtual_package_records()
}
if has_data:
self._data.update(self.system_packages)
def __repr__(self) -> str:
channels = ", ".join(self.channels.keys())
return f"<{self.__class__.__name__}(channels=[{channels}])>"
def get_reduced_index(self, specs: Iterable[MatchSpec]) -> ReducedIndex:
"""Create a reduced index with a subset of packages.
Can be used to create a reduced index as a subset from an existing index.
Args:
specs: the specs that span the subset.
Returns:
a reduced index with the same sources as this index, but limited to ``specs``
and their dependency graph.
"""
return ReducedIndex(
specs=specs,
channels=self._channels,
prepend=False,
subdirs=self._subdirs,
use_local=False,
use_cache=self.use_cache,
prefix=self.prefix_data,
repodata_fn=self._repodata_fn,
use_system=self.use_system,
)
@property
def data(self) -> dict[PackageRecord, PackageRecord]:
"""The entire index as a dict; avoid if possible.
Warning:
This returns the entire contents of the index as a single identity mapping in
a ``dict``. This may be convenient, but it comes at a cost because all sources
must be fully loaded at significant overhead for :class:`~conda.models.records.PackageRecord`
construction for **every** package.
Hence, all uses of :attr:`data`, including all iteration over the entire index,
is strongly discouraged.
"""
try:
return self._data
except AttributeError:
self._realize()
return self._data
@data.setter
def data(self, value: dict[PackageRecord, PackageRecord]) -> None:
self._data = value
def _supplement_index_dict_with_prefix(self) -> None:
"""
Supplement the index with information from its prefix.
"""
if self.prefix_data is None:
return
# supplement index with information from prefix/conda-meta
for prefix_record in self.prefix_data.iter_records():
if prefix_record in self._data:
current_record = self._data[prefix_record]
if current_record.channel == prefix_record.channel:
# The downloaded repodata takes priority, so we do not overwrite.
# We do, however, copy the link information so that the solver (i.e. resolve)
# knows this package is installed.
link = prefix_record.get("link") or EMPTY_LINK
self._data[prefix_record] = PrefixRecord.from_objects(
current_record, prefix_record, link=link
)
else:
# If the local packages channel information does not agree with
# the channel information in the index then they are most
# likely referring to different packages. This can occur if a
# multi-channel changes configuration, e.g. defaults with and
# without the free channel. In this case we need to fake the
# channel data for the existing package.
prefix_channel = prefix_record.channel
prefix_channel._Channel__canonical_name = prefix_channel.url()
del prefix_record._PackageRecord__pkey
self._data[prefix_record] = prefix_record
else:
# If the package is not in the repodata, use the local data.
# If the channel is known but the package is not in the index, it
# is because 1) the channel is unavailable offline, or 2) it no
# longer contains this package. Either way, we should prefer any
# other version of the package to this one. On the other hand, if
# it is in a channel we don't know about, assign it a value just
# above the priority of all known channels.
self._data[prefix_record] = prefix_record
def _supplement_index_dict_with_cache(self) -> None:
# supplement index with packages from the cache
for pcrec in self.cache_entries:
if pcrec in self._data:
# The downloaded repodata takes priority
current_record = self._data[pcrec]
self._data[pcrec] = PackageCacheRecord.from_objects(
current_record, pcrec
)
else:
self._data[pcrec] = pcrec
def _realize(self) -> None:
self._data = {}
for subdir_datas in self.channels.values():
for subdir_data in subdir_datas:
self._data.update((prec, prec) for prec in subdir_data.iter_records())
self._supplement_index_dict_with_prefix()
if self.use_cache:
self._supplement_index_dict_with_cache()
self._data.update(self.features)
if self.use_system:
self._data.update(self.system_packages)
def _retrieve_from_channels(self, key: PackageRecord) -> PackageRecord | None:
for subdir_datas in reversed(self.channels.values()):
for subdir_data in subdir_datas:
if key.subdir != subdir_data.channel.subdir:
continue
prec_candidates = list(subdir_data.query(key))
if not prec_candidates:
continue
if len(prec_candidates) > 1:
raise CondaKeyError(
key, "More than one matching package found in channels."
)
prec = prec_candidates[0]
if prec:
return prec
return None
def _retrieve_all_from_channels(self, key: PackageRecord) -> list[PackageRecord]:
precs = []
for subdir_datas in reversed(self.channels.values()):
for subdir_data in subdir_datas:
if hasattr(key, "subdir") and key.subdir != subdir_data.channel.subdir:
continue
precs.extend(subdir_data.query(key))
return precs
def _update_from_prefix(
self, key: PackageRecord, prec: PackageRecord | None
) -> PackageRecord | None:
prefix_prec = self.prefix_data.get(key.name, None) if self.prefix_data else None
if prefix_prec and prefix_prec == prec:
if prec:
if prec.channel == prefix_prec.channel:
link = prefix_prec.get("link") or EMPTY_LINK
prec = PrefixRecord.from_objects(prec, prefix_prec, link=link)
else:
prefix_channel = prefix_prec.channel
prefix_channel._Channel__canonical_name = prefix_channel.url()
del prefix_prec._PackageRecord__pkey
prec = prefix_prec
else:
prec = prefix_prec
return prec
def _update_from_cache(
self, key: PackageRecord, prec: PackageRecord | None
) -> PackageRecord | None:
for pcrec in self.cache_entries:
if pcrec == key:
if prec:
# The downloaded repodata takes priority
return PackageCacheRecord.from_objects(prec, pcrec)
else:
return pcrec
return prec
def __getitem__(self, key: PackageRecord) -> PackageRecord:
if not isinstance(key, PackageRecord):
raise TypeError(
"Can only retrieve PackageRecord objects. Got {}.", type(key)
)
try:
return self._data[key]
except AttributeError:
pass
if key.name.startswith("__"):
try:
return self.system_packages[key]
except KeyError:
pass
if key.name.endswith("@"):
try:
return self.features[key]
except KeyError:
pass
prec = self._retrieve_from_channels(key)
prec = self._update_from_prefix(key, prec)
if self.use_cache:
prec = self._update_from_cache(key, prec)
if prec is None:
raise KeyError((key,))
return prec
def __contains__(self, key: PackageRecord) -> bool:
try:
_ = self[key]
return True
except (PackagesNotFoundError, KeyError):
return False
def __copy__(self) -> Self:
inst = self.__class__.__new__(self.__class__)
inst.__dict__.update(self.__dict__)
if "_data" in self.__dict__:
inst.__dict__["_data"] = self.__dict__["_data"].copy()
return inst
| Index |
python | huggingface__transformers | src/transformers/models/flaubert/modeling_flaubert.py | {
"start": 58339,
"end": 63235
} | class ____(FlaubertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = FlaubertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
langs: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
cache: Optional[dict[str, torch.Tensor]] = None,
inputs_embeds: Optional[torch.Tensor] = None,
start_positions: Optional[torch.Tensor] = None,
end_positions: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, QuestionAnsweringModelOutput]:
r"""
langs (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
languages ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`dict[str, torch.FloatTensor]`, *optional*):
Instance of `EncoderDecoderCache` that contains precomputed KV states. Can be used to speed up sequential
decoding.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = transformer_outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + transformer_outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of question answering models using a `SquadHead`.
"""
)
# Copied from transformer.models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput with XLM->Flaubert
| FlaubertForQuestionAnsweringSimple |
python | keon__algorithms | tests/test_maths.py | {
"start": 12976,
"end": 13448
} | class ____(unittest.TestCase):
"""[summary]
Test for the file find_order_simple.py
Arguments:
unittest {[type]} -- [description]
"""
def test_magic_number(self):
self.assertTrue(magic_number(50113))
self.assertTrue(magic_number(1234))
self.assertTrue(magic_number(100))
self.assertTrue(magic_number(199))
self.assertFalse(magic_number(2000))
self.assertFalse(magic_number(500000))
| TestMagicNumber |
python | spyder-ide__spyder | spyder/plugins/externalterminal/widgets/run_conf.py | {
"start": 8324,
"end": 9557
} | class ____(type(GenericExternalTerminalShConfiguration)):
def __new__(cls, clsname, bases, attrs):
interp = attrs.pop('default_shell_meta')
interp_opts = attrs.pop('shell_args_meta')
interp_opts_enabled = interp_opts != ''
def get_default_conf() -> RunExecutorConfigurationGroupFactory:
return {
'interpreter': interp,
'interpreter_opts_enabled': interp_opts_enabled,
'interpreter_opts': interp_opts,
'script_opts_enabled': False,
'script_opts': '',
'close_after_exec': False,
}
return super(MetaShConfiguration, cls).__new__(cls, clsname, bases, {
**attrs,
'get_default_configuration': staticmethod(get_default_conf)
})
def ExternalTerminalShConfiguration(
default_shell: str,
shell_args: str = ''
) -> RunExecutorConfigurationGroup:
class WrappedExternalTerminalShConfiguration(
GenericExternalTerminalShConfiguration,
metaclass=MetaShConfiguration
):
default_shell_meta = default_shell
shell_args_meta = shell_args
return WrappedExternalTerminalShConfiguration
| MetaShConfiguration |
python | run-llama__llama_index | llama-index-integrations/memory/llama-index-memory-bedrock-agentcore/tests/test_agentcore_memory.py | {
"start": 18134,
"end": 19620
} | class ____:
"""Test error handling scenarios."""
def test_boto3_import_error(self, memory_context):
"""Test handling of boto3 import error."""
with patch("boto3.Session", side_effect=ImportError("boto3 not found")):
with pytest.raises(ImportError, match="boto3 package not found"):
AgentCoreMemory(context=memory_context)
def test_client_initialization_error(self, memory_context):
"""Test handling of client initialization errors."""
with patch("boto3.Session") as mock_session:
mock_session.side_effect = Exception("AWS credentials not found")
with pytest.raises(Exception, match="AWS credentials not found"):
AgentCoreMemory(context=memory_context)
# Integration test with existing tests
@pytest.mark.asyncio
async def test_aput(memory):
"""Test adding a message."""
message = ChatMessage(role="user", content="New message")
await memory.aput(message)
# Verify that create_event was called
assert memory._client.create_event.called
@pytest.mark.asyncio
async def test_aput_messages(memory):
"""Test adding multiple messages."""
messages = [
ChatMessage(role="user", content="Message 1"),
ChatMessage(role="assistant", content="Response 1"),
]
await memory.aput_messages(messages)
# Verify that create_event was called for each message
assert memory._client.create_event.call_count == 1
| TestErrorHandling |
python | weaviate__weaviate-python-client | integration/test_iterator.py | {
"start": 954,
"end": 7965
} | class ____(TypedDict):
data: int
@pytest.mark.parametrize(
"include_vector",
[False, True],
)
@pytest.mark.parametrize("return_metadata", [None, MetadataQuery.full()])
@pytest.mark.parametrize(
"return_properties",
[None, Data, ["data"]],
)
@pytest.mark.parametrize("cache_size", [None, 100, 10000])
def test_iterator_arguments(
collection_factory: CollectionFactory,
include_vector: bool,
return_metadata: Optional[METADATA],
return_properties: Optional[PROPERTIES],
cache_size: Optional[int],
) -> None:
collection = collection_factory(
properties=[
Property(name="data", data_type=DataType.INT),
Property(name="text", data_type=DataType.TEXT),
],
vectorizer_config=Configure.Vectorizer.text2vec_contextionary(
vectorize_collection_name=False
),
)
collection.data.insert_many(
[DataObject(properties={"data": i, "text": "hi"}) for i in range(10)]
)
iter_ = collection.iterator(
include_vector,
return_metadata=return_metadata,
return_properties=return_properties,
cache_size=cache_size,
)
# Expect everything back
if include_vector and return_properties is None and return_metadata == MetadataQuery.full():
all_data: list[int] = sorted([cast(int, obj.properties["data"]) for obj in iter_])
assert all_data == list(range(10))
assert all("text" in obj.properties for obj in iter_)
assert all("default" in obj.vector for obj in iter_)
assert all(obj.metadata.creation_time is not None for obj in iter_)
assert all(obj.metadata.score is not None for obj in iter_)
# Expect everything back except vector
elif (
not include_vector and return_properties is None and return_metadata == MetadataQuery.full()
):
all_data = sorted([cast(int, obj.properties["data"]) for obj in iter_])
assert all_data == list(range(10))
assert all("text" in obj.properties for obj in iter_)
assert all("default" not in obj.vector for obj in iter_)
assert all(obj.metadata.creation_time is not None for obj in iter_)
assert all(obj.metadata.score is not None for obj in iter_)
# Expect specified properties and vector
elif include_vector and return_properties is not None:
all_data = sorted([cast(int, obj.properties["data"]) for obj in iter_])
assert all_data == list(range(10))
assert all("text" not in obj.properties for obj in iter_)
assert all("default" in obj.vector for obj in iter_)
if return_metadata is not None:
assert all(obj.metadata.creation_time is not None for obj in iter_)
assert all(obj.metadata.score is not None for obj in iter_)
else:
assert all(obj.metadata._is_empty() for obj in iter_)
# Expect specified properties and no vector
elif not include_vector and return_properties is not None:
all_data = sorted([cast(int, obj.properties["data"]) for obj in iter_])
assert all_data == list(range(10))
assert all("text" not in obj.properties for obj in iter_)
assert all("default" not in obj.vector for obj in iter_)
if return_metadata is not None:
assert all(obj.metadata.creation_time is not None for obj in iter_)
assert all(obj.metadata.score is not None for obj in iter_)
else:
assert all(obj.metadata._is_empty() for obj in iter_)
def test_iterator_dict_hint(collection_factory: CollectionFactory, request: SubRequest) -> None:
collection = collection_factory(
properties=[Property(name="data", data_type=DataType.INT)],
vectorizer_config=Configure.Vectorizer.none(),
)
collection.data.insert_many([DataObject(properties={"data": i}) for i in range(10)])
with pytest.raises(WeaviateInvalidInputError) as e:
for _ in collection.iterator(return_properties=dict):
pass
assert (
"return_properties must only be a TypedDict or PROPERTIES within this context but is "
in e.value.args[0]
)
def test_iterator_with_default_generic(
collection_factory: CollectionFactory, request: SubRequest
) -> None:
class This(TypedDict):
this: str
class That(TypedDict):
this: str
that: str
collection = collection_factory(
properties=[
Property(name="this", data_type=DataType.TEXT),
Property(name="that", data_type=DataType.TEXT),
],
vectorizer_config=Configure.Vectorizer.none(),
data_model_properties=That,
)
collection.data.insert_many(
[DataObject(properties=That(this="this", that="that")) for _ in range(10)]
)
iter_ = collection.iterator()
for this in iter_:
assert this.properties["this"] == "this"
assert this.properties["that"] == "that"
iter__ = collection.iterator(return_properties=This)
for that in iter__:
assert that.properties["this"] == "this"
assert "that" not in that.properties
@pytest.mark.parametrize(
"count",
[
0,
1,
2,
ITERATOR_CACHE_SIZE - 1,
ITERATOR_CACHE_SIZE,
ITERATOR_CACHE_SIZE + 1,
2 * ITERATOR_CACHE_SIZE - 1,
2 * ITERATOR_CACHE_SIZE,
2 * ITERATOR_CACHE_SIZE + 1,
20 * ITERATOR_CACHE_SIZE,
],
)
def test_iterator(collection_factory: CollectionFactory, count: int) -> None:
collection = collection_factory(
properties=[Property(name="data", data_type=DataType.INT)],
vectorizer_config=Configure.Vectorizer.none(),
data_model_properties=Dict[str, int],
)
if count > 0:
collection.data.insert_many([DataObject(properties={"data": i}) for i in range(count)])
expected = list(range(count))
first_order = None
# make sure a new iterator resets the internal state and that the return order is the same for every run
for _ in range(3):
# get the property and sort them - order returned by weaviate is not identical to the order inserted
ret: list[int] = [int(obj.properties["data"]) for obj in collection.iterator()]
if first_order is None:
first_order = ret
else:
assert first_order == ret
assert sorted(ret) == expected
def test_iterator_with_after(collection_factory: CollectionFactory) -> None:
collection = collection_factory(
properties=[Property(name="data", data_type=DataType.INT)],
vectorizer_config=Configure.Vectorizer.none(),
data_model_properties=Dict[str, int],
)
collection.data.insert_many([DataObject(properties={"data": i}) for i in range(10)])
uuids = [obj.uuid for obj in collection.iterator()]
iterator = collection.iterator(after=uuids[5])
assert (
next(iterator).properties["data"]
== collection.query.fetch_object_by_id(uuids[6]).properties["data"]
)
| Data |
python | Netflix__metaflow | metaflow/_vendor/importlib_metadata/__init__.py | {
"start": 2827,
"end": 3425
} | class ____:
"""
Provide subscript item access for backward compatibility.
>>> recwarn = getfixture('recwarn')
>>> ep = EntryPoint(name='name', value='value', group='group')
>>> ep[:]
('name', 'value', 'group')
>>> ep[0]
'name'
>>> len(recwarn)
1
"""
_warn = functools.partial(
warnings.warn,
"EntryPoint tuple interface is deprecated. Access members by name.",
DeprecationWarning,
stacklevel=pypy_partial(2),
)
def __getitem__(self, item):
self._warn()
return self._key()[item]
| DeprecatedTuple |
python | bokeh__bokeh | src/bokeh/models/widgets/pickers.py | {
"start": 8152,
"end": 8484
} | class ____(BaseDatePicker):
""" Calendar-based date picker widget.
"""
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
value = Nullable(Date, default=None, help="""
The initial or picked date.
""")
| DatePicker |
python | scipy__scipy | scipy/sparse/tests/test_base.py | {
"start": 157111,
"end": 161238
} | class ____:
def test_getnnz_axis(self):
dat = array([[0, 2],
[3, 5],
[-6, 9]])
bool_dat = dat.astype(bool)
datsp = self.spcreator(dat)
accepted_return_dtypes = (np.int32, np.int64)
getnnz = datsp.count_nonzero if self.is_array_test else datsp.getnnz
assert_array_equal(bool_dat.sum(axis=None), getnnz(axis=None))
assert_array_equal(bool_dat.sum(), getnnz())
assert_array_equal(bool_dat.sum(axis=0), getnnz(axis=0))
assert_in(getnnz(axis=0).dtype, accepted_return_dtypes)
assert_array_equal(bool_dat.sum(axis=1), getnnz(axis=1))
assert_in(getnnz(axis=1).dtype, accepted_return_dtypes)
assert_array_equal(bool_dat.sum(axis=-2), getnnz(axis=-2))
assert_in(getnnz(axis=-2).dtype, accepted_return_dtypes)
assert_array_equal(bool_dat.sum(axis=-1), getnnz(axis=-1))
assert_in(getnnz(axis=-1).dtype, accepted_return_dtypes)
assert_raises(ValueError, getnnz, axis=2)
#------------------------------------------------------------------------------
# Tailored base class for generic tests
#------------------------------------------------------------------------------
def _possibly_unimplemented(cls, require=True):
"""
Construct a class that either runs tests as usual (require=True),
or each method skips if it encounters a common error.
"""
if require:
return cls
else:
def wrap(fc):
@functools.wraps(fc)
def wrapper(*a, **kw):
try:
return fc(*a, **kw)
except (NotImplementedError, TypeError, ValueError,
IndexError, AttributeError):
raise pytest.skip("feature not implemented")
return wrapper
new_dict = dict(cls.__dict__)
for name, func in cls.__dict__.items():
if name.startswith('test_'):
new_dict[name] = wrap(func)
return type(cls.__name__ + "NotImplemented",
cls.__bases__,
new_dict)
def sparse_test_class(getset=True, slicing=True, slicing_assign=True,
fancy_indexing=True, fancy_assign=True,
fancy_multidim_indexing=True, fancy_multidim_assign=True,
minmax=True, nnz_axis=True):
"""
Construct a base class, optionally converting some of the tests in
the suite to check that the feature is not implemented.
"""
bases = (_TestCommon,
_possibly_unimplemented(_TestGetSet, getset),
_TestSolve,
_TestInplaceArithmetic,
_TestArithmetic,
_possibly_unimplemented(_TestSlicing, slicing),
_possibly_unimplemented(_TestSlicingAssign, slicing_assign),
_possibly_unimplemented(_TestFancyIndexing, fancy_indexing),
_possibly_unimplemented(_TestFancyIndexingAssign,
fancy_assign),
_possibly_unimplemented(_TestFancyMultidim,
fancy_indexing and fancy_multidim_indexing),
_possibly_unimplemented(_TestFancyMultidimAssign,
fancy_multidim_assign and fancy_assign),
_possibly_unimplemented(_TestMinMax, minmax),
_possibly_unimplemented(_TestGetNnzAxis, nnz_axis))
# check that test names do not clash
names = {}
for cls in bases:
for name in cls.__dict__:
if not name.startswith('test_'):
continue
old_cls = names.get(name)
if old_cls is not None:
raise ValueError(f"Test class {cls.__name__} overloads test "
f"{name} defined in {old_cls.__name__}")
names[name] = cls
return type("TestBase", bases, {})
#------------------------------------------------------------------------------
# Matrix class based tests
#------------------------------------------------------------------------------
| _TestGetNnzAxis |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 145423,
"end": 153221
} | class ____(CType):
# name string
# cname string
# kind string "struct" or "union"
# scope StructOrUnionScope, or None if incomplete
# typedef_flag boolean
# packed boolean
# entry Entry
is_struct_or_union = 1
has_attributes = 1
exception_check = True
_needs_cpp_construction = False
def __init__(self, name, kind, scope, typedef_flag, cname, packed=False, in_cpp=False):
self.name = name
self.cname = cname
self.kind = kind
self.scope = scope
self.typedef_flag = typedef_flag
self.is_struct = kind == 'struct'
self.to_py_function = "%s_to_py_%s" % (
Naming.convert_func_prefix, self.specialization_name())
self.from_py_function = "%s_from_py_%s" % (
Naming.convert_func_prefix, self.specialization_name())
self.exception_check = True
self._convert_to_py_code = None
self._convert_from_py_code = None
self.packed = packed
self._needs_cpp_construction = self.is_struct and in_cpp
def can_coerce_to_pyobject(self, env):
if self._convert_to_py_code is False:
return None # tri-state-ish
if env.outer_scope is None:
return False
if self._convert_to_py_code is None:
is_union = not self.is_struct
unsafe_union_types = set()
safe_union_types = set()
for member in self.scope.var_entries:
member_type = member.type
if not member_type.can_coerce_to_pyobject(env):
self.to_py_function = None
self._convert_to_py_code = False
return False
if is_union:
if member_type.is_ptr or member_type.is_cpp_class:
unsafe_union_types.add(member_type)
else:
safe_union_types.add(member_type)
if unsafe_union_types and (safe_union_types or len(unsafe_union_types) > 1):
# unsafe mix of safe and unsafe to convert types
self.from_py_function = None
self._convert_from_py_code = False
return False
return True
def create_to_py_utility_code(self, env):
if not self.can_coerce_to_pyobject(env):
return False
if self._convert_to_py_code is None:
for member in self.scope.var_entries:
member.type.create_to_py_utility_code(env)
forward_decl = self.entry.visibility != 'extern' and not self.typedef_flag
self._convert_to_py_code = ToPyStructUtilityCode(self, forward_decl, env)
env.use_utility_code(self._convert_to_py_code)
return True
def can_coerce_from_pyobject(self, env):
if env.outer_scope is None or self._convert_from_py_code is False:
return False
for member in self.scope.var_entries:
if not member.type.can_coerce_from_pyobject(env):
return False
return True
def create_from_py_utility_code(self, env):
if env.outer_scope is None:
return False
if self._convert_from_py_code is False:
return None # tri-state-ish
if self._convert_from_py_code is None:
if not self.scope.var_entries:
# There are obviously missing fields; don't allow instantiation
# where absolutely no content is provided.
return False
for member in self.scope.var_entries:
if not member.type.create_from_py_utility_code(env):
self.from_py_function = None
self._convert_from_py_code = False
return False
context = dict(
struct_type=self,
var_entries=self.scope.var_entries,
funcname=self.from_py_function,
)
env.use_utility_code(UtilityCode.load_cached("RaiseUnexpectedTypeError", "ObjectHandling.c"))
from .UtilityCode import CythonUtilityCode
self._convert_from_py_code = CythonUtilityCode.load(
"FromPyStructUtility" if self.is_struct else "FromPyUnionUtility",
"CConvert.pyx",
outer_module_scope=env.global_scope(), # need access to types declared in module
context=context)
env.use_utility_code(self._convert_from_py_code)
return True
def __repr__(self):
return "<CStructOrUnionType %s %s%s>" % (
self.name, self.cname,
("", " typedef")[self.typedef_flag])
def declaration_code(self, entity_code,
for_display=0, dll_linkage=None, pyrex=0):
if pyrex or for_display:
base_code = self.name
else:
if self.typedef_flag:
base_code = self.cname
else:
base_code = "%s %s" % (self.kind, self.cname)
base_code = public_decl(base_code, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def __eq__(self, other):
try:
return (isinstance(other, CStructOrUnionType) and
self.name == other.name)
except AttributeError:
return False
def __lt__(self, other):
try:
return self.name < other.name
except AttributeError:
# this is arbitrary, but it makes sure we always have
# *some* kind of order
return False
def __hash__(self):
return hash(self.cname) ^ hash(self.kind)
def is_complete(self):
return self.scope is not None
def attributes_known(self):
return self.is_complete()
def can_be_complex(self):
# Does the struct consist of exactly two identical floats?
fields = self.scope.var_entries
if len(fields) != 2: return False
a, b = fields
return (a.type.is_float and b.type.is_float and
a.type.empty_declaration_code() ==
b.type.empty_declaration_code())
def struct_nesting_depth(self):
child_depths = [x.type.struct_nesting_depth()
for x in self.scope.var_entries]
return max(child_depths) + 1
def cast_code(self, expr_code):
if self.is_struct:
return expr_code
return super().cast_code(expr_code)
def needs_explicit_construction(self, scope):
if self._needs_cpp_construction and scope.is_c_class_scope:
return True
return False
def needs_explicit_destruction(self, scope):
return self.needs_explicit_construction(scope) # same rules
def generate_explicit_construction(self, code, entry, extra_access_code=""):
# defer to CppClassType since its implementation will be the same
CppClassType.generate_explicit_construction(self, code, entry, extra_access_code=extra_access_code)
def generate_explicit_destruction(self, code, entry, extra_access_code=""):
# defer to CppClassType since its implementation will be the same
CppClassType.generate_explicit_destruction(self, code, entry, extra_access_code=extra_access_code)
cpp_string_conversions = ("std::string", "std::string_view")
cpp_unowned_views = ("std::string_view",)
builtin_cpp_conversions = {
# type element template params
"std::pair": 2,
"std::vector": 1,
"std::list": 1,
"std::set": 1,
"std::unordered_set": 1,
"std::map": 2,
"std::unordered_map": 2,
"std::complex": 1,
}
| CStructOrUnionType |
python | keras-team__keras | keras/src/ops/nn.py | {
"start": 34215,
"end": 37669
} | class ____(Operation):
def __init__(
self,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
*,
name=None,
):
super().__init__(name=name)
self.strides = strides
self.padding = padding.lower()
self.data_format = data_format
self.dilation_rate = dilation_rate
def call(self, inputs, kernel):
return backend.nn.conv(
inputs,
kernel,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate,
)
def compute_output_spec(self, inputs, kernel):
output_shape = operation_utils.compute_conv_output_shape(
inputs.shape,
kernel.shape[-1],
kernel.shape[:-2],
self.strides,
self.padding,
self.data_format,
self.dilation_rate,
)
return KerasTensor(output_shape, dtype=inputs.dtype)
@keras_export(["keras.ops.conv", "keras.ops.nn.conv"])
def conv(
inputs,
kernel,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
):
"""General N-D convolution.
This ops supports 1D, 2D and 3D convolution.
Args:
inputs: Tensor of rank N+2. `inputs` has shape
`(batch_size,) + inputs_spatial_shape + (num_channels,)` if
`data_format="channels_last"`, or
`(batch_size, num_channels) + inputs_spatial_shape` if
`data_format="channels_first"`.
kernel: Tensor of rank N+2. `kernel` has shape
`(kernel_spatial_shape, num_input_channels, num_output_channels)`.
`num_input_channels` should match the number of channels in
`inputs`.
strides: int or int tuple/list of `len(inputs_spatial_shape)`,
specifying the strides of the convolution along each spatial
dimension. If `strides` is int, then every spatial dimension shares
the same `strides`.
padding: string, either `"valid"` or `"same"`. `"valid"` means no
padding is applied, and `"same"` results in padding evenly to the
left/right or up/down of the input such that output has the
same height/width dimension as the input when `strides=1`.
data_format: A string, either `"channels_last"` or `"channels_first"`.
`data_format` determines the ordering of the dimensions in the
inputs. If `data_format="channels_last"`, `inputs` is of shape
`(batch_size, ..., channels)` while if
`data_format="channels_first"`, `inputs` is of shape
`(batch_size, channels, ...)`.
dilation_rate: int or int tuple/list of `len(inputs_spatial_shape)`,
specifying the dilation rate to use for dilated convolution. If
`dilation_rate` is int, then every spatial dimension shares
the same `dilation_rate`.
Returns:
A tensor of rank N+2, the result of the conv operation.
"""
data_format = standardize_data_format(data_format)
padding = padding.lower()
if any_symbolic_tensors((inputs,)):
return Conv(strides, padding, data_format, dilation_rate).symbolic_call(
inputs, kernel
)
return backend.nn.conv(
inputs, kernel, strides, padding, data_format, dilation_rate
)
| Conv |
python | coleifer__peewee | tests/base_models.py | {
"start": 697,
"end": 795
} | class ____(TestModel):
username = CharField()
class Meta:
table_name = 'users'
| User |
python | getsentry__sentry | src/sentry/notifications/notification_action/types.py | {
"start": 13739,
"end": 14664
} | class ____(BaseIssueAlertHandler):
@classmethod
def get_target_display(cls, action: Action, mapping: ActionFieldMapping) -> dict[str, Any]:
return {}
@classmethod
def get_target_identifier(
cls, action: Action, mapping: ActionFieldMapping, organization_id: int
) -> dict[str, Any]:
return {}
@classmethod
def get_additional_fields(cls, action: Action, mapping: ActionFieldMapping) -> dict[str, Any]:
# Use helper to separate fields
dynamic_form_fields = action.data.get(
TicketFieldMappingKeys.DYNAMIC_FORM_FIELDS_KEY.value, {}
)
additional_fields = action.data.get(TicketFieldMappingKeys.ADDITIONAL_FIELDS_KEY.value, {})
final_blob = {
TicketFieldMappingKeys.DYNAMIC_FORM_FIELDS_KEY.value: dynamic_form_fields,
**additional_fields,
}
return final_blob
| TicketingIssueAlertHandler |
python | encode__django-rest-framework | rest_framework/schemas/openapi.py | {
"start": 747,
"end": 3954
} | class ____(BaseSchemaGenerator):
def get_info(self):
# Title and version are required by openapi specification 3.x
info = {
'title': self.title or '',
'version': self.version or ''
}
if self.description is not None:
info['description'] = self.description
return info
def check_duplicate_operation_id(self, paths):
ids = {}
for route in paths:
for method in paths[route]:
if 'operationId' not in paths[route][method]:
continue
operation_id = paths[route][method]['operationId']
if operation_id in ids:
warnings.warn(
'You have a duplicated operationId in your OpenAPI schema: {operation_id}\n'
'\tRoute: {route1}, Method: {method1}\n'
'\tRoute: {route2}, Method: {method2}\n'
'\tAn operationId has to be unique across your schema. Your schema may not work in other tools.'
.format(
route1=ids[operation_id]['route'],
method1=ids[operation_id]['method'],
route2=route,
method2=method,
operation_id=operation_id
)
)
ids[operation_id] = {
'route': route,
'method': method
}
def get_schema(self, request=None, public=False):
"""
Generate a OpenAPI schema.
"""
self._initialise_endpoints()
components_schemas = {}
# Iterate endpoints generating per method path operations.
paths = {}
_, view_endpoints = self._get_paths_and_endpoints(None if public else request)
for path, method, view in view_endpoints:
if not self.has_view_permissions(path, method, view):
continue
operation = view.schema.get_operation(path, method)
components = view.schema.get_components(path, method)
for k in components.keys():
if k not in components_schemas:
continue
if components_schemas[k] == components[k]:
continue
warnings.warn(f'Schema component "{k}" has been overridden with a different value.')
components_schemas.update(components)
# Normalize path for any provided mount url.
if path.startswith('/'):
path = path[1:]
path = urljoin(self.url or '/', path)
paths.setdefault(path, {})
paths[path][method.lower()] = operation
self.check_duplicate_operation_id(paths)
# Compile final schema.
schema = {
'openapi': '3.0.2',
'info': self.get_info(),
'paths': paths,
}
if len(components_schemas) > 0:
schema['components'] = {
'schemas': components_schemas
}
return schema
# View Inspectors
| SchemaGenerator |
python | google__pytype | pytype/directors/parser.py | {
"start": 433,
"end": 584
} | class ____(Exception):
"""Exception thrown if we encounter "pytype: skip-file" in the source code."""
@dataclasses.dataclass(frozen=True)
| SkipFileError |
python | pytransitions__transitions | tests/test_states.py | {
"start": 519,
"end": 7206
} | class ____(TestCase):
def setUp(self):
super(TestTransitions, self).setUp()
self.machine_cls = Machine # type: Type[Machine]
def test_tags(self):
if TYPE_CHECKING:
@add_state_features(Tags)
class CustomMachine(Machine):
pass
else:
@add_state_features(Tags)
class CustomMachine(self.machine_cls):
pass
states = [{"name": "A", "tags": ["initial", "success", "error_state"]}]
m = CustomMachine(states=states, initial='A')
s = m.get_state(m.state)
self.assertTrue(s.is_initial)
self.assertTrue(s.is_success)
self.assertTrue(s.is_error_state)
self.assertFalse(s.is_not_available)
def test_error(self):
if TYPE_CHECKING:
@add_state_features(Error)
class CustomMachine(Machine):
pass
else:
@add_state_features(Error)
class CustomMachine(self.machine_cls):
pass
states = ['A', 'B', 'F',
{'name': 'S1', 'tags': ['accepted']},
{'name': 'S2', 'accepted': True}]
transitions = [['to_B', ['S1', 'S2'], 'B'], ['go', 'A', 'B'], ['fail', 'B', 'F'],
['success1', 'B', 'S2'], ['success2', 'B', 'S2']] # type: Sequence[TransitionConfig]
m = CustomMachine(states=states, transitions=transitions, auto_transitions=False, initial='A')
m.go()
m.success1()
self.assertTrue(m.get_state(m.state).is_accepted)
m.to_B()
m.success2()
self.assertTrue(m.get_state(m.state).is_accepted)
m.to_B()
with self.assertRaises(MachineError):
m.fail()
def test_error_callback(self):
if TYPE_CHECKING:
@add_state_features(Error)
class CustomMachine(Machine):
pass
else:
@add_state_features(Error)
class CustomMachine(self.machine_cls):
pass
mock_callback = MagicMock()
states = ['A', {"name": "B", "on_enter": mock_callback}, 'C']
transitions = [
["to_B", "A", "B"],
["to_C", "B", "C"],
]
m = CustomMachine(states=states, transitions=transitions, auto_transitions=False, initial='A')
m.to_B()
self.assertEqual(m.state, "B")
self.assertTrue(mock_callback.called)
def test_timeout(self):
mock = MagicMock()
if TYPE_CHECKING:
@add_state_features(Timeout)
class CustomMachine(Machine):
def timeout(self):
mock()
else:
@add_state_features(Timeout)
class CustomMachine(self.machine_cls):
def timeout(self):
mock()
states = ['A',
{'name': 'B', 'timeout': 0.3, 'on_timeout': 'timeout'},
{'name': 'C', 'timeout': 0.3, 'on_timeout': mock}]
m = CustomMachine(states=states)
m.to_B()
m.to_A()
sleep(0.4)
self.assertFalse(mock.called)
m.to_B()
sleep(0.4)
self.assertTrue(mock.called)
m.to_C()
sleep(0.4)
self.assertEqual(mock.call_count, 2)
with self.assertRaises(AttributeError):
m.add_state({'name': 'D', 'timeout': 0.3})
def test_timeout_callbacks(self):
timeout = MagicMock()
notification = MagicMock()
counter = MagicMock()
if TYPE_CHECKING:
@add_state_features(Timeout)
class CustomMachine(Machine):
pass
else:
@add_state_features(Timeout)
class CustomMachine(self.machine_cls):
pass
class Model(object):
def on_timeout_B(self):
counter()
def timeout(self):
timeout()
def notification(self):
notification()
def another_notification(self):
notification()
states = ['A', {'name': 'B', 'timeout': 0.05, 'on_timeout': 'timeout'}]
model = Model()
machine = CustomMachine(model=model, states=states, initial='A')
model.to_B()
sleep(0.1)
self.assertTrue(timeout.called)
self.assertTrue(counter.called)
machine.get_state('B').add_callback('timeout', 'notification')
machine.on_timeout_B('another_notification')
model.to_B()
sleep(0.1)
self.assertEqual(timeout.call_count, 2)
self.assertEqual(counter.call_count, 2)
self.assertTrue(notification.called)
machine.get_state('B').on_timeout = []
model.to_B()
sleep(0.1)
self.assertEqual(timeout.call_count, 2)
self.assertEqual(notification.call_count, 2)
def test_timeout_transitioning(self):
timeout_mock = MagicMock()
if TYPE_CHECKING:
@add_state_features(Timeout)
class CustomMachine(Machine):
pass
else:
@add_state_features(Timeout)
class CustomMachine(self.machine_cls):
pass
states = ['A', {'name': 'B', 'timeout': 0.05, 'on_timeout': ['to_A', timeout_mock]}]
machine = CustomMachine(states=states, initial='A')
machine.to_B()
sleep(0.1)
self.assertTrue(machine.is_A())
self.assertTrue(timeout_mock.called)
def test_volatile(self):
class TemporalState(object):
def __init__(self):
self.value = 5
def increase(self):
self.value += 1
if TYPE_CHECKING:
@add_state_features(Volatile)
class CustomMachine(Machine):
pass
else:
@add_state_features(Volatile)
class CustomMachine(self.machine_cls):
pass
states = ['A', {'name': 'B', 'volatile': TemporalState}]
m = CustomMachine(states=states, initial='A')
m.to_B()
self.assertEqual(m.scope.value, 5)
# should call method of TemporalState
m.scope.increase()
self.assertEqual(m.scope.value, 6)
# re-entering state should reset default volatile object
m.to_A()
self.assertFalse(hasattr(m.scope, 'value'))
m.scope.foo = 'bar'
m.to_B()
# custom attribute of A should be gone
self.assertFalse(hasattr(m.scope, 'foo'))
# value should be reset
self.assertEqual(m.scope.value, 5)
@skipIf(pgv is None, 'Graph diagram requires pygraphviz')
| TestTransitions |
python | sympy__sympy | sympy/functions/combinatorial/numbers.py | {
"start": 68264,
"end": 69346
} | class ____(DefinedFunction):
r"""
Calculate the number of distinct prime factors for a positive integer n.
If n's prime factorization is:
.. math ::
n = \prod_{i=1}^k p_i^{m_i},
then ``primenu(n)`` or `\nu(n)` is:
.. math ::
\nu(n) = k.
Examples
========
>>> from sympy.functions.combinatorial.numbers import primenu
>>> primenu(1)
0
>>> primenu(30)
3
See Also
========
sympy.ntheory.factor_.factorint
References
==========
.. [1] https://mathworld.wolfram.com/PrimeFactor.html
.. [2] https://oeis.org/A001221
"""
is_integer = True
is_nonnegative = True
@classmethod
def eval(cls, n):
if n.is_integer is False:
raise TypeError("n should be an integer")
if n.is_positive is False:
raise ValueError("n should be a positive integer")
if n.is_prime is True:
return S.One
if n is S.One:
return S.Zero
if n.is_Integer is True:
return S(len(factorint(n)))
| primenu |
python | joke2k__faker | tests/providers/test_address.py | {
"start": 3891,
"end": 6799
} | class ____:
"""Test address provider methods"""
def test_alpha_2_country_codes(self, faker, num_samples):
for _ in range(num_samples):
country_code = faker.country_code(representation="alpha-2")
assert len(country_code) == 2
assert country_code.isalpha()
def test_alpha_2_country_codes_as_default(self, faker, num_samples):
for _ in range(num_samples):
country_code = faker.country_code()
assert len(country_code) == 2
assert country_code.isalpha()
def test_alpha_3_country_codes(self, faker, num_samples):
for _ in range(num_samples):
country_code = faker.country_code(representation="alpha-3")
assert len(country_code) == 3
assert country_code.isalpha()
def test_bad_country_code_representation(self, faker, num_samples):
for _ in range(num_samples):
with pytest.raises(ValueError):
faker.country_code(representation="hello")
def _collect_fakers_for_locales(self):
cached_locales = []
language_locale_codes = providers.BaseProvider.language_locale_codes
for code, countries in language_locale_codes.items():
for country in countries:
name = f"{code}_{country}"
try:
faker = Faker(name)
cached_locales.append(faker)
except AttributeError as e:
print(f"Cannot generate faker for {name}: {e}. Skipped")
return cached_locales
def _fakers_for_locales(self):
if not hasattr(self.__class__, "cached_locales"):
self.__class__.cached_locales = self._collect_fakers_for_locales()
return self.cached_locales
def test_administrative_unit_all_locales(self):
for faker in self._fakers_for_locales():
if faker.current_country_code() not in ["IL", "GE", "TW", "UA", "NZ"]:
try:
assert isinstance(faker.administrative_unit(), str)
except Exception as e:
raise e.__class__(faker.current_country_code(), *e.args)
def test_country_code_all_locales(self):
for faker in self._fakers_for_locales():
assert isinstance(faker.current_country(), str)
def test_current_country_errors(self):
dt = providers.date_time
countries_duplicated = [*dt.Provider.countries, *dt.Provider.countries]
with mock.patch.object(dt.Provider, "countries", countries_duplicated), pytest.raises(ValueError) as e:
Faker("en_US").current_country()
assert "Ambiguous" in str(e)
country_code = "faker.providers.address.Provider.current_country_code"
with pytest.raises(ValueError), mock.patch(country_code, lambda self: "en_ZZ"):
Faker("en_US").current_country()
| TestBaseProvider |
python | ray-project__ray | doc/source/serve/doc_code/production_guide/text_ml.py | {
"start": 234,
"end": 1075
} | class ____:
def __init__(self):
self.language = "french"
self.model = pipeline("translation_en_to_fr", model="t5-small")
def translate(self, text: str) -> str:
model_output = self.model(text)
translation = model_output[0]["translation_text"]
return translation
def reconfigure(self, config: Dict):
self.language = config.get("language", "french")
if self.language.lower() == "french":
self.model = pipeline("translation_en_to_fr", model="t5-small")
elif self.language.lower() == "german":
self.model = pipeline("translation_en_to_de", model="t5-small")
elif self.language.lower() == "romanian":
self.model = pipeline("translation_en_to_ro", model="t5-small")
else:
pass
@serve.deployment
| Translator |
python | google__jax | jax/_src/pallas/mosaic/sc_lowering.py | {
"start": 2012,
"end": 3050
} | class ____:
"""Hands out global allocations sequentially during lowering."""
def __init__(self, allocations: dict[pallas_core.MemoryRef, list[Any]]):
self._allocations = {k: list(v) for k, v in allocations.items()}
def next_allocation(self, what: state.AbstractRef | pallas_core.TransformedRef) -> Any:
"""Returns the next available allocation for the given shape."""
what = pallas_core.MemoryRef(what.inner_aval, what.memory_space)
if what not in self._allocations:
raise LookupError(f"No allocations are available for {what}.")
if not self._allocations[what]:
raise LookupError(f"No more allocations available for {what}.")
return self._allocations[what].pop()
@contextlib.contextmanager
def verify_usage(self):
"""Scope that verifies all allocations are used."""
try:
yield
finally:
unused = [k for k, v in self._allocations.items() if v]
if unused:
raise AssertionError(f"Some allocations unused ({unused}).")
@dataclasses.dataclass
| GlobalAllocations |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1575482,
"end": 1575682
} | class ____(VegaLiteSchema):
"""Vector7string schema wrapper."""
_schema = {"$ref": "#/definitions/Vector7<string>"}
def __init__(self, *args):
super().__init__(*args)
| Vector7string |
python | huggingface__transformers | src/transformers/models/table_transformer/modeling_table_transformer.py | {
"start": 6914,
"end": 9456
} | class ____(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than
torchvision.models.resnet[18,34,50,101] produce nans.
"""
def __init__(self, n):
super().__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def forward(self, x):
# move reshapes to the beginning
# to make it user-friendly
weight = self.weight.reshape(1, -1, 1, 1)
bias = self.bias.reshape(1, -1, 1, 1)
running_var = self.running_var.reshape(1, -1, 1, 1)
running_mean = self.running_mean.reshape(1, -1, 1, 1)
epsilon = 1e-5
scale = weight * (running_var + epsilon).rsqrt()
bias = bias - running_mean * scale
return x * scale + bias
# Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->TableTransformer
def replace_batch_norm(model):
r"""
Recursively replace all `torch.nn.BatchNorm2d` with `TableTransformerFrozenBatchNorm2d`.
Args:
model (torch.nn.Module):
input model
"""
for name, module in model.named_children():
if isinstance(module, nn.BatchNorm2d):
new_module = TableTransformerFrozenBatchNorm2d(module.num_features)
if module.weight.device != torch.device("meta"):
new_module.weight.copy_(module.weight)
new_module.bias.copy_(module.bias)
new_module.running_mean.copy_(module.running_mean)
new_module.running_var.copy_(module.running_var)
model._modules[name] = new_module
if len(list(module.children())) > 0:
replace_batch_norm(module)
# Copied from transformers.models.detr.modeling_detr.DetrConvEncoder with Detr->TableTransformer
| TableTransformerFrozenBatchNorm2d |
python | ansible__ansible | lib/ansible/module_utils/connection.py | {
"start": 3306,
"end": 3516
} | class ____(Exception):
def __init__(self, message, *args, **kwargs):
super(ConnectionError, self).__init__(message)
for k, v in kwargs.items():
setattr(self, k, v)
| ConnectionError |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.