language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | torch/_numpy/testing/utils.py | {
"start": 36039,
"end": 55711
} | class ____(unittest.TestCase):
def nop(self):
pass
_d = _Dummy("nop")
def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
"""
assert_raises_regex(exception_class, expected_regexp, callable, *args,
**kwargs)
assert_raises_regex(exception_class, expected_regexp)
Fail unless an exception of class exception_class and with message that
matches expected_regexp is thrown by callable when invoked with arguments
args and keyword arguments kwargs.
Alternatively, can be used as a context manager like `assert_raises`.
Notes
-----
.. versionadded:: 1.9.0
"""
__tracebackhide__ = True # Hide traceback for py.test
return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs)
def decorate_methods(cls, decorator, testmatch=None):
"""
Apply a decorator to all methods in a class matching a regular expression.
The given decorator is applied to all public methods of `cls` that are
matched by the regular expression `testmatch`
(``testmatch.search(methodname)``). Methods that are private, i.e. start
with an underscore, are ignored.
Parameters
----------
cls : class
Class whose methods to decorate.
decorator : function
Decorator to apply to methods
testmatch : compiled regexp or str, optional
The regular expression. Default value is None, in which case the
nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
is used.
If `testmatch` is a string, it is compiled to a regular expression
first.
"""
if testmatch is None:
testmatch = re.compile(rf"(?:^|[\\b_\\.{os.sep}-])[Tt]est")
else:
testmatch = re.compile(testmatch)
cls_attr = cls.__dict__
# delayed import to reduce startup time
from inspect import isfunction
methods = [_m for _m in cls_attr.values() if isfunction(_m)]
for function in methods:
try:
if hasattr(function, "compat_func_name"):
funcname = function.compat_func_name
else:
funcname = function.__name__
except AttributeError:
# not a function
continue
if testmatch.search(funcname) and not funcname.startswith("_"):
setattr(cls, funcname, decorator(function))
return
def _assert_valid_refcount(op):
"""
Check that ufuncs don't mishandle refcount of object `1`.
Used in a few regression tests.
"""
if not HAS_REFCOUNT:
return True
import gc
import numpy as np
b = np.arange(100 * 100).reshape(100, 100)
c = b
i = 1
gc.disable()
try:
rc = sys.getrefcount(i)
for _ in range(15):
d = op(b, c)
assert_(sys.getrefcount(i) >= rc)
finally:
gc.enable()
del d # for pyflakes
def assert_allclose(
actual,
desired,
rtol=1e-7,
atol=0,
equal_nan=True,
err_msg="",
verbose=True,
check_dtype=False,
):
"""
Raises an AssertionError if two objects are not equal up to desired
tolerance.
Given two array_like objects, check that their shapes and all elements
are equal (but see the Notes for the special handling of a scalar). An
exception is raised if the shapes mismatch or any values conflict. In
contrast to the standard usage in numpy, NaNs are compared like numbers,
no assertion is raised if both objects have NaNs in the same positions.
The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note
that ``allclose`` has different default values). It compares the difference
between `actual` and `desired` to ``atol + rtol * abs(desired)``.
.. versionadded:: 1.5.0
Parameters
----------
actual : array_like
Array obtained.
desired : array_like
Array desired.
rtol : float, optional
Relative tolerance.
atol : float, optional
Absolute tolerance.
equal_nan : bool, optional.
If True, NaNs will compare equal.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
If True, the conflicting values are appended to the error message.
Raises
------
AssertionError
If actual and desired are not equal up to specified precision.
See Also
--------
assert_array_almost_equal_nulp, assert_array_max_ulp
Notes
-----
When one of `actual` and `desired` is a scalar and the other is
array_like, the function checks that each element of the array_like
object is equal to the scalar.
Examples
--------
>>> x = [1e-5, 1e-3, 1e-1]
>>> y = np.arccos(np.cos(x))
>>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0)
"""
__tracebackhide__ = True # Hide traceback for py.test
def compare(x, y):
return np.isclose(x, y, rtol=rtol, atol=atol, equal_nan=equal_nan)
actual, desired = asanyarray(actual), asanyarray(desired)
header = f"Not equal to tolerance rtol={rtol:g}, atol={atol:g}"
if check_dtype:
assert actual.dtype == desired.dtype
assert_array_compare(
compare,
actual,
desired,
err_msg=str(err_msg),
verbose=verbose,
header=header,
equal_nan=equal_nan,
)
def assert_array_almost_equal_nulp(x, y, nulp=1):
"""
Compare two arrays relatively to their spacing.
This is a relatively robust method to compare two arrays whose amplitude
is variable.
Parameters
----------
x, y : array_like
Input arrays.
nulp : int, optional
The maximum number of unit in the last place for tolerance (see Notes).
Default is 1.
Returns
-------
None
Raises
------
AssertionError
If the spacing between `x` and `y` for one or more elements is larger
than `nulp`.
See Also
--------
assert_array_max_ulp : Check that all items of arrays differ in at most
N Units in the Last Place.
spacing : Return the distance between x and the nearest adjacent number.
Notes
-----
An assertion is raised if the following condition is not met::
abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y)))
Examples
--------
>>> x = np.array([1.0, 1e-10, 1e-20])
>>> eps = np.finfo(x.dtype).eps
>>> np.testing.assert_array_almost_equal_nulp(x, x * eps / 2 + x) # doctest: +SKIP
>>> np.testing.assert_array_almost_equal_nulp(x, x * eps + x) # doctest: +SKIP
Traceback (most recent call last):
...
AssertionError: X and Y are not equal to 1 ULP (max is 2)
"""
__tracebackhide__ = True # Hide traceback for py.test
import numpy as np
ax = np.abs(x)
ay = np.abs(y)
ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
if not np.all(np.abs(x - y) <= ref):
if np.iscomplexobj(x) or np.iscomplexobj(y):
msg = f"X and Y are not equal to {nulp:d} ULP"
else:
max_nulp = np.max(nulp_diff(x, y))
msg = f"X and Y are not equal to {nulp:d} ULP (max is {max_nulp:g})"
raise AssertionError(msg)
def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
"""
Check that all items of arrays differ in at most N Units in the Last Place.
Parameters
----------
a, b : array_like
Input arrays to be compared.
maxulp : int, optional
The maximum number of units in the last place that elements of `a` and
`b` can differ. Default is 1.
dtype : dtype, optional
Data-type to convert `a` and `b` to if given. Default is None.
Returns
-------
ret : ndarray
Array containing number of representable floating point numbers between
items in `a` and `b`.
Raises
------
AssertionError
If one or more elements differ by more than `maxulp`.
Notes
-----
For computing the ULP difference, this API does not differentiate between
various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000
is zero).
See Also
--------
assert_array_almost_equal_nulp : Compare two arrays relatively to their
spacing.
Examples
--------
>>> a = np.linspace(0.0, 1.0, 100)
>>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) # doctest: +SKIP
"""
__tracebackhide__ = True # Hide traceback for py.test
import numpy as np
ret = nulp_diff(a, b, dtype)
if not np.all(ret <= maxulp):
raise AssertionError(
f"Arrays are not almost equal up to {maxulp:g} "
f"ULP (max difference is {np.max(ret):g} ULP)"
)
return ret
def nulp_diff(x, y, dtype=None):
"""For each item in x and y, return the number of representable floating
points between them.
Parameters
----------
x : array_like
first input array
y : array_like
second input array
dtype : dtype, optional
Data-type to convert `x` and `y` to if given. Default is None.
Returns
-------
nulp : array_like
number of representable floating point numbers between each item in x
and y.
Notes
-----
For computing the ULP difference, this API does not differentiate between
various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000
is zero).
Examples
--------
# By definition, epsilon is the smallest number such as 1 + eps != 1, so
# there should be exactly one ULP between 1 and 1 + eps
>>> nulp_diff(1, 1 + np.finfo(x.dtype).eps) # doctest: +SKIP
1.0
"""
import numpy as np
if dtype:
x = np.asarray(x, dtype=dtype)
y = np.asarray(y, dtype=dtype)
else:
x = np.asarray(x)
y = np.asarray(y)
t = np.common_type(x, y)
if np.iscomplexobj(x) or np.iscomplexobj(y):
raise NotImplementedError("_nulp not implemented for complex array")
x = np.array([x], dtype=t)
y = np.array([y], dtype=t)
x[np.isnan(x)] = np.nan
y[np.isnan(y)] = np.nan
if not x.shape == y.shape:
raise ValueError(f"x and y do not have the same shape: {x.shape} - {y.shape}")
def _diff(rx, ry, vdt):
diff = np.asarray(rx - ry, dtype=vdt)
return np.abs(diff)
rx = integer_repr(x)
ry = integer_repr(y)
return _diff(rx, ry, t)
def _integer_repr(x, vdt, comp):
# Reinterpret binary representation of the float as sign-magnitude:
# take into account two-complement representation
# See also
# https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
rx = x.view(vdt)
if rx.size != 1:
rx[rx < 0] = comp - rx[rx < 0]
else:
if rx < 0:
rx = comp - rx
return rx
def integer_repr(x):
"""Return the signed-magnitude interpretation of the binary representation
of x."""
import numpy as np
if x.dtype == np.float16:
return _integer_repr(x, np.int16, np.int16(-(2**15)))
elif x.dtype == np.float32:
return _integer_repr(x, np.int32, np.int32(-(2**31)))
elif x.dtype == np.float64:
return _integer_repr(x, np.int64, np.int64(-(2**63)))
else:
raise ValueError(f"Unsupported dtype {x.dtype}")
@contextlib.contextmanager
def _assert_warns_context(warning_class, name=None):
__tracebackhide__ = True # Hide traceback for py.test
with suppress_warnings() as sup:
l = sup.record(warning_class)
yield
if not len(l) > 0:
name_str = f" when calling {name}" if name is not None else ""
raise AssertionError("No warning raised" + name_str)
def assert_warns(warning_class, *args, **kwargs):
"""
Fail unless the given callable throws the specified warning.
A warning of class warning_class should be thrown by the callable when
invoked with arguments args and keyword arguments kwargs.
If a different type of warning is thrown, it will not be caught.
If called with all arguments other than the warning class omitted, may be
used as a context manager:
with assert_warns(SomeWarning):
do_something()
The ability to be used as a context manager is new in NumPy v1.11.0.
.. versionadded:: 1.4.0
Parameters
----------
warning_class : class
The class defining the warning that `func` is expected to throw.
func : callable, optional
Callable to test
*args : Arguments
Arguments for `func`.
**kwargs : Kwargs
Keyword arguments for `func`.
Returns
-------
The value returned by `func`.
Examples
--------
>>> import warnings
>>> def deprecated_func(num):
... warnings.warn("Please upgrade", DeprecationWarning)
... return num * num
>>> with np.testing.assert_warns(DeprecationWarning):
... assert deprecated_func(4) == 16
>>> # or passing a func
>>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4)
>>> assert ret == 16
"""
if not args:
return _assert_warns_context(warning_class)
func = args[0]
args = args[1:]
with _assert_warns_context(warning_class, name=func.__name__):
return func(*args, **kwargs)
@contextlib.contextmanager
def _assert_no_warnings_context(name=None):
__tracebackhide__ = True # Hide traceback for py.test
with warnings.catch_warnings(record=True) as l:
warnings.simplefilter("always")
yield
if len(l) > 0:
name_str = f" when calling {name}" if name is not None else ""
raise AssertionError(f"Got warnings{name_str}: {l}")
def assert_no_warnings(*args, **kwargs):
"""
Fail if the given callable produces any warnings.
If called with all arguments omitted, may be used as a context manager:
with assert_no_warnings():
do_something()
The ability to be used as a context manager is new in NumPy v1.11.0.
.. versionadded:: 1.7.0
Parameters
----------
func : callable
The callable to test.
\\*args : Arguments
Arguments passed to `func`.
\\*\\*kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
The value returned by `func`.
"""
if not args:
return _assert_no_warnings_context()
func = args[0]
args = args[1:]
with _assert_no_warnings_context(name=func.__name__):
return func(*args, **kwargs)
def _gen_alignment_data(dtype=float32, type="binary", max_size=24):
"""
generator producing data with different alignment and offsets
to test simd vectorization
Parameters
----------
dtype : dtype
data type to produce
type : string
'unary': create data for unary operations, creates one input
and output array
'binary': create data for unary operations, creates two input
and output array
max_size : integer
maximum size of data to produce
Returns
-------
if type is 'unary' yields one output, one input array and a message
containing information on the data
if type is 'binary' yields one output array, two input array and a message
containing information on the data
"""
ufmt = "unary offset=(%d, %d), size=%d, dtype=%r, %s"
bfmt = "binary offset=(%d, %d, %d), size=%d, dtype=%r, %s"
for o in range(3):
for s in range(o + 2, max(o + 3, max_size)):
if type == "unary":
def inp():
return arange(s, dtype=dtype)[o:]
out = empty((s,), dtype=dtype)[o:]
yield out, inp(), ufmt % (o, o, s, dtype, "out of place")
d = inp()
yield d, d, ufmt % (o, o, s, dtype, "in place")
yield (
out[1:],
inp()[:-1],
ufmt
% (
o + 1,
o,
s - 1,
dtype,
"out of place",
),
)
yield (
out[:-1],
inp()[1:],
ufmt
% (
o,
o + 1,
s - 1,
dtype,
"out of place",
),
)
yield inp()[:-1], inp()[1:], ufmt % (o, o + 1, s - 1, dtype, "aliased")
yield inp()[1:], inp()[:-1], ufmt % (o + 1, o, s - 1, dtype, "aliased")
if type == "binary":
def inp1():
return arange(s, dtype=dtype)[o:]
inp2 = inp1
out = empty((s,), dtype=dtype)[o:]
yield out, inp1(), inp2(), bfmt % (o, o, o, s, dtype, "out of place")
d = inp1()
yield d, d, inp2(), bfmt % (o, o, o, s, dtype, "in place1")
d = inp2()
yield d, inp1(), d, bfmt % (o, o, o, s, dtype, "in place2")
yield (
out[1:],
inp1()[:-1],
inp2()[:-1],
bfmt
% (
o + 1,
o,
o,
s - 1,
dtype,
"out of place",
),
)
yield (
out[:-1],
inp1()[1:],
inp2()[:-1],
bfmt
% (
o,
o + 1,
o,
s - 1,
dtype,
"out of place",
),
)
yield (
out[:-1],
inp1()[:-1],
inp2()[1:],
bfmt
% (
o,
o,
o + 1,
s - 1,
dtype,
"out of place",
),
)
yield (
inp1()[1:],
inp1()[:-1],
inp2()[:-1],
bfmt
% (
o + 1,
o,
o,
s - 1,
dtype,
"aliased",
),
)
yield (
inp1()[:-1],
inp1()[1:],
inp2()[:-1],
bfmt
% (
o,
o + 1,
o,
s - 1,
dtype,
"aliased",
),
)
yield (
inp1()[:-1],
inp1()[:-1],
inp2()[1:],
bfmt
% (
o,
o,
o + 1,
s - 1,
dtype,
"aliased",
),
)
| _Dummy |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/triggers/test_message_bus.py | {
"start": 1068,
"end": 2175
} | class ____:
"""Test the base trigger functionality."""
def test_init_with_defaults(self):
"""Test initialization with default values using queue trigger."""
with patch("airflow.providers.microsoft.azure.triggers.message_bus.MessageHook"):
trigger = AzureServiceBusQueueTrigger(queues=["test_queue"])
assert trigger.max_wait_time is None
assert trigger.poll_interval == 60
assert hasattr(trigger, "message_hook")
def test_init_with_custom_values(self):
"""Test initialization with custom values using queue trigger."""
with patch("airflow.providers.microsoft.azure.triggers.message_bus.MessageHook"):
trigger = AzureServiceBusQueueTrigger(
queues=["test_queue"],
poll_interval=30,
azure_service_bus_conn_id="custom_conn",
max_wait_time=120,
)
assert trigger.poll_interval == 30
assert trigger.max_wait_time == 120
assert trigger.connection_id == "custom_conn"
| TestBaseAzureServiceBusTrigger |
python | apache__airflow | providers/apprise/tests/unit/apprise/hooks/test_apprise.py | {
"start": 1099,
"end": 5162
} | class ____:
"""
Test for AppriseHook
"""
@pytest.mark.parametrize(
"config",
[
{"path": "http://some_path_that_dont_exist/", "tag": "alert"},
'{"path": "http://some_path_that_dont_exist/", "tag": "alert"}',
],
)
def test_get_config_from_conn(self, config):
extra = {"config": config}
conn = Connection(conn_type="apprise", extra=extra)
hook = AppriseHook()
assert hook.get_config_from_conn(conn) == (json.loads(config) if isinstance(config, str) else config)
def test_set_config_from_conn_with_dict(self):
"""
Test set_config_from_conn for dict config
"""
extra = {"config": {"path": "http://some_path_that_dont_exist/", "tag": "alert"}}
apprise_obj = apprise.Apprise()
apprise_obj.add = MagicMock()
conn = Connection(conn_type="apprise", extra=extra)
hook = AppriseHook()
hook.set_config_from_conn(conn=conn, apprise_obj=apprise_obj)
apprise_obj.add.assert_called_once_with("http://some_path_that_dont_exist/", tag="alert")
def test_set_config_from_conn_with_list(self):
"""
Test set_config_from_conn for list of dict config
"""
extra = {
"config": [
{"path": "http://some_path_that_dont_exist/", "tag": "p0"},
{"path": "http://some_other_path_that_dont_exist/", "tag": "p1"},
]
}
apprise_obj = apprise.Apprise()
apprise_obj.add = MagicMock()
conn = Connection(conn_type="apprise", extra=extra)
hook = AppriseHook()
hook.set_config_from_conn(conn=conn, apprise_obj=apprise_obj)
apprise_obj.add.assert_has_calls(
[
call("http://some_path_that_dont_exist/", tag="p0"),
call("http://some_other_path_that_dont_exist/", tag="p1"),
]
)
@mock.patch(
"airflow.providers.apprise.hooks.apprise.AppriseHook.get_connection",
)
def test_notify(self, mock_conn):
mock_conn.return_value = Connection(
conn_id="apprise",
extra={
"config": [
{"path": "http://some_path_that_dont_exist/", "tag": "p0"},
{"path": "http://some_other_path_that_dont_exist/", "tag": "p1"},
]
},
)
apprise_obj = apprise.Apprise()
apprise_obj.notify = MagicMock()
apprise_obj.add = MagicMock()
with patch.object(apprise, "Apprise", return_value=apprise_obj):
hook = AppriseHook()
hook.notify(body="test")
apprise_obj.notify.assert_called_once_with(
body="test",
title="",
notify_type=NotifyType.INFO,
body_format=NotifyFormat.TEXT,
tag="all",
attach=None,
interpret_escapes=None,
)
@pytest.mark.asyncio
@mock.patch(
"airflow.providers.apprise.hooks.apprise.get_async_connection",
)
async def test_async_notify(self, mock_conn):
mock_conn.return_value = Connection(
conn_id="apprise",
extra={
"config": [
{"path": "http://some_path_that_dont_exist/", "tag": "p0"},
{"path": "http://some_other_path_that_dont_exist/", "tag": "p1"},
]
},
)
apprise_obj = apprise.Apprise()
apprise_obj.async_notify = AsyncMock()
apprise_obj.add = MagicMock()
with patch.object(apprise, "Apprise", return_value=apprise_obj):
hook = AppriseHook()
await hook.async_notify(body="test")
mock_conn.assert_called()
apprise_obj.async_notify.assert_called_once_with(
body="test",
title="",
notify_type=NotifyType.INFO,
body_format=NotifyFormat.TEXT,
tag="all",
attach=None,
interpret_escapes=None,
)
| TestAppriseHook |
python | Pylons__pyramid | tests/test_response.py | {
"start": 84,
"end": 592
} | class ____(unittest.TestCase):
def _getTargetClass(self):
from pyramid.response import Response
return Response
def test_implements_IResponse(self):
from pyramid.interfaces import IResponse
cls = self._getTargetClass()
self.assertTrue(IResponse.implementedBy(cls))
def test_provides_IResponse(self):
from pyramid.interfaces import IResponse
inst = self._getTargetClass()()
self.assertTrue(IResponse.providedBy(inst))
| TestResponse |
python | keon__algorithms | tests/test_backtrack.py | {
"start": 513,
"end": 1379
} | class ____(unittest.TestCase):
def test_add_operators(self):
# "123", 6 -> ["1+2+3", "1*2*3"]
s = "123"
target = 6
self.assertEqual(add_operators(s, target), ["1+2+3", "1*2*3"])
# "232", 8 -> ["2*3+2", "2+3*2"]
s = "232"
target = 8
self.assertEqual(add_operators(s, target), ["2+3*2", "2*3+2"])
s = "123045"
target = 3
answer = ['1+2+3*0*4*5',
'1+2+3*0*45',
'1+2-3*0*4*5',
'1+2-3*0*45',
'1-2+3+0-4+5',
'1-2+3-0-4+5',
'1*2+3*0-4+5',
'1*2-3*0-4+5',
'1*23+0-4*5',
'1*23-0-4*5',
'12+3*0-4-5',
'12-3*0-4-5']
self.assertEqual(add_operators(s, target), answer)
| TestAddOperator |
python | python-poetry__poetry | src/poetry/utils/env/null_env.py | {
"start": 187,
"end": 1304
} | class ____(SystemEnv):
def __init__(
self, path: Path | None = None, base: Path | None = None, execute: bool = False
) -> None:
if path is None:
path = Path(sys.prefix)
super().__init__(path, base=base)
self._execute = execute
self.executed: list[list[str]] = []
@cached_property
def paths(self) -> dict[str, str]:
paths = self.get_paths()
paths["platlib"] = str(self._path / "platlib")
paths["purelib"] = str(self._path / "purelib")
paths["scripts"] = str(self._path / "scripts")
paths["data"] = str(self._path / "data")
return paths
def _run(self, cmd: list[str], **kwargs: Any) -> str:
self.executed.append(cmd)
if self._execute:
return super()._run(cmd, **kwargs)
return ""
def execute(self, bin: str, *args: str, **kwargs: Any) -> int:
self.executed.append([bin, *list(args)])
if self._execute:
return super().execute(bin, *args, **kwargs)
return 0
def _bin(self, bin: str) -> str:
return bin
| NullEnv |
python | scikit-learn__scikit-learn | sklearn/neural_network/_multilayer_perceptron.py | {
"start": 50619,
"end": 66162
} | class ____(RegressorMixin, BaseMultilayerPerceptron):
"""Multi-layer Perceptron regressor.
This model optimizes the squared error using LBFGS or stochastic gradient
descent.
.. versionadded:: 0.18
Parameters
----------
loss : {'squared_error', 'poisson'}, default='squared_error'
The loss function to use when training the weights. Note that the
"squared error" and "poisson" losses actually implement
"half squares error" and "half poisson deviance" to simplify the
computation of the gradient. Furthermore, the "poisson" loss internally uses
a log-link (exponential as the output activation function) and requires
``y >= 0``.
.. versionchanged:: 1.7
Added parameter `loss` and option 'poisson'.
hidden_layer_sizes : array-like of shape(n_layers - 2,), default=(100,)
The ith element represents the number of neurons in the ith
hidden layer.
activation : {'identity', 'logistic', 'tanh', 'relu'}, default='relu'
Activation function for the hidden layer.
- 'identity', no-op activation, useful to implement linear bottleneck,
returns f(x) = x
- 'logistic', the logistic sigmoid function,
returns f(x) = 1 / (1 + exp(-x)).
- 'tanh', the hyperbolic tan function,
returns f(x) = tanh(x).
- 'relu', the rectified linear unit function,
returns f(x) = max(0, x)
solver : {'lbfgs', 'sgd', 'adam'}, default='adam'
The solver for weight optimization.
- 'lbfgs' is an optimizer in the family of quasi-Newton methods.
- 'sgd' refers to stochastic gradient descent.
- 'adam' refers to a stochastic gradient-based optimizer proposed by
Kingma, Diederik, and Jimmy Ba
For a comparison between Adam optimizer and SGD, see
:ref:`sphx_glr_auto_examples_neural_networks_plot_mlp_training_curves.py`.
Note: The default solver 'adam' works pretty well on relatively
large datasets (with thousands of training samples or more) in terms of
both training time and validation score.
For small datasets, however, 'lbfgs' can converge faster and perform
better.
alpha : float, default=0.0001
Strength of the L2 regularization term. The L2 regularization term
is divided by the sample size when added to the loss.
batch_size : int, default='auto'
Size of minibatches for stochastic optimizers.
If the solver is 'lbfgs', the regressor will not use minibatch.
When set to "auto", `batch_size=min(200, n_samples)`.
learning_rate : {'constant', 'invscaling', 'adaptive'}, default='constant'
Learning rate schedule for weight updates.
- 'constant' is a constant learning rate given by
'learning_rate_init'.
- 'invscaling' gradually decreases the learning rate ``learning_rate_``
at each time step 't' using an inverse scaling exponent of 'power_t'.
effective_learning_rate = learning_rate_init / pow(t, power_t)
- 'adaptive' keeps the learning rate constant to
'learning_rate_init' as long as training loss keeps decreasing.
Each time two consecutive epochs fail to decrease training loss by at
least tol, or fail to increase validation score by at least tol if
'early_stopping' is on, the current learning rate is divided by 5.
Only used when solver='sgd'.
learning_rate_init : float, default=0.001
The initial learning rate used. It controls the step-size
in updating the weights. Only used when solver='sgd' or 'adam'.
power_t : float, default=0.5
The exponent for inverse scaling learning rate.
It is used in updating effective learning rate when the learning_rate
is set to 'invscaling'. Only used when solver='sgd'.
max_iter : int, default=200
Maximum number of iterations. The solver iterates until convergence
(determined by 'tol') or this number of iterations. For stochastic
solvers ('sgd', 'adam'), note that this determines the number of epochs
(how many times each data point will be used), not the number of
gradient steps.
shuffle : bool, default=True
Whether to shuffle samples in each iteration. Only used when
solver='sgd' or 'adam'.
random_state : int, RandomState instance, default=None
Determines random number generation for weights and bias
initialization, train-test split if early stopping is used, and batch
sampling when solver='sgd' or 'adam'.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
tol : float, default=1e-4
Tolerance for the optimization. When the loss or score is not improving
by at least ``tol`` for ``n_iter_no_change`` consecutive iterations,
unless ``learning_rate`` is set to 'adaptive', convergence is
considered to be reached and training stops.
verbose : bool, default=False
Whether to print progress messages to stdout.
warm_start : bool, default=False
When set to True, reuse the solution of the previous
call to fit as initialization, otherwise, just erase the
previous solution. See :term:`the Glossary <warm_start>`.
momentum : float, default=0.9
Momentum for gradient descent update. Should be between 0 and 1. Only
used when solver='sgd'.
nesterovs_momentum : bool, default=True
Whether to use Nesterov's momentum. Only used when solver='sgd' and
momentum > 0.
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to True, it will automatically set
aside ``validation_fraction`` of training data as validation and
terminate training when validation score is not improving by at
least ``tol`` for ``n_iter_no_change`` consecutive epochs.
Only effective when solver='sgd' or 'adam'.
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True.
beta_1 : float, default=0.9
Exponential decay rate for estimates of first moment vector in adam,
should be in [0, 1). Only used when solver='adam'.
beta_2 : float, default=0.999
Exponential decay rate for estimates of second moment vector in adam,
should be in [0, 1). Only used when solver='adam'.
epsilon : float, default=1e-8
Value for numerical stability in adam. Only used when solver='adam'.
n_iter_no_change : int, default=10
Maximum number of epochs to not meet ``tol`` improvement.
Only effective when solver='sgd' or 'adam'.
.. versionadded:: 0.20
max_fun : int, default=15000
Only used when solver='lbfgs'. Maximum number of function calls.
The solver iterates until convergence (determined by ``tol``), number
of iterations reaches max_iter, or this number of function calls.
Note that number of function calls will be greater than or equal to
the number of iterations for the MLPRegressor.
.. versionadded:: 0.22
Attributes
----------
loss_ : float
The current loss computed with the loss function.
best_loss_ : float
The minimum loss reached by the solver throughout fitting.
If `early_stopping=True`, this attribute is set to `None`. Refer to
the `best_validation_score_` fitted attribute instead.
Only accessible when solver='sgd' or 'adam'.
loss_curve_ : list of shape (`n_iter_`,)
Loss value evaluated at the end of each training step.
The ith element in the list represents the loss at the ith iteration.
Only accessible when solver='sgd' or 'adam'.
validation_scores_ : list of shape (`n_iter_`,) or None
The score at each iteration on a held-out validation set. The score
reported is the R2 score. Only available if `early_stopping=True`,
otherwise the attribute is set to `None`.
Only accessible when solver='sgd' or 'adam'.
best_validation_score_ : float or None
The best validation score (i.e. R2 score) that triggered the
early stopping. Only available if `early_stopping=True`, otherwise the
attribute is set to `None`.
Only accessible when solver='sgd' or 'adam'.
t_ : int
The number of training samples seen by the solver during fitting.
Mathematically equals `n_iters * X.shape[0]`, it means
`time_step` and it is used by optimizer's learning rate scheduler.
coefs_ : list of shape (n_layers - 1,)
The ith element in the list represents the weight matrix corresponding
to layer i.
intercepts_ : list of shape (n_layers - 1,)
The ith element in the list represents the bias vector corresponding to
layer i + 1.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
The number of iterations the solver has run.
n_layers_ : int
Number of layers.
n_outputs_ : int
Number of outputs.
out_activation_ : str
Name of the output activation function.
See Also
--------
BernoulliRBM : Bernoulli Restricted Boltzmann Machine (RBM).
MLPClassifier : Multi-layer Perceptron classifier.
sklearn.linear_model.SGDRegressor : Linear model fitted by minimizing
a regularized empirical loss with SGD.
Notes
-----
MLPRegressor trains iteratively since at each time step
the partial derivatives of the loss function with respect to the model
parameters are computed to update the parameters.
It can also have a regularization term added to the loss function
that shrinks model parameters to prevent overfitting.
This implementation works with data represented as dense and sparse numpy
arrays of floating point values.
References
----------
Hinton, Geoffrey E. "Connectionist learning procedures."
Artificial intelligence 40.1 (1989): 185-234.
Glorot, Xavier, and Yoshua Bengio.
"Understanding the difficulty of training deep feedforward neural networks."
International Conference on Artificial Intelligence and Statistics. 2010.
:arxiv:`He, Kaiming, et al (2015). "Delving deep into rectifiers:
Surpassing human-level performance on imagenet classification." <1502.01852>`
:arxiv:`Kingma, Diederik, and Jimmy Ba (2014)
"Adam: A method for stochastic optimization." <1412.6980>`
Examples
--------
>>> from sklearn.neural_network import MLPRegressor
>>> from sklearn.datasets import make_regression
>>> from sklearn.model_selection import train_test_split
>>> X, y = make_regression(n_samples=200, n_features=20, random_state=1)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... random_state=1)
>>> regr = MLPRegressor(random_state=1, max_iter=2000, tol=0.1)
>>> regr.fit(X_train, y_train)
MLPRegressor(max_iter=2000, random_state=1, tol=0.1)
>>> regr.predict(X_test[:2])
array([ 28.98, -291])
>>> regr.score(X_test, y_test)
0.98
"""
_parameter_constraints: dict = {
**BaseMultilayerPerceptron._parameter_constraints,
"loss": [StrOptions({"squared_error", "poisson"})],
}
def __init__(
self,
loss="squared_error",
hidden_layer_sizes=(100,),
activation="relu",
*,
solver="adam",
alpha=0.0001,
batch_size="auto",
learning_rate="constant",
learning_rate_init=0.001,
power_t=0.5,
max_iter=200,
shuffle=True,
random_state=None,
tol=1e-4,
verbose=False,
warm_start=False,
momentum=0.9,
nesterovs_momentum=True,
early_stopping=False,
validation_fraction=0.1,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-8,
n_iter_no_change=10,
max_fun=15000,
):
super().__init__(
hidden_layer_sizes=hidden_layer_sizes,
activation=activation,
solver=solver,
alpha=alpha,
batch_size=batch_size,
learning_rate=learning_rate,
learning_rate_init=learning_rate_init,
power_t=power_t,
max_iter=max_iter,
loss=loss,
shuffle=shuffle,
random_state=random_state,
tol=tol,
verbose=verbose,
warm_start=warm_start,
momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon,
n_iter_no_change=n_iter_no_change,
max_fun=max_fun,
)
def predict(self, X):
"""Predict using the multi-layer perceptron model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
y : ndarray of shape (n_samples, n_outputs)
The predicted values.
"""
check_is_fitted(self)
return self._predict(X)
def _predict(self, X, check_input=True):
"""Private predict method with optional input validation"""
y_pred = self._forward_pass_fast(X, check_input=check_input)
if y_pred.shape[1] == 1:
return y_pred.ravel()
return y_pred
def _score(self, X, y, sample_weight=None):
return super()._score_with_function(
X, y, sample_weight=sample_weight, score_function=r2_score
)
def _validate_input(self, X, y, incremental, reset):
X, y = validate_data(
self,
X,
y,
accept_sparse=["csr", "csc"],
multi_output=True,
y_numeric=True,
dtype=(np.float64, np.float32),
reset=reset,
)
if y.ndim == 2 and y.shape[1] == 1:
y = column_or_1d(y, warn=True)
return X, y
@available_if(lambda est: est._check_solver)
@_fit_context(prefer_skip_nested_validation=True)
def partial_fit(self, X, y, sample_weight=None):
"""Update the model with a single iteration over the given data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
y : ndarray of shape (n_samples,)
The target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
.. versionadded:: 1.6
Returns
-------
self : object
Trained MLP model.
"""
return self._fit(X, y, sample_weight=sample_weight, incremental=True)
| MLPRegressor |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-monday/components.py | {
"start": 21337,
"end": 21660
} | class ____(StateMigration):
def migrate(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]:
del stream_state["activity_logs"]
return stream_state
def should_migrate(self, stream_state: Mapping[str, Any]) -> bool:
return "activity_logs" in stream_state
@dataclass
| MondayStateMigration |
python | docker__docker-py | docker/types/services.py | {
"start": 19530,
"end": 20646
} | class ____(dict):
"""
Used when creating a :py:class:`~docker.types.ContainerSpec`,
dictates whether a container should restart after stopping or failing.
Args:
condition (string): Condition for restart (``none``, ``on-failure``,
or ``any``). Default: `none`.
delay (int): Delay between restart attempts. Default: 0
max_attempts (int): Maximum attempts to restart a given container
before giving up. Default value is 0, which is ignored.
window (int): Time window used to evaluate the restart policy. Default
value is 0, which is unbounded.
"""
condition_types = RestartConditionTypesEnum
def __init__(self, condition=RestartConditionTypesEnum.NONE, delay=0,
max_attempts=0, window=0):
if condition not in self.condition_types._values:
raise TypeError(
f'Invalid RestartPolicy condition {condition}'
)
self['Condition'] = condition
self['Delay'] = delay
self['MaxAttempts'] = max_attempts
self['Window'] = window
| RestartPolicy |
python | pymupdf__PyMuPDF | src/table.py | {
"start": 49160,
"end": 49501
} | class ____:
def __init__(self, cells):
self.cells = cells
self.bbox = (
min(map(itemgetter(0), filter(None, cells))),
min(map(itemgetter(1), filter(None, cells))),
max(map(itemgetter(2), filter(None, cells))),
max(map(itemgetter(3), filter(None, cells))),
)
| CellGroup |
python | numpy__numpy | numpy/polynomial/tests/test_hermite_e.py | {
"start": 10115,
"end": 11497
} | class ____:
def test_hermeder(self):
# check exceptions
assert_raises(TypeError, herme.hermeder, [0], .5)
assert_raises(ValueError, herme.hermeder, [0], -1)
# check that zeroth derivative does nothing
for i in range(5):
tgt = [0] * i + [1]
res = herme.hermeder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0] * i + [1]
res = herme.hermeder(herme.hermeint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0] * i + [1]
res = herme.hermeder(
herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_hermeder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T
res = herme.hermeder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeder(c) for c in c2d])
res = herme.hermeder(c2d, axis=1)
assert_almost_equal(res, tgt)
| TestDerivative |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 174471,
"end": 176225
} | class ____(rv_continuous):
"""An inverted Weibull continuous random variable.
This distribution is also known as the Fréchet distribution or the
type II extreme value distribution.
%(before_notes)s
Notes
-----
The probability density function for `invweibull` is:
.. math::
f(x, c) = c x^{-c-1} \\exp(-x^{-c})
for :math:`x > 0`, :math:`c > 0`.
`invweibull` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse
Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _pdf(self, x, c):
# invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
xc1 = np.power(x, -c - 1.0)
xc2 = np.power(x, -c)
xc2 = np.exp(-xc2)
return c * xc1 * xc2
def _cdf(self, x, c):
xc1 = np.power(x, -c)
return np.exp(-xc1)
def _sf(self, x, c):
return -np.expm1(-x**-c)
def _ppf(self, q, c):
return np.power(-np.log(q), -1.0/c)
def _isf(self, p, c):
return (-np.log1p(-p))**(-1/c)
def _munp(self, n, c):
return sc.gamma(1 - n / c)
def _entropy(self, c):
return 1+_EULER + _EULER / c - np.log(c)
def _fitstart(self, data, args=None):
# invweibull requires c > 1 for the first moment to exist, so use 2.0
args = (2.0,) if args is None else args
return super()._fitstart(data, args=args)
invweibull = invweibull_gen(a=0, name='invweibull')
| invweibull_gen |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_s3.py | {
"start": 17834,
"end": 22015
} | class ____:
def setup_method(self):
self.dag = DAG(
"unit_tests_aws_sensor_test_schedule_dag_once", start_date=DEFAULT_DATE, schedule="@once"
)
self.sensor = S3KeysUnchangedSensor(
task_id="sensor_1",
bucket_name="test-bucket",
prefix="test-prefix/path",
inactivity_period=12,
poke_interval=0.1,
min_objects=1,
allow_delete=True,
dag=self.dag,
)
def test_reschedule_mode_not_allowed(self):
with pytest.raises(ValueError, match="Cannot set mode to 'reschedule'. Only 'poke' is acceptable"):
S3KeysUnchangedSensor(
task_id="sensor_2",
bucket_name="test-bucket",
prefix="test-prefix/path",
poke_interval=0.1,
mode="reschedule",
dag=self.dag,
)
@pytest.mark.db_test
def test_render_template_fields(self, clean_dags_dagruns_and_dagbundles):
S3KeysUnchangedSensor(
task_id="sensor_3",
bucket_name="test-bucket",
prefix="test-prefix/path",
inactivity_period=12,
poke_interval=0.1,
min_objects=1,
allow_delete=True,
dag=self.dag,
).render_template_fields({})
@time_machine.travel(DEFAULT_DATE)
def test_files_deleted_between_pokes_throw_error(self):
self.sensor.allow_delete = False
self.sensor.is_keys_unchanged({"a", "b"})
with pytest.raises(AirflowException):
self.sensor.is_keys_unchanged({"a"})
@pytest.mark.parametrize(
("current_objects", "expected_returns", "inactivity_periods"),
[
pytest.param(
({"a"}, {"a", "b"}, {"a", "b", "c"}),
(False, False, False),
(0, 0, 0),
id="resetting inactivity period after key change",
),
pytest.param(
({"a", "b"}, {"a"}, {"a", "c"}),
(False, False, False),
(0, 0, 0),
id="item was deleted with option `allow_delete=True`",
),
pytest.param(
({"a"}, {"a"}, {"a"}), (False, False, True), (0, 10, 20), id="inactivity period was exceeded"
),
pytest.param(
(set(), set(), set()), (False, False, False), (0, 10, 20), id="not pass if empty key is given"
),
],
)
def test_key_changes(self, current_objects, expected_returns, inactivity_periods, time_machine):
time_machine.move_to(DEFAULT_DATE)
for current, expected, period in zip(current_objects, expected_returns, inactivity_periods):
assert self.sensor.is_keys_unchanged(current) == expected
assert self.sensor.inactivity_seconds == period
time_machine.coordinates.shift(10)
def test_poke_succeeds_on_upload_complete(self, time_machine):
time_machine.move_to(DEFAULT_DATE)
self.sensor.hook = mock.MagicMock()
self.sensor.hook.list_keys.return_value = {"a"}
assert not self.sensor.poke(dict())
time_machine.coordinates.shift(10)
assert not self.sensor.poke(dict())
time_machine.coordinates.shift(10)
assert self.sensor.poke(dict())
def test_fail_is_keys_unchanged(self):
op = S3KeysUnchangedSensor(task_id="sensor", bucket_name="test-bucket", prefix="test-prefix/path")
op.previous_objects = {"1", "2", "3"}
current_objects = {"1", "2"}
op.allow_delete = False
message = "Illegal behavior: objects were deleted in"
with pytest.raises(AirflowException, match=message):
op.is_keys_unchanged(current_objects=current_objects)
def test_fail_execute_complete(self):
op = S3KeysUnchangedSensor(task_id="sensor", bucket_name="test-bucket", prefix="test-prefix/path")
message = "test message"
with pytest.raises(AirflowException, match=message):
op.execute_complete(context={}, event={"status": "error", "message": message})
| TestS3KeysUnchangedSensor |
python | celery__celery | t/unit/contrib/test_migrate.py | {
"start": 3747,
"end": 5906
} | class ____:
def test_start(self):
with patch('celery.contrib.migrate.eventloop') as evloop:
app = Mock()
filt = Mock(name='filter')
conn = Connection('memory://')
evloop.side_effect = StopFiltering()
app.amqp.queues = {'foo': Queue('foo'), 'bar': Queue('bar')}
consumer = app.amqp.TaskConsumer.return_value = Mock(name='consum')
consumer.queues = list(app.amqp.queues.values())
consumer.channel = conn.default_channel
consumer.__enter__ = Mock(name='consumer.__enter__')
consumer.__exit__ = Mock(name='consumer.__exit__')
consumer.callbacks = []
def register_callback(x):
consumer.callbacks.append(x)
consumer.register_callback = register_callback
start_filter(app, conn, filt,
queues='foo,bar', ack_messages=True)
body = {'task': 'add', 'id': 'id'}
for callback in consumer.callbacks:
callback(body, Message(body))
consumer.callbacks[:] = []
cb = Mock(name='callback=')
start_filter(app, conn, filt, tasks='add,mul', callback=cb)
for callback in consumer.callbacks:
callback(body, Message(body))
cb.assert_called()
on_declare_queue = Mock()
start_filter(app, conn, filt, tasks='add,mul', queues='foo',
on_declare_queue=on_declare_queue)
on_declare_queue.assert_called()
start_filter(app, conn, filt, queues=['foo', 'bar'])
consumer.callbacks[:] = []
state = State()
start_filter(app, conn, filt,
tasks='add,mul', callback=cb, state=state, limit=1)
stop_filtering_raised = False
for callback in consumer.callbacks:
try:
callback(body, Message(body))
except StopFiltering:
stop_filtering_raised = True
assert state.count
assert stop_filtering_raised
| test_start_filter |
python | apache__thrift | test/py/TestClient.py | {
"start": 16117,
"end": 16700
} | class ____(MultiplexedOptionalTest):
def get_protocol(self, transport):
wrapped_proto = make_pedantic(TCompactProtocol.TCompactProtocolAcceleratedFactory(fallback=False).getProtocol(transport))
return TMultiplexedProtocol.TMultiplexedProtocol(wrapped_proto, "ThriftTest")
def get_protocol2(self, transport):
wrapped_proto = make_pedantic(TCompactProtocol.TCompactProtocolAcceleratedFactory(fallback=False).getProtocol(transport))
return TMultiplexedProtocol.TMultiplexedProtocol(wrapped_proto, "SecondService")
| MultiplexedAcceleratedCompactTest |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_tagkey_values.py | {
"start": 551,
"end": 1491
} | class ____(APITestCase, SnubaTestCase):
endpoint = "sentry-api-0-organization-tagkey-values"
def setUp(self) -> None:
super().setUp()
self.min_ago = before_now(minutes=1)
self.day_ago = before_now(days=1)
user = self.create_user()
self.org = self.create_organization()
self.team = self.create_team(organization=self.org)
self.create_member(organization=self.org, user=user, teams=[self.team])
self.login_as(user=user)
def get_response(self, key, **kwargs):
return super().get_response(self.org.slug, key, **kwargs)
def run_test(self, key, expected, **kwargs):
response = self.get_success_response(key, **kwargs)
assert [(val["value"], val["count"]) for val in response.data] == expected
@cached_property
def project(self):
return self.create_project(organization=self.org, teams=[self.team])
| OrganizationTagKeyTestCase |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/general_tests/utils_tests/test_backoff.py | {
"start": 248,
"end": 2318
} | class ____:
def __init__(self, fails=0, exception=RetryableException):
self.fails = fails
self.exception = exception
self.call_count = 0
self.args = []
self.kwargs = []
def __call__(self, *args, **kwargs):
self.call_count += 1
self.args.append(args)
self.kwargs.append(kwargs)
if self.call_count <= self.fails:
raise self.exception
return True
def test_backoff_delay_generator():
gen = exponential_delay_generator()
vals = []
for _ in range(10):
vals.append(next(gen))
assert vals == [0.1, 0.2, 0.4, 0.8, 1.6, 3.2, 6.4, 12.8, 25.6, 51.2]
@pytest.fixture
def fake_sleep_times(monkeypatch):
sleeps = []
def fake_sleep(s):
sleeps.append(s)
monkeypatch.setattr(time, "sleep", fake_sleep)
yield sleeps
def test_backoff(fake_sleep_times):
fn = Failer(fails=100)
with pytest.raises(RetryableException):
backoff(fn, retry_on=(RetryableException,), args=[3, 2, 1], kwargs={"foo": "bar"})
assert fn.call_count == 5
assert all([args == (3, 2, 1) for args in fn.args])
assert all([kwargs == {"foo": "bar"} for kwargs in fn.kwargs])
assert fake_sleep_times == [0.1, 0.2, 0.4, 0.8]
fn = Failer()
assert backoff(fn, retry_on=(RetryableException,), args=[3, 2, 1], kwargs={"foo": "bar"})
assert fn.call_count == 1
fn = Failer(fails=1)
assert backoff(fn, retry_on=(RetryableException,), args=[3, 2, 1], kwargs={"foo": "bar"})
assert fn.call_count == 2
assert fake_sleep_times == [0.1, 0.2, 0.4, 0.8, 0.1]
fn = Failer(fails=1)
with pytest.raises(RetryableException):
backoff(
fn, retry_on=(RetryableException,), args=[3, 2, 1], kwargs={"foo": "bar"}, max_retries=0
)
assert fn.call_count == 1
fn = Failer(fails=2)
with pytest.raises(RetryableException):
backoff(
fn, retry_on=(RetryableException,), args=[3, 2, 1], kwargs={"foo": "bar"}, max_retries=1
)
assert fn.call_count == 2
| Failer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/functionMember2.py | {
"start": 913,
"end": 1209
} | class ____(Protocol):
@property
def __self__(self, /) -> object: ...
f1: HasSelf
f1 = A.method2
f1 = A().method1
f1 = A().method2
# These three should generate an error because they are not
# MethodTypes but are instead FunctionTypes.
f1 = A.method1
f1 = A.method3
f1 = A().method3
| HasSelf |
python | dagster-io__dagster | python_modules/dagster/dagster/_utils/error.py | {
"start": 832,
"end": 10793
} | class ____(DagsterUserCodeExecutionError):
pass
@contextlib.contextmanager
def redact_user_stacktrace_if_enabled():
"""Context manager which, if a user has enabled redacting user code errors, logs exceptions raised from within,
and clears the stacktrace from the exception. It also marks the exception to be redacted if it was to be persisted
or otherwise serialized to be sent to Dagster Plus. This is useful for preventing sensitive information from
being leaked in error messages.
"""
if not _should_redact_user_code_error():
yield
else:
try:
yield
except BaseException as e:
exc_info = sys.exc_info()
# Generate a unique error ID for this error, or re-use an existing one
# if this error has already been seen
existing_error_id = getattr(e, USER_FACING_ERROR_ID_ATTR_NAME, None)
if not existing_error_id:
error_id = str(uuid.uuid4())
# Track the error ID for this exception so we can redact it later
setattr(e, USER_FACING_ERROR_ID_ATTR_NAME, error_id)
masked_logger = logging.getLogger(_REDACTED_ERROR_LOGGER_NAME)
masked_logger.error(
f"Error occurred during user code execution, error ID {error_id}",
exc_info=exc_info,
)
else:
error_id = existing_error_id
if isinstance(e, DagsterUserCodeExecutionError):
# To be especially sure that user code error information doesn't leak from
# outside the context, we raise a new exception with a cleared original_exc_info
# The only remnant that remains is user_exception, which we only use to allow the user
# to retrieve exceptions in hooks
try:
raise Exception("Masked").with_traceback(None) from None
except Exception as dummy_exception:
redacted_exception = DagsterRedactedUserCodeError(
f"Error occurred during user code execution, error ID {error_id}. "
"The error has been masked to prevent leaking sensitive information. "
"Search in logs for this error ID for more details.",
user_exception=e.user_exception,
original_exc_info=sys.exc_info(),
).with_traceback(None)
setattr(dummy_exception, USER_FACING_ERROR_ID_ATTR_NAME, error_id)
setattr(redacted_exception, USER_FACING_ERROR_ID_ATTR_NAME, error_id)
raise redacted_exception from None
# Redact the stacktrace to ensure it will not be passed to Dagster Plus
raise e.with_traceback(None) from None
def _generate_redacted_user_code_error_message(err_id: str) -> SerializableErrorInfo:
return SerializableErrorInfo(
message=(
f"Error occurred during user code execution, error ID {err_id}. "
"The error has been masked to prevent leaking sensitive information. "
"Search in logs for this error ID for more details."
),
stack=[],
cls_name="DagsterRedactedUserCodeError",
cause=None,
context=None,
)
def _generate_partly_redacted_framework_error_message(
exc_info: ExceptionInfo, err_id: str
) -> SerializableErrorInfo:
exc_type, e, tb = exc_info
tb_exc = traceback.TracebackException(check.not_none(exc_type), check.not_none(e), tb)
error_info = SerializableErrorInfo.from_traceback(tb_exc)
return SerializableErrorInfo(
message=error_info.message
+ (
f"Error ID {err_id}. "
"The error has been masked to prevent leaking sensitive information. "
"Search in logs for this error ID for more details."
),
stack=[],
cls_name=error_info.cls_name,
cause=None,
context=None,
)
def serializable_error_info_from_exc_info(
exc_info: ExceptionInfo,
# Whether to forward serialized errors thrown from subprocesses
hoist_user_code_error: Optional[bool] = True,
) -> SerializableErrorInfo:
"""This function is used to turn an exception into a serializable object that can be passed
across process boundaries or sent over GraphQL.
Args:
exc_info (ExceptionInfo): The exception info to serialize
hoist_user_code_error (Optional[bool]): Whether to extract the inner user code error if the raised exception
is a DagsterUserCodeProcessError. Defaults to True.
"""
# `sys.exc_info() return Tuple[None, None, None] when there is no exception being processed. We accept this in
# the type signature here since this function is meant to directly receive the return value of
# `sys.exc_info`, but the function should never be called when there is no exception to process.
exc_type, e, tb = exc_info
additional_message = "sys.exc_info() called but no exception available to process."
exc_type = check.not_none(exc_type, additional_message=additional_message)
e = check.not_none(e, additional_message=additional_message)
tb = check.not_none(tb, additional_message=additional_message)
from dagster._core.errors import DagsterUserCodeProcessError
err_id = getattr(e, USER_FACING_ERROR_ID_ATTR_NAME, None)
if err_id:
if isinstance(e, DagsterUserCodeExecutionError):
# For user code, we want to completely mask the error message, since
# both the stacktrace and the message could contain sensitive information
return _generate_redacted_user_code_error_message(err_id)
else:
# For all other errors (framework errors, interrupts),
# we want to redact the error message, but keep the stacktrace
return _generate_partly_redacted_framework_error_message(exc_info, err_id)
if (
hoist_user_code_error
and isinstance(e, DagsterUserCodeProcessError)
and len(e.user_code_process_error_infos) == 1
):
return e.user_code_process_error_infos[0]
else:
tb_exc = traceback.TracebackException(exc_type, e, tb)
return SerializableErrorInfo.from_traceback(tb_exc)
def unwrap_user_code_error(error_info: SerializableErrorInfo) -> SerializableErrorInfo:
"""Extracts the underlying error from the passed error, if it is a DagsterUserCodeLoadError."""
if error_info.cls_name == "DagsterUserCodeLoadError":
return unwrap_user_code_error(check.not_none(error_info.cause))
return error_info
def truncate_event_error_info(
error_info: Optional[SerializableErrorInfo],
) -> Optional[SerializableErrorInfo]:
event_error_field_size_limit = int(os.getenv("DAGSTER_EVENT_ERROR_FIELD_SIZE_LIMIT", "500000"))
event_error_max_stack_trace_depth = int(
os.getenv("DAGSTER_EVENT_ERROR_MAX_STACK_TRACE_DEPTH", "5")
)
if error_info is None:
return None
return truncate_serialized_error(
error_info,
field_size_limit=event_error_field_size_limit,
max_depth=event_error_max_stack_trace_depth,
)
def truncate_serialized_error(
error_info: SerializableErrorInfo,
field_size_limit: int,
max_depth: int,
truncations: Optional[list[str]] = None,
):
truncations = [] if truncations is None else truncations
if error_info.cause:
if max_depth == 0:
truncations.append("cause")
new_cause = (
error_info.cause
if len(serialize_value(error_info.cause)) <= field_size_limit
else SerializableErrorInfo(
message="(Cause truncated due to size limitations)",
stack=[],
cls_name=None,
)
)
else:
new_cause = truncate_serialized_error(
error_info.cause,
field_size_limit,
max_depth=max_depth - 1,
truncations=truncations,
)
error_info = error_info._replace(cause=new_cause)
if error_info.context:
if max_depth == 0:
truncations.append("context")
new_context = (
error_info.context
if len(serialize_value(error_info.context)) <= field_size_limit
else SerializableErrorInfo(
message="(Context truncated due to size limitations)",
stack=[],
cls_name=None,
)
)
else:
new_context = truncate_serialized_error(
error_info.context,
field_size_limit,
max_depth=max_depth - 1,
truncations=truncations,
)
error_info = error_info._replace(context=new_context)
stack_size_so_far = 0
truncated_stack = []
for stack_elem in error_info.stack:
stack_size_so_far += len(stack_elem)
if stack_size_so_far > field_size_limit:
truncations.append("stack")
truncated_stack.append("(TRUNCATED)")
break
truncated_stack.append(stack_elem)
error_info = error_info._replace(stack=truncated_stack)
msg_len = len(error_info.message)
if msg_len > field_size_limit:
truncations.append(f"message from {msg_len} to {field_size_limit}")
error_info = error_info._replace(
message=error_info.message[:field_size_limit] + " (TRUNCATED)"
)
if error_info.cls_name and len(error_info.cls_name) > ERROR_CLASS_NAME_SIZE_LIMIT:
truncations.append("cls_name")
error_info = error_info._replace(
cls_name=error_info.cls_name[:ERROR_CLASS_NAME_SIZE_LIMIT] + " (TRUNCATED)"
)
return error_info
| DagsterRedactedUserCodeError |
python | Pylons__pyramid | docs/quick_tutorial/databases/tutorial/views.py | {
"start": 159,
"end": 363
} | class ____(colander.MappingSchema):
title = colander.SchemaNode(colander.String())
body = colander.SchemaNode(
colander.String(),
widget=deform.widget.RichTextWidget()
)
| WikiPage |
python | OmkarPathak__pygorithm | pygorithm/data_structures/stack.py | {
"start": 107,
"end": 1682
} | class ____(object):
"""
Stack object
"""
def __init__(self, limit=10):
"""
:param limit: the stack size
"""
self.stack = []
self.limit = limit
def __str__(self):
return ' '.join([str(i) for i in self.stack])
def push(self, data):
"""
pushes an item into the stack
returns -1 if the stack is empty
"""
if len(self.stack) >= self.limit:
# indicates stack overflow
return -1
else:
self.stack.append(data)
def pop(self):
"""
pops the topmost item from the stack
returns -1 if the stack is empty
"""
if len(self.stack) <= 0:
# indicates stack underflow
return -1
else:
return self.stack.pop()
def peek(self):
"""
returns the topmost element of the stack
returns -1 if the stack is empty
"""
if len(self.stack) <= 0:
# stack underflow
return -1
else:
return self.stack[len(self.stack) - 1]
def is_empty(self):
"""
checks if the stack is empty
returns boolean value, True or False
"""
return self.size() == 0
def size(self):
"""
returns the current size of the stack
"""
return len(self.stack)
@staticmethod
def get_code():
"""
returns the code for current class
"""
return inspect.getsource(Stack)
| Stack |
python | pytorch__pytorch | test/dynamo/test_activation_checkpointing.py | {
"start": 63844,
"end": 64732
} | class ____(torch.nn.Module):
def forward(self, primals_1: "f32[4, 4]"):
mm: "f32[4, 4]" = torch.ops.aten.mm.default(primals_1, primals_1)
mm_1: "f32[4, 4]" = torch.ops.aten.mm.default(mm, primals_1); mm = None
sin: "f32[4, 4]" = torch.ops.aten.sin.default(primals_1)
sin_1: "f32[4, 4]" = torch.ops.aten.sin.default(mm_1); mm_1 = None
cos: "f32[4, 4]" = torch.ops.aten.cos.default(sin_1); sin_1 = None
sin_2: "f32[4, 4]" = torch.ops.aten.sin.default(primals_1)
add: "f32[4, 4]" = torch.ops.aten.add.Tensor(cos, sin_2); cos = sin_2 = None
return (add, sin, primals_1)
""",
)
devices = ["cuda", "hpu"]
instantiate_device_type_tests(
ActivationCheckpointingViaTagsTests, globals(), only_for=devices
)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| GraphModule |
python | huggingface__transformers | src/transformers/models/mpnet/modeling_mpnet.py | {
"start": 17098,
"end": 19859
} | class ____(MPNetPreTrainedModel):
_tied_weights_keys = {
"lm_head.decoder.weight": "mpnet.embeddings.word_embeddings.weight",
"lm_head.decoder.bias": "lm_head.bias",
}
def __init__(self, config):
super().__init__(config)
self.mpnet = MPNetModel(config, add_pooling_layer=False)
self.lm_head = MPNetLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
self.lm_head.bias = new_embeddings.bias
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mpnet(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| MPNetForMaskedLM |
python | matplotlib__matplotlib | lib/matplotlib/axes/__init__.py | {
"start": 250,
"end": 351
} | class ____(metaclass=_SubplotBaseMeta):
pass
def subplot_class_factory(cls): return cls
| SubplotBase |
python | getsentry__sentry | src/sentry/ingest/inbound_filters.py | {
"start": 5524,
"end": 5763
} | class ____(serializers.Serializer):
active = serializers.BooleanField(
help_text="Toggle the browser-extensions, localhost, filtered-transaction, or web-crawlers filter on or off.",
required=False,
)
| _FilterSerializer |
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_auth_token_details.py | {
"start": 344,
"end": 5165
} | class ____(APITestCase):
endpoint = "sentry-api-0-org-auth-token-details"
def test_simple(self) -> None:
token = OrgAuthToken.objects.create(
organization_id=self.organization.id,
name="token 1",
token_hashed="ABCDEF",
token_last_characters="xyz1",
scope_list=["org:ci"],
date_last_used=None,
)
self.login_as(self.user)
response = self.get_success_response(
self.organization.slug, token.id, status_code=status.HTTP_200_OK
)
assert response.content
res = response.data
assert res.get("id") == str(token.id)
assert res.get("name") == "token 1"
assert res.get("token") is None
assert res.get("tokenLastCharacters") == "xyz1"
assert res.get("scopes") == ["org:ci"]
assert res.get("dateCreated") is not None
assert res.get("lastUsedDate") is None
assert res.get("lastUsedProjectId") is None
def test_last_used(self) -> None:
token = OrgAuthToken.objects.create(
organization_id=self.organization.id,
name="token 1",
token_hashed="ABCDEF",
token_last_characters="xyz1",
scope_list=["org:ci"],
date_last_used=datetime(2023, 1, 1, tzinfo=timezone.utc),
project_last_used_id=self.project.id,
)
self.login_as(self.user)
response = self.get_success_response(
self.organization.slug, token.id, status_code=status.HTTP_200_OK
)
assert response.content
res = response.data
assert res.get("dateLastUsed") == datetime(2023, 1, 1, tzinfo=timezone.utc)
assert res.get("projectLastUsedId") == str(self.project.id)
def test_no_auth(self) -> None:
token = OrgAuthToken.objects.create(
organization_id=self.organization.id,
name="token 1",
token_hashed="ABCDEF",
token_last_characters="xyz1",
scope_list=["org:ci"],
date_last_used=None,
)
response = self.get_error_response(self.organization.slug, token.id)
assert response.status_code == status.HTTP_403_FORBIDDEN
def test_other_org_token(self) -> None:
other_org = self.create_organization()
token = OrgAuthToken.objects.create(
organization_id=other_org.id,
name="token 1",
token_hashed="ABCDEF",
token_last_characters="xyz1",
scope_list=["org:ci"],
date_last_used=None,
)
self.login_as(self.user)
response = self.get_error_response(other_org.slug, token.id)
assert response.status_code == status.HTTP_403_FORBIDDEN
def test_other_org(self) -> None:
other_org = self.create_organization()
token = OrgAuthToken.objects.create(
organization_id=other_org.id,
name="token 1",
token_hashed="ABCDEF",
token_last_characters="xyz1",
scope_list=["org:ci"],
date_last_used=None,
)
self.login_as(self.user)
response = self.get_error_response(self.organization.slug, token.id)
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_deny_token_access(self) -> None:
org_token = OrgAuthToken.objects.create(
organization_id=self.organization.id,
name="token 1",
token_hashed="ABCDEF",
token_last_characters="xyz1",
scope_list=["org:ci"],
date_last_used=None,
)
personal_token = ApiToken.objects.create(user=self.user, scope_list=["org:read"])
response = self.get_error_response(
self.organization.slug,
org_token.id,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {personal_token.token}"},
)
assert response.status_code == status.HTTP_403_FORBIDDEN
def test_not_exists(self) -> None:
self.login_as(self.user)
response = self.get_error_response(self.organization.slug, 999999)
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_get_deleted(self) -> None:
token = OrgAuthToken.objects.create(
organization_id=self.organization.id,
name="token 1",
token_hashed="ABCDEF",
token_last_characters="xyz1",
scope_list=["org:ci"],
date_last_used=None,
date_deactivated=datetime(2023, 1, 1, tzinfo=timezone.utc),
)
self.login_as(self.user)
response = self.get_error_response(self.organization.slug, token.id)
assert response.status_code == status.HTTP_404_NOT_FOUND
@control_silo_test
| OrganizationAuthTokenDetailTest |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/taint_in_taint_out.py | {
"start": 265,
"end": 352
} | class ____:
pass
def some_service(id):
...
def _unpack(tuple):
...
| Object |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/where_op_test.py | {
"start": 8394,
"end": 11065
} | class ____(test.Benchmark):
def benchmarkWhere(self):
for (m, n, p, use_gpu) in itertools.product(
[10],
[10, 100, 1000, 10000, 100000, 1000000],
[0.01, 0.5, 0.99],
[False, True]):
name = "m_%d_n_%d_p_%g_use_gpu_%s" % (m, n, p, use_gpu)
device = "/%s:0" % ("gpu" if use_gpu else "cpu")
with ops.Graph().as_default():
with ops.device(device):
x = random_ops.random_uniform((m, n), dtype=dtypes.float32) <= p
v = resource_variable_ops.ResourceVariable(x)
op = array_ops.where(v)
with session.Session(config=benchmark.benchmark_config()) as sess:
self.evaluate(v.initializer)
r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
gb_processed_input = m * n / 1.0e9
# approximate size of output: m*n*p int64s for each axis.
gb_processed_output = 2 * 8 * m * n * p / 1.0e9
gb_processed = gb_processed_input + gb_processed_output
throughput = gb_processed / r["wall_time"]
print("Benchmark: %s \t wall_time: %0.03g s \t "
"Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
sys.stdout.flush()
def benchmarkBatchSelect(self):
for (m, n, use_gpu) in itertools.product([1000, 10000, 100000],
[10, 100, 1000], [False, True]):
name = "m_%d_n_%d_use_gpu_%s" % (m, n, use_gpu)
device = "/%s:0" % ("gpu" if use_gpu else "cpu")
with ops.Graph().as_default():
with ops.device(device):
x_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
y_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
c_gen = random_ops.random_uniform([m], dtype=dtypes.float32) <= 0.5
x = resource_variable_ops.ResourceVariable(x_gen)
y = resource_variable_ops.ResourceVariable(y_gen)
c = resource_variable_ops.ResourceVariable(c_gen)
op = array_ops.where(c, x, y)
with session.Session(config=benchmark.benchmark_config()) as sess:
self.evaluate(x.initializer)
self.evaluate(y.initializer)
self.evaluate(c.initializer)
r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
# approximate size of output: m*n*2 floats for each axis.
gb_processed = m * n * 8 / 1.0e9
throughput = gb_processed / r["wall_time"]
print("Benchmark: %s \t wall_time: %0.03g s \t "
"Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
sys.stdout.flush()
if __name__ == "__main__":
test.main()
| WhereBenchmark |
python | geekcomputers__Python | floodfill/floodfill.py | {
"start": 194,
"end": 4019
} | class ____:
def __init__(self, window_width, window_height):
self.window_width = int(window_width)
self.window_height = int(window_height)
pygame.init()
pygame.display.set_caption("Floodfill")
self.display = pygame.display.set_mode((self.window_width, self.window_height))
self.surface = pygame.Surface(self.display.get_size())
self.surface.fill((0, 0, 0))
self.generateClosedPolygons() # for visualisation purposes
self.queue = []
def generateClosedPolygons(self):
if self.window_height < 128 or self.window_width < 128:
return # surface too small
from random import randint, uniform
from math import pi, sin, cos
for n in range(0, randint(0, 5)):
x = randint(50, self.window_width - 50)
y = randint(50, self.window_height - 50)
angle = 0
angle += uniform(0, 0.7)
vertices = []
for i in range(0, randint(3, 7)):
dist = randint(10, 50)
vertices.append(
(int(x + cos(angle) * dist), int(y + sin(angle) * dist))
)
angle += uniform(0, pi / 2)
for i in range(0, len(vertices) - 1):
pygame.draw.line(
self.surface, (255, 0, 0), vertices[i], vertices[i + 1]
)
pygame.draw.line(
self.surface, (255, 0, 0), vertices[len(vertices) - 1], vertices[0]
)
def run(self):
looping = True
while looping:
evsforturn = []
for ev in pygame.event.get():
if ev.type == pygame.QUIT:
looping = False
else:
evsforturn.append(ev) # TODO: Maybe extend with more events
self.update(evsforturn)
self.display.blit(self.surface, (0, 0))
pygame.display.flip()
pygame.quit()
def update(self, events):
for ev in events:
if ev.type == pygame.MOUSEBUTTONDOWN and ev.button == 1:
self.queue.append(ev.pos)
if not len(self.queue):
return
point = self.queue.pop(0)
pixArr = pygame.PixelArray(self.surface)
if pixArr[point[0], point[1]] == self.surface.map_rgb((255, 255, 255)):
return
pixArr[point[0], point[1]] = (255, 255, 255)
left = (point[0] - 1, point[1])
right = (point[0] + 1, point[1])
top = (point[0], point[1] + 1)
bottom = (point[0], point[1] - 1)
if (
self.inBounds(left)
and left not in self.queue
and pixArr[left[0], left[1]] == self.surface.map_rgb((0, 0, 0))
):
self.queue.append(left)
if (
self.inBounds(right)
and right not in self.queue
and pixArr[right[0], right[1]] == self.surface.map_rgb((0, 0, 0))
):
self.queue.append(right)
if (
self.inBounds(top)
and top not in self.queue
and pixArr[top[0], top[1]] == self.surface.map_rgb((0, 0, 0))
):
self.queue.append(top)
if (
self.inBounds(bottom)
and bottom not in self.queue
and pixArr[bottom[0], bottom[1]] == self.surface.map_rgb((0, 0, 0))
):
self.queue.append(bottom)
del pixArr
def inBounds(self, coord):
if coord[0] < 0 or coord[0] >= self.window_width:
return False
elif coord[1] < 0 or coord[1] >= self.window_height:
return False
return True
if __name__ == "__main__":
import sys
floodfill = FloodFill(sys.argv[1], sys.argv[2])
floodfill.run()
| FloodFill |
python | pandas-dev__pandas | pandas/io/pytables.py | {
"start": 89445,
"end": 90243
} | class ____(DataCol):
"""represent a data column that can be indexed"""
is_data_indexable = True
def validate_names(self) -> None:
if not is_string_dtype(Index(self.values).dtype):
# TODO: should the message here be more specifically non-str?
raise ValueError("cannot have non-object label DataIndexableCol")
@classmethod
def get_atom_string(cls, shape, itemsize):
return _tables().StringCol(itemsize=itemsize)
@classmethod
def get_atom_data(cls, shape, kind: str) -> Col:
return cls.get_atom_coltype(kind=kind)()
@classmethod
def get_atom_datetime64(cls, shape):
return _tables().Int64Col()
@classmethod
def get_atom_timedelta64(cls, shape):
return _tables().Int64Col()
| DataIndexableCol |
python | google__jax | jax/_src/export/shape_poly.py | {
"start": 2511,
"end": 2591
} | class ____(Enum):
EQ = 1
GEQ = 2
@dataclasses.dataclass(frozen=True)
| Comparator |
python | pytorch__pytorch | torch/_inductor/scheduler.py | {
"start": 51866,
"end": 66122
} | class ____(BaseSchedulerNode):
"""
A SchedulerNode is a node for scheduling that encapsulates either
a ComputedBuffer or a TemplateBuffer.
"""
_sizes: tuple[Sequence[sympy.Expr], ...]
_body: LoopBody
def __init__(
self,
scheduler: Scheduler,
node: Union[ir.ComputedBuffer, ir.TemplateBuffer],
) -> None:
super().__init__(scheduler)
self._init_from_node(node)
self._compute_attrs()
def _compute_attrs(
self,
extra_indexing_constraints: Optional[tuple[dict[Any, Any], list[Any]]] = None,
recompute_sizes_body_func: Optional[Callable[_P, _T]] = None,
) -> None:
assert isinstance(self.node, (ir.ComputedBuffer, ir.TemplateBuffer))
self._sizes, body = self.node.simplify_and_reorder(
extra_indexing_constraints=extra_indexing_constraints,
recompute_sizes_body_func=recompute_sizes_body_func,
)
self._body = body # type: ignore[assignment]
device = self.node.get_device_or_error()
group_fn = self.scheduler.get_backend(device).group_fn
self.group = (device, group_fn(self._sizes))
# Don't normalize since normalization will merge loops which
# makes it hard to decide new loop orders.
should_normalize = not config.loop_ordering_after_fusion or not is_gpu(
device.type
)
if isinstance(self.node, ir.TemplateBuffer):
self.set_read_writes(
self.node.extract_read_writes(normalize=should_normalize)
)
else:
self.set_read_writes(
dependencies.extract_read_writes(
self._body, *self._sizes, normalize=should_normalize
)
)
def recompute_size_and_body(
self,
extra_indexing_constraints: Optional[tuple[dict[Any, Any], list[Any]]] = None,
recompute_sizes_body_func: Optional[Callable[..., Any]] = None,
) -> None:
self._compute_attrs(
extra_indexing_constraints=extra_indexing_constraints,
recompute_sizes_body_func=recompute_sizes_body_func,
)
def refresh_dependencies(
self, normalize: bool, need_clear_tiling_cache: bool
) -> None:
# Fake dependencies are added manually. They can not be analyzed from
# extract_read_writes. Find them out and apply manually.
fake_deps: OrderedSet[Dep] = OrderedSet(
dep for dep in self.read_writes.reads if isinstance(dep, (WeakDep, StarDep))
)
# don't normalize since the loop order may need to be further changed
# later
self.set_read_writes(
dependencies.extract_read_writes(
self._body, *self._sizes, normalize=normalize
)
.with_read(fake_deps)
.rename(self.mutation_renames)
)
self.pointwise_read_writes.clear_cache(self)
if need_clear_tiling_cache:
from .codegen.simd import SIMDScheduling
# TODO(shunting) if this cause compilation time increase when
# enabling LOAF by default, try just clearing the specific cache
# entry by using a customized cache implementation rather than
# lru_cache.
SIMDScheduling.candidate_tilings.cache_clear()
def apply_new_loop_order(self, new_order: Sequence[int]) -> None:
self._body = self._body.reorder_iter_loops(
new_order,
)
self._sizes = self._body.sizes
self.refresh_dependencies(normalize=False, need_clear_tiling_cache=True)
def swap_pw_red_dimension(self) -> None:
num_rdims = self._body.get_original_num_rdims()
num_pwdims = len(self._body.iter_vars) - num_rdims
pwdims = tuple(range(num_pwdims))
rdims = tuple(range(num_pwdims, num_pwdims + num_rdims))
self.apply_new_loop_order(rdims + pwdims)
assert len(self.group[1]) == 2
self.group = self.group[0], (self.group[1][1], self.group[1][0])
def extract_pw_from_reduction(self) -> BaseSchedulerNode:
self._body = self._body.extract_pw_from_reduction()
return self
def cancel_reduction_split(self) -> None:
if not MixOrderReduction.is_split_reduction(self):
return
assert isinstance(self.node, ir.ComputedBuffer)
with self.node.with_original_inner_fn():
self._compute_attrs()
def expand_dimension_for_pointwise_node(
self, dimension: int, new_range: int
) -> None:
assert isinstance(self.node, (ir.ComputedBuffer, ir.TemplateBuffer))
self._body = self._body.expand_dimension_for_pointwise_node(
dimension, new_range
)
self._sizes = self._body.sizes
device = self.node.get_device_or_error()
group_fn = self.scheduler.get_backend(device).group_fn
self.group = (device, group_fn(self._sizes))
# Need normalize the prefix name to facilitate finding common dependencies
self.refresh_dependencies(normalize=True, need_clear_tiling_cache=True)
def merge_loops(self) -> None:
self._body = self._body.merge_loops()
self._sizes = self._body.sizes
# merge_loops is called after loop reordering.
# We still need retain fake dependencies since codegen the
# estimated amount of memory access rely on them.
#
# Merge loops does not affect the tiling decision. So we
# don't need clear the tiling cache.
self.refresh_dependencies(normalize=True, need_clear_tiling_cache=False)
def reorder_loops_by_dep_pair(
self, self_dep: MemoryDep, other_dep: MemoryDep
) -> bool:
new_order = None
self_sizes = self._sizes[0]
if len(self_sizes) == self_dep.num_vars == other_dep.num_vars:
new_order = self_dep.decide_loop_order_to_match(other_dep)
if new_order:
# pyrefly: ignore [bad-assignment]
metrics.num_loop_reordering += 1
loop_ordering_log.debug(
"Reorder loops for %s with order %s", self.get_name(), new_order
)
self.apply_new_loop_order(new_order)
return True
else:
loop_ordering_log.debug(
"Don't reordering %s because we can not decide the suitable loop order",
self.get_name(),
)
return False
def debug_str_extra(self) -> str:
name = self.get_name()
lines = [
f"{name}.group.device = {self.group[0]}",
f"{name}.group.iteration = {self.group[1]}",
f"{name}.sizes = {self._sizes}",
]
for dep in self.read_writes.reads_and_writes():
if not isinstance(dep, WeakDep):
buf_name = dep.name
buf = V.graph.get_buffer(buf_name)
if not isinstance(buf, ir.TorchBindObject):
lines.append(f"{buf_name}_layout = {pformat(buf.layout)}")
if isinstance(self._body, LoopBody):
lines.append(f"class {name}_loop_body:")
lines.append(textwrap.indent(self._body.debug_str(), " "))
assert self.node is not None
lines.extend(self._debug_str_for_device())
return "\n".join(lines)
def get_ranges(self) -> Sequence[Sequence[sympy.Expr]]:
return self._sizes
def is_reduction(self) -> bool:
assert isinstance(self.node, (ir.ComputedBuffer, ir.TemplateBuffer)), (
f"{type(self.node)=}"
)
# self._body containing partial accumulate means the reduction is
# converted to a pointwise node. Need this extra check since
# we change self._body but didn't change self.node (IRNode)
# when converting a reduction to a pointwise
return bool(self.node.get_reduction_type()) and (
self._body is None or not self._body.has_partial_accumulate
)
def is_native_matmul(self) -> bool:
assert isinstance(self.node, ir.ComputedBuffer), f"{type(self.node)=}"
return self.node.get_reduction_type() == "dot"
def is_split_scan(self) -> bool:
assert isinstance(self.node, (ir.ComputedBuffer, ir.TemplateBuffer)), (
f"{type(self.node)=}"
)
return isinstance(self.node, ir.ComputedBuffer) and isinstance(
self.node.data, ir.SplitScan
)
def is_template(self) -> bool:
return isinstance(self.node, ir.TemplateBuffer)
def get_template_node(self) -> Optional[ir.TemplateBuffer]:
return self.node if isinstance(self.node, ir.TemplateBuffer) else None
def run(self, *index_vars: Sequence[sympy.Expr]) -> None:
self.decide_inplace_update()
self.mark_run()
self.codegen(index_vars)
def ranges_from_index_vars(
self, index_vars: Sequence[Sequence[sympy.Expr]]
) -> dict[sympy.Expr, sympy.Expr]:
sizes = self._sizes
assert sum(map(len, sizes)) == sum(map(len, index_vars))
var_ranges = dict(
zip(
itertools.chain.from_iterable(index_vars),
itertools.chain.from_iterable(sizes),
)
)
return var_ranges
def codegen(self, index_vars: Sequence[Sequence[sympy.Expr]]) -> None:
"""
Generate code for this node using the provided index variables.
This method sets up the appropriate context for code generation, including
simplifying indexing expressions based on the variable ranges, and then
calls the node's body function with the index variables.
Args:
index_vars: A sequence of sequences of sympy expressions representing
the index variables for each dimension of the computation.
"""
var_ranges = self.ranges_from_index_vars(index_vars)
try:
with (
V.set_ops_handler(SimplifyIndexing(V.get_ops_handler(), var_ranges)),
V.kernel.set_current_node(self),
):
self._body(*index_vars)
except Exception:
log.fatal("Error in codegen for %s", self.node)
raise
def pointwise_or_reduction_read_writes(
self, pointwise: bool = True
) -> dependencies.ReadWrites:
"""
Get the memory dependencies in either the pointwise or the reduction axes.
"""
keep_sizes, ignore_sizes = self._sizes if pointwise else reversed(self._sizes)
return dependencies.extract_read_writes(
self._body, keep_sizes, hidden_args=[[sympy.S.Zero] * len(ignore_sizes)]
)
@cache_on_self
def pointwise_read_writes(self) -> dependencies.ReadWrites:
"""
Get the memory dependencies in the non-reduction axes.
"""
return self.pointwise_or_reduction_read_writes(pointwise=True)
@cache_on_self
def reduction_read_writes(self) -> dependencies.ReadWrites:
"""
Get the memory dependencies in the reduction axes.
"""
return self.pointwise_or_reduction_read_writes(pointwise=False)
def can_inplace(self, read_dep: dependencies.Dep) -> bool:
if self.is_template():
return False
if any(out.get_aliases() for out in self.get_outputs()):
return False
if len(self.read_writes.writes) == 1 and isinstance(
read_dep, dependencies.MemoryDep
):
write_dep = next(iter(self.read_writes.writes))
assert isinstance(write_dep, dependencies.MemoryDep), f"{type(write_dep)=}"
return read_dep.index == write_dep.index and read_dep.size == write_dep.size
return False
@cache_on_self
def _get_atomic_add_buffers(self) -> OrderedSet[str]:
buffers_store_as_atomic_add: OrderedSet[str] = OrderedSet()
if isinstance(self._body, LoopBody):
for node in self._body.get_nodes():
if (
node.op == "call_method"
and node.target == "store"
and (
("mode" in node.kwargs and node.kwargs["mode"] == "atomic_add")
or (len(node.args) == 5 and node.args[4] == "atomic_add")
)
):
buffers_store_as_atomic_add.add(
node.kwargs["name"]
if "name" in node.kwargs
else (node.args[1] if len(node.args) >= 2 else "")
)
return buffers_store_as_atomic_add
@cache_on_self
def has_side_effects(self) -> bool:
# self._body is None sometimes that's why this check was added
if self._body is not None and self._body.has_op("device_assert_async"):
return True
return super().has_side_effects()
def refresh_group_node_dependencies(
group_snode: Union[FusedSchedulerNode, GroupedSchedulerNode],
) -> None:
snodes = group_snode.snodes
group_snode.set_read_writes(
dependencies.ReadWrites.merge_list([x.read_writes for x in snodes])
)
group_snode.unmet_dependencies = (
OrderedSet(
dep
for dep in OrderedSet.union(*[x.unmet_dependencies for x in snodes])
if dep.name not in group_snode.get_buffer_names()
)
- group_snode.read_writes.writes
)
def init_group_node(
group_snode: Union[FusedSchedulerNode, GroupedSchedulerNode],
scheduler: Scheduler,
snodes: list[BaseSchedulerNode],
) -> None:
assert isinstance(group_snode, (FusedSchedulerNode, GroupedSchedulerNode))
group_snode.snodes = snodes
group_snode.scheduler = scheduler
group_snode.node = None
group_snode.ancestors = OrderedSet.union(
*[x.ancestors for x in snodes if x.ancestors is not None]
)
refresh_group_node_dependencies(group_snode)
group_snode.min_order = min(x.min_order for x in group_snode.snodes)
group_snode.max_order = max(x.max_order for x in group_snode.snodes)
group_snode.outputs_by_name = {
buf.get_name(): buf for buf in group_snode.get_outputs()
}
| SchedulerNode |
python | scipy__scipy | scipy/fftpack/tests/test_real_transforms.py | {
"start": 14776,
"end": 14906
} | class ____(_TestDSTIBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 1
| TestDSTIInt |
python | getsentry__sentry | src/sentry/grouping/fingerprinting/utils.py | {
"start": 882,
"end": 924
} | class ____(TypedDict):
sdk: str
| _SdkInfo |
python | getsentry__sentry | tests/relay_integration/test_message_filters.py | {
"start": 448,
"end": 4355
} | class ____(RelayStoreHelper, TransactionTestCase):
def _get_message(self):
return {}
def _set_filter_state(self, flt, state):
ProjectOption.objects.set_value(project=self.project, key=f"filters:{flt.id}", value=state)
def test_should_not_filter_simple_messages(self) -> None:
# baseline test (so we know everything works as expected)
message = self._get_message()
self.post_and_retrieve_event(message)
def _get_message_with_bad_ip(self):
message = self._get_message()
set_path(message, "user", "ip_address", value="127.0.0.1")
return message
def test_should_filter_local_ip_addresses_when_enabled(self) -> None:
self._set_filter_state(_localhost_filter, "1")
message = self._get_message_with_bad_ip()
event = self.post_and_try_retrieve_event(message)
assert event is None
def test_should_not_filter_bad_ip_addresses_when_disabled(self) -> None:
self._set_filter_state(_localhost_filter, "0")
message = self._get_message_with_bad_ip()
self.post_and_retrieve_event(message)
def _get_message_with_bad_extension(self):
message = self._get_message()
set_path(message, "platform", value="javascript")
set_path(
message,
"exception",
value={"values": [{"type": "Error", "value": "http://loading.retry.widdit.com/"}]},
)
return message
def test_should_filter_browser_extensions_when_enabled(self) -> None:
self._set_filter_state(_browser_extensions_filter, "1")
message = self._get_message_with_bad_extension()
event = self.post_and_try_retrieve_event(message)
assert event is None
def test_should_not_filter_browser_extensions_when_disabled(self) -> None:
self._set_filter_state(_browser_extensions_filter, "0")
message = self._get_message_with_bad_extension()
self.post_and_retrieve_event(message)
def _get_message_from_webcrawler(self):
message = self._get_message()
set_path(
message,
"request",
value={
"url": "http://example.com",
"method": "GET",
"headers": [["User-Agent", "Mediapartners-Google"]],
},
)
return message
def test_should_filter_web_crawlers_when_enabled(self) -> None:
self._set_filter_state(_web_crawlers_filter, "1")
message = self._get_message_from_webcrawler()
event = self.post_and_try_retrieve_event(message)
assert event is None
def test_should_not_filter_web_crawlers_when_disabled(self) -> None:
self._set_filter_state(_web_crawlers_filter, "0")
message = self._get_message_from_webcrawler()
self.post_and_retrieve_event(message)
def _get_message_from_legacy_browser(self):
ie_5_user_agent = (
"Mozilla/4.0 (compatible; MSIE 5.50; Windows NT; SiteKiosk 4.9; SiteCoach 1.0)"
)
message = self._get_message()
set_path(message, "platform", value="javascript")
set_path(
message,
"request",
value={
"url": "http://example.com",
"method": "GET",
"headers": [["User-Agent", ie_5_user_agent]],
},
)
return message
def test_should_filter_legacy_browsers(self) -> None:
self._set_filter_state(_legacy_browsers_filter, "1")
message = self._get_message_from_legacy_browser()
event = self.post_and_try_retrieve_event(message)
assert event is None
def test_should_not_filter_legacy_browsers_when_disabled(self) -> None:
self._set_filter_state(_legacy_browsers_filter, "0")
message = self._get_message_from_legacy_browser()
self.post_and_retrieve_event(message)
| FilterTests |
python | django-haystack__django-haystack | test_haystack/solr_tests/test_solr_backend.py | {
"start": 28623,
"end": 28859
} | class ____(std_logging.Handler):
logs_seen = []
def emit(self, record):
CaptureHandler.logs_seen.append(record)
@patch("pysolr.Solr._send_request", side_effect=pysolr.SolrError)
@patch("logging.Logger.log")
| CaptureHandler |
python | python__mypy | mypy/stubgenc.py | {
"start": 7208,
"end": 39265
} | class ____(BaseStubGenerator):
"""Stub generator that does not parse code.
Generation is performed by inspecting the module's contents, and thus works
for highly dynamic modules, pyc files, and C modules (via the CStubGenerator
subclass).
"""
def __init__(
self,
module_name: str,
known_modules: list[str],
doc_dir: str = "",
_all_: list[str] | None = None,
include_private: bool = False,
export_less: bool = False,
include_docstrings: bool = False,
module: ModuleType | None = None,
) -> None:
self.doc_dir = doc_dir
if module is None:
self.module = importlib.import_module(module_name)
else:
self.module = module
self.is_c_module = is_c_module(self.module)
self.known_modules = known_modules
self.resort_members = self.is_c_module
super().__init__(_all_, include_private, export_less, include_docstrings)
self.module_name = module_name
if self.is_c_module:
# Add additional implicit imports.
# C-extensions are given more latitude since they do not import the typing module.
self.known_imports.update(
{
"typing": [
"Any",
"Callable",
"ClassVar",
"Dict",
"Iterable",
"Iterator",
"List",
"Literal",
"NamedTuple",
"Optional",
"Tuple",
"Union",
]
}
)
def get_default_function_sig(self, func: object, ctx: FunctionContext) -> FunctionSig:
argspec = None
if not self.is_c_module:
# Get the full argument specification of the function
try:
argspec = inspect.getfullargspec(func)
except TypeError:
# some callables cannot be inspected, e.g. functools.partial
pass
if argspec is None:
if ctx.class_info is not None:
# method:
return FunctionSig(
name=ctx.name,
args=infer_c_method_args(ctx.name, ctx.class_info.self_var),
ret_type=infer_method_ret_type(ctx.name),
)
else:
# function:
return FunctionSig(
name=ctx.name,
args=[ArgSig(name="*args"), ArgSig(name="**kwargs")],
ret_type=None,
)
# Extract the function arguments, defaults, and varargs
args = argspec.args
defaults = argspec.defaults
varargs = argspec.varargs
kwargs = argspec.varkw
annotations = argspec.annotations
kwonlyargs = argspec.kwonlyargs
kwonlydefaults = argspec.kwonlydefaults
def get_annotation(key: str) -> str | None:
if key not in annotations:
return None
argtype = annotations[key]
if argtype is None:
return "None"
if not isinstance(argtype, str):
return self.get_type_fullname(argtype)
return argtype
arglist: list[ArgSig] = []
# Add the arguments to the signature
def add_args(
args: list[str], get_default_value: Callable[[int, str], object | _Missing]
) -> None:
for i, arg in enumerate(args):
# Check if the argument has a default value
default_value = get_default_value(i, arg)
if default_value is not _Missing.VALUE:
if arg in annotations:
argtype = get_annotation(arg)
else:
argtype = self.get_type_annotation(default_value)
if argtype == "None":
# None is not a useful annotation, but we can infer that the arg
# is optional
incomplete = self.add_name("_typeshed.Incomplete")
argtype = f"{incomplete} | None"
arglist.append(ArgSig(arg, argtype, default=True))
else:
arglist.append(ArgSig(arg, get_annotation(arg), default=False))
def get_pos_default(i: int, _arg: str) -> Any | _Missing:
if defaults and i >= len(args) - len(defaults):
return defaults[i - (len(args) - len(defaults))]
else:
return _Missing.VALUE
add_args(args, get_pos_default)
# Add *args if present
if varargs:
arglist.append(ArgSig(f"*{varargs}", get_annotation(varargs)))
# if we have keyword only args, then we need to add "*"
elif kwonlyargs:
arglist.append(ArgSig("*"))
def get_kw_default(_i: int, arg: str) -> Any | _Missing:
if kwonlydefaults and arg in kwonlydefaults:
return kwonlydefaults[arg]
else:
return _Missing.VALUE
add_args(kwonlyargs, get_kw_default)
# Add **kwargs if present
if kwargs:
arglist.append(ArgSig(f"**{kwargs}", get_annotation(kwargs)))
# add types for known special methods
if ctx.class_info is not None and all(
arg.type is None and arg.default is False for arg in arglist
):
new_args = infer_method_arg_types(
ctx.name, ctx.class_info.self_var, [arg.name for arg in arglist if arg.name]
)
if new_args is not None:
arglist = new_args
ret_type = get_annotation("return") or infer_method_ret_type(ctx.name)
return FunctionSig(ctx.name, arglist, ret_type)
def get_sig_generators(self) -> list[SignatureGenerator]:
if not self.is_c_module:
return []
else:
sig_generators: list[SignatureGenerator] = [DocstringSignatureGenerator()]
if self.doc_dir:
# Collect info from docs (if given). Always check these first.
sig_generators.insert(0, ExternalSignatureGenerator.from_doc_dir(self.doc_dir))
return sig_generators
def strip_or_import(self, type_name: str) -> str:
"""Strips unnecessary module names from typ.
If typ represents a type that is inside module or is a type coming from builtins, remove
module declaration from it. Return stripped name of the type.
Arguments:
typ: name of the type
"""
local_modules = ["builtins", self.module_name]
parsed_type = parse_type_comment(type_name, 0, 0, None)[1]
assert parsed_type is not None, type_name
return self.print_annotation(parsed_type, self.known_modules, local_modules)
def get_obj_module(self, obj: object) -> str | None:
"""Return module name of the object."""
return getattr(obj, "__module__", None)
def is_defined_in_module(self, obj: object) -> bool:
"""Check if object is considered defined in the current module."""
module = self.get_obj_module(obj)
return module is None or module == self.module_name
def generate_module(self) -> None:
all_items = self.get_members(self.module)
if self.resort_members:
all_items = sorted(all_items, key=lambda x: x[0])
items = []
for name, obj in all_items:
if inspect.ismodule(obj) and obj.__name__ in self.known_modules:
module_name = obj.__name__
if module_name.startswith(self.module_name + "."):
# from {.rel_name} import {mod_name} as {name}
pkg_name, mod_name = module_name.rsplit(".", 1)
rel_module = pkg_name[len(self.module_name) :] or "."
self.import_tracker.add_import_from(rel_module, [(mod_name, name)])
self.import_tracker.reexport(name)
else:
# import {module_name} as {name}
self.import_tracker.add_import(module_name, name)
self.import_tracker.reexport(name)
elif self.is_defined_in_module(obj) and not inspect.ismodule(obj):
# process this below
items.append((name, obj))
else:
# from {obj_module} import {obj_name}
obj_module_name = self.get_obj_module(obj)
if obj_module_name:
self.import_tracker.add_import_from(obj_module_name, [(name, None)])
if self.should_reexport(name, obj_module_name, name_is_alias=False):
self.import_tracker.reexport(name)
self.set_defined_names({name for name, obj in all_items if not inspect.ismodule(obj)})
if self.resort_members:
functions: list[str] = []
types: list[str] = []
variables: list[str] = []
else:
output: list[str] = []
functions = types = variables = output
for name, obj in items:
if self.is_function(obj):
self.generate_function_stub(name, obj, output=functions)
elif inspect.isclass(obj):
self.generate_class_stub(name, obj, output=types)
else:
self.generate_variable_stub(name, obj, output=variables)
self._output = []
if self.resort_members:
for line in variables:
self._output.append(line + "\n")
for line in types:
if line.startswith("class") and self._output and self._output[-1]:
self._output.append("\n")
self._output.append(line + "\n")
if self._output and functions:
self._output.append("\n")
for line in functions:
self._output.append(line + "\n")
else:
for i, line in enumerate(output):
if (
self._output
and line.startswith("class")
and (
not self._output[-1].startswith("class")
or (len(output) > i + 1 and output[i + 1].startswith(" "))
)
) or (
self._output
and self._output[-1].startswith("def")
and not line.startswith("def")
):
self._output.append("\n")
self._output.append(line + "\n")
self.check_undefined_names()
def is_skipped_attribute(self, attr: str) -> bool:
return (
attr
in (
"__class__",
"__getattribute__",
"__str__",
"__repr__",
"__doc__",
"__dict__",
"__module__",
"__weakref__",
"__annotations__",
"__firstlineno__",
"__static_attributes__",
"__annotate__",
)
or attr in self.IGNORED_DUNDERS
or is_pybind_skipped_attribute(attr) # For pickling
or keyword.iskeyword(attr)
)
def get_members(self, obj: object) -> list[tuple[str, Any]]:
obj_dict: Mapping[str, Any] = getattr(obj, "__dict__") # noqa: B009
results = []
for name in obj_dict:
if self.is_skipped_attribute(name):
continue
# Try to get the value via getattr
try:
value = getattr(obj, name)
except AttributeError:
continue
else:
results.append((name, value))
return results
def get_type_annotation(self, obj: object) -> str:
"""
Given an instance, return a string representation of its type that is valid
to use as a type annotation.
"""
if obj is None or obj is type(None):
return "None"
elif inspect.isclass(obj):
return f"type[{self.get_type_fullname(obj)}]"
elif isinstance(obj, FunctionType):
return self.add_name("typing.Callable")
elif isinstance(obj, ModuleType):
return self.add_name("types.ModuleType", require=False)
else:
return self.get_type_fullname(type(obj))
def is_function(self, obj: object) -> bool:
if self.is_c_module:
return inspect.isbuiltin(obj)
else:
return inspect.isfunction(obj)
def is_method(self, class_info: ClassInfo, name: str, obj: object) -> bool:
if self.is_c_module:
return inspect.ismethoddescriptor(obj) or type(obj) in (
type(str.index),
type(str.__add__),
type(str.__new__),
)
else:
# this is valid because it is only called on members of a class
return inspect.isfunction(obj)
def is_classmethod(self, class_info: ClassInfo, name: str, obj: object) -> bool:
if self.is_c_module:
return inspect.isbuiltin(obj) or type(obj).__name__ in (
"classmethod",
"classmethod_descriptor",
)
else:
return inspect.ismethod(obj)
def is_staticmethod(self, class_info: ClassInfo | None, name: str, obj: object) -> bool:
if class_info is None:
return False
elif self.is_c_module:
raw_lookup: Mapping[str, Any] = getattr(class_info.cls, "__dict__") # noqa: B009
raw_value = raw_lookup.get(name, obj)
return isinstance(raw_value, staticmethod)
else:
return isinstance(inspect.getattr_static(class_info.cls, name), staticmethod)
@staticmethod
def is_abstract_method(obj: object) -> bool:
return getattr(obj, "__abstractmethod__", False)
@staticmethod
def is_property(class_info: ClassInfo, name: str, obj: object) -> bool:
return inspect.isdatadescriptor(obj) or hasattr(obj, "fget")
@staticmethod
def is_property_readonly(prop: Any) -> bool:
return hasattr(prop, "fset") and prop.fset is None
def is_static_property(self, obj: object) -> bool:
"""For c-modules, whether the property behaves like an attribute"""
if self.is_c_module:
# StaticProperty is from boost-python
return type(obj).__name__ in ("pybind11_static_property", "StaticProperty")
else:
return False
def process_inferred_sigs(self, inferred: list[FunctionSig]) -> None:
for i, sig in enumerate(inferred):
for arg in sig.args:
if arg.type is not None:
arg.type = self.strip_or_import(arg.type)
if sig.ret_type is not None:
inferred[i] = sig._replace(ret_type=self.strip_or_import(sig.ret_type))
def generate_function_stub(
self, name: str, obj: object, *, output: list[str], class_info: ClassInfo | None = None
) -> None:
"""Generate stub for a single function or method.
The result (always a single line) will be appended to 'output'.
If necessary, any required names will be added to 'imports'.
The 'class_name' is used to find signature of __init__ or __new__ in
'class_sigs'.
"""
docstring: Any = getattr(obj, "__doc__", None)
if not isinstance(docstring, str):
docstring = None
ctx = FunctionContext(
self.module_name,
name,
docstring=docstring,
is_abstract=self.is_abstract_method(obj),
class_info=class_info,
)
if self.is_private_name(name, ctx.fullname) or self.is_not_in_all(name):
return
self.record_name(ctx.name)
default_sig = self.get_default_function_sig(obj, ctx)
inferred = self.get_signatures(default_sig, self.sig_generators, ctx)
self.process_inferred_sigs(inferred)
decorators = []
if len(inferred) > 1:
decorators.append("@{}".format(self.add_name("typing.overload")))
if ctx.is_abstract:
decorators.append("@{}".format(self.add_name("abc.abstractmethod")))
if class_info is not None:
if self.is_staticmethod(class_info, name, obj):
decorators.append("@staticmethod")
else:
for sig in inferred:
if not sig.args or sig.args[0].name not in ("self", "cls"):
sig.args.insert(0, ArgSig(name=class_info.self_var))
# a sig generator indicates @classmethod by specifying the cls arg.
if inferred[0].args and inferred[0].args[0].name == "cls":
decorators.append("@classmethod")
docstring = self._indent_docstring(ctx.docstring) if ctx.docstring else None
output.extend(self.format_func_def(inferred, decorators=decorators, docstring=docstring))
self._fix_iter(ctx, inferred, output)
def _indent_docstring(self, docstring: str) -> str:
"""Fix indentation of docstring extracted from pybind11 or other binding generators."""
lines = docstring.splitlines(keepends=True)
indent = self._indent + " "
if len(lines) > 1:
if not all(line.startswith(indent) or not line.strip() for line in lines):
# if the docstring is not indented, then indent all but the first line
for i, line in enumerate(lines[1:]):
if line.strip():
lines[i + 1] = indent + line
# if there's a trailing newline, add a final line to visually indent the quoted docstring
if lines[-1].endswith("\n"):
if len(lines) > 1:
lines.append(indent)
else:
lines[-1] = lines[-1][:-1]
return "".join(lines)
def _fix_iter(
self, ctx: FunctionContext, inferred: list[FunctionSig], output: list[str]
) -> None:
"""Ensure that objects which implement old-style iteration via __getitem__
are considered iterable.
"""
if (
ctx.class_info
and ctx.class_info.cls is not None
and ctx.name == "__getitem__"
and "__iter__" not in ctx.class_info.cls.__dict__
):
item_type: str | None = None
for sig in inferred:
if sig.args and sig.args[-1].type == "int":
item_type = sig.ret_type
break
if item_type is None:
return
obj = CFunctionStub(
"__iter__", f"def __iter__(self) -> typing.Iterator[{item_type}]\n"
)
self.generate_function_stub("__iter__", obj, output=output, class_info=ctx.class_info)
def generate_property_stub(
self,
name: str,
raw_obj: object,
obj: object,
static_properties: list[str],
rw_properties: list[str],
ro_properties: list[str],
class_info: ClassInfo | None = None,
) -> None:
"""Generate property stub using introspection of 'obj'.
Try to infer type from docstring, append resulting lines to 'output'.
raw_obj : object before evaluation of descriptor (if any)
obj : object after evaluation of descriptor
"""
docstring = getattr(raw_obj, "__doc__", None)
fget = getattr(raw_obj, "fget", None)
if fget:
alt_docstr = getattr(fget, "__doc__", None)
if alt_docstr and docstring:
docstring += "\n" + alt_docstr
elif alt_docstr:
docstring = alt_docstr
ctx = FunctionContext(
self.module_name, name, docstring=docstring, is_abstract=False, class_info=class_info
)
if self.is_private_name(name, ctx.fullname) or self.is_not_in_all(name):
return
self.record_name(ctx.name)
static = self.is_static_property(raw_obj)
readonly = self.is_property_readonly(raw_obj)
if static:
ret_type: str | None = self.strip_or_import(self.get_type_annotation(obj))
else:
default_sig = self.get_default_function_sig(raw_obj, ctx)
ret_type = default_sig.ret_type
inferred_type = self.get_property_type(ret_type, self.sig_generators, ctx)
if inferred_type is not None:
inferred_type = self.strip_or_import(inferred_type)
if static:
classvar = self.add_name("typing.ClassVar")
trailing_comment = " # read-only" if readonly else ""
if inferred_type is None:
inferred_type = self.add_name("_typeshed.Incomplete")
static_properties.append(
f"{self._indent}{name}: {classvar}[{inferred_type}] = ...{trailing_comment}"
)
else: # regular property
if readonly:
docstring = self._indent_docstring(ctx.docstring) if ctx.docstring else None
ro_properties.append(f"{self._indent}@property")
sig = FunctionSig(name, [ArgSig("self")], inferred_type, docstring=docstring)
ro_properties.append(
sig.format_sig(
indent=self._indent, include_docstrings=self._include_docstrings
)
)
else:
if inferred_type is None:
inferred_type = self.add_name("_typeshed.Incomplete")
rw_properties.append(f"{self._indent}{name}: {inferred_type}")
def get_type_fullname(self, typ: type) -> str:
"""Given a type, return a string representation"""
if typ is Any:
return "Any"
typename = getattr(typ, "__qualname__", typ.__name__)
module_name = self.get_obj_module(typ)
if module_name is None:
# This should not normally happen, but some types may resist our
# introspection attempts too hard. See
# https://github.com/python/mypy/issues/19031
return "_typeshed.Incomplete"
if module_name != "builtins":
typename = f"{module_name}.{typename}"
return typename
def get_base_types(self, obj: type) -> list[str]:
all_bases = type.mro(obj)
if all_bases[-1] is object:
# TODO: Is this always object?
del all_bases[-1]
# remove pybind11_object. All classes generated by pybind11 have pybind11_object in their MRO,
# which only overrides a few functions in object type
if all_bases and all_bases[-1].__name__ == "pybind11_object":
del all_bases[-1]
# remove the class itself
all_bases = all_bases[1:]
# Remove base classes of other bases as redundant.
bases: list[type] = []
for base in all_bases:
if not any(issubclass(b, base) for b in bases):
bases.append(base)
return [self.strip_or_import(self.get_type_fullname(base)) for base in bases]
def generate_class_stub(
self, class_name: str, cls: type, output: list[str], parent_class: ClassInfo | None = None
) -> None:
"""Generate stub for a single class using runtime introspection.
The result lines will be appended to 'output'. If necessary, any
required names will be added to 'imports'.
"""
raw_lookup: Mapping[str, Any] = getattr(cls, "__dict__") # noqa: B009
items = self.get_members(cls)
if self.resort_members:
items = sorted(items, key=lambda x: method_name_sort_key(x[0]))
names = {x[0] for x in items}
methods: list[str] = []
types: list[str] = []
static_properties: list[str] = []
rw_properties: list[str] = []
ro_properties: list[str] = []
attrs: list[tuple[str, Any]] = []
self.record_name(class_name)
self.indent()
class_info = ClassInfo(
class_name, "", getattr(cls, "__doc__", None), cls, parent=parent_class
)
for attr, value in items:
# use unevaluated descriptors when dealing with property inspection
raw_value = raw_lookup.get(attr, value)
if self.is_method(class_info, attr, value) or self.is_classmethod(
class_info, attr, value
):
if attr == "__new__":
# TODO: We should support __new__.
if "__init__" in names:
# Avoid duplicate functions if both are present.
# But is there any case where .__new__() has a
# better signature than __init__() ?
continue
attr = "__init__"
# FIXME: make this nicer
if self.is_staticmethod(class_info, attr, value):
class_info.self_var = ""
elif self.is_classmethod(class_info, attr, value):
class_info.self_var = "cls"
else:
class_info.self_var = "self"
self.generate_function_stub(attr, value, output=methods, class_info=class_info)
elif self.is_property(class_info, attr, raw_value):
self.generate_property_stub(
attr,
raw_value,
value,
static_properties,
rw_properties,
ro_properties,
class_info,
)
elif inspect.isclass(value) and self.is_defined_in_module(value):
self.generate_class_stub(attr, value, types, parent_class=class_info)
else:
attrs.append((attr, value))
for attr, value in attrs:
if attr == "__hash__" and value is None:
# special case for __hash__
continue
prop_type_name = self.strip_or_import(self.get_type_annotation(value))
classvar = self.add_name("typing.ClassVar")
static_properties.append(f"{self._indent}{attr}: {classvar}[{prop_type_name}] = ...")
self.dedent()
bases = self.get_base_types(cls)
if bases:
bases_str = "(%s)" % ", ".join(bases)
else:
bases_str = ""
if class_info.docstring and self._include_docstrings:
doc = quote_docstring(self._indent_docstring(class_info.docstring))
doc = f" {self._indent}{doc}"
docstring = doc.splitlines(keepends=False)
else:
docstring = []
if docstring or types or static_properties or rw_properties or methods or ro_properties:
output.append(f"{self._indent}class {class_name}{bases_str}:")
output.extend(docstring)
for line in types:
if (
output
and output[-1]
and not output[-1].strip().startswith("class")
and line.strip().startswith("class")
):
output.append("")
output.append(line)
output.extend(static_properties)
output.extend(rw_properties)
output.extend(methods)
output.extend(ro_properties)
else:
output.append(f"{self._indent}class {class_name}{bases_str}: ...")
def generate_variable_stub(self, name: str, obj: object, output: list[str]) -> None:
"""Generate stub for a single variable using runtime introspection.
The result lines will be appended to 'output'. If necessary, any
required names will be added to 'imports'.
"""
if self.is_private_name(name, f"{self.module_name}.{name}") or self.is_not_in_all(name):
return
self.record_name(name)
type_str = self.strip_or_import(self.get_type_annotation(obj))
output.append(f"{name}: {type_str}")
def method_name_sort_key(name: str) -> tuple[int, str]:
"""Sort methods in classes in a typical order.
I.e.: constructor, normal methods, special methods.
"""
if name in ("__new__", "__init__"):
return 0, name
if name.startswith("__") and name.endswith("__"):
return 2, name
return 1, name
def is_pybind_skipped_attribute(attr: str) -> bool:
return attr.startswith("__pybind11_module_local_")
def infer_c_method_args(
name: str, self_var: str = "self", arg_names: list[str] | None = None
) -> list[ArgSig]:
args: list[ArgSig] | None = None
if name.startswith("__") and name.endswith("__"):
name = name[2:-2]
if name in (
"hash",
"iter",
"next",
"sizeof",
"copy",
"deepcopy",
"reduce",
"getinitargs",
"int",
"float",
"trunc",
"complex",
"bool",
"abs",
"bytes",
"dir",
"len",
"reversed",
"round",
"index",
"enter",
):
args = []
elif name == "getitem":
args = [ArgSig(name="index")]
elif name == "setitem":
args = [ArgSig(name="index"), ArgSig(name="object")]
elif name in ("delattr", "getattr"):
args = [ArgSig(name="name")]
elif name == "setattr":
args = [ArgSig(name="name"), ArgSig(name="value")]
elif name == "getstate":
args = []
elif name == "setstate":
args = [ArgSig(name="state")]
elif name in ("eq", "ne", "lt", "le", "gt", "ge"):
args = [ArgSig(name="other", type="object")]
elif name in (
"add",
"radd",
"sub",
"rsub",
"mul",
"rmul",
"mod",
"rmod",
"floordiv",
"rfloordiv",
"truediv",
"rtruediv",
"divmod",
"rdivmod",
"pow",
"rpow",
"xor",
"rxor",
"or",
"ror",
"and",
"rand",
"lshift",
"rlshift",
"rshift",
"rrshift",
"contains",
"delitem",
"iadd",
"iand",
"ifloordiv",
"ilshift",
"imod",
"imul",
"ior",
"ipow",
"irshift",
"isub",
"itruediv",
"ixor",
):
args = [ArgSig(name="other")]
elif name in ("neg", "pos", "invert"):
args = []
elif name == "get":
args = [ArgSig(name="instance"), ArgSig(name="owner")]
elif name == "set":
args = [ArgSig(name="instance"), ArgSig(name="value")]
elif name == "reduce_ex":
args = [ArgSig(name="protocol")]
elif name == "exit":
args = [
ArgSig(name="type", type="type[BaseException] | None"),
ArgSig(name="value", type="BaseException | None"),
ArgSig(name="traceback", type="types.TracebackType | None"),
]
if args is None:
args = infer_method_arg_types(name, self_var, arg_names)
else:
args = [ArgSig(name=self_var)] + args
if args is None:
args = [ArgSig(name="*args"), ArgSig(name="**kwargs")]
return args
| InspectionStubGenerator |
python | sympy__sympy | sympy/integrals/manualintegrate.py | {
"start": 18071,
"end": 18233
} | class ____(IRule):
def eval(self) -> Expr:
a, b, x = self.a, self.b, self.variable
return sinh(b)*Chi(a*x) + cosh(b)*Shi(a*x)
@dataclass
| ShiRule |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 117605,
"end": 117761
} | class ____:
xlSubscribeToPicture = -4147 # from enum XlSubscribeToFormat
xlSubscribeToText = -4158 # from enum XlSubscribeToFormat
| SubscribeToFormat |
python | keon__algorithms | algorithms/set/randomized_set.py | {
"start": 407,
"end": 1704
} | class ____():
"""
idea: shoot
"""
def __init__(self):
self.elements = []
self.index_map = {} # element -> index
def insert(self, new_one):
if new_one in self.index_map:
return
self.index_map[new_one] = len(self.elements)
self.elements.append(new_one)
def remove(self, old_one):
if not old_one in self.index_map:
return
index = self.index_map[old_one]
last = self.elements.pop()
self.index_map.pop(old_one)
if index == len(self.elements):
return
self.elements[index] = last
self.index_map[last] = index
def random_element(self):
return random.choice(self.elements)
def __test():
rset = RandomizedSet()
ground_truth = set()
n = 64
for i in range(n):
rset.insert(i)
ground_truth.add(i)
# Remove a half
for i in random.sample(range(n), n // 2):
rset.remove(i)
ground_truth.remove(i)
print(len(ground_truth), len(rset.elements), len(rset.index_map))
for i in ground_truth:
assert(i == rset.elements[rset.index_map[i]])
for i in range(n):
print(rset.random_element(), end=' ')
print()
if __name__ == "__main__":
__test()
| RandomizedSet |
python | catalyst-team__catalyst | examples/reinforcement_learning/ddpg.py | {
"start": 566,
"end": 1539
} | class ____:
def __init__(self, capacity: int):
self.buffer = deque(maxlen=capacity)
def append(self, transition: Transition):
self.buffer.append(transition)
def sample(self, size: int) -> Sequence[np.array]:
indices = np.random.choice(
len(self.buffer), size, replace=size > len(self.buffer)
)
states, actions, rewards, dones, next_states = zip(
*[self.buffer[idx] for idx in indices]
)
states = np.array(states, dtype=np.float32)
actions = np.array(actions, dtype=np.int64)
rewards = np.array(rewards, dtype=np.float32)
dones = np.array(dones, dtype=np.bool)
next_states = np.array(next_states, dtype=np.float32)
return states, actions, rewards, dones, next_states
def __len__(self) -> int:
return len(self.buffer)
# as far as RL does not have some predefined dataset,
# we need to specify epoch length by ourselfs
| ReplayBuffer |
python | ipython__ipython | tests/test_debugger.py | {
"start": 707,
"end": 1178
} | class ____(object):
"""
A fake input stream for pdb's interactive debugger. Whenever a
line is read, print it (to simulate the user typing it), and then
return it. The set of lines to return is specified in the
constructor; they should not have trailing newlines.
"""
def __init__(self, lines):
self.lines = iter(lines)
def readline(self):
line = next(self.lines)
print(line)
return line + "\n"
| _FakeInput |
python | django__django | tests/test_client_regress/tests.py | {
"start": 53150,
"end": 54053
} | class ____(SimpleTestCase):
def test_client_headers(self):
"A test client can receive custom headers"
response = self.client.get(
"/check_headers/", headers={"x-arg-check": "Testing 123"}
)
self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123")
self.assertEqual(response.status_code, 200)
def test_client_headers_redirect(self):
"Test client headers are preserved through redirects"
response = self.client.get(
"/check_headers_redirect/",
follow=True,
headers={"x-arg-check": "Testing 123"},
)
self.assertEqual(response.content, b"HTTP_X_ARG_CHECK: Testing 123")
self.assertRedirects(
response, "/check_headers/", status_code=302, target_status_code=200
)
@override_settings(ROOT_URLCONF="test_client_regress.urls")
| RequestHeadersTest |
python | getsentry__sentry | tests/sentry/db/models/fields/test_jsonfield.py | {
"start": 357,
"end": 556
} | class ____(models.Model):
id = models.AutoField(primary_key=True)
json = JSONField(default={"sukasuka": "YAAAAAZ"})
class Meta:
app_label = "fixtures"
| JSONFieldWithDefaultTestModel |
python | kamyu104__LeetCode-Solutions | Python/maximum-score-of-a-node-sequence.py | {
"start": 62,
"end": 907
} | class ____(object):
def maximumScore(self, scores, edges):
"""
:type scores: List[int]
:type edges: List[List[int]]
:rtype: int
"""
def find_top3(scores, x, top3):
heapq.heappush(top3, (scores[x], x))
if len(top3) > 3:
heapq.heappop(top3)
top3 = [[] for _ in xrange(len(scores))]
for a, b in edges:
find_top3(scores, b, top3[a])
find_top3(scores, a, top3[b])
result = -1
for a, b in edges:
for _, c in top3[a]:
if c == b:
continue
for _, d in top3[b]:
if d == a or d == c:
continue
result = max(result, sum(scores[x] for x in (a, b, c, d)))
return result
| Solution |
python | cython__cython | Cython/Debugger/libcython.py | {
"start": 16673,
"end": 18656
} | class ____:
def __init__(self, filename, lexer, formatter=None):
self.filename = filename
self.lexer = lexer
self.formatter = formatter
def valid(self):
return self.filename is not None
def lex(self, code):
if pygments and self.lexer and parameters.colorize_code:
bg = parameters.terminal_background.value
if self.formatter is None:
formatter = pygments.formatters.TerminalFormatter(bg=bg)
else:
formatter = self.formatter
return pygments.highlight(code, self.lexer, formatter)
return code
def _get_source(self, start, stop, lex_source, mark_line, lex_entire):
with open(self.filename) as f:
# to provide "correct" colouring, the entire code needs to be
# lexed. However, this makes a lot of things terribly slow, so
# we decide not to. Besides, it's unlikely to matter.
if lex_source and lex_entire:
f = self.lex(f.read()).splitlines()
slice = itertools.islice(f, start - 1, stop - 1)
for idx, line in enumerate(slice):
if start + idx == mark_line:
prefix = '>'
else:
prefix = ' '
if lex_source and not lex_entire:
line = self.lex(line)
yield '%s %4d %s' % (prefix, start + idx, line.rstrip())
def get_source(self, start, stop=None, lex_source=True, mark_line=0,
lex_entire=False):
exc = gdb.GdbError('Unable to retrieve source code')
if not self.filename:
raise exc
start = max(start, 1)
if stop is None:
stop = start + 1
try:
return '\n'.join(
self._get_source(start, stop, lex_source, mark_line, lex_entire))
except OSError:
raise exc
# Errors
| SourceFileDescriptor |
python | google__jax | tests/pallas/tpu_pallas_async_test.py | {
"start": 25086,
"end": 31597
} | class ____(parameterized.TestCase):
def setUp(self):
super().setUp()
if not jtu.is_device_tpu_at_least(4):
self.skipTest('DMAs only guaranteed to work on TPU v4+')
if jax.device_count() < 2:
self.skipTest('Test only works with >2 devices')
def test_basic_remote_copy(self):
mesh = jax.make_mesh(
(jax.device_count(),),
('x',),
axis_types=(jax.sharding.AxisType.Auto,),
)
@jax.jit
@partial(
shard_map.shard_map, mesh=mesh, in_specs=(P('x'),), out_specs=P('x'),
check_vma=False,
)
def f(x):
copy_start, send_done, recv_done = make_async_remote_copy('x')
x, fut = copy_start(x)
x = send_done(x, fut)
y = recv_done(x, fut)
return y
x = jax.random.normal(
jax.random.key(0), (jax.device_count(), 8, 128), dtype=jnp.float32
)
y = f(x)
expected = jnp.roll(x, shift=1, axis=0)
np.testing.assert_array_equal(y, expected)
def test_multi_remote_copy(self):
mesh = jax.make_mesh(
(jax.device_count(),),
('x',),
axis_types=(jax.sharding.AxisType.Auto,),
)
@jax.jit
@partial(
shard_map.shard_map, mesh=mesh, in_specs=(P('x'),), out_specs=P('x'),
check_vma=False,
)
def f(x):
copy_start, send_done, recv_done = make_async_remote_copy(
'x', direction='right'
)
copy_start2, send_done2, recv_done2 = make_async_remote_copy(
'x', direction='left'
)
x, fut = copy_start(x)
x, fut2 = copy_start2(x)
x = send_done(x, fut)
x = send_done2(x, fut2)
y = recv_done(x, fut)
y2 = recv_done2(x, fut2)
return y, y2
x = jax.random.normal(
jax.random.key(0), (jax.device_count(), 8, 128), dtype=jnp.float32
)
y, y2 = f(x)
y_expected = jnp.roll(x, shift=1, axis=0)
y2_expected = jnp.roll(x, shift=-1, axis=0)
np.testing.assert_array_equal(y, y_expected)
np.testing.assert_array_equal(y2, y2_expected)
def test_basic_collective_permute_loop(self):
mesh = jax.make_mesh(
(jax.device_count(),),
('x',),
axis_types=(jax.sharding.AxisType.Auto,),
)
@jax.jit
@partial(
shard_map.shard_map, mesh=mesh, in_specs=(P('x'),), out_specs=P('x'),
check_vma=False,
)
def f(x):
copy_start, send_done, recv_done = make_async_remote_copy('x')
def body(_, x):
x, fut = copy_start(x)
x = send_done(x, fut)
y = recv_done(x, fut)
return y
# Send all the way around except for one step
return jax.lax.fori_loop(0, jax.device_count() - 1, body, x)
x = jax.random.normal(
jax.random.key(0), (jax.device_count(), 8, 128), dtype=jnp.float32
)
y = f(x)
expected = jnp.roll(x, shift=-1, axis=0)
np.testing.assert_array_equal(y, expected)
def test_staggered_collective_permute_loop(self):
mesh = jax.make_mesh(
(jax.device_count(),),
('x',),
axis_types=(jax.sharding.AxisType.Auto,),
)
@jax.jit
@partial(
shard_map.shard_map, mesh=mesh, in_specs=(P('x'),), out_specs=P('x'),
check_vma=False,
)
def f(x):
assert x.shape[0] == 1
copy_start, send_done, recv_done = make_async_remote_copy('x')
x, fut = copy_start(x)
def body(_, carry):
x, fut = carry
x = send_done(x, fut)
y = recv_done(x, fut)
y, fut = copy_start(y)
return y, fut
# Send all the way around except for one step
x, fut = jax.lax.fori_loop(0, jax.device_count() - 2, body, (x, fut),
unroll=2)
x = send_done(x, fut)
y = recv_done(x, fut)
return y
n_devices = jax.device_count()
x = jax.random.normal(
jax.random.key(0), (n_devices, 8, 128), dtype=jnp.float32
)
y = f(x)
expected = jnp.roll(x, shift=-1, axis=0)
np.testing.assert_array_equal(y, expected)
def test_bidi_collective_permute_loop(self):
mesh = jax.make_mesh(
(jax.device_count(),),
('x',),
axis_types=(jax.sharding.AxisType.Auto,),
)
@jax.jit
@partial(
shard_map.shard_map, mesh=mesh, in_specs=(P('x'),), out_specs=P('x'),
check_vma=False,
)
def f(x):
assert x.shape[0] == 1
x = x[0]
copy_start, send_done, recv_done = make_bidi_collective_permute('x')
def body(_, x):
x, fut = copy_start(x)
x = send_done(x, fut)
y = recv_done(x, fut)
return y
# Send all the way around except for one step
y = jax.lax.fori_loop(0, jax.device_count() - 1, body, x)
return y[None]
x = jax.random.normal(
jax.random.key(0), (jax.device_count(), 16, 128), dtype=jnp.float32
)
y = f(x)
expected = jnp.concatenate([
jnp.roll(x[:, :8], axis=0, shift=-1),
jnp.roll(x[:, 8:], axis=0, shift=1),
], axis=1)
np.testing.assert_array_equal(y, expected)
def make_stateful_async_copy():
@jax.named_call
def copy_start(x_ref, o_ref) -> Future:
def copy_start_kernel(sem):
pltpu.make_async_copy(x_ref, o_ref, sem).start()
sem = pl.pallas_call(
copy_start_kernel,
out_shape=pltpu.SemaphoreType.DMA(()),
out_specs=pl.BlockSpec(memory_space=pltpu.SEMAPHORE),
)()
return sem
@jax.named_call
def copy_done(x_ref, o_ref, future):
sem = future
def copy_done_kernel(sem):
pltpu.make_async_copy(x_ref, o_ref, sem).wait()
() = pl.pallas_call(
copy_done_kernel,
out_shape=(),
in_specs=[
pl.BlockSpec(memory_space=pltpu.SEMAPHORE),
],
)(sem)
return copy_start, copy_done
def make_stateful_async_slice(i: int):
@jax.named_call
def copy_start(x_ref, o_ref) -> Future:
def copy_start_kernel(sem):
pltpu.make_async_copy(x_ref.at[i], o_ref, sem).start()
sem = pl.pallas_call(
copy_start_kernel,
out_shape=pltpu.SemaphoreType.DMA(()),
out_specs=pl.BlockSpec(memory_space=pltpu.SEMAPHORE),
)()
return sem
@jax.named_call
def copy_done(x_ref, o_ref, future):
sem = future
def copy_done_kernel(sem):
pltpu.make_async_copy(x_ref.at[i], o_ref, sem).wait()
() = pl.pallas_call(
copy_done_kernel,
out_shape=(),
in_specs=[
pl.BlockSpec(memory_space=pltpu.SEMAPHORE),
],
)(sem)
return copy_start, copy_done
| PallasCallRemoteAsyncCopyTest |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 42018,
"end": 44135
} | class ____(TypedDict, total=False):
type: Required[Literal['timedelta']]
strict: bool
le: timedelta
ge: timedelta
lt: timedelta
gt: timedelta
microseconds_precision: Literal['truncate', 'error']
ref: str
metadata: dict[str, Any]
serialization: SerSchema
def timedelta_schema(
*,
strict: bool | None = None,
le: timedelta | None = None,
ge: timedelta | None = None,
lt: timedelta | None = None,
gt: timedelta | None = None,
microseconds_precision: Literal['truncate', 'error'] = 'truncate',
ref: str | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> TimedeltaSchema:
"""
Returns a schema that matches a timedelta value, e.g.:
```py
from datetime import timedelta
from pydantic_core import SchemaValidator, core_schema
schema = core_schema.timedelta_schema(le=timedelta(days=1), ge=timedelta(days=0))
v = SchemaValidator(schema)
assert v.validate_python(timedelta(hours=12)) == timedelta(hours=12)
```
Args:
strict: Whether the value should be a timedelta or a value that can be converted to a timedelta
le: The value must be less than or equal to this timedelta
ge: The value must be greater than or equal to this timedelta
lt: The value must be strictly less than this timedelta
gt: The value must be strictly greater than this timedelta
microseconds_precision: The behavior when seconds have more than 6 digits or microseconds is too large
ref: optional unique identifier of the schema, used to reference the schema in other places
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
"""
return _dict_not_none(
type='timedelta',
strict=strict,
le=le,
ge=ge,
lt=lt,
gt=gt,
microseconds_precision=microseconds_precision,
ref=ref,
metadata=metadata,
serialization=serialization,
)
| TimedeltaSchema |
python | PrefectHQ__prefect | tests/concurrency/test_concurrency_slot_acquisition_with_lease_service.py | {
"start": 474,
"end": 10508
} | class ____:
"""Wrapper to make mocked client work with async context manager."""
def __init__(self, client: PrefectClient):
self.client = client
async def __aenter__(self) -> PrefectClient:
return self.client
async def __aexit__(self, *args: Any) -> None:
pass
@pytest.fixture
async def mocked_client(test_database_connection_url: str) -> Any:
"""Fixture providing a mocked client with increment_concurrency_slots_with_lease patched."""
async with get_client() as client:
with mock.patch.object(
client, "increment_concurrency_slots_with_lease", autospec=True
):
wrapped_client = ClientWrapper(client)
with mock.patch(
"prefect.concurrency.services.get_client", lambda: wrapped_client
):
yield wrapped_client
async def test_returns_successful_response(mocked_client: Any) -> None:
"""Test that the service returns a successful response with lease information."""
lease_id = uuid4()
response_data = {
"lease_id": str(lease_id),
"limits": [{"id": str(uuid4()), "name": "test-limit", "limit": 10}],
}
response = Response(200, json=response_data)
mocked_method = mocked_client.client.increment_concurrency_slots_with_lease
mocked_method.return_value = response
expected_names = sorted(["tag:test"])
expected_slots = 1
expected_mode = "concurrency"
expected_lease_duration = 60.0
expected_holder = ConcurrencyLeaseHolder(type="task_run", id=uuid4())
service = ConcurrencySlotAcquisitionWithLeaseService.instance(
frozenset(expected_names)
)
future: Future[Response] = service.send(
(
expected_slots,
expected_mode,
None, # timeout_seconds
None, # max_retries
expected_lease_duration,
False, # strict
expected_holder,
)
)
await service.drain()
returned_response = await asyncio.wrap_future(future)
assert returned_response == response
mocked_method.assert_called_once_with(
names=expected_names,
slots=expected_slots,
mode=expected_mode,
lease_duration=expected_lease_duration,
holder=expected_holder,
)
async def test_retries_failed_call_respects_retry_after_header(
mocked_client: Any,
) -> None:
"""Test that the service respects Retry-After headers on 423 responses."""
lease_id = uuid4()
responses = [
HTTPStatusError(
"Limit is locked",
request=Request("post", "/v2/concurrency_limits/increment-with-lease"),
response=Response(423, headers={"Retry-After": "10"}),
),
Response(
200,
json={
"lease_id": str(lease_id),
"limits": [{"id": str(uuid4()), "name": "tag:test", "limit": 10}],
},
),
]
mocked_client.client.increment_concurrency_slots_with_lease.side_effect = responses
limit_names = sorted(["tag:test"])
service = ConcurrencySlotAcquisitionWithLeaseService.instance(
frozenset(limit_names)
)
with mock.patch("asyncio.sleep") as sleep:
future: Future[Response] = service.send(
(
1, # slots
"concurrency", # mode
None, # timeout_seconds
None, # max_retries
60.0, # lease_duration
False, # strict
None, # holder
)
)
await service.drain()
returned_response = await asyncio.wrap_future(future)
assert returned_response == responses[1]
# Verify sleep was called with the Retry-After value
sleep.assert_called_once_with(
float(responses[0].response.headers["Retry-After"])
)
assert (
mocked_client.client.increment_concurrency_slots_with_lease.call_count == 2
)
async def test_failed_call_status_code_not_retryable_returns_exception(
mocked_client: Any,
) -> None:
"""Test that non-423 errors are not retried and are returned as exceptions."""
response = HTTPStatusError(
"Internal server error",
request=Request("post", "/v2/concurrency_limits/increment-with-lease"),
response=Response(500, headers={"Retry-After": "2"}),
)
mocked_client.client.increment_concurrency_slots_with_lease.side_effect = response
limit_names = sorted(["tag:test"])
service = ConcurrencySlotAcquisitionWithLeaseService.instance(
frozenset(limit_names)
)
future: Future[Response] = service.send(
(1, "concurrency", None, None, 60.0, False, None)
)
await service.drain()
with pytest.raises(HTTPStatusError) as exc_info:
await asyncio.wrap_future(future)
assert exc_info.value == response
async def test_max_retries_honored(mocked_client: Any) -> None:
"""Test that max_retries limit is respected and acquisition stops after exhausting retries."""
responses = [
HTTPStatusError(
"Limit is locked",
request=Request("post", "/v2/concurrency_limits/increment-with-lease"),
response=Response(423, headers={"Retry-After": "1"}),
)
] * 5 # More 423s than max_retries
mocked_client.client.increment_concurrency_slots_with_lease.side_effect = responses
limit_names = sorted(["tag:test"])
service = ConcurrencySlotAcquisitionWithLeaseService.instance(
frozenset(limit_names)
)
with mock.patch("asyncio.sleep"):
future: Future[Response] = service.send(
(
1, # slots
"concurrency", # mode
None, # timeout_seconds
2, # max_retries - only allow 2 retries
60.0, # lease_duration
False, # strict
None, # holder
)
)
await service.drain()
# Should get an exception after max_retries is exhausted
with pytest.raises(HTTPStatusError):
await asyncio.wrap_future(future)
# Should have called increment 3 times (initial + 2 retries)
assert (
mocked_client.client.increment_concurrency_slots_with_lease.call_count == 3
)
async def test_basic_exception_returns_exception(mocked_client: Any) -> None:
"""Test that basic exceptions are propagated correctly."""
exc = Exception("Something went wrong")
mocked_client.client.increment_concurrency_slots_with_lease.side_effect = exc
limit_names = sorted(["tag:test"])
service = ConcurrencySlotAcquisitionWithLeaseService.instance(
frozenset(limit_names)
)
future: Future[Response] = service.send(
(1, "concurrency", None, None, 60.0, False, None)
)
await service.drain()
with pytest.raises(Exception) as exc_info:
await asyncio.wrap_future(future)
assert exc_info.value == exc
async def test_singleton_per_limit_names(mocked_client: Any) -> None:
"""Test that the service is a singleton per unique set of limit names."""
names_a = frozenset(["tag:test-a"])
names_b = frozenset(["tag:test-b"])
names_a_duplicate = frozenset(["tag:test-a"])
service_a1 = ConcurrencySlotAcquisitionWithLeaseService.instance(names_a)
service_a2 = ConcurrencySlotAcquisitionWithLeaseService.instance(names_a_duplicate)
service_b = ConcurrencySlotAcquisitionWithLeaseService.instance(names_b)
# Same limit names should return the same instance
assert service_a1 is service_a2
# Different limit names should return different instances
assert service_a1 is not service_b
assert service_a2 is not service_b
async def test_serialization_behavior(mocked_client: Any) -> None:
"""Test that multiple concurrent acquisitions are serialized through the service.
This is the key test that validates the fix for the thundering herd issue.
When multiple tasks try to acquire slots simultaneously, the service ensures
they are processed one at a time rather than all hitting the server at once.
"""
call_order: list[dict[str, int | None]] = []
async def mock_increment(*args: Any, **kwargs: Any) -> Response:
# Record when this call starts
call_index = len(call_order)
call_order.append({"start": call_index, "end": None})
# Simulate some processing time
await asyncio.sleep(0.01)
# Record when this call ends
call_order[-1]["end"] = len([c for c in call_order if c["end"] is not None])
return Response(
200,
json={
"lease_id": str(uuid4()),
"limits": [{"id": str(uuid4()), "name": "tag:test", "limit": 10}],
},
)
mocked_client.client.increment_concurrency_slots_with_lease.side_effect = (
mock_increment
)
limit_names = frozenset(["tag:test"])
service = ConcurrencySlotAcquisitionWithLeaseService.instance(limit_names)
# Send 10 concurrent acquisition requests
futures: list[Future[Response]] = []
for i in range(10):
future = service.send((1, "concurrency", None, None, 60.0, False, None))
futures.append(future)
# Wait for all acquisitions to complete
await service.drain()
responses = await asyncio.gather(*[asyncio.wrap_future(f) for f in futures])
# Verify all succeeded
assert len(responses) == 10
assert all(r.status_code == 200 for r in responses)
# Verify they were processed serially (no overlapping execution)
# Each call should complete before the next one starts
for i in range(len(call_order) - 1):
# Current call's end index should be <= next call's start index
# This proves serialization
assert call_order[i]["end"] is not None
assert call_order[i]["end"] <= len(
[c for c in call_order[: i + 2] if c["end"] is not None]
)
| ClientWrapper |
python | anthropics__anthropic-sdk-python | src/anthropic/_client.py | {
"start": 1261,
"end": 10124
} | class ____(SyncAPIClient):
# client options
api_key: str | None
auth_token: str | None
# constants
HUMAN_PROMPT = _constants.HUMAN_PROMPT
AI_PROMPT = _constants.AI_PROMPT
def __init__(
self,
*,
api_key: str | None = None,
auth_token: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = not_given,
max_retries: int = DEFAULT_MAX_RETRIES,
default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
# Configure a custom httpx client.
# We provide a `DefaultHttpxClient` class that you can pass to retain the default values we use for `limits`, `timeout` & `follow_redirects`.
# See the [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
http_client: httpx.Client | None = None,
# Enable or disable schema validation for data returned by the API.
# When enabled an error APIResponseValidationError is raised
# if the API responds with invalid data for the expected schema.
#
# This parameter may be removed or changed in the future.
# If you rely on this feature, please open a GitHub issue
# outlining your use-case to help us decide if it should be
# part of our public interface in the future.
_strict_response_validation: bool = False,
) -> None:
"""Construct a new synchronous Anthropic client instance.
This automatically infers the following arguments from their corresponding environment variables if they are not provided:
- `api_key` from `ANTHROPIC_API_KEY`
- `auth_token` from `ANTHROPIC_AUTH_TOKEN`
"""
if api_key is None:
api_key = os.environ.get("ANTHROPIC_API_KEY")
self.api_key = api_key
if auth_token is None:
auth_token = os.environ.get("ANTHROPIC_AUTH_TOKEN")
self.auth_token = auth_token
if base_url is None:
base_url = os.environ.get("ANTHROPIC_BASE_URL")
if base_url is None:
base_url = f"https://api.anthropic.com"
super().__init__(
version=__version__,
base_url=base_url,
max_retries=max_retries,
timeout=timeout,
http_client=http_client,
custom_headers=default_headers,
custom_query=default_query,
_strict_response_validation=_strict_response_validation,
)
self._default_stream_cls = Stream
@cached_property
def completions(self) -> Completions:
from .resources.completions import Completions
return Completions(self)
@cached_property
def messages(self) -> Messages:
from .resources.messages import Messages
return Messages(self)
@cached_property
def models(self) -> Models:
from .resources.models import Models
return Models(self)
@cached_property
def beta(self) -> Beta:
from .resources.beta import Beta
return Beta(self)
@cached_property
def with_raw_response(self) -> AnthropicWithRawResponse:
return AnthropicWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AnthropicWithStreamedResponse:
return AnthropicWithStreamedResponse(self)
@property
@override
def qs(self) -> Querystring:
return Querystring(array_format="comma")
@property
@override
def auth_headers(self) -> dict[str, str]:
return {**self._api_key_auth, **self._bearer_auth}
@property
def _api_key_auth(self) -> dict[str, str]:
api_key = self.api_key
if api_key is None:
return {}
return {"X-Api-Key": api_key}
@property
def _bearer_auth(self) -> dict[str, str]:
auth_token = self.auth_token
if auth_token is None:
return {}
return {"Authorization": f"Bearer {auth_token}"}
@property
@override
def default_headers(self) -> dict[str, str | Omit]:
return {
**super().default_headers,
"X-Stainless-Async": "false",
"anthropic-version": "2023-06-01",
**self._custom_headers,
}
@override
def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
if headers.get("Authorization") or headers.get("X-Api-Key"):
# valid
return
if self.api_key and headers.get("X-Api-Key"):
return
if isinstance(custom_headers.get("X-Api-Key"), Omit):
return
if self.auth_token and headers.get("Authorization"):
return
if isinstance(custom_headers.get("Authorization"), Omit):
return
raise TypeError(
'"Could not resolve authentication method. Expected either api_key or auth_token to be set. Or for one of the `X-Api-Key` or `Authorization` headers to be explicitly omitted"'
)
def copy(
self,
*,
api_key: str | None = None,
auth_token: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = not_given,
http_client: httpx.Client | None = None,
max_retries: int | NotGiven = not_given,
default_headers: Mapping[str, str] | None = None,
set_default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
set_default_query: Mapping[str, object] | None = None,
_extra_kwargs: Mapping[str, Any] = {},
) -> Self:
"""
Create a new client instance re-using the same options given to the current client with optional overriding.
"""
if default_headers is not None and set_default_headers is not None:
raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")
if default_query is not None and set_default_query is not None:
raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")
headers = self._custom_headers
if default_headers is not None:
headers = {**headers, **default_headers}
elif set_default_headers is not None:
headers = set_default_headers
params = self._custom_query
if default_query is not None:
params = {**params, **default_query}
elif set_default_query is not None:
params = set_default_query
http_client = http_client or self._client
return self.__class__(
api_key=api_key or self.api_key,
auth_token=auth_token or self.auth_token,
base_url=base_url or self.base_url,
timeout=self.timeout if isinstance(timeout, NotGiven) else timeout,
http_client=http_client,
max_retries=max_retries if is_given(max_retries) else self.max_retries,
default_headers=headers,
default_query=params,
**_extra_kwargs,
)
# Alias for `copy` for nicer inline usage, e.g.
# client.with_options(timeout=10).foo.create(...)
with_options = copy
@override
def _make_status_error(
self,
err_msg: str,
*,
body: object,
response: httpx.Response,
) -> APIStatusError:
if response.status_code == 400:
return _exceptions.BadRequestError(err_msg, response=response, body=body)
if response.status_code == 401:
return _exceptions.AuthenticationError(err_msg, response=response, body=body)
if response.status_code == 403:
return _exceptions.PermissionDeniedError(err_msg, response=response, body=body)
if response.status_code == 404:
return _exceptions.NotFoundError(err_msg, response=response, body=body)
if response.status_code == 409:
return _exceptions.ConflictError(err_msg, response=response, body=body)
if response.status_code == 413:
return _exceptions.RequestTooLargeError(err_msg, response=response, body=body)
if response.status_code == 422:
return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body)
if response.status_code == 429:
return _exceptions.RateLimitError(err_msg, response=response, body=body)
if response.status_code == 529:
return _exceptions.OverloadedError(err_msg, response=response, body=body)
if response.status_code >= 500:
return _exceptions.InternalServerError(err_msg, response=response, body=body)
return APIStatusError(err_msg, response=response, body=body)
| Anthropic |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 30784,
"end": 31179
} | class ____(sgqlc.types.Enum):
"""The possible state reasons of an issue.
Enumeration Choices:
* `COMPLETED`: An issue that has been closed as completed
* `NOT_PLANNED`: An issue that has been closed as not planned
* `REOPENED`: An issue that has been reopened
"""
__schema__ = github_schema
__choices__ = ("COMPLETED", "NOT_PLANNED", "REOPENED")
| IssueStateReason |
python | pyinstaller__pyinstaller | PyInstaller/utils/win32/icon.py | {
"start": 2814,
"end": 3047
} | class ____(Structure):
_names_ = ("bWidth", "bHeight", "bColorCount", "bReserved", "wPlanes", "wBitCount", "dwBytesInRes", "nID")
_format_ = "bbbbhhih"
# An IconFile instance is created for each .ico file given.
| GRPICONDIRENTRY |
python | xlwings__xlwings | xlwings/pro/reports/markdown.py | {
"start": 4148,
"end": 8909
} | class ____(Converter):
@classmethod
def write_value(cls, value, options):
return render_text(value.text, value.style)
def traverse_ast_node(tree, data=None, level=0):
data = (
{
"length": [],
"type": [],
"parent_type": [],
"text": [],
"parents": [],
"level": [],
}
if data is None
else data
)
for element in tree:
data["parents"] = data["parents"][:level]
if "children" in element:
data["parents"].append(element)
traverse_ast_node(element["children"], data, level=level + 1)
else:
data["level"].append(level)
data["parent_type"].append([parent["type"] for parent in data["parents"]])
data["type"].append(element["type"])
if element["type"] == "text":
marker = "text" if mistune.__version__.startswith("2") else "raw"
data["length"].append(len(element[marker]))
data["text"].append(element[marker])
elif element["type"] in ("linebreak", "softbreak"):
# mistune v2 uses linebreak, mistune v3 uses softbreak
data["length"].append(1)
data["text"].append("\n")
return data
def flatten_ast(value):
if not mistune:
raise ImportError(
"For xlwings Reports, "
"you need to install mistune via 'pip/conda install mistune'"
)
if mistune.__version__.startswith("0"):
raise ImportError(
"Only mistune v2.x and v3.x are supported. "
f"You have version {mistune.__version__}"
)
elif mistune.__version__.startswith("2"):
parse_ast = mistune.create_markdown(renderer=mistune.AstRenderer())
else:
parse_ast = mistune.create_markdown(renderer="ast")
ast = parse_ast(value)
flat_ast = []
for node in ast:
rv = traverse_ast_node([node])
del rv["parents"]
flat_ast.append(rv)
return flat_ast
def render_text(text, style):
flat_ast = flatten_ast(text)
output = ""
for node in flat_ast:
# heading/list currently don't respect the level
if "heading" in node["parent_type"][0]:
output += "".join(node["text"])
output += "\n" + style.h1.blank_lines_after * "\n"
elif "paragraph" in node["parent_type"][0]:
output += "".join(node["text"])
output += "\n" + style.paragraph.blank_lines_after * "\n"
elif "list" in node["parent_type"][0]:
for j in node["text"]:
output += f"{style.unordered_list.bullet_character} {j}\n"
output += style.unordered_list.blank_lines_after * "\n"
return output.rstrip("\n")
def format_text(parent, text, style):
if sys.platform.startswith("darwin"):
# Characters formatting is broken because of a bug in AppleScript/Excel 2016
warnings.warn("Markdown formatting is currently ignored on macOS.")
return
flat_ast = flatten_ast(text)
position = 0
for node in flat_ast:
if "heading" in node["parent_type"][0]:
node_length = sum(node["length"]) + style.h1.blank_lines_after + 1
apply_style_to_font(
style.h1.font, parent.characters[position : position + node_length].font
)
elif "paragraph" in node["parent_type"][0]:
node_length = sum(node["length"]) + style.paragraph.blank_lines_after + 1
intra_node_position = position
for ix, j in enumerate(node["parent_type"]):
selection = slice(
intra_node_position, intra_node_position + node["length"][ix]
)
if "strong" in j:
apply_style_to_font(style.strong, parent.characters[selection].font)
elif "emphasis" in j:
apply_style_to_font(
style.emphasis, parent.characters[selection].font
)
intra_node_position += node["length"][ix]
elif "list" in node["parent_type"][0]:
node_length = sum(node["length"]) + style.unordered_list.blank_lines_after
for _ in node["text"]:
# TODO: check ast level to allow nested **strong** etc.
node_length += 3 # bullet, space and new line
else:
node_length = sum(node["length"])
position += node_length
def apply_style_to_font(style_object, font_object):
for attribute in vars(style_object):
if getattr(style_object, attribute):
setattr(font_object, attribute, getattr(style_object, attribute))
| MarkdownConverter |
python | pydantic__pydantic | tests/mypy/modules/generics.py | {
"start": 832,
"end": 951
} | class ____(HistoryField[int]):
pass
thing = DomainType(value=None)
assert_type(thing.value, Optional[int])
| DomainType |
python | ray-project__ray | python/ray/train/v2/api/report_config.py | {
"start": 575,
"end": 1011
} | class ____(Enum):
"""Read semantics for checkpoint retrieval during an ongoing run.
Members:
COMMITTED: Block until the checkpoint from the latest ray.train.report
has been uploaded and committed.
VALIDATED: Block until the checkpoint from the latest ray.train.report
has been uploaded and validated.
"""
COMMITTED = "COMMITTED"
VALIDATED = "VALIDATED"
| CheckpointConsistencyMode |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1126160,
"end": 1127224
} | class ____(sgqlc.types.Type, Node):
"""Represents a 'deployed' event on a given pull request."""
__schema__ = github_schema
__field_names__ = ("actor", "created_at", "database_id", "deployment", "pull_request", "ref")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
deployment = sgqlc.types.Field(sgqlc.types.non_null("Deployment"), graphql_name="deployment")
"""The deployment associated with the 'deployed' event."""
pull_request = sgqlc.types.Field(sgqlc.types.non_null("PullRequest"), graphql_name="pullRequest")
"""PullRequest referenced by event."""
ref = sgqlc.types.Field("Ref", graphql_name="ref")
"""The ref associated with the 'deployed' event."""
| DeployedEvent |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-couchdb/llama_index/readers/couchdb/base.py | {
"start": 207,
"end": 2688
} | class ____(BaseReader):
"""
Simple CouchDB reader.
Concatenates each CouchDB doc into Document used by LlamaIndex.
Args:
couchdb_url (str): CouchDB Full URL.
max_docs (int): Maximum number of documents to load.
"""
def __init__(
self,
user: str,
pwd: str,
host: str,
port: int,
couchdb_url: Optional[Dict] = None,
max_docs: int = 1000,
) -> None:
"""Initialize with parameters."""
if couchdb_url is not None:
self.client = couchdb3.Server(couchdb_url)
else:
self.client = couchdb3.Server(f"http://{user}:{pwd}@{host}:{port}")
self.max_docs = max_docs
def load_data(self, db_name: str, query: Optional[str] = None) -> List[Document]:
"""
Load data from the input directory.
Args:
db_name (str): name of the database.
query (Optional[str]): query to filter documents.
Defaults to None
Returns:
List[Document]: A list of documents.
"""
documents = []
db = self.client.get(db_name)
if query is None:
# if no query is specified, return all docs in database
logging.debug("showing all docs")
results = db.view("_all_docs", include_docs=True)
else:
logging.debug("executing query")
results = db.find(query)
if not isinstance(results, dict):
logging.debug(results.rows)
else:
logging.debug(results)
# check if more than one result
if (
not isinstance(results, dict)
and hasattr(results, "rows")
and results.rows is not None
):
for row in results.rows:
# check that the id field exists
if "id" not in row:
raise ValueError("`id` field not found in CouchDB document.")
documents.append(Document(text=json.dumps(row.doc)))
else:
# only one result
if results.get("docs") is not None:
for item in results.get("docs"):
# check that the _id field exists
if "_id" not in item:
raise ValueError("`_id` field not found in CouchDB document.")
documents.append(Document(text=json.dumps(item)))
return documents
| SimpleCouchDBReader |
python | doocs__leetcode | solution/0600-0699/0617.Merge Two Binary Trees/Solution.py | {
"start": 192,
"end": 616
} | class ____:
def mergeTrees(
self, root1: Optional[TreeNode], root2: Optional[TreeNode]
) -> Optional[TreeNode]:
if root1 is None:
return root2
if root2 is None:
return root1
node = TreeNode(root1.val + root2.val)
node.left = self.mergeTrees(root1.left, root2.left)
node.right = self.mergeTrees(root1.right, root2.right)
return node
| Solution |
python | kamyu104__LeetCode-Solutions | Python/is-array-a-preorder-of-some-binary-tree.py | {
"start": 37,
"end": 469
} | class ____(object):
def isPreorder(self, nodes):
"""
:type nodes: List[List[int]]
:rtype: bool
"""
stk = [nodes[0][0]]
for i in xrange(1, len(nodes)):
while stk and stk[-1] != nodes[i][1]:
stk.pop()
if not stk:
return False
stk.append(nodes[i][0])
return True
| Solution |
python | dagster-io__dagster | python_modules/dagster/dagster/_daemon/daemon.py | {
"start": 12653,
"end": 14703
} | class ____(DagsterDaemon):
def __init__(self, settings: Mapping[str, Any]) -> None:
super().__init__()
self._exit_stack = ExitStack()
self._threadpool_executor: Optional[InheritContextThreadPoolExecutor] = None
self._submit_threadpool_executor: Optional[InheritContextThreadPoolExecutor] = None
# Backfill daemon is enabled by default for reasons explained at:
# https://github.com/dagster-io/dagster/pull/30189#issuecomment-2930805760
if settings.get("use_threads", True):
num_workers = settings.get("num_workers", 4)
if num_workers:
self._threadpool_executor = self._exit_stack.enter_context(
InheritContextThreadPoolExecutor(
max_workers=settings.get("num_workers"),
thread_name_prefix="backfill_daemon_worker",
)
)
num_submit_workers = settings.get("num_submit_workers")
if num_submit_workers:
self._submit_threadpool_executor = self._exit_stack.enter_context(
InheritContextThreadPoolExecutor(
max_workers=settings.get("num_submit_workers"),
thread_name_prefix="backfill_submit_worker",
)
)
@classmethod
def daemon_type(cls) -> str:
return "BACKFILL"
def __exit__(self, _exception_type, _exception_value, _traceback):
self._exit_stack.close()
super().__exit__(_exception_type, _exception_value, _traceback)
def core_loop(
self,
workspace_process_context: IWorkspaceProcessContext,
shutdown_event: Event,
) -> DaemonIterator:
yield from execute_backfill_iteration_loop(
workspace_process_context,
self._logger,
shutdown_event,
threadpool_executor=self._threadpool_executor,
submit_threadpool_executor=self._submit_threadpool_executor,
)
| BackfillDaemon |
python | ray-project__ray | python/ray/data/llm.py | {
"start": 3714,
"end": 9122
} | class ____(_vLLMEngineProcessorConfig):
"""The configuration for the vLLM engine processor.
Args:
model_source: The model source to use for the vLLM engine.
batch_size: The batch size to send to the vLLM engine. Large batch sizes are
likely to saturate the compute resources and could achieve higher throughput.
On the other hand, small batch sizes are more fault-tolerant and could
reduce bubbles in the data pipeline. You can tune the batch size to balance
the throughput and fault-tolerance based on your use case.
engine_kwargs: The kwargs to pass to the vLLM engine. Default engine kwargs are
pipeline_parallel_size: 1, tensor_parallel_size: 1, max_num_seqs: 128,
distributed_executor_backend: "mp".
task_type: The task type to use. If not specified, will use 'generate' by default.
runtime_env: The runtime environment to use for the vLLM engine. See
:ref:`this doc <handling_dependencies>` for more details.
max_pending_requests: The maximum number of pending requests. If not specified,
will use the default value from the vLLM engine.
max_concurrent_batches: The maximum number of concurrent batches in the engine.
This is to overlap the batch processing to avoid the tail latency of
each batch. The default value may not be optimal when the batch size
or the batch processing latency is too small, but it should be good
enough for batch size >= 64.
chat_template_stage: Chat templating stage config (bool | dict | ChatTemplateStageConfig).
Defaults to True. Use nested config for per-stage control over batch_size,
concurrency, runtime_env, num_cpus, and memory. Legacy ``apply_chat_template``
and ``chat_template`` fields are deprecated but still supported.
tokenize_stage: Tokenizer stage config (bool | dict | TokenizerStageConfig).
Defaults to True. Use nested config for per-stage control over batch_size,
concurrency, runtime_env, num_cpus, memory, and model_source. Legacy
``tokenize`` field is deprecated but still supported.
detokenize_stage: Detokenizer stage config (bool | dict | DetokenizeStageConfig).
Defaults to True. Use nested config for per-stage control over batch_size,
concurrency, runtime_env, num_cpus, memory, and model_source. Legacy
``detokenize`` field is deprecated but still supported.
prepare_image_stage: Prepare image stage config (bool | dict | PrepareImageStageConfig).
Defaults to False. Use nested config for per-stage control over batch_size,
concurrency, runtime_env, num_cpus, and memory. Legacy ``has_image`` field
is deprecated but still supported.
accelerator_type: The accelerator type used by the LLM stage in a processor.
Default to None, meaning that only the CPU will be used.
concurrency: The number of workers for data parallelism. Default to 1.
If ``concurrency`` is a tuple ``(m, n)``, Ray creates an autoscaling
actor pool that scales between ``m`` and ``n`` workers (``1 <= m <= n``).
If ``concurrency`` is an ``int`` ``n``, CPU stages use an autoscaling
pool from ``(1, n)``, while GPU stages use a fixed pool of ``n`` workers.
Stage-specific concurrency can be set via nested stage configs.
Examples:
.. testcode::
:skipif: True
import ray
from ray.data.llm import vLLMEngineProcessorConfig, build_llm_processor
config = vLLMEngineProcessorConfig(
model_source="meta-llama/Meta-Llama-3.1-8B-Instruct",
engine_kwargs=dict(
enable_prefix_caching=True,
enable_chunked_prefill=True,
max_num_batched_tokens=4096,
),
concurrency=1,
batch_size=64,
)
processor = build_llm_processor(
config,
preprocess=lambda row: dict(
messages=[
{"role": "system", "content": "You are a calculator"},
{"role": "user", "content": f"{row['id']} ** 3 = ?"},
],
sampling_params=dict(
temperature=0.3,
max_tokens=20,
detokenize=False,
),
),
postprocess=lambda row: dict(
resp=row["generated_text"],
),
)
# The processor requires specific input columns, which depend on
# your processor config. You can use the following API to check
# the required input columns:
processor.log_input_column_names()
# Example log:
# The first stage of the processor is ChatTemplateStage.
# Required input columns:
# messages: A list of messages in OpenAI chat format.
ds = ray.data.range(300)
ds = processor(ds)
for row in ds.take_all():
print(row)
"""
pass
@PublicAPI(stability="alpha")
| vLLMEngineProcessorConfig |
python | huggingface__transformers | src/transformers/models/parakeet/feature_extraction_parakeet.py | {
"start": 1106,
"end": 13147
} | class ____(SequenceFeatureExtractor):
r"""
Constructs a Parakeet feature extractor.
This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
most of the main methods. Users should refer to this superclass for more information regarding those methods.
This class extracts mel-filter bank features from raw speech using a custom numpy implementation of the `Short Time
Fourier Transform` which should match pytorch's `torch.stft` equivalent.
Args:
feature_size (`int`, *optional*, defaults to 80):
The feature dimension of the extracted features.
sampling_rate (`int`, *optional*, defaults to 16000):
The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
hop_length (`int`, *optional*, defaults to 160):
Length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients.
n_fft (`int`, *optional*, defaults to 512):
Size of the Fourier transform.
win_length (`int`, *optional*, defaults to 400):
The window length for the STFT computation.
preemphasis (`float`, *optional*, defaults to 0.97):
A preemphasis filter coefficient. 0.0 means no preemphasis filter.
padding_value (`float`, *optional*, defaults to 0.0):
Padding value used to pad the audio. Should correspond to silences.
"""
model_input_names = ["input_features", "attention_mask"]
def __init__(
self,
feature_size=80,
sampling_rate=16000,
hop_length=160,
n_fft=512,
win_length=400,
preemphasis=0.97,
padding_value=0.0,
**kwargs,
):
super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
self.hop_length = hop_length
self.n_fft = n_fft
self.win_length = win_length
self.preemphasis = preemphasis
# TODO: @eustlb, for now we use librosa to compute the mel filters
# indeed mel_filter_bank uses np.float64 (while librosa uses np.float32), giving numerical differences
# self.mel_filters = mel_filter_bank(
# num_frequency_bins=n_fft // 2 + 1,
# num_mel_filters=feature_size,
# min_frequency=0.0,
# max_frequency=sampling_rate / 2,
# sampling_rate=sampling_rate,
# norm="slaney",
# mel_scale="slaney",
# )
mel_filters = librosa.filters.mel(
sr=sampling_rate, n_fft=n_fft, n_mels=feature_size, fmin=0.0, fmax=sampling_rate / 2, norm="slaney"
)
self.mel_filters = torch.from_numpy(mel_filters).to(torch.float32)
def _torch_extract_fbank_features(self, waveform, device="cpu"):
# spectrogram
window = torch.hann_window(self.win_length, periodic=False, device=device)
stft = torch.stft(
waveform,
self.n_fft,
hop_length=self.hop_length,
win_length=self.win_length,
window=window,
return_complex=True,
pad_mode="constant",
)
# Let's math original implementation
# magnitudes = torch.abs(stft) ** 2
magnitudes = torch.view_as_real(stft)
magnitudes = torch.sqrt(magnitudes.pow(2).sum(-1))
magnitudes = magnitudes.pow(2)
# log mel spectrogram
mel_filters = self.mel_filters.to(device)
mel_spec = mel_filters @ magnitudes
mel_spec = torch.log(mel_spec + LOG_ZERO_GUARD_VALUE)
# (batch_size, num_mel_filters, num_frames) -> (batch_size, num_frames, num_mel_filters)
mel_spec = mel_spec.permute(0, 2, 1)
return mel_spec
def __call__(
self,
raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]],
truncation: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_attention_mask: Optional[bool] = None,
padding: Optional[str] = "longest",
max_length: Optional[int] = None,
sampling_rate: Optional[int] = None,
do_normalize: Optional[bool] = None,
device: Optional[str] = "cpu",
return_token_timestamps: Optional[bool] = None,
**kwargs,
) -> BatchFeature:
"""
Main method to featurize and prepare for the model one or several sequence(s). Implementation uses PyTorch for
the STFT computation if available, otherwise a slower NumPy based one.
Args:
raw_speech (`np.ndarray`, `list[float]`, `list[np.ndarray]`, `list[list[float]]`):
The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
stereo, i.e. single float per timestep.
truncation (`bool`, *optional*, default to `True`):
Activates truncation to cut input sequences longer than *max_length* to *max_length*.
pad_to_multiple_of (`int`, *optional*, defaults to None):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific feature_extractor's default.
[What are attention masks?](../glossary#attention-mask)
<Tip>
For Parakeet models, `attention_mask` should always be passed for batched inference, to avoid subtle
bugs.
</Tip>
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
sampling_rate (`int`, *optional*):
The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
`sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition
pipeline.
padding_value (`float`, *optional*, defaults to 0.0):
The value that is used to fill the padding values / vectors.
do_normalize (`bool`, *optional*, defaults to `False`):
Whether or not to zero-mean unit-variance normalize the input. Normalizing can help to significantly
improve the performance of the model.
device (`str`, *optional*, defaults to `'cpu'`):
Specifies the device for computation of the log-mel spectrogram of audio signals in the
`_torch_extract_fbank_features` method. (e.g., "cpu", "cuda")
return_token_timestamps (`bool`, *optional*, defaults to `None`):
Deprecated. Use `return_attention_mask` instead from which the number of frames can be inferred.
Whether or not to return the number of frames of the input raw_speech.
These num_frames can be used by the model to compute word level timestamps.
"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}."
)
else:
logger.warning(
f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. "
"Failing to do so can result in silent errors that might be hard to debug."
)
# Convert to torch tensor
if isinstance(raw_speech, np.ndarray):
raw_speech = torch.tensor(raw_speech)
elif isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], np.ndarray):
raw_speech = [torch.tensor(speech) for speech in raw_speech]
is_batched_torch = isinstance(raw_speech, torch.Tensor) and len(raw_speech.shape) > 1
if is_batched_torch and len(raw_speech.shape) > 2:
logger.warning(
f"Only mono-channel audio is supported for input to {self.__class__.__name__}. "
"We will take the mean of the channels to convert to mono."
)
raw_speech = raw_speech.mean(-1)
is_batched_sequence = isinstance(raw_speech, (list, tuple))
if is_batched_sequence:
for speech in raw_speech:
if len(speech.shape) > 1:
logger.warning(
f"Only mono-channel audio is supported for input to {self.__class__.__name__}. "
"We will take the mean of the channels to convert to mono."
)
speech = speech.mean(-1)
if is_batched_torch or is_batched_sequence:
raw_speech = [speech[:, None].to(torch.float32) for speech in raw_speech]
else:
raw_speech = [raw_speech[:, None].to(torch.float32)]
audio_lengths = [len(speech) for speech in raw_speech]
batched_speech = BatchFeature({"input_features": raw_speech, "audio_lengths": audio_lengths})
padded_inputs = self.pad(
batched_speech,
padding=padding,
max_length=max_length,
truncation=truncation,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors="pt",
)
input_features = padded_inputs.input_features.squeeze(-1)
# preemphasis
if self.preemphasis is not None:
timemask = torch.arange(input_features.shape[1], device=input_features.device).unsqueeze(
0
) < padded_inputs.audio_lengths.unsqueeze(1)
input_features = torch.cat(
[input_features[:, :1], input_features[:, 1:] - self.preemphasis * input_features[:, :-1]], dim=1
)
input_features = input_features.masked_fill(~timemask, 0.0)
input_features = self._torch_extract_fbank_features(input_features, device)
features_lengths = torch.floor_divide(
padded_inputs.audio_lengths + self.n_fft // 2 * 2 - self.n_fft, self.hop_length
)
attention_mask = torch.arange(input_features.shape[1], device=device)[None, :] < features_lengths[:, None]
# normalize mel features, ignoring padding
mask = attention_mask.unsqueeze(-1)
input_features_masked = input_features * mask
mean = input_features_masked.sum(dim=1) / features_lengths.unsqueeze(-1)
mean = mean.unsqueeze(1)
variance = ((input_features_masked - mean) ** 2 * mask).sum(dim=1) / (features_lengths - 1).unsqueeze(-1)
std = torch.sqrt(variance).unsqueeze(1)
input_features = (input_features - mean) / (std + EPSILON)
input_features *= mask
return BatchFeature(
data={
"input_features": input_features,
"attention_mask": attention_mask,
},
tensor_type=return_tensors,
)
__all__ = ["ParakeetFeatureExtractor"]
| ParakeetFeatureExtractor |
python | optuna__optuna | optuna/_gp/search_space.py | {
"start": 677,
"end": 8212
class ____:
    """Vectorized view of an Optuna search space.

    Flattens the per-parameter distributions into parallel NumPy arrays
    (scale type, bounds, step) so that normalization, sampling, and rounding
    can operate on whole parameter vectors at once.
    """
    def __init__(
        self,
        optuna_search_space: dict[str, BaseDistribution],
    ) -> None:
        """Extract scale type, bounds, and step for every distribution."""
        self._optuna_search_space = optuna_search_space
        self._scale_types = np.empty(len(optuna_search_space), dtype=np.int64)
        self._bounds = np.empty((len(optuna_search_space), 2), dtype=float)
        self._steps = np.empty(len(optuna_search_space), dtype=float)
        for i, distribution in enumerate(optuna_search_space.values()):
            if isinstance(distribution, CategoricalDistribution):
                self._scale_types[i] = _ScaleType.CATEGORICAL
                # Categorical params are encoded as choice indices in [0, n_choices).
                self._bounds[i, :] = (0.0, len(distribution.choices))
                self._steps[i] = 1.0
            else:
                assert isinstance(distribution, (FloatDistribution, IntDistribution))
                self._scale_types[i] = _ScaleType.LOG if distribution.log else _ScaleType.LINEAR
                self._bounds[i, :] = (distribution.low, distribution.high)
                # step is None for continuous float distributions; 0.0 marks "continuous".
                self._steps[i] = distribution.step or 0.0
        self.dim = len(optuna_search_space)
        # TODO: Make it an index array.
        self.is_categorical = self._scale_types == _ScaleType.CATEGORICAL
        # NOTE(nabenabe): MyPy Redefinition for NumPy v2.2.0. (Cast signed int to int)
        self.discrete_indices = np.flatnonzero(self._steps > 0).astype(int)
        self.continuous_indices = np.flatnonzero(self._steps == 0.0).astype(int)
    def get_normalized_params(
        self,
        trials: list[FrozenTrial],
    ) -> np.ndarray:
        """Return a (n_trials, dim) array of parameters normalized to [0, 1]."""
        values = np.empty((len(trials), len(self._optuna_search_space)), dtype=float)
        for i, (param, distribution) in enumerate(self._optuna_search_space.items()):
            if isinstance(distribution, CategoricalDistribution):
                # Categorical params are stored as raw choice indices, not rescaled.
                values[:, i] = [distribution.to_internal_repr(t.params[param]) for t in trials]
            else:
                values[:, i] = _normalize_one_param(
                    np.array([trial.params[param] for trial in trials]),
                    self._scale_types[i],
                    (self._bounds[i, 0], self._bounds[i, 1]),
                    self._steps[i],
                )
        return values
    def get_unnormalized_param(
        self,
        normalized_param: np.ndarray,
    ) -> dict[str, Any]:
        """Map one normalized vector back to external Optuna parameter values."""
        # TODO(kAIto47802): Move the implementation of `_get_unnormalized_param` here
        # instead of wrapping it.
        return _get_unnormalized_param(self._optuna_search_space, normalized_param)
    def sample_normalized_params(self, n: int, rng: np.random.RandomState | None) -> np.ndarray:
        """Quasi-randomly sample ``n`` normalized parameter vectors."""
        # TODO(kAIto47802): Move the implementation of `_sample_normalized_params` here
        # instead of wrapping it.
        return _sample_normalized_params(n, self, rng)
    def get_choices_of_discrete_params(self) -> list[np.ndarray]:
        """Return, per discrete dimension, every admissible normalized value."""
        return [
            (
                np.arange(self._bounds[i, 1])
                if self.is_categorical[i]
                else _normalize_one_param(
                    # +0.5*step on the stop bound so the upper endpoint is included.
                    param_value=np.arange(
                        self._bounds[i, 0],
                        self._bounds[i, 1] + 0.5 * self._steps[i],
                        self._steps[i],
                    ),
                    scale_type=_ScaleType(self._scale_types[i]),
                    bounds=(self._bounds[i, 0], self._bounds[i, 1]),
                    step=self._steps[i],
                )
            )
            for i in self.discrete_indices
        ]
def _unnormalize_one_param(
    param_value: np.ndarray, scale_type: _ScaleType, bounds: tuple[float, float], step: float
) -> np.ndarray:
    """Map values from the normalized [0, 1] domain back to the parameter domain.

    ``param_value`` may be batched or not. Categorical values (choice indices)
    pass through unchanged; log-scaled parameters are exponentiated back.
    """
    if scale_type == _ScaleType.CATEGORICAL:
        return param_value
    # Widen the range by half a step on each side, mirroring _normalize_one_param.
    lo = bounds[0] - 0.5 * step
    hi = bounds[1] + 0.5 * step
    if scale_type == _ScaleType.LOG:
        lo, hi = math.log(lo), math.log(hi)
    rescaled = param_value * (hi - lo) + lo
    return np.exp(rescaled) if scale_type == _ScaleType.LOG else rescaled
def _normalize_one_param(
    param_value: np.ndarray, scale_type: _ScaleType, bounds: tuple[float, float], step: float
) -> np.ndarray:
    """Map values from the parameter domain into the normalized [0, 1] domain.

    ``param_value`` may be batched or not. Categorical values (choice indices)
    pass through unchanged.
    """
    if scale_type == _ScaleType.CATEGORICAL:
        return param_value
    # Widen the range by half a step on each side so grid endpoints map inside (0, 1).
    lo = bounds[0] - 0.5 * step
    hi = bounds[1] + 0.5 * step
    if scale_type == _ScaleType.LOG:
        lo, hi = math.log(lo), math.log(hi)
        param_value = np.log(param_value)
    if hi == lo:
        # Degenerate range: map everything to the midpoint.
        return np.full_like(param_value, 0.5)
    return (param_value - lo) / (hi - lo)
def _round_one_normalized_param(
    param_value: np.ndarray, scale_type: _ScaleType, bounds: tuple[float, float], step: float
) -> np.ndarray:
    """Snap normalized values onto the grid defined by ``step``.

    Round-trips through the original parameter domain: unnormalize, round to
    the nearest grid point, clip to the bounds, then normalize again.
    """
    assert scale_type != _ScaleType.CATEGORICAL
    if step == 0.0:
        # Continuous parameter: there is no grid to snap to.
        return param_value
    raw = _unnormalize_one_param(param_value, scale_type, bounds, step)
    low, high = bounds
    snapped = np.clip(
        (raw - low + 0.5 * step) // step * step + low,
        low,
        high,
    )
    return _normalize_one_param(snapped, scale_type, bounds, step)
def _sample_normalized_params(
    n: int, search_space: SearchSpace, rng: np.random.RandomState | None
) -> np.ndarray:
    """Draw ``n`` quasi-random points in the normalized [0, 1]^dim space.

    Continuous dimensions keep the raw Sobol draws; categorical dimensions are
    scaled to integer choice indices; discrete dimensions are snapped to their grid.
    """
    rng = rng or np.random.RandomState()
    dim = search_space._scale_types.shape[0]
    scale_types = search_space._scale_types
    bounds = search_space._bounds
    steps = search_space._steps
    # Sobol engine likely shares its internal state among threads.
    # Without threading.Lock, ValueError exceptions are raised in Sobol engine as discussed in
    # https://github.com/optuna/optunahub-registry/pull/168#pullrequestreview-2404054969
    with _threading_lock:
        qmc_engine = qmc.Sobol(dim, scramble=True, seed=rng.randint(np.iinfo(np.int32).max))
        param_values = qmc_engine.random(n)
    for i in range(dim):
        if scale_types[i] == _ScaleType.CATEGORICAL:
            # bounds[i, 1] holds n_choices; floor maps [0, 1) onto {0, ..., n_choices - 1}.
            param_values[:, i] = np.floor(param_values[:, i] * bounds[i, 1])
        elif steps[i] != 0.0:
            param_values[:, i] = _round_one_normalized_param(
                param_values[:, i], scale_types[i], (bounds[i, 0], bounds[i, 1]), steps[i]
            )
    return param_values
def _get_unnormalized_param(
    optuna_search_space: dict[str, BaseDistribution],
    normalized_param: np.ndarray,
) -> dict[str, Any]:
    """Convert one normalized parameter vector back into external Optuna values.

    Numeric values are clipped to the distribution bounds; int distributions
    are rounded to the nearest integer.
    """
    params: dict[str, Any] = {}
    for i, (name, dist) in enumerate(optuna_search_space.items()):
        if isinstance(dist, CategoricalDistribution):
            # normalized_param[i] is the choice index for categorical params.
            params[name] = dist.to_external_repr(normalized_param[i])
            continue
        assert isinstance(dist, (FloatDistribution, IntDistribution))
        scale_type = _ScaleType.LOG if dist.log else _ScaleType.LINEAR
        step = dist.step if dist.step is not None else 0.0
        bounds = (dist.low, dist.high)
        value = float(
            np.clip(
                _unnormalize_one_param(normalized_param[i], scale_type, bounds, step),
                dist.low,
                dist.high,
            )
        )
        if isinstance(dist, IntDistribution):
            value = round(value)
        params[name] = value
    return params
| SearchSpace |
python | pypa__warehouse | tests/unit/accounts/test_views.py | {
"start": 58086,
"end": 69611
class ____:
    """Tests for the recovery-code two-factor login view.

    The view's collaborators (token service, user service, session) are stubbed
    with `pretend`; assertions cover the redirects, flash messages, and the
    user events recorded on success/failure paths.
    """
    def test_already_authenticated(self):
        # An already-authenticated user is redirected straight to their projects.
        request = pretend.stub(
            user=pretend.stub(),
            route_path=pretend.call_recorder(lambda p: "redirect_to"),
        )
        result = views.recovery_code(request)
        assert request.route_path.calls == [pretend.call("manage.projects")]
        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Location"] == "redirect_to"
    def test_two_factor_token_invalid(self, pyramid_request):
        # An unloadable two-factor token sends the user back to the login page.
        token_service = pretend.stub(loads=pretend.raiser(TokenException))
        pyramid_request.session = pretend.stub(
            flash=pretend.call_recorder(lambda *a, **kw: None)
        )
        pyramid_request.route_path = pretend.call_recorder(lambda p: "redirect_to")
        pyramid_request.find_service = lambda interface, **kwargs: {
            ITokenService: token_service
        }[interface]
        result = views.recovery_code(pyramid_request)
        assert isinstance(result, HTTPSeeOther)
        assert pyramid_request.route_path.calls == [pretend.call("accounts.login")]
        assert result.headers["Location"] == "redirect_to"
        assert pyramid_request.session.flash.calls == [
            pretend.call("Invalid or expired two factor login.", queue="error")
        ]
    def test_get_returns_form(self, pyramid_request):
        # A GET with a valid token renders the recovery-code form.
        query_params = {"userid": 1}
        token_service = pretend.stub(
            loads=pretend.call_recorder(
                lambda *args, **kwargs: (
                    query_params,
                    datetime.datetime.now(datetime.UTC),
                )
            )
        )
        user_service = pretend.stub(
            find_userid=pretend.call_recorder(lambda username: 1),
            get_user=pretend.call_recorder(
                lambda userid: pretend.stub(
                    last_login=(
                        datetime.datetime.now(datetime.UTC) - datetime.timedelta(days=1)
                    )
                )
            ),
            update_user=lambda *a, **k: None,
            has_totp=lambda uid: True,
            has_webauthn=lambda uid: False,
            has_recovery_codes=lambda uid: False,
        )
        pyramid_request.find_service = lambda interface, **kwargs: {
            ITokenService: token_service,
            IUserService: user_service,
        }[interface]
        pyramid_request.query_string = pretend.stub()
        form_obj = pretend.stub()
        form_class = pretend.call_recorder(lambda d, user_service, **kw: form_obj)
        result = views.recovery_code(pyramid_request, _form_class=form_class)
        assert token_service.loads.calls == [
            pretend.call(pyramid_request.query_string, return_timestamp=True)
        ]
        assert result == {"form": form_obj}
        assert form_class.calls == [
            pretend.call(
                pyramid_request.POST,
                request=pyramid_request,
                user_id=1,
                user_service=user_service,
            )
        ]
    # Successful recovery-code login: user is remembered, session is rotated,
    # and both LoginSuccess and RecoveryCodesUsed events are recorded.
    @pytest.mark.parametrize("redirect_url", ["test_redirect_url", None])
    def test_recovery_code_auth_with_confirmed_unique_login(
        self, monkeypatch, db_request, redirect_url
    ):
        remember = pretend.call_recorder(lambda request, user_id: [("foo", "bar")])
        monkeypatch.setattr(views, "remember", remember)
        user = UserFactory.create(
            last_login=(
                datetime.datetime.now(datetime.UTC) - datetime.timedelta(days=1)
            ),
        )
        user.record_event = pretend.call_recorder(lambda *a, **kw: None)
        user_id = user.id
        query_params = {"userid": str(user_id)}
        if redirect_url:
            query_params["redirect_to"] = redirect_url
        token_service = pretend.stub(
            loads=pretend.call_recorder(
                lambda *args, **kwargs: (
                    query_params,
                    datetime.datetime.now(datetime.UTC),
                )
            )
        )
        user_service = pretend.stub(
            find_userid=pretend.call_recorder(lambda username: user_id),
            get_user=pretend.call_recorder(lambda userid: user),
            update_user=lambda *a, **k: None,
            has_recovery_codes=lambda userid: True,
            check_recovery_code=lambda userid, recovery_code_value: True,
            get_password_timestamp=lambda userid: 0,
            needs_tos_flash=lambda userid, revision: False,
            device_is_known=lambda *a: True,
        )
        new_session = {}
        db_request.find_service = lambda interface, **kwargs: {
            ITokenService: token_service,
            IUserService: user_service,
        }[interface]
        db_request.method = "POST"
        db_request.session = pretend.stub(
            items=lambda: [("a", "b"), ("foo", "bar")],
            update=new_session.update,
            invalidate=pretend.call_recorder(lambda: None),
            new_csrf_token=pretend.call_recorder(lambda: None),
            flash=pretend.call_recorder(lambda message, queue: None),
        )
        db_request.set_property(
            lambda r: str(uuid.uuid4()), name="unauthenticated_userid"
        )
        db_request.session.record_auth_timestamp = pretend.call_recorder(
            lambda *args: None
        )
        db_request.session.record_password_timestamp = lambda timestamp: None
        form_obj = pretend.stub(
            validate=pretend.call_recorder(lambda: True),
            recovery_code_value=pretend.stub(data="recovery-code"),
        )
        form_class = pretend.call_recorder(lambda d, **kw: form_obj)
        db_request.route_path = pretend.call_recorder(lambda a: "/account/two-factor")
        db_request.params = pretend.stub(
            get=pretend.call_recorder(lambda k: query_params.get(k))
        )
        result = views.recovery_code(db_request, _form_class=form_class)
        token_expected_data = {"userid": str(user_id)}
        if redirect_url:
            token_expected_data["redirect_to"] = redirect_url
        assert isinstance(result, HTTPSeeOther)
        assert result.headers["Set-Cookie"].startswith("user_id__insecure=")
        assert remember.calls == [pretend.call(db_request, str(user_id))]
        # Session must be invalidated and the CSRF token rotated on login.
        assert db_request.session.invalidate.calls == [pretend.call()]
        assert db_request.session.new_csrf_token.calls == [pretend.call()]
        assert user.record_event.calls == [
            pretend.call(
                tag=EventTag.Account.LoginSuccess,
                request=db_request,
                additional={
                    "two_factor_method": "recovery-code",
                    "two_factor_label": None,
                },
            ),
            pretend.call(
                tag=EventTag.Account.RecoveryCodesUsed,
                request=db_request,
            ),
        ]
        assert db_request.session.flash.calls == [
            pretend.call(
                "Recovery code accepted. The supplied code cannot be used again.",
                queue="success",
            )
        ]
        assert db_request.session.record_auth_timestamp.calls == [pretend.call()]
    def test_recovery_code_form_invalid(self):
        # A failed form validation re-renders the form instead of redirecting.
        token_data = {"userid": 1}
        token_service = pretend.stub(
            loads=pretend.call_recorder(
                lambda *args, **kwargs: (
                    token_data,
                    datetime.datetime.now(datetime.UTC),
                )
            )
        )
        user_service = pretend.stub(
            find_userid=pretend.call_recorder(lambda username: 1),
            get_user=pretend.call_recorder(
                lambda userid: pretend.stub(
                    last_login=(
                        datetime.datetime.now(datetime.UTC) - datetime.timedelta(days=1)
                    )
                )
            ),
            has_recovery_codes=lambda userid: True,
            check_recovery_code=lambda userid, recovery_code_value: False,
        )
        request = pretend.stub(
            POST={},
            method="POST",
            session=pretend.stub(flash=pretend.call_recorder(lambda *a, **kw: None)),
            user=None,
            route_path=pretend.call_recorder(lambda p: "redirect_to"),
            find_service=lambda interface, **kwargs: {
                ITokenService: token_service,
                IUserService: user_service,
            }[interface],
            query_string=pretend.stub(),
            # registry=pretend.stub(settings={"remember_device.days": 30}),
        )
        form_obj = pretend.stub(
            validate=pretend.call_recorder(lambda: False),
            recovery_code_value=pretend.stub(data="invalid-recovery-code"),
        )
        form_class = pretend.call_recorder(lambda *a, **kw: form_obj)
        result = views.recovery_code(request, _form_class=form_class)
        assert token_service.loads.calls == [
            pretend.call(request.query_string, return_timestamp=True)
        ]
        assert result == {"form": form_obj}
    def test_recovery_code_auth_invalid_token(self, pyramid_request):
        # Same behavior as an invalid two-factor token: back to login with a flash.
        token_service = pretend.stub(loads=pretend.raiser(TokenException))
        pyramid_request.session = pretend.stub(
            flash=pretend.call_recorder(lambda *a, **kw: None)
        )
        pyramid_request.route_path = pretend.call_recorder(lambda p: "redirect_to")
        pyramid_request.find_service = lambda interface, **kwargs: {
            ITokenService: token_service
        }[interface]
        result = views.recovery_code(pyramid_request)
        assert isinstance(result, HTTPSeeOther)
        assert pyramid_request.route_path.calls == [pretend.call("accounts.login")]
        assert result.headers["Location"] == "redirect_to"
        assert pyramid_request.session.flash.calls == [
            pretend.call("Invalid or expired two factor login.", queue="error")
        ]
    def test_recovery_code_device_not_known(self, db_request, token_service):
        # A valid code from an unrecognized device routes to login confirmation.
        user = UserFactory.create()
        token_data = {"userid": str(user.id)}
        token_service.loads = pretend.call_recorder(
            lambda *args, **kwargs: (
                token_data,
                datetime.datetime.now(datetime.UTC),
            )
        )
        user_service = pretend.stub(
            get_user=lambda userid: user,
            has_recovery_codes=lambda userid: True,
            check_recovery_code=lambda userid, recovery_code_value: True,
            device_is_known=lambda *a: False,
        )
        db_request.find_service = lambda interface, **kwargs: {
            ITokenService: token_service,
            IUserService: user_service,
        }[interface]
        db_request.route_path = pretend.call_recorder(
            lambda name: "/account/confirm-login/"
        )
        db_request.query_string = token_service.dumps(token_data)
        db_request.method = "POST"
        db_request.POST = MultiDict({"recovery_code_value": "test-recovery-code"})
        form_obj = pretend.stub(
            validate=pretend.call_recorder(lambda: True),
            recovery_code_value=pretend.stub(data="test-recovery-code"),
        )
        form_class = pretend.call_recorder(lambda d, **kw: form_obj)
        result = views.recovery_code(db_request, _form_class=form_class)
        assert isinstance(result, HTTPSeeOther)
        assert db_request.route_path.calls == [pretend.call("accounts.confirm-login")]
| TestRecoveryCode |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/log/s3_task_handler.py | {
"start": 1497,
"end": 6619
class ____(LoggingMixin):  # noqa: D101
    # Remote base URL (s3://bucket/prefix) under which run logs are stored.
    remote_base: str
    # Local directory mirrored to the remote; relative paths resolve against it.
    base_log_folder: pathlib.Path = attrs.field(converter=pathlib.Path)
    # If True, the local log directory is removed after a successful upload.
    delete_local_copy: bool
    processors = ()
    def upload(self, path: os.PathLike | str, ti: RuntimeTI):
        """Upload the given log path to the remote storage."""
        path = pathlib.Path(path)
        # Mirror the path's position under base_log_folder onto remote_base.
        if path.is_absolute():
            local_loc = path
            remote_loc = os.path.join(self.remote_base, path.relative_to(self.base_log_folder))
        else:
            local_loc = self.base_log_folder.joinpath(path)
            remote_loc = os.path.join(self.remote_base, path)
        if local_loc.is_file():
            # read log and remove old logs to get just the latest additions
            log = local_loc.read_text()
            has_uploaded = self.write(log, remote_loc)
            if has_uploaded and self.delete_local_copy:
                shutil.rmtree(os.path.dirname(local_loc))
    @cached_property
    def hook(self):
        """Returns S3Hook."""
        # use_threads=False keeps boto3 transfers single-threaded; safe in
        # forked task processes.
        return S3Hook(
            aws_conn_id=conf.get("logging", "REMOTE_LOG_CONN_ID"),
            transfer_config_args={"use_threads": False},
        )
    def s3_log_exists(self, remote_log_location: str) -> bool:
        """
        Check if remote_log_location exists in remote storage.
        :param remote_log_location: log's location in remote storage
        :return: True if location exists else False
        """
        return self.hook.check_for_key(remote_log_location)
    def s3_read(self, remote_log_location: str, return_error: bool = False) -> str:
        """
        Return the log found at the remote_log_location or '' if no logs are found or there is an error.
        :param remote_log_location: the log's location in remote storage
        :param return_error: if True, returns a string error message if an
            error occurs. Otherwise returns '' when an error occurs.
        :return: the log found at the remote_log_location
        """
        try:
            return self.hook.read_key(remote_log_location)
        except Exception as error:
            msg = f"Could not read logs from {remote_log_location} with error: {error}"
            self.log.exception(msg)
            # return error if needed
            if return_error:
                return msg
        return ""
    def write(
        self,
        log: str,
        remote_log_location: str,
        append: bool = True,
        max_retry: int = 1,
    ) -> bool:
        """
        Write the log to the remote_log_location; return `True` or fails silently and return `False`.
        :param log: the contents to write to the remote_log_location
        :param remote_log_location: the log's location in remote storage
        :param append: if False, any existing log file is overwritten. If True,
            the new log is appended to any existing logs.
        :param max_retry: Maximum number of times to retry on upload failure
        :return: whether the log is successfully written to remote location or not.
        """
        try:
            # S3 objects are immutable: "append" means read-modify-write.
            if append and self.s3_log_exists(remote_log_location):
                old_log = self.s3_read(remote_log_location)
                log = f"{old_log}\n{log}" if old_log else log
        except Exception:
            self.log.exception("Could not verify previous log to append")
            return False
        # Default to a single retry attempt because s3 upload failures are
        # rare but occasionally occur.  Multiple retry attempts are unlikely
        # to help as they usually indicate non-ephemeral errors.
        for try_num in range(1 + max_retry):
            try:
                self.hook.load_string(
                    log,
                    key=remote_log_location,
                    replace=True,
                    encrypt=conf.getboolean("logging", "ENCRYPT_S3_LOGS"),
                )
                break
            except Exception:
                if try_num < max_retry:
                    self.log.warning(
                        "Failed attempt to write logs to %s, will retry",
                        remote_log_location,
                    )
                else:
                    self.log.exception("Could not write logs to %s", remote_log_location)
                    return False
        return True
    def read(self, relative_path: str, ti: RuntimeTI) -> tuple[LogSourceInfo, LogMessages | None]:
        """Read all remote log objects under ``relative_path``.

        Returns (messages, logs): the discovered S3 URLs plus the log contents,
        or ``(messages, None)`` when nothing was found. Read errors are
        embedded in the returned log text rather than raised.
        """
        logs: list[str] = []
        messages = []
        bucket, prefix = self.hook.parse_s3_url(s3url=os.path.join(self.remote_base, relative_path))
        keys = self.hook.list_keys(bucket_name=bucket, prefix=prefix)
        if keys:
            keys = sorted(f"s3://{bucket}/{key}" for key in keys)
            # Airflow 3 expects bare URLs; older versions expect a human-readable header.
            if AIRFLOW_V_3_0_PLUS:
                messages = keys
            else:
                messages.append("Found logs in s3:")
                messages.extend(f"  * {key}" for key in keys)
            for key in keys:
                logs.append(self.s3_read(key, return_error=True))
            return messages, logs
        return messages, None
| S3RemoteLogIO |
python | scipy__scipy | scipy/fftpack/tests/test_real_transforms.py | {
"start": 15598,
"end": 15729
class ____(_TestDSTBase):
    """DST type-III tests over integer input arrays."""

    def setup_method(self):
        # Configure the shared DST harness: transform type III, 7 decimal
        # places of tolerance, integer input dtype.
        self.type = 3
        self.dec = 7
        self.rdt = int
| TestDSTIIIInt |
python | wandb__wandb | wandb/sdk/wandb_run.py | {
"start": 3616,
"end": 14680
class ____:
    """Periodically polls the background process for relevant updates.
    - check if the user has requested a stop.
    - check the network status.
    - check the run sync status.
    """
    # Each poller keeps its in-flight mailbox handle behind a lock so that
    # stop() (called from another thread) can abandon it and unblock the wait.
    _stop_status_lock: threading.Lock
    _stop_status_handle: MailboxHandle[Result] | None
    _network_status_lock: threading.Lock
    _network_status_handle: MailboxHandle[Result] | None
    _internal_messages_lock: threading.Lock
    _internal_messages_handle: MailboxHandle[Result] | None
    def __init__(
        self,
        run_id: str,
        interface: InterfaceBase,
        settings: Settings,
        stop_polling_interval: int = 15,
        retry_polling_interval: int = 5,
        internal_messages_polling_interval: int = 10,
    ) -> None:
        """Create the three poller threads (not started until start())."""
        self._run_id = run_id
        self._interface = interface
        self._stop_polling_interval = stop_polling_interval
        self._retry_polling_interval = retry_polling_interval
        self._internal_messages_polling_interval = internal_messages_polling_interval
        self._settings = settings
        # Set by stop()/join() to ask all poller loops to exit.
        self._join_event = threading.Event()
        self._stop_status_lock = threading.Lock()
        self._stop_status_handle = None
        self._stop_thread = threading.Thread(
            target=self.check_stop_status,
            name="ChkStopThr",
            daemon=True,
        )
        self._network_status_lock = threading.Lock()
        self._network_status_handle = None
        self._network_status_thread = threading.Thread(
            target=self.check_network_status,
            name="NetStatThr",
            daemon=True,
        )
        self._internal_messages_lock = threading.Lock()
        self._internal_messages_handle = None
        self._internal_messages_thread = threading.Thread(
            target=self.check_internal_messages,
            name="IntMsgThr",
            daemon=True,
        )
    def start(self) -> None:
        """Start all three polling threads."""
        self._stop_thread.start()
        self._network_status_thread.start()
        self._internal_messages_thread.start()
    @staticmethod
    def _abandon_status_check(
        lock: threading.Lock,
        handle: MailboxHandle[Result] | None,
    ):
        # Abandoning the handle wakes up any thread blocked in wait_or().
        with lock:
            if handle:
                handle.abandon()
    def _loop_check_status(
        self,
        *,
        lock: threading.Lock,
        set_handle: Any,
        timeout: int,
        request: Any,
        process: Any,
    ) -> None:
        """Generic poll loop: issue `request`, wait up to `timeout`, `process` results.

        Publishes the in-flight handle via `set_handle` under `lock` so that
        stop() can abandon it, and exits when the join event is set or the
        mailbox/service goes away.
        """
        local_handle: MailboxHandle[Result] | None = None
        join_requested = False
        while not join_requested:
            # Start of this polling cycle; used below to keep a steady cadence.
            time_probe = time.monotonic()
            if not local_handle:
                try:
                    local_handle = request()
                except MailboxClosedError:
                    # This can happen if the service process dies.
                    break
            assert local_handle
            # Publish the handle under the lock; re-check the join event there
            # so stop() cannot race past an unpublished handle.
            with lock:
                if self._join_event.is_set():
                    break
                set_handle(local_handle)
            try:
                result = local_handle.wait_or(timeout=timeout)
            except HandleAbandonedError:
                # This can happen if the service process dies.
                break
            except TimeoutError:
                result = None
            with lock:
                set_handle(None)
            if result:
                process(result)
                local_handle = None
            # Sleep out the remainder of the interval, waking early on stop().
            time_elapsed = time.monotonic() - time_probe
            wait_time = max(timeout - time_elapsed, 0)
            join_requested = self._join_event.wait(timeout=wait_time)
    def check_network_status(self) -> None:
        """Poll for HTTP status updates and surface them to the terminal."""
        def _process_network_status(result: Result) -> None:
            network_status = result.response.network_status_response
            for hr in network_status.network_responses:
                if (
                    hr.http_status_code == 200 or hr.http_status_code == 0
                ):  # we use 0 for non-http errors (eg wandb errors)
                    wandb.termlog(f"{hr.http_response_text}")
                else:
                    wandb.termlog(
                        f"{hr.http_status_code} encountered ({hr.http_response_text.rstrip()}), retrying request"
                    )
        with wb_logging.log_to_run(self._run_id):
            try:
                self._loop_check_status(
                    lock=self._network_status_lock,
                    set_handle=lambda x: setattr(self, "_network_status_handle", x),
                    timeout=self._retry_polling_interval,
                    request=self._interface.deliver_network_status,
                    process=_process_network_status,
                )
            except BrokenPipeError:
                self._abandon_status_check(
                    self._network_status_lock,
                    self._network_status_handle,
                )
    def check_stop_status(self) -> None:
        """Poll for a user-requested stop and interrupt the main thread if seen."""
        def _process_stop_status(result: Result) -> None:
            from wandb.agents import pyagent
            stop_status = result.response.stop_status_response
            if stop_status.run_should_stop:
                # TODO(frz): This check is required
                # until WB-3606 is resolved on server side.
                if not pyagent.is_running():  # type: ignore
                    interrupt.interrupt_main()
                    return
        with wb_logging.log_to_run(self._run_id):
            try:
                self._loop_check_status(
                    lock=self._stop_status_lock,
                    set_handle=lambda x: setattr(self, "_stop_status_handle", x),
                    timeout=self._stop_polling_interval,
                    request=self._interface.deliver_stop_status,
                    process=_process_stop_status,
                )
            except BrokenPipeError:
                self._abandon_status_check(
                    self._stop_status_lock,
                    self._stop_status_handle,
                )
    def check_internal_messages(self) -> None:
        """Poll for internal warning messages and print them unless silenced."""
        def _process_internal_messages(result: Result) -> None:
            # Respect the user's verbosity settings before printing anything.
            if (
                not self._settings.show_warnings
                or self._settings.quiet
                or self._settings.silent
            ):
                return
            internal_messages = result.response.internal_messages_response
            for msg in internal_messages.messages.warning:
                wandb.termwarn(msg, repeat=False)
        with wb_logging.log_to_run(self._run_id):
            try:
                self._loop_check_status(
                    lock=self._internal_messages_lock,
                    set_handle=lambda x: setattr(self, "_internal_messages_handle", x),
                    timeout=self._internal_messages_polling_interval,
                    request=self._interface.deliver_internal_messages,
                    process=_process_internal_messages,
                )
            except BrokenPipeError:
                self._abandon_status_check(
                    self._internal_messages_lock,
                    self._internal_messages_handle,
                )
    def stop(self) -> None:
        """Signal all pollers to exit and abandon any in-flight handles."""
        self._join_event.set()
        self._abandon_status_check(
            self._stop_status_lock,
            self._stop_status_handle,
        )
        self._abandon_status_check(
            self._network_status_lock,
            self._network_status_handle,
        )
        self._abandon_status_check(
            self._internal_messages_lock,
            self._internal_messages_handle,
        )
    def join(self) -> None:
        """Stop the pollers and wait for all three threads to finish."""
        self.stop()
        self._stop_thread.join()
        self._network_status_thread.join()
        self._internal_messages_thread.join()
# Typing helpers for the decorators below: _P captures the wrapped method's
# parameters, _T its return type.
_P = ParamSpec("_P")
_T = TypeVar("_T")
def _log_to_run(
    func: Callable[Concatenate[Run, _P], _T],
) -> Callable[Concatenate[Run, _P], _T]:
    """Decorate a Run method to set the run ID in the logging context.
    Any logs during the execution of the method go to the run's log file
    and not to other runs' log files.
    This is meant for use on all public methods and some callbacks. Private
    methods can be assumed to be called from some public method somewhere.
    The general rule is to use it on methods that can be called from a
    context that isn't specific to this run (such as all user code or
    internal methods that aren't run-specific).
    """
    @functools.wraps(func)
    def wrapper(self: Run, *args: _P.args, **kwargs: _P.kwargs) -> _T:
        # In "attach" usage, many properties of the Run are not initially
        # populated, so fall back to the attach ID when _settings is absent.
        run_id = (
            self._settings.run_id if hasattr(self, "_settings") else self._attach_id
        )
        with wb_logging.log_to_run(run_id):
            return func(self, *args, **kwargs)
    return wrapper
_is_attaching: str = ""
def _attach(
func: Callable[Concatenate[Run, _P], _T],
) -> Callable[Concatenate[Run, _P], _T]:
"""Decorate a Run method to auto-attach when in a new process.
When in a forked process or using a pickled Run instance, this automatically
connects to the service process to "attach" to the existing run.
"""
@functools.wraps(func)
def wrapper(self: Run, *args: _P.args, **kwargs: _P.kwargs) -> _T:
global _is_attaching
# The _attach_id attribute is only None when running in the "disable
# service" mode.
#
# Since it is set early in `__init__` and included in the run's pickled
# state, the attribute always exists.
is_using_service = self._attach_id is not None
# The _attach_pid attribute is not pickled, so it might not exist.
# It is set when the run is initialized.
attach_pid = getattr(self, "_attach_pid", None)
if is_using_service and attach_pid != os.getpid():
if _is_attaching:
raise RuntimeError(
f"Trying to attach `{func.__name__}`"
+ f" while in the middle of attaching `{_is_attaching}`"
)
_is_attaching = func.__name__
try:
wandb._attach(run=self) # type: ignore
finally:
_is_attaching = ""
return func(self, *args, **kwargs)
return wrapper
def _raise_if_finished(
func: Callable[Concatenate[Run, _P], _T],
) -> Callable[Concatenate[Run, _P], _T]:
"""Decorate a Run method to raise an error after the run is finished."""
@functools.wraps(func)
def wrapper_fn(self: Run, *args: _P.args, **kwargs: _P.kwargs) -> _T:
if not getattr(self, "_is_finished", False):
return func(self, *args, **kwargs)
message = (
f"Run ({self.id}) is finished. The call to"
f" `{func.__name__}` will be ignored."
f" Please make sure that you are using an active run."
)
raise UsageError(message)
return wrapper_fn
@dataclass
| RunStatusChecker |
python | PrefectHQ__prefect | tests/test_flows.py | {
"start": 168450,
"end": 172837
} | class ____:
def test_grouped_rollback_behavior(self):
data1, data2 = {}, {}
@task
def task1():
pass
@task1.on_rollback
def rollback(txn):
data1["called"] = True
@task
def task2():
pass
@task2.on_rollback
def rollback2(txn):
data2["called"] = True
@flow
def main():
with transaction():
task1()
task2()
raise ValueError("oopsie")
main(return_state=True)
assert data2["called"] is True
assert data1["called"] is True
def test_isolated_shared_state_on_txn_between_tasks(self):
data1, data2 = {}, {}
@task
def task1():
get_transaction().set("task", 1)
@task1.on_rollback
def rollback(txn):
data1["hook"] = txn.get("task")
@task
def task2():
get_transaction().set("task", 2)
@task2.on_rollback
def rollback2(txn):
data2["hook"] = txn.get("task")
@flow
def main():
with transaction():
task1()
task2()
raise ValueError("oopsie")
main(return_state=True)
assert data2["hook"] == 2
assert data1["hook"] == 1
def test_task_failure_causes_previous_to_rollback(self):
data1, data2 = {}, {}
@task
def task1():
pass
@task1.on_rollback
def rollback(txn):
data1["called"] = True
@task
def task2():
raise RuntimeError("oopsie")
@task2.on_rollback
def rollback2(txn):
data2["called"] = True
@flow
def main():
with transaction():
task1()
task2()
main(return_state=True)
assert "called" not in data2
assert data1["called"] is True
def test_task_doesnt_persist_prior_to_commit(self, tmp_path):
result_storage = LocalFileSystem(basepath=tmp_path)
result_storage.save("txn-results", _sync=True)
@task(result_storage=result_storage, result_storage_key="task1-result")
def task1():
pass
@task(result_storage=result_storage, result_storage_key="task2-result")
def task2():
raise RuntimeError("oopsie")
@flow
def main():
with transaction():
task1()
task2()
main(return_state=True)
with pytest.raises(ValueError, match="does not exist"):
result_storage.read_path("task1-result", _sync=True)
def test_task_persists_only_at_commit(self, tmp_path):
result_storage = LocalFileSystem(basepath=tmp_path)
result_storage.save("moar-results", _sync=True)
@task(
result_storage=result_storage,
result_storage_key="task1-result-A",
persist_result=True,
)
def task1():
return dict(some="data")
@task(
result_storage=result_storage,
result_storage_key="task2-result-B",
persist_result=True,
)
def task2():
pass
@flow
def main():
retval = None
with transaction():
task1()
try:
result_storage.read_path("task1-result-A", _sync=True)
except ValueError as exc:
retval = exc
task2()
return retval
val = main()
assert isinstance(val, ValueError)
assert "does not exist" in str(val)
content = result_storage.read_path("task1-result-A", _sync=True)
record = ResultRecord.deserialize(content)
assert record.result == {"some": "data"}
def test_commit_isnt_called_on_rollback(self):
data = {}
@task
def task1():
pass
@task1.on_commit
def rollback(txn):
data["called"] = True
@task
def task2():
raise ValueError("oopsie")
@flow
def main():
with transaction(None):
task1()
task2()
main(return_state=True)
assert data == {}
| TestTransactions |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/service/test_base.py | {
"start": 5059,
"end": 10883
} | class ____:
"""Test tf.data service cluster."""
def __init__(
self,
num_workers,
dispatcher_port=0,
work_dir=TMP_WORK_DIR,
fault_tolerant_mode=True,
job_gc_check_interval_ms=TEST_JOB_GC_CHECK_INTERNAL_MS,
job_gc_timeout_ms=None,
worker_timeout_ms=TEST_WORKER_TIMEOUT_MS,
worker_shutdown_quiet_period_ms=0,
snapshot_max_chunk_size_bytes=TEST_SNAPSHOT_MAX_CHUNK_SIZE_BYTES,
worker_max_concurrent_snapshots=0,
start=True,
protocol=PROTOCOL,
data_transfer_protocol=None,
):
"""Creates a tf.data service test cluster.
Args:
num_workers: The number of workers to initially add to the cluster.
dispatcher_port: The port to use for the dispatcher.
work_dir: The work directory to use for the dispatcher. If set to
`TMP_WORK_DIR`, the cluster will create a new temporary directory to use
as the work directory. If set to `NO_WORK_DIR`, no work directory will
be used.
fault_tolerant_mode: Whether the dispatcher should write its state to a
journal so that it can recover from restarts.
job_gc_check_interval_ms: How often the dispatcher should scan through to
delete old and unused jobs, in milliseconds.
job_gc_timeout_ms: How long a job needs to be unused before it becomes a
candidate for garbage collection, in milliseconds.
worker_timeout_ms: How long to wait for a worker to heartbeat before
considering it missing, in milliseconds.
worker_shutdown_quiet_period_ms: When shutting down a worker, how long to
wait for the gRPC server to process the final requests.
snapshot_max_chunk_size_bytes: The maximum size of a distributed snapshot
chunk file.
worker_max_concurrent_snapshots: The maximum number of snapshots a worker
can concurrently process.
start: Whether to immediately start the servers in the cluster. If
`False`, the servers can be started later by calling
`start_dispatcher()` and `start_workers()`.
protocol: The protocol to use for communicating with the tf.data service,
e.g. "grpc".
data_transfer_protocol: (Optional.) The protocol to use for transferring
data with the tf.data service.
"""
if work_dir == TMP_WORK_DIR:
work_dir = tempfile.mkdtemp(dir=googletest.GetTempDir())
self._worker_shutdown_quiet_period_ms = worker_shutdown_quiet_period_ms
self._snapshot_max_chunk_size_bytes = snapshot_max_chunk_size_bytes
self._protocol = protocol
self._data_transfer_protocol = data_transfer_protocol
self._job_gc_check_interval_ms = job_gc_check_interval_ms
self._job_gc_timeout_ms = job_gc_timeout_ms
self._worker_timeout_ms = worker_timeout_ms
self._worker_max_concurrent_snapshots = worker_max_concurrent_snapshots
self.dispatcher = server_lib.DispatchServer(
server_lib.DispatcherConfig(
port=dispatcher_port,
work_dir=work_dir,
protocol=protocol,
fault_tolerant_mode=fault_tolerant_mode,
job_gc_check_interval_ms=job_gc_check_interval_ms,
job_gc_timeout_ms=job_gc_timeout_ms,
worker_timeout_ms=worker_timeout_ms,
worker_max_concurrent_snapshots=worker_max_concurrent_snapshots,
),
start=start,
)
self.workers = []
for _ in range(num_workers):
self.add_worker(start=start)
def dispatcher_address(self):
return self.dispatcher.target.split("://")[1]
def add_worker(self, start=True):
worker = TestWorker(
self.dispatcher_address(),
self._worker_shutdown_quiet_period_ms,
self._protocol,
self._data_transfer_protocol,
snapshot_max_chunk_size_bytes=self._snapshot_max_chunk_size_bytes,
)
if start:
worker.start()
self.workers.append(worker)
def start_dispatcher(self):
self.dispatcher.start()
def start_workers(self):
for worker in self.workers:
worker.start()
def stop_dispatcher(self):
# pylint: disable=protected-access
self.dispatcher._stop()
def restart_worker(self, index):
self.workers[index].restart()
def stop_worker(self, index):
self.workers[index].stop()
def stop_workers(self):
for worker in self.workers:
worker.stop()
# pylint: disable=protected-access
def restart_dispatcher(self):
"""Stops `dispatcher` and creates a new dispatcher with the same port.
Restarting is supported only when the dispatcher is configured with
`fault_tolerant_mode=True`.
"""
if not self.dispatcher._config.fault_tolerant_mode:
raise ValueError(
"Trying to restart the dispatcher without fault-tolerance.")
port = int(self.dispatcher_address().split(":")[1])
self.dispatcher._stop()
self.dispatcher = server_lib.DispatchServer(
server_lib.DispatcherConfig(
port=port,
work_dir=self.dispatcher._config.work_dir,
protocol=self._protocol,
fault_tolerant_mode=self.dispatcher._config.fault_tolerant_mode,
job_gc_check_interval_ms=self._job_gc_check_interval_ms,
job_gc_timeout_ms=self._job_gc_timeout_ms,
worker_timeout_ms=self._worker_timeout_ms,
worker_max_concurrent_snapshots=
self._worker_max_concurrent_snapshots,
)
)
def num_registered_workers(self):
return self.dispatcher._num_workers()
def num_tasks_on_workers(self):
return sum(worker.num_tasks() for worker in self.workers)
def snapshot_streams(self, path):
return self.dispatcher._snapshot_streams(path)
def __del__(self):
# Destroy workers before the dispatcher for clean shutdown.
self.workers.clear()
del self.dispatcher
| TestCluster |
python | plotly__plotly.py | plotly/graph_objs/splom/marker/colorbar/_tickfont.py | {
"start": 233,
"end": 9944
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "splom.marker.colorbar"
_path_str = "splom.marker.colorbar.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.splom.marker.c
olorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.splom.marker.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.splom.marker.colorbar.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | wandb__wandb | tests/unit_tests/test_artifacts/test_storage.py | {
"start": 16016,
"end": 18159
} | class ____(StoragePolicy):
@classmethod
def name(cls) -> str:
return "UnfinishedStoragePolicy"
def test_storage_policy_incomplete():
policy = StoragePolicy.lookup_by_name("UnfinishedStoragePolicy")
assert policy is UnfinishedStoragePolicy
with raises(ValueError, match="Failed to find storage policy"):
StoragePolicy.lookup_by_name("NotAStoragePolicy")
def test_storage_handler_incomplete():
class UnfinishedStorageHandler(_BaseStorageHandler):
pass
# Instantiation should fail if the StorageHandler impl doesn't fully implement all abstract methods.
with raises(TypeError):
UnfinishedStorageHandler()
class UnfinishedSingleStorageHandler(StorageHandler):
pass
with raises(TypeError):
UnfinishedSingleStorageHandler()
def test_unwritable_staging_dir(monkeypatch):
# Use a non-writable directory as the staging directory.
# CI just doesn't care about permissions, so we're patching os.makedirs 🙃
def nope(*args, **kwargs):
raise OSError(13, "Permission denied")
monkeypatch.setattr(os, "makedirs", nope)
with raises(PermissionError, match="WANDB_DATA_DIR"):
get_staging_dir()
def test_invalid_upload_policy():
path = "foo/bar"
artifact = wandb.Artifact("test", type="dataset")
with raises(ValueError):
artifact.add_file(local_path=path, name="file.json", policy="tmp")
with raises(ValueError):
artifact.add_dir(local_path=path, policy="tmp")
@mark.parametrize(
"storage_region",
[
None,
"coreweave-us",
"coreweave-404", # local validation won't check against server for actual supported regions
],
)
def test_artifact_with_valid_storage_region(storage_region: str):
wandb.Artifact("test", type="dataset", storage_region=storage_region)
@mark.parametrize(
"storage_region",
[
"",
" ",
123,
],
)
def test_artifact_with_invalid_storage_region(storage_region: Any):
with raises(ValidationError):
wandb.Artifact("test", type="dataset", storage_region=storage_region)
| UnfinishedStoragePolicy |
python | sqlalchemy__sqlalchemy | test/orm/test_cascade.py | {
"start": 80992,
"end": 83778
} | class ____(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_mappers(cls):
addresses, Dingaling, User, dingalings, Address, users = (
cls.tables.addresses,
cls.classes.Dingaling,
cls.classes.User,
cls.tables.dingalings,
cls.classes.Address,
cls.tables.users,
)
cls.mapper_registry.map_imperatively(Address, addresses)
cls.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, backref="user")},
)
cls.mapper_registry.map_imperatively(
Dingaling,
dingalings,
properties={
"address": relationship(Address, backref="dingalings")
},
)
def test_o2m_basic(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
u1 = User(name="u1")
sess.add(u1)
a1 = Address(email_address="a1")
a1.user = u1
assert a1 not in sess
def test_o2m_commit_warns(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
u1 = User(name="u1")
sess.add(u1)
a1 = Address(email_address="a1")
a1.user = u1
assert_warns_message(sa_exc.SAWarning, "not in session", sess.commit)
assert a1 not in sess
def test_o2m_on_backref_no_cascade(self):
Dingaling, Address = self.classes.Dingaling, self.classes.Address
sess = fixture_session()
a1 = Address(email_address="a1")
sess.add(a1)
d1 = Dingaling()
d1.address = a1
assert d1 in a1.dingalings
assert d1 not in sess
def test_m2o_basic(self):
Dingaling, Address = self.classes.Dingaling, self.classes.Address
sess = fixture_session()
a1 = Address(email_address="a1")
d1 = Dingaling()
sess.add(d1)
a1.dingalings.append(d1)
assert a1 not in sess
def test_m2o_on_backref_no_cascade(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
a1 = Address(email_address="a1")
sess.add(a1)
u1 = User(name="u1")
u1.addresses.append(a1)
assert u1 not in sess
def test_m2o_commit_no_cascade(self):
Dingaling, Address = self.classes.Dingaling, self.classes.Address
sess = fixture_session()
a1 = Address(email_address="a1")
d1 = Dingaling()
sess.add(d1)
a1.dingalings.append(d1)
assert a1 not in sess
assert_warns_message(sa_exc.SAWarning, "not in session", sess.commit)
| NoBackrefCascadeTest |
python | pytorch__pytorch | torch/profiler/profiler.py | {
"start": 1324,
"end": 2538
} | class ____(json.JSONEncoder):
"""
Json encoder for numpy types (np.int, np.float, np.array etc.)
Returns default encoder if numpy is not available
"""
def default(self, obj):
"""Encode NumPy types to JSON"""
try:
import numpy as np
except ImportError:
return json.JSONEncoder.default(self, obj)
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return json.JSONEncoder.default(self, obj)
def supported_activities():
"""
Returns a set of supported profiler tracing activities.
Note: profiler uses CUPTI library to trace on-device CUDA kernels.
In case when CUDA is enabled but CUPTI is not available, passing
``ProfilerActivity.CUDA`` to profiler results in using the legacy CUDA
profiling code (same as in the legacy ``torch.autograd.profiler``).
This, in turn, results in including CUDA time in the profiler table output,
but not in the JSON trace.
"""
return torch.autograd._supported_activities()
| _NumpyEncoder |
python | kamyu104__LeetCode-Solutions | Python/rotating-the-box.py | {
"start": 33,
"end": 579
} | class ____(object):
def rotateTheBox(self, box):
"""
:type box: List[List[str]]
:rtype: List[List[str]]
"""
result = [['.']*len(box) for _ in xrange(len(box[0]))]
for i in xrange(len(box)):
k = len(box[0])-1
for j in reversed(xrange(len(box[0]))):
if box[i][j] == '.':
continue
if box[i][j] == '*':
k = j
result[k][-1-i] = box[i][j]
k -= 1
return result
| Solution |
python | scipy__scipy | scipy/signal/tests/test_windows.py | {
"start": 1201,
"end": 1987
} | class ____:
def test_basic(self, xp):
xp_assert_close(windows.barthann(6, sym=True, xp=xp),
xp.asarray([0, 0.35857354213752, 0.8794264578624801,
0.8794264578624801, 0.3585735421375199, 0], dtype=xp.float64),
rtol=1e-15, atol=1e-15)
xp_assert_close(windows.barthann(7, xp=xp),
xp.asarray([0, 0.27, 0.73, 1.0, 0.73, 0.27, 0],
dtype=xp.float64),
rtol=1e-15, atol=1e-15)
xp_assert_close(windows.barthann(6, False, xp=xp),
xp.asarray([0, 0.27, 0.73, 1.0, 0.73, 0.27], dtype=xp.float64),
rtol=1e-15, atol=1e-15)
@make_xp_test_case(windows.bartlett)
| TestBartHann |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 257521,
"end": 259540
} | class ____(_PrintableStructure):
_fields_ = [
('version', c_uint),
('requestedProfilesMask', c_nvmlMask255_t),
]
def __init__(self):
super(c_nvmlWorkloadPowerProfileRequestedProfiles_v1_t, self).__init__(version=nvmlWorkloadPowerProfileRequestedProfiles_v1)
def nvmlDeviceWorkloadPowerProfileGetProfilesInfo(device, profilesInfo):
fn = _nvmlGetFunctionPointer("nvmlDeviceWorkloadPowerProfileGetProfilesInfo")
ret = fn(device, profilesInfo)
_nvmlCheckReturn(ret)
return NVML_SUCCESS
def nvmlDeviceWorkloadPowerProfileGetCurrentProfiles(device, currentProfiles):
fn = _nvmlGetFunctionPointer("nvmlDeviceWorkloadPowerProfileGetCurrentProfiles")
ret = fn(device, currentProfiles)
_nvmlCheckReturn(ret)
return NVML_SUCCESS
def nvmlDeviceWorkloadPowerProfileSetRequestedProfiles(device, requestedProfiles):
fn = _nvmlGetFunctionPointer("nvmlDeviceWorkloadPowerProfileSetRequestedProfiles")
ret = fn(device, requestedProfiles)
_nvmlCheckReturn(ret)
return NVML_SUCCESS
def nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(device, requestedProfiles):
fn = _nvmlGetFunctionPointer("nvmlDeviceWorkloadPowerProfileClearRequestedProfiles")
ret = fn(device, requestedProfiles)
_nvmlCheckReturn(ret)
return NVML_SUCCESS
def nvmlDeviceGetNvlinkSupportedBwModes(device, supportedBwModes):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetNvlinkSupportedBwModes")
ret = fn(device, supportedBwModes)
_nvmlCheckReturn(ret)
return NVML_SUCCESS
def nvmlDeviceGetNvlinkBwMode(device, getBwMode):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetNvlinkBwMode")
ret = fn(device, getBwMode)
_nvmlCheckReturn(ret)
return NVML_SUCCESS
def nvmlDeviceSetNvlinkBwMode(device, setBwMode):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetNvlinkBwMode")
ret = fn(device, setBwMode)
_nvmlCheckReturn(ret)
return NVML_SUCCESS
nvmlDramEncryptionInfo_v1 = 0x01000008
| c_nvmlWorkloadPowerProfileRequestedProfiles_v1_t |
python | numpy__numpy | numpy/_core/tests/test_unicode.py | {
"start": 2761,
"end": 2874
} | class ____(CreateZeros):
"""Check the creation of zero-valued arrays (size 1)"""
ulen = 1
| TestCreateZeros_1 |
python | cookiecutter__cookiecutter | tests/test_prompt.py | {
"start": 2428,
"end": 14121
} | class ____:
"""Class to unite user prompt related tests."""
@pytest.mark.parametrize(
'context',
[
{'cookiecutter': {'full_name': 'Your Name'}},
{'cookiecutter': {'full_name': 'Řekni či napiš své jméno'}},
],
ids=['ASCII default prompt/input', 'Unicode default prompt/input'],
)
def test_prompt_for_config(self, monkeypatch, context) -> None:
"""Verify `prompt_for_config` call `read_user_variable` on text request."""
monkeypatch.setattr(
'cookiecutter.prompt.read_user_variable',
lambda _var, default, _prompts, _prefix: default,
)
cookiecutter_dict = prompt.prompt_for_config(context)
assert cookiecutter_dict == context['cookiecutter']
@pytest.mark.parametrize(
'context',
[
{
'cookiecutter': {
'full_name': 'Your Name',
'check': ['yes', 'no'],
'nothing': 'ok',
'__prompts__': {
'full_name': 'Name please',
'check': 'Checking',
},
}
},
],
ids=['ASCII default prompt/input'],
)
def test_prompt_for_config_with_human_prompts(self, monkeypatch, context) -> None:
"""Verify call `read_user_variable` on request when human-readable prompts."""
monkeypatch.setattr(
'cookiecutter.prompt.read_user_variable',
lambda _var, default, _prompts, _prefix: default,
)
monkeypatch.setattr(
'cookiecutter.prompt.read_user_yes_no',
lambda _var, default, _prompts, _prefix: default,
)
monkeypatch.setattr(
'cookiecutter.prompt.read_user_choice',
lambda _var, default, _prompts, _prefix: default,
)
cookiecutter_dict = prompt.prompt_for_config(context)
assert cookiecutter_dict == context['cookiecutter']
@pytest.mark.parametrize(
'context',
[
{
'cookiecutter': {
'full_name': 'Your Name',
'check': ['yes', 'no'],
'__prompts__': {
'check': 'Checking',
},
}
},
{
'cookiecutter': {
'full_name': 'Your Name',
'check': ['yes', 'no'],
'__prompts__': {
'full_name': 'Name please',
'check': {'__prompt__': 'Checking', 'yes': 'Yes', 'no': 'No'},
},
}
},
{
'cookiecutter': {
'full_name': 'Your Name',
'check': ['yes', 'no'],
'__prompts__': {
'full_name': 'Name please',
'check': {'no': 'No'},
},
}
},
],
)
def test_prompt_for_config_with_human_choices(self, context) -> None:
"""Test prompts when human-readable labels for user choices."""
runner = click.testing.CliRunner()
with runner.isolation(input="\n\n\n"):
cookiecutter_dict = prompt.prompt_for_config(context)
assert dict(cookiecutter_dict) == {'full_name': 'Your Name', 'check': 'yes'}
def test_prompt_for_config_dict(self, monkeypatch) -> None:
"""Verify `prompt_for_config` call `read_user_variable` on dict request."""
monkeypatch.setattr(
'cookiecutter.prompt.read_user_dict',
lambda _var, _default, _prompts, _prefix: {"key": "value", "integer": 37},
)
context: dict[str, Any] = {'cookiecutter': {'details': {}}}
cookiecutter_dict = prompt.prompt_for_config(context)
assert cookiecutter_dict == {'details': {'key': 'value', 'integer': 37}}
def test_should_render_dict(self) -> None:
"""Verify template inside dictionary variable rendered."""
context = {
'cookiecutter': {
'project_name': 'Slartibartfast',
'details': {
'{{cookiecutter.project_name}}': '{{cookiecutter.project_name}}'
},
}
}
cookiecutter_dict = prompt.prompt_for_config(context, no_input=True)
assert cookiecutter_dict == {
'project_name': 'Slartibartfast',
'details': {'Slartibartfast': 'Slartibartfast'},
}
def test_should_render_deep_dict(self) -> None:
"""Verify nested structures like dict in dict, rendered correctly."""
context = {
'cookiecutter': {
'project_name': "Slartibartfast",
'details': {
"key": "value",
"integer_key": 37,
"other_name": '{{cookiecutter.project_name}}',
"dict_key": {
"deep_key": "deep_value",
"deep_integer": 42,
"deep_other_name": '{{cookiecutter.project_name}}',
"deep_list": [
"deep value 1",
"{{cookiecutter.project_name}}",
"deep value 3",
],
},
"list_key": [
"value 1",
"{{cookiecutter.project_name}}",
"value 3",
],
},
}
}
cookiecutter_dict = prompt.prompt_for_config(context, no_input=True)
assert cookiecutter_dict == {
'project_name': "Slartibartfast",
'details': {
"key": "value",
"integer_key": "37",
"other_name": "Slartibartfast",
"dict_key": {
"deep_key": "deep_value",
"deep_integer": "42",
"deep_other_name": "Slartibartfast",
"deep_list": ["deep value 1", "Slartibartfast", "deep value 3"],
},
"list_key": ["value 1", "Slartibartfast", "value 3"],
},
}
def test_should_render_deep_dict_with_human_prompts(self) -> None:
"""Verify dict rendered correctly when human-readable prompts."""
context = {
'cookiecutter': {
'project_name': "Slartibartfast",
'details': {
"key": "value",
"integer_key": 37,
"other_name": '{{cookiecutter.project_name}}',
"dict_key": {
"deep_key": "deep_value",
},
},
'__prompts__': {'project_name': 'Project name'},
}
}
cookiecutter_dict = prompt.prompt_for_config(context, no_input=True)
assert cookiecutter_dict == {
'project_name': "Slartibartfast",
'details': {
"key": "value",
"integer_key": "37",
"other_name": "Slartibartfast",
"dict_key": {
"deep_key": "deep_value",
},
},
}
def test_internal_use_no_human_prompts(self) -> None:
"""Verify dict rendered correctly when human-readable prompts empty."""
context = {
'cookiecutter': {
'project_name': "Slartibartfast",
'__prompts__': {},
}
}
cookiecutter_dict = prompt.prompt_for_config(context, no_input=True)
assert cookiecutter_dict == {
'project_name': "Slartibartfast",
}
def test_prompt_for_templated_config(self, monkeypatch) -> None:
"""Verify Jinja2 templating works in unicode prompts."""
monkeypatch.setattr(
'cookiecutter.prompt.read_user_variable',
lambda _var, default, _prompts, _prefix: default,
)
context = {
'cookiecutter': OrderedDict(
[
('project_name', 'A New Project'),
(
'pkg_name',
'{{ cookiecutter.project_name|lower|replace(" ", "") }}',
),
]
)
}
exp_cookiecutter_dict = {
'project_name': 'A New Project',
'pkg_name': 'anewproject',
}
cookiecutter_dict = prompt.prompt_for_config(context)
assert cookiecutter_dict == exp_cookiecutter_dict
def test_dont_prompt_for_private_context_var(self, monkeypatch) -> None:
"""Verify `read_user_variable` not called for private context variables."""
monkeypatch.setattr(
'cookiecutter.prompt.read_user_variable',
lambda _var, _default: pytest.fail(
'Should not try to read a response for private context var'
),
)
context = {'cookiecutter': {'_copy_without_render': ['*.html']}}
cookiecutter_dict = prompt.prompt_for_config(context)
assert cookiecutter_dict == {'_copy_without_render': ['*.html']}
def test_should_render_private_variables_with_two_underscores(self) -> None:
"""Test rendering of private variables with two underscores.
There are three cases:
1. Variables beginning with a single underscore are private and not rendered.
2. Variables beginning with a double underscore are private and are rendered.
3. Variables beginning with anything other than underscores are not private and
are rendered.
"""
context = {
'cookiecutter': OrderedDict(
[
('foo', 'Hello world'),
('bar', 123),
('rendered_foo', '{{ cookiecutter.foo|lower }}'),
('rendered_bar', 123),
('_hidden_foo', '{{ cookiecutter.foo|lower }}'),
('_hidden_bar', 123),
('__rendered_hidden_foo', '{{ cookiecutter.foo|lower }}'),
('__rendered_hidden_bar', 123),
]
)
}
cookiecutter_dict = prompt.prompt_for_config(context, no_input=True)
assert cookiecutter_dict == OrderedDict(
[
('foo', 'Hello world'),
('bar', '123'),
('rendered_foo', 'hello world'),
('rendered_bar', '123'),
('_hidden_foo', '{{ cookiecutter.foo|lower }}'),
('_hidden_bar', 123),
('__rendered_hidden_foo', 'hello world'),
('__rendered_hidden_bar', '123'),
]
)
def test_should_not_render_private_variables(self) -> None:
"""Verify private(underscored) variables not rendered by `prompt_for_config`.
Private variables designed to be raw, same as context input.
"""
context = {
'cookiecutter': {
'project_name': 'Skip render',
'_skip_jinja_template': '{{cookiecutter.project_name}}',
'_skip_float': 123.25,
'_skip_integer': 123,
'_skip_boolean': True,
'_skip_nested': True,
}
}
cookiecutter_dict = prompt.prompt_for_config(context, no_input=True)
assert cookiecutter_dict == context['cookiecutter']
DEFAULT_PREFIX = ' [dim][1/1][/] '
| TestPrompt |
python | pyqtgraph__pyqtgraph | pyqtgraph/opengl/items/GLGradientLegendItem.py | {
"start": 178,
"end": 2992
} | class ____(GLGraphicsItem):
"""
Displays legend colorbar on the screen.
"""
def __init__(self, parentItem=None, **kwds):
"""
Arguments:
pos: position of the colorbar on the screen, from the top left corner, in pixels
size: size of the colorbar without the text, in pixels
gradient: a pg.ColorMap used to color the colorbar
labels: a dict of "text":value to display next to the colorbar.
The value corresponds to a position in the gradient from 0 to 1.
fontColor: sets the color of the texts. Accepts any single argument accepted by
:func:`~pyqtgraph.mkColor`
#Todo:
size as percentage
legend title
"""
super().__init__(parentItem=parentItem)
glopts = kwds.pop("glOptions", "additive")
self.setGLOptions(glopts)
self.pos = (10, 10)
self.size = (10, 100)
self.fontColor = QtGui.QColor(QtCore.Qt.GlobalColor.white)
# setup a default trivial gradient
stops = (0.0, 1.0)
self.gradient = ColorMap(pos=stops, color=(0.0, 1.0))
self._gradient = None
self.labels = {str(x) : x for x in stops}
self.setData(**kwds)
def setData(self, **kwds):
args = ["size", "pos", "gradient", "labels", "fontColor"]
for k in kwds.keys():
if k not in args:
raise Exception(
"Invalid keyword argument: %s (allowed arguments are %s)"
% (k, str(args))
)
self.antialias = False
for key in kwds:
value = kwds[key]
if key == 'fontColor':
value = fn.mkColor(value)
elif key == 'gradient':
self._gradient = None
setattr(self, key, value)
##todo: add title
##todo: take more gradient types
self.update()
def paint(self):
self.setupGLState()
if self._gradient is None:
self._gradient = self.gradient.getGradient()
barRect = QtCore.QRectF(self.pos[0], self.pos[1], self.size[0], self.size[1])
self._gradient.setStart(barRect.bottomLeft())
self._gradient.setFinalStop(barRect.topLeft())
painter = QtGui.QPainter(self.view())
painter.fillRect(barRect, self._gradient)
painter.setPen(self.fontColor)
for labelText, labelPosition in self.labels.items():
## todo: draw ticks, position text properly
x = 1.1 * self.size[0] + self.pos[0]
y = self.size[1] - labelPosition * self.size[1] + self.pos[1] + 8
##todo: fonts
painter.drawText(QtCore.QPointF(x, y), labelText)
painter.end()
| GLGradientLegendItem |
python | pytorch__pytorch | torch/_dynamo/variables/higher_order_ops.py | {
"start": 135027,
"end": 135693
} | class ____(TorchHigherOrderOperatorVariable):
"""
Handles torch._dynamo._trace_wrapped_higher_order_op.inner_trace
by unwrapping the higher order op and inlining through it. This op
is created by dynamo to survive through AotAutograd, then unwrapped
here in the call to dynamo from compiled autograd.
"""
def _call_function(
self,
tx: "InstructionTranslator",
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
kwargs = dict(kwargs)
fn = kwargs.pop("fn")
return fn.call_function(tx, args, kwargs)
| TraceWrappedHigherOrderOperatorVariable |
python | Netflix__metaflow | metaflow/plugins/parallel_decorator.py | {
"start": 327,
"end": 9702
} | class ____(StepDecorator):
"""
MF Add To Current
-----------------
parallel -> metaflow.metaflow_current.Parallel
Returns a namedtuple with relevant information about the parallel task.
@@ Returns
-------
Parallel
`namedtuple` with the following fields:
- main_ip (`str`)
The IP address of the control task.
- num_nodes (`int`)
The total number of tasks created by @parallel
- node_index (`int`)
The index of the current task in all the @parallel tasks.
- control_task_id (`Optional[str]`)
The task ID of the control task. Available to all tasks.
is_parallel -> bool
True if the current step is a @parallel step.
"""
name = "parallel"
defaults = {}
IS_PARALLEL = True
def __init__(self, attributes=None, statically_defined=False, inserted_by=None):
super(ParallelDecorator, self).__init__(
attributes, statically_defined, inserted_by
)
def runtime_step_cli(
self, cli_args, retry_count, max_user_code_retries, ubf_context
):
if ubf_context == UBF_CONTROL:
num_parallel = cli_args.task.ubf_iter.num_parallel
cli_args.command_options["num-parallel"] = str(num_parallel)
if os.environ.get("METAFLOW_RUNTIME_ENVIRONMENT", "local") == "local":
cli_args.command_options["split_index"] = "0"
def step_init(
self, flow, graph, step_name, decorators, environment, flow_datastore, logger
):
# TODO: This can be supported in the future, but for the time being we disable the transition as it leads to
# a UBF exception during runtime when the actual parallel-join step is conditional (switching between different join implementations from the @parallel step).
if graph[step_name].type == "split-switch":
raise MetaflowException(
"A @parallel step can not be a conditional switch step. Please add a join step after *%s*"
% step_name
)
self.environment = environment
# Previously, the `parallel` property was a hardcoded, static property within `current`.
# Whenever `current.parallel` was called, it returned a named tuple with values coming from
# environment variables, loaded dynamically at runtime.
# Now, many of these environment variables are set by compute-related decorators in `task_pre_step`.
# This necessitates ensuring the correct ordering of the `parallel` and compute decorators if we want to
# statically set the namedtuple via `current._update_env` in `task_pre_step`. Hence we avoid using
# `current._update_env` since:
# - it will set a static named tuple, resolving environment variables only once (at the time of calling `current._update_env`).
# - we cannot guarantee the order of calling the decorator's `task_pre_step` (calling `current._update_env` may not set
# the named tuple with the correct values).
# Therefore, we explicitly set the property in `step_init` to ensure the property can resolve the appropriate values in the named tuple
# when accessed at runtime.
setattr(
current.__class__,
"parallel",
property(
fget=lambda _: Parallel(
main_ip=os.environ.get("MF_PARALLEL_MAIN_IP", "127.0.0.1"),
num_nodes=int(os.environ.get("MF_PARALLEL_NUM_NODES", "1")),
node_index=int(os.environ.get("MF_PARALLEL_NODE_INDEX", "0")),
control_task_id=os.environ.get("MF_PARALLEL_CONTROL_TASK_ID", None),
)
),
)
def task_pre_step(
self,
step_name,
task_datastore,
metadata,
run_id,
task_id,
flow,
graph,
retry_count,
max_user_code_retries,
ubf_context,
inputs,
):
from metaflow import current
# Set `is_parallel` to `True` in `current` just like we
# with `is_production` in the project decorator.
current._update_env(
{
"is_parallel": True,
}
)
self.input_paths = [obj.pathspec for obj in inputs]
task_metadata_list = [
MetaDatum(
field="parallel-world-size",
value=flow._parallel_ubf_iter.num_parallel,
type="parallel-world-size",
tags=["attempt_id:{0}".format(0)],
)
]
if ubf_context == UBF_CONTROL:
# A Task's tags are now those of its ancestral Run, so we are not able
# to rely on a task's tags to indicate the presence of a control task
# so, on top of adding the tags above, we also add a task metadata
# entry indicating that this is a "control task".
#
# Here we will also add a task metadata entry to indicate "control
# task". Within the metaflow repo, the only dependency of such a
# "control task" indicator is in the integration test suite (see
# Step.control_tasks() in client API).
task_metadata_list += [
MetaDatum(
field="internal_task_type",
value=CONTROL_TASK_TAG,
type="internal_task_type",
tags=["attempt_id:{0}".format(0)],
)
]
flow._control_task_is_mapper_zero = True
metadata.register_metadata(run_id, step_name, task_id, task_metadata_list)
def task_decorate(
self, step_func, flow, graph, retry_count, max_user_code_retries, ubf_context
):
def _step_func_with_setup():
self.setup_distributed_env(flow)
step_func()
if (
ubf_context == UBF_CONTROL
and os.environ.get("METAFLOW_RUNTIME_ENVIRONMENT", "local") == "local"
):
from functools import partial
env_to_use = getattr(self.environment, "base_env", self.environment)
return partial(
_local_multinode_control_task_step_func,
flow,
env_to_use,
_step_func_with_setup,
retry_count,
",".join(self.input_paths),
)
else:
return _step_func_with_setup
def setup_distributed_env(self, flow):
# Overridden by subclasses to set up particular framework's environment.
pass
def _local_multinode_control_task_step_func(
flow, env_to_use, step_func, retry_count, input_paths
):
"""
Used as multinode UBF control task when run in local mode.
"""
from metaflow import current
from metaflow.cli_args import cli_args
from metaflow.unbounded_foreach import UBF_TASK
import subprocess
assert flow._unbounded_foreach
foreach_iter = flow._parallel_ubf_iter
if foreach_iter.__class__.__name__ != "ParallelUBF":
raise MetaflowException(
"Expected ParallelUBFIter iterator object, got:"
+ foreach_iter.__class__.__name__
)
num_parallel = foreach_iter.num_parallel
os.environ["MF_PARALLEL_NUM_NODES"] = str(num_parallel)
os.environ["MF_PARALLEL_MAIN_IP"] = "127.0.0.1"
os.environ["MF_PARALLEL_CONTROL_TASK_ID"] = str(current.task_id)
run_id = current.run_id
step_name = current.step_name
control_task_id = current.task_id
# UBF handling for multinode case
mapper_task_ids = [control_task_id]
# If we are running inside Conda, we use the base executable FIRST;
# the conda environment will then be used when runtime_step_cli is
# called. This is so that it can properly set up all the metaflow
# aliases needed.
executable = env_to_use.executable(step_name)
script = sys.argv[0]
# start workers
# TODO: Logs for worker processes are assigned to control process as of today, which
# should be fixed at some point
subprocesses = []
for node_index in range(1, num_parallel):
task_id = "%s_node_%d" % (control_task_id, node_index)
mapper_task_ids.append(task_id)
os.environ["MF_PARALLEL_NODE_INDEX"] = str(node_index)
# Override specific `step` kwargs.
kwargs = cli_args.step_kwargs
kwargs["split_index"] = str(node_index)
kwargs["run_id"] = run_id
kwargs["task_id"] = task_id
kwargs["input_paths"] = input_paths
kwargs["ubf_context"] = UBF_TASK
kwargs["retry_count"] = str(retry_count)
cmd = cli_args.step_command(executable, script, step_name, step_kwargs=kwargs)
p = subprocess.Popen(cmd)
subprocesses.append(p)
flow._control_mapper_tasks = [
"%s/%s/%s" % (run_id, step_name, mapper_task_id)
for mapper_task_id in mapper_task_ids
]
# run the step function ourselves
os.environ["MF_PARALLEL_NODE_INDEX"] = "0"
step_func()
# join the subprocesses
for p in subprocesses:
p.wait()
if p.returncode:
raise Exception("Subprocess failed, return code {}".format(p.returncode))
| ParallelDecorator |
python | walkccc__LeetCode | solutions/845. Longest Mountain in Array/845.py | {
"start": 0,
"end": 533
} | class ____:
def longestMountain(self, arr: list[int]) -> int:
ans = 0
i = 0
while i + 1 < len(arr):
while i + 1 < len(arr) and arr[i] == arr[i + 1]:
i += 1
increasing = 0
decreasing = 0
while i + 1 < len(arr) and arr[i] < arr[i + 1]:
increasing += 1
i += 1
while i + 1 < len(arr) and arr[i] > arr[i + 1]:
decreasing += 1
i += 1
if increasing > 0 and decreasing > 0:
ans = max(ans, increasing + decreasing + 1)
return ans
| Solution |
python | doocs__leetcode | solution/1800-1899/1829.Maximum XOR for Each Query/Solution2.py | {
"start": 0,
"end": 297
} | class ____:
def getMaximumXor(self, nums: List[int], maximumBit: int) -> List[int]:
ans = []
xs = reduce(xor, nums)
mask = (1 << maximumBit) - 1
for x in nums[::-1]:
k = xs ^ mask
ans.append(k)
xs ^= x
return ans
| Solution |
python | django__django | tests/utils_tests/test_module_loading.py | {
"start": 8451,
"end": 8663
} | class ____:
def __init__(self, *args, **kwargs):
self.importer = zipimporter(*args, **kwargs)
def find_spec(self, path, target=None):
return self.importer.find_spec(path, target)
| TestFinder |
python | pytorch__pytorch | test/distributed/_composable/fsdp/test_fully_shard_training.py | {
"start": 52386,
"end": 55386
} | class ____(FSDPTest):
@property
def world_size(self) -> int:
return min(8, torch.get_device_module(device_type).device_count())
def init_global_mesh(self) -> DeviceMesh:
return init_device_mesh(
device_type.type,
(2, 2, 2),
mesh_dim_names=("dp_replicate", "dp_shard", "tp"),
)
@skip_if_lt_x_gpu(8)
def test_3d_mlp_with_nd_mesh(self):
global_mesh = self.init_global_mesh()
self.run_subtests(
{
"reshard_after_forward": [False, True],
"use_activation_checkpointing": [False, True],
"mlp_dim": [3, 5, 16, 17],
"foreach": [False],
},
functools.partial(self._test_3d_mlp_with_nd_mesh, global_mesh),
)
def _test_3d_mlp_with_nd_mesh(
self,
global_mesh: DeviceMesh,
reshard_after_forward: bool,
use_activation_checkpointing: bool,
mlp_dim: int,
foreach: bool,
):
global_mesh = self.init_global_mesh()
dp_mesh, tp_mesh = global_mesh["dp_replicate", "dp_shard"], global_mesh["tp"]
dp_pg = dp_mesh._flatten().get_group() # used for `replicate()`
torch.manual_seed(42)
model = MLPStack(mlp_dim)
ref_model = copy.deepcopy(model).to(device_type)
replicate(
ref_model,
device_ids=[self.rank],
process_group=dp_pg,
)
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2, foreach=foreach)
model.parallelize(
tp_mesh,
dp_mesh,
use_activation_checkpointing,
reshard_after_forward=reshard_after_forward,
)
# Checking parameters match orig model is critical to validate .full_tensor correctly replicates the
# strided-sharded layers.
for ref_p, p in zip(ref_model.parameters(), model.parameters()):
self.assertIsInstance(p, DTensor)
self.assertEqual(ref_p, p.full_tensor())
optim = torch.optim.Adam(model.parameters(), lr=1e-2, foreach=foreach)
torch.manual_seed(42 + dp_pg.rank() + 1)
device = device_type
for iter_idx in range(10):
inp = torch.randn((8, mlp_dim), device=device)
losses: list[torch.Tensor] = []
for _model, _optim in ((ref_model, ref_optim), (model, optim)):
_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
losses.append(_model(inp).sum())
losses[-1].backward()
_optim.step()
self.assertEqual(losses[0], losses[1])
for _, p in model.named_parameters():
self.assertIsInstance(p, DTensor)
self.assertEqual(p.device_mesh.ndim, 3)
self.assertEqual(len(p.placements), 3)
self.assertEqual(
p.device_mesh.mesh_dim_names, ("dp_replicate", "dp_shard", "tp")
)
| TestFullyShardHSDP3DTraining |
python | django__django | django/contrib/auth/views.py | {
"start": 2880,
"end": 4896
} | class ____(RedirectURLMixin, FormView):
"""
Display the login form and handle the login action.
"""
form_class = AuthenticationForm
authentication_form = None
template_name = "registration/login.html"
redirect_authenticated_user = False
extra_context = None
def dispatch(self, request, *args, **kwargs):
if self.redirect_authenticated_user and self.request.user.is_authenticated:
redirect_to = self.get_success_url()
if redirect_to == self.request.path:
raise ValueError(
"Redirection loop for authenticated user detected. Check that "
"your LOGIN_REDIRECT_URL doesn't point to a login page."
)
return HttpResponseRedirect(redirect_to)
return super().dispatch(request, *args, **kwargs)
def get_default_redirect_url(self):
"""Return the default redirect URL."""
if self.next_page:
return resolve_url(self.next_page)
else:
return resolve_url(settings.LOGIN_REDIRECT_URL)
def get_form_class(self):
return self.authentication_form or self.form_class
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["request"] = self.request
return kwargs
def form_valid(self, form):
"""Security check complete. Log the user in."""
auth_login(self.request, form.get_user())
return HttpResponseRedirect(self.get_success_url())
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
current_site = get_current_site(self.request)
context.update(
{
self.redirect_field_name: self.get_redirect_url(),
"site": current_site,
"site_name": current_site.name,
**(self.extra_context or {}),
}
)
return context
@method_decorator([csrf_protect, never_cache], name="dispatch")
| LoginView |
python | allegroai__clearml | examples/frameworks/openmmlab/openmmlab_cifar10.py | {
"start": 338,
"end": 3012
} | class ____(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
self.loss_fn = nn.CrossEntropyLoss()
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def train_step(self, data, optimizer):
images, labels = data
predicts = self(images) # -> self.__call__() -> self.forward()
loss = self.loss_fn(predicts, labels)
return {'loss': loss}
if __name__ == '__main__':
model = Model()
if torch.cuda.is_available():
model = MMDataParallel(model.cuda(), device_ids=[0])
# dataset and dataloader
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
trainset = CIFAR10(
root='data', train=True, download=True, transform=transform)
trainloader = DataLoader(
trainset, batch_size=128, shuffle=True, num_workers=2)
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
logger = get_logger('mmcv')
# runner is a scheduler to manage the training
runner = EpochBasedRunner(
model,
optimizer=optimizer,
work_dir='./work_dir',
logger=logger,
max_epochs=4)
# learning rate scheduler config
lr_config = dict(policy='step', step=[2, 3])
# configuration of optimizer
optimizer_config = dict(grad_clip=None)
# configuration of saving checkpoints periodically
checkpoint_config = dict(interval=1)
# save log periodically and multiple hooks can be used simultaneously
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
dict(
type='ClearMLLoggerHook',
init_kwargs=dict(
project_name='examples',
task_name='OpenMMLab cifar10',
output_uri=True
)
),
]
)
# register hooks to runner and those hooks will be invoked automatically
runner.register_training_hooks(
lr_config=lr_config,
optimizer_config=optimizer_config,
checkpoint_config=checkpoint_config,
log_config=log_config)
runner.run([trainloader], [('train', 1)])
| Model |
python | kamyu104__LeetCode-Solutions | Python/minimum-operations-to-collect-elements.py | {
"start": 42,
"end": 483
} | class ____(object):
def minOperations(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
lookup = [False]*k
for i in reversed(xrange(len(nums))):
if nums[i] > len(lookup) or lookup[nums[i]-1]:
continue
lookup[nums[i]-1] = True
k -= 1
if not k:
break
return len(nums)-i
| Solution |
python | pytorch__pytorch | torch/_inductor/codegen/mps.py | {
"start": 5881,
"end": 14829
} | class ____(OpOverrides):
"""Implements Metal-specific overrides for ops. Base class emits Python-friendly overrides."""
@staticmethod
def to_dtype(
x: CSEVariable,
dtype: torch.dtype,
src_dtype: Optional[torch.dtype] = None,
use_compute_types: bool = True,
) -> str:
if dtype == torch.double:
log.warning(
"float64 cast requested, probably from tensorify_python_scalars"
)
return f"static_cast<float>({x})"
return f"static_cast<{DTYPE_TO_METAL[dtype]}>({x})"
@staticmethod
def to_dtype_bitcast(
x: CSEVariable, dtype: torch.dtype, src_dtype: torch.dtype
) -> str:
return f"as_type<{DTYPE_TO_METAL[dtype]}>(static_cast<{DTYPE_TO_METAL[src_dtype]}>({x}))"
@staticmethod
def constant(val: Union[bool, float, int], dtype: torch.dtype) -> str:
return value_to_metal(val)
@staticmethod
def index_expr(expr: sympy.Expr, dtype: torch.dtype) -> str:
idx_str = V.kernel.index_to_str(V.kernel.prepare_indexing(expr))
var = V.kernel.cse.generate(
V.kernel.compute, idx_str, bounds=get_bounds_index_expr(expr)
)
return ops.to_dtype(var, dtype)
@staticmethod
def masked(mask: CSEVariable, body: sympy.Expr, other: CSEVariable) -> str:
# TODO: Type annotation for other is wrong, it's often float or int
with V.kernel.mask_loads(mask, other) as new_mask:
result = body()
if result.bounds.is_bool:
other = bool(other) # type: ignore[assignment]
return ops.where(new_mask, result, other)
@staticmethod
def where(a: OpVarT, b: OpVarT, c: OpVarT) -> str:
return f"{a} ? {b} : {value_to_metal(c)}"
@staticmethod
def remainder(a: OpVarT, b: OpVarT) -> str:
return f"c10::metal::remainder({a}, {b})"
@staticmethod
def maximum(a: CSEVariable, b: CSEVariable) -> str:
typecast_a = f"static_cast<decltype({a}+{b})>({a})"
typecast_b = f"static_cast<decltype({a}+{b})>({b})"
return f"c10::metal::max({typecast_a}, {typecast_b})"
@staticmethod
def minimum(a: CSEVariable, b: CSEVariable) -> str:
typecast_a = f"static_cast<decltype({a}+{b})>({a})"
typecast_b = f"static_cast<decltype({a}+{b})>({b})"
return f"c10::metal::min({typecast_a}, {typecast_b})"
@staticmethod
def logical_or(a: CSEVariable, b: CSEVariable) -> str:
return f"{a} || {b}"
@staticmethod
def logical_and(a: CSEVariable, b: CSEVariable) -> str:
return f"{a} && {b}"
@staticmethod
def isnan(x: CSEVariable) -> str:
return f"metal::isnan({x})"
@staticmethod
def isinf(x: CSEVariable) -> str:
return f"metal::isinf({x})"
@staticmethod
def log(x: CSEVariable) -> str:
return f"metal::log({x})"
@staticmethod
def exp(x: CSEVariable) -> str:
return f"metal::exp({x})"
@staticmethod
def abs(x: CSEVariable) -> str:
return f"metal::abs({x})"
@staticmethod
def signbit(x: CSEVariable) -> str:
return f"metal::signbit({x})"
@staticmethod
def sin(x: CSEVariable) -> str:
return f"metal::precise::sin({x})"
@staticmethod
def sinc(x: CSEVariable) -> str:
return f"c10::metal::sinc({x})"
@staticmethod
def cos(x: CSEVariable) -> str:
return f"metal::precise::cos({x})"
@staticmethod
def tan(x: CSEVariable) -> str:
return f"metal::tan({x})"
@staticmethod
def asin(x: CSEVariable) -> str:
return f"metal::asin({x})"
@staticmethod
def acos(x: CSEVariable) -> str:
return f"metal::acos({x})"
@staticmethod
def atan(x: CSEVariable) -> str:
return f"metal::atan({x})"
@staticmethod
def atan2(x: CSEVariable, y: CSEVariable) -> str:
return f"::metal::atan2({x}, {y})"
@staticmethod
def sqrt(x: CSEVariable) -> str:
return f"metal::sqrt({x})"
@staticmethod
def neg(x: CSEVariable) -> str:
# TODO: Does it rely on undefined behavior?
# If so, add special logic for unsigned types
return f"static_cast<decltype({x})>(-{x})"
@staticmethod
def rsqrt(x: CSEVariable) -> str:
return f"metal::rsqrt({x})"
@staticmethod
def tanh(x: CSEVariable) -> str:
return f"metal::tanh({x})"
@staticmethod
def atanh(x: CSEVariable) -> str:
return f"metal::atanh({x})"
@staticmethod
def floordiv(a: CSEVariable, b: CSEVariable) -> str:
# a and b must be of integer type
return f"c10::metal::floor_divide({a}, {b})"
@staticmethod
def floor(x: CSEVariable) -> str:
return f"metal::floor({x})"
@staticmethod
def sign(x: CSEVariable) -> str:
return f"metal::sign({x})"
@staticmethod
def fmod(a: CSEVariable, b: CSEVariable) -> str:
typecast_a = f"static_cast<decltype({a}+{b})>({a})"
typecast_b = f"static_cast<decltype({a}+{b})>({b})"
return f"metal::fmod({typecast_a}, {typecast_b})"
@staticmethod
def trunc(x: CSEVariable) -> str:
return f"metal::trunc({x})"
@staticmethod
def truncdiv(a: CSEVariable, b: CSEVariable) -> str:
quot = f"{a} / {b}"
if (a.dtype is not None and a.dtype.is_floating_point) or (
b.dtype is not None and b.dtype.is_floating_point
):
return f"metal::trunc({quot})"
return quot
@staticmethod
def ceil(x: CSEVariable) -> str:
return f"metal::ceil({x})"
@staticmethod
def rand(seed: CSEVariable, offset: CSEVariable) -> str:
V.kernel.headers.add("random")
return f"c10::metal::rand({seed}, {offset})"
@staticmethod
def randn(seed: CSEVariable, offset: CSEVariable) -> str:
V.kernel.headers.add("random")
return f"c10::metal::randn({seed}, {offset})"
@staticmethod
def randint64(
seed: CSEVariable, offset: CSEVariable, low: CSEVariable, high: CSEVariable
) -> str:
V.kernel.headers.add("random")
return f"c10::metal::randint64({seed}, {offset}, {low}, {high})"
@staticmethod
def round(x: CSEVariable) -> str:
return f"metal::rint({x})"
@staticmethod
def pow(a: CSEVariable, b: CSEVariable) -> str:
cast_a = f"static_cast<decltype({a}+{b})>({a})"
cast_b = f"static_cast<decltype({a}+{b})>({b})"
return f"metal::pow({cast_a}, {cast_b})"
def _special_unary(self, a: CSEVariable, name: str) -> str:
V.kernel.headers.add("special_math")
return f"c10::metal::{name}({a})"
def _special_binary(self, a: CSEVariable, b: CSEVariable, name: str) -> str:
V.kernel.headers.add("special_math")
return f"c10::metal::{name}({a}, {b})"
@classmethod
def _initialize_special_ops(cls) -> None:
# Unary special ops
for name in [
"erf",
"erfinv",
"i0",
"i0e",
"i1",
"i1e",
"digamma",
"spherical_bessel_j0",
]:
setattr(cls, name, functools.partialmethod(cls._special_unary, name=name))
cls.lgamma = functools.partialmethod(cls._special_unary, name="log_gamma") # type: ignore[assignment]
# Unary special ops with forward in method name
for name in [
"bessel_j0",
"bessel_j1",
"bessel_y0",
"bessel_y1",
"modified_bessel_i0",
"modified_bessel_i1",
"modified_bessel_k0",
"modified_bessel_k1",
"scaled_modified_bessel_k0",
"scaled_modified_bessel_k1",
]:
setattr(
cls,
name,
functools.partialmethod(cls._special_unary, name=name + "_forward"),
)
# Binary special ops
for name in [
"polygamma",
"igamma",
"igammac",
"zeta",
]:
setattr(cls, name, functools.partialmethod(cls._special_binary, name=name))
# Binary special ops with forward in method name
for name in [
"chebyshev_polynomial_t",
"chebyshev_polynomial_u",
"chebyshev_polynomial_v",
"chebyshev_polynomial_w",
"hermite_polynomial_h",
"hermite_polynomial_he",
"shifted_chebyshev_polynomial_t",
"shifted_chebyshev_polynomial_u",
"shifted_chebyshev_polynomial_v",
"shifted_chebyshev_polynomial_w",
]:
setattr(
cls,
name,
functools.partialmethod(cls._special_binary, name=name + "_forward"),
)
MetalOverrides._initialize_pointwise_overrides("mps")
MetalOverrides._initialize_special_ops()
| MetalOverrides |
python | openai__openai-python | src/openai/types/beta/realtime/session_create_response.py | {
"start": 2819,
"end": 6574
} | class ____(BaseModel):
client_secret: ClientSecret
"""Ephemeral key returned by the API."""
input_audio_format: Optional[str] = None
"""The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
input_audio_transcription: Optional[InputAudioTranscription] = None
"""
Configuration for input audio transcription, defaults to off and can be set to
`null` to turn off once on. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
asynchronously and should be treated as rough guidance rather than the
representation understood by the model.
"""
instructions: Optional[str] = None
"""The default system instructions (i.e.
system message) prepended to model calls. This field allows the client to guide
the model on desired responses. The model can be instructed on response content
and format, (e.g. "be extremely succinct", "act friendly", "here are examples of
good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
into your voice", "laugh frequently"). The instructions are not guaranteed to be
followed by the model, but they provide guidance to the model on the desired
behavior.
Note that the server sets default instructions which will be used if this field
is not set and are visible in the `session.created` event at the start of the
session.
"""
max_response_output_tokens: Union[int, Literal["inf"], None] = None
"""
Maximum number of output tokens for a single assistant response, inclusive of
tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
`inf` for the maximum available tokens for a given model. Defaults to `inf`.
"""
modalities: Optional[List[Literal["text", "audio"]]] = None
"""The set of modalities the model can respond with.
To disable audio, set this to ["text"].
"""
output_audio_format: Optional[str] = None
"""The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
speed: Optional[float] = None
"""The speed of the model's spoken response.
1.0 is the default speed. 0.25 is the minimum speed. 1.5 is the maximum speed.
This value can only be changed in between model turns, not while a response is
in progress.
"""
temperature: Optional[float] = None
"""Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""
tool_choice: Optional[str] = None
"""How the model chooses tools.
Options are `auto`, `none`, `required`, or specify a function.
"""
tools: Optional[List[Tool]] = None
"""Tools (functions) available to the model."""
tracing: Optional[Tracing] = None
"""Configuration options for tracing.
Set to null to disable tracing. Once tracing is enabled for a session, the
configuration cannot be modified.
`auto` will create a trace for the session with default values for the workflow
name, group id, and metadata.
"""
turn_detection: Optional[TurnDetection] = None
"""Configuration for turn detection.
Can be set to `null` to turn off. Server VAD means that the model will detect
the start and end of speech based on audio volume and respond at the end of user
speech.
"""
voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None
"""The voice the model uses to respond.
Voice cannot be changed during the session once the model has responded with
audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
`coral`, `echo`, `sage`, `shimmer`, and `verse`.
"""
| SessionCreateResponse |
python | pypa__warehouse | tests/common/db/admin.py | {
"start": 137,
"end": 333
} | class ____(WarehouseFactory):
class Meta:
model = AdminFlag
id = factory.Faker("text", max_nb_chars=12)
description = factory.Faker("sentence")
enabled = True
| AdminFlagFactory |
python | ray-project__ray | python/ray/train/_internal/worker_group.py | {
"start": 1078,
"end": 1671
} | class ____:
"""Metadata for each worker/actor.
This information is expected to stay the same throughout the lifetime of
actor.
Args:
node_id: ID of the node this worker is on.
node_ip: IP address of the node this worker is on.
hostname: Hostname that this worker is on.
resource_ids: Map of accelerator resources
("GPU", "neuron_cores", ..) to their IDs.
pid: Process ID of this worker.
"""
node_id: str
node_ip: str
hostname: str
resource_ids: Dict[str, List[str]]
pid: int
@dataclass
| WorkerMetadata |
python | getsentry__sentry | tests/sentry/integrations/repository/notification_action/test_notification_action_notification_message_repository.py | {
"start": 924,
"end": 3884
} | class ____(BaseNotificationActionNotificationMessageRepositoryTest):
def test_returns_parent_notification_message(self) -> None:
instance = self.repository.get_parent_notification_message(
action=self.action,
group=self.group,
)
assert instance is not None
assert instance == NotificationActionNotificationMessage.from_model(
self.parent_notification_message
)
def test_returns_latest_parent_notification_message(self) -> None:
latest = NotificationMessage.objects.create(
action=self.action,
group=self.group,
message_identifier="new_later_one",
)
instance = self.repository.get_parent_notification_message(
action=self.action,
group=self.group,
)
assert instance is not None
assert instance == NotificationActionNotificationMessage.from_model(latest)
def test_returns_none_when_filter_does_not_exist(self) -> None:
different_action = self.create_action()
instance = self.repository.get_parent_notification_message(
action=different_action,
group=self.group,
)
assert instance is None
def test_when_parent_has_child(self) -> None:
child = NotificationMessage.objects.create(
action=self.action,
group=self.group,
message_identifier="456abc",
parent_notification_message=self.parent_notification_message,
)
assert child.id != self.parent_notification_message.id
instance = self.repository.get_parent_notification_message(
action=self.action,
group=self.group,
)
assert instance is not None
assert instance == NotificationActionNotificationMessage.from_model(
self.parent_notification_message
)
def test_returns_parent_notification_message_with_open_period_start(self) -> None:
open_period_start = timezone.now()
NotificationMessage.objects.create(
action=self.action,
group=self.group,
message_identifier="789xyz",
open_period_start=open_period_start,
)
latest_notification = NotificationMessage.objects.create(
action=self.action,
group=self.group,
message_identifier="789xyz",
open_period_start=open_period_start + timedelta(seconds=1),
)
instance = self.repository.get_parent_notification_message(
action=self.action,
group=self.group,
open_period_start=open_period_start + timedelta(seconds=1),
)
assert instance is not None
assert instance == NotificationActionNotificationMessage.from_model(latest_notification)
assert instance.open_period_start == open_period_start + timedelta(seconds=1)
| TestGetParentNotificationMessage |
python | google__jax | tests/lax_test.py | {
"start": 2988,
"end": 156619
} | class ____(jtu.JaxTestCase):
"""Numerical tests for LAX operations."""
@parameterized.parameters(itertools.chain.from_iterable(
jtu.sample_product_testcases(
[dict(op_name=rec.op, rng_factory=rec.rng_factory)],
shapes=itertools.chain.from_iterable(
itertools.combinations_with_replacement(shape_group, rec.nargs)
for shape_group in lax_test_util.compatible_shapes),
dtype=rec.dtypes)
for rec in lax_test_util.lax_ops()))
def testOp(self, op_name, rng_factory, shapes, dtype):
rng = rng_factory(self.rng())
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
op = getattr(lax, op_name)
self._CompileAndCheck(op, args_maker)
@parameterized.parameters(itertools.chain.from_iterable(
jtu.sample_product_testcases(
[dict(op_name=rec.op, rng_factory=rec.rng_factory, tol=rec.tol)],
shapes=itertools.chain.from_iterable(
itertools.combinations_with_replacement(shape_group, rec.nargs)
for shape_group in lax_test_util.compatible_shapes),
dtype=rec.dtypes)
for rec in lax_test_util.lax_ops()))
@jtu.ignore_warning(message="invalid value", category=RuntimeWarning)
def testOpAgainstNumpy(self, op_name, rng_factory, shapes, dtype, tol):
if (not config.enable_x64.value and op_name == "nextafter"
and dtype == np.float64):
raise SkipTest("64-bit mode disabled")
rng = rng_factory(self.rng())
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
op = getattr(lax, op_name)
numpy_op = getattr(lax_reference, op_name)
tol = tol or jtu.default_tolerance()
if jtu.test_device_matches(["tpu"]):
if dtype in (np.float32, np.complex64) and op_name in (
"acosh", "asinh", "betainc", "cos", "cosh", "digamma", "exp", "exp2", "igamma",
"igammac", "log", "log1p", "logistic", "pow", "sin", "sinh", "tan"):
tol = jtu.join_tolerance(tol, 2e-4)
elif op_name == "asinh" and dtype == np.float16:
tol = jtu.join_tolerance(tol, 1e-3)
elif op_name == "lgamma" and dtype == np.float32:
tol = jtu.join_tolerance(tol, 1e-3)
elif op_name == "pow" and dtype == np.complex128:
tol = jtu.join_tolerance(tol, 2e-15)
self._CheckAgainstNumpy(numpy_op, op, args_maker, tol=tol)
@parameterized.parameters(["logistic", "tanh"])
def testEvenFunctionGrads(self, op_name):
op = getattr(lax, op_name)
x = jnp.arange(0.0, 80.0, 1.0, dtype=jnp.float32)
high_acc_op = lambda x: op(x, accuracy=lax.AccuracyMode.HIGHEST)
grads = jax.vmap(jax.grad(high_acc_op))(x)
neg_grads = jax.vmap(jax.grad(high_acc_op))(-x)
self.assertAllClose(
grads, neg_grads, atol=jtu.default_tolerance()[np.dtype(np.float32)], rtol=0.0
)
def testExpm1Grad(self):
x = jnp.arange(-80.0, 80.0, 1.0, dtype=jnp.float32)
expected = jax.vmap(jax.grad(lambda x: lax.exp(x, accuracy=lax.AccuracyMode.HIGHEST)))(x)
actual = jax.vmap(jax.grad(lambda x: lax.expm1(x, accuracy=lax.AccuracyMode.HIGHEST)))(x)
self.assertAllClose(
actual, expected, atol=jtu.default_tolerance()[np.dtype(np.float32)], rtol=0.0
)
# TODO test shift_left, shift_right_arithmetic, shift_right_logical
@jtu.sample_product(
[dict(from_dtype=from_dtype, to_dtype=to_dtype)
for from_dtype, to_dtype in itertools.product(
[None, np.float32, np.int32, "float32", "int32"], repeat=2)],
weak_type=[False, True],
)
def testConvertElementType(self, from_dtype, to_dtype, weak_type):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng((2, 3), from_dtype)]
to_dtype_canonicalized = (
dtypes.canonicalize_dtype(to_dtype) if to_dtype is not None else None
)
op = lambda x: lax_internal._convert_element_type(
x, to_dtype_canonicalized, weak_type
)
self._CompileAndCheck(op, args_maker)
x = rng((1,), from_dtype)
out = op(x)
self.assertEqual(out.dtype, dtypes.canonicalize_dtype(to_dtype or x.dtype))
self.assertEqual(out.aval.weak_type, weak_type)
def testConvertElementTypeOOB(self):
out = lax.convert_element_type(2 ** 32, 'int32')
self.assertEqual(out, 0)
@jtu.sample_product(
[dict(from_dtype=from_dtype, to_dtype=to_dtype)
for from_dtype, to_dtype in itertools.product(
[np.float32, np.int32, "float32", "int32"], repeat=2)],
)
def testConvertElementTypeAgainstNumpy(self, from_dtype, to_dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng((2, 3), from_dtype)]
op = lambda x: lax.convert_element_type(x, to_dtype)
numpy_op = lambda x: lax_reference.convert_element_type(x, to_dtype)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
@jtu.sample_product(
from_dtype=['int4', 'uint4'] + jtu.dtypes.all_floating + jtu.dtypes.all_integer + jtu.dtypes.all_unsigned,
to_dtype=['int4', 'uint4'] + jtu.dtypes.all_floating + jtu.dtypes.all_integer + jtu.dtypes.all_unsigned,
shape = [(), (2,), (2, 3)]
)
def testBitcastConvertType(self, from_dtype, to_dtype, shape):
rng = jtu.rand_default(self.rng())
nbits_in = dtypes.itemsize_bits(from_dtype)
nbits_out = dtypes.itemsize_bits(to_dtype)
if nbits_in < nbits_out:
shape = (*shape, nbits_out // nbits_in)
args_maker = lambda: [rng(shape, from_dtype)]
jnp_op = lambda x: lax.bitcast_convert_type(x, to_dtype)
self._CompileAndCheck(jnp_op, args_maker)
# Test the shape and dtype of the output. We avoid testing the values here
# because the bitwise representation may vary from platform to platform.
out = jnp_op(*args_maker())
if nbits_in == nbits_out:
expected_shape = shape
elif nbits_in < nbits_out:
expected_shape = shape[:-1]
else:
expected_shape = (*shape, nbits_in // nbits_out)
self.assertEqual(out.dtype, to_dtype)
self.assertEqual(out.shape, expected_shape)
@jtu.sample_product(
[dict(from_dtype=from_dtype, to_dtype=to_dtype)
for from_dtype, to_dtype in itertools.product(
['int4', 'uint4', np.int8, np.uint8, np.int32, np.float16, np.float32],
repeat=2)],
shape=[(4,), (2, 4), (2, 3, 4)]
)
def testBitcastConvertTypeAgainstNumpy(self, from_dtype, to_dtype, shape):
nbits_in = dtypes.itemsize_bits(from_dtype)
nbits_out = dtypes.itemsize_bits(to_dtype)
if nbits_in < nbits_out:
shape = (*shape, nbits_out // nbits_in)
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, from_dtype)]
jnp_op = lambda x: lax.bitcast_convert_type(x, to_dtype)
np_op = lambda x: lax_reference.bitcast_convert_type(x, to_dtype)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
@jtu.sample_product(
[dict(from_dtype=from_dtype, to_dtype=to_dtype)
for from_dtype, to_dtype in itertools.product(
[np.float32, np.int32, "float32", "int32"], repeat=2)],
weak_type=[False, True],
)
def testBitcastConvertWeakType(self, from_dtype, to_dtype, weak_type):
rng = jtu.rand_default(self.rng())
x_in = lax_internal._convert_element_type(rng((2, 3), np.dtype(from_dtype)),
weak_type=weak_type)
op = lambda x: lax.bitcast_convert_type(x, to_dtype)
self.assertEqual(dtypes.is_weakly_typed(x_in), weak_type)
x_out = op(x_in)
self.assertEqual(dtypes.is_weakly_typed(x_out), False)
x_out_jit = jax.jit(op)(x_in)
self.assertEqual(dtypes.is_weakly_typed(x_out_jit), False)
@jtu.sample_product(
[dict(min_shape=min_shape, operand_shape=operand_shape, max_shape=max_shape)
for min_shape, operand_shape, max_shape in [
[(), (2, 3), ()],
[(2, 3), (2, 3), ()],
[(), (2, 3), (2, 3)],
[(2, 3), (2, 3), (2, 3)],
]],
dtype=lax_test_util.default_dtypes,
)
def testClamp(self, min_shape, operand_shape, max_shape, dtype):
rng = jtu.rand_default(self.rng())
shapes = [min_shape, operand_shape, max_shape]
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
self._CompileAndCheck(lax.clamp, args_maker)
@jtu.sample_product(
[dict(min_shape=min_shape, operand_shape=operand_shape, max_shape=max_shape)
for min_shape, operand_shape, max_shape in [
[(), (2, 3), ()],
[(2, 3), (2, 3), ()],
[(), (2, 3), (2, 3)],
[(2, 3), (2, 3), (2, 3)],
]],
dtype=lax_test_util.default_dtypes,
)
def testClampAgainstNumpy(self, min_shape, operand_shape, max_shape, dtype):
rng = jtu.rand_default(self.rng())
shapes = [min_shape, operand_shape, max_shape]
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
self._CheckAgainstNumpy(lax_reference.clamp, lax.clamp, args_maker)
@jtu.sample_product(
[dict(base_shape=shape, dim=dim) for shape in [(4,), (3, 4), (2, 3, 4)]
for dim in range(len(shape))],
num_arrs=[3],
dtype=lax_test_util.default_dtypes,
)
def testConcatenate(self, dim, base_shape, dtype, num_arrs):
rng = jtu.rand_default(self.rng())
shapes = [base_shape[:dim] + (size,) + base_shape[dim+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), range(num_arrs))]
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
op = lambda *args: lax.concatenate(args, dim)
self._CompileAndCheck(op, args_maker)
@jtu.sample_product(
[dict(base_shape=shape, dim=dim) for shape in [(4,), (3, 4), (2, 3, 4)]
for dim in range(len(shape))],
num_arrs=[3],
dtype=lax_test_util.default_dtypes,
)
def testConcatenateAgainstNumpy(self, dim, base_shape, dtype, num_arrs):
rng = jtu.rand_default(self.rng())
shapes = [base_shape[:dim] + (size,) + base_shape[dim+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), range(num_arrs))]
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
op = lambda *args: lax.concatenate(args, dim)
numpy_op = lambda *args: lax_reference.concatenate(args, dim)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
@jtu.sample_product(
[dict(base_shape=shape, axis=axis) for shape in [(4,), (3, 4), (2, 3, 4)]
for axis in range(len(shape))],
num_pieces=range(3),
dtype=lax_test_util.default_dtypes,
)
def testSplit(self, axis, base_shape, dtype, num_pieces):
sizes = jtu.rand_int(self.rng(), 5)((num_pieces + 1,), np.int64)
shape = list(base_shape)
shape[axis] = np.sum(sizes)
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.split(x, sizes, axis=axis)
def numpy_op(x):
return np.split(x, np.cumsum(sizes[:-1]), axis=axis)
self._CompileAndCheck(op, args_maker)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
def testSplitErrors(self):
with self.assertRaisesRegex(ValueError,
"Sizes passed to split must be nonnegative"):
lax.split(np.arange(5), [-1])
with self.assertRaisesRegex(ValueError, "Sum of sizes 6 must be equal"):
lax.split(np.arange(5), [6])
with self.assertRaisesRegex(ValueError, "axis 1 is out of bounds"):
lax.split(np.arange(5), sizes=(), axis=1)
@jtu.sample_product(
[
dict(lhs_shape=(b, i, 9, 10), rhs_shape=(j, i, 4, 5))
for b, i, j in itertools.product([2, 3], repeat=3)
],
dtype=lax_test_util.float_dtypes,
strides=[(1, 1), (1, 2), (2, 1)],
padding=["VALID", "SAME", "SAME_LOWER"],
)
def testConv(self, lhs_shape, rhs_shape, dtype, strides, padding):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
def fun(lhs, rhs):
return lax.conv(lhs, rhs, strides, padding)
self._CompileAndCheck(fun, args_maker)
@jtu.sample_product(
[dict(lhs_shape=(b, i, 9, 10), rhs_shape=(j, i, 4, 5))
for b, i, j in itertools.product([2, 3], repeat=3)],
[dict(dtype=dtype, preferred_element_type=preferred)
for dtype, preferred in preferred_type_combinations]
)
@jax.default_matmul_precision("float32")
def testConvPreferredElement(self, lhs_shape, rhs_shape, dtype, preferred_element_type):
if (not config.enable_x64.value and
(dtype == np.float64 or preferred_element_type == np.float64
or dtype == np.int64 or preferred_element_type == np.int64
or dtype == np.complex128 or preferred_element_type == np.complex128)):
raise SkipTest("64-bit mode disabled")
if (jtu.test_device_matches(["tpu"]) and
(dtype == np.complex128 or preferred_element_type == np.complex128)):
raise SkipTest("np.complex128 is not yet supported on TPU")
if jtu.test_device_matches(["gpu"]) and np.issubdtype(dtype, np.integer):
# TODO(b/183565702): Support integer convolutions on CPU/GPU.
raise SkipTest("Integer convolution not yet supported on GPU")
# x64 implementation is only accurate to ~float32 precision for this case.
if dtype == np.complex64 and preferred_element_type == np.complex128:
tol = 1e-5
else:
tol = {np.float64: 1e-14}
if (jtu.test_device_matches(["tpu"]) and dtype == np.float16 and
preferred_element_type == np.float32):
tol = 2e-3
if (jtu.test_device_matches(["tpu"]) and dtype == jnp.bfloat16 and
preferred_element_type == np.float32):
tol = 1e-5
rng = jtu.rand_default(self.rng())
x = rng(lhs_shape, dtype)
y = rng(rhs_shape, dtype)
# We first compute the conv when both inputs are a lower-precision type and
# preferred_element_type is a higher-precision type. We then compute results
# where the inputs are first upcast to the higher-precision type and no
# `preferred_element_type` is given. We expect the result to be extremely
# similar given the semantics of `preferred_element_type`.
result_with_preferred_type = lax.conv(
x, y, (1, 1), "VALID",
preferred_element_type=preferred_element_type)
result_with_upcast_inputs = lax.conv(
x.astype(preferred_element_type),
y.astype(preferred_element_type),
(1, 1), "VALID")
self.assertArraysAllClose(
result_with_preferred_type, result_with_upcast_inputs, rtol=tol, atol=tol)
@jtu.sample_product(
[dict(lhs_shape=(b, i, 9, 10), rhs_shape=(j, i, 4, 5))
for b, i, j in itertools.product([2, 3], repeat=3)],
dtype=lax_test_util.float_dtypes,
strides=[(1, 1), (1, 2), (2, 1)],
padding=["VALID", "SAME"],
)
def testConvAgainstNumpy(self, lhs_shape, rhs_shape, dtype, strides, padding):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
op = lambda lhs, rhs: lax.conv(lhs, rhs, strides, padding)
numpy_op = lambda lhs, rhs: lax_reference.conv(lhs, rhs, strides, padding)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
@jtu.sample_product(
[dict(lhs_shape=(b, i, 9, 10), rhs_shape=(j, i, 4, 5))
for b, i, j in itertools.product([1, 2, 3], repeat=3)],
dtype=lax_test_util.float_dtypes,
strides=[(1, 1), (1, 2), (2, 1)],
padding=[((0, 0), (0, 0)), ((1, 2), (2, 0))],
lhs_dilation=[(1, 1), (1, 2), (2, 2)],
rhs_dilation=[(1, 1), (1, 2), (2, 2)],
)
def testConvWithGeneralPadding(self, lhs_shape, rhs_shape, dtype, strides,
padding, lhs_dilation, rhs_dilation):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
def fun(lhs, rhs):
return lax.conv_with_general_padding(
lhs, rhs, strides, padding, lhs_dilation, rhs_dilation)
self._CompileAndCheck(fun, args_maker)
@jtu.sample_product(
[dict(lhs_shape=(b, i, 9, 10), rhs_shape=(j, i, 4, 5))
for b, i, j in itertools.product([1, 2, 3], repeat=3)],
dtype=[np.float32],
strides=[(1, 1), (1, 2), (2, 1)],
padding=[((0, 0), (0, 0)), ((1, 2), (2, 0))],
lhs_dilation=[(1, 1), (1, 2), (2, 2)],
rhs_dilation=[(1, 1), (1, 2), (2, 2)],
)
def testConvWithGeneralPaddingAgainstNumpy(
self, lhs_shape, rhs_shape, dtype, strides, padding, lhs_dilation,
rhs_dilation):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
def fun(lhs, rhs):
return lax.conv_with_general_padding(
lhs, rhs, strides, padding, lhs_dilation, rhs_dilation,
precision=lax.Precision.HIGHEST)
def numpy_fun(lhs, rhs):
return lax_reference.conv_with_general_padding(
lhs, rhs, strides, padding, lhs_dilation, rhs_dilation)
self._CheckAgainstNumpy(numpy_fun, fun, args_maker)
@jtu.sample_product(
[
dict(
lhs_shape=(b * batch_group_count, i * feature_group_count),
rhs_shape=(j * feature_group_count * batch_group_count, i),
batch_group_count=batch_group_count,
feature_group_count=feature_group_count,
)
for batch_group_count, feature_group_count in [(1, 1), (2, 1), (1, 2)]
for b, i, j in itertools.product([2, 3], repeat=3)
],
[dict(dimension_numbers=("NC", "OI", "NC"), perms=([0, 1], [0, 1]))],
dtype=lax_test_util.all_dtypes,
)
def testConvGeneralDilated0D(self, lhs_shape, rhs_shape, dtype,
feature_group_count, batch_group_count,
dimension_numbers, perms):
if np.issubdtype(dtype, np.integer) or np.issubdtype(dtype, np.bool_):
# TODO(b/183565702): Support integer convolutions on CPU/GPU.
if jtu.test_device_matches(["gpu"]):
raise SkipTest("Integer convolution not yet supported on GPU")
rng = jtu.rand_small(self.rng())
lhs_perm, rhs_perm = perms # permute to compatible shapes
def args_maker():
return [lax.transpose(rng(lhs_shape, dtype), lhs_perm),
lax.transpose(rng(rhs_shape, dtype), rhs_perm)]
def fun(lhs, rhs):
return lax.conv_general_dilated(
lhs, rhs, window_strides=(), padding=(),
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
batch_group_count=batch_group_count)
self._CompileAndCheck(fun, args_maker)
@jtu.sample_product(
[
dict(
lhs_shape=(b * batch_group_count, i * feature_group_count, 9, w),
rhs_shape=(j * feature_group_count * batch_group_count, i, 4, 5),
batch_group_count=batch_group_count,
feature_group_count=feature_group_count,
)
for batch_group_count, feature_group_count in [(1, 1), (2, 1), (1, 2)]
for w in [0, 10]
for b, i, j in itertools.product([2, 3], repeat=3)
],
[
dict(
dimension_numbers=("NCHW", "OIHW", "NCHW"),
perms=([0, 1, 2, 3], [0, 1, 2, 3]),
),
dict(
dimension_numbers=("NHWC", "HWIO", "NHWC"),
perms=([0, 2, 3, 1], [2, 3, 1, 0]),
),
dict(
dimension_numbers=("NCHW", "HWIO", "NHWC"),
perms=([0, 1, 2, 3], [2, 3, 1, 0]),
),
],
dtype=lax_test_util.all_dtypes,
strides=[(1, 1), (2, 1)],
padding=[((1, 2), (2, 0)), ((10, 8), (7, 13))],
lhs_dilation=[(1, 1), (1, 2), (1, 4)],
rhs_dilation=[(1, 1), (1, 2), (1, 4)],
)
def testConvGeneralDilated(self, lhs_shape, rhs_shape, dtype, strides,
padding, lhs_dilation, rhs_dilation,
feature_group_count, batch_group_count,
dimension_numbers, perms):
if np.issubdtype(dtype, np.integer) or np.issubdtype(dtype, np.bool_):
# TODO(b/183565702): Support integer convolutions on CPU/GPU.
if jtu.test_device_matches(["gpu"]):
raise SkipTest("Integer convolution not yet supported on GPU")
rng = jtu.rand_small(self.rng())
lhs_perm, rhs_perm = perms # permute to compatible shapes
def args_maker():
return [lax.transpose(rng(lhs_shape, dtype), lhs_perm),
lax.transpose(rng(rhs_shape, dtype), rhs_perm)]
def fun(lhs, rhs):
return lax.conv_general_dilated(
lhs, rhs, strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count=feature_group_count,
batch_group_count=batch_group_count)
self._CompileAndCheck(fun, args_maker)
@jax.default_matmul_precision("float32")
def testConvGeneralDilatedPatchesOverlapping1D(self):
lhs = np.array([[1]], np.float32).reshape((1, 1))
patches = lax.conv_general_dilated_patches(
lhs=lhs,
filter_shape=(),
window_strides=(),
padding='SAME'
)
self.assertAllClose(lhs, patches)
dn = ('NHC', 'OIH', 'NHC')
lhs = np.array([1, 2, 3, 4, 5], np.float32).reshape((1, -1, 1))
patches = lax.conv_general_dilated_patches(
lhs=lhs,
filter_shape=(2,),
window_strides=(2,),
padding='VALID',
dimension_numbers=dn
)
self.assertAllClose(
np.array([[1, 2],
[3, 4]], np.float32).reshape((1, 2, 2)), patches)
patches = lax.conv_general_dilated_patches(
lhs=lhs,
filter_shape=(3,),
window_strides=(1,),
padding='SAME',
dimension_numbers=dn
)
self.assertAllClose(
np.array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4],
[3, 4, 5],
[4, 5, 0]], np.float32).reshape((1, 5, 3)), patches)
patches = lax.conv_general_dilated_patches(
lhs=lhs,
filter_shape=(3,),
window_strides=(1,),
padding='SAME',
rhs_dilation=(2,),
dimension_numbers=dn
)
self.assertAllClose(
np.array([[0, 1, 3],
[0, 2, 4],
[1, 3, 5],
[2, 4, 0],
[3, 5, 0]], np.float32).reshape((1, 5, 3)), patches)
def testConvGeneralDilatedPatchesOverlapping2D(self):
lhs = np.array([[1, 2, 3],
[4, 5, 6]], np.float32).reshape((1, 2, 3, 1))
patches = lax.conv_general_dilated_patches(
lhs=lhs,
filter_shape=(2, 2),
window_strides=(1, 1),
padding='SAME',
dimension_numbers=('NHWC', 'OIHW', 'NHWC')
)
self.assertAllClose(np.array([[1, 2, 4, 5],
[2, 3, 5, 6],
[3, 0, 6, 0],
[4, 5, 0, 0],
[5, 6, 0, 0],
[6, 0, 0, 0]],
np.float32).reshape((1, 2, 3, 4)), patches)
@jtu.sample_product(
[
dict(
lhs_shape=lhs_shape,
filter_shape=filter_shape,
strides=strides,
padding=padding,
dimension_numbers=dim_nums,
)
for lhs_shape, filter_shape, strides, padding, dim_nums in [
((2, 5), (), (), [], ("NC", "OI", "CN")),
((2, 3, 4), (2,), (2,), [(0, 2)], ("CNH", "OHI", "HNC")),
(
(3, 1, 4, 5),
(1, 3),
(1, 3),
[(3, 1), (2, 2)],
("NCHW", "OIHW", "NCHW"),
),
((3, 2, 5, 6), (4, 3), (4, 3), [(5, 2), (2, 4)], None),
(
(1, 2, 3, 4),
(1, 1),
(1, 1),
[(0, 0), (0, 0)],
("NCWH", "OHWI", "CNHW"),
),
(
(1, 2, 3, 4),
(3, 2),
(1, 1),
[(0, 0), (0, 0)],
("CWHN", "HOWI", "NCHW"),
),
(
(2, 3, 4, 5, 6),
(2, 1, 3),
(2, 1, 3),
[(1, 2), (5, 3), (3, 5)],
("NHWDC", "HDIWO", "DCWNH"),
),
]
],
dtype=lax_test_util.all_dtypes,
precision=[
None,
lax.Precision.DEFAULT,
lax.Precision.HIGH,
lax.Precision.HIGHEST,
],
)
def testConvGeneralDilatedPatchesNonOverlapping(self,
lhs_shape,
filter_shape,
dtype,
strides,
padding,
dimension_numbers,
precision):
if np.issubdtype(dtype, np.integer) or np.issubdtype(dtype, np.bool_):
# TODO(b/183565702): Support integer convolutions on CPU/GPU.
if jtu.test_device_matches(["gpu"]):
raise SkipTest("Integer convolution not yet supported on GPU")
rng = jtu.rand_small(self.rng())
lhs = rng(lhs_shape, dtype)
if dimension_numbers is None:
lhs_spec, rhs_spec, out_spec = "NCHW", "OIHW", "NCHW"
else:
lhs_spec, rhs_spec, out_spec = dimension_numbers
filter_spec = ''.join(c for c in rhs_spec if c not in ('I', 'O'))
patches_spec = out_spec.replace('C', 'C' + filter_spec.lower())
full_padding = []
for c in lhs_spec:
if c in ('N', 'C'):
full_padding += [(0, 0)]
else:
full_padding += [padding[filter_spec.index(c)]]
lhs_padded = np.pad(lhs, full_padding, 'constant')
out = lax.transpose(lhs_padded, [lhs_spec.index(c) for c in out_spec])
patches = lax.conv_general_dilated_patches(
lhs=lhs,
filter_shape=filter_shape,
window_strides=strides,
padding=padding,
dimension_numbers=dimension_numbers,
precision=precision
)
source = []
# Test that output spatial shape is factored into `#patches x patch_size`.
for c in out_spec:
out_c = out.shape[out_spec.index(c)]
patch_c = patches.shape[out_spec.index(c)]
if c == 'N':
self.assertEqual(out_c, patch_c)
elif c == 'C':
self.assertEqual(out_c * math.prod(filter_shape), patch_c)
else:
self.assertEqual(out_c, patch_c * filter_shape[filter_spec.index(c)])
source += [patches_spec.index(c), patches_spec.index(c.lower())]
# Test that stacking patches together gives the source image, padded.
c = out_spec.index('C')
patches = patches.reshape(patches.shape[:c] +
(lhs_shape[lhs_spec.index('C')],) +
filter_shape +
patches.shape[c + 1:]
)
patches = np.moveaxis(patches, source, range(len(source)))
for i in range(len(filter_shape)):
patches = patches.reshape(patches.shape[:i] + (-1,) +
patches.shape[2 + i:])
patches = np.moveaxis(
patches,
range(len(filter_shape)),
[out_spec.index(c) for c in out_spec if c not in ('N', 'C')])
tol = None
if (jtu.test_device_matches(["tpu"]) and
precision in (None, lax.Precision.DEFAULT)):
tol = 1e-3
self.assertAllClose(out, patches, atol=tol, rtol=tol)
@jtu.sample_product(
[
dict(n=n, lhs_spec=lhs_spec, rhs_spec=rhs_spec, out_spec=out_spec)
for n in [1, 2]
for lhs_spec in [
"".join(s) for s in itertools.permutations("NCHWD"[: n + 2])
]
for rhs_spec in [
"".join(s) for s in itertools.permutations("OIHWDX"[: n + 2])
]
for out_spec in [
"".join(s) for s in itertools.permutations("NCHWDX"[: n + 2])
]
],
dtype=lax_test_util.inexact_dtypes,
precision=[
None,
lax.Precision.DEFAULT,
lax.Precision.HIGH,
lax.Precision.HIGHEST,
(lax.Precision.DEFAULT, lax.Precision.HIGHEST),
],
padding=["SAME", "VALID"],
)
def testConvGeneralDilatedLocal(self, dtype, precision, n, padding, lhs_spec,
rhs_spec, out_spec):
"""Make sure LCN with tiled CNN kernel matches CNN."""
lhs_spec_default = 'NCHWDX'[:n + 2]
rhs_spec_default = 'OIHWDX'[:n + 2]
rng = jtu.rand_small(self.rng())
lhs_default = rng((2, 4, 7, 6, 5, 8)[:n + 2], dtype)
rhs_default = rng((5, 4, 2, 3, 1, 2)[:n + 2], dtype)
window_strides = (1, 2, 3, 4)[:n]
rhs_dilation = (2, 1, 3, 2)[:n]
lhs_perm = [lhs_spec_default.index(c) for c in lhs_spec]
lhs = np.transpose(lhs_default, lhs_perm)
rhs_perm = [rhs_spec_default.index(c) for c in rhs_spec]
rhs = np.transpose(rhs_default, rhs_perm)
kwargs = dict(
lhs=lhs,
window_strides=window_strides,
padding=padding,
rhs_dilation=rhs_dilation,
dimension_numbers=(lhs_spec, rhs_spec, out_spec),
precision=precision
)
out_conv = lax.conv_general_dilated(rhs=rhs, **kwargs)
rhs_local = np.moveaxis(rhs, (rhs_spec.index('O'), rhs_spec.index('I')),
(0, 1))
rhs_local = rhs_local.reshape((rhs_local.shape[0], -1) + (1,) * n)
rhs_shape = (rhs_local.shape[:2] +
tuple(out_conv.shape[out_spec.index(c)]
for c in rhs_spec_default[2:]))
rhs_local = np.broadcast_to(rhs_local, rhs_shape)
rhs_local = np.transpose(rhs_local, rhs_perm)
filter_shape = [rhs.shape[i]
for i in range(n + 2) if rhs_spec[i] not in ('O', 'I')]
out_local = lax.conv_general_dilated_local(rhs=rhs_local,
filter_shape=filter_shape,
**kwargs)
self.assertAllClose(out_conv, out_local)
# TODO(mattjj): test conv_general_dilated against numpy
def testConv0DIsDot(self):
rng = jtu.rand_default(self.rng())
def args_maker():
return [rng((10, 5), np.float32), rng((5, 7), np.float32)]
jnp_fun = partial(lax.conv_general_dilated, window_strides=(),
padding='VALID', dimension_numbers=('NC', 'IO', 'NC'))
self._CompileAndCheck(jnp_fun, args_maker)
self._CheckAgainstNumpy(np.dot, jnp_fun, args_maker, tol=.1)
def testGradConv0D(self):
# Reproduces a failure in neural_tangents not caught in our presubmit tests
# See cl/367416742.
lhs = np.ones((2, 5), dtype=np.float32)
rhs = np.ones((5, 10), dtype=np.float32)
def f_jax(lhs, rhs):
return lax.conv_general_dilated(
lhs, rhs, window_strides=(),
padding=(), lhs_dilation=(), rhs_dilation=(),
dimension_numbers=lax.ConvDimensionNumbers((0, 1), (1, 0), (0, 1)),
batch_group_count=1, feature_group_count=1, precision=None,
preferred_element_type=None)
res, pullback = jax.vjp(f_jax, lhs, rhs)
grad = pullback(np.ones_like(res))
self.assertAllClose((lhs * 10., rhs * 2.), grad)
@staticmethod
def _conv_transpose_via_grad(data, kernel, strides, padding,
rhs_dilation=None, dimension_numbers=None):
"""Helper method: calculates conv transpose via grad for testing."""
assert len(data.shape) == len(kernel.shape)
nspatial = len(data.shape) - 2
one = (1,) * nspatial
rhs_dilation = rhs_dilation or one
dn = lax.conv_dimension_numbers(data.shape, kernel.shape,
dimension_numbers)
in_shape = np.take(data.shape, dn.lhs_spec)
in_sdims = in_shape[2:]
k_shape = np.take(kernel.shape, dn.rhs_spec)
k_sdims = k_shape[2:]
e_k_sdims = [(k-1) * r + 1 for k, r in zip(k_sdims, rhs_dilation)]
if padding == 'VALID':
o_sdims = [in_sdims[i]*strides[i] + max(e_k_sdims[i]-strides[i],0)
for i in range(nspatial)]
elif padding == 'SAME':
o_sdims = [in_sdims[i]*strides[i] for i in range(nspatial)]
o_shape = [in_shape[0], k_shape[1]] + o_sdims
out_spec_inv = [x[0] for x in
sorted(enumerate(dn.out_spec), key=lambda x: x[1])]
o_layout = np.take(np.array(o_shape), out_spec_inv)
placeholder = np.ones(o_layout, data.dtype)
conv = lambda x: lax.conv_general_dilated(x, kernel, strides, padding,
one, rhs_dilation, dn)
_, g = jax.vjp(conv, placeholder)
return g(data)[0]
@staticmethod
def _transpose_conv_kernel(data, kernel, dimension_numbers):
dn = lax.conv_dimension_numbers(data.shape, kernel.shape,
dimension_numbers)
spatial_axes = np.array(dn.rhs_spec)[2:]
for axis in spatial_axes:
kernel = np.flip(kernel, axis)
kernel = np.swapaxes(kernel, dn.rhs_spec[0], dn.rhs_spec[1])
return kernel
@jtu.sample_product(
[
dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape)
for lhs_shape, rhs_shape in [
(
(b, 9, 10, i),
(k, k, j, i),
) # NB: i,j flipped in RHS for transpose
for b, i, j, k in itertools.product(
[2, 3], [2, 3], [2, 3], [3, 4, 5]
)
]
],
dtype=lax_test_util.float_dtypes,
strides=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3)],
padding=["VALID", "SAME"],
dspec=[
("NHWC", "HWIO", "NHWC"),
],
rhs_dilation=[None, (2, 2)],
)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testConvTranspose2DT(self, lhs_shape, rhs_shape, dtype, strides,
padding, dspec, rhs_dilation):
rng = jtu.rand_small(self.rng())
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
# NB: this test calculates conv_transpose performing identically to the
# lhs-grad of conv.
def fun(lhs, rhs):
return lax.conv_transpose(lhs, rhs, strides, padding,
rhs_dilation=rhs_dilation,
dimension_numbers=dspec,
transpose_kernel=True)
def fun_via_grad(lhs, rhs):
return self._conv_transpose_via_grad(lhs, rhs, strides, padding,
rhs_dilation=rhs_dilation,
dimension_numbers=dspec)
# NB: below just checks for agreement, we're not calling numpy.
self._CheckAgainstNumpy(fun_via_grad, fun, args_maker)
  @jtu.sample_product(
    [
        dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape)
        for lhs_shape, rhs_shape in [
            ((b, 9, 10, i), (k, k, i, j))
            for b, i, j, k in itertools.product(
                [2, 3], [2, 3], [2, 3], [3, 4, 5]
            )
        ]
    ],
    dtype=lax_test_util.float_dtypes,
    strides=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3)],
    padding=["VALID", "SAME"],
    dspec=[
        ("NHWC", "HWIO", "NHWC"),
    ],
    rhs_dilation=[None, (2, 2)],
  )
  @jtu.skip_on_flag("jax_skip_slow_tests", True)
  def testConvTranspose2D(self, lhs_shape, rhs_shape, dtype, strides,
                          padding, dspec, rhs_dilation):
    """2D conv_transpose (transpose_kernel=False) equals lhs-grad of conv on the flipped kernel."""
    rng = jtu.rand_small(self.rng())
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]

    def fun(lhs, rhs):
      return lax.conv_transpose(lhs, rhs, strides, padding,
                                rhs_dilation=rhs_dilation,
                                dimension_numbers=dspec,
                                transpose_kernel=False)

    def fun_via_grad(lhs, rhs):
      # Manually flip/swap the kernel, then reuse the lhs-grad reference.
      rhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)
      return self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,
                                           rhs_dilation=rhs_dilation,
                                           dimension_numbers=dspec)

    # NB: below just checks for agreement, we're not calling numpy.
    self._CheckAgainstNumpy(fun_via_grad, fun, args_maker)
  @jtu.sample_product(
    [
        dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape)
        for lhs_shape, rhs_shape in [
            ((b, 10, i), (k, i, j))
            for b, i, j, k in itertools.product(
                [2, 3], [2, 3], [2, 3], [3, 4, 5]
            )
        ]
    ],
    dtype=lax_test_util.float_dtypes,
    strides=[(1,), (2,), (3,)],
    padding=["VALID", "SAME"],
    dspec=[
        ("NHC", "HIO", "NHC"),
    ],
    rhs_dilation=[None, (2,)],
  )
  def testConvTranspose1D(self, lhs_shape, rhs_shape, dtype, strides,
                          padding, dspec, rhs_dilation):
    """1D analogue of testConvTranspose2D: conv_transpose vs lhs-grad of conv."""
    rng = jtu.rand_small(self.rng())
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]

    def fun(lhs, rhs):
      return lax.conv_transpose(lhs, rhs, strides, padding,
                                dimension_numbers=dspec,
                                rhs_dilation=rhs_dilation,
                                transpose_kernel=False)

    def fun_via_grad(lhs, rhs):
      rhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)
      return self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,
                                           rhs_dilation=rhs_dilation,
                                           dimension_numbers=dspec)

    # NB: below just checks for agreement, we're not calling numpy.
    self._CheckAgainstNumpy(fun_via_grad, fun, args_maker)
  @jtu.sample_product(
    [
        dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape)
        for lhs_shape, rhs_shape in [
            ((b, i), (i, j))
            for b, i, j in itertools.product([2, 3], [2, 3], [2, 3])
        ]
    ],
    dtype=lax_test_util.float_dtypes,
    strides=[()],
    padding=["VALID", "SAME"],
    dspec=[
        ("NC", "IO", "NC"),
    ],
    rhs_dilation=[None, ()],
  )
  def testConvTranspose0D(self, lhs_shape, rhs_shape, dtype, strides,
                          padding, dspec, rhs_dilation):
    """Degenerate 0-spatial-dim conv_transpose (i.e. a matmul) vs lhs-grad of conv."""
    rng = jtu.rand_small(self.rng())
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]

    def fun(lhs, rhs):
      return lax.conv_transpose(lhs, rhs, strides, padding,
                                dimension_numbers=dspec,
                                rhs_dilation=rhs_dilation,
                                transpose_kernel=False)

    def fun_via_grad(lhs, rhs):
      rhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)
      return self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,
                                           rhs_dilation=rhs_dilation,
                                           dimension_numbers=dspec)

    # NB: below just checks for agreement, we're not calling numpy.
    self._CheckAgainstNumpy(fun_via_grad, fun, args_maker)
def testConvTransposePaddingList(self):
# Regression test for https://github.com/jax-ml/jax/discussions/8695
a = jnp.ones((28,28))
b = jnp.ones((3,3))
c = lax.conv_general_dilated(a[None, None], b[None, None], (1,1), [(0,0),(0,0)], (1,1))
self.assertAllClose(c, 9 * jnp.ones((1, 1, 26, 26)))
  def testConvInvalidPadding(self):
    """A flat (non-pairwise) padding tuple must raise a clear ValueError."""
    x = jnp.ones((1, 10, 10, 5), dtype=jnp.bfloat16)
    with self.assertRaisesRegex(ValueError,
                                r"padding argument.*, got \(3, 3\)"):
      jax.lax.conv_general_dilated_patches(x, (5, 5), window_strides=(1, 1),
                                           padding=(3, 3))
  @jtu.sample_product(
    [dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape)
     for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)]],
    [dict(lhs_dtype=lhs_dtype, rhs_dtype=rhs_dtype)
     for lhs_dtype, rhs_dtype in
     itertools.chain(
         itertools.product(lax_test_util.int_dtypes +
                           lax_test_util.float_dtypes +
                           lax_test_util.complex_dtypes +
                           lax_test_util.uint_dtypes,
                           repeat=2),
         zip(lax_test_util.bool_dtypes, lax_test_util.bool_dtypes))],
    precision=[
        None,
        lax.Precision.DEFAULT,
        lax.Precision.HIGH,
        lax.Precision.HIGHEST,
        (lax.Precision.DEFAULT, lax.Precision.HIGHEST),
    ],
  )
  def testDot(self, lhs_shape, rhs_shape, lhs_dtype, rhs_dtype, precision):
    """lax.dot compiles and runs for mixed dtypes and all precision settings."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
    self._CompileAndCheck(partial(lax.dot, precision=precision), args_maker)
  def testDotPositionalArgumentDeprecation(self):
    """Passing precision/preferred_element_type positionally warns or raises.

    Behavior depends on whether the "jax-lax-dot-positional-args" deprecation
    has been accelerated: if so, positional use raises ValueError; otherwise
    it emits a DeprecationWarning (and duplicate keyword use is a TypeError).
    """
    lhs = jnp.arange(5.0)
    rhs = jnp.arange(5.0)
    msg = "jax.lax.dot: passing precision or preferred_element_type by position"
    multiple_args_msg = "jax.lax.dot got multiple values for argument"
    with self.assertDeprecationWarnsOrRaises("jax-lax-dot-positional-args", msg):
      lax.dot(lhs, rhs, lax.Precision.DEFAULT, jnp.float32)
    # Positional + keyword for the same parameter is always a TypeError.
    with self.assertDeprecationWarnsOrRaises("jax-lax-dot-positional-args", msg):
      with self.assertRaises(TypeError):
        lax.dot(lhs, rhs, lax.Precision.DEFAULT, precision=lax.Precision.DEFAULT)
    if deprecations.is_accelerated("jax-lax-dot-positional-args"):
      with self.assertRaisesRegex(ValueError, msg):
        lax.dot(lhs, rhs, lax.Precision.DEFAULT, jnp.float32,
                preferred_element_type=jnp.float32)
    else:
      with self.assertWarnsRegex(DeprecationWarning, msg):
        with self.assertRaisesRegex(TypeError, multiple_args_msg):
          lax.dot(lhs, rhs, lax.Precision.DEFAULT, jnp.float32,
                  preferred_element_type=jnp.float32)
  @parameterized.parameters([
      (algorithm, dtype)
      for algorithm, test_dtypes in [
          (lax.DotAlgorithm(
              lhs_precision_type=np.float32,
              rhs_precision_type=np.float32,
              accumulation_type=np.float32,
              lhs_component_count=1,
              rhs_component_count=1,
              num_primitive_operations=1,
              allow_imprecise_accumulation=False,
          ), [np.float32]),
          (lax.DotAlgorithm(
              lhs_precision_type=np.float16,
              rhs_precision_type=np.float16,
              accumulation_type=np.float32,
          ), [np.float16]),
          ("F16_F16_F32", [np.float16]),
          (lax.DotAlgorithmPreset.DEFAULT, lax_test_util.float_dtypes),
          (lax.DotAlgorithmPreset.ANY_F8_ANY_F8_F32, dtypes._float8_dtypes),
          (lax.DotAlgorithmPreset.ANY_F8_ANY_F8_F32_FAST_ACCUM, dtypes._float8_dtypes),
          (lax.DotAlgorithmPreset.F16_F16_F16, [np.float16]),
          (lax.DotAlgorithmPreset.F16_F16_F32, [np.float16]),
          (lax.DotAlgorithmPreset.BF16_BF16_BF16, [dtypes.bfloat16]),
          (lax.DotAlgorithmPreset.BF16_BF16_F32, [dtypes.bfloat16]),
          (lax.DotAlgorithmPreset.BF16_BF16_F32_X3, [np.float32]),
          (lax.DotAlgorithmPreset.BF16_BF16_F32_X6, [np.float32]),
          (lax.DotAlgorithmPreset.BF16_BF16_F32_X9, [np.float32]),
          (lax.DotAlgorithmPreset.TF32_TF32_F32, [np.float32]),
          (lax.DotAlgorithmPreset.TF32_TF32_F32_X3, [np.float32]),
          (lax.DotAlgorithmPreset.F32_F32_F32, [np.float32]),
          (lax.DotAlgorithmPreset.F64_F64_F64, [np.float64]),
      ] for dtype in test_dtypes
      if jtu.dtypes.supported([dtype])
  ])
  def testDotAlgorithm(self, algorithm, dtype):
    """lax.dot accepts DotAlgorithm/preset values via `precision`.

    Per-backend skip logic below mirrors XLA's algorithm support matrix;
    only supported (algorithm, device) combinations are executed. Also
    checks the result dtype matches the input dtype (no silent upcast).
    """
    if jtu.test_device_matches(["cpu"]):
      # CPU supports only a small subset of algorithms.
      if algorithm not in {
          lax.DotAlgorithmPreset.DEFAULT,
          lax.DotAlgorithmPreset.F16_F16_F16,
          lax.DotAlgorithmPreset.F32_F32_F32,
          lax.DotAlgorithmPreset.F64_F64_F64,
          lax.DotAlgorithmPreset.BF16_BF16_F32,
          lax.DotAlgorithmPreset.BF16_BF16_F32_X3,
          lax.DotAlgorithmPreset.BF16_BF16_F32_X6,
      }:
        raise SkipTest(
            f"The dot algorithm '{algorithm}' is not supported on CPU.")
    if jtu.test_device_matches(["gpu"]):
      # GPU algorithm support is a little spotty. It is checked in
      # xla/service/algorithm_util.cc and the logic is copied here.
      if algorithm in {
          lax.DotAlgorithmPreset.F16_F16_F32,
          lax.DotAlgorithmPreset.TF32_TF32_F32,
          lax.DotAlgorithmPreset.BF16_BF16_F32,
          lax.DotAlgorithmPreset.BF16_BF16_F32_X3,
          lax.DotAlgorithmPreset.BF16_BF16_F32_X6,
          lax.DotAlgorithmPreset.BF16_BF16_F32_X9,
      }:
        if not jtu.is_cuda_compute_capability_at_least("8.0"):
          raise SkipTest(
              f"The dot algorithm '{algorithm}' requires CUDA compute "
              "capability >= 8.0.")
      elif algorithm not in {
          lax.DotAlgorithmPreset.DEFAULT,
          lax.DotAlgorithmPreset.ANY_F8_ANY_F8_F32,
          lax.DotAlgorithmPreset.ANY_F8_ANY_F8_F32_FAST_ACCUM,
          lax.DotAlgorithmPreset.F32_F32_F32,
          lax.DotAlgorithmPreset.F64_F64_F64,
      }:
        raise SkipTest(
            f"The dot algorithm '{algorithm}' is not supported on GPU.")
    if jtu.test_device_matches(["tpu"]):
      if algorithm not in {
          lax.DotAlgorithmPreset.DEFAULT,
          lax.DotAlgorithmPreset.BF16_BF16_F32,
          lax.DotAlgorithmPreset.BF16_BF16_F32_X3,
          lax.DotAlgorithmPreset.BF16_BF16_F32_X6,
      }:
        raise SkipTest(
            f"The dot algorithm '{algorithm}' is not supported on TPU."
        )
    lhs_shape = (3, 4)
    rhs_shape = (4, 3)
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
    self._CompileAndCheck(partial(lax.dot, precision=algorithm), args_maker,
                          rtol={np.float64: 3e-15})
    self.assertEqual(lax.dot(*args_maker(), precision=algorithm).dtype, dtype)
  def testDotAlgorithmInvalidFloat8Type(self):
    """Mixing f32 lhs with an f8 rhs under an f8 algorithm must raise ValueError."""
    if jtu.test_device_matches(["cpu"]):
      raise SkipTest("Not supported on CPU.")
    lhs_shape = (3, 4)
    rhs_shape = (4, 3)
    rng = jtu.rand_default(self.rng())
    lhs, rhs = rng(lhs_shape, np.float32), rng(rhs_shape, dtypes.float8_e4m3fn)
    with self.assertRaisesRegex(ValueError, "The dot algorithm"):
      lax.dot(lhs, rhs, precision="ANY_F8_ANY_F8_F32")
  def testDotAlgorithmCasting(self):
    """f16 inputs under F32_F32_F32 are upcast internally but return f16."""
    if jtu.test_device_matches(["tpu"]):
      raise SkipTest("F32_F32_F32 is not supported on TPU.")
    def fun(lhs, rhs):
      return lax.dot(lhs, rhs, precision="F32_F32_F32")
    lhs_shape = (3, 4)
    rhs_shape = (4, 3)
    rng = jtu.rand_default(self.rng())
    lhs, rhs = rng(lhs_shape, np.float16), rng(rhs_shape, np.float16)
    # The output dtype should follow the input dtype, not the algorithm's
    # accumulation type.
    self.assertEqual(fun(lhs, rhs).dtype, np.float16)
  def testDotAlgorithmAllowedOutputStorage(self):
    """No extra convert op is emitted when preferred_element_type matches the
    algorithm's allowed output storage type.

    Regression test for https://github.com/jax-ml/jax/issues/24794.
    """
    if not jtu.test_device_matches(["gpu"]):
      self.skipTest("Only supported on GPU.")
    def fun(lhs, rhs):
      return lax.dot(lhs, rhs, precision="F16_F16_F32",
                     preferred_element_type=np.float16)
    lhs_shape = (3, 4)
    rhs_shape = (4, 3)
    rng = jtu.rand_default(self.rng())
    lhs, rhs = rng(lhs_shape, np.float16), rng(rhs_shape, np.float16)
    # Inspect the lowered HLO text: a "convert" would indicate a redundant
    # post-dot cast.
    self.assertNotIn("convert", jax.jit(fun).lower(lhs, rhs).as_text())
  def testDotAlgorithmConfig(self):
    """jax.default_matmul_precision selects the dot algorithm in lowered HLO."""
    lhs_shape = (3, 4)
    rhs_shape = (4, 3)
    rng = jtu.rand_default(self.rng())
    lhs, rhs = rng(lhs_shape, np.float32), rng(rhs_shape, np.float32)
    # Expected attribute fragment of the dot op in the StableHLO text.
    expected = ("algorithm = <lhs_precision_type = f32, rhs_precision_type = "
                "f32, accumulation_type = f32")
    with jax.default_matmul_precision("F32_F32_F32"):
      hlo = jax.jit(lax.dot).lower(lhs, rhs).as_text()
      self.assertRegex(hlo, expected)
  @jtu.sample_product(
    [dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape)
     for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)]],
    [dict(dtype=d, preferred_element_type=p)
     for d, p in preferred_type_combinations],
  )
  def testDotPreferredElement(self, lhs_shape, rhs_shape, dtype,
                              preferred_element_type):
    """dot with preferred_element_type matches dot on pre-upcast inputs."""
    if (not config.enable_x64.value and
       (dtype == np.float64 or preferred_element_type == np.float64
        or dtype == np.int64 or preferred_element_type == np.int64
        or dtype == np.complex128 or preferred_element_type == np.complex128)):
      raise SkipTest("64-bit mode disabled")
    if (jtu.test_device_matches(["tpu"]) and
       (dtype == np.complex128 or preferred_element_type == np.complex128)):
      raise SkipTest("np.complex128 is not yet supported on TPU")
    if jtu.test_device_matches(["gpu"]):
      # TODO(b/189287598)
      raise SkipTest("dot_general with preferred_element_type returns NaN "
                     "non-deterministically on GPU")
    rng = jtu.rand_default(self.rng())
    x = rng(lhs_shape, dtype)
    y = rng(rhs_shape, dtype)
    # We first compute the dot when both inputs are a lower-precision type and
    # preferred_element_type is a higher-precision type. We then compute results
    # where the inputs are first upcast to the higher-precision type and no
    # `preferred_element_type` is given. We expect the result to be extremely
    # similar given the semantics of `preferred_element_type`.
    result_with_preferred_type = lax.dot(x, y, preferred_element_type=preferred_element_type)
    result_with_upcast_inputs = lax.dot(
      x.astype(preferred_element_type),
      y.astype(preferred_element_type))
    self.assertArraysAllClose(result_with_preferred_type, result_with_upcast_inputs)
  @jtu.sample_product(
    [dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape)
     for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)]],
    [dict(dtype_lhs=dtype_lhs, dtype_rhs=dtype_rhs)
      for dtype_lhs, dtype_rhs in [(dtypes.float8_e4m3fn, dtypes.float8_e5m2),
                                   (dtypes.float8_e5m2, dtypes.float8_e4m3fn),
                                   (dtypes.float8_e4m3fnuz, dtypes.float8_e5m2fnuz),
                                   (dtypes.float8_e5m2fnuz, dtypes.float8_e4m3fnuz)]],
  )
  def test_mixed_fp8_dot_general(self, lhs_shape, rhs_shape, dtype_lhs, dtype_rhs):
    """dot with mismatched fp8 operand dtypes matches the f32-upcast result."""
    if jtu.test_device_matches(["tpu"]):
      raise SkipTest("Mixed fp8 precision matmul is not yet supported on TPU")
    # The *fnuz fp8 variants exist only on ROCm hardware.
    if not jtu.is_device_rocm() and (
        dtype_lhs in [dtypes.float8_e4m3fnuz, dtypes.float8_e5m2fnuz] or
        dtype_rhs in [dtypes.float8_e4m3fnuz, dtypes.float8_e5m2fnuz]
    ):
      raise SkipTest(
          "float8_e4m3fnuz and float8_e5m2fnuz types are only supported on ROCm"
      )
    rng = jtu.rand_default(self.rng())
    lhs = rng(lhs_shape, dtype=dtype_lhs)
    rhs = rng(rhs_shape, dtype=dtype_rhs)
    dot_general_result = lax.dot(
      lhs, rhs,
      preferred_element_type=jnp.float32
    )
    # Reference: upcast both operands to f32 and redo the dot.
    lhs_upcasted = lhs.astype(jnp.float32)
    rhs_upcasted = rhs.astype(jnp.float32)
    dot_general_result_upcasted = lax.dot(
      lhs_upcasted, rhs_upcasted,
      preferred_element_type=jnp.float32
    )
    # Loose tolerances: fp8 quantization error dominates.
    self.assertArraysAllClose(
      dot_general_result, dot_general_result_upcasted, rtol=1e-3, atol=1e-3)
  @jtu.sample_product(
    [
        dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape)
        for lhs_shape in [(3,), (4, 3)]
        for rhs_shape in [(3,), (3, 6)]
    ],
    dtype=lax_test_util.all_dtypes,
  )
  def testDotAgainstNumpy(self, lhs_shape, rhs_shape, dtype):
    """lax.dot at HIGHEST precision agrees with the numpy reference."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
    # Per-dtype tolerances; low-precision floats need looser bounds.
    tol = {
      np.float16: 1e-2,
      np.float64: max(jtu.default_tolerance()[np.dtype(np.float64)], 1e-14),
      np.complex128: max(jtu.default_tolerance()[np.dtype(np.complex128)],
                        1e-14),
      jnp.bfloat16: 1e-1
    }
    lax_op = partial(lax.dot, precision=lax.Precision.HIGHEST)
    self._CheckAgainstNumpy(lax_reference.dot, lax_op, args_maker, tol=tol)
  @jtu.sample_product(
    [
        dict(
            lhs_shape=lhs_shape,
            rhs_shape=rhs_shape,
            lhs_contracting=lhs_contracting,
            rhs_contracting=rhs_contracting,
        )
        for lhs_shape, rhs_shape, lhs_contracting, rhs_contracting in [
            [(5,), (5,), [0], [0]],
            [(5, 7), (5,), [0], [0]],
            [(7, 5), (5,), [1], [0]],
            [(3, 5), (2, 5), [1], [1]],
            [(5, 3), (5, 2), [0], [0]],
            [(5, 3, 2), (5, 2, 4), [0], [0]],
            [(5, 3, 2), (5, 2, 4), [0, 2], [0, 1]],
            [(5, 3, 2), (3, 5, 2, 4), [0, 2], [1, 2]],
            [(1, 2, 2, 3), (1, 2, 3, 1), [1], [1]],
            [(3, 2), (2, 4), [1], [0]],
        ]
    ],
    dtype=lax_test_util.all_dtypes,
  )
  def testDotGeneralContractOnly(self, lhs_shape, rhs_shape, dtype,
                                 lhs_contracting, rhs_contracting):
    """dot_general with contracting dimensions only (no batch dims) compiles and runs."""
    rng = jtu.rand_small(self.rng())
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
    # Empty batch-dimension lists: contraction only.
    dimension_numbers = ((lhs_contracting, rhs_contracting), ([], []))

    def fun(lhs, rhs):
      return lax.dot_general(lhs, rhs, dimension_numbers)

    self._CompileAndCheck(fun, args_maker, check_dtypes=False)
  @jtu.sample_product(
    [
        dict(
            lhs_shape=lhs_shape,
            rhs_shape=rhs_shape,
            dimension_numbers=dimension_numbers,
        )
        for lhs_shape, rhs_shape, dimension_numbers in [
            ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))),
            ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1]))),
            ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1]))),
        ]
    ],
    dtype=lax_test_util.all_dtypes,
  )
  def testDotGeneralContractAndBatch(self, lhs_shape, rhs_shape, dtype,
                                     dimension_numbers):
    """dot_general with both contracting and batch dimensions compiles and runs."""
    rng = jtu.rand_small(self.rng())
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]

    def fun(lhs, rhs):
      return lax.dot_general(lhs, rhs, dimension_numbers)

    self._CompileAndCheck(fun, args_maker, check_dtypes=False)
  @jtu.sample_product(
    [
        dict(
            lhs_shape=lhs_shape,
            rhs_shape=rhs_shape,
            dimension_numbers=dimension_numbers,
        )
        for lhs_shape, rhs_shape, dimension_numbers in [
            ((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))),
            ((3, 3, 2), (2, 3, 4), (([2], [0]), ([0], [1]))),
            ((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1]))),
        ]
    ],
    dtype=lax_test_util.all_dtypes,
  )
  def testDotGeneralAgainstNumpy(self, lhs_shape, rhs_shape, dtype,
                                 dimension_numbers):
    """dot_general agrees with the lax_reference (numpy) implementation."""
    rng = jtu.rand_small(self.rng())
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
    op = lambda x, y: lax.dot_general(x, y, dimension_numbers)
    numpy_op = lambda x, y: lax_reference.dot_general(x, y, dimension_numbers)
    self._CheckAgainstNumpy(numpy_op, op, args_maker)
@jtu.sample_product(
shape=[(), (2, 3)],
dtype=lax_test_util.default_dtypes,
broadcast_sizes=[(), (2,), (1, 2)],
)
def testBroadcast(self, shape, dtype, broadcast_sizes):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.broadcast(x, broadcast_sizes)
self._CompileAndCheck(op, args_maker)
  @jtu.sample_product(
    shape=[(), (2, 3)],
    dtype=lax_test_util.default_dtypes,
    broadcast_sizes=[(), (2,), (1, 2)],
  )
  def testBroadcastAgainstNumpy(self, shape, dtype, broadcast_sizes):
    """lax.broadcast agrees with the lax_reference (numpy) implementation."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    op = lambda x: lax.broadcast(x, broadcast_sizes)
    numpy_op = lambda x: lax_reference.broadcast(x, broadcast_sizes)
    self._CheckAgainstNumpy(numpy_op, op, args_maker)
  @jtu.sample_product(
    [
        dict(inshape=inshape, outshape=outshape, dimensions=dimensions)
        for inshape, outshape, dimensions in [
            ([2], [2, 2], [0]),
            ([2], [2, 2], [1]),
            ([2], [2, 3], [0]),
            ([], [2, 3], []),
            ([1], [2, 3], [1]),
        ]
    ],
    dtype=lax_test_util.default_dtypes,
  )
  def testBroadcastInDim(self, inshape, dtype, outshape, dimensions):
    """lax.broadcast_in_dim compiles and runs for valid shape/dimension combos."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(inshape, dtype)]
    op = lambda x: lax.broadcast_in_dim(x, outshape, dimensions)
    self._CompileAndCheck(op, args_maker)
  def testBroadcastInDimOperandShapeTranspose(self):
    """Gradient of broadcast_in_dim handles size-1 operand dims correctly.

    Regression test for https://github.com/jax-ml/jax/issues/5276: `f`
    broadcasts a (1, 3, 1) operand directly, `g` first reshapes it to (3,);
    their gradients must match.
    """
    def f(x):
      return lax.broadcast_in_dim(x, (2, 3, 4), broadcast_dimensions=(0, 1, 2)).sum()
    def g(x):
      return lax.broadcast_in_dim(x.reshape((3,)), (2, 3, 4), broadcast_dimensions=(1,)).sum()
    x = np.ones((1, 3, 1))
    self.assertArraysEqual(jax.grad(f)(x), jax.grad(g)(x))
  @parameterized.parameters(
      {"inshape": inshape, "outshape": outshape,
       "broadcast_dimensions": broadcast_dimensions, "err_msg": err_msg}
      for inshape, outshape, broadcast_dimensions, err_msg in [
        ([2], [2, 2], [0, 1], ('broadcast_dimensions must have length equal to '
                                'operand ndim')),
        ([2, 2], [2], [0, 1], ('target broadcast shape must have equal or higher rank '
                               'to the operand shape')),
        ([2], [2, 3], [2], ('broadcast_in_dim broadcast_dimensions must be a subset of output '
                            'dimensions')),
        ([2], [3], [0], ('operand dimension sizes must either be 1, or be '
                         'equal to their corresponding dimensions in the target broadcast shape')),
        ([2, 2], [2, 2], [1, 0], ('broadcast_dimensions must be strictly increasing')),
      ])
  def testBroadcastInDimShapeCheck(self, inshape, outshape, broadcast_dimensions, err_msg):
    """Each invalid broadcast_in_dim configuration raises TypeError with the documented message."""
    rng = jtu.rand_default(self.rng())
    x = rng(inshape, np.float32)
    with self.assertRaisesRegex(TypeError, err_msg):
      lax.broadcast_in_dim(x, shape=outshape, broadcast_dimensions=broadcast_dimensions)
  @jtu.sample_product(
    [
        dict(inshape=inshape, outshape=outshape, dimensions=dimensions)
        for inshape, outshape, dimensions in [
            ([2], [2, 2], [0]),
            ([2], [2, 2], [1]),
            ([2], [2, 3], [0]),
            ([], [2, 3], []),
            ([1], [2, 3], [1]),
        ]
    ],
    dtype=lax_test_util.default_dtypes,
  )
  def testBroadcastInDimAgainstNumpy(self, inshape, dtype, outshape, dimensions):
    """lax.broadcast_in_dim agrees with the lax_reference implementation."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(inshape, dtype)]
    op = lambda x: lax.broadcast_in_dim(x, outshape, dimensions)
    numpy_op = lambda x: lax_reference.broadcast_in_dim(x, outshape, dimensions)
    self._CheckAgainstNumpy(numpy_op, op, args_maker)
  @parameterized.parameters(
      {"inshape": inshape, "dimensions": dimensions, "error_type": error_type,
       "err_msg": err_msg}
      for inshape, dimensions, error_type, err_msg in [
        ((1, 2, 3), (0, 0), ValueError, 'dimensions are not unique'),
        ((1, 2, 3), (3,), ValueError, 'axis 3 is out of bounds'),
        ((1, 2, 3), (-4,), ValueError, 'axis -4 is out of bounds'),
        ((1, 2, 3), (1,), ValueError, 'cannot select an axis to squeeze out'),
        ((1, 2, 3), (None,), TypeError, 'cannot be interpreted as an integer'),
      ])
  def testSqueezeShapeCheck(self, inshape, dimensions, error_type, err_msg):
    """Each invalid squeeze request raises the documented error type and message."""
    rng = jtu.rand_default(self.rng())
    x = rng(inshape, np.float32)
    with self.assertRaisesRegex(error_type, err_msg):
      lax.squeeze(x, dimensions=dimensions)
  @jtu.sample_product(
    [dict(arg_shape=arg_shape, dimensions=dimensions)
      for arg_shape, dimensions in [
        [(1,), (0,)],
        [(1,), (-1,)],
        [(2, 1, 4), (1,)],
        [(2, 1, 3, 1), (1,)],
        [(2, 1, 3, 1), (1, 3)],
        [(2, 1, 3, 1), (3,)],
      ]],
  )
  def testSqueeze(self, arg_shape, dimensions):
    """lax.squeeze: compile check, numpy agreement, and fwd/rev gradients."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(arg_shape, np.float32)]
    op = lambda x: lax.squeeze(x, dimensions)
    numpy_op = lambda x: lax_reference.squeeze(x, dimensions)
    self._CompileAndCheck(op, args_maker)
    self._CheckAgainstNumpy(numpy_op, op, args_maker)
    # Squeeze is linear, so 3rd-order gradient checks are cheap and exact.
    check_grads(op, args_maker(), 3, ["fwd", "rev"], eps=1.)
@jtu.sample_product(
input_type=["np.array", "jnp.array", "float", "np.float32"],
jit=[True, False],
)
def testEmptySqueezeReturnType(self, input_type, jit):
if input_type == "np.array":
operand = np.arange(5)
elif input_type == "jnp.array":
operand = jnp.arange(5)
elif input_type == "float":
operand = 2.0
elif input_type == "np.float32":
operand = np.float32(2.0)
else:
raise ValueError(f"Unrecognized {input_type=}")
op = lambda x: lax.squeeze(x, dimensions=())
if jit:
op = jax.jit(op)
result = op(operand)
self.assertIsInstance(result, jax.Array)
  @jtu.sample_product(
    [dict(arg_shape=arg_shape, out_shape=out_shape)
      for arg_shape, out_shape in [
        [(3, 4), (12,)], [(2, 1, 4), (8,)], [(2, 2, 4), (2, 8)]
      ]],
    dtype=lax_test_util.default_dtypes,
  )
  def testReshape(self, arg_shape, out_shape, dtype):
    """lax.reshape compiles and runs for size-preserving shape changes."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(arg_shape, dtype)]
    op = lambda x: lax.reshape(x, out_shape)
    self._CompileAndCheck(op, args_maker)
  @jtu.sample_product(
    [dict(arg_shape=arg_shape, out_shape=out_shape)
      for arg_shape, out_shape in [
        [(3, 4), (12,)], [(2, 1, 4), (8,)], [(2, 2, 4), (2, 8)]
      ]],
    dtype=lax_test_util.default_dtypes,
  )
  def testReshapeAgainstNumpy(self, arg_shape, out_shape, dtype):
    """lax.reshape agrees with the lax_reference (numpy) implementation."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(arg_shape, dtype)]
    op = lambda x: lax.reshape(x, out_shape)
    numpy_op = lambda x: lax_reference.reshape(x, out_shape)
    self._CheckAgainstNumpy(numpy_op, op, args_maker)
def testRoundRoundingMethods(self):
x = np.array([-2.5, -1.5, -0.5, 0.5, 1.5, 2.5], dtype=np.float32)
self.assertAllClose(lax.round(x, lax.RoundingMethod.AWAY_FROM_ZERO),
np.array([-3, -2, -1, 1, 2, 3], dtype=np.float32))
self.assertAllClose(lax.round(x, lax.RoundingMethod.TO_NEAREST_EVEN),
np.array([-2, -2, 0, 0, 2, 2], dtype=np.float32))
  @jtu.sample_product(
    [dict(shape=shape, pads=pads) for shape, pads in [
        ((0, 2), [(1, 2, 1), (0, 1, 0)]),
        ((2, 3), [(1, 2, 1), (0, 1, 0)]),
        ((2,), [(1, 2, 0)]),
        ((1, 2), [(1, 2, 0), (3, 4, 0)]),
        ((1, 2), [(0, 0, 0), (0, 0, 0)]),
        ((2,), [(1, 2, 3),]),
        ((3, 2), [(1, 2, 1), (3, 4, 2)]),
        ((2,), [(-1, 2, 0),]),
        ((4, 2), [(-1, -2, 0), (1, 2, 0)]),
        ((4, 2), [(-1, 2, 0), (1, 2, 2)]),
        ((5,), [(-1, -2, 2),]),
        ((4, 2), [(-1, -2, 1), (1, 2, 2)])
      ]
    ],
    dtype=lax_test_util.default_dtypes,
  )
  def testPad(self, shape, dtype, pads):
    """lax.pad compiles and runs, including negative edge padding and interior dilation."""
    rng = jtu.rand_small(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    # Pad with a zero scalar of the operand's dtype; pads are (lo, hi, interior).
    fun = lambda operand: lax.pad(operand, np.array(0, dtype), pads)
    self._CompileAndCheck(fun, args_maker)
  @jtu.sample_product(
    shape=[(2, 3)],
    dtype=lax_test_util.default_dtypes,
    pads=[
      [(0, 0, 0), (0, 0, 0)],  # no padding
      [(1, 1, 0), (2, 2, 0)],  # only positive edge padding
      [(1, 2, 1), (0, 1, 0)],  # edge padding and interior padding
      [(0, 0, 0), (-1, -1, 0)],  # negative padding
      [(0, 0, 0), (-2, -2, 4)],  # add big dilation then remove from edges
      [(0, 0, 0), (-2, -3, 1)],  # remove everything in one dimension
    ]
  )
  def testPadAgainstNumpy(self, shape, dtype, pads):
    """lax.pad agrees with the lax_reference (numpy) implementation."""
    rng = jtu.rand_small(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    op = lambda x: lax.pad(x, np.array(0, dtype), pads)
    numpy_op = lambda x: lax_reference.pad(x, np.array(0, dtype), pads)
    self._CheckAgainstNumpy(numpy_op, op, args_maker)
  def testPadErrors(self):
    """Invalid lax.pad arguments raise ValueError with descriptive messages."""
    # Non-scalar padding value.
    with self.assertRaisesRegex(ValueError, "padding_value must be a scalar"):
      lax.pad(np.zeros(2), np.zeros(2), [(0, 0, 0)])
    # padding_config length must match operand rank.
    with self.assertRaisesRegex(ValueError, "padding_config"):
      lax.pad(np.zeros(2), 0., [(0, 1, 0), (0, 1, 0)])
    # Interior (dilation) padding cannot be negative.
    with self.assertRaisesRegex(ValueError, "interior padding in padding_config must be nonnegative"):
      lax.pad(np.zeros(2), 0., [(0, 1, -1)])
    # Negative edge padding may not shrink a dimension below size 0.
    with self.assertRaisesRegex(ValueError, "Dimension size after padding is not at least 0"):
      lax.pad(np.zeros(2), 0., [(-3, 0, 0)])
    with self.assertRaisesRegex(ValueError, "Dimension size after padding is not at least 0"):
      lax.pad(np.zeros(2), 0., [(-4, 0, 1)])
  def testReverse(self):
    """lax.rev reverses the requested dimensions.

    NOTE(review): the jitted lambda closes over `dimensions`, which is
    reassigned between calls. This works here only because each call uses a
    distinct input shape, forcing a retrace that picks up the new value —
    a deliberate but fragile pattern; don't add same-shape cases.
    """
    rev = jax.jit(lambda operand: lax.rev(operand, dimensions))

    dimensions = []  # no axes: identity
    self.assertAllClose(np.array([0, 1, 2, 3]), rev(np.array([0, 1, 2, 3])),
                        check_dtypes=False)

    dimensions = [0]  # reverse the single axis
    self.assertAllClose(np.array([3, 2, 1]), rev(np.array([1, 2, 3])),
                        check_dtypes=False)

    dimensions = [0, 1]  # reverse both axes
    self.assertAllClose(np.array([[6, 5, 4], [3, 2, 1]]),
                        rev(np.array([[1, 2, 3], [4, 5, 6]])),
                        check_dtypes=False)
@jtu.sample_product(
[dict(arg_shape=arg_shape, pred_shape=pred_shape)
for arg_shape in [(), (3,), (2, 3)]
for pred_shape in ([(), arg_shape] if arg_shape else [()])
],
arg_dtype=lax_test_util.default_dtypes,
)
def testSelect(self, pred_shape, arg_shape, arg_dtype):
rng = jtu.rand_default(self.rng())
def args_maker():
return [rng(pred_shape, np.bool_), rng(arg_shape, arg_dtype),
rng(arg_shape, arg_dtype)]
return self._CheckAgainstNumpy(lax_reference.select, lax.select, args_maker)
return self._CompileAndCheck(lax.select, args_maker)
@jtu.sample_product(
[
dict(arg_shape=arg_shape, pred_shape=pred_shape)
for arg_shape in [(), (3,), (2, 3)]
for pred_shape in ([(), arg_shape] if arg_shape else [()])
],
[
dict(pred_dtype=pred_dtype, num_args=num_args)
for (pred_dtype, num_args) in (
list(
itertools.product(
[np.dtype(np.bool_), np.dtype(np.int32)], [1, 2]
)
)
+ [(np.dtype(np.int32), 6)]
)
],
arg_dtype=lax_test_util.default_dtypes,
)
def testSelectN(self, pred_dtype, pred_shape, arg_shape, arg_dtype, num_args):
if pred_dtype == np.bool_:
pred_rng = jtu.rand_default(self.rng())
else:
pred_rng = jtu.rand_int(self.rng(), low=-1, high=num_args + 1)
rng = jtu.rand_default(self.rng())
def args_maker():
return [pred_rng(pred_shape, pred_dtype)] + (
[rng(arg_shape, arg_dtype) for _ in range(num_args)])
return self._CheckAgainstNumpy(lambda c, *xs: np.choose(c, xs, mode='clip'),
lax.select_n, args_maker)
return self._CompileAndCheck(lax.select_n, args_maker)
  @jtu.sample_product(
    [
        dict(
            shape=shape, starts=indices, limits=limit_indices, strides=strides
        )
        for shape, indices, limit_indices, strides in [
            [(3,), (1,), (2,), None],
            [(7,), (4,), (7,), None],
            [(5,), (1,), (5,), (2,)],
            [(8,), (1,), (6,), (2,)],
            [(5, 3), (1, 1), (3, 2), None],
            [(5, 3), (1, 1), (3, 1), None],
            [(7, 5, 3), (4, 0, 1), (7, 1, 3), None],
            [(5, 3), (1, 1), (2, 1), (1, 1)],
            [(5, 3), (1, 1), (5, 3), (2, 1)],
        ]
    ],
    dtype=lax_test_util.default_dtypes,
  )
  def testSlice(self, shape, dtype, starts, limits, strides):
    """lax.slice compiles and runs for static start/limit/stride combos."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    op = lambda x: lax.slice(x, starts, limits, strides)
    self._CompileAndCheck(op, args_maker)
  @jtu.sample_product(
    [
        dict(
            shape=shape, starts=indices, limits=limit_indices, strides=strides
        )
        for shape, indices, limit_indices, strides in [
            [(3,), (1,), (2,), None],
            [(7,), (4,), (7,), None],
            [(5,), (1,), (5,), (2,)],
            [(8,), (1,), (6,), (2,)],
            [(5, 3), (1, 1), (3, 2), None],
            [(5, 3), (1, 1), (3, 1), None],
            [(7, 5, 3), (4, 0, 1), (7, 1, 3), None],
            [(5, 3), (1, 1), (2, 1), (1, 1)],
            [(5, 3), (1, 1), (5, 3), (2, 1)],
        ]
    ],
    dtype=lax_test_util.default_dtypes,
  )
  def testSliceAgainstNumpy(self, shape, dtype, starts, limits, strides):
    """lax.slice agrees with the lax_reference (numpy) implementation."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    op = lambda x: lax.slice(x, starts, limits, strides)
    numpy_op = lambda x: lax_reference.slice(x, starts, limits, strides)
    self._CheckAgainstNumpy(numpy_op, op, args_maker)
  @jtu.sample_product(
    [
        dict(shape=shape, indices=indices, size_indices=size_indices)
        for shape, indices, size_indices in [
            [(3,), np.array((1,)), (1,)],
            [(5, 3), (1, 1), (3, 1)],
            [(5, 3), np.array((1, 1)), (3, 1)],
            [(7, 5, 3), np.array((4, 1, 0)), (2, 0, 1)],
        ]
    ],
    dtype=lax_test_util.default_dtypes,
  )
  def testDynamicSlice(self, shape, dtype, indices, size_indices):
    """lax.dynamic_slice compiles and runs with traced start indices."""
    rng = jtu.rand_default(self.rng())
    # Start indices are passed as a runtime array argument.
    args_maker = lambda: [rng(shape, dtype), np.array(indices)]
    op = lambda x, starts: lax.dynamic_slice(x, starts, size_indices)
    self._CompileAndCheck(op, args_maker)
  @jtu.sample_product(
    [
        dict(shape=shape, indices=indices, size_indices=size_indices)
        for shape, indices, size_indices in [
            [(3,), (1,), (1,)],
            [(5, 3), (1, 1), (3, 1)],
            [(7, 5, 3), (4, 1, 0), (2, 0, 1)],
        ]
    ],
    dtype=lax_test_util.default_dtypes,
  )
  def testDynamicSliceAgainstNumpy(self, shape, dtype, indices, size_indices):
    """lax.dynamic_slice agrees with the lax_reference implementation."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype), np.array(indices)]
    op = lambda x, s: lax.dynamic_slice(x, s, size_indices)
    numpy_op = lambda x, s: lax_reference.dynamic_slice(x, s, size_indices)
    self._CheckAgainstNumpy(numpy_op, op, args_maker)
def testDynamicSliceInDim(self):
# Regression test for mixed type problem in dynamic_slice_in_dim.
rng = jtu.rand_default(self.rng())
x = rng((6, 7), np.int32)
np.testing.assert_equal(lax.dynamic_slice_in_dim(x, 2, 3), x[2:5])
  def testDynamicSliceArraySliceSizes(self):
    """dynamic_slice accepts slice sizes given as a jnp array, not just ints."""
    rng = jtu.rand_default(self.rng())
    x = rng((6, 7), np.int32)
    np.testing.assert_equal(lax.dynamic_slice(x, [2, 3], jnp.array([2, 2])),
                            x[2:4, 3:5])
def testDynamicSliceWithNonScalarIndex(self):
x = jnp.ones((6, 7), np.int32)
with self.assertRaises(TypeError):
lax.dynamic_slice_in_dim(x, jnp.array([2, 2]), 3)
  @jtu.sample_product(
    [
        dict(shape=shape, indices=indices, update_shape=update_shape)
        for shape, indices, update_shape in [
            [(3,), (1,), (1,)],
            [(5, 3), (1, 1), (3, 1)],
            [(7, 5, 3), (4, 1, 0), (2, 0, 1)],
        ]
    ],
    dtype=lax_test_util.default_dtypes,
  )
  def testDynamicUpdateSlice(self, shape, dtype, indices, update_shape):
    """lax.dynamic_update_slice compiles and runs with runtime start indices."""
    rng = jtu.rand_default(self.rng())

    def args_maker():
      return [rng(shape, dtype), rng(update_shape, dtype), np.array(indices)]

    self._CompileAndCheck(lax.dynamic_update_slice, args_maker)
  @jtu.sample_product(
    [
        dict(shape=shape, indices=indices, update_shape=update_shape)
        for shape, indices, update_shape in [
            [(3,), (1,), (1,)],
            [(5, 3), (1, 1), (3, 1)],
            [(7, 5, 3), (4, 1, 0), (2, 0, 1)],
        ]
    ],
    dtype=lax_test_util.default_dtypes,
  )
  def testDynamicUpdateSliceAgainstNumpy(self, shape, dtype, indices,
                                         update_shape):
    """lax.dynamic_update_slice agrees with the lax_reference implementation."""
    rng = jtu.rand_default(self.rng())

    def args_maker():
      return [rng(shape, dtype), rng(update_shape, dtype), np.array(indices)]

    self._CheckAgainstNumpy(lax_reference.dynamic_update_slice,
                            lax.dynamic_update_slice, args_maker)
  def testDynamicUpdateSliceBatched(self):
    """vmap over the start index of dynamic_update_slice matches a manual loop.

    Regression test for https://github.com/jax-ml/jax/issues/9083.
    """
    x = jnp.arange(5)
    y = jnp.arange(6, 9)
    ind = jnp.arange(6)
    expected = jnp.vstack([lax.dynamic_update_slice(x, y, (i,)) for i in ind])
    # Only the index tuple is mapped (in_axes: x and y are broadcast).
    actual = jax.vmap(lax.dynamic_update_slice, (None, None, 0))(x, y, (ind,))
    self.assertAllClose(expected, actual)
  def testDynamicUpdateSliceWithNonScalarIndex(self):
    """A non-scalar start index for dynamic_update_slice_in_dim is a TypeError."""
    x = jnp.ones((6, 7), np.int32)
    with self.assertRaises(TypeError):
      lax.dynamic_update_slice_in_dim(x, jnp.ones((2, 7), np.int32),
                                      jnp.array([2, 2]), axis=0)
  @jtu.sample_product(
    [
        dict(shape=shape, perm=perm)
        for shape, perm in [
            [(3, 4), (1, 0)],
            [(3, 4), (0, 1)],
            [(3, 4, 5), (2, 1, 0)],
            [(3, 4, 5), (1, 0, 2)],
        ]
    ],
    dtype=lax_test_util.default_dtypes,
  )
  def testTranspose(self, shape, dtype, perm):
    """lax.transpose compiles and runs for 2D/3D permutations."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    op = lambda x: lax.transpose(x, perm)
    self._CompileAndCheck(op, args_maker)
def testTransposeWithArrayPermutation(self):
x = lax.transpose(np.ones((2, 3)), jnp.array([1, 0]))
self.assertEqual((3, 2), x.shape)
  @jtu.sample_product(
    [
        dict(shape=shape, perm=perm)
        for shape, perm in [
            [(3, 4), (1, 0)],
            [(3, 4), (0, 1)],
            [(3, 4, 5), (2, 1, 0)],
            [(3, 4, 5), (1, 0, 2)],
        ]
    ],
    dtype=lax_test_util.default_dtypes,
  )
  def testTransposeAgainstNumpy(self, shape, dtype, perm):
    """lax.transpose agrees with the lax_reference (numpy) implementation."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    op = lambda x: lax.transpose(x, perm)
    numpy_op = lambda x: lax_reference.transpose(x, perm)
    self._CheckAgainstNumpy(numpy_op, op, args_maker)
  @jtu.sample_product(
      [
          dict(
              op=rec.op,
              reference_op=rec.reference_op,
              init_val=rec.init_val,
              primitive=rec.primitive,
              dtype=dtype,
          )
          for rec in lax_test_util.lax_reduce_ops()
          for dtype in rec.dtypes
      ],
      [
          dict(shape=shape, dims=dims)
          for shape, dims in [
              [(3, 4, 5), (0,)],
              [(3, 4, 5), (1, 2)],
              [(3, 4, 5), (0, 2)],
              [(3, 4, 5), (0, 1, 2)],
          ]
      ],
  )
  def testReduce(self, op, reference_op, init_val, shape, dtype, dims, primitive):
    """Checks lax.reduce against a NumPy reference.

    Exercises both a traced and a concrete (static) init value, and verifies
    that the expected monoid primitive appears in the jaxpr.
    """
    if not config.enable_x64.value and dtype in (np.float64, np.int64, np.uint64):
      raise SkipTest("x64 mode is disabled.")
    def reference_fun(operand):
      # ufunc-style references expose `.reduce` and accept an `initial`
      # value; plain-function references are called with just the axis.
      if hasattr(reference_op, "reduce"):
        initial = np.array(init_val, dtype=dtype)
        result = reference_op.reduce(operand, axis=dims, initial=initial)
      else:
        result = reference_op(operand, axis=dims)
      return result.astype(dtype)
    # Small random values for floats keep the reductions well-conditioned.
    rng_factory = (jtu.rand_default if dtypes.issubdtype(dtype, np.integer)
                   else jtu.rand_small)
    rng = rng_factory(self.rng())
    init_val = np.asarray(init_val).astype(dtype)
    fun = lambda operand, init_val: lax.reduce(operand, init_val, op, dims)
    args_maker = lambda: [rng(shape, dtype), init_val]
    self._CompileAndCheck(fun, args_maker)
    # we separately test the version that uses a concrete init_val because it
    # can hit different code paths
    fun = lambda operand: lax.reduce(operand, init_val, op, dims)
    args_maker = lambda: [rng(shape, dtype)]
    self._CompileAndCheck(fun, args_maker)
    self._CheckAgainstNumpy(reference_fun, fun, args_maker)
    # check that the correct monoid reducer primitive is used inside the jaxpr.
    # This requires the init_val (monoid identity element) to be static
    jaxpr = jax.make_jaxpr(fun)(rng(shape, dtype))
    self.assertEqual(jaxpr.eqns[0].primitive, primitive)
@jtu.sample_product(
[
dict(
op=rec.op,
reference_op=rec.reference_op,
dtype=dtype,
)
for rec in lax_test_util.lax_named_reduce_ops()
for dtype in rec.dtypes
],
[
dict(shape=shape, dims=dims)
for shape, dims in [
[(3, 4, 5), (0,)],
[(3, 4, 5), (1, 2)],
[(3, 4, 5), (0, 2)],
[(3, 4, 5), (0, 1, 2)],
]
],
)
def testNamedReduceOperators(self, op, reference_op, dtype, shape, dims):
rng_factory = (jtu.rand_default if dtypes.issubdtype(dtype, np.integer)
else jtu.rand_small)
rng = rng_factory(self.rng())
args_maker = lambda: [rng(shape, dtype)]
def lax_fun(operand):
return op(operand, dims)
def reference_fun(operand):
return reference_op(operand, dims).astype(dtype)
self._CompileAndCheck(lax_fun, args_maker)
self._CheckAgainstNumpy(reference_fun, lax_fun, args_maker)
@jtu.sample_product(
op=["add", "mul"],
op_namespace=[lax, operator],
arr_weak_type=[False, True],
init_weak_type=[False, True],
)
def testReduceWeakType(self, op_namespace, op, arr_weak_type, init_weak_type):
op = getattr(op_namespace, op)
arr = lax_internal._convert_element_type(np.arange(10), dtypes.dtype(int),
weak_type=arr_weak_type)
init = lax_internal._convert_element_type(1, dtypes.dtype(int),
weak_type=init_weak_type)
fun = lambda arr, init: lax.reduce(arr, init, op, (0,))
out = fun(arr, init)
self.assertEqual(dtypes.is_weakly_typed(out), arr_weak_type and init_weak_type)
out_jit = jax.jit(fun)(arr, init)
self.assertEqual(dtypes.is_weakly_typed(out_jit), arr_weak_type and init_weak_type)
def testReduceWindowScalar(self):
rng = jtu.rand_small(self.rng())
dtype = jnp.float32
init_val = np.asarray(0, dtype=dtype)
op = lax.add
def fun(operand, init_val):
return lax.reduce_window(
operand, init_val, op, window_dimensions=(), window_strides=(),
padding=(), base_dilation=(), window_dilation=())
def reference_fun(operand, init_val):
return lax_reference.reduce_window(
operand, init_val, op, window_dimensions=(), window_strides=(),
padding=(), base_dilation=())
args_maker = lambda: [rng((), dtype), init_val]
self._CompileAndCheck(fun, args_maker)
self._CheckAgainstNumpy(reference_fun, fun, args_maker)
  @jtu.sample_product(
    [dict(init_val=init_val, op=op, dtype=dtype)
     for init_val, op, dtypes in [
         (0, lax.add, [np.float32]),
         (-np.inf, lax.max, [np.float32]),
         (np.inf, lax.min, [np.float32]),
       ]
     for dtype in dtypes
    ],
    [dict(shape=shape, dims=dims, strides=strides, padding=padding,
          base_dilation=base_dilation, window_dilation=window_dilation)
     for shape, dims, strides, padding, base_dilation, window_dilation in (
       itertools.chain(
         itertools.product(
           [(4, 6)],
           [(2, 1), (1, 2)],
           [(1, 1), (2, 1), (1, 2)],
           ["VALID", "SAME", [(0, 3), (1, 2)]],
           [(1, 1), (2, 3)],
           [(1, 1), (1, 2)]),
         itertools.product(
           [(3, 2, 4, 6)], [(1, 1, 2, 1), (2, 1, 2, 1)],
           [(1, 2, 2, 1), (1, 1, 1, 1)],
           ["VALID", "SAME", [(0, 1), (1, 0), (2, 3), (0, 2)]],
           [(1, 1, 1, 1), (2, 1, 3, 2)],
           [(1, 1, 1, 1), (1, 2, 2, 1)])))
    ],
  )
  def testReduceWindow(self, op, init_val, dtype, shape, dims, strides, padding,
                       base_dilation, window_dilation):
    """Checks reduce_window for the monoid reducers (add/max/min).

    Compares against the NumPy reference where possible; the reference has no
    window_dilation support, so that comparison only runs for unit dilation.
    """
    rng = jtu.rand_small(self.rng())
    init_val = np.asarray(init_val, dtype=dtype)
    def fun(operand, init_val):
      return lax.reduce_window(operand, init_val, op, dims, strides, padding,
                               base_dilation, window_dilation)
    def reference_fun(operand, init_val):
      # NOTE: the reference takes no window_dilation argument.
      return lax_reference.reduce_window(operand, init_val, op, dims, strides,
                                         padding, base_dilation)
    args_maker = lambda: [rng(shape, dtype), init_val]
    self._CompileAndCheck(fun, args_maker)
    if all(d == 1 for d in window_dilation):
      self._CheckAgainstNumpy(reference_fun, fun, args_maker)
    # we separately test the version that uses a concrete init_val because it
    # can hit different code paths
    def fun(operand):
      return lax.reduce_window(
          operand,
          init_val,
          op,
          dims,
          strides,
          padding,
          base_dilation,
          window_dilation,
      )
    args_maker = lambda: [rng(shape, dtype)]
    self._CompileAndCheck(fun, args_maker)
  # TODO(voz): I broke these out to their own test for 2 reasons:
  # 1. I wanted to show that general ops work, there's a small subset of
  # ops, specifically, the ones used in the test above, lax.add, lax.max, and
  # lax.min that actually route to a monoid operator that *doesn't* pass JVP
  # tests.
  # 2. Slightly different parameterization.
  @jtu.sample_product(
      [
          dict(init_val=init_val, op=op, dtype=dtype)
          for init_val, op, dtypes in [
              (1, _reduce_custom_add, [np.float32]),
              (0, _reduce_custom_mul, [np.float32]),
              (0, _reduce_custom_sub, [np.float32]),
          ]
          for dtype in dtypes
      ],
      [
          dict(
              shape=shape,
              dims=dims,
              strides=strides,
              padding=padding,
              base_dilation=base_dilation,
              window_dilation=window_dilation,
          )
          for shape, dims, strides, padding, base_dilation, window_dilation in (
              itertools.chain(
                  itertools.product(
                      [(4, 6)],
                      [(2, 1), (1, 2)],
                      [(1, 1), (2, 1), (1, 2)],
                      ['VALID', 'SAME', [(0, 3), (1, 2)]],
                      [(1, 1), (2, 3)],
                      [(1, 1), (1, 2)],
                  ),
                  itertools.product(
                      [(3, 2, 4, 6)],
                      [(1, 1, 2, 1), (2, 1, 2, 1)],
                      [(1, 2, 2, 1), (1, 1, 1, 1)],
                      ['VALID', 'SAME', [(0, 1), (1, 0), (2, 3), (0, 2)]],
                      [(1, 1, 1, 1), (2, 1, 3, 2)],
                      [(1, 1, 1, 1), (1, 2, 2, 1)],
                  ),
              )
          )
      ],
  )
  @jtu.skip_on_devices('gpu')  # jax.lax.mul has an XLA bug on GPU b/339071103
  @jtu.skip_on_devices('tpu')  # b/39342488
  def testReduceWindowGeneralJVP(
      self,
      op,
      init_val,
      dtype,
      shape,
      dims,
      strides,
      padding,
      base_dilation,
      window_dilation,
  ):
    """JVP correctness of reduce_window with general (non-monoid) reducers."""
    rng = jtu.rand_small(self.rng())
    init_val = np.asarray(init_val, dtype=dtype)
    def fun(operand, init_val):
      return lax.reduce_window(
          operand,
          init_val,
          op,
          dims,
          strides,
          padding,
          base_dilation,
          window_dilation,
      )
    args_maker = lambda: [rng(shape, dtype), init_val]
    self._CompileAndCheck(fun, args_maker)
    args = args_maker()
    init_val = args[1]
    # we separately test the version that uses a concrete init_val because it
    # can hit different code paths
    def fun2(operand):
      return lax.reduce_window(
          operand,
          init_val,
          op,
          dims,
          strides,
          padding,
          base_dilation,
          window_dilation,
      )
    args_maker = lambda: [rng(shape, dtype)]
    self._CompileAndCheck(fun2, args_maker)
    operand = args_maker()[0]
    # Compare the analytic JVP against numerical differentiation.
    jtu.check_jvp(fun2, partial(jax.jvp, fun2), (operand,))
    check_grads(fun2, (operand,), 3, ["fwd"], eps=1.)
  @jtu.sample_product(
      [
          dict(init_val=init_val, op=op, dtype=dtype)
          for init_val, op, dtypes in [
              (-np.inf, lax.max, [np.float32]),
              (np.inf, lax.min, [np.float32]),
              (0, lax.add, [np.float32]),
          ]
          for dtype in dtypes
      ],
      [
          dict(
              shape=shape,
              dims=dims,
              strides=strides,
              padding=padding,
              base_dilation=base_dilation,
              window_dilation=window_dilation,
          )
          for shape, dims, strides, padding, base_dilation, window_dilation in (
              itertools.chain(
                  itertools.product(
                      [(4, 6)],
                      [(2, 1), (1, 2)],
                      [(1, 1), (2, 1), (1, 2)],
                      ['VALID', 'SAME', [(0, 3), (1, 2)]],
                      [(1, 1), (2, 3)],
                      [(1, 1), (1, 2)],
                  ),
                  itertools.product(
                      [(3, 2, 4, 6)],
                      [(1, 1, 2, 1), (2, 1, 2, 1)],
                      [(1, 2, 2, 1), (1, 1, 1, 1)],
                      ['VALID', 'SAME', [(0, 1), (1, 0), (2, 3), (0, 2)]],
                      [(1, 1, 1, 1), (2, 1, 3, 2)],
                      [(1, 1, 1, 1), (1, 2, 2, 1)],
                  ),
              )
          )
      ],
  )
  @jtu.skip_on_devices('gpu')  # jax.lax.mul has an XLA bug on GPU b/339071103
  @jtu.skip_on_devices('tpu')  # b/39342488
  def testReduceWindowCustomSameAsMonoid(
      self,
      op,
      init_val,
      dtype,
      shape,
      dims,
      strides,
      padding,
      base_dilation,
      window_dilation,
  ):
    """Monoid reducers and their custom equivalents must have identical JVPs."""
    rng = jtu.rand_small(self.rng())
    init_val = np.asarray(init_val, dtype=dtype)
    def fun(op_, operand_):
      return lax.reduce_window(
          operand_,
          init_val,
          op_,
          dims,
          strides,
          padding,
          base_dilation,
          window_dilation,
      )
    args_maker = lambda: [rng(shape, dtype)]
    args = args_maker()
    operand = args[0]
    rng = np.random.RandomState(0)
    tangent = tree_map(partial(jtu.rand_like, rng), operand)
    # There are "special" paths for "monoid" ops that have
    # their jvp defined separately, either for legacy reasons
    # or for optimization - compare across both and prove
    # that their jvp is the same.
    # TODO(voz): Look into the "monoid" paths and collapse them as necessary.
    # Especially when we go to add support for (1) recursive is_jvp (hessians),
    # and (2) transpose?
    custom_equiv = {
        lax.max: _reduce_custom_max,
        lax.min: _reduce_custom_min,
        lax.add: _reduce_custom_add,
    }
    custom_op = custom_equiv[op]
    custom_primals, custom_tangents = jax.jvp(
        partial(fun, custom_op),
        primals=(operand,),
        tangents=(tangent,),
    )
    lax_primals, lax_tangents = jax.jvp(
        partial(fun, op),
        primals=(operand,),
        tangents=(tangent,),
    )
    # tol = 1e-4
    # None is sane defaults, but useful to have here for debugging.
    tol = None
    jtu.check_close(
        lax_primals,
        custom_primals,
        atol=tol,
        rtol=tol,
        err_msg='Mismatched primal',
    )
    jtu.check_close(
        lax_tangents,
        custom_tangents,
        atol=tol,
        rtol=tol,
        err_msg='Mismatched tangents',
    )
    # Numerical jvp comparison for min and max values
    # does not work - the underlying implementation of the test util
    # nans on infs.
    if init_val.item() in (np.inf, -np.inf):
      return
    op_bound_fn = partial(fun, op)
    jtu.check_jvp(
        op_bound_fn,
        partial(jax.jvp, op_bound_fn),
        (operand,),
    )
    check_grads(partial(fun, op), [operand], 3, ["fwd"], eps=1.)
    check_grads(partial(fun, custom_op), [operand], 3, ["fwd"], eps=1.)
  # TODO(b/183233858): variadic reduce-window is not implemented on XLA:GPU
  @jtu.sample_product(
      [
          dict(
              shape=shape,
              dims=dims,
              strides=strides,
              padding=padding,
              base_dilation=base_dilation,
              window_dilation=window_dilation,
          )
          for shape, dims, strides, padding, base_dilation, window_dilation in (
              itertools.chain(
                  itertools.product(
                      [(4, 6)],
                      [(2, 1), (1, 2)],
                      [(1, 1), (2, 1), (1, 2)],
                      ['VALID', 'SAME', [(0, 3), (1, 2)]],
                      [(1, 1), (2, 3)],
                      [(1, 1), (1, 2)],
                  ),
                  itertools.product(
                      [(3, 2, 4, 6)],
                      [(1, 1, 2, 1), (2, 1, 2, 1)],
                      [(1, 2, 2, 1), (1, 1, 1, 1)],
                      ['VALID', 'SAME', [(0, 1), (1, 0), (2, 3), (0, 2)]],
                      [(1, 1, 1, 1), (2, 1, 3, 2)],
                      [(1, 1, 1, 1), (1, 2, 2, 1)],
                  ),
              )
          )
      ],
      dtype=[np.float32],
  )
  @jtu.skip_on_devices('gpu')
  def testReduceWindowVariadic(self, dtype, shape, dims, strides, padding,
                               base_dilation, window_dilation):
    """Variadic reduce_window: one reducer over a tuple of two operands."""
    if (jtu.test_device_matches(["tpu"]) and
        any(d != 1 for d in window_dilation)):
      raise SkipTest("TPU support missing for arbitrary window dilation.")
    rng = jtu.rand_small(self.rng())
    # One init value per operand: 0 for the sum lane, -inf for the max lane.
    init_values = (np.asarray(0, dtype=dtype), np.array(-np.inf, dtype=dtype))
    def reducer(xs, ys):
      x1, x2 = xs
      y1, y2 = ys
      return (x1 + y1, lax.max(x2, y2))
    def fun(*operands):
      return lax.reduce_window(operands, init_values, reducer, dims, strides,
                               padding, base_dilation, window_dilation)
    def reference_fun(*operands):
      # The reference has no variadic support; reduce each lane separately.
      return [
          lax_reference.reduce_window(operand, init_val, op, dims, strides,
                                      padding, base_dilation)
          for operand, init_val, op in zip(operands, init_values,
                                           [np.add, np.maximum])]
    args_maker = lambda: [rng(shape, dtype), rng(shape, dtype)]
    self._CompileAndCheck(fun, args_maker)
    if all(d == 1 for d in window_dilation):
      self._CheckAgainstNumpy(reference_fun, fun, args_maker)
def testReduceWindowFailures(self):
def empty_window_test():
return lax.reduce_window(np.ones((1,)), 0., lax.add, padding='VALID',
window_dimensions=(0,), window_strides=(1,))
def zero_stride_test():
return lax.reduce_window(np.ones((1,)), 0., lax.add, padding='VALID',
window_dimensions=(1,), window_strides=(0,))
for failure_fun in [empty_window_test, zero_stride_test]:
with self.assertRaisesRegex(TypeError, "must have every element be"):
failure_fun()
with self.assertRaisesRegex(
ValueError,
"reduce_window output must have the same tree structure as the "
"operands.*"):
return lax.reduce_window(
np.ones((1,)), 0., lambda x, y: [x + y],
padding='VALID', window_dimensions=(1,), window_strides=(1,))
@jtu.sample_product(
[dict(shape=shape, window_dimensions=window_dimensions,
base_dilation=base_dilation, window_dilation=window_dilation)
for shape, window_dimensions, base_dilation, window_dilation in (
itertools.chain(
itertools.product(
[(4, 6)],
[(1, 1), (3, 4)],
[(1, 1), (1, 2), (2, 13), (40, 60)],
[(1, 1), (1, 2), (2, 13), (40, 60)]),
itertools.product(
[(3, 2, 4, 6)],
[(1, 1, 1, 1), (2, 1, 2, 1)],
[(1, 1, 1, 1), (1, 2, 2, 1), (30, 40, 3, 2)],
[(1, 1, 1, 1), (1, 2, 2, 1), (30, 40, 3, 2)])))
],
)
def testReduceWindowShapeDilation(self, shape, window_dimensions,
base_dilation, window_dilation):
operand, padding, strides = np.ones(shape), 'SAME', (1,) * len(shape)
result = lax.reduce_window(operand, 0., lax.add, padding=padding,
window_strides=strides,
window_dimensions=window_dimensions)
# With a stride of 1 in each direction and a padding of 'SAME', the
# shape of the input should be equal to the shape of the result according
# to https://www.openxla.org/xla/operation_semantics#reducewindow.
self.assertEqual(shape, result.shape)
def testReduceWindowWithEmptyOutput(self):
# https://github.com/jax-ml/jax/issues/10315
shape = (5, 3, 2)
operand, padding, strides = np.ones(shape), 'VALID', (1,) * len(shape)
out = jax.eval_shape(lambda x: lax.reduce_window(x, 0., lax.add, padding=padding,
window_strides=strides,
window_dimensions=(3, 1, 1),
window_dilation=(3, 1, 1)), operand)
self.assertEqual((0, 3, 2), out.shape)
@jtu.sample_product(
[dict(op=op, np_op=np_op) for op, np_op in [
(lax.cumsum, np.cumsum),
(lax.cumprod, np.cumprod),
(lax.cummax, np.maximum.accumulate),
(lax.cummin, np.minimum.accumulate),
]],
[dict(shape=shape, axis=axis)
for shape in [[10], [3, 4, 5]] for axis in range(len(shape))],
dtype=lax_test_util.default_dtypes,
reverse=[False, True],
)
def testCumulativeReduce(self, op, np_op, shape, dtype, axis, reverse):
rng_factory = (jtu.rand_default if dtypes.issubdtype(dtype, np.integer)
else jtu.rand_small)
rng = rng_factory(self.rng())
fun = partial(op, axis=axis, reverse=reverse)
def np_fun(x):
if reverse:
return np.flip(np_op(np.flip(x, axis), axis=axis, dtype=dtype), axis)
else:
return np_op(x, axis=axis, dtype=dtype)
args_maker = lambda: [rng(shape, dtype)]
self._CompileAndCheck(fun, args_maker)
self._CheckAgainstNumpy(np_fun, fun, args_maker)
@jtu.sample_product(
[dict(shape=shape, axis=axis)
for shape in [[10], [3, 4, 5]] for axis in range(len(shape))],
dtype=lax_test_util.float_dtypes,
reverse=[False, True],
)
def testCumulativeLogSumExp(self, shape, dtype, axis, reverse):
# This op only works on floating-point types, so we've separated out the
# test.
rng = jtu.rand_small(self.rng())
fun = partial(lax.cumlogsumexp, axis=axis, reverse=reverse)
def np_fun(x):
if reverse:
return np.flip(np.logaddexp.accumulate(
np.flip(x, axis), axis=axis, dtype=dtype), axis)
else:
return np.logaddexp.accumulate(x, axis=axis, dtype=dtype)
args_maker = lambda: [rng(shape, dtype)]
self._CompileAndCheck(fun, args_maker)
tol = None
if jtu.test_device_matches(["tpu"]) and dtype == np.float32:
tol = 1e-4
self._CheckAgainstNumpy(np_fun, fun, args_maker, atol=tol, rtol=tol)
@jtu.sample_product(
shape=[(), (3,), (3, 4)],
dtype=lax_test_util.float_dtypes,
out_dtype=lax_test_util.float_dtypes,
)
def testReducePrecision(self, shape, dtype, out_dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
info = dtypes.finfo(out_dtype)
fun = lambda x: lax.reduce_precision(x, info.nexp, info.nmant)
np_fun = lambda x: np.asarray(x).astype(out_dtype).astype(dtype)
self._CheckAgainstNumpy(np_fun, fun, args_maker)
self._CompileAndCheck(fun, args_maker)
def testReducePrecisionGrad(self):
info = dtypes.finfo(jnp.dtype('bfloat16'))
y, f_vjp = jax.vjp(lambda x: lax.reduce_precision(x, info.nexp, info.nmant), jnp.pi)
y2 = f_vjp(jnp.pi)
y3 = lax.reduce_precision(jnp.pi, info.nexp, info.nmant)
self.assertArraysEqual(y, y2)
self.assertArraysEqual(y, y3)
@jtu.sample_product(
[dict(shape=shape, axis=axis)
for shape in [(5,), (5, 7)] for axis in [-1, len(shape) - 1]],
dtype=lax_test_util.all_dtypes,
is_stable=[False, True],
)
def testSort(self, shape, dtype, axis, is_stable):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
fun = lambda x: lax.sort(x, dimension=axis, is_stable=is_stable)
self._CompileAndCheck(fun, args_maker)
@jtu.sample_product(dtype=lax_test_util.float_dtypes)
def testSortFloatSpecialValues(self, dtype):
# Test confirms that
# - NaNs are sorted to the end, regardless of representation
# - sign bit of 0.0 is ignored
x = jnp.array([-np.inf, 0.0, -0.0, np.inf, np.nan, -np.nan], dtype=dtype)
index = lax.iota(int, x.size)
argsort = lambda x: lax.sort_key_val(x, lax.iota(int, x.size), is_stable=True)[1]
self.assertArraysEqual(argsort(x), index)
self.assertArraysEqual(jax.jit(argsort)(x), index)
@jtu.sample_product(
[dict(shape=shape, axis=axis)
for shape in [(5,), (5, 7)] for axis in [-1, len(shape) - 1]],
dtype=lax_test_util.all_dtypes,
is_stable=[False, True],
)
def testSortAgainstNumpy(self, shape, dtype, axis, is_stable):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.sort(x, dimension=axis, is_stable=is_stable)
def numpy_op(x):
if is_stable:
return lax_reference.sort(x, axis, kind='stable')
else:
return lax_reference.sort(x, axis)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
@jtu.sample_product(
[dict(shape=shape, axis=axis)
for shape in [(3,), (5, 3)] for axis in [-1, len(shape) - 1]],
key_dtype=lax_test_util.float_dtypes + lax_test_util.complex_dtypes +
lax_test_util.int_dtypes + lax_test_util.uint_dtypes,
val_dtype=[np.float32, np.int32, np.uint32],
is_stable=[False, True],
)
def testSortKeyVal(self, shape, key_dtype, val_dtype, axis, is_stable):
if (np.issubdtype(key_dtype, np.complexfloating) and
jtu.test_device_matches(["cpu"])):
raise SkipTest("Complex-valued sort not implemented")
rng = jtu.rand_default(self.rng())
# This test relies on the property that wherever keys are tied, values are
# too, since we don't guarantee the same ordering of values with equal keys.
# To avoid that case, we generate unique keys (globally in the key array).
def args_maker():
flat_keys = np.arange(math.prod(shape), dtype=key_dtype)
keys = self.rng().permutation(flat_keys).reshape(shape)
values = rng(shape, val_dtype)
return keys, values
fun = lambda keys, values: lax.sort_key_val(keys, values, axis, is_stable)
self._CompileAndCheck(fun, args_maker)
  @jtu.sample_product(
    [dict(shape=shape, num_keys=num_keys)
     for shape in [(3, 5), (4, 3)] for num_keys in range(1, shape[0] + 1)],
    dtype=lax_test_util.all_dtypes,
  )
  def testSortNumKeys(self, shape, dtype, num_keys):
    """Lexicographic multi-key sort: rows 0..num_keys-1 act as sort keys."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    lax_fun = lambda x: lax.sort(tuple(x), num_keys=num_keys)
    # np.lexsort treats its *last* key as primary, hence the [::-1] reversal.
    numpy_fun = lambda x: tuple(x[:, np.lexsort(x[:num_keys][::-1])])
    # NOTE(review): the compile check below was left disabled — presumably a
    # known failure; confirm before re-enabling.
    # self._CompileAndCheck(lax_fun, args_maker)
    self._CheckAgainstNumpy(numpy_fun, lax_fun, args_maker)
@jtu.sample_product(
[dict(shape=shape, axis=axis)
for shape in [(3,), (5, 3)] for axis in [-1, len(shape) - 1]],
key_dtype=lax_test_util.float_dtypes + lax_test_util.complex_dtypes +
lax_test_util.int_dtypes + lax_test_util.uint_dtypes,
val_dtype=[np.float32, np.int32, np.uint32],
)
def testSortKeyValAgainstNumpy(self, shape, key_dtype, val_dtype, axis):
if (np.issubdtype(key_dtype, np.complexfloating) and
jtu.test_device_matches(["cpu"])):
raise SkipTest("Complex-valued sort not implemented")
rng = jtu.rand_default(self.rng())
# This test relies on the property that wherever keys are tied, values are
# too, since we don't guarantee the same ordering of values with equal keys.
# To avoid that case, we generate unique keys (globally in the key array).
def args_maker():
flat_keys = np.arange(math.prod(shape), dtype=key_dtype)
keys = self.rng().permutation(flat_keys).reshape(shape)
values = rng(shape, val_dtype)
return keys, values
op = lambda ks, vs: lax.sort_key_val(ks, vs, axis)
numpy_op = lambda ks, vs: lax_reference.sort_key_val(ks, vs, axis)
self._CheckAgainstNumpy(numpy_op, op, args_maker)
@jtu.sample_product(
dtype=[np.float32, np.int32, np.uint32],
shape=[(20,), (8, 20), (2000,)],
k=[1, 3, 8],
axis=[0, -1]
)
def testTopK(self, shape, dtype, k, axis):
rng = jtu.rand_some_equal(self.rng())
def args_maker():
return [rng(shape, dtype)]
op = lambda vs: lax.top_k(vs, k=k, axis=axis)
ref_op = lambda vs: lax_reference.top_k(vs, k=k, axis=axis)
self._CheckAgainstNumpy(op, ref_op, args_maker)
self._CompileAndCheck(op, args_maker)
def testTopKOverflow(self):
x = jax.ShapeDtypeStruct((2 ** 31 + 1,), np.dtype('bfloat16'))
with self.assertRaisesRegex(ValueError, "top_k returns int32 indices, which will overflow"):
jax.eval_shape(lambda x: jax.lax.top_k(x, 100), x)
@jtu.sample_product(
[dict(lhs_shape=lhs_shape, rhs_shape=rhs_shape)
for lhs_shape, rhs_shape in [((3, 2), (2, 4)),
((5, 3, 2), (5, 2, 4)),
((1, 2, 2, 3), (1, 2, 3, 1))]],
dtype=lax_test_util.float_dtypes,
)
def testBatchMatMul(self, lhs_shape, rhs_shape, dtype):
rng = jtu.rand_small(self.rng())
arg_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
self._CompileAndCheck(lax.batch_matmul, arg_maker)
def testCollapse(self):
@jax.jit
def collapse_first_two(x):
return lax.collapse(x, 0, 2)
self.assertEqual((6,), collapse_first_two(np.zeros((2, 3))).shape)
self.assertEqual((6, 4), collapse_first_two(np.zeros((2, 3, 4))).shape)
self.assertEqual((2, 3, 4),
collapse_first_two(np.zeros((1, 2, 3, 4))).shape)
def testCollapseLastTwo(self):
@jax.jit
def collapse_last_two_none_end(x):
return lax.collapse(x, -2)
@jax.jit
def collapse_last_two_pos_end(x):
return lax.collapse(x, -2)
self.assertEqual((4, 3, 10),
collapse_last_two_none_end(np.zeros((4, 3, 2, 5))).shape)
self.assertEqual((4, 3, 10),
collapse_last_two_pos_end(np.zeros((4, 3, 2, 5))).shape)
@jtu.sample_product(
[dict(shape=shape, idxs=idxs, axes=axes)
for shape, idxs, axes in [
[(3, 4, 5), (np.array([0, 2, 1]),), (0,)],
[(3, 4, 5), (np.array([-1, -2]),), (0,)],
[(3, 4, 5), (np.array([0, 2]), np.array([1, 3])), (0, 1)],
[(3, 4, 5), (np.array([0, 2]), np.array([1, 3])), [0, 2]],
]],
dtype=lax_test_util.all_dtypes,
)
def testIndexTake(self, shape, dtype, idxs, axes):
rng = jtu.rand_default(self.rng())
rand_idxs = lambda: tuple(rng(e.shape, e.dtype) for e in idxs)
args_maker = lambda: [rng(shape, dtype), rand_idxs()]
fun = lambda src, idxs: lax.index_take(src, idxs, axes)
self._CompileAndCheck(fun, args_maker)
  @jtu.sample_product(
    [dict(shape=shape, idxs=idxs, dnums=dnums, slice_sizes=slice_sizes)
     for shape, idxs, dnums, slice_sizes in [
         ((5,), np.array([[0], [2]]), lax.GatherDimensionNumbers(
             offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
          (1,)),
         ((10,), np.array([[0], [0], [0]]), lax.GatherDimensionNumbers(
             offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
          (2,)),
         ((10, 5,), np.array([[0], [2], [1]]), lax.GatherDimensionNumbers(
             offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
          (1, 3)),
         ((10, 5), np.array([[0, 2], [1, 0]]), lax.GatherDimensionNumbers(
             offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)),
          (1, 3)),
         ((2, 5), np.array([[[0], [2]], [[1], [1]]]),
          lax.GatherDimensionNumbers(
              offset_dims=(), collapsed_slice_dims=(1,),
              start_index_map=(1,), operand_batching_dims=(0,),
              start_indices_batching_dims=(0,)),
          (1, 1)),
         ((2, 3, 10), np.array([[[0], [1]], [[2], [3]], [[4], [5]]]),
          lax.GatherDimensionNumbers(
              offset_dims=(2,), collapsed_slice_dims=(),
              start_index_map=(2,), operand_batching_dims=(0, 1),
              start_indices_batching_dims=(1, 0)),
          (1, 1, 3)),
         # This test verifies that we allow slice sizes that would not fit in
         # the operand if indices were empty. This is a useful base case.
         ((0,), np.zeros((0, 1), dtype=np.int32), lax.GatherDimensionNumbers(
             offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
          (1,)),
     ]],
    dtype=lax_test_util.all_dtypes,
  )
  def testGather(self, shape, dtype, idxs, dnums, slice_sizes):
    """Compile-and-check lax.gather for a variety of dimension-number setups,
    including operand/start-indices batching dimensions."""
    rng = jtu.rand_default(self.rng())
    # Index values are drawn in-range for the operand's largest dimension.
    rng_idx = jtu.rand_int(self.rng(), high=max(shape))
    rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)
    args_maker = lambda: [rng(shape, dtype), rand_idxs()]
    fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)
    self._CompileAndCheck(fun, args_maker)
# These tests are adapted from the corresponding tests in
# tensorflow/compiler/xla/service/shape_inference_test.cc with slight
# variations to account for the implicit setting of index_vector_dim in JAX.
@parameterized.named_parameters(
{"testcase_name": f"_{testcase_name}", "operand_shape": operand_shape,
"indices_shape": indices_shape,
"dimension_numbers": dimension_numbers,
"slice_sizes": slice_sizes, "msg": msg}
for (testcase_name, operand_shape, indices_shape, dimension_numbers,
slice_sizes, msg) in [
("NonAscendingWindowIndices", (10, 9, 8, 7, 6), (5, 4, 3, 2, 1),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6, 8, 7), collapsed_slice_dims=(),
start_index_map=(0, 1, 2, 3, 4)),
(10, 9, 8, 7, 6), "offset_dims in gather op must be sorted"),
("RepeatedWindowIndices", (10, 9, 8, 7, 6), (5, 4, 3, 2, 1),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6, 7, 7), collapsed_slice_dims=(),
start_index_map=(0, 1, 2, 3, 4)),
(10, 9, 8, 7, 6), "offset_dims in gather op must not repeat"),
("WindowIndexOutOfBounds", (10, 9, 8, 7, 6), (5, 4, 3, 2, 1),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 100, 101, 102), collapsed_slice_dims=(),
start_index_map=(0, 1, 2, 3, 4)),
(10, 9, 8, 7, 6), "Offset dimension 2 in gather op is out of bounds"),
("WindowIndexBarelyOutOfBounds", (10, 9, 8, 7, 6), (5, 4, 3, 2, 1),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6, 7, 9), collapsed_slice_dims=(),
start_index_map=(0, 1, 2, 3, 4)),
(10, 9, 8, 7, 6), "Offset dimension 4 in gather op is out of bounds"),
("MismatchingElidedWindowDims", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6, 7, 8), collapsed_slice_dims=(4,),
start_index_map=(0, 1, 2, 3, 4)),
(10, 9, 8, 7, 6),
("All components of the offset index in a gather op must either be a "
"offset dimension or explicitly collapsed/batching")),
("MismatchingElidedWindowDimsV2", (10, 9, 8, 7, 6, 5), (10, 4, 3, 2, 4),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6, 7, 8), collapsed_slice_dims=(4,),
start_index_map=(1, 2, 3, 4), operand_batching_dims=(0,),
start_indices_batching_dims=(0,)),
(10, 9, 8, 7, 6, 5),
("All components of the offset index in a gather op must either be a "
"offset dimension or explicitly collapsed/batching")),
("OutOfBoundsWindowToInputMapping", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6, 7, 8), collapsed_slice_dims=(0, 1, 2, 3, 19),
start_index_map=(0, 1, 2, 3, 4)),
(10, 9, 8, 7, 6),
"Invalid collapsed_slice_dims set in gather op; valid range is"),
("RepeatedWindowToInputMapping", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6, 7, 8), collapsed_slice_dims=(0, 1, 2, 3, 3),
start_index_map=(0, 1, 2, 3, 4)),
(10, 9, 8, 7, 6), "collapsed_slice_dims in gather op must not repeat"),
("MismatchingGatherToInputMapping", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6, 7, 8), collapsed_slice_dims=(),
start_index_map=(0, 1, 2, 3)),
(10, 9, 8, 7, 6),
("Gather op has 4 elements in start_index_map and the bound of "
"dimension index_vector_dim=4 of indices is 5. These two "
"numbers must be equal.")),
("OutOfBoundsGatherToInputMapping", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6, 7, 8), collapsed_slice_dims=(),
start_index_map=(0, 1, 2, 3, 7)),
(10, 9, 8, 7, 6), "Invalid start_index_map"),
("RepeatedGatherToInputMapping", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6, 7, 8), collapsed_slice_dims=(),
start_index_map=(0, 1, 2, 3, 3)),
(10, 9, 8, 7, 6), "start_index_map in gather op must not repeat"),
("NonAscendingElidedWindowDims", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6, 7, 8), collapsed_slice_dims=(2, 1),
start_index_map=(0, 1, 2, 3, 4)),
(10, 9, 8, 7, 6),
"collapsed_slice_dims in gather op must be sorted"),
("WindowBoundsTooLarge", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6, 7), collapsed_slice_dims=(2,),
start_index_map=(0, 1, 2, 3, 4)),
(10, 9, 8, 100, 6),
"Slice size at index 3 in gather op is out of range"),
("MismatchingNumberOfWindowBounds", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6, 7), collapsed_slice_dims=(),
start_index_map=(0, 1, 2, 3, 4)),
(10, 9, 8, 7),
"Gather op must have one slice size for every input dimension"),
("WindowBoundsNot1ForElidedDim", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6, 7), collapsed_slice_dims=(1,),
start_index_map=(0, 1, 2, 3, 4)),
(10, 9, 8, 7, 6),
("Gather op can only collapse slice dims with bound 1, but bound "
"is 9 for index 1 at position 0.")),
("RepeatedOperandBatchingDims", (10, 9, 8, 7, 6), (5, 4, 3, 2, 3),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6), collapsed_slice_dims=(0, 1),
start_index_map=(0, 1, 4), operand_batching_dims=(2, 3, 3)),
(10, 9, 8, 7, 6),
"operand_batching_dims in gather op must not repeat"),
("NonAscendingOperandBatchingDims", (10, 9, 8, 7, 6), (5, 4, 3, 2, 3),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6), collapsed_slice_dims=(0, 1),
start_index_map=(0, 1, 4), operand_batching_dims=(3, 2)),
(10, 9, 8, 7, 6),
"operand_batching_dims in gather op must be sorted"),
("OutOfBoundsOperandBatchingDims", (10, 9, 8, 7, 6),
(5, 4, 3, 2, 5),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6), collapsed_slice_dims=(0, 1),
start_index_map=(0, 1, 2, 3, 4),
operand_batching_dims=(0, 10)),
(10, 9, 8, 7, 6),
"Invalid operand_batching_dims set in gather op; valid range is"),
("NonDisjointCollapsedAndBatchingDims", (10, 9, 8, 7, 6),
(5, 4, 3, 2, 3),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6), collapsed_slice_dims=(0, 1, 2),
start_index_map=(0, 1, 4), operand_batching_dims=(2, 3)),
(10, 9, 8, 7, 6),
("collapsed_slice_dims and operand_batching_dims in gather op must be "
"disjoint")),
("NonDisjointStartIndexMapAndBatchingDims", (10, 9, 8, 7, 6),
(5, 4, 3, 2, 4),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6), collapsed_slice_dims=(0, 1),
start_index_map=(0, 1, 2, 4), operand_batching_dims=(2, 3)),
(10, 9, 8, 7, 6),
("start_index_map and operand_batching_dims in gather op must be "
"disjoint")),
("WindowBoundsNot1ForBatchingDim", (10, 9, 8, 7, 6), (9, 4, 3, 2, 4),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6, 7), collapsed_slice_dims=(),
start_index_map=(0, 2, 3, 4), operand_batching_dims=(1,),
start_indices_batching_dims=(0,)),
(10, 9, 8, 7, 6),
("Gather op can only have operand batching dims with bound 0/1, but "
"bound is 9 for index 1 at position 0.")),
("RepeatedStartIndicesBatchingDims", (10, 9, 8, 7, 6), (5, 4, 3, 2, 5),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6), collapsed_slice_dims=(0, 1),
start_index_map=(0, 1, 2, 3, 4),
start_indices_batching_dims=(0, 1, 0)),
(10, 9, 8, 7, 6),
"start_indices_batching_dims in gather op must not repeat"),
("OutOfBoundsStartIndicesBatchingDims", (10, 9, 8, 7, 6),
(5, 4, 3, 2, 5),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6), collapsed_slice_dims=(0, 1),
start_index_map=(0, 1, 2, 3, 4),
start_indices_batching_dims=(0, 5)),
(10, 9, 8, 7, 6),
"Invalid start_indices_batching_dims set in gather op; valid range"),
("IndexVectorDimInStartIndicesBatchingDims", (10, 9, 8, 7, 6),
(5, 4, 3, 2, 5),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6), collapsed_slice_dims=(0, 1),
start_index_map=(0, 1, 2, 3, 4),
start_indices_batching_dims=(0, 4)),
(10, 9, 8, 7, 6),
("Gather op cannot have the index vector dimension as a batching "
"dimension")),
("MismatchingNumberOfBatchingDims", (10, 9, 8, 7, 6), (5, 4, 3, 2, 4),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6), collapsed_slice_dims=(1, 2),
start_index_map=(1, 2, 3, 4), operand_batching_dims=(0,),
start_indices_batching_dims=(0, 1)),
(10, 9, 8, 7, 6),
("Gather op requires equal numbers of operand_batching_dims and "
"start_indices_batching_dims")),
("MismatchingBatchingDimSizes", (10, 9, 8, 7, 6), (10, 9, 3, 2, 3),
lax.GatherDimensionNumbers(
offset_dims=(4, 5, 6, 7, 8), collapsed_slice_dims=(2, 3, 4),
start_index_map=(2, 3, 4), operand_batching_dims=(0, 1),
start_indices_batching_dims=(1, 0)),
(10, 9, 8, 7, 6),
("Gather op requires operand batching dimensions and indices batching "
"dimensions to have the same shape"))
]
)
  def testGatherShapeCheckingRule(self, operand_shape, indices_shape,
                                  dimension_numbers, slice_sizes, msg):
    """Check that an invalid gather configuration raises the expected TypeError.

    Args:
      operand_shape: shape of the operand being gathered from.
      indices_shape: shape of the start-indices array.
      dimension_numbers: lax.GatherDimensionNumbers under test (invalid).
      slice_sizes: per-dimension slice sizes passed to lax.gather.
      msg: regex that the raised TypeError's message must match.
    """
    operand = np.ones(operand_shape, dtype=np.int32)
    indices = np.ones(indices_shape, dtype=np.int32)
    with self.assertRaisesRegex(TypeError, msg):
      lax.gather(operand, indices, dimension_numbers, slice_sizes)
@jtu.sample_product(
[dict(arg_shape=arg_shape, idxs=idxs, update_shape=update_shape,
dnums=dnums)
for arg_shape, idxs, update_shape, dnums in [
((5,), np.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((10,), np.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0,))),
((10, 5), np.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((2, 5), np.array([[[0], [2]], [[1], [1]]]), (2, 2),
lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(1,),
scatter_dims_to_operand_dims=(1,), operand_batching_dims=(0,),
scatter_indices_batching_dims=(0,))),
((2, 3, 10), np.array([[[0], [1]], [[2], [3]], [[4], [5]]]),
(3, 2, 3), lax.ScatterDimensionNumbers(
update_window_dims=(2,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(2,), operand_batching_dims=(0, 1),
scatter_indices_batching_dims=(1, 0)))
]],
dtype=lax_test_util.inexact_dtypes,
mode=["clip", "fill", None],
op=[lax.scatter_add, lax.scatter_sub],
)
def testScatterAddSub(self, arg_shape, dtype, idxs, update_shape, dnums, mode, op):
rng = jtu.rand_default(self.rng())
rng_idx = jtu.rand_int(self.rng(), high=max(arg_shape))
rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)
args_maker = lambda: [rng(arg_shape, dtype), rand_idxs(),
rng(update_shape, dtype)]
fun = partial(op, dimension_numbers=dnums, mode=mode)
self._CompileAndCheck(fun, args_maker)
@jtu.sample_product(
[dict(arg_shape=arg_shape, idxs=idxs, update_shape=update_shape,
dnums=dnums)
for arg_shape, idxs, update_shape, dnums in [
((5,), np.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((10,), np.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0,))),
((10, 5), np.array([[0], [2], [1]], dtype=np.uint64), (3, 3), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((2, 5), np.array([[[0], [2]], [[1], [1]]]), (2, 2),
lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(1,),
scatter_dims_to_operand_dims=(1,), operand_batching_dims=(0,),
scatter_indices_batching_dims=(0,))),
((2, 3, 10), np.array([[[0], [1]], [[2], [3]], [[4], [5]]]),
(3, 2, 3), lax.ScatterDimensionNumbers(
update_window_dims=(2,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(2,), operand_batching_dims=(0, 1),
scatter_indices_batching_dims=(1, 0)))
]],
dtype=lax_test_util.float_dtypes,
)
def testScatterMin(self, arg_shape, dtype, idxs, update_shape, dnums):
rng = jtu.rand_default(self.rng())
rng_idx = jtu.rand_int(self.rng(), high=max(arg_shape))
rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)
args_maker = lambda: [rng(arg_shape, dtype), rand_idxs(),
rng(update_shape, dtype)]
fun = partial(lax.scatter_min, dimension_numbers=dnums)
self._CompileAndCheck(fun, args_maker)
@jtu.sample_product(
[dict(arg_shape=arg_shape, idxs=idxs, update_shape=update_shape,
dnums=dnums)
for arg_shape, idxs, update_shape, dnums in [
((5,), np.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((10,), np.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0,))),
((10, 5), np.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((2, 5), np.array([[[0], [2]], [[1], [1]]]), (2, 2),
lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(1,),
scatter_dims_to_operand_dims=(1,), operand_batching_dims=(0,),
scatter_indices_batching_dims=(0,))),
((2, 3, 10), np.array([[[0], [1]], [[2], [3]], [[4], [5]]]),
(3, 2, 3), lax.ScatterDimensionNumbers(
update_window_dims=(2,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(2,), operand_batching_dims=(0, 1),
scatter_indices_batching_dims=(1, 0)))
]],
dtype=lax_test_util.float_dtypes,
)
def testScatterMax(self, arg_shape, dtype, idxs, update_shape, dnums):
rng = jtu.rand_default(self.rng())
rng_idx = jtu.rand_int(self.rng(), high=max(arg_shape))
rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)
args_maker = lambda: [rng(arg_shape, dtype), rand_idxs(),
rng(update_shape, dtype)]
fun = partial(lax.scatter_max, dimension_numbers=dnums)
self._CompileAndCheck(fun, args_maker)
@jtu.sample_product(
[dict(arg_shape=arg_shape, idxs=idxs, update_shape=update_shape, dnums=dnums)
for arg_shape, idxs, update_shape, dnums in [
((5,), np.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((10,), np.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0,))),
((10, 5), np.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((2, 5), np.array([[[0], [2]], [[1], [1]]]), (2, 2),
lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(1,),
scatter_dims_to_operand_dims=(1,), operand_batching_dims=(0,),
scatter_indices_batching_dims=(0,))),
((2, 3, 10), np.array([[[0], [1]], [[2], [3]], [[4], [5]]]),
(3, 2, 3), lax.ScatterDimensionNumbers(
update_window_dims=(2,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(2,), operand_batching_dims=(0, 1),
scatter_indices_batching_dims=(1, 0)))
]],
dtype=lax_test_util.float_dtypes,
)
def testScatterApply(self, arg_shape, dtype, idxs, update_shape, dnums):
rng = jtu.rand_default(self.rng())
rng_idx = jtu.rand_int(self.rng(), high=max(arg_shape))
rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)
args_maker = lambda: [rng(arg_shape, dtype), rand_idxs()]
fun = partial(lax.scatter_apply, func=jnp.sin, update_shape=update_shape, dimension_numbers=dnums)
self._CompileAndCheck(fun, args_maker)
@jtu.sample_product(
[dict(arg_shape=arg_shape, idxs=idxs, update_shape=update_shape,
dnums=dnums)
for arg_shape, idxs, update_shape, dnums in [
((5,), np.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((10,), np.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0,))),
((10, 5), np.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((2, 5), np.array([[[0], [2]], [[1], [1]]]), (2, 2),
lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(1,),
scatter_dims_to_operand_dims=(1,), operand_batching_dims=(0,),
scatter_indices_batching_dims=(0,))),
((2, 3, 10), np.array([[[0], [1]], [[2], [3]], [[4], [5]]]),
(3, 2, 3), lax.ScatterDimensionNumbers(
update_window_dims=(2,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(2,), operand_batching_dims=(0, 1),
scatter_indices_batching_dims=(1, 0)))
]],
dtype=lax_test_util.float_dtypes,
)
def testScatter(self, arg_shape, dtype, idxs, update_shape, dnums):
rng = jtu.rand_default(self.rng())
rng_idx = jtu.rand_int(self.rng(), high=max(arg_shape))
rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)
args_maker = lambda: [rng(arg_shape, dtype), rand_idxs(),
rng(update_shape, dtype)]
fun = partial(lax.scatter, dimension_numbers=dnums)
self._CompileAndCheck(fun, args_maker)
# These tests are adapted from the corresponding tests in
# tensorflow/compiler/xla/service/shape_inference_test.cc with slight
# variations to account for the implicit setting of index_vector_dim in JAX.
@parameterized.named_parameters(
{"testcase_name": f"_{testcase_name}", "operand_shape": operand_shape,
"indices_shape": indices_shape, "update_shape": update_shape,
"dimension_numbers": dimension_numbers,
"msg": msg}
for (testcase_name, operand_shape, indices_shape, update_shape,
dimension_numbers, msg) in [
("ScatterWithUpdatesBiggerThanInput", (64, 48), (32, 1), (65, 32),
lax.ScatterDimensionNumbers(
update_window_dims=(0,), inserted_window_dims=(1,),
scatter_dims_to_operand_dims=(1,)),
"Bounds of the window dimensions"),
("ScatterWithUpdatesBiggerThanInputV2", (64, 48), (32, 1),
(32, 49), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(1,)),
"Bounds of the window dimensions"),
("ScatterWithUpdatesNotMatchingIndices", (64, 48), (32, 1),
(64, 31), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(1,)),
"Bounds of the scatter dimensions"),
("ScatterWithUpdatesNotMatchingIndicesV2", (64, 48), (32, 1),
(31, 48), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(1,)),
"Bounds of the scatter dimensions"),
("ScatterNdWithUpdatesBiggerThanInput", (64, 48),
(10, 9, 8, 7, 1), (10, 9, 8, 7, 65),
lax.ScatterDimensionNumbers(
update_window_dims=(4,), inserted_window_dims=(1,),
scatter_dims_to_operand_dims=(1,)),
"Bounds of the window dimensions"),
("ScatterNdWithUpdatesNotMatchingIndices", (64, 48),
(10, 9, 8, 7, 1), (9, 9, 8, 7, 64),
lax.ScatterDimensionNumbers(
update_window_dims=(4,), inserted_window_dims=(1,),
scatter_dims_to_operand_dims=(0,)),
"Bounds of the scatter dimensions"),
("InvalidUpdates", (50, 49, 48, 47, 46), (10, 9, 8, 7, 5),
(10, 9, 8, 7, 3, 2, 4, 1),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(1, 2),
scatter_dims_to_operand_dims=(0, 1, 2, 3, 4)),
"Updates tensor must be of rank 7; got 8."),
("NonAscendingUpdateWindowDims", (6, 5, 4, 3, 2), (5, 4, 3, 2, 1),
(10, 9, 8, 7, 6, 5, 4, 3, 2),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6, 8, 7), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0, 1, 2, 3, 4)),
"update_window_dims in scatter op must be sorted"),
("RepeatedUpdateWindowDims", (6, 5, 4, 3, 2), (5, 4, 3, 2, 1),
(10, 9, 8, 7, 6, 5, 4, 3, 2),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6, 7, 7), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0, 1, 2, 3, 4)),
"update_window_dims in scatter op must not repeat"),
("OutOfBoundsUpdateWindowDims", (6, 5, 4, 3, 2), (5, 4, 3, 2, 1),
(10, 9, 8, 7, 6, 5, 4, 3, 2),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6, 7, 9), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0, 1, 2, 3, 4)),
"Invalid update_window_dims set in scatter op"),
("NonAscendingInsertedWindowDims", (50, 49, 48, 47, 46),
(10, 9, 8, 7, 5), (10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(2, 1),
scatter_dims_to_operand_dims=(0, 1, 2, 3, 4)),
"inserted_window_dims in scatter op must be sorted"),
("RepeatedInsertedWindowDims", (50, 49, 48, 47, 46),
(10, 9, 8, 7, 5), (10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(1, 1),
scatter_dims_to_operand_dims=(0, 1, 2, 3, 4)),
"inserted_window_dims in scatter op must not repeat"),
("OutOfBoundsInsertedWindowDims", (50, 49, 48, 47, 46),
(10, 9, 8, 7, 5), (10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(1, 5),
scatter_dims_to_operand_dims=(0, 1, 2, 3, 4)),
"Invalid inserted_window_dims set in scatter op"),
("MismatchingScatterDimsToOperandDims", (50, 49, 48, 47, 46),
(10, 9, 8, 7, 5), (10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(1, 2),
scatter_dims_to_operand_dims=(0, 1, 2, 3)),
("Scatter op has 4 elements in scatter_dims_to_operand_dims and "
"the bound of dimension index_vector_dim=4 of indices "
"is 5. These two numbers must be equal")),
("OutOfBoundsScatterDimsToOperandDims", (50, 49, 48, 47, 46),
(10, 9, 8, 7, 5), (10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(1, 2),
scatter_dims_to_operand_dims=(0, 1, 2, 3, 10)),
"Invalid scatter_dims_to_operand_dims mapping"),
("RepeatedValuesInScatterDimsToOperandDims", (50, 49, 48, 47, 46),
(10, 9, 8, 7, 5), (10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(1, 2),
scatter_dims_to_operand_dims=(0, 1, 2, 2, 3)),
"scatter_dims_to_operand_dims in scatter op must not repeat"),
("InsufficientWindowDims", (50, 49, 48, 47, 46),
(10, 9, 8, 7, 4), (10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(1,),
scatter_dims_to_operand_dims=(0, 1, 2, 3)),
("Scatter op has window of size 4; doesn't match operand of "
"rank 5.")),
("InsufficientWindowDimsV2", (10, 49, 48, 47, 46, 45),
(10, 9, 8, 7, 3), (10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(1,),
scatter_dims_to_operand_dims=(1, 2, 3),
operand_batching_dims=(0,),
scatter_indices_batching_dims=(0,)),
("Scatter op has window of size 5; doesn't match operand of "
"rank 6.")),
("RepeatedOperandBatchingDims", (50, 49, 48, 47, 46),
(10, 9, 8, 7, 5), (10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(0, 1),
scatter_dims_to_operand_dims=(0, 1, 4),
operand_batching_dims=(2, 3, 3)),
"operand_batching_dims in scatter op must not repeat"),
("NonAscendingOperandBatchingDims", (50, 49, 48, 47, 46),
(10, 9, 8, 7, 5), (10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(0, 1),
scatter_dims_to_operand_dims=(0, 1, 4),
operand_batching_dims=(3, 2)),
"operand_batching_dims in scatter op must be sorted"),
("OutOfBoundsOperandBatchingDims", (50, 49, 48, 47, 46),
(10, 9, 8, 7, 5), (10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0, 1, 2, 3, 4),
operand_batching_dims=(0, 10)),
("Invalid operand_batching_dims set in scatter op; valid range "
"is")),
("NonDisjointCollapsedAndBatchingDims", (50, 49, 48, 47, 46, 45),
(10, 9, 8, 7, 5), (10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(0, 1),
scatter_dims_to_operand_dims=(0, 1, 4),
operand_batching_dims=(1, 2)),
("inserted_window_dims and operand_batching_dims in scatter op "
"must be disjoint")),
("NonDisjointScatterDimsToOperandDimsAndBatchingDims",
(50, 49, 48, 47, 46), (10, 9, 8, 7, 5),
(10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(0, 1),
scatter_dims_to_operand_dims=(0, 1, 2, 4),
operand_batching_dims=(2, 3)),
("scatter_dims_to_operand_dims and operand_batching_dims in "
"scatter op must be disjoint")),
("RepeatedScatterIndicesBatchingDims", (50, 49, 48, 47, 46),
(10, 9, 8, 7, 5), (10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(0, 1),
scatter_dims_to_operand_dims=(0, 1, 2, 3, 4),
scatter_indices_batching_dims=(0, 1, 0)),
"scatter_indices_batching_dims in scatter op must not repeat"),
("OutOfBoundsScatterIndicesBatchingDims", (50, 49, 48, 47, 46),
(10, 9, 8, 7, 5), (10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(0, 1),
scatter_dims_to_operand_dims=(0, 1, 2, 3, 4),
scatter_indices_batching_dims=(0, 5)),
("Invalid scatter_indices_batching_dims set in scatter op; "
"valid range")),
("IndexVectorDimInScatterIndicesBatchingDims",
(50, 49, 48, 47, 46), (10, 9, 8, 7, 5),
(10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(0, 1),
scatter_dims_to_operand_dims=(0, 1, 2, 3, 4),
scatter_indices_batching_dims=(0, 4)),
("Scatter op cannot have the index vector dimension as a "
"batching dimension")),
("MismatchingNumberOfBatchingDims", (50, 49, 48, 47, 46, 45),
(10, 9, 8, 7, 4), (10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(1, 2),
scatter_dims_to_operand_dims=(1, 2, 3, 4),
operand_batching_dims=(0,),
scatter_indices_batching_dims=(0, 1)),
("Scatter op requires equal numbers of operand_batching_dims "
"and scatter_indices_batching_dims")),
("MismatchingBatchingDimSizes", (10, 9, 48, 47, 46, 45),
(10, 9, 8, 7, 2), (10, 9, 8, 7, 3, 2, 4),
lax.ScatterDimensionNumbers(
update_window_dims=(4, 5, 6), inserted_window_dims=(2,),
scatter_dims_to_operand_dims=(2, 3),
operand_batching_dims=(0, 1),
scatter_indices_batching_dims=(1, 0)),
("Scatter op requires operand batching dimensions and indices "
"batching dimensions to have the same shape"))
]
)
def testScatterShapeCheckingRule(self, operand_shape, indices_shape,
update_shape, dimension_numbers, msg):
indices = np.zeros(indices_shape, dtype=np.int32)
def f(x, y):
operand = lax.broadcast(x, operand_shape)
updates = lax.broadcast(y, update_shape)
return lax.scatter(operand, indices, updates, dimension_numbers)
with self.assertRaisesRegex(TypeError, msg):
jax.eval_shape(f, np.int32(1), np.int32(1))
def testIssue831(self):
# Tests the DeviceTuple constant handler
def f(x):
g = lambda *args: args[1]
return jax.jit(lax.fori_loop, static_argnums=(2,))( 0, 10, g, x)
jax.jit(f)(1.) # doesn't crash
def testReshapeWithUnusualShapes(self):
ans = lax.reshape(np.ones((3,), np.float32), (lax.add(1, 2), 1))
self.assertAllClose(ans, np.ones((3, 1), np.float32))
self.assertRaisesRegex(
TypeError,
"Shapes must be 1D sequences of concrete values of integer type.*",
lambda: lax.reshape(np.ones(3,), (np.array([3, 1]),)))
self.assertRaisesRegex(
TypeError,
"Shapes must be 1D sequences of concrete values of integer type.*",
lambda: lax.reshape(np.ones(3,), (1.5, 2.0)))
def testDynamicSliceTypeErrors(self):
self.assertRaisesRegex(
TypeError,
"index arguments to dynamic_slice must be integers of the same type",
lambda: lax.dynamic_slice(np.ones((3, 4), dtype=np.float32),
(np.int32(1), np.int16(2)), (2, 2)))
def testDynamicUpdateSliceTypeErrors(self):
self.assertRaisesRegex(
TypeError,
"index arguments to dynamic_update_slice must be integers of the same "
"type",
lambda: lax.dynamic_update_slice(np.ones((3, 4), dtype=np.float32),
np.zeros((2, 2), dtype=np.float32),
(np.int32(1), np.int16(2))))
def test_primitive_jaxtype_error(self):
err_str = ("Error interpreting argument to .* as an abstract array. The problematic "
r"value is of type .* and was passed to the function at path args\[1\].")
with jax.enable_checks(False):
with self.assertRaisesRegex(TypeError, err_str):
lax.add(1, 'hi')
def test_reduction_with_repeated_axes_error(self):
with self.assertRaisesRegex(ValueError, "duplicate value in 'axes' .*"):
lax.reduce(np.arange(3), 0, lax.add, (0, 0))
@parameterized.parameters([lax.rem, lax.lt, lax.gt, lax.ge, lax.le])
def test_ops_do_not_accept_complex_dtypes(self, op):
with self.assertRaisesRegex(TypeError, ".*does not accept dtype complex.*"):
op(2+3j, 4+5j)
@parameterized.parameters([lax.add, lax.mul, lax.div, lax.rem, lax.lt, lax.gt,
lax.ge, lax.le, lax.eq, lax.ne])
def test_ops_error_on_mismatched_dtypes(self, op):
with self.assertRaisesRegex(TypeError, ".*requires arguments to have the same dtypes.*"):
op(0, 0.0)
def test_population_count_booleans_not_supported(self):
# https://github.com/jax-ml/jax/issues/3886
msg = "population_count does not accept dtype bool"
with self.assertRaisesRegex(TypeError, msg):
lax.population_count(True)
def test_conv_general_dilated_different_input_ranks_error(self):
# https://github.com/jax-ml/jax/issues/4316
msg = ("conv_general_dilated lhs and rhs must have the same number of "
"dimensions")
dimension_numbers = lax.ConvDimensionNumbers(lhs_spec=(0, 1, 2),
rhs_spec=(0, 1, 2),
out_spec=(0, 1, 2))
kwargs = { 'window_strides': (1,)
, 'padding': ((0, 0),)
, 'lhs_dilation': (1,)
, 'rhs_dilation': (1,)
, 'dimension_numbers': dimension_numbers
, 'feature_group_count': 1
, 'batch_group_count': 1
, 'precision': None
}
lhs, rhs = np.ones((1, 1, 1)), np.ones((1, 1, 1, 1))
with self.assertRaisesRegex(ValueError, msg):
lax.conv_general_dilated(lhs, rhs, **kwargs)
def test_window_strides_dimension_shape_rule(self):
# https://github.com/jax-ml/jax/issues/5087
msg = ("conv_general_dilated window and window_strides must have "
"the same number of dimensions")
lhs = jax.numpy.zeros((1, 1, 3, 3))
rhs = np.zeros((1, 1, 1, 1))
with self.assertRaisesRegex(ValueError, msg):
jax.lax.conv(lhs, rhs, [1], 'SAME')
def test_reduce_window_scalar_init_value_shape_rule(self):
# https://github.com/jax-ml/jax/issues/4574
args = { "operand": np.ones((4, 4), dtype=np.int32)
, "init_value": np.zeros((1,), dtype=np.int32)
, "computation": lax.max
, "window_dimensions": (2, 2)
, "window_strides": (2, 2)
, "padding": "VALID"
, "base_dilation": (1, 1)
, "window_dilation": (1, 1)
}
msg = (r"reduce_window expected init_values to be scalars but init_values "
r"have shapes \[\(1,\)\].")
with self.assertRaisesRegex(TypeError, msg):
lax.reduce_window(**args)
def test_reduce_correctly_works_with_pytrees(self):
operands = {'x': [np.ones(5), np.arange(5)]}
init_values = {'x': [0., 0]}
result = lax.reduce(operands, init_values,
lambda x, y: jax.tree.map(lax.add, x, y),
[0])
self.assertDictEqual(result, {'x': [5., 10]})
def test_reduce_with_mismatched_pytrees_errors(self):
operands = {'x': np.ones(5)}
bad_init_values = {'y': 0.}
with self.assertRaisesRegex(ValueError, 'Operands must have the same '
'tree structure as init_values'):
lax.reduce(operands, bad_init_values,
lambda x, y: dict(x=x['x'] + y['x']), [0])
def test_reduce_with_nonscalar_inits_errors(self):
operands = {'x': np.ones(5)}
bad_init_values = {'x': np.ones(5)}
with self.assertRaisesRegex(ValueError,
'reduce found non-scalar initial value'):
lax.reduce(operands, bad_init_values,
lambda x, y: dict(x=x['x'] + y['x']), [0])
def test_select_jvp_complexity(self):
jaxpr = jax.make_jaxpr(lambda x: jax.jvp(lambda x: lax.select(True, x, x),
(x,), (1.,)))(1.)
self.assertLen(jaxpr.jaxpr.eqns, 2)
def testRngBitGenerator(self):
# This test covers the original behavior of lax.rng_bit_generator, which
# required x64=True, and only checks shapes and jit invariance.
if not config.enable_x64.value:
raise SkipTest("RngBitGenerator requires 64bit key")
key = np.array((1, 2)).astype(np.uint64)
def fn(k):
return lax.rng_bit_generator(
k, shape=(5, 7), algorithm=lax.RandomAlgorithm.RNG_THREE_FRY)
out = fn(key)
out_jit = jax.jit(fn)(key)
self.assertEqual(out[0].shape, (2,))
self.assertEqual(out[1].shape, (5, 7))
self.assertArraysEqual(out[0], out_jit[0])
self.assertArraysEqual(out[1], out_jit[1])
def testRngBitGenerator2(self):
def f(key):
return lax.rng_bit_generator(key, shape=(5, 7))
key = np.array((1, 2, 3, 4)).astype(np.uint32)
out1 = f(key)
out2 = jax.jit(f)(key)
self.assertEqual(out1[0].shape, (4,))
self.assertEqual(out1[1].shape, (5, 7))
self.assertArraysEqual(out1[0], out2[0])
self.assertArraysEqual(out1[1], out2[1])
@jtu.skip_on_devices("tpu")
def testRngBitGeneratorReturnedKey(self):
# This test ensures that the key bit-packing/unpacking operations used in
# the translation rule for rng_bit_generator, on older jaxlibs and at time
# of writing on GPU, are inverses of one another.
key = np.array([3, 1, 4, 2], dtype=np.dtype('uint32'))
new_key, _ = lax.rng_bit_generator(key, (0,))
self.assertAllClose(key, new_key)
def test_rng_bit_generator_vmap(self):
def f(key):
return lax.rng_bit_generator(key, shape=(5, 7))
keys = np.arange(3 * 4).reshape((3, 4)).astype(np.uint32)
out_keys, bits = jax.vmap(f)(keys)
self.assertEqual(out_keys.shape, (3, 4))
self.assertEqual(bits.shape, (3, 5, 7))
def test_rng_bit_generator_vmap_vmap(self):
def f(key):
return lax.rng_bit_generator(key, shape=(5, 7))
keys = np.arange(2 * 3 * 4).reshape((2, 3, 4)).astype(np.uint32)
out_keys, bits = jax.vmap(jax.vmap(f))(keys)
self.assertEqual(out_keys.shape, (2, 3, 4))
self.assertEqual(bits.shape, (2, 3, 5, 7))
@jtu.sample_product(
dtype=lax_test_util.all_dtypes + lax_test_util.python_scalar_types,
weak_type=[True, False],
)
def test_const(self, dtype, weak_type):
if dtype in set(lax_test_util.python_scalar_types):
val = dtype(0)
else:
val = lax_internal._convert_element_type(0, np.dtype(dtype),
weak_type=weak_type)
const = lax_internal._const(val, 0)
self.assertEqual(dtypes.dtype(val), dtypes.dtype(const))
def testIgammaSpecial(self):
self.assertEqual(lax.igamma(1., np.inf), 1.)
self.assertEqual(lax.igammac(1., np.inf), 0.)
  def testRegressionIssue5728(self):
    # The computation in this test gave garbage data on CPU due to an LLVM bug.
    @jax.jit
    def f(inputs):
      # Two 15-wide windows of the 24-wide input.
      out_action_2 = lax.slice_in_dim(inputs, 0, 15, axis=-1)
      mask = lax.slice_in_dim(inputs, 7, 22, axis=-1)
      # Where the mask equals zero, replace with the sentinel value 42.
      out_action_2 = lax.select(lax.eq(mask, np.float32(0)),
                                lax.broadcast(np.float32(42), (1, 15)),
                                out_action_2)
      # Right-pad with 42 so that, for an all-zero input, every output is 42.
      return lax.pad(out_action_2, np.float32(42), [(0, 0, 0), (0, 15, 0)])
    self.assertArraysEqual(np.full((1, 30), np.float32(42)),
                           f(np.zeros((1, 24), dtype=np.float32)))
def testDynamicSliceUnsignedNoNormalization(self):
# Test that no negative index correction is done for unsigned indices.
f = lambda x, i: lax.dynamic_slice(x, [i], [1])
x = np.arange(200)
i = np.uint32(128)
jaxpr = jax.make_jaxpr(f)(x, i)
self.assertLen(jaxpr.eqns, 1)
self.assertEqual(jaxpr.eqns[0].primitive, lax.dynamic_slice_p)
def testDynamicSliceU8Index(self):
# Regression test for u8 index in dynamic-slice (#6122)
x = np.arange(200)
np.testing.assert_equal(
np.array(lax.dynamic_slice(x, np.uint8([128]), (1,))), [128])
def test_dot_general_batching_python_builtin_arg(self):
# https://github.com/jax-ml/jax/issues/16805
@jax.remat
def f(x):
return jax.lax.dot_general(x, x, (([], []), ([], [])))
jax.hessian(f)(1.0) # don't crash
def test_constant_folding_complex_to_real_scan_regression(self):
# regression test for github.com/jax-ml/jax/issues/19059
def g(hiddens):
hiddens_aug = jnp.vstack((hiddens[0], hiddens))
new_hiddens = hiddens_aug.copy()
diff = new_hiddens[:-1] - hiddens
diff = new_hiddens[:-1] - hiddens
out = jnp.trace(jnp.conj(diff).T @ diff).real
return jnp.array(out, dtype=jnp.complex64)
def _step(carry, arg):
primals, f_vjp = jax.vjp(
g,
jax.random.normal(jax.random.key(0), (9, 8), dtype=jnp.complex64),
)
out = f_vjp(np.array(1.0 + 0j, 'complex64'))[0]
return carry, carry
a, b = jax.lax.scan(_step, 0, jnp.arange(4, dtype=jnp.complex64))
@parameterized.parameters([float, np.array, np.float32, jnp.float32])
def testAsarray(self, typ):
x = typ(1.0)
x_arr = lax_internal.asarray(x)
self.assertArraysEqual(x, x_arr)
self.assertIsInstance(x_arr, jax.Array)
# jaxpr should not bind any primitives, whether called directly or
# as a closure:
jaxpr = jax.make_jaxpr(lax_internal.asarray)(x)
self.assertLen(jaxpr.eqns, 0)
asarray_closure = lambda: lax_internal.asarray(x)
jaxpr = jax.make_jaxpr(asarray_closure)()
self.assertLen(jaxpr.eqns, 0)
# Regression test for https://github.com/jax-ml/jax/issues/19334
# lax.asarray as a closure should not trigger transfer guard.
with jax.transfer_guard('disallow'):
jax.jit(asarray_closure)()
def test_optimization_barrier(self):
x = lax.optimization_barrier((2, 3))
self.assertEqual((2, 3), x)
def test_optimization_barrier_autodiff(self):
def f(x):
y = 1. * x
x, y = lax.optimization_barrier((x, y))
z = 2. * x
return y + z
g = jax.grad(f)(5.) # doesn't crash
self.assertAllClose(g, 3., check_dtypes=False)
def test_shape_as_value_handles_static_shapes(self):
result = lax.shape_as_value(())
self.assertArraysEqual(result, lax.full((0,), np.array(0, np.int32)))
result = lax.shape_as_value((2,))
self.assertArraysEqual(result, np.asarray((2,), np.int32))
result = lax.shape_as_value((2, 3))
self.assertArraysEqual(result, np.asarray((2, 3), np.int32))
def test_shape_as_value_handles_polymorphic_shapes(self):
@jax.jit
def f(x):
return lax.shape_as_value(x.shape)
exported = export.export(f)(
jax.ShapeDtypeStruct(export.symbolic_shape("a"), jnp.float32)
)
result = exported.call(np.ones((1), dtype=np.float32))
self.assertArraysEqual(result, np.asarray((1,), np.int64))
result = exported.call(np.ones((2), dtype=np.float32))
self.assertArraysEqual(result, np.asarray((2,), np.int64))
exported = export.export(f)(
jax.ShapeDtypeStruct(export.symbolic_shape("a, b"), jnp.float32)
)
result = exported.call(np.ones((1, 2), dtype=np.float32))
self.assertArraysEqual(result, np.asarray((1, 2), np.int64))
result = exported.call(np.ones((3, 4), dtype=np.float32))
self.assertArraysEqual(result, np.asarray((3, 4), np.int64))
@jtu.sample_product(
name = ['abs'],
dtype = ['int4', 'uint4'],
)
def test_int4_non_support_errors(self, name, dtype):
func = getattr(lax, name)
arg = lax.iota(dtype, 3)
with self.assertRaisesRegex(TypeError, f'{name} does not accept dtype {dtype}.'):
func(arg)
@jtu.sample_product(
name = ['bitwise_not', 'neg', 'sign'],
dtype = ['int4', 'uint4'],
)
def test_int4_unary_ops(self, name, dtype):
func = getattr(lax, name)
rng = jtu.rand_default(self.rng())
x = rng(3, dtype)
actual = func(x)
expected = func(x.astype('int8')).astype(dtype)
self.assertArraysEqual(actual, expected, check_dtypes=True)
@jtu.sample_product(
name = ['add', 'sub', 'mul', 'div', 'rem', 'max', 'min',
'shift_left', 'shift_right_arithmetic', 'shift_right_logical',
'bitwise_and', 'bitwise_or', 'bitwise_xor',
'eq', 'ne', 'gt', 'ge', 'lt', 'le'],
dtype = ['int4', 'uint4'],
)
def test_int4_binary_ops(self, name, dtype):
func = getattr(lax, name)
rng = jtu.rand_default(self.rng())
x, y = rng(3, dtype), rng(3, dtype)
actual = func(x, y)
expected = func(x.astype('int8'), y.astype('int8'))
if expected.dtype == 'int8':
expected = expected.astype(dtype)
self.assertArraysEqual(actual, expected, check_dtypes=True)
  def test_gather_with_asymmetric_dtype(self):
    # A gather whose custom VJP returns a cotangent of a different dtype
    # (bfloat16) than the primal (float8_e4m3fn) must propagate that dtype
    # through the gather transpose without crashing.
    @jax.custom_vjp
    def f(x):
      return x
    def f_fwd(x):
      return f(x), ()
    def f_bwd(res, g):
      del res
      # Cotangent deliberately cast to bfloat16, asymmetric with the primal.
      return g.astype(jnp.bfloat16),
    f.defvjp(f_fwd, f_bwd)
    def g(x):
      idx = jnp.argsort(x)
      x = x.at[idx].get()
      return f(x)
    x = jnp.arange(8, dtype=jnp.float8_e4m3fn)
    _, vjp_fn = jax.vjp(g, x)
    cts = vjp_fn(jnp.ones((8,), dtype=jnp.float8_e4m3fn))  # Don't crash
    self.assertEqual(cts[0].dtype, jnp.bfloat16)
| LaxTest |
python | astropy__astropy | astropy/io/votable/tree.py | {
"start": 10081,
"end": 10414
} | class ____:
@property
def name(self):
"""An optional name for the element."""
return self._name
@name.setter
def name(self, name):
xmlutil.check_token(name, "name", self._config, self._pos)
self._name = name
@name.deleter
def name(self):
self._name = None
| _NameProperty |
python | pytorch__pytorch | benchmarks/inductor_backends/cutlass.py | {
"start": 4130,
"end": 13197
} | class ____:
config: ExperimentGroupConfig
results: list[ExperimentResults] = field(default_factory=list)
def get_inputs(
config: ExperimentGroupConfig,
) -> tuple[torch.Tensor, ...]:
op_name = config.op_name
M, N, K = config.shape
batch_size = config.batch_size
dtype = config.dtype
device = torch.device("cuda")
if op_name == "mm":
A = torch.randn(M, K, dtype=dtype, device=device)
B = torch.randn(N, K, dtype=dtype, device=device).t()
return A, B
elif op_name == "addmm":
A = torch.randn(M, K, dtype=dtype, device=device)
B = torch.randn(N, K, dtype=dtype, device=device).t()
C = torch.randn(N, dtype=dtype, device=device)
return C, A, B
elif op_name == "bmm":
A = torch.randn(batch_size, M, K, dtype=dtype, device=device)
B = torch.randn(batch_size, N, K, dtype=dtype, device=device).permute(0, 2, 1)
return A, B
elif op_name == "_scaled_mm":
# For _scaled_mm, we only support fp8e4m3 with rowwise scaling
if dtype != torch.float8_e4m3fn:
raise ValueError(f"_scaled_mm only supports fp8e4m3, got {dtype}")
# Create input tensors in bfloat16 first, then quantize to fp8
input_dtype = torch.bfloat16
x = torch.randn(M, K, dtype=input_dtype, device=device)
w = torch.randn(N, K, dtype=input_dtype, device=device)
# Quantize using rowwise scaling
w_fp8, w_inverse_scale = _quantize_rowwise(w, dtype)
w_t_fp8 = w_fp8.t()
w_inverse_scale = w_inverse_scale.t() # scale_b should be (1, N)
x_fp8, x_inverse_scale = _quantize_rowwise(x, dtype)
# Return inputs for _scaled_mm: (input, weight_t, scale_a, scale_b, bias, out, out_dtype, use_fast_accum)
return (
x_fp8,
w_t_fp8,
x_inverse_scale,
w_inverse_scale,
None,
None,
torch.bfloat16,
USE_FAST_ACCUM,
)
else:
raise ValueError(f"Unknown op {op_name}")
def run_single_experiment_group(
group_config: ExperimentGroupConfig,
) -> list[ExperimentResults]:
inputs = get_inputs(group_config)
op = getattr(torch, group_config.op_name)
results = []
for config in group_config.experiments:
torch._dynamo.reset()
torch._inductor.utils.clear_caches()
compiled_op = torch.compile(
op,
options=config.to_options(),
)
start_time = time.perf_counter()
try:
_ = compiled_op(*inputs)
except Exception as e:
import traceback
log.warning(
f"Benchmark config {config.name()} failed: {e}, " # noqa: G004
f"traceback: {traceback.format_exc()}"
)
results.append(
ExperimentResults(
name=config.name(),
forward_time=float("inf"),
teraflops=0.0,
compilation_time=float("inf"),
)
)
continue
compilation_time = time.perf_counter() - start_time
forward_time = benchmark_torch_function_in_microseconds(
compiled_op,
*inputs,
)
flops = calculate_flops(
group_config.op_name,
group_config.shape,
group_config.batch_size,
)
teraflops = flops / (forward_time * 1e-6) / 1e12
results.append(
ExperimentResults(
name=config.name(),
forward_time=forward_time,
teraflops=teraflops,
compilation_time=compilation_time,
)
)
return results
def generate_experiment_groups(
op_names: list[str],
shapes: list[tuple[int, int, int]],
dtypes: list[torch.dtype],
enable_persistent_tma_matmuls: list[bool],
cutlass_instantiation_levels: list[str],
batch_sizes: list[int],
) -> list[ExperimentGroupConfig]:
groups = []
for (
op_name,
shape,
dtype,
batch_size,
) in itertools.product(op_names, shapes, dtypes, batch_sizes):
group = ExperimentGroupConfig(
op_name=op_name,
shape=shape,
dtype=dtype,
batch_size=batch_size,
)
experiments = generate_experiment_configs(
enable_persistent_tma_matmuls, cutlass_instantiation_levels
)
group.experiments.extend(experiments)
groups.append(group)
return groups
def generate_experiment_configs(
enable_persistent_tma_matmuls: list[bool], cutlass_instantiation_levels: list[str]
) -> list[ExperimentConfig]:
configs = []
# add aten configs
configs.append(
AtenExperimentConfig(
max_autotune_gemm_backends="ATEN",
)
)
# add triton configs
for enable_persistent_tma_matmul in enable_persistent_tma_matmuls:
configs.append(
TritonExperimentConfig(
max_autotune_gemm_backends="TRITON",
enable_persistent_tma_matmul=enable_persistent_tma_matmul,
)
)
# add cutlass configs
for cutlass_instantiation_level in cutlass_instantiation_levels:
configs.append(
CutlassExperimentConfig(
max_autotune_gemm_backends="CUTLASS",
cutlass_instantiation_level=cutlass_instantiation_level,
)
)
return configs
def calculate_table_data(results: list[ExperimentResults]) -> dict:
table_data = defaultdict(list)
aten_perf: Optional[float] = None
for experiment_result in results:
for key, value in experiment_result.asdict().items():
assert key in UNITS, f"Unknown key {key}"
table_data[key + UNITS[key]].append(value)
if experiment_result.name == "aten":
aten_perf = experiment_result.forward_time
table_data[PERF_OVER_ATEN_STR].append("NA")
elif aten_perf is not None:
perf_over_aten = (
(experiment_result.forward_time - aten_perf) / aten_perf * 100
)
table_data[PERF_OVER_ATEN_STR].append(perf_over_aten)
else:
# fallback in case aten is not in experiment group
table_data[PERF_OVER_ATEN_STR].append("NA")
return table_data
def calculate_flops(op_name: str, shape: tuple[int, int, int], batch_size: int) -> int:
"""
Calculate the number of floating point operations based on operation type and shape.
"""
M, N, K = shape
if op_name == "bmm":
return 2 * batch_size * M * N * K
elif op_name == "addmm":
return 2 * M * N * K + M * N
elif op_name == "_scaled_mm":
return 2 * M * N * K
else:
return 2 * M * N * K
def get_printable_results(experiment_groups: list[ExperimentGroup]) -> list[str]:
edge_over_aten = defaultdict(list)
output = []
for experiment_group in experiment_groups:
group_config_name = experiment_group.config.name()
output.append(f"\nExperiment group: {group_config_name}")
table_data = calculate_table_data(experiment_group.results)
for name, edge in zip(table_data["name"], table_data[PERF_OVER_ATEN_STR]):
edge_over_aten[name].append(edge)
output.append(
tabulate(table_data, headers="keys", tablefmt="pretty", floatfmt=".3f")
)
if "aten" in edge_over_aten:
output.append("\nAverage edge over aten (max(-edge, 0), higher is better):")
for name in edge_over_aten:
if name != "aten":
values = [
max(-v, 0.0)
for v in edge_over_aten[name]
if v != float("inf") and v != "NA"
]
valid_count = len(values)
average_edge = sum(values) / valid_count if values else "No valid data"
output.append(
f"{name}: {average_edge} (from {valid_count} valid values)"
)
output.append("\n")
return "\n".join(output)
def main():
seed = 123
torch.manual_seed(seed)
results = []
log.info("Starting benchmarking...")
configs = list(
generate_experiment_groups(
OP_NAMES,
SHAPES,
DTYPES,
ENABLE_PERSISTENT_TMA_MATMULS,
CUTLASS_INSTANTIATION_LEVELS,
BATCH_SIZES,
)
)
for i, group_config in enumerate(tqdm(configs)):
group_results = run_single_experiment_group(group_config) # noqa: G004
results.append(
ExperimentGroup(config=group_config, results=group_results),
)
sys.stderr.write(
f"\nINTERMEDIATE results: {i + 1}/{len(configs)} \n"
+ get_printable_results(results)
)
print("\nFINAL results...")
print(get_printable_results(results))
if __name__ == "__main__":
main()
| ExperimentGroup |
python | doocs__leetcode | solution/2800-2899/2873.Maximum Value of an Ordered Triplet I/Solution.py | {
"start": 0,
"end": 259
} | class ____:
def maximumTripletValue(self, nums: List[int]) -> int:
ans = mx = mx_diff = 0
for x in nums:
ans = max(ans, mx_diff * x)
mx_diff = max(mx_diff, mx - x)
mx = max(mx, x)
return ans
| Solution |
python | django__django | tests/i18n/test_percents.py | {
"start": 452,
"end": 895
} | class ____(SimpleTestCase):
"""Tests using the French translations of the sampleproject."""
PO_FILE = os.path.join(SAMPLEPROJECT_LOCALE, "fr", "LC_MESSAGES", "django.po")
def setUp(self):
self._language = get_language()
self._translations = trans_real._translations
activate("fr")
def tearDown(self):
trans_real._translations = self._translations
activate(self._language)
| FrenchTestCase |
python | ray-project__ray | python/ray/data/_internal/stats.py | {
"start": 31511,
"end": 35441
} | class ____:
"""A Class containing util functions that manage remote calls to _StatsActor.
Ray Data updates metrics through the _StatsManager, and direct remote calls
to the _StatsActor is discouraged. Some functionalities provided by
_StatsManager:
- Format and update iteration metrics
- Format and update execution metrics
- Aggregate per node metrics
- Dataset registration
"""
@staticmethod
def _aggregate_per_node_metrics(
op_metrics: List[OpRuntimeMetrics],
) -> Optional[Mapping[str, Mapping[str, Union[int, float]]]]:
"""
Aggregate per-node metrics from a list of OpRuntimeMetrics objects.
If per-node metrics are disabled in the current DataContext, returns None.
Otherwise, it sums up all NodeMetrics fields across the provided metrics and
returns a nested dictionary mapping each node ID to a dict of field values.
"""
if not DataContext.get_current().enable_per_node_metrics:
return None
aggregated_by_node = defaultdict(lambda: defaultdict(int))
for metrics in op_metrics:
for node_id, node_metrics in metrics._per_node_metrics.items():
agg_node_metrics = aggregated_by_node[node_id]
for f in fields(NodeMetrics):
agg_node_metrics[f.name] += getattr(node_metrics, f.name)
return aggregated_by_node
@staticmethod
def update_execution_metrics(
dataset_tag: str,
op_metrics: List[OpRuntimeMetrics],
operator_tags: List[str],
state: Dict[str, Any],
):
per_node_metrics = _StatsManager._aggregate_per_node_metrics(op_metrics)
op_metrics_dicts = [metric.as_dict() for metric in op_metrics]
args = (
dataset_tag,
op_metrics_dicts,
operator_tags,
state,
per_node_metrics,
)
try:
get_or_create_stats_actor().update_execution_metrics.remote(*args)
except Exception as e:
logger.warning(
f"Error occurred during update_execution_metrics.remote call to _StatsActor: {e}",
exc_info=True,
)
return
@staticmethod
def update_iteration_metrics(stats: "DatasetStats", dataset_tag: str):
args = (stats, dataset_tag)
try:
get_or_create_stats_actor().update_iteration_metrics.remote(*args)
except Exception as e:
logger.warning(
f"Error occurred during update_iteration_metrics.remote call to _StatsActor: {e}",
exc_info=True,
)
@staticmethod
def register_dataset_to_stats_actor(
dataset_tag: str,
operator_tags: List[str],
topology: Topology,
data_context: DataContext,
):
"""Register a dataset with the stats actor.
Args:
dataset_tag: Tag for the dataset
operator_tags: List of operator tags
topology: Optional Topology representing the DAG structure to export
data_context: The DataContext attached to the dataset
"""
get_or_create_stats_actor().register_dataset.remote(
ray.get_runtime_context().get_job_id(),
dataset_tag,
operator_tags,
topology,
data_context,
)
@staticmethod
def gen_dataset_id_from_stats_actor() -> str:
try:
stats_actor = get_or_create_stats_actor()
return ray.get(stats_actor.gen_dataset_id.remote())
except Exception as e:
logger.warning(
f"Failed to generate dataset_id, falling back to random uuid_v4: {e}"
)
# Getting dataset id from _StatsActor may fail, in this case
# fall back to uuid4
return uuid4().hex
| _StatsManager |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-multidoc-autoretrieval/llama_index/packs/multidoc_autoretrieval/base.py | {
"start": 1697,
"end": 6197
} | class ____(BaseLlamaPack):
"""
Multi-doc auto-retriever pack.
Uses weaviate as the underlying storage.
Args:
docs (List[Document]): A list of documents to index.
**kwargs: Keyword arguments to pass to the underlying index.
"""
def __init__(
self,
weaviate_client: Any,
doc_metadata_index_name: str,
doc_chunks_index_name: str,
metadata_nodes: List[BaseNode],
docs: List[Document],
doc_metadata_schema: VectorStoreInfo,
auto_retriever_kwargs: Optional[Dict[str, Any]] = None,
verbose: bool = False,
) -> None:
"""Init params."""
import weaviate
# do some validation
if len(docs) != len(metadata_nodes):
raise ValueError(
"The number of metadata nodes must match the number of documents."
)
# authenticate
client = cast(weaviate.Client, weaviate_client)
# auth_config = weaviate.AuthApiKey(api_key="")
# client = weaviate.Client(
# "https://<weaviate-cluster>.weaviate.network",
# auth_client_secret=auth_config,
# )
# initialize two vector store classes corresponding to the two index names
metadata_store = WeaviateVectorStore(
weaviate_client=client, index_name=doc_metadata_index_name
)
metadata_sc = StorageContext.from_defaults(vector_store=metadata_store)
# index VectorStoreIndex
# Since "new_docs" are concise summaries, we can directly feed them as nodes into VectorStoreIndex
index = VectorStoreIndex(metadata_nodes, storage_context=metadata_sc)
if verbose:
print("Indexed metadata nodes.")
# construct separate Weaviate Index with original docs. Define a separate query engine with query engine mapping to each doc id.
chunks_store = WeaviateVectorStore(
weaviate_client=client, index_name=doc_chunks_index_name
)
chunks_sc = StorageContext.from_defaults(vector_store=chunks_store)
doc_index = VectorStoreIndex.from_documents(docs, storage_context=chunks_sc)
if verbose:
print("Indexed source document nodes.")
# setup auto retriever
auto_retriever = VectorIndexAutoRetriever(
index,
vector_store_info=doc_metadata_schema,
**(auto_retriever_kwargs or {}),
)
self.index_auto_retriever = IndexAutoRetriever(retriever=auto_retriever)
if verbose:
print("Setup autoretriever over metadata.")
# define per-document retriever
self.retriever_dict = {}
for doc in docs:
index_id = doc.metadata["index_id"]
# filter for the specific doc id
filters = MetadataFilters(
filters=[
MetadataFilter(
key="index_id", operator=FilterOperator.EQ, value=index_id
),
]
)
retriever = doc_index.as_retriever(filters=filters)
self.retriever_dict[index_id] = retriever
if verbose:
print("Setup per-document retriever.")
# setup recursive retriever
self.recursive_retriever = RecursiveRetriever(
"vector",
retriever_dict={"vector": self.index_auto_retriever, **self.retriever_dict},
verbose=True,
)
if verbose:
print("Setup recursive retriever.")
# plug into query engine
llm = OpenAI(model="gpt-3.5-turbo")
self.query_engine = RetrieverQueryEngine.from_args(
self.recursive_retriever, llm=llm
)
def get_modules(self) -> Dict[str, Any]:
"""
Returns a dictionary containing the internals of the LlamaPack.
Returns:
Dict[str, Any]: A dictionary containing the internals of the
LlamaPack.
"""
return {
"index_auto_retriever": self.index_auto_retriever,
"retriever_dict": self.retriever_dict,
"recursive_retriever": self.recursive_retriever,
"query_engine": self.query_engine,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""
Runs queries against the index.
Returns:
Any: A response from the query engine.
"""
return self.query_engine.query(*args, **kwargs)
| MultiDocAutoRetrieverPack |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.