language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
gevent__gevent
|
src/greentest/3.14/test_smtplib.py
|
{
"start": 28876,
"end": 29421
}
|
class ____(unittest.TestCase):
    """Exercise SMTP client behaviour against a server whose HELO reply is an error."""

    def setUp(self):
        # Route smtplib's socket usage through the mock and make every
        # connection greet with a failing (non-2xx) HELO response.
        smtplib.socket = mock_socket
        mock_socket.reply_with(b"199 no hello for you!")
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output
        self.port = 25

    def tearDown(self):
        # Undo the stdout capture and the socket-module patch.
        sys.stdout = self.old_stdout
        smtplib.socket = socket

    def testFailingHELO(self):
        # Constructing the client performs the HELO exchange, so the bad
        # reply surfaces as SMTPConnectError straight from the constructor.
        with self.assertRaises(smtplib.SMTPConnectError):
            smtplib.SMTP(HOST, self.port, 'localhost', 3)
|
BadHELOServerTests
|
python
|
tensorflow__tensorflow
|
tensorflow/compiler/tests/float_ops_test.py
|
{
"start": 868,
"end": 17917
}
|
class ____(xla_test.XLATestCase):
    """Checks XLA lowerings of elementwise float ops against reference values.

    For every float dtype the backend supports (``self.float_types``), each op
    is evaluated through ``assert_op_output_matches_expected`` and compared to
    either a NumPy reference or a hand-computed closed-form value.
    """

    def test_float_ops(self):
        with self.session() as session:
            for dtype in self.float_types:
                # acos/asin need inputs strictly inside [-1, 1].
                x = np.arange(-0.90, 0.90, 0.25)
                self.assert_op_output_matches_expected(
                    math_ops.acos,
                    x.astype(dtype),
                    expected=np.arccos(x).astype(dtype),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.asin,
                    x.astype(dtype),
                    expected=np.arcsin(x).astype(dtype),
                    local_session=session,
                )
                # Rank-3 input exercises shape handling beyond vectors.
                x = np.arange(-3, 3).reshape(1, 3, 2)
                self.assert_op_output_matches_expected(
                    math_ops.atan,
                    x.astype(dtype),
                    expected=np.arctan(x).astype(dtype),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.acosh,
                    np.array([1, 2, 3, 4], dtype=dtype),
                    expected=np.array(
                        [0, 1.3169579, 1.76274717, 2.06343707], dtype=dtype
                    ),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.asinh,
                    np.array([1, 2, 3, 4], dtype=dtype),
                    expected=np.array(
                        [0.88137359, 1.44363548, 1.81844646, 2.09471255], dtype=dtype
                    ),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.atanh,
                    np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype),
                    expected=np.array(
                        [0.10033535, 0.20273255, 0.3095196, 0.42364893], dtype=dtype
                    ),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.ceil,
                    np.array([[-1.7, 1.2]], dtype=dtype),
                    expected=np.array([[-1, 2]], dtype=dtype),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.cosh,
                    np.array([1, 2, 3, 4], dtype=dtype),
                    expected=np.array(
                        [1.54308063, 3.76219569, 10.067662, 27.30823284], dtype=dtype
                    ),
                    local_session=session,
                )
                # Disable float16 testing for now
                if dtype != np.float16:
                    # Reference values come from running the op itself via
                    # session.run, so this only checks the XLA path agrees
                    # with the non-compiled path.
                    x = np.arange(-10, 10, 1).astype(dtype)
                    erf_x = session.run(math_ops.erf(x))
                    erfc_x = session.run(math_ops.erfc(x))
                    self.assert_op_output_matches_expected(
                        math_ops.erf,
                        x,
                        expected=erf_x,
                        local_session=session,
                    )
                    self.assert_op_output_matches_expected(
                        math_ops.erfc,
                        x,
                        expected=erfc_x,
                        local_session=session,
                    )
                self.assert_op_output_matches_expected(
                    math_ops.exp,
                    np.array([[-1, 1]], dtype=dtype),
                    expected=np.array([[0.36787945, 2.7182817]], dtype=dtype),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.expm1,
                    np.array([[-1, 1]], dtype=dtype),
                    expected=np.array([[-0.63212056, 1.71828183]], dtype=dtype),
                    local_session=session,
                    rtol=1e-5,
                )
                self.assert_op_output_matches_expected(
                    math_ops.floor,
                    np.array([[-1.7, 1.2]], dtype=dtype),
                    expected=np.array([[-2, 1]], dtype=dtype),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.is_finite,
                    np.array(
                        [[-np.inf, -2, -1, 0, 0.5, 1, 2, np.inf, np.nan]], dtype=dtype
                    ),
                    expected=np.array([[0, 1, 1, 1, 1, 1, 1, 0, 0]], dtype=np.bool_),
                    local_session=session,
                )
                # Tests for tf.nn ops.
                self.assert_op_output_matches_expected(
                    nn_ops.l2_loss,
                    np.array([[[]]], dtype=dtype),
                    expected=dtype(0),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    nn_ops.l2_loss,
                    dtype(4),
                    dtype(8),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    nn_ops.l2_loss,
                    np.array([[-2, 4]], dtype=dtype),
                    expected=dtype(10),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.reciprocal,
                    np.array([[1, 2]], dtype=dtype),
                    expected=np.array([[1, 0.5]], dtype=dtype),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.log,
                    np.array([[1, 2]], dtype=dtype),
                    expected=np.array([[0, 0.69314718]], dtype=dtype),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.sin,
                    np.array([[1, 2]], dtype=dtype),
                    expected=np.array([[0.841478, 0.909302]], dtype=dtype),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.cos,
                    np.array([[1, 2]], dtype=dtype),
                    expected=np.array([[0.540297, -0.41614]], dtype=dtype),
                    local_session=session,
                )
                # Confirm that log1p will remain precise across a range of small values.
                self.assert_op_output_matches_expected(
                    math_ops.log1p,
                    np.array(
                        [[1e-14, 1e-15, 0.6, 2] + [x * 1e-5 for x in range(1, 20)]],
                        dtype=dtype,
                    ),
                    expected=np.log1p(
                        np.array(
                            [[1e-14, 1e-15, 0.6, 2] + [x * 1e-5 for x in range(1, 20)]],
                            dtype=dtype,
                        )
                    ).astype(dtype),
                    local_session=session,
                    rtol=1e-15 if dtype == np.float64 else 1e-4,
                    atol=1e-15 if dtype == np.float64 else 1e-4,
                )
                # rint and round use round-half-to-even, so 0.5 -> 0, 1.5 -> 2.
                self.assert_op_output_matches_expected(
                    math_ops.rint,
                    np.array(
                        [
                            [-1.7, 1.2, 4.0, 0.0],
                            [-3.5, -2.5, -1.5, -0.5],
                            [0.5, 1.5, 2.5, 3.5],
                        ],
                        dtype=dtype,
                    ),
                    expected=np.array(
                        [[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]], dtype=dtype
                    ),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.round,
                    np.array(
                        [
                            [-1.7, 1.2, 4.0, 0.0],
                            [-3.5, -2.5, -1.5, -0.5],
                            [0.5, 1.5, 2.5, 3.5],
                        ],
                        dtype=dtype,
                    ),
                    expected=np.array(
                        [[-2, 1, 4, 0], [-4, -2, -2, 0], [0, 2, 2, 4]], dtype=dtype
                    ),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.rsqrt,
                    np.array([[4, 16]], dtype=dtype),
                    expected=np.array([[0.5, 0.25]], dtype=dtype),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.sigmoid,
                    np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
                    expected=np.array(
                        [
                            [0.7310586, 0.7310586, 0.7310586, 0.7310586],
                            [0.7310586, 0.880797, 0.95257413, 0.98201376],
                        ],
                        dtype=dtype,
                    ),
                    local_session=session,
                )
                # Extreme inputs check sigmoid saturates to 0/1 without NaNs.
                self.assert_op_output_matches_expected(
                    math_ops.sigmoid,
                    np.array([-300, -150, 0, 150, 300], dtype=dtype),
                    expected=np.array([0, 0, 0.5, 1, 1], dtype=dtype),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.sinh,
                    np.array([1, 2, 3, 4], dtype=dtype),
                    expected=np.array(
                        [1.17520119, 3.62686041, 10.01787493, 27.2899172], dtype=dtype
                    ),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.sqrt,
                    np.array([[4, 9]], dtype=dtype),
                    expected=np.array([[2, 3]], dtype=dtype),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.tan,
                    np.array([1, 2, 3, 4], dtype=dtype),
                    expected=np.array(
                        [1.55740772, -2.18503986, -0.14254654, 1.15782128], dtype=dtype
                    ),
                    local_session=session,
                )
                # tanh row 2 checks inf/nan propagation; row 3 checks saturation.
                self.assert_op_output_matches_expected(
                    math_ops.tanh,
                    np.array(
                        [
                            [1, 2, 3, 4],
                            [np.inf, -np.inf, np.nan, 20],
                            [19, -19, 22, -22],
                        ],
                        dtype=dtype,
                    ),
                    expected=np.array(
                        [
                            [0.76159418, 0.96402758, 0.99505478, 0.99932933],
                            [1.0, -1.0, np.nan, 1.0],
                            [1.0, -1.0, 1.0, -1.0],
                        ],
                        dtype=dtype,
                    ),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    nn_ops.log_softmax,
                    np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
                    expected=np.array(
                        [
                            [-1.3862944, -1.3862944, -1.3862944, -1.3862944],
                            [-3.4401896, -2.4401896, -1.4401897, -0.44018969],
                        ],
                        dtype=dtype,
                    ),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    nn_ops.elu,
                    np.array([[-1, 0, 1, -1e-6]], dtype=dtype),
                    expected=np.array(
                        [[-0.63212056, 0, 1, -9.999995e-07]], dtype=dtype
                    ),
                    rtol=1e-5,
                    atol=1e-6,
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    nn_ops.selu,
                    np.array([[-1, 0, 1, -1e-5]], dtype=dtype),
                    expected=np.array(
                        [[-1.11133074, 0.0, 1.05070099, -1.758090550379974e-05]],
                        dtype=dtype,
                    ),
                    rtol=1e-5,
                    atol=1e-6,
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    nn_ops.relu,
                    np.array([[-1, 1]], dtype=dtype),
                    expected=np.array([[0, 1]], dtype=dtype),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    nn_ops.relu6,
                    np.array([[-0.05, 6.05, 5]], dtype=dtype),
                    expected=np.array([[0, 6, 5]], dtype=dtype),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    nn_ops.leaky_relu,
                    np.array([[-2, -1, 0, 1, 2]], dtype=dtype),
                    expected=np.array([[-0.4, -0.2, 0.0, 1.0, 2.0]], dtype=dtype),
                    local_session=session,
                )
                # softmax over rank-1, rank-2 and rank-3 inputs.
                self.assert_op_output_matches_expected(
                    nn_ops.softmax,
                    np.array([1, 2, 3, 4], dtype=dtype),
                    expected=np.array(
                        [0.032058604, 0.087144323, 0.23688284, 0.64391428], dtype=dtype
                    ),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    nn_ops.softmax,
                    np.array([[1, 1, 1, 1], [1, 2, 3, 4]], dtype=dtype),
                    expected=np.array(
                        [
                            [0.25, 0.25, 0.25, 0.25],
                            [0.032058604, 0.087144323, 0.23688284, 0.64391428],
                        ],
                        dtype=dtype,
                    ),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    nn_ops.softmax,
                    np.array([[[1, 1], [1, 1]], [[1, 2], [3, 4]]], dtype=dtype),
                    expected=np.array(
                        [
                            [[0.5, 0.5], [0.5, 0.5]],
                            [[0.26894142, 0.73105858], [0.26894142, 0.73105858]],
                        ],
                        dtype=dtype,
                    ),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    nn_ops.softsign,
                    np.array([[-2, -1, 0, 1, 2]], dtype=dtype),
                    expected=np.array(
                        [[-0.66666669, -0.5, 0, 0.5, 0.66666669]], dtype=dtype
                    ),
                    local_session=session,
                )
                # sign must preserve signed zero and propagate NaN.
                self.assert_op_output_matches_expected(
                    math_ops.sign,
                    np.array(
                        [[-2.0, -1.0, -0.0, +0.0, 1.0, 2.0, float("nan")]], dtype=dtype
                    ),
                    expected=np.array(
                        [[-1.0, -1.0, -0.0, +0.0, 1.0, 1.0, float("nan")]], dtype=dtype
                    ),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.is_finite,
                    np.array(
                        [[42, float("inf"), -123], [float("nan"), 0, -0.0]], dtype=dtype
                    ),
                    expected=np.array(
                        [[True, False, True], [False, True, True]], dtype=np.bool_
                    ),
                    local_session=session,
                )
                # lgamma(0.5) == log(sqrt(pi)).
                self.assert_op_output_matches_expected(
                    math_ops.lgamma,
                    np.array(0.5, dtype=dtype),
                    expected=np.array(np.log(np.pi) / 2, dtype=dtype),
                    local_session=session,
                )
                self.assert_op_output_matches_expected(
                    math_ops.lgamma,
                    np.array(
                        [
                            [1, 2, 3],
                            [4, 5, 6],
                            [1 / 2, 3 / 2, 5 / 2],
                            [-3 / 2, -7 / 2, -11 / 2],
                        ],
                        dtype=dtype,
                    ),
                    expected=np.array(
                        [
                            [0, 0, np.log(2.0)],
                            [np.log(6.0), np.log(24.0), np.log(120)],
                            [
                                np.log(np.pi) / 2,
                                np.log(np.pi) / 2 - np.log(2),
                                np.log(np.pi) / 2 - np.log(4) + np.log(3),
                            ],
                            [
                                np.log(np.pi) / 2 - np.log(3) + np.log(4),
                                np.log(np.pi) / 2 - np.log(105) + np.log(16),
                                np.log(np.pi) / 2 - np.log(10395) + np.log(64),
                            ],
                        ],
                        dtype=dtype,
                    ),
                    local_session=session,
                )
                # The actual result is complex. Take the real part.
                self.assert_op_output_matches_expected(
                    math_ops.lgamma,
                    np.array([-1 / 2, -5 / 2, -9 / 2], dtype=dtype),
                    expected=np.array(
                        [
                            np.log(np.pi) / 2 + np.log(2),
                            np.log(np.pi) / 2 - np.log(15) + np.log(8),
                            np.log(np.pi) / 2 - np.log(945) + np.log(32),
                        ],
                        dtype=dtype,
                    ),
                    local_session=session,
                    atol=1e-4,
                )
                self.assert_op_output_matches_expected(
                    math_ops.digamma,
                    np.array(
                        [
                            [1.0, 0.5, 1 / 3.0],
                            [0.25, 1 / 6.0, 0.125],
                            [2.0, 3.0, 4.0],
                            [6.0, 8.0, 9.0],
                        ],
                        dtype=dtype,
                    ),
                    expected=np.array(
                        [
                            [
                                -np.euler_gamma,
                                -2 * np.log(2) - np.euler_gamma,
                                -np.pi / 2 / np.sqrt(3)
                                - 3 * np.log(3) / 2
                                - np.euler_gamma,
                            ],
                            [
                                -np.pi / 2 - 3 * np.log(2) - np.euler_gamma,
                                -np.pi * np.sqrt(3) / 2
                                - 2 * np.log(2)
                                - 3 * np.log(3) / 2
                                - np.euler_gamma,
                                -np.pi / 2
                                - 4 * np.log(2)
                                - (
                                    np.pi
                                    + np.log(2 + np.sqrt(2))
                                    - np.log(2 - np.sqrt(2))
                                )
                                / np.sqrt(2)
                                - np.euler_gamma,
                            ],
                            [
                                1 - np.euler_gamma,
                                1.5 - np.euler_gamma,
                                11 / 6.0 - np.euler_gamma,
                            ],
                            [
                                137 / 60.0 - np.euler_gamma,
                                363 / 140.0 - np.euler_gamma,
                                761 / 280.0 - np.euler_gamma,
                            ],
                        ],
                        dtype=dtype,
                    ),
                    local_session=session,
                )
# Allow running this test file directly as a script.
if __name__ == "__main__":
    googletest.main()
|
FloatOpsTest
|
python
|
pytorch__pytorch
|
torch/onnx/_internal/torchscript_exporter/_type_utils.py
|
{
"start": 1086,
"end": 13980
}
|
class ____(enum.IntEnum):
    """Scalar types defined in torch.

    Use ``JitScalarType`` to convert from torch and JIT scalar types to ONNX scalar types.

    Examples:
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ONNX)
        >>> # xdoctest: +IGNORE_WANT("win32 has different output")
        >>> JitScalarType.from_value(torch.ones(1, 2)).onnx_type()
        TensorProtoDataType.FLOAT

        >>> JitScalarType.from_value(torch_c_value_with_type_float).onnx_type()
        TensorProtoDataType.FLOAT

        >>> JitScalarType.from_dtype(torch.get_default_dtype()).onnx_type()
        TensorProtoDataType.FLOAT
    """

    # Order defined in https://github.com/pytorch/pytorch/blob/344defc9733a45fee8d0c4d3f5530f631e823196/c10/core/ScalarType.h
    UINT8 = 0
    INT8 = enum.auto()  # 1
    INT16 = enum.auto()  # 2
    INT = enum.auto()  # 3
    INT64 = enum.auto()  # 4
    HALF = enum.auto()  # 5
    FLOAT = enum.auto()  # 6
    DOUBLE = enum.auto()  # 7
    COMPLEX32 = enum.auto()  # 8
    COMPLEX64 = enum.auto()  # 9
    COMPLEX128 = enum.auto()  # 10
    BOOL = enum.auto()  # 11
    QINT8 = enum.auto()  # 12
    QUINT8 = enum.auto()  # 13
    QINT32 = enum.auto()  # 14
    BFLOAT16 = enum.auto()  # 15
    FLOAT8E5M2 = enum.auto()  # 16
    FLOAT8E4M3FN = enum.auto()  # 17
    FLOAT8E5M2FNUZ = enum.auto()  # 18
    FLOAT8E4M3FNUZ = enum.auto()  # 19
    UNDEFINED = enum.auto()  # 20

    @classmethod
    def _from_name(cls, name: ScalarName | TorchName | str | None) -> JitScalarType:
        """Convert a JIT scalar type or torch type name to ScalarType.

        Note: DO NOT USE this API when `name` comes from a `torch._C.Value.type()` calls.
            A "RuntimeError: INTERNAL ASSERT FAILED at "../aten/src/ATen/core/jit_type_base.h" can
            be raised in several scenarios where shape info is not present.
            Instead use `from_value` API which is safer.

        Args:
            name: JIT scalar type name (Byte) or torch type name (uint8_t).

        Returns:
            JitScalarType

        Raises:
            OnnxExporterError: if name is not a valid scalar type name or if it is None.
        """
        if name is None:
            raise errors.OnnxExporterError("Scalar type name cannot be None")
        # Try the JIT scalar-name table first, then the torch-name table.
        if valid_scalar_name(name):
            return _SCALAR_NAME_TO_TYPE[name]  # type: ignore[index]
        if valid_torch_name(name):
            return _TORCH_NAME_TO_SCALAR_TYPE[name]  # type: ignore[index]
        raise errors.OnnxExporterError(f"Unknown torch or scalar type: '{name}'")

    @classmethod
    def from_dtype(cls, dtype: torch.dtype | None) -> JitScalarType:
        """Convert a torch dtype to JitScalarType.

        Note: DO NOT USE this API when `dtype` comes from a `torch._C.Value.type()` calls.
            A "RuntimeError: INTERNAL ASSERT FAILED at "../aten/src/ATen/core/jit_type_base.h" can
            be raised in several scenarios where shape info is not present.
            Instead use `from_value` API which is safer.

        Args:
            dtype: A torch.dtype to create a JitScalarType from

        Returns:
            JitScalarType

        Raises:
            OnnxExporterError: if dtype is not a valid torch.dtype or if it is None.
        """
        if dtype not in _DTYPE_TO_SCALAR_TYPE:
            raise errors.OnnxExporterError(f"Unknown dtype: {dtype}")
        # pyrefly: ignore [index-error]
        return _DTYPE_TO_SCALAR_TYPE[dtype]

    @classmethod
    def from_onnx_type(
        cls, onnx_type: int | _C_onnx.TensorProtoDataType | None
    ) -> JitScalarType:
        """Convert a ONNX data type to JitScalarType.

        Args:
            onnx_type: A torch._C._onnx.TensorProtoDataType to create a JitScalarType from

        Returns:
            JitScalarType

        Raises:
            OnnxExporterError: if dtype is not a valid torch.dtype or if it is None.
        """
        if onnx_type not in _ONNX_TO_SCALAR_TYPE:
            raise errors.OnnxExporterError(f"Unknown onnx_type: {onnx_type}")
        return _ONNX_TO_SCALAR_TYPE[typing.cast(_C_onnx.TensorProtoDataType, onnx_type)]

    @classmethod
    def from_value(
        cls, value: None | torch._C.Value | torch.Tensor, default=None
    ) -> JitScalarType:
        """Create a JitScalarType from an value's scalar type.

        Args:
            value: An object to fetch scalar type from.
            default: The JitScalarType to return if a valid scalar cannot be fetched from value

        Returns:
            JitScalarType.

        Raises:
            OnnxExporterError: if value does not have a valid scalar type and default is None.
            SymbolicValueError: when value.type()'s info are empty and default is None
        """
        if not isinstance(value, (torch._C.Value, torch.Tensor)) or (
            isinstance(value, torch._C.Value) and value.node().mustBeNone()
        ):
            # default value of type JitScalarType is returned when value is not valid
            if default is None:
                raise errors.OnnxExporterError(
                    "value must be either torch._C.Value or torch.Tensor objects."
                )
            elif not isinstance(default, JitScalarType):
                raise errors.OnnxExporterError(
                    "default value must be a JitScalarType object."
                )
            return default

        # Each value type has their own way of storing scalar type
        if isinstance(value, torch.Tensor):
            return cls.from_dtype(value.dtype)
        if isinstance(value.type(), torch.ListType):
            # Lists carry the scalar type on their element type; fall back to
            # the element type's name when dtype() raises (no shape info).
            try:
                return cls.from_dtype(value.type().getElementType().dtype())
            except RuntimeError:
                return cls._from_name(str(value.type().getElementType()))
        if isinstance(value.type(), torch._C.OptionalType):
            if value.type().getElementType().dtype() is None:
                if isinstance(default, JitScalarType):
                    return default
                raise errors.OnnxExporterError(
                    "default value must be a JitScalarType object."
                )
            return cls.from_dtype(value.type().getElementType().dtype())

        scalar_type = None
        if value.node().kind() != "prim::Constant" or not isinstance(
            value.type(), torch._C.NoneType
        ):
            # value must be a non-list torch._C.Value scalar
            scalar_type = value.type().scalarType()

        if scalar_type is not None:
            return cls._from_name(scalar_type)

        # When everything fails... try to default
        if default is not None:
            return default
        raise errors.SymbolicValueError(
            f"Cannot determine scalar type for this '{type(value.type())}' instance and "
            "a default value was not provided.",
            value,
        )

    def scalar_name(self) -> ScalarName:
        """Convert a JitScalarType to a JIT scalar type name."""
        return _SCALAR_TYPE_TO_NAME[self]

    def torch_name(self) -> TorchName:
        """Convert a JitScalarType to a torch type name."""
        return _SCALAR_TYPE_TO_TORCH_NAME[self]

    def dtype(self) -> torch.dtype:
        """Convert a JitScalarType to a torch dtype."""
        return _SCALAR_TYPE_TO_DTYPE[self]

    def onnx_type(self) -> _C_onnx.TensorProtoDataType:
        """Convert a JitScalarType to an ONNX data type."""
        if self not in _SCALAR_TYPE_TO_ONNX:
            raise errors.OnnxExporterError(
                f"Scalar type {self} cannot be converted to ONNX"
            )
        return _SCALAR_TYPE_TO_ONNX[self]

    def onnx_compatible(self) -> bool:
        """Return whether this JitScalarType is compatible with ONNX."""
        return (
            self in _SCALAR_TYPE_TO_ONNX
            and self != JitScalarType.UNDEFINED
            and self != JitScalarType.COMPLEX32
        )
def valid_scalar_name(scalar_name: ScalarName | str) -> bool:
    """Tell whether ``scalar_name`` is one of the known JIT scalar type names."""
    known_names = _SCALAR_NAME_TO_TYPE
    return scalar_name in known_names
def valid_torch_name(torch_name: TorchName | str) -> bool:
    """Tell whether ``torch_name`` is one of the known torch type names."""
    known_names = _TORCH_NAME_TO_SCALAR_TYPE
    return torch_name in known_names
# https://github.com/pytorch/pytorch/blob/344defc9733a45fee8d0c4d3f5530f631e823196/c10/core/ScalarType.h
# JitScalarType -> JIT scalar type name (e.g. "Byte", "Float").
_SCALAR_TYPE_TO_NAME: dict[JitScalarType, ScalarName] = {
    JitScalarType.BOOL: "Bool",
    JitScalarType.UINT8: "Byte",
    JitScalarType.INT8: "Char",
    JitScalarType.INT16: "Short",
    JitScalarType.INT: "Int",
    JitScalarType.INT64: "Long",
    JitScalarType.HALF: "Half",
    JitScalarType.FLOAT: "Float",
    JitScalarType.DOUBLE: "Double",
    JitScalarType.COMPLEX32: "ComplexHalf",
    JitScalarType.COMPLEX64: "ComplexFloat",
    JitScalarType.COMPLEX128: "ComplexDouble",
    JitScalarType.QINT8: "QInt8",
    JitScalarType.QUINT8: "QUInt8",
    JitScalarType.QINT32: "QInt32",
    JitScalarType.BFLOAT16: "BFloat16",
    JitScalarType.FLOAT8E5M2: "Float8E5M2",
    JitScalarType.FLOAT8E4M3FN: "Float8E4M3FN",
    JitScalarType.FLOAT8E5M2FNUZ: "Float8E5M2FNUZ",
    JitScalarType.FLOAT8E4M3FNUZ: "Float8E4M3FNUZ",
    JitScalarType.UNDEFINED: "Undefined",
}

# Inverse of _SCALAR_TYPE_TO_NAME.
_SCALAR_NAME_TO_TYPE: dict[ScalarName, JitScalarType] = {
    v: k for k, v in _SCALAR_TYPE_TO_NAME.items()
}

# JitScalarType -> torch/C++ type name (e.g. "uint8_t", "float").
# NOTE: UNDEFINED deliberately has no torch-name entry.
_SCALAR_TYPE_TO_TORCH_NAME: dict[JitScalarType, TorchName] = {
    JitScalarType.BOOL: "bool",
    JitScalarType.UINT8: "uint8_t",
    JitScalarType.INT8: "int8_t",
    JitScalarType.INT16: "int16_t",
    JitScalarType.INT: "int",
    JitScalarType.INT64: "int64_t",
    JitScalarType.HALF: "half",
    JitScalarType.FLOAT: "float",
    JitScalarType.DOUBLE: "double",
    JitScalarType.COMPLEX32: "complex32",
    JitScalarType.COMPLEX64: "complex64",
    JitScalarType.COMPLEX128: "complex128",
    JitScalarType.QINT8: "qint8",
    JitScalarType.QUINT8: "quint8",
    JitScalarType.QINT32: "qint32",
    JitScalarType.BFLOAT16: "bfloat16",
    JitScalarType.FLOAT8E5M2: "float8_e5m2",
    JitScalarType.FLOAT8E4M3FN: "float8_e4m3fn",
    JitScalarType.FLOAT8E5M2FNUZ: "float8_e5m2fnuz",
    JitScalarType.FLOAT8E4M3FNUZ: "float8_e4m3fnuz",
}

# Inverse of _SCALAR_TYPE_TO_TORCH_NAME.
_TORCH_NAME_TO_SCALAR_TYPE: dict[TorchName, JitScalarType] = {
    v: k for k, v in _SCALAR_TYPE_TO_TORCH_NAME.items()
}

# JitScalarType -> ONNX TensorProtoDataType. Quantized types map onto their
# plain integer ONNX types; COMPLEX32 has no ONNX equivalent (UNDEFINED).
_SCALAR_TYPE_TO_ONNX = {
    JitScalarType.BOOL: _C_onnx.TensorProtoDataType.BOOL,
    JitScalarType.UINT8: _C_onnx.TensorProtoDataType.UINT8,
    JitScalarType.INT8: _C_onnx.TensorProtoDataType.INT8,
    JitScalarType.INT16: _C_onnx.TensorProtoDataType.INT16,
    JitScalarType.INT: _C_onnx.TensorProtoDataType.INT32,
    JitScalarType.INT64: _C_onnx.TensorProtoDataType.INT64,
    JitScalarType.HALF: _C_onnx.TensorProtoDataType.FLOAT16,
    JitScalarType.FLOAT: _C_onnx.TensorProtoDataType.FLOAT,
    JitScalarType.DOUBLE: _C_onnx.TensorProtoDataType.DOUBLE,
    JitScalarType.COMPLEX64: _C_onnx.TensorProtoDataType.COMPLEX64,
    JitScalarType.COMPLEX128: _C_onnx.TensorProtoDataType.COMPLEX128,
    JitScalarType.BFLOAT16: _C_onnx.TensorProtoDataType.BFLOAT16,
    JitScalarType.UNDEFINED: _C_onnx.TensorProtoDataType.UNDEFINED,
    JitScalarType.COMPLEX32: _C_onnx.TensorProtoDataType.UNDEFINED,
    JitScalarType.QINT8: _C_onnx.TensorProtoDataType.INT8,
    JitScalarType.QUINT8: _C_onnx.TensorProtoDataType.UINT8,
    JitScalarType.QINT32: _C_onnx.TensorProtoDataType.INT32,
    JitScalarType.FLOAT8E5M2: _C_onnx.TensorProtoDataType.FLOAT8E5M2,
    JitScalarType.FLOAT8E4M3FN: _C_onnx.TensorProtoDataType.FLOAT8E4M3FN,
    JitScalarType.FLOAT8E5M2FNUZ: _C_onnx.TensorProtoDataType.FLOAT8E5M2FNUZ,
    JitScalarType.FLOAT8E4M3FNUZ: _C_onnx.TensorProtoDataType.FLOAT8E4M3FNUZ,
}

# Inverse of _SCALAR_TYPE_TO_ONNX. NOTE: several scalar types share an ONNX
# type above, so later duplicate values overwrite earlier ones here.
_ONNX_TO_SCALAR_TYPE = {v: k for k, v in _SCALAR_TYPE_TO_ONNX.items()}

# source of truth is
# https://github.com/pytorch/pytorch/blob/master/torch/csrc/utils/tensor_dtypes.cpp
_SCALAR_TYPE_TO_DTYPE = {
    JitScalarType.BOOL: torch.bool,
    JitScalarType.UINT8: torch.uint8,
    JitScalarType.INT8: torch.int8,
    JitScalarType.INT16: torch.short,
    JitScalarType.INT: torch.int,
    JitScalarType.INT64: torch.int64,
    JitScalarType.HALF: torch.half,
    JitScalarType.FLOAT: torch.float,
    JitScalarType.DOUBLE: torch.double,
    JitScalarType.COMPLEX32: torch.complex32,
    JitScalarType.COMPLEX64: torch.complex64,
    JitScalarType.COMPLEX128: torch.complex128,
    JitScalarType.QINT8: torch.qint8,
    JitScalarType.QUINT8: torch.quint8,
    JitScalarType.QINT32: torch.qint32,
    JitScalarType.BFLOAT16: torch.bfloat16,
    JitScalarType.FLOAT8E5M2: torch.float8_e5m2,
    JitScalarType.FLOAT8E4M3FN: torch.float8_e4m3fn,
    JitScalarType.FLOAT8E5M2FNUZ: torch.float8_e5m2fnuz,
    JitScalarType.FLOAT8E4M3FNUZ: torch.float8_e4m3fnuz,
}

# Inverse of _SCALAR_TYPE_TO_DTYPE.
_DTYPE_TO_SCALAR_TYPE = {v: k for k, v in _SCALAR_TYPE_TO_DTYPE.items()}
|
JitScalarType
|
python
|
arrow-py__arrow
|
tests/test_locales.py
|
{
"start": 75041,
"end": 75696
}
|
class ____:
    """Checks Bengali ordinal suffix selection for representative numbers."""

    def test_ordinal_number(self):
        # Input number -> full rendered ordinal string; the locale comes from
        # a fixture (presumably "lang_locale" — confirm against the test file).
        # A negative number is out of range and renders as an empty string.
        cases = {
            0: "0তম",
            1: "1ম",
            3: "3য়",
            4: "4র্থ",
            5: "5ম",
            6: "6ষ্ঠ",
            10: "10ম",
            11: "11তম",
            42: "42তম",
            -1: "",
        }
        for number, rendered in cases.items():
            assert self.locale._ordinal_number(number) == rendered
@pytest.mark.usefixtures("lang_locale")
|
TestBengaliLocale
|
python
|
tornadoweb__tornado
|
tornado/test/tcpserver_test.py
|
{
"start": 3814,
"end": 7711
}
|
class ____(unittest.TestCase):
    # These tests verify that the two multiprocess examples from the
    # TCPServer docs work. Both tests start a server with three worker
    # processes, each of which prints its task id to stdout (a single
    # byte, so we don't have to worry about atomicity of the shared
    # stdout stream) and then exits.

    def run_subproc(self, code: str) -> Tuple[str, str]:
        """Run ``code`` in a fresh interpreter and return its (stdout, stderr).

        DeprecationWarnings are promoted to errors so the examples must be
        warning-clean; a nonzero exit is re-raised with the captured output.
        """
        try:
            result = subprocess.run(
                [sys.executable, "-Werror::DeprecationWarning"],
                capture_output=True,
                input=code,
                encoding="utf8",
                check=True,
            )
        except subprocess.CalledProcessError as e:
            raise RuntimeError(
                f"Process returned {e.returncode} stdout={e.stdout} stderr={e.stderr}"
            ) from e
        return result.stdout, result.stderr

    def test_listen_single(self):
        # As a sanity check, run the single-process version through this test
        # harness too.
        code = textwrap.dedent(
            """
            import asyncio
            from tornado.tcpserver import TCPServer

            async def main():
                server = TCPServer()
                server.listen(0, address='127.0.0.1')

            asyncio.run(main())
            print('012', end='')
            """
        )
        out, err = self.run_subproc(code)
        self.assertEqual("".join(sorted(out)), "012")
        self.assertEqual(err, "")

    def test_bind_start(self):
        # Legacy bind/start API; it is deprecated, so the warning filter is
        # relaxed inside the subprocess.
        code = textwrap.dedent(
            """
            import warnings
            from tornado.ioloop import IOLoop
            from tornado.process import task_id
            from tornado.tcpserver import TCPServer

            warnings.simplefilter("ignore", DeprecationWarning)
            server = TCPServer()
            server.bind(0, address='127.0.0.1')
            server.start(3)
            IOLoop.current().run_sync(lambda: None)
            print(task_id(), end='')
            """
        )
        out, err = self.run_subproc(code)
        # Each of the three workers prints its task id; sorting makes the
        # interleaving order irrelevant.
        self.assertEqual("".join(sorted(out)), "012")
        self.assertEqual(err, "")

    def test_add_sockets(self):
        # Bind sockets before forking, then hand them to a server per worker.
        code = textwrap.dedent(
            """
            import asyncio
            from tornado.netutil import bind_sockets
            from tornado.process import fork_processes, task_id
            from tornado.ioloop import IOLoop
            from tornado.tcpserver import TCPServer

            sockets = bind_sockets(0, address='127.0.0.1')
            fork_processes(3)

            async def post_fork_main():
                server = TCPServer()
                server.add_sockets(sockets)

            asyncio.run(post_fork_main())
            print(task_id(), end='')
            """
        )
        out, err = self.run_subproc(code)
        self.assertEqual("".join(sorted(out)), "012")
        self.assertEqual(err, "")

    def test_listen_multi_reuse_port(self):
        # Each forked worker binds the same port via SO_REUSEPORT.
        code = textwrap.dedent(
            """
            import asyncio
            import socket
            from tornado.netutil import bind_sockets
            from tornado.process import task_id, fork_processes
            from tornado.tcpserver import TCPServer

            # Pick an unused port which we will be able to bind to multiple times.
            (sock,) = bind_sockets(0, address='127.0.0.1',
                                   family=socket.AF_INET, reuse_port=True)
            port = sock.getsockname()[1]
            fork_processes(3)

            async def main():
                server = TCPServer()
                server.listen(port, address='127.0.0.1', reuse_port=True)

            asyncio.run(main())
            print(task_id(), end='')
            """
        )
        out, err = self.run_subproc(code)
        self.assertEqual("".join(sorted(out)), "012")
        self.assertEqual(err, "")
|
TestMultiprocess
|
python
|
ray-project__ray
|
python/ray/data/preprocessors/discretizer.py
|
{
"start": 2285,
"end": 7217
}
|
class ____(_AbstractKBinsDiscretizer):
    """Bin values into discrete intervals using custom bin edges.

    Columns must contain numerical values.

    Examples:
        Use :class:`CustomKBinsDiscretizer` to bin continuous features.

        >>> import pandas as pd
        >>> import ray
        >>> from ray.data.preprocessors import CustomKBinsDiscretizer
        >>> df = pd.DataFrame({
        ...     "value_1": [0.2, 1.4, 2.5, 6.2, 9.7, 2.1],
        ...     "value_2": [10, 15, 13, 12, 23, 25],
        ... })
        >>> ds = ray.data.from_pandas(df)
        >>> discretizer = CustomKBinsDiscretizer(
        ...     columns=["value_1", "value_2"],
        ...     bins=[0, 1, 4, 10, 25]
        ... )
        >>> discretizer.transform(ds).to_pandas()
           value_1  value_2
        0        0        2
        1        1        3
        2        1        3
        3        2        3
        4        2        3
        5        1        3

        :class:`CustomKBinsDiscretizer` can also be used in append mode by providing the
        name of the output_columns that should hold the encoded values.

        >>> discretizer = CustomKBinsDiscretizer(
        ...     columns=["value_1", "value_2"],
        ...     bins=[0, 1, 4, 10, 25],
        ...     output_columns=["value_1_discretized", "value_2_discretized"]
        ... )
        >>> discretizer.fit_transform(ds).to_pandas()  # doctest: +SKIP
           value_1  value_2  value_1_discretized  value_2_discretized
        0      0.2       10                    0                    2
        1      1.4       15                    1                    3
        2      2.5       13                    1                    3
        3      6.2       12                    2                    3
        4      9.7       23                    2                    3
        5      2.1       25                    1                    3

        You can also specify different bin edges per column.

        >>> discretizer = CustomKBinsDiscretizer(
        ...     columns=["value_1", "value_2"],
        ...     bins={"value_1": [0, 1, 4], "value_2": [0, 18, 35, 70]},
        ... )
        >>> discretizer.transform(ds).to_pandas()
           value_1  value_2
        0      0.0        0
        1      1.0        0
        2      1.0        0
        3      NaN        0
        4      NaN        1
        5      1.0        1

    Args:
        columns: The columns to discretize.
        bins: Defines custom bin edges. Can be an iterable of numbers,
            a ``pd.IntervalIndex``, or a dict mapping columns to either of them.
            Note that ``pd.IntervalIndex`` for bins must be non-overlapping.
        right: Indicates whether bins include the rightmost edge.
        include_lowest: Indicates whether the first interval should be left-inclusive.
        duplicates: Can be either 'raise' or 'drop'. If bin edges are not unique,
            raise ``ValueError`` or drop non-uniques.
        dtypes: An optional dictionary that maps columns to ``pd.CategoricalDtype``
            objects or ``np.integer`` types. If you don't include a column in ``dtypes``
            or specify it as an integer dtype, the outputted column will consist of
            ordered integers corresponding to bins. If you use a
            ``pd.CategoricalDtype``, the outputted column will be a
            ``pd.CategoricalDtype`` with the categories being mapped to bins.
            You can use ``pd.CategoricalDtype(categories, ordered=True)`` to
            preserve information about bin order.
        output_columns: The names of the transformed columns. If None, the transformed
            columns will be the same as the input columns. If not None, the length of
            ``output_columns`` must match the length of ``columns``, otherwise an error
            will be raised.

    .. seealso::

        :class:`UniformKBinsDiscretizer`
            If you want to bin data into uniform width bins.
    """

    def __init__(
        self,
        columns: List[str],
        bins: Union[
            Iterable[float],
            pd.IntervalIndex,
            Dict[str, Union[Iterable[float], pd.IntervalIndex]],
        ],
        *,
        right: bool = True,
        include_lowest: bool = False,
        duplicates: str = "raise",
        dtypes: Optional[
            Dict[str, Union[pd.CategoricalDtype, Type[np.integer]]]
        ] = None,
        output_columns: Optional[List[str]] = None,
    ):
        self.columns = columns
        self.bins = bins
        self.right = right
        self.include_lowest = include_lowest
        self.duplicates = duplicates
        self.dtypes = dtypes
        # Validates that output_columns (if given) lines up with columns,
        # otherwise defaults the outputs to the input column names.
        self.output_columns = Preprocessor._derive_and_validate_output_columns(
            columns, output_columns
        )
        self._validate_bins_columns()

    # Custom edges are supplied up front, so no fitting pass is needed.
    _is_fittable = False
@PublicAPI(stability="alpha")
|
CustomKBinsDiscretizer
|
python
|
pytorch__pytorch
|
torchgen/api/lazy.py
|
{
"start": 11456,
"end": 17053
}
|
class ____:
# The name of the operator this function schema describes.
name: OperatorName
positional_args: tuple[LazyArgument, ...]
keyword_args: tuple[LazyArgument, ...]
# TODO: Need to handle collisions with argument names at some point
returns: tuple[Return, ...]
# if this schema has a Generator arg, list its orig ctype/name but don't
# build a LazyArgument since lazy IR doesn't support it
generator_arg: NamedCType | None = None
# original function schema
func: FunctionSchema
# Whether or not we are code-genning for SymInt or not
symint: bool
properties: LazyIrProperties = LazyIrProperties(
# default properties
"ShapePrecompute",
"Lower",
"CanBeReused",
)
opkind: str | None = None
def __init__(
self,
func: FunctionSchema,
properties: LazyIrProperties | None = None,
*,
symint: bool,
) -> None:
if properties:
self.properties = properties
self.func = func
self.symint = symint
positional_args: list[LazyArgument] = []
for arg_field in ["pre_self_positional", "self_arg", "post_self_positional"]:
if arg_field == "self_arg" and func.arguments.self_arg is not None:
arg = func.arguments.self_arg.argument
positional_args.append(
LazyArgument(arg, self.properties, symint=symint)
)
elif getattr(func.arguments, arg_field) is not None:
positional_args.extend(
LazyArgument(arg, self.properties, symint=symint)
for arg in getattr(func.arguments, arg_field)
)
self.positional_args = tuple(positional_args)
keyword_args: list[LazyArgument] = []
for arg_field in [
"pre_tensor_options_kwarg_only",
"tensor_options",
"post_tensor_options_kwarg_only",
"out",
]:
curr_args = getattr(func.arguments, arg_field)
if curr_args is not None:
if isinstance(curr_args, TensorOptionsArguments):
curr_args = curr_args.all()
for arg in curr_args:
if isGeneratorType(arg.type):
assert self.generator_arg is None, (
"We expect there is only one generator arg"
)
self.generator_arg = NamedCType(
arg.name,
arg.type, # type:ignore[arg-type]
)
keyword_args.extend(
LazyArgument(arg, self.properties, symint=symint)
for arg in curr_args
)
self.keyword_args = tuple(keyword_args)
self.name = func.name
self.returns = func.returns
@property
def node_name(self) -> str:
"""
Return camel-case version of op in node.
Note: This function also appends any `overload_name` in the operation.
For example, if the op is `bitwise_and.Tensor`, the returned name
will be `BitwiseAndTensor`.
"""
op_name = f"{self.name.name}_{self.name.overload_name}".lower()
return "".join(word.capitalize() or "" for word in op_name.split("_"))
@property
def aten_name(self) -> str:
return str(self.name.name)
@property
def base_name(self) -> str:
return f"{self.name.name.base}"
def filtered_args(
self,
positional: bool = True,
keyword: bool = True,
values: bool = True,
scalars: bool = True,
generator: bool = True,
) -> list[LazyArgument]:
# This function maintains the sorted order of arguments but provides different filtered views.
# Some parts of the code care about kwargs vs args (TS lowerings),
# other parts care about whether they need to wrap the arg in a lazy value or leave it alone.
# Generators are special cased, as they are needed for fallback/shape-inference but not supported
# in TS lowerings and therefore also omitted from lazy IR.
args: list[LazyArgument] = []
if positional:
args.extend(self.positional_args)
if keyword:
args.extend(self.keyword_args)
if values and scalars and generator:
return args
elif values and scalars:
return [a for a in args if not a.is_generator]
elif values:
return [a for a in args if a.is_lazy_value]
elif scalars:
return [
a
for a in args
if not a.is_lazy_value and (generator or not a.is_generator)
]
return []
@property
def positional_values(self) -> list[LazyArgument]:
return self.filtered_args(
positional=True, keyword=False, values=True, scalars=False
)
@property
def positional_scalars(self) -> list[LazyArgument]:
return self.filtered_args(
positional=True, keyword=False, values=False, scalars=True
)
@property
def keyword_values(self) -> list[LazyArgument]:
return self.filtered_args(
positional=False, keyword=True, values=True, scalars=False
)
@property
def keyword_scalars(self) -> list[LazyArgument]:
return self.filtered_args(
positional=False, keyword=True, values=False, scalars=True
)
|
LazyIrSchema
|
python
|
tensorflow__tensorflow
|
tensorflow/python/framework/extension_type.py
|
{
"start": 3908,
"end": 15230
}
|
class ____(
    composite_tensor.CompositeTensor, metaclass=ExtensionTypeMetaclass
):
  """Base class for TensorFlow `ExtensionType` classes.

  Tensorflow `ExtensionType` classes are specialized Python classes that can be
  used transparently with TensorFlow -- e.g., they can be used with ops
  such as `tf.cond` or `tf.while_loop` and used as inputs or outputs for
  `tf.function` and Keras layers.

  New `ExtensionType` classes are defined by creating a subclass of
  `tf.ExtensionType` that
  contains type annotations for all instance variables. The following type
  annotations are supported:

  Type                      | Example
  ------------------------- | --------------------------------------------
  Python integers           | `i: int`
  Python floats             | `f: float`
  Python strings            | `s: str`
  Python booleans           | `b: bool`
  Python None               | `n: None`
  Python tuple              | `params: tuple[int, float, int, int]`
  Python tuple w/ Ellipsis  | `lengths: tuple[int, ...]`
  Tensors                   | `t: tf.Tensor`
  Composite Tensors         | `rt: tf.RaggedTensor`
  Extension Types           | `m: MyMaskedTensor`
  Tensor shapes             | `shape: tf.TensorShape`
  Tensor dtypes             | `dtype: tf.DType`
  Type unions               | `length: typing.Union[int, float]`
  Tuples                    | `params: typing.Tuple[int, float, int, int]`
  Tuples w/ Ellipsis        | `lengths: typing.Tuple[int, ...]`
  Mappings                  | `tags: typing.Mapping[str, str]`

  Fields annotated with `typing.Mapping` will be stored using an immutable
  mapping type.

  ExtensionType values are immutable -- i.e., once constructed, you can not
  modify or delete any of their instance members.

  ### Examples

  >>> class MaskedTensor(ExtensionType):
  ...   values: tf.Tensor
  ...   mask: tf.Tensor

  >>> class Toy(ExtensionType):
  ...   name: str
  ...   price: tensor.Tensor
  ...   features: typing.Mapping[str, tf.Tensor]

  >>> class ToyStore(ExtensionType):
  ...   name: str
  ...   toys: typing.Tuple[Toy, ...]
  """

  # Let the metaclass know that it should *not* transform this class (since
  # this class is part of the ExtensionType framework, and not a user class).
  _tf_extension_type_do_not_transform_this_class = True

  def __init__(self, *args, **kwargs):
    # This base class is abstract; only subclasses (whose constructors are
    # generated by the metaclass) may be instantiated.
    if type(self) is ExtensionType:  # pylint: disable=unidiomatic-typecheck
      raise AssertionError(
          'Cannot create an instance of ExtensionType '
          'because ExtensionType is an abstract base class.'
      )

  # This class variable is used to cache the return value for
  # _tf_extension_type_fields.
  _tf_extension_type_cached_fields = None

  @classmethod
  def _tf_extension_type_fields(cls):  # pylint: disable=no-self-argument
    """An ordered list describing the fields of this ExtensionType.

    Returns:
      A list of `ExtensionTypeField` objects.  Forward references are resolved
      if possible, or left unresolved otherwise.
    """
    if '_tf_extension_type_cached_fields' in cls.__dict__:  # do not inherit.
      return cls._tf_extension_type_cached_fields

    try:
      # Using include_extras=False will replace all Annotated[T, ...] with T.
      # The typing_extensions module is used since this is only supported in
      # Python 3.9.
      type_hints = typing_extensions.get_type_hints(cls, include_extras=False)
      ok_to_cache = True  # all forward references have been resolved.
    except (NameError, AttributeError):
      # Unresolved forward reference -- gather type hints manually.
      # * NameError comes from an annotation like `Foo` where class
      #   `Foo` hasn't been defined yet.
      # * AttributeError comes from an annotation like `foo.Bar`, where
      #   the module `foo` exists but `Bar` hasn't been defined yet.
      # Note: If a user attempts to instantiate a `ExtensionType` type that
      # still has unresolved forward references (e.g., because of a typo or a
      # missing import), then the constructor will raise an exception.
      type_hints = {}
      for base in reversed(cls.__mro__):
        type_hints.update(base.__dict__.get('__annotations__', {}))
      ok_to_cache = False

    fields = []
    for name, value_type in type_hints.items():
      # A class-level value for the annotated name serves as its default.
      default = getattr(
          cls, name, extension_type_field.ExtensionTypeField.NO_DEFAULT
      )
      fields.append(
          extension_type_field.ExtensionTypeField(name, value_type, default)
      )
    fields = tuple(fields)

    if ok_to_cache:
      cls._tf_extension_type_cached_fields = fields

    return fields

  @classmethod
  def _tf_extension_type_has_field(cls, name):
    # True if `name` is one of this type's declared fields.
    return any(name == field.name for field in cls._tf_extension_type_fields())

  def _tf_extension_type_convert_fields(self):
    # Convert raw field values in __dict__ to their annotated types in place.
    extension_type_field.convert_fields(
        self._tf_extension_type_fields(), self.__dict__
    )

  def __repr__(self):
    fields = ', '.join(
        [
            f'{field.name}={getattr(self, field.name)!r}'
            for field in self._tf_extension_type_fields()
        ]
    )
    return f'{type(self).__qualname__}({fields})'

  def __setattr__(self, name, value):
    # Enforce immutability: fields may only be assigned inside the custom
    # constructor (signaled by the _IN_CONSTRUCTOR marker attribute).
    if name in _MUTABLE_KERAS_PROPERTIES or (
        hasattr(self, _IN_CONSTRUCTOR)
        and self._tf_extension_type_has_field(name)
    ):
      self.__dict__[name] = value
    else:
      raise AttributeError(
          f'Cannot mutate attribute `{name}` '
          'outside the custom constructor of ExtensionType.'
      )

  def __delattr__(self, name):
    # Same immutability rule as __setattr__.
    if name in _MUTABLE_KERAS_PROPERTIES or (
        hasattr(self, _IN_CONSTRUCTOR)
        and self._tf_extension_type_has_field(name)
    ):
      del self.__dict__[name]
    else:
      raise AttributeError(
          f'Cannot mutate attribute `{name}` '
          'outside the custom constructor of ExtensionType.'
      )

  def __getattr__(self, name):
    if name in _MUTABLE_KERAS_PROPERTIES:
      return object.__getattribute__(self, name)
    if '_tf_extension_type_packed_variant' in self.__dict__:
      # Note: it's *not* ok to cache the results of unpack() here.  In
      # particular, it would be nice if we could do something like
      # `self.__dict__.update(unpack(self).__dict__)`, but that (potentially)
      # violates an invariant required by the `cond` operation.  E.g., if we had
      # `tf.cond(lambda: x.foo, lambda: x.bar)`, then tensor `x.bar` used in the
      # "else" branch would be created by an op in the "then" branch (when
      # looking up `x.foo`); and that's not allowed.
      return getattr(unpack(self), name)

    raise AttributeError(
        f'{type(self).__name__!r} object has no attribute {name!r}'
    )

  def __eq__(self, other):
    # Note: when the flattened components are Tensors, the result is itself
    # a boolean Tensor (see __ne__, which checks for that case).
    if type(self) is not type(other):
      return False

    if self._type_spec != other._type_spec:
      return False

    self_tensors = nest.flatten(self, expand_composites=True)
    other_tensors = nest.flatten(other, expand_composites=True)
    if len(self_tensors) != len(other_tensors):
      return False
    conditions = []
    for t1, t2 in zip(self_tensors, other_tensors):
      conditions.append(
          math_ops.reduce_all(
              gen_math_ops.equal(
                  array_ops.shape(t1),
                  array_ops.shape(t2),
                  incompatible_shape_error=False,
              )
          )
      )
      # Explicitly check shape (values that have different shapes but broadcast
      # to the same value are considered non-equal).
      conditions.append(
          math_ops.reduce_all(
              gen_math_ops.equal(t1, t2, incompatible_shape_error=False)
          )
      )
    return math_ops.reduce_all(array_ops_stack.stack(conditions))

  def __ne__(self, other):
    eq = self.__eq__(other)
    if isinstance(eq, tensor.Tensor):
      return math_ops.logical_not(eq)
    else:
      return not eq

  def __validate__(self):
    """Perform post-construction validation."""

  # This instance variable is used to cache the value for the _type_spec
  # property.
  _tf_extension_type_cached_type_spec = None

  @property
  def _type_spec(self):  # CompositeTensor API.
    # Note: the TypeSpec contains all static (non-tensor) data from `self`.
    if self._tf_extension_type_cached_type_spec is None:
      assert not is_packed(self)  # Packed version always caches TypeSpec.
      # Write through __dict__ to bypass this class's immutable __setattr__.
      self.__dict__['_tf_extension_type_cached_type_spec'] = (
          self.Spec.from_value(self)
      )
    return self._tf_extension_type_cached_type_spec
@tf_export('experimental.extension_type.as_dict')
def as_dict(value):
  """Extracts the attributes of `value` and their values to a dict format.

  Unlike `dataclasses.asdict()`, this function is not recursive and in case of
  nested `ExtensionType` objects, only the top level object is converted to a
  dict.

  Args:
    value: An `ExtensionType` object.

  Returns:
    A dict that contains the attributes of `value` and their values.
  """
  result = {}
  for field in value._tf_extension_type_fields():  # pylint: disable=protected-access
    result[field.name] = getattr(value, field.name)
  return result
def pack(value):
  """Returns a copy of `value` with fields packed in a single Variant.

  Args:
    value: An `ExtensionType` object.

  Returns:
    An `ExtensionType` object.
  """
  if is_packed(value):
    return value

  # Build a TypeSpec that marks the value as packed, then encode all of the
  # value's components into a single variant tensor.
  spec = value._type_spec._tf_extension_type_with_packed(True)  # pylint: disable=protected-access
  try:
    variant = composite_tensor_ops.composite_tensor_to_variants(value)
  except nested_structure_coder.NotEncodableError as e:
    # Note: the only time `_TypeSpecCodec.can_encode` returns False is if the
    # named type is not registered.  The default error message would simply
    # tell the user that there is no encoder for the object, so we provide
    # a more useful message letting them know how to register the type.
    raise ValueError(
        'ExtensionTypes must have a __name__ field in order to be packed.'
    ) from e

  # Bypass __init__ (which would require individual field values) and store
  # only the cached spec plus the packed variant.
  return _create_object_from_type_and_dict(
      type(value),
      {
          '_tf_extension_type_cached_type_spec': spec,
          '_tf_extension_type_packed_variant': variant,
      },
  )
def unpack(value):
  """Returns a copy of `value` with individual fields stored in __dict__.

  Args:
    value: An `ExtensionType` object.

  Returns:
    An `ExtensionType` object.
  """
  if not is_packed(value):
    return value

  # Decode the packed variant back into a regular (unpacked) value, using a
  # spec with the "packed" flag cleared.
  # pylint: disable=protected-access
  packed_spec = value._tf_extension_type_cached_type_spec
  unpacked_spec = packed_spec._tf_extension_type_with_packed(False)
  return composite_tensor_ops.composite_tensor_from_variant(
      value._tf_extension_type_packed_variant, unpacked_spec
  )
def is_packed(value):
  """Returns true if `value`'s fields are packed in a single Variant.

  Args:
    value: An `ExtensionType` object.

  Raises:
    ValueError: If `value` is not an `ExtensionType` instance.
  """
  if not isinstance(value, ExtensionType):
    # Fix: the original implicit string concatenation was missing a space,
    # producing "...ExtensionType,got an instance...".
    raise ValueError(
        'Expected `value` to be an object of type ExtensionType, '
        f'got an instance of {type(value)}.'
    )
  # `pack()` stores the encoded variant under this key.
  return '_tf_extension_type_packed_variant' in value.__dict__
# ==============================================================================
# Base class for the tf.ExtensionType TypeSpecs
# ==============================================================================
@tf_export('experimental.ExtensionTypeSpec')
|
ExtensionType
|
python
|
modin-project__modin
|
modin/pandas/groupby.py
|
{
"start": 3090,
"end": 70034
}
|
class ____(ClassLogger, QueryCompilerCaster): # noqa: GL08
_pandas_class = pandas.core.groupby.DataFrameGroupBy
_return_tuple_when_iterating = False
_df: Union[DataFrame, Series]
_query_compiler: BaseQueryCompiler
_extensions: EXTENSION_DICT_TYPE = EXTENSION_DICT_TYPE(dict)
    def __init__(
        self,
        df: Union[DataFrame, Series],
        by,
        axis,
        level,
        as_index,
        sort,
        group_keys,
        idx_name,
        drop,
        backend_pinned: bool,
        **kwargs,
    ):
        """Initialize the groupby object over `df` keyed by `by`.

        Caches the parent frame, its query compiler, and the groupby keyword
        arguments; also determines whether this is a multi-column groupby.
        """
        self._axis = axis
        self._idx_name = idx_name
        self._df = df
        self._query_compiler = self._df._query_compiler
        self._columns = self._query_compiler.columns
        self._by = by
        self._drop = drop
        # When providing a list of columns of length one to DataFrame.groupby(),
        # the keys that are returned by iterating over the resulting DataFrameGroupBy
        # object will now be tuples of length one (pandas#GH47761)
        self._return_tuple_when_iterating = kwargs.pop(
            "return_tuple_when_iterating", False
        )
        # Whether the backend of this groupby object has been pinned.
        self._backend_pinned = backend_pinned

        if (
            level is None
            and is_list_like(by)
            or isinstance(by, type(self._query_compiler))
        ):
            # This tells us whether or not there are multiple columns/rows in the groupby
            self._is_multi_by = (
                isinstance(by, type(self._query_compiler)) and len(by.columns) > 1
            ) or (
                not isinstance(by, type(self._query_compiler))
                and axis == 0
                and all(
                    (hashable(obj) and obj in self._query_compiler.columns)
                    or isinstance(obj, type(self._query_compiler))
                    or is_list_like(obj)
                    for obj in self._by
                )
            )
        else:
            self._is_multi_by = False
        self._level = level
        # Keyword arguments forwarded to pandas groupby calls; any extra
        # kwargs (e.g. observed/dropna) are merged in below.
        self._kwargs = {
            "level": level,
            "sort": sort,
            "as_index": as_index,
            "group_keys": group_keys,
        }
        self._kwargs.update(kwargs)
@disable_logging
@_inherit_docstrings(QueryCompilerCaster._get_query_compiler)
def _get_query_compiler(self) -> Optional[BaseQueryCompiler]:
if hasattr(self, "_df"):
return self._df._query_compiler
return None
@disable_logging
@_inherit_docstrings(QueryCompilerCaster.get_backend)
def get_backend(self) -> str:
return self._df.get_backend()
    @disable_logging
    def set_backend(
        self,
        backend: str,
        inplace: bool = False,
        *,
        switch_operation: Optional[str] = None,
    ) -> Optional[Self]:
        """
        Move the data in this groupby object to a different backend.

        Parameters
        ----------
        backend : str
            The name of the backend to switch to.
        inplace : bool, default: False
            Whether to perform the operation in-place.
        switch_operation : str, optional
            The operation being performed that triggered the backend switch.

        Returns
        -------
        DataFrameGroupBy or None
            If inplace=False, returns a new groupby object with the specified backend.
            If inplace=True, returns None and changes the backend of the current object.

        Notes
        -----
        When `inplace=True`, this method will move the data between backends
        for all parent objects (the DataFrame/Series used to create this
        groupby, and any DataFrames/Series in the `by` list). When
        `inplace=False`, new copies of the parent objects are created with their
        data in the target backend for the returned groupby object, leaving the
        original parent objects unchanged.
        """

        def set_instance_variable_backend(arg: Any) -> Any:
            # groupby object _by and _df fields may include both
            # QueryCompilerCaster objects and BaseQueryCompiler objects,
            # so we have to be able to set the backend on both of those.
            if isinstance(arg, QueryCompilerCaster):
                result = arg.set_backend(
                    backend=backend, inplace=inplace, switch_operation=switch_operation
                )
                return arg if inplace else result
            if isinstance(arg, BaseQueryCompiler):
                # Use a cyclic import here because query compilers themselves
                # do not implement set_backend().
                from modin.pandas import DataFrame

                return (
                    DataFrame(query_compiler=arg)
                    .set_backend(backend=backend, inplace=False)
                    ._query_compiler
                )
            return arg

        # `by` may be an arbitrarily nested structure of frames/compilers;
        # convert every element found within it.
        new_by = visit_nested_args([self._by], set_instance_variable_backend)[0]
        new_df = visit_nested_args([self._df], set_instance_variable_backend)[0]
        if inplace:
            self._df = new_df
            self._query_compiler = new_df._query_compiler
            self._by = new_by
            return None
        return type(self)(
            df=new_df,
            by=new_by,
            axis=self._axis,
            level=self._level,
            as_index=self._as_index,
            sort=self._sort,
            group_keys=self._kwargs["group_keys"],
            idx_name=self._idx_name,
            drop=self._drop,
            backend_pinned=self._backend_pinned,
            # We have added as_index, sort, group_keys, and level to the kwargs
            # dictionary, so we need to remove them from the keyword arguments
            # that we pass to the new DataFrameGroupBy object.
            **{
                k: v
                for k, v in self._kwargs.items()
                if k not in ["as_index", "sort", "group_keys", "level"]
            },
        )
    @_inherit_docstrings(QueryCompilerCaster.is_backend_pinned)
    def is_backend_pinned(self) -> bool:
        # Pinned status is stored directly on this groupby object (set in
        # __init__), not derived from the parent frame.
        return self._backend_pinned
@_inherit_docstrings(QueryCompilerCaster._set_backend_pinned)
def _set_backend_pinned(self, pinned: bool, inplace: bool) -> Optional[Self]:
if inplace:
self._backend_pinned = pinned
return None
else:
# Create a new groupby object with the updated pinned status
new_obj = self._override(backend_pinned=pinned)
# Force the correct pinned status since the automatic pinning logic
# in query_compiler_caster.py might override it
new_obj._backend_pinned = pinned
return new_obj
    @disable_logging
    @_inherit_docstrings(QueryCompilerCaster._get_query_compiler)
    def _copy_into(self, other: Self) -> None:
        # TODO(https://github.com/modin-project/modin/issues/7544): implement
        # this method to support automatic pre-operation backend switch for
        # groupby methods.
        ErrorMessage.not_implemented()
def _override(self, **kwargs):
"""
Override groupby parameters.
Parameters
----------
**kwargs : dict
Parameters to override.
Returns
-------
DataFrameGroupBy
A groupby object with new parameters.
"""
new_kw = dict(
df=self._df,
by=self._by,
axis=self._axis,
idx_name=self._idx_name,
drop=self._drop,
backend_pinned=self._backend_pinned,
**self._kwargs,
)
new_kw.update(kwargs)
return type(self)(**new_kw)
    @disable_logging
    def __getattr__(self, key):
        """
        Alter regular attribute access, looks up the name in the columns.

        Parameters
        ----------
        key : str
            Attribute name.

        Returns
        -------
        The value of the attribute.
        """
        try:
            # First consult backend extensions (with a deny-list of names that
            # must never be resolved through extensions).
            return self._getattr__from_extension_impl(
                key=key,
                default_behavior_attributes=GROUPBY_EXTENSION_NO_LOOKUP,
                extensions=__class__._extensions,
            )
        except AttributeError as err:
            # Fall back to column access: `gb.col` behaves like `gb["col"]`.
            # Guard against recursing on `_columns` itself.
            if key != "_columns" and key in self._columns:
                return self.__getitem__(key)
            raise err
    @disable_logging
    def __getattribute__(self, item: str) -> Any:
        """
        Override __getattribute__, which python calls to access any attribute of an object of this class.

        We override this method
        1) to default to pandas for empty dataframes on non-lazy engines.
        2) to get non-method extensions (e.g. properties)

        Parameters
        ----------
        item : str
            The name of the attribute to access.

        Returns
        -------
        Any
            The value of the attribute.
        """
        if item not in GROUPBY_EXTENSION_NO_LOOKUP:
            extensions_result = self._getattribute__from_extension_impl(
                item, __class__._extensions
            )
            if extensions_result is not sentinel:
                return extensions_result
        attr = super().__getattribute__(item)
        if item not in _DEFAULT_BEHAVIOUR and not self._query_compiler.lazy_shape:
            # We default to pandas on empty DataFrames. This avoids a large amount of
            # pain in underlying implementation and returns a result immediately rather
            # than dealing with the edge cases that empty DataFrames have.
            if callable(attr) and self._df.empty and hasattr(self._pandas_class, item):

                def default_handler(*args, **kwargs):
                    # Lazily defer the actual call to pandas at call time.
                    return self._default_to_pandas(item, *args, **kwargs)

                return default_handler
        return attr
    @disable_logging
    def __setattr__(self, key: str, value) -> None:
        """
        Set an attribute on the object.

        We override this method to set extension properties.

        Parameters
        ----------
        key : str
            The name of the attribute to set.
        value : Any
            The value to set the attribute to.

        Returns
        -------
        None
        """
        # An extension property is only accessible if the backend supports it.
        extension = self._get_extension(key, __class__._extensions)
        # Only delegate when the extension is a data descriptor (has __set__).
        if extension is not sentinel and hasattr(extension, "__set__"):
            return extension.__set__(self, value)
        return super().__setattr__(key, value)
    @disable_logging
    def __delattr__(self, name: str) -> None:
        """
        Delete an attribute on the object.

        We override this method to delete extension properties.

        Parameters
        ----------
        name : str
            The name of the attribute to delete.

        Returns
        -------
        None
        """
        # An extension property is only accessible if the backend supports it.
        extension = self._get_extension(name, __class__._extensions)
        # Only delegate when the extension supports deletion (has __delete__).
        if extension is not sentinel and hasattr(extension, "__delete__"):
            return extension.__delete__(self)
        return super().__delattr__(name)
@property
def ngroups(self): # noqa: GL08
return len(self)
    def skew(self, axis=lib.no_default, skipna=True, numeric_only=False, **kwargs):
        # default behaviour for aggregations; for the reference see
        # `_op_via_apply` func in pandas==2.0.2
        if axis is None or axis is lib.no_default:
            axis = self._axis
        # Only axis=0 with skipna=True is supported natively; other
        # combinations fall back to pandas.
        if axis != 0 or not skipna:
            return self._default_to_pandas(
                lambda df: df.skew(
                    axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs
                )
            )
        return self._wrap_aggregation(
            type(self._query_compiler).groupby_skew,
            agg_kwargs=kwargs,
            numeric_only=numeric_only,
        )
    def ffill(self, limit=None):
        # ffill is implemented via fillna(method="ffill"); warn once about a
        # known pandas issue affecting frames with duplicated indices.
        ErrorMessage.single_warning(
            ".ffill() is implemented using .fillna() in Modin, "
            + "which can be impacted by pandas bug https://github.com/pandas-dev/pandas/issues/43412 "
            + "on dataframes with duplicated indices"
        )
        # Suppress fillna's own deprecation warning about `method` so users
        # don't get warned for an implementation detail.
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                message=".*fillna with 'method' is deprecated.*",
                category=FutureWarning,
            )
            return self.fillna(limit=limit, method="ffill")
def sem(self, ddof=1, numeric_only=False):
return self._wrap_aggregation(
type(self._query_compiler).groupby_sem,
agg_kwargs=dict(ddof=ddof),
numeric_only=numeric_only,
)
def sample(self, n=None, frac=None, replace=False, weights=None, random_state=None):
return self._default_to_pandas(
lambda df: df.sample(
n=n,
frac=frac,
replace=replace,
weights=weights,
random_state=random_state,
)
)
def ewm(self, *args, **kwargs):
return self._default_to_pandas(lambda df: df.ewm(*args, **kwargs))
def value_counts(
self,
subset=None,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
dropna: bool = True,
):
return self._default_to_pandas(
lambda df: df.value_counts(
subset=subset,
normalize=normalize,
sort=sort,
ascending=ascending,
dropna=dropna,
)
)
def mean(self, numeric_only=False, engine=None, engine_kwargs=None):
if engine not in ("cython", None) and engine_kwargs is not None:
return self._default_to_pandas(
lambda df: df.mean(
numeric_only=numeric_only,
engine=engine,
engine_kwargs=engine_kwargs,
)
)
return self._check_index(
self._wrap_aggregation(
type(self._query_compiler).groupby_mean,
agg_kwargs=dict(numeric_only=numeric_only),
numeric_only=numeric_only,
)
)
def any(self, skipna=True):
return self._wrap_aggregation(
type(self._query_compiler).groupby_any,
numeric_only=False,
agg_kwargs=dict(skipna=skipna),
)
    @property
    def plot(self):  # pragma: no cover
        # Plotting is not implemented natively; delegate to pandas.
        return self._default_to_pandas(lambda df: df.plot)
    def ohlc(self):
        # Local import to avoid a circular dependency at module load time.
        from .dataframe import DataFrame

        # The result is always wrapped as a DataFrame, even when this groupby
        # was built from a Series (`is_df` tells the compiler which case).
        return DataFrame(
            query_compiler=self._query_compiler.groupby_ohlc(
                by=self._by,
                axis=self._axis,
                groupby_kwargs=self._kwargs,
                agg_args=[],
                agg_kwargs={},
                is_df=isinstance(self._df, DataFrame),
            ),
        )
    def __bytes__(self):
        """
        Convert DataFrameGroupBy object into a python2-style byte string.

        Returns
        -------
        bytearray
            Byte array representation of `self`.

        Notes
        -----
        Deprecated and removed in pandas and will be likely removed in Modin.
        """
        # Defer entirely to pandas for this legacy protocol.
        return self._default_to_pandas(lambda df: df.__bytes__())
    @cached_property
    def groups(self):
        # Mapping of group keys to row labels; cached to avoid recomputation.
        return self._compute_index_grouped(numerical=False)
def min(self, numeric_only=False, min_count=-1, engine=None, engine_kwargs=None):
if engine not in ("cython", None) and engine_kwargs is not None:
return self._default_to_pandas(
lambda df: df.min(
numeric_only=numeric_only,
min_count=min_count,
engine=engine,
engine_kwargs=engine_kwargs,
)
)
return self._wrap_aggregation(
type(self._query_compiler).groupby_min,
agg_kwargs=dict(min_count=min_count),
numeric_only=numeric_only,
)
def max(self, numeric_only=False, min_count=-1, engine=None, engine_kwargs=None):
if engine not in ("cython", None) and engine_kwargs is not None:
return self._default_to_pandas(
lambda df: df.max(
numeric_only=numeric_only,
min_count=min_count,
engine=engine,
engine_kwargs=engine_kwargs,
)
)
return self._wrap_aggregation(
type(self._query_compiler).groupby_max,
agg_kwargs=dict(min_count=min_count),
numeric_only=numeric_only,
)
    def idxmax(self, axis=lib.no_default, skipna=True, numeric_only=False):
        # Explicitly passing `axis` is deprecated (matching pandas).
        if axis is not lib.no_default:
            self._deprecate_axis(axis, "idxmax")
        # default behaviour for aggregations; for the reference see
        # `_op_via_apply` func in pandas==2.0.2
        if axis is None or axis is lib.no_default:
            axis = self._axis
        return self._wrap_aggregation(
            type(self._query_compiler).groupby_idxmax,
            agg_kwargs=dict(axis=axis, skipna=skipna),
            numeric_only=numeric_only,
        )
    def idxmin(self, axis=lib.no_default, skipna=True, numeric_only=False):
        # Explicitly passing `axis` is deprecated (matching pandas).
        if axis is not lib.no_default:
            self._deprecate_axis(axis, "idxmin")
        # default behaviour for aggregations; for the reference see
        # `_op_via_apply` func in pandas==2.0.2
        if axis is None or axis is lib.no_default:
            axis = self._axis
        return self._wrap_aggregation(
            type(self._query_compiler).groupby_idxmin,
            agg_kwargs=dict(axis=axis, skipna=skipna),
            numeric_only=numeric_only,
        )
@property
def ndim(self):
"""
Return 2.
Returns
-------
int
Returns 2.
Notes
-----
Deprecated and removed in pandas and will be likely removed in Modin.
"""
return 2 # ndim is always 2 for DataFrames
    def shift(
        self,
        periods=1,
        freq=None,
        axis=lib.no_default,
        fill_value=lib.no_default,
        suffix=None,
    ):
        # `suffix` has no native support; defer entirely to pandas.
        if suffix:
            return self._default_to_pandas(
                lambda df: df.shift(
                    periods=periods,
                    freq=freq,
                    axis=axis,
                    fill_value=fill_value,
                    suffix=suffix,
                )
            )
        if axis is not lib.no_default:
            axis = self._df._get_axis_number(axis)
            self._deprecate_axis(axis, "shift")
        else:
            axis = 0

        def _shift(data, periods, freq, axis, fill_value, is_set_nan_rows=True):
            # Shift the whole frame, then (optionally) drop rows whose
            # group keys are NaN, mimicking pandas' exclusion of NaN groups.
            from .dataframe import DataFrame

            result = data.shift(periods, freq, axis, fill_value)

            if (
                is_set_nan_rows
                and isinstance(self._by, BaseQueryCompiler)
                and (
                    # Check using `issubset` is effective only in case of MultiIndex
                    set(self._by.columns).issubset(list(data.columns))
                    if isinstance(self._by.columns, pandas.MultiIndex)
                    else len(
                        self._by.columns.unique()
                        .sort_values()
                        .difference(data.columns.unique().sort_values())
                    )
                    == 0
                )
                and DataFrame(query_compiler=self._by.isna()).any(axis=None)
            ):
                mask_nan_rows = data[self._by.columns].isna().any(axis=1)
                result = result.loc[~mask_nan_rows]
            return result

        if freq is None and axis == 1 and self._axis == 0:
            # Column-wise shift on a row-grouped frame: groups are irrelevant.
            result = _shift(self._df, periods, freq, axis, fill_value)
        elif (
            freq is not None
            and axis == 0
            and self._axis == 0
            and isinstance(self._by, BaseQueryCompiler)
        ):
            # Frequency-based shift: shift first, then drop/sort by the keys.
            result = _shift(
                self._df, periods, freq, axis, fill_value, is_set_nan_rows=False
            )
            result = result.dropna(subset=self._by.columns)
            if self._sort:
                result = result.sort_values(list(self._by.columns), axis=axis)
            else:
                result = result.sort_index()
        else:
            # General case: per-group shift executed by the query compiler.
            result = self._wrap_aggregation(
                type(self._query_compiler).groupby_shift,
                numeric_only=False,
                agg_kwargs=dict(
                    periods=periods, freq=freq, axis=axis, fill_value=fill_value
                ),
            )
        return result
def nth(self, n, dropna=None):
# TODO: what we really should do is create a GroupByNthSelector to mimic
# pandas behavior and then implement some of these methods there.
# Adapted error checking from pandas
if dropna:
if not is_integer(n):
raise ValueError("dropna option only supported for an integer argument")
if dropna not in ("any", "all"):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError(
"For a DataFrame or Series groupby.nth, dropna must be "
+ "either None, 'any' or 'all', "
+ f"(was passed {dropna})."
)
return self._check_index(
self._wrap_aggregation(
type(self._query_compiler).groupby_nth,
numeric_only=False,
agg_kwargs=dict(n=n, dropna=dropna),
)
)
    def cumsum(self, axis=lib.no_default, *args, **kwargs):
        # Explicitly passing `axis` is deprecated (matching pandas).
        if axis is not lib.no_default:
            axis = self._df._get_axis_number(axis)
            self._deprecate_axis(axis, "cumsum")
        else:
            axis = 0
        return self._wrap_aggregation(
            type(self._query_compiler).groupby_cumsum,
            agg_args=args,
            agg_kwargs=dict(axis=axis, **kwargs),
        )
    @cached_property
    def indices(self):
        # Like `groups`, but maps group keys to numerical positions
        # (numerical=True); cached to avoid recomputation.
        return self._compute_index_grouped(numerical=True)
    @_inherit_docstrings(pandas.core.groupby.DataFrameGroupBy.pct_change)
    def pct_change(
        self,
        periods=1,
        fill_method=lib.no_default,
        limit=lib.no_default,
        freq=None,
        axis=lib.no_default,
    ):
        from .dataframe import DataFrame

        # Mirror pandas' deprecation of the fill_method/limit keywords.
        if fill_method not in (lib.no_default, None) or limit is not lib.no_default:
            warnings.warn(
                "The 'fill_method' keyword being not None and the 'limit' keyword in "
                + f"{type(self).__name__}.pct_change are deprecated and will be removed "
                + "in a future version. Either fill in any non-leading NA values prior "
                + "to calling pct_change or specify 'fill_method=None' to not fill NA "
                + "values.",
                FutureWarning,
            )
        if fill_method is lib.no_default:
            # Warn only if the deprecated default would actually fill NAs.
            if any(grp.isna().values.any() for _, grp in self):
                warnings.warn(
                    "The default fill_method='ffill' in "
                    + f"{type(self).__name__}.pct_change is deprecated and will be "
                    + "removed in a future version. Call ffill before calling "
                    + "pct_change to retain current behavior and silence this warning.",
                    FutureWarning,
                )
            fill_method = "ffill"
        if limit is lib.no_default:
            limit = None

        if axis is not lib.no_default:
            axis = self._df._get_axis_number(axis)
            self._deprecate_axis(axis, "pct_change")
        else:
            axis = 0

        # Should check for API level errors
        # Attempting to match pandas error behavior here
        if not isinstance(periods, int):
            raise TypeError(f"periods must be an int. got {type(periods)} instead")

        if isinstance(self._df, Series):
            if not is_numeric_dtype(self._df.dtypes):
                raise TypeError(
                    f"unsupported operand type for -: got {self._df.dtypes}"
                )
        elif isinstance(self._df, DataFrame) and axis == 0:
            for col, dtype in self._df.dtypes.items():
                # can't calculate change on non-numeric columns, so check for
                # non-numeric columns that are not included in the `by`
                if not is_numeric_dtype(dtype) and not (
                    isinstance(self._by, BaseQueryCompiler) and col in self._by.columns
                ):
                    raise TypeError(f"unsupported operand type for -: got {dtype}")

        return self._wrap_aggregation(
            type(self._query_compiler).groupby_pct_change,
            agg_kwargs=dict(
                periods=periods,
                fill_method=fill_method,
                limit=limit,
                freq=freq,
                axis=axis,
            ),
        )
def filter(self, func, dropna=True, *args, **kwargs):
return self._default_to_pandas(
lambda df: df.filter(func, dropna=dropna, *args, **kwargs)
)
    def _deprecate_axis(self, axis: int, name: str) -> None:  # noqa: GL08
        # Emit the same FutureWarning pandas raises when `axis` is passed
        # explicitly to groupby method `name`.
        if axis == 1:
            warnings.warn(
                f"{type(self).__name__}.{name} with axis=1 is deprecated and "
                + "will be removed in a future version. Operate on the un-grouped "
                + "DataFrame instead",
                FutureWarning,
            )
        else:
            warnings.warn(
                f"The 'axis' keyword in {type(self).__name__}.{name} is deprecated "
                + "and will be removed in a future version. "
                + "Call without passing 'axis' instead.",
                FutureWarning,
            )
    def cummax(self, axis=lib.no_default, numeric_only=False, **kwargs):
        # Explicitly passing `axis` is deprecated (matching pandas).
        if axis is not lib.no_default:
            axis = self._df._get_axis_number(axis)
            self._deprecate_axis(axis, "cummax")
        else:
            axis = 0
        return self._wrap_aggregation(
            type(self._query_compiler).groupby_cummax,
            agg_kwargs=dict(axis=axis, **kwargs),
            numeric_only=numeric_only,
        )
    def apply(self, func, *args, include_groups=True, **kwargs):
        # Normalize the UDF so it operates on pandas objects inside partitions.
        func = cast_function_modin2pandas(func)
        if not isinstance(func, BuiltinFunctionType):
            func = wrap_udf_function(func)

        apply_res = self._wrap_aggregation(
            qc_method=type(self._query_compiler).groupby_agg,
            numeric_only=False,
            agg_func=func,
            agg_args=args,
            agg_kwargs={**kwargs, "include_groups": include_groups},
            how="group_wise",
        )
        # A single unnamed column signals the result is really a Series;
        # squeeze it back down.
        reduced_index = pandas.Index([MODIN_UNNAMED_SERIES_LABEL])
        if not isinstance(apply_res, Series) and apply_res.columns.equals(
            reduced_index
        ):
            apply_res = apply_res.squeeze(axis=1)
        return self._check_index(apply_res)
    @property
    def dtypes(self):
        # Deprecated pandas-compat property: dtypes of the grouped frame.
        if self._axis == 1:
            raise ValueError("Cannot call dtypes on groupby with axis=1")
        warnings.warn(
            f"{type(self).__name__}.dtypes is deprecated and will be removed in "
            + "a future version. Check the dtypes on the base object instead",
            FutureWarning,
        )
        return self._check_index(
            self._wrap_aggregation(
                type(self._query_compiler).groupby_dtypes,
                numeric_only=False,
            )
        )
def first(self, numeric_only=False, min_count=-1, skipna=True):
return self._wrap_aggregation(
type(self._query_compiler).groupby_first,
agg_kwargs=dict(min_count=min_count, skipna=skipna),
numeric_only=numeric_only,
)
def last(self, numeric_only=False, min_count=-1, skipna=True):
return self._wrap_aggregation(
type(self._query_compiler).groupby_last,
agg_kwargs=dict(min_count=min_count, skipna=skipna),
numeric_only=numeric_only,
)
    @cached_property
    def _internal_by(self) -> tuple[Hashable]:
        """
        Get only those components of 'by' that are column labels of the source frame.

        Returns
        -------
        tuple of labels
        """
        internal_by = tuple()
        # when `_drop` is False, 'by' did not come from the frame itself,
        # so there is nothing to extract — return the empty tuple
        if self._drop:
            if is_list_like(self._by):
                internal_by_list = []
                for by in self._by:
                    if isinstance(by, str):
                        internal_by_list.append(by)
                    elif isinstance(by, pandas.Grouper):
                        # a Grouper carries its source column label in `.key`
                        internal_by_list.append(by.key)
                internal_by = tuple(internal_by_list)
            elif isinstance(self._by, pandas.Grouper):
                internal_by = tuple([self._by.key])
            else:
                ErrorMessage.catch_bugs_and_request_email(
                    failure_condition=not isinstance(self._by, BaseQueryCompiler),
                    extra_log=f"When 'drop' is True, 'by' must be either list-like, Grouper, or a QueryCompiler, met: {type(self._by)}.",
                )
                internal_by = tuple(self._by.columns)
        return internal_by
    def __getitem__(self, key):
        """
        Implement indexing operation on a DataFrameGroupBy object.

        Parameters
        ----------
        key : list or str
            Names of columns to use as subset of original object.

        Returns
        -------
        DataFrameGroupBy or SeriesGroupBy
            Result of indexing operation.

        Raises
        ------
        NotImplementedError
            Column lookups on GroupBy with arbitrary Series in by is not yet supported.
        """
        # These parameters are common for building the resulted Series or DataFrame groupby object
        kwargs = {
            **self._kwargs.copy(),
            "by": self._by,
            "axis": self._axis,
            "idx_name": self._idx_name,
        }
        # The rules of type deduction for the resulted object is the following:
        #   1. If `key` is a list-like or `as_index is False`, then the resulted object is a DataFrameGroupBy
        #   2. Otherwise, the resulted object is SeriesGroupBy
        #   3. Result type does not depend on the `by` origin
        # Examples:
        #   - drop: any, as_index: any, __getitem__(key: list_like) -> DataFrameGroupBy
        #   - drop: any, as_index: False, __getitem__(key: any) -> DataFrameGroupBy
        #   - drop: any, as_index: True, __getitem__(key: label) -> SeriesGroupBy
        if is_list_like(key):
            make_dataframe = True
        else:
            if self._as_index:
                make_dataframe = False
            else:
                make_dataframe = True
            # normalize a scalar label to a one-element list for uniform handling
            key = [key]
        if make_dataframe:
            internal_by = frozenset(self._internal_by)
            if len(internal_by.intersection(key)) != 0:
                ErrorMessage.mismatch_with_pandas(
                    operation="GroupBy.__getitem__",
                    message=(
                        "intersection of the selection and 'by' columns is not yet supported, "
                        + "to achieve the desired result rewrite the original code from:\n"
                        + "df.groupby('by_column')['by_column']\n"
                        + "to the:\n"
                        + "df.groupby(df['by_column'].copy())['by_column']"
                    ),
                )
            # We need to maintain order of the columns in key, using a set doesn't
            # maintain order.
            # We use dictionaries since they maintain insertion order as of 3.7,
            # and its faster to call dict.update than it is to loop through `key`
            # and select only the elements which aren't in `cols_to_grab`.
            cols_to_grab = dict.fromkeys(self._internal_by)
            cols_to_grab.update(dict.fromkeys(key))
            key = [col for col in cols_to_grab.keys() if col in self._df.columns]
            return DataFrameGroupBy(
                self._df[key],
                drop=self._drop,
                backend_pinned=self._backend_pinned,
                **kwargs,
            )
        if (
            self._is_multi_by
            and isinstance(self._by, list)
            and not all(hashable(o) and o in self._df for o in self._by)
        ):
            raise NotImplementedError(
                "Column lookups on GroupBy with arbitrary Series in by"
                + " is not yet supported."
            )
        return SeriesGroupBy(
            self._df[key],
            drop=False,
            backend_pinned=self._backend_pinned,
            **kwargs,
        )
def cummin(self, axis=lib.no_default, numeric_only=False, **kwargs):
if axis is not lib.no_default:
axis = self._df._get_axis_number(axis)
self._deprecate_axis(axis, "cummin")
else:
axis = 0
return self._wrap_aggregation(
type(self._query_compiler).groupby_cummin,
agg_kwargs=dict(axis=axis, **kwargs),
numeric_only=numeric_only,
)
    def bfill(self, limit=None):
        # Backward-fill per group, implemented on top of fillna(method="bfill").
        ErrorMessage.single_warning(
            ".bfill() is implemented using .fillna() in Modin, "
            + "which can be impacted by pandas bug https://github.com/pandas-dev/pandas/issues/43412 "
            + "on dataframes with duplicated indices"
        )
        with warnings.catch_warnings():
            # our own fillna(method=...) call would trip the pandas deprecation
            # warning; silence it here since the user called bfill, not fillna
            warnings.filterwarnings(
                "ignore",
                message=".*fillna with 'method' is deprecated.*",
                category=FutureWarning,
            )
            return self.fillna(limit=limit, method="bfill")
def prod(self, numeric_only=False, min_count=0):
return self._wrap_aggregation(
type(self._query_compiler).groupby_prod,
agg_kwargs=dict(min_count=min_count),
numeric_only=numeric_only,
)
def std(self, ddof=1, engine=None, engine_kwargs=None, numeric_only=False):
if engine not in ("cython", None) and engine_kwargs is not None:
return self._default_to_pandas(
lambda df: df.std(
ddof=ddof,
engine=engine,
engine_kwargs=engine_kwargs,
numeric_only=numeric_only,
)
)
return self._wrap_aggregation(
type(self._query_compiler).groupby_std,
agg_kwargs=dict(ddof=ddof),
numeric_only=numeric_only,
)
    def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
        """
        Aggregate using one or more operations over axis 0.

        ``func`` may be None (named aggregation via **kwargs), a dict, a
        list-like, a callable, or a string method name; ``engine`` values
        other than "cython"/None defer to pandas.
        """
        if engine not in ("cython", None) and engine_kwargs is not None:
            return self._default_to_pandas(
                lambda df: df.aggregate(
                    func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
                )
            )
        if self._axis != 0:
            # This is not implemented in pandas,
            # so we throw a different message
            raise NotImplementedError("axis other than 0 is not supported")

        if (
            callable(func)
            and isinstance(func, BuiltinFunctionType)
            and func.__name__ in dir(self)
        ):
            # pandas deprecation path: a builtin (e.g. the `sum` builtin) that
            # shadows a groupby method is converted to the method-name string
            func_name = func.__name__
            warnings.warn(
                f"The provided callable {func} is currently using "
                + f"{type(self).__name__}.{func_name}. In a future version of pandas, "
                + "the provided callable will be used directly. To keep current "
                + f"behavior pass the string {func_name} instead.",
                category=FutureWarning,
            )
            func = func_name

        do_relabel = None
        if isinstance(func, dict) or func is None:
            # the order from `reconstruct_func` cannot be used correctly if there
            # is more than one columnar partition, since for correct use all columns
            # must be available within one partition.
            old_kwargs = dict(kwargs)
            relabeling_required, func_dict, new_columns, _ = reconstruct_func(
                func, **kwargs
            )

            if relabeling_required:

                def do_relabel(obj_to_relabel):  # noqa: F811
                    # unwrap nested labels into one level tuple
                    result_labels = [None] * len(old_kwargs)
                    for idx, labels in enumerate(old_kwargs.values()):
                        if is_scalar(labels) or callable(labels):
                            result_labels[idx] = (
                                labels if not callable(labels) else labels.__name__
                            )
                            continue
                        new_elem = []
                        for label in labels:
                            if is_scalar(label) or callable(label):
                                new_elem.append(
                                    label if not callable(label) else label.__name__
                                )
                            else:
                                new_elem.extend(label)
                        result_labels[idx] = tuple(new_elem)
                    new_order = obj_to_relabel.columns.get_indexer(result_labels)
                    new_columns_idx = pandas.Index(new_columns)
                    if not self._as_index:
                        # keep the by-columns in front of the relabeled result
                        nby_cols = len(obj_to_relabel.columns) - len(new_columns_idx)
                        new_order = np.concatenate([np.arange(nby_cols), new_order])
                        by_cols = obj_to_relabel.columns[:nby_cols]
                        if by_cols.nlevels != new_columns_idx.nlevels:
                            by_cols = by_cols.remove_unused_levels()
                            empty_levels = [
                                i
                                for i, level in enumerate(by_cols.levels)
                                if len(level) == 1 and level[0] == ""
                            ]
                            by_cols = by_cols.droplevel(empty_levels)
                        new_columns_idx = by_cols.append(new_columns_idx)
                    result = obj_to_relabel.iloc[:, new_order]
                    result.columns = new_columns_idx
                    return result

            if any(isinstance(fn, list) for fn in func_dict.values()):
                # multicolumn case
                # putting functions in a `list` allows to achieve multicolumn in each partition
                func_dict = {
                    col: fn if isinstance(fn, list) else [fn]
                    for col, fn in func_dict.items()
                }
            if (
                relabeling_required
                and not self._as_index
                and any(col in func_dict for col in self._internal_by)
            ):
                ErrorMessage.mismatch_with_pandas(
                    operation="GroupBy.aggregate(**dictionary_renaming_aggregation)",
                    message=(
                        "intersection of the columns to aggregate and 'by' is not yet supported when 'as_index=False', "
                        + "columns with group names of the intersection will not be presented in the result. "
                        + "To achieve the desired result rewrite the original code from:\n"
                        + "df.groupby('by_column', as_index=False).agg(agg_func=('by_column', agg_func))\n"
                        + "to the:\n"
                        + "df.groupby('by_column').agg(agg_func=('by_column', agg_func)).reset_index()"
                    ),
                )
            if any(i not in self._df.columns for i in func_dict.keys()):
                raise SpecificationError("nested renamer is not supported")
            if func is None:
                kwargs = {}
                func = func_dict
        elif is_list_like(func):
            # for list-list aggregation pandas always puts
            # groups as index in the result, ignoring as_index,
            # so we have to reset it to default value
            res = self._override(as_index=True)._wrap_aggregation(
                qc_method=type(self._query_compiler).groupby_agg,
                numeric_only=False,
                agg_func=func,
                agg_args=args,
                agg_kwargs=kwargs,
                how="axis_wise",
            )
            if not self._kwargs["as_index"]:
                res.reset_index(inplace=True)
            return res
        elif callable(func):
            return self._check_index(
                self._wrap_aggregation(
                    qc_method=type(self._query_compiler).groupby_agg,
                    numeric_only=False,
                    agg_func=func,
                    agg_args=args,
                    agg_kwargs=kwargs,
                    how="axis_wise",
                )
            )
        elif isinstance(func, str):
            # Using "getattr" here masks possible AttributeError which we throw
            # in __getattr__, so we should call __getattr__ directly instead.
            agg_func = self.__getattr__(func)
            if callable(agg_func):
                return agg_func(*args, **kwargs)

        result = self._wrap_aggregation(
            qc_method=type(self._query_compiler).groupby_agg,
            numeric_only=False,
            agg_func=func,
            agg_args=args,
            agg_kwargs=kwargs,
            how="axis_wise",
        )
        return do_relabel(result) if do_relabel else result

    agg = aggregate
    def rank(
        self,
        method="average",
        ascending=True,
        na_option="keep",
        pct=False,
        axis=lib.no_default,
    ):
        # Rank of values within each group; explicit ``axis`` is deprecated.
        if na_option not in {"keep", "top", "bottom"}:
            raise ValueError("na_option must be one of 'keep', 'top', or 'bottom'")
        if axis is not lib.no_default:
            axis = self._df._get_axis_number(axis)
            self._deprecate_axis(axis, "rank")
        else:
            axis = 0
        result = self._wrap_aggregation(
            type(self._query_compiler).groupby_rank,
            agg_kwargs=dict(
                method=method,
                ascending=ascending,
                na_option=na_option,
                pct=pct,
                axis=axis,
            ),
            numeric_only=False,
        )
        return result
    @property
    def corrwith(self):
        # Not implemented natively; expose the pandas-backed attribute instead.
        return self._default_to_pandas(lambda df: df.corrwith)
def var(self, ddof=1, engine=None, engine_kwargs=None, numeric_only=False):
if engine not in ("cython", None) and engine_kwargs is not None:
return self._default_to_pandas(
lambda df: df.var(
ddof=ddof,
engine=engine,
engine_kwargs=engine_kwargs,
numeric_only=numeric_only,
)
)
return self._wrap_aggregation(
type(self._query_compiler).groupby_var,
agg_kwargs=dict(ddof=ddof),
numeric_only=numeric_only,
)
def get_group(self, name, obj=None):
work_object = self._override(
df=obj if obj is not None else self._df, as_index=True
)
return work_object._check_index(
work_object._wrap_aggregation(
qc_method=type(work_object._query_compiler).groupby_get_group,
numeric_only=False,
agg_kwargs=dict(name=name),
)
)
    def __len__(self):  # noqa: GL08
        # Number of groups, taken from the {group -> labels} mapping.
        return len(self.indices)
def all(self, skipna=True):
return self._wrap_aggregation(
type(self._query_compiler).groupby_all,
numeric_only=False,
agg_kwargs=dict(skipna=skipna),
)
    def size(self):
        # Number of rows in each group.
        if self._axis == 1:
            # group sizes along axis=1 are identical for every row, so compute
            # them over a transposed single-column frame grouped along axis 0
            return DataFrameGroupBy(
                self._df.T.iloc[:, [0]],
                self._by,
                0,
                drop=self._drop,
                idx_name=self._idx_name,
                backend_pinned=self._backend_pinned,
                **self._kwargs,
            ).size()
        result = self._wrap_aggregation(
            type(self._query_compiler).groupby_size,
            numeric_only=False,
        )
        if not isinstance(result, Series):
            result = result.squeeze(axis=1)

        if not self._kwargs.get("as_index") and not isinstance(result, Series):
            # rename the placeholder column produced for the unnamed series
            result = (
                result.rename(columns={MODIN_UNNAMED_SERIES_LABEL: "index"})
                if MODIN_UNNAMED_SERIES_LABEL in result.columns
                else result
            )
        elif isinstance(self._df, Series):
            result.name = self._df.name
        return result
def sum(self, numeric_only=False, min_count=0, engine=None, engine_kwargs=None):
if engine not in ("cython", None) and engine_kwargs is not None:
return self._default_to_pandas(
lambda df: df.sum(
numeric_only=numeric_only,
min_count=min_count,
engine=engine,
engine_kwargs=engine_kwargs,
)
)
return self._wrap_aggregation(
type(self._query_compiler).groupby_sum,
agg_kwargs=dict(min_count=min_count),
numeric_only=numeric_only,
)
def describe(self, percentiles=None, include=None, exclude=None):
return self._default_to_pandas(
lambda df: df.describe(
percentiles=percentiles, include=include, exclude=exclude
)
)
    def boxplot(
        self,
        grouped,
        subplots=True,
        column=None,
        fontsize=None,
        rot=0,
        grid=True,
        ax=None,
        figsize=None,
        layout=None,
        sharex=False,
        sharey=True,
        backend=None,
        **kwargs,
    ):
        # Plotting is not implemented natively; forward everything to pandas.
        return self._default_to_pandas(
            lambda df: df.boxplot(
                grouped,
                subplots=subplots,
                column=column,
                fontsize=fontsize,
                rot=rot,
                grid=grid,
                ax=ax,
                figsize=figsize,
                layout=layout,
                sharex=sharex,
                sharey=sharey,
                backend=backend,
                **kwargs,
            )
        )
def ngroup(self, ascending=True):
result = self._wrap_aggregation(
type(self._query_compiler).groupby_ngroup,
numeric_only=False,
agg_kwargs=dict(ascending=ascending),
)
if not isinstance(result, Series):
# The result should always be a Series with name None and type int64
result = result.squeeze(axis=1)
return result
def nunique(self, dropna=True):
return self._check_index(
self._wrap_aggregation(
type(self._query_compiler).groupby_nunique,
numeric_only=False,
agg_kwargs=dict(dropna=dropna),
)
)
def resample(self, rule, *args, include_groups=True, **kwargs):
return self._default_to_pandas(
lambda df: df.resample(rule, *args, include_groups=include_groups, **kwargs)
)
def median(self, numeric_only=False):
return self._check_index(
self._wrap_aggregation(
type(self._query_compiler).groupby_median,
numeric_only=numeric_only,
)
)
def head(self, n=5):
# groupby().head()/.tail() ignore as_index, so override it to True
work_object = self._override(as_index=True)
return work_object._check_index(
work_object._wrap_aggregation(
type(work_object._query_compiler).groupby_head,
agg_kwargs=dict(n=n),
numeric_only=False,
)
)
def cumprod(self, axis=lib.no_default, *args, **kwargs):
if axis is not lib.no_default:
axis = self._df._get_axis_number(axis)
self._deprecate_axis(axis, "cumprod")
else:
axis = 0
return self._wrap_aggregation(
type(self._query_compiler).groupby_cumprod,
agg_args=args,
agg_kwargs=dict(axis=axis, **kwargs),
)
    def __iter__(self):
        # Delegate to the lazily built (group_id, DataFrame) generator.
        return self._iter.__iter__()
def cov(self, min_periods=None, ddof=1, numeric_only=False):
return self._wrap_aggregation(
type(self._query_compiler).groupby_cov,
agg_kwargs=dict(min_periods=min_periods, ddof=ddof),
numeric_only=numeric_only,
)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
if engine not in ("cython", None) and engine_kwargs is not None:
return self._default_to_pandas(
lambda df: df.transform(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
)
return self._wrap_aggregation(
qc_method=type(self._query_compiler).groupby_agg,
numeric_only=False,
agg_func=func,
agg_args=args,
agg_kwargs=kwargs,
how="transform",
)
def corr(self, method="pearson", min_periods=1, numeric_only=False):
return self._wrap_aggregation(
type(self._query_compiler).groupby_corr,
agg_kwargs=dict(method=method, min_periods=min_periods),
numeric_only=numeric_only,
)
    def fillna(
        self,
        value=None,
        method=None,
        axis=lib.no_default,
        inplace=False,
        limit=None,
        downcast=lib.no_default,
    ):
        # Deprecated pandas-compat method: fill NA values within each group.
        if axis is not lib.no_default:
            self._deprecate_axis(axis, "fillna")

        # the method itself is deprecated in pandas, warn unconditionally
        warnings.warn(
            f"{type(self).__name__}.fillna is deprecated and will be removed "
            + "in a future version. Use obj.ffill(), obj.bfill(), "
            + "or obj.nearest() instead.",
            FutureWarning,
        )

        # default behaviour for aggregations; for the reference see
        # `_op_via_apply` func in pandas==2.0.2
        if axis is None or axis is lib.no_default:
            axis = self._axis

        # build a clone of this groupby with as_index=True for the operation
        new_groupby_kwargs = self._kwargs.copy()
        new_groupby_kwargs["as_index"] = True
        work_object = type(self)(
            df=self._df,
            by=self._by,
            axis=self._axis,
            idx_name=self._idx_name,
            drop=self._drop,
            backend_pinned=self._backend_pinned,
            **new_groupby_kwargs,
        )
        return work_object._wrap_aggregation(
            type(self._query_compiler).groupby_fillna,
            agg_kwargs=dict(
                value=value,
                method=method,
                axis=axis,
                inplace=inplace,
                limit=limit,
                downcast=downcast,
            ),
            numeric_only=False,
        )
def count(self):
return self._wrap_aggregation(
type(self._query_compiler).groupby_count,
numeric_only=False,
)
    def pipe(self, func, *args, **kwargs):
        # Apply `func` to this groupby object itself (standard pandas `pipe`).
        return com.pipe(self, func, *args, **kwargs)
def cumcount(self, ascending=True):
result = self._wrap_aggregation(
type(self._query_compiler).groupby_cumcount,
numeric_only=False,
agg_kwargs=dict(ascending=ascending),
)
if not isinstance(result, Series):
# The result should always be a Series with name None and type int64
result = result.squeeze(axis=1)
return result
def tail(self, n=5):
# groupby().head()/.tail() ignore as_index, so override it to True
work_object = self._override(as_index=True)
return work_object._check_index(
work_object._wrap_aggregation(
type(work_object._query_compiler).groupby_tail,
agg_kwargs=dict(n=n),
numeric_only=False,
)
)
# expanding and rolling are unique cases and need to likely be handled
# separately. They do not appear to be commonly used.
def expanding(self, *args, **kwargs):
return self._default_to_pandas(lambda df: df.expanding(*args, **kwargs))
    def rolling(self, *args, **kwargs):
        # Rolling-window view over each group via Modin's RollingGroupby wrapper.
        return RollingGroupby(self, *args, **kwargs)
    def hist(
        self,
        column=None,
        by=None,
        grid=True,
        xlabelsize=None,
        xrot=None,
        ylabelsize=None,
        yrot=None,
        ax=None,
        sharex=False,
        sharey=False,
        figsize=None,
        layout=None,
        bins=10,
        backend=None,
        legend=False,
        **kwargs,
    ):
        # Plotting is not implemented natively; forward everything to pandas.
        return self._default_to_pandas(
            lambda df: df.hist(
                column=column,
                by=by,
                grid=grid,
                xlabelsize=xlabelsize,
                xrot=xrot,
                ylabelsize=ylabelsize,
                yrot=yrot,
                ax=ax,
                sharex=sharex,
                sharey=sharey,
                figsize=figsize,
                layout=layout,
                bins=bins,
                backend=backend,
                legend=legend,
                **kwargs,
            )
        )
def quantile(self, q=0.5, interpolation="linear", numeric_only=False):
# TODO: handle list-like cases properly
if is_list_like(q):
return self._default_to_pandas(
lambda df: df.quantile(q=q, interpolation=interpolation)
)
return self._check_index(
self._wrap_aggregation(
type(self._query_compiler).groupby_quantile,
numeric_only=numeric_only,
agg_kwargs=dict(q=q, interpolation=interpolation),
)
)
    def diff(self, periods=1, axis=lib.no_default):
        # Discrete difference of each group's values; explicit ``axis`` is
        # deprecated. Deferred import avoids a circular dependency.
        from .dataframe import DataFrame

        if axis is not lib.no_default:
            axis = self._df._get_axis_number(axis)
            self._deprecate_axis(axis, "diff")
        else:
            axis = 0

        # Should check for API level errors
        # Attempting to match pandas error behavior here
        if not isinstance(periods, int):
            raise TypeError(f"periods must be an int. got {type(periods)} instead")

        if isinstance(self._df, Series):
            if not is_numeric_dtype(self._df.dtypes):
                raise TypeError(
                    f"unsupported operand type for -: got {self._df.dtypes}"
                )
        elif isinstance(self._df, DataFrame) and axis == 0:
            for col, dtype in self._df.dtypes.items():
                # can't calculate diff on non-numeric columns, so check for non-numeric
                # columns that are not included in the `by`
                if not (
                    is_numeric_dtype(dtype) or is_datetime64_any_dtype(dtype)
                ) and not (
                    isinstance(self._by, BaseQueryCompiler) and col in self._by.columns
                ):
                    raise TypeError(f"unsupported operand type for -: got {dtype}")

        return self._wrap_aggregation(
            type(self._query_compiler).groupby_diff,
            agg_kwargs=dict(
                periods=periods,
                axis=axis,
            ),
        )
def take(self, indices, axis=lib.no_default, **kwargs):
return self._default_to_pandas(lambda df: df.take(indices, axis=axis, **kwargs))
    @property
    def _index(self):
        """
        Get index value.

        Returns
        -------
        pandas.Index
            Index value of the underlying query compiler.
        """
        return self._query_compiler.index
    @property
    def _sort(self):
        """
        Get sort parameter value.

        Returns
        -------
        bool
            Value of sort parameter used to create DataFrameGroupBy object
            (``None`` when the kwarg was never set).
        """
        return self._kwargs.get("sort")
    @property
    def _as_index(self):
        """
        Get as_index parameter value.

        Returns
        -------
        bool
            Value of as_index parameter used to create DataFrameGroupBy object
            (``None`` when the kwarg was never set).
        """
        return self._kwargs.get("as_index")
    @property
    def _iter(self):
        """
        Construct a tuple of (group_id, DataFrame) tuples to allow iteration over groups.

        Returns
        -------
        generator
            Generator expression of GroupBy object broken down into tuples for iteration.
        """
        # deferred import to avoid a circular dependency
        from .dataframe import DataFrame

        indices = self.indices
        group_ids = indices.keys()
        if self._axis == 0:
            return (
                (
                    (k,) if self._return_tuple_when_iterating else k,
                    DataFrame(
                        query_compiler=self._query_compiler.getitem_row_array(
                            indices[k]
                        )
                    ),
                )
                # honor sort=True by yielding groups in sorted key order
                for k in (sorted(group_ids) if self._sort else group_ids)
            )
        else:
            # axis=1: groups are column subsets rather than row subsets
            return (
                (
                    (k,) if self._return_tuple_when_iterating else k,
                    DataFrame(
                        query_compiler=self._query_compiler.getitem_column_array(
                            indices[k], numeric=True
                        )
                    ),
                )
                for k in (sorted(group_ids) if self._sort else group_ids)
            )
    def _compute_index_grouped(self, numerical=False):
        """
        Construct an index of group IDs.

        Parameters
        ----------
        numerical : bool, default: False
            Whether a group indices should be positional (True) or label-based (False).

        Returns
        -------
        dict
            A dict of {group name -> group indices} values.

        See Also
        --------
        pandas.core.groupby.GroupBy.groups
        """
        # We end up using pure pandas to compute group indices, so raising a warning
        ErrorMessage.default_to_pandas("Group indices computation")

        # Splitting level-by and column-by since we serialize them in a different ways
        by = None
        level = []
        if self._level is not None:
            level = self._level
            if not isinstance(level, list):
                level = [level]
        elif isinstance(self._by, list):
            by = []
            for o in self._by:
                if hashable(o) and o in self._query_compiler.get_index_names(
                    self._axis
                ):
                    # a label matching an index-level name is treated as level-by
                    level.append(o)
                else:
                    by.append(o)
        else:
            by = self._by

        is_multi_by = self._is_multi_by or (by is not None and len(level) > 0)
        # `dropna` param is the only one that matters for the group indices result
        dropna = self._kwargs.get("dropna", True)

        if isinstance(self._by, BaseQueryCompiler) and is_multi_by:
            by = list(self._by.columns)

        if is_multi_by:
            # Because we are doing a collect (to_pandas) here and then groupby, we
            # end up using pandas implementation. Add the warning so the user is
            # aware.
            ErrorMessage.catch_bugs_and_request_email(self._axis == 1)
            if isinstance(by, list) and all(
                is_label(self._df, o, self._axis) for o in by
            ):
                # all keys are frame labels -> only materialize those columns
                pandas_df = self._df._query_compiler.getitem_column_array(
                    by
                ).to_pandas()
            else:
                by = try_cast_to_pandas(by, squeeze=True)
                pandas_df = self._df._to_pandas()
            by = wrap_into_list(by, level)
            groupby_obj = pandas_df.groupby(by=by, dropna=dropna)
            return groupby_obj.indices if numerical else groupby_obj.groups
        else:
            if isinstance(self._by, type(self._query_compiler)):
                by = self._by.to_pandas().squeeze().values
            elif self._by is None:
                # grouping purely by index levels: drop every unrelated level
                index = self._query_compiler.get_axis(self._axis)
                levels_to_drop = [
                    i
                    for i, name in enumerate(index.names)
                    if name not in level and i not in level
                ]
                by = index.droplevel(levels_to_drop)
                if isinstance(by, pandas.MultiIndex):
                    by = by.reorder_levels(level)
            else:
                by = self._by
            axis_labels = self._query_compiler.get_axis(self._axis)
            if numerical:
                # Since we want positional indices of the groups, we want to group
                # on a `RangeIndex`, not on the actual index labels
                axis_labels = pandas.RangeIndex(len(axis_labels))
            # `pandas.Index.groupby` doesn't take any parameters except `by`.
            # Have to convert an Index to a Series to be able to process `dropna=False`:
            if dropna:
                return axis_labels.groupby(by)
            else:
                groupby_obj = axis_labels.to_series().groupby(by, dropna=dropna)
                return groupby_obj.indices if numerical else groupby_obj.groups
    def _wrap_aggregation(
        self,
        qc_method,
        numeric_only=False,
        agg_args=None,
        agg_kwargs=None,
        **kwargs,
    ):
        """
        Perform common metadata transformations and apply groupby functions.

        Parameters
        ----------
        qc_method : callable
            The query compiler method to call.
        numeric_only : {None, True, False}, default: None
            Specifies whether to aggregate non numeric columns:
                - True: include only numeric columns (including categories that holds a numeric dtype)
                - False: include all columns
                - None: infer the parameter, ``False`` if there are no numeric types in the frame,
                  ``True`` otherwise.
        agg_args : list-like, optional
            Positional arguments to pass to the aggregation function.
        agg_kwargs : dict-like, optional
            Keyword arguments to pass to the aggregation function.
        **kwargs : dict
            Keyword arguments to pass to the specified query compiler's method.

        Returns
        -------
        DataFrame or Series
            Returns the same type as `self._df`.
        """
        agg_args = tuple() if agg_args is None else agg_args
        agg_kwargs = dict() if agg_kwargs is None else agg_kwargs

        if numeric_only and self.ndim == 2:
            # restrict to numeric columns, but always keep the by-columns
            # so grouping itself still works
            by_cols = self._internal_by
            mask_cols = [
                col
                for col, dtype in self._query_compiler.dtypes.items()
                if (is_numeric_dtype(dtype) or col in by_cols)
            ]
            groupby_qc = self._query_compiler.getitem_column_array(mask_cols)
        else:
            groupby_qc = self._query_compiler

        return type(self._df)(
            query_compiler=qc_method(
                groupby_qc,
                by=self._by,
                axis=self._axis,
                groupby_kwargs=self._kwargs,
                agg_args=agg_args,
                agg_kwargs=agg_kwargs,
                drop=self._drop,
                **kwargs,
            )
        )
    def _check_index(self, result):
        """
        Check the result of groupby aggregation on the need of resetting index.

        Parameters
        ----------
        result : DataFrame
            Group by aggregation result.

        Returns
        -------
        DataFrame
            The passed frame, with its index reset in place when required.
        """
        if self._by is None and not self._as_index:
            # This is a workaround to align behavior with pandas. In this case pandas
            # resets index, but Modin doesn't do that. More details are in https://github.com/modin-project/modin/issues/3716.
            result.reset_index(drop=True, inplace=True)

        return result
    def _default_to_pandas(self, f, *args, **kwargs):
        """
        Execute function `f` in default-to-pandas way.

        Parameters
        ----------
        f : callable or str
            The function to apply to each group.
        *args : list
            Extra positional arguments to pass to `f`.
        **kwargs : dict
            Extra keyword arguments to pass to `f`.

        Returns
        -------
        modin.pandas.DataFrame
            A new Modin DataFrame with the result of the pandas function.
        """
        if (
            isinstance(self._by, type(self._query_compiler))
            and len(self._by.columns) == 1
        ):
            by = self._by.columns[0] if self._drop else self._by.to_pandas().squeeze()
        # converting QC 'by' to a list of column labels only if this 'by' comes from the self (if drop is True)
        elif self._drop and isinstance(self._by, type(self._query_compiler)):
            by = list(self._by.columns)
        else:
            by = self._by

        by = try_cast_to_pandas(by, squeeze=True)
        # Since 'by' may be a 2D query compiler holding columns to group by,
        # to_pandas will also produce a pandas DataFrame containing them.
        # So splitting 2D 'by' into a list of 1D Series using 'GroupBy.validate_by':
        by = GroupBy.validate_by(by)

        def groupby_on_multiple_columns(df, *args, **kwargs):
            # re-create the groupby on the materialized pandas frame and then
            # either call `f` directly or look it up by name
            groupby_obj = df.groupby(by=by, axis=self._axis, **self._kwargs)

            if callable(f):
                return f(groupby_obj, *args, **kwargs)
            else:
                ErrorMessage.catch_bugs_and_request_email(
                    failure_condition=not isinstance(f, str)
                )
                attribute = getattr(groupby_obj, f)
                if callable(attribute):
                    return attribute(*args, **kwargs)
                return attribute

        return self._df._default_to_pandas(groupby_on_multiple_columns, *args, **kwargs)
@_inherit_docstrings(pandas.core.groupby.SeriesGroupBy)
|
DataFrameGroupBy
|
python
|
huggingface__transformers
|
tests/models/altclip/test_modeling_altclip.py
|
{
"start": 14616,
"end": 16674
}
|
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (AltCLIPModel,) if is_torch_available() else ()
pipeline_model_mapping = {"feature-extraction": AltCLIPModel} if is_torch_available() else {}
test_resize_embeddings = False
test_attention_outputs = False
# TODO: Fix the failed tests when this model gets more usage
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if pipeline_test_case_name == "FeatureExtractionPipelineTests":
return True
return False
def setUp(self):
self.model_tester = AltCLIPModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=AltCLIPConfig,
has_text_modality=False,
common_properties=["projection_dim", "logit_scale_init_value"],
)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Inputs_embeds is tested in individual model tests")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="CLIPModel does not have input/output embeddings")
def test_model_get_set_embeddings(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "BAAI/AltCLIP"
model = AltCLIPModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_vision
@require_torch
|
AltCLIPModelTest
|
python
|
ansible__ansible
|
test/units/module_utils/facts/test_collectors.py
|
{
"start": 8339,
"end": 9017
}
|
class ____(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'pkg_mgr']
valid_subsets = ['pkg_mgr']
fact_namespace = 'ansible_pkgmgr'
collector_class = PkgMgrFactCollector
collected_facts = {
"ansible_distribution": "Fedora",
"ansible_distribution_major_version": "28",
"ansible_os_family": "RedHat"
}
def test_collect(self):
module = self._mock_module()
fact_collector = self.collector_class()
facts_dict = fact_collector.collect(module=module, collected_facts=self.collected_facts)
self.assertIsInstance(facts_dict, dict)
self.assertIn('pkg_mgr', facts_dict)
|
TestPkgMgrFacts
|
python
|
cython__cython
|
Cython/Debugger/Tests/test_libcython_in_gdb.py
|
{
"start": 13564,
"end": 13922
}
|
class ____(DebugTestCase):
def test_updown(self):
self.break_and_run('os.path.join("foo", "bar")')
gdb.execute('cy step')
self.assertRaises(RuntimeError, gdb.execute, 'cy down')
result = gdb.execute('cy up', to_string=True)
assert 'spam()' in result
assert 'os.path.join("foo", "bar")' in result
|
TestUpDown
|
python
|
pallets__jinja
|
tests/test_inheritance.py
|
{
"start": 12198,
"end": 13504
}
|
class ____:
def test_fixed_macro_scoping_bug(self, env):
assert Environment(
loader=DictLoader(
{
"test.html": """\
{% extends 'details.html' %}
{% macro my_macro() %}
my_macro
{% endmacro %}
{% block inner_box %}
{{ my_macro() }}
{% endblock %}
""",
"details.html": """\
{% extends 'standard.html' %}
{% macro my_macro() %}
my_macro
{% endmacro %}
{% block content %}
{% block outer_box %}
outer_box
{% block inner_box %}
inner_box
{% endblock %}
{% endblock %}
{% endblock %}
""",
"standard.html": """
{% block content %} {% endblock %}
""",
}
)
).get_template("test.html").render().split() == ["outer_box", "my_macro"]
def test_double_extends(self, env):
"""Ensures that a template with more than 1 {% extends ... %} usage
raises a ``TemplateError``.
"""
with pytest.raises(TemplateRuntimeError, match="extended multiple times"):
env.get_template("doublee").render()
|
TestBugFix
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/gmake/package.py
|
{
"start": 217,
"end": 847
}
|
class ____(Package):
"""Dummy GMake Package"""
homepage = "https://www.gnu.org/software/make"
url = "https://ftpmirror.gnu.org/make/make-4.4.tar.gz"
tags = ["build-tools"]
version("4.4", sha256="ce35865411f0490368a8fc383f29071de6690cbadc27704734978221f25e2bed")
version("3.0", sha256="ce35865411f0490368a8fc383f29071de6690cbadc27704734978221f25e2bed")
def do_stage(self):
mkdirp(self.stage.source_path)
def setup_dependent_package(self, module, dspec):
module.make = MakeExecutable(
"make", jobs=determine_number_of_jobs(parallel=dspec.package.parallel)
)
|
Gmake
|
python
|
weaviate__weaviate-python-client
|
weaviate/aliases/alias.py
|
{
"start": 63,
"end": 248
}
|
class ____(BaseModel):
"""Returned aliases from Weaviate."""
alias: str
collection: str
_WeaviateAlias = TypedDict("_WeaviateAlias", {"alias": str, "class": str})
|
AliasReturn
|
python
|
getsentry__sentry
|
src/sentry/conf/types/topic_definition.py
|
{
"start": 67,
"end": 225
}
|
class ____(TypedDict):
cluster: str
# The topic name may be overridden from the default via KAFKA_TOPIC_OVERRIDES
real_topic_name: str
|
TopicDefinition
|
python
|
boto__boto3
|
boto3/resources/action.py
|
{
"start": 3546,
"end": 6333
}
|
class ____(ServiceAction):
"""
An action which operates on a batch of items in a collection, typically
a single page of results from the collection's underlying service
operation call. For example, this allows you to delete up to 999
S3 objects in a single operation rather than calling ``.delete()`` on
each one individually.
:type action_model: :py:class`~boto3.resources.model.Action`
:param action_model: The action model.
:type factory: ResourceFactory
:param factory: The factory that created the resource class to which
this action is attached.
:type service_context: :py:class:`~boto3.utils.ServiceContext`
:param service_context: Context about the AWS service
"""
def __call__(self, parent, *args, **kwargs):
"""
Perform the batch action's operation on every page of results
from the collection.
:type parent:
:py:class:`~boto3.resources.collection.ResourceCollection`
:param parent: The collection iterator to which this action
is attached.
:rtype: list(dict)
:return: A list of low-level response dicts from each call.
"""
service_name = None
client = None
responses = []
operation_name = xform_name(self._action_model.request.operation)
# Unlike the simple action above, a batch action must operate
# on batches (or pages) of items. So we get each page, construct
# the necessary parameters and call the batch operation.
for page in parent.pages():
params = {}
for index, resource in enumerate(page):
# There is no public interface to get a service name
# or low-level client from a collection, so we get
# these from the first resource in the collection.
if service_name is None:
service_name = resource.meta.service_name
if client is None:
client = resource.meta.client
create_request_parameters(
resource,
self._action_model.request,
params=params,
index=index,
)
if not params:
# There are no items, no need to make a call.
break
params.update(kwargs)
logger.debug(
'Calling %s:%s with %r', service_name, operation_name, params
)
response = getattr(client, operation_name)(*args, **params)
logger.debug('Response: %r', response)
responses.append(self._response_handler(parent, params, response))
return responses
|
BatchAction
|
python
|
django__django
|
tests/m2m_regress/tests.py
|
{
"start": 268,
"end": 5102
}
|
class ____(TestCase):
def test_multiple_m2m(self):
# Multiple m2m references to model must be distinguished when
# accessing the relations through an instance attribute.
s1 = SelfRefer.objects.create(name="s1")
s2 = SelfRefer.objects.create(name="s2")
s3 = SelfRefer.objects.create(name="s3")
s1.references.add(s2)
s1.related.add(s3)
e1 = Entry.objects.create(name="e1")
t1 = Tag.objects.create(name="t1")
t2 = Tag.objects.create(name="t2")
e1.topics.add(t1)
e1.related.add(t2)
self.assertSequenceEqual(s1.references.all(), [s2])
self.assertSequenceEqual(s1.related.all(), [s3])
self.assertSequenceEqual(e1.topics.all(), [t1])
self.assertSequenceEqual(e1.related.all(), [t2])
def test_internal_related_name_not_in_error_msg(self):
# The secret internal related names for self-referential many-to-many
# fields shouldn't appear in the list when an error is made.
msg = (
"Choices are: id, name, references, related, selfreferchild, "
"selfreferchildsibling"
)
with self.assertRaisesMessage(FieldError, msg):
SelfRefer.objects.filter(porcupine="fred")
def test_m2m_inheritance_symmetry(self):
# Test to ensure that the relationship between two inherited models
# with a self-referential m2m field maintains symmetry
sr_child = SelfReferChild(name="Hanna")
sr_child.save()
sr_sibling = SelfReferChildSibling(name="Beth")
sr_sibling.save()
sr_child.related.add(sr_sibling)
self.assertSequenceEqual(sr_child.related.all(), [sr_sibling.selfrefer_ptr])
self.assertSequenceEqual(sr_sibling.related.all(), [sr_child.selfrefer_ptr])
def test_m2m_pk_field_type(self):
# Regression for #11311 - The primary key for models in a m2m relation
# doesn't have to be an AutoField
w = Worksheet(id="abc")
w.save()
w.delete()
def test_create_copy_with_m2m(self):
t1 = Tag.objects.create(name="t1")
Entry.objects.create(name="e1")
entry = Entry.objects.first()
entry.topics.set([t1])
old_topics = entry.topics.all()
entry.pk = None
entry._state.adding = True
entry.save()
entry.topics.set(old_topics)
entry = Entry.objects.get(pk=entry.pk)
self.assertCountEqual(entry.topics.all(), old_topics)
self.assertSequenceEqual(entry.topics.all(), [t1])
def test_add_m2m_with_base_class(self):
# Regression for #11956 -- You can add an object to a m2m with the
# base class without causing integrity errors
t1 = Tag.objects.create(name="t1")
t2 = Tag.objects.create(name="t2")
c1 = TagCollection.objects.create(name="c1")
c1.tags.set([t1, t2])
c1 = TagCollection.objects.get(name="c1")
self.assertCountEqual(c1.tags.all(), [t1, t2])
self.assertCountEqual(t1.tag_collections.all(), [c1])
def test_manager_class_caching(self):
e1 = Entry.objects.create()
e2 = Entry.objects.create()
t1 = Tag.objects.create()
t2 = Tag.objects.create()
# Get same manager twice in a row:
self.assertIs(t1.entry_set.__class__, t1.entry_set.__class__)
self.assertIs(e1.topics.__class__, e1.topics.__class__)
# Get same manager for different instances
self.assertIs(e1.topics.__class__, e2.topics.__class__)
self.assertIs(t1.entry_set.__class__, t2.entry_set.__class__)
def test_m2m_abstract_split(self):
# Regression for #19236 - an abstract class with a 'split' method
# causes a TypeError in add_lazy_relation
m1 = RegressionModelSplit(name="1")
m1.save()
def test_assigning_invalid_data_to_m2m_doesnt_clear_existing_relations(self):
t1 = Tag.objects.create(name="t1")
t2 = Tag.objects.create(name="t2")
c1 = TagCollection.objects.create(name="c1")
c1.tags.set([t1, t2])
with self.assertRaisesMessage(TypeError, "'int' object is not iterable"):
c1.tags.set(7)
c1.refresh_from_db()
self.assertSequenceEqual(c1.tags.order_by("name"), [t1, t2])
def test_multiple_forwards_only_m2m(self):
# Regression for #24505 - Multiple ManyToManyFields to same "to"
# model with related_name set to '+'.
foo = Line.objects.create(name="foo")
bar = Line.objects.create(name="bar")
post = Post.objects.create()
post.primary_lines.add(foo)
post.secondary_lines.add(bar)
self.assertSequenceEqual(post.primary_lines.all(), [foo])
self.assertSequenceEqual(post.secondary_lines.all(), [bar])
|
M2MRegressionTests
|
python
|
dagster-io__dagster
|
examples/project_fully_featured/project_fully_featured/resources/hn_resource.py
|
{
"start": 632,
"end": 1037
}
|
class ____(HNClient):
def fetch_item_by_id(self, item_id: int) -> Optional[HNItemRecord]:
item_url = f"{HN_BASE_URL}/item/{item_id}.json"
item = requests.get(item_url, timeout=5).json()
return item
def fetch_max_item_id(self) -> int:
return requests.get(f"{HN_BASE_URL}/maxitem.json", timeout=5).json()
def min_item_id(self) -> int:
return 1
|
HNAPIClient
|
python
|
huggingface__transformers
|
src/transformers/models/mimi/modeling_mimi.py
|
{
"start": 7005,
"end": 8226
}
|
class ____(ModelOutput):
r"""
audio_codes (`torch.LongTensor` of shape `(batch_size, num_quantizers, codes_length)`, *optional*):
Discret code embeddings computed using `model.encode`.
encoder_past_key_values (`Cache`, *optional*):
Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the encoder transformer.
This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
The model will output the same cache format that is fed as input.
If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes (those that don't
have their past key value states given to this model).
padding_cache (`MimiConv1dPaddingCache`, *optional*):
Padding cache for MimiConv1d causal convolutions in order to support streaming via cache padding.
"""
audio_codes: Optional[torch.LongTensor] = None
encoder_past_key_values: Optional[Cache] = None
padding_cache: Optional[MimiConv1dPaddingCache] = None
@dataclass
@auto_docstring
|
MimiEncoderOutput
|
python
|
conda__conda
|
conda/common/configuration.py
|
{
"start": 5497,
"end": 6354
}
|
class ____(metaclass=ABCMeta):
def __init__(self, source, key, raw_value):
self.source = source
self.key = key
try:
self._raw_value = raw_value.decode("utf-8")
except AttributeError:
# AttributeError: raw_value is not encoded
self._raw_value = raw_value
def __repr__(self):
return str(vars(self))
@abstractmethod
def value(self, parameter_obj):
raise NotImplementedError()
@abstractmethod
def keyflag(self):
raise NotImplementedError()
@abstractmethod
def valueflags(self, parameter_obj):
raise NotImplementedError()
@classmethod
def make_raw_parameters(cls, source, from_map):
if from_map:
return {key: cls(source, key, from_map[key]) for key in from_map}
return EMPTY_MAP
|
RawParameter
|
python
|
kamyu104__LeetCode-Solutions
|
Python/number-of-good-paths.py
|
{
"start": 54,
"end": 1026
}
|
class ____(object): # Time: O(n * alpha(n)), Space: O(n)
def __init__(self, vals):
self.set = range(len(vals))
self.rank = [0]*len(vals)
self.cnt = [collections.Counter({v:1}) for v in vals] # added
def find_set(self, x):
stk = []
while self.set[x] != x: # path compression
stk.append(x)
x = self.set[x]
while stk:
self.set[stk.pop()] = x
return x
def union_set(self, x, y, v): # modified
x, y = self.find_set(x), self.find_set(y)
if x == y:
return 0 # modified
if self.rank[x] > self.rank[y]: # union by rank
x, y = y, x
self.set[x] = self.set[y]
if self.rank[x] == self.rank[y]:
self.rank[y] += 1
cx, cy = self.cnt[x][v], self.cnt[y][v] # added
self.cnt[y] = collections.Counter({v:cx+cy}) # added
return cx*cy # modified
# tree, sort, union find
|
UnionFind
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_priority_level_configuration.py
|
{
"start": 383,
"end": 7676
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1PriorityLevelConfigurationSpec',
'status': 'V1PriorityLevelConfigurationStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1PriorityLevelConfiguration - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1PriorityLevelConfiguration. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1PriorityLevelConfiguration. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1PriorityLevelConfiguration.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1PriorityLevelConfiguration. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1PriorityLevelConfiguration. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1PriorityLevelConfiguration. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1PriorityLevelConfiguration.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1PriorityLevelConfiguration. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1PriorityLevelConfiguration. # noqa: E501
:return: The metadata of this V1PriorityLevelConfiguration. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1PriorityLevelConfiguration.
:param metadata: The metadata of this V1PriorityLevelConfiguration. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1PriorityLevelConfiguration. # noqa: E501
:return: The spec of this V1PriorityLevelConfiguration. # noqa: E501
:rtype: V1PriorityLevelConfigurationSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1PriorityLevelConfiguration.
:param spec: The spec of this V1PriorityLevelConfiguration. # noqa: E501
:type: V1PriorityLevelConfigurationSpec
"""
self._spec = spec
@property
def status(self):
"""Gets the status of this V1PriorityLevelConfiguration. # noqa: E501
:return: The status of this V1PriorityLevelConfiguration. # noqa: E501
:rtype: V1PriorityLevelConfigurationStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1PriorityLevelConfiguration.
:param status: The status of this V1PriorityLevelConfiguration. # noqa: E501
:type: V1PriorityLevelConfigurationStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PriorityLevelConfiguration):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PriorityLevelConfiguration):
return True
return self.to_dict() != other.to_dict()
|
V1PriorityLevelConfiguration
|
python
|
urllib3__urllib3
|
test/with_dummyserver/test_connectionpool.py
|
{
"start": 57321,
"end": 57742
}
|
class ____(HypercornDummyServerTestCase):
def test_pool_size_retry(self) -> None:
retries = Retry(total=1, raise_on_status=False, status_forcelist=[404])
with HTTPConnectionPool(
self.host, self.port, maxsize=10, retries=retries, block=True
) as pool:
pool.urlopen("GET", "/not_found", preload_content=False)
assert pool.num_connections == 1
|
TestRetryPoolSize
|
python
|
joke2k__faker
|
faker/providers/color/ar_PS/__init__.py
|
{
"start": 80,
"end": 5782
}
|
class ____(ColorProvider):
"""Implement color provider for ``ar_PS`` locale."""
all_colors = OrderedDict(
(
("أزرق أليس", "#F0F8FF"),
("أبيض عتيق", "#FAEBD7"),
("مائي", "#00FFFF"),
("زبرجدي", "#7FFFD4"),
("لازوردي", "#F0FFFF"),
("صوفي", "#F5F5DC"),
("حسائي", "#FFE4C4"),
("أسود", "#000000"),
("لوزي", "#FFEBCD"),
("أزرق", "#0000FF"),
("بنفسجي مزرق", "#8A2BE2"),
("بني", "#A52A2A"),
("خشبية", "#DEB887"),
("أزرق كاديتي", "#5F9EA0"),
("كرتوزي", "#7FFF00"),
("شوكولاتيّ", "#D2691E"),
("مرجاني", "#FF7F50"),
("قنطريوني", "#6495ED"),
("حرير الذرة", "#FFF8DC"),
("قرمزي", "#DC143C"),
("سيان", "#00FFFF"),
("أزرق داكن", "#00008B"),
("سيان داكن", "#008B8B"),
("عصا الدهب الغامق", "#B8860B"),
("رمادي داكن", "#A9A9A9"),
("أخضر داكن", "#006400"),
("خاكي داكن", "#BDB76B"),
("ماجنتا داكن", "#8B008B"),
("أخضر زيتوني داكن", "#556B2F"),
("برتقالي داكن", "#FF8C00"),
("أوركيدي داكن", "#9932CC"),
("أحمر داكن", "#8B0000"),
("سلموني داكن", "#E9967A"),
("أخضر بحري داكن", "#8FBC8F"),
("أزرق أردوازي داكن", "#483D8B"),
("رمادي لازوردي داكن", "#2F4F4F"),
("تركوازي داكن", "#00CED1"),
("بنفسج داكن", "#9400D3"),
("زهري غامق", "#FF1493"),
("أزرق سماوي غامق", "#00BFFF"),
("رمادي خافت", "#696969"),
("أزرق فريق دودجر", "#1E90FF"),
("الطوب شمت", "#B22222"),
("أبيض وردي", "#FFFAF0"),
("أخضر الغابت", "#228B22"),
("فوشي", "#FF00FF"),
("رمادي باهت", "#DCDCDC"),
("أبيض شبحي", "#F8F8FF"),
("ذهبي", "#FFD700"),
("ذهبي", "#DAA520"),
("رمادي", "#808080"),
("أخضر", "#008000"),
("أصفر مخضر", "#ADFF2F"),
("عسلي", "#F0FFF0"),
("وردي فاقع", "#FF69B4"),
("قسطلي", "#CD5C5C"),
("نيلي", "#4B0082"),
("سكري", "#FFFFF0"),
("خاكي", "#F0E68C"),
("لاڤندر", "#E6E6FA"),
("أحمر اللافندر", "#FFF0F5"),
("أخضر عشبي", "#7CFC00"),
("ليمون شيفوني", "#FFFACD"),
("أزرق فاتح", "#ADD8E6"),
("مرجاني فاتح", "#F08080"),
("أزرق طفولي", "#E0FFFF"),
("أصفر ذهبي فاتح ", "#FAFAD2"),
("رمادي فاتح", "#D3D3D3"),
("أخضر فاتح", "#90EE90"),
("وردي فاتح", "#FFB6C1"),
("سلموني فاتح", "#FFA07A"),
("أخضر بحري فاتح", "#20B2AA"),
("سماوي فاتح", "#87CEFA"),
("أزرق أردوازي فاتح", "#778899"),
("أزرق معدني فاتح", "#B0C4DE"),
("أصفر فاتح", "#FFFFE0"),
("ليمي", "#00FF00"),
("أخضر ليموني", "#32CD32"),
("كتاني", "#FAF0E6"),
("فوشيا", "#FF00FF"),
("كستنائي", "#800000"),
("زبرجدي متوسط", "#66CDAA"),
("أزرق متوسط", "#0000CD"),
("أوركيدي متوسط", "#BA55D3"),
("فوشي متوسط", "#9370DB"),
("أخضر بحري متوسط", "#3CB371"),
("أزرق أردوازي متوسط", "#7B68EE"),
("أخضر ربيعي متوسط", "#00FA9A"),
("ترموازي متوسط", "#48D1CC"),
("أحمر بنفسجي", "#C71585"),
("الأزرق متوسط", "#191970"),
("نعناعي كريمي", "#F5FFFA"),
("الوردي الضبابي", "#FFE4E1"),
("موكاسيني", "#FFE4B5"),
("أبيض نافاجو", "#FFDEAD"),
("كحلي", "#000080"),
("رباطي قديم", "#FDF5E6"),
("زيتوني", "#808000"),
("زيتوني رمادي", "#6B8E23"),
("برتقالي", "#FFA500"),
("أحمر برتقالي", "#FF4500"),
("أوركيدي", "#DA70D6"),
("ذهبي باهت", "#EEE8AA"),
("أخضر باهت", "#98FB98"),
("تركوازي باهت", "#AFEEEE"),
("أحمر بنفسجي باهت", "#DB7093"),
("بابايا", "#FFEFD5"),
("حنطي", "#FFDAB9"),
("بيرو", "#CD853F"),
("زهري", "#FFC0CB"),
("برقوقي", "#DDA0DD"),
("أزرق مسحوقي", "#B0E0E6"),
("أرجواني", "#800080"),
("أحمر", "#FF0000"),
("بني وردي", "#BC8F8F"),
("أزرق ملكي", "#4169E1"),
("بني السرج", "#8B4513"),
("سالموني", "#FA8072"),
("بني رملي", "#F4A460"),
("أخضر بحري", "#2E8B57"),
("صدفي", "#FFF5EE"),
("سيينا", "#A0522D"),
("فضي", "#C0C0C0"),
("أزرق سماي", "#87CEEB"),
("أزرق أردوازي", "#6A5ACD"),
("رمادي معدني", "#708090"),
("ثلجي", "#FFFAFA"),
("أخضر ربيعي", "#00FF7F"),
("أزرق معدني", "#4682B4"),
("نطي", "#D2B48C"),
("حذفي", "#008080"),
("أرجواني", "#D8BFD8"),
("طماطمي", "#FF6347"),
("تركواز", "#40E0D0"),
("بنفسجي", "#EE82EE"),
("قمحي", "#F5DEB3"),
("أبيض", "#FFFFFF"),
("دخاني قمحي", "#F5F5F5"),
("أصفر", "#FFFF00"),
("أصفر مخضر", "#9ACD32"),
)
)
safe_colors = (
"أبيض",
"أخضر",
"أرجواني",
"أزرق",
"أسود",
"أصفر",
"حذفي",
"رمادي",
"زيتوني",
"فضي",
"فوشي",
"كحلي",
"كستنائي",
"ليمي",
"مائي",
)
|
Provider
|
python
|
davidhalter__jedi
|
test/completion/classes.py
|
{
"start": 4054,
"end": 4142
}
|
class ____(object):
def sum(self, a, b):
self.xxx = 3
return a + b
|
Calc
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_comprehensions/C420_3.py
|
{
"start": 333,
"end": 884
}
|
class ____:
a = None
def func():
{(C.a,): None for (C.a,) in "abc"} # OK
def func():
obj = type('obj', (), {'attr': 1})()
{(obj.attr,): None for (obj.attr,) in "abc"} # OK
def func():
lst = [1, 2, 3]
{(lst[0],): None for (lst[0],) in "abc"} # OK
def func():
lst = [1, 2, 3, 4, 5]
{(lst[1:3],): None for (lst[1:3],) in "abc"} # OK
# C420: side-effecting assignment targets
# These should NOT trigger C420 because they have side-effecting assignment targets
# See https://github.com/astral-sh/ruff/issues/19511
|
C
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/test/steps/python_connectors.py
|
{
"start": 8923,
"end": 11077
}
|
class ____(Step):
"""Validate the connector can be installed and invoked via Python, using PyAirbyte.
When this fails, it generally signals that the connector is not installable or not
runnable in a Python environment. The most common reasons for this are:
1. Conflicting dependencies.
2. Missing dependency declarations.
3. Incorrect or invalid CLI entrypoints.
"""
title = "Python CLI smoke test using PyAirbyte"
context: ConnectorTestContext
async def _run(self, connector_under_test: Container) -> StepResult:
"""Run all pytest tests declared in the test directory of the connector code.
Args:
connector_under_test (Container): The connector under test container.
Returns:
StepResult: Failure or success of the unit tests with stdout and stdout.
"""
if dpath.util.get(self.context.connector.metadata, "remoteRegistries/pypi/enabled", default=False) is False:
return self.skip("Connector is not flagged for PyPI publish, skipping Python CLI validation.")
test_environment = await self.install_testing_environment(with_poetry(self.context))
test_execution = test_environment.with_(
hacks.never_fail_exec(
[
"pyab",
"validate",
f"--connector={self.context.connector.technical_name}",
"--pip-url='.'",
]
)
)
return await self.get_step_result(test_execution)
async def install_testing_environment(
self,
built_connector_container: Container,
) -> Container:
"""Add PyAirbyte and secrets to the test environment."""
context: ConnectorTestContext = self.context
container_with_test_deps = await pipelines.dagger.actions.python.common.with_python_package(
self.context, built_connector_container.with_entrypoint([]), str(context.connector.code_directory)
)
return container_with_test_deps.with_exec(["pip", "install", f"airbyte=={PYAIRBYTE_VERSION}"], use_entrypoint=True)
|
PyAirbyteValidation
|
python
|
sphinx-doc__sphinx
|
sphinx/domains/cpp/_ast.py
|
{
"start": 47494,
"end": 49051
}
|
class ____(ASTBase):
def __init__(
self, exprs: list[ASTExpression | ASTBracedInitList], trailingComma: bool
) -> None:
self.exprs = exprs
self.trailingComma = trailingComma
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTBracedInitList):
return NotImplemented
return self.exprs == other.exprs and self.trailingComma == other.trailingComma
def __hash__(self) -> int:
return hash((self.exprs, self.trailingComma))
def get_id(self, version: int) -> str:
return 'il%sE' % ''.join(e.get_id(version) for e in self.exprs)
def _stringify(self, transform: StringifyTransform) -> str:
exprs = ', '.join(transform(e) for e in self.exprs)
trailing_comma = ',' if self.trailingComma else ''
return f'{{{exprs}{trailing_comma}}}'
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
verify_description_mode(mode)
signode += addnodes.desc_sig_punctuation('{', '{')
first = True
for e in self.exprs:
if not first:
signode += addnodes.desc_sig_punctuation(',', ',')
signode += addnodes.desc_sig_space()
else:
first = False
e.describe_signature(signode, mode, env, symbol)
if self.trailingComma:
signode += addnodes.desc_sig_punctuation(',', ',')
signode += addnodes.desc_sig_punctuation('}', '}')
|
ASTBracedInitList
|
python
|
ApeWorX__ape
|
src/ape_pm/dependency.py
|
{
"start": 3026,
"end": 9031
}
|
class ____(DependencyAPI):
"""
A dependency from GitHub. Use the ``github`` key in your ``dependencies:``
section of your ``ape-config.yaml`` file to declare a dependency from GitHub.
Config example::
dependencies:
- name: OpenZeppelin
github: OpenZeppelin/openzeppelin-contracts
version: 4.4.0
"""
github: str
"""
The Github repo ID e.g. the organization name followed by the repo name,
such as ``dapphub/erc20``.
"""
ref: Optional[str] = None
"""
The branch or tag to use. When using this field
instead of the 'release' field, the repository
gets cloned instead of downloaded via the
official GitHub release API.
**NOTE**: Will be ignored if given a 'release'.
"""
version: Optional[str] = None
"""
The release version to use. When using this
field instead of the 'ref' field, the GitHub
release API is used to fetch instead of cloning.
**NOTE**: Will be ignored if given a 'ref'.
"""
scheme: str = "https"
"""
Either HTTPS or SSH; only used with `ref:`.
"""
# Exists as property so can be changed for testing.
_github_client: _GithubClient = github_client
@model_validator(mode="before")
@classmethod
def _validate_model(cls, model):
# branch -> ref
if "branch" in model and "ref" not in model:
# Handle branch as an alias.
model["ref"] = model.pop("branch")
if "name" not in model and "github" in model:
# Calculate a default name.
model["name"] = model["github"].split("/")[-1].lower()
return model
@model_validator(mode="after")
def ensure_ref_or_version(self):
if self.ref is None and self.version is None:
raise ValueError("GitHub dependency must have either ref or version specified.")
return self
@property
def package_id(self) -> str:
return self.github
@cached_property
def version_id(self) -> str:
if self.ref:
return self.ref
elif self.version and self.version != "latest":
return self.version
latest_release = self._github_client.get_latest_release(self.org_name, self.repo_name)
return latest_release["tag_name"]
@cached_property
def org_name(self) -> str:
return self.github.split("/")[0]
@cached_property
def repo_name(self) -> str:
return self.github.split("/")[1]
@property
def uri(self) -> str:
_uri = f"https://github.com/{self.github.strip('/')}"
if self.version:
version = f"v{self.version}" if not self.version.startswith("v") else self.version
_uri = f"{_uri}/releases/tag/{version}"
elif self.ref:
_uri = f"{_uri}/tree/{self.ref}"
return _uri
def __repr__(self) -> str:
cls_name = getattr(type(self), "__name__", GithubDependency.__name__)
return f"<{cls_name} github={self.github}>"
def fetch(self, destination: Path):
destination.parent.mkdir(exist_ok=True, parents=True)
if ref := self.ref:
# NOTE: destination path should not exist at this point,
# so delete it in case it's left over from a failure.
if destination.is_dir():
shutil.rmtree(destination)
# Fetch using git-clone approach (by git-reference).
self._fetch_ref(ref, destination)
else:
# Fetch using Version API from GitHub.
version = self.version or "latest"
try:
self._fetch_version(version, destination)
except Exception as err_from_version_approach:
logger.warning(
f"No official release found for version '{version}'. "
"Use `ref:` instead of `version:` for release tags. "
"Checking for matching tags..."
)
# NOTE: When using ref-from-a-version, ensure
# it didn't create the destination along the way;
# else, the ref is cloned in the wrong spot.
if destination.is_dir():
shutil.rmtree(destination, onerror=_remove_readonly)
try:
self._fetch_ref(version, destination)
except Exception:
# NOTE: Ignore this error, it was merely a last attempt.
raise err_from_version_approach
def _fetch_ref(self, ref: str, destination: Path):
options = _version_to_options(ref)
attempt = 0
num_attempts = len(options)
for ref in options:
attempt += 1
try:
self._github_client.clone_repo(
self.org_name,
self.repo_name,
destination,
branch=ref,
scheme=self.scheme,
)
except Exception:
if attempt == num_attempts:
raise # This error!
# Try another option.
continue
else:
# Was successful! Don't try anymore.
break
def _fetch_version(self, version: str, destination: Path):
destination.mkdir(parents=True, exist_ok=True)
options = _version_to_options(version)
attempt = 0
max_attempts = len(options)
for vers in options:
attempt += 1
try:
self._github_client.download_package(
self.org_name, self.repo_name, vers, destination
)
except Exception:
if attempt == max_attempts:
raise # This error!
# Try another option.
continue
else:
# Was successful! Don't try anymore.
break
|
GithubDependency
|
python
|
doocs__leetcode
|
solution/2300-2399/2301.Match Substring After Replacement/Solution2.py
|
{
"start": 0,
"end": 433
}
|
class ____:
def matchReplacement(self, s: str, sub: str, mappings: List[List[str]]) -> bool:
d = [[False] * 128 for _ in range(128)]
for a, b in mappings:
d[ord(a)][ord(b)] = True
for i in range(len(s) - len(sub) + 1):
if all(
a == b or d[ord(b)][ord(a)] for a, b in zip(s[i : i + len(sub)], sub)
):
return True
return False
|
Solution
|
python
|
pytorch__pytorch
|
.github/scripts/test_filter_test_configs.py
|
{
"start": 7855,
"end": 35353
}
|
class ____(TestCase):
def setUp(self) -> None:
os.environ["GITHUB_TOKEN"] = "GITHUB_TOKEN"
if os.getenv("GITHUB_OUTPUT"):
del os.environ["GITHUB_OUTPUT"]
def tearDown(self) -> None:
if os.getenv("GITHUB_OUTPUT"):
os.remove(str(os.getenv("GITHUB_OUTPUT")))
@mock.patch("filter_test_configs.download_json")
def test_get_labels(self, mock_download_json: Any) -> None:
mock_download_json.return_value = MOCKED_PR_INFO
labels = get_labels(pr_number=12345)
self.assertSetEqual({"open source", "foo"}, labels)
@mock.patch("filter_test_configs.download_json")
def test_get_labels_failed(self, mock_download_json: Any) -> None:
mock_download_json.return_value = {}
labels = get_labels(pr_number=54321)
self.assertFalse(labels)
def test_filter(self) -> None:
mocked_labels = {f"{PREFIX}cfg", "ciflow/trunk", "plain-cfg"}
testcases = [
{
"test_matrix": '{include: [{config: "default", runner: "linux"}]}',
"expected": '{"include": []}',
"description": "Request test-config/cfg but the test matrix doesn't have it",
},
{
"test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "plain-cfg"}]}',
"expected": '{"include": []}',
"description": "A valid test config label needs to start with test-config/",
},
{
"test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "cfg", shard: 1}]}',
"expected": '{"include": [{"config": "cfg", "shard": 1}]}',
"description": "Found a match, only keep that",
},
]
for case in testcases:
filtered_test_matrix = filter(
yaml.safe_load(case["test_matrix"]), mocked_labels
)
self.assertEqual(case["expected"], json.dumps(filtered_test_matrix))
def test_filter_with_test_config_label(self) -> None:
mocked_labels = {f"{PREFIX}cfg", "ciflow/trunk"}
testcases = [
{
"test_matrix": '{include: [{config: "default", runner: "linux"}]}',
"expected": '{"include": []}',
"description": "Found a valid label in the PR body, return the filtered test matrix",
},
{
"test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "cfg", shard: 1}]}',
"expected": '{"include": [{"config": "cfg", "shard": 1}]}',
"description": "Found a match, only keep that",
},
]
for case in testcases:
filtered_test_matrix = filter(
yaml.safe_load(case["test_matrix"]), mocked_labels
)
self.assertEqual(case["expected"], json.dumps(filtered_test_matrix))
def test_filter_selected_test_configs(self) -> None:
testcases = [
{
"test_matrix": '{include: [{config: "default"}]}',
"selected_test_configs": "",
"expected": '{"include": [{"config": "default"}]}',
"description": "No selected test configs",
},
{
"test_matrix": '{include: [{config: "default"}]}',
"selected_test_configs": "foo",
"expected": '{"include": []}',
"description": "A different test config is selected",
},
{
"test_matrix": '{include: [{config: "default"}]}',
"selected_test_configs": "foo, bar",
"expected": '{"include": []}',
"description": "A different set of test configs is selected",
},
{
"test_matrix": '{include: [{config: "default"}]}',
"selected_test_configs": "foo, bar,default",
"expected": '{"include": [{"config": "default"}]}',
"description": "One of the test config is selected",
},
{
"test_matrix": '{include: [{config: "default"}, {config: "bar"}]}',
"selected_test_configs": "foo, bar,Default",
"expected": '{"include": [{"config": "default"}, {"config": "bar"}]}',
"description": "Several test configs are selected",
},
]
for case in testcases:
selected_test_configs = {
v.strip().lower()
for v in case["selected_test_configs"].split(",")
if v.strip()
}
filtered_test_matrix = filter_selected_test_configs(
yaml.safe_load(case["test_matrix"]), selected_test_configs
)
self.assertEqual(case["expected"], json.dumps(filtered_test_matrix))
def test_set_periodic_modes(self) -> None:
testcases: list[dict[str, str]] = [
{
"job_name": "a CI job",
"test_matrix": "{include: []}",
"description": "Empty test matrix",
},
{
"job_name": "a-ci-job",
"test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "cfg", runner: "macos"}]}',
"description": "Replicate each periodic mode in a different config",
},
{
"job_name": "a-ci-cuda11.8-job",
"test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "cfg", runner: "macos"}]}',
"description": "Replicate each periodic mode in a different config for a CUDA job",
},
{
"job_name": "a-ci-rocm-job",
"test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "cfg", runner: "macos"}]}',
"description": "Replicate each periodic mode in a different config for a ROCm job",
},
{
"job_name": "",
"test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "cfg", runner: "macos"}]}',
"description": "Empty job name",
},
{
"test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "cfg", runner: "macos"}]}',
"description": "Missing job name",
},
]
for case in testcases:
job_name = case.get("job_name", None)
test_matrix = yaml.safe_load(case["test_matrix"])
scheduled_test_matrix = set_periodic_modes(test_matrix, job_name)
expected_modes = [
m for m, c in SUPPORTED_PERIODICAL_MODES.items() if c(job_name)
]
self.assertEqual(
len(test_matrix["include"]) * len(expected_modes),
len(scheduled_test_matrix["include"]),
)
@mock.patch("filter_test_configs.download_json")
def test_remove_disabled_jobs(self, mock_download_json: Any) -> None:
mock_download_json.return_value = MOCKED_DISABLED_UNSTABLE_JOBS
testcases = [
{
"workflow": "pull",
"job_name": "invalid job name",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": [{"config": "default"}]}',
"description": "invalid job name",
},
{
"workflow": "pull",
"job_name": "mock-platform-1 / build",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": []}',
"description": "disable build and test jobs",
},
{
"workflow": "trunk",
"job_name": "mock-platform-2 / build",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": []}',
"description": "disable build job",
},
{
"workflow": "periodic",
"job_name": "mock-platform-3 / test",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": []}',
"description": "disable test job",
},
{
"workflow": "pull",
"job_name": "mock-platform-4 / build-and-test",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": []}',
"description": "disable build-and-test job",
},
{
"workflow": "trunk",
"job_name": "mock-platform-5 / test",
"test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "backward_compat"}]}',
"expected": '{"include": [{"config": "default", "runner": "linux"}]}',
"description": "disable a test config",
},
{
"workflow": "periodic",
"job_name": "mock-platform-6 / build-and-test",
"test_matrix": '{include: [{config: "default", runner: "linux"}, {config: "backward_compat"}]}',
"expected": '{"include": [{"config": "backward_compat"}]}',
"description": "disable a build-and-test config",
},
{
"workflow": "pull",
"job_name": "mock-platform-7 / test",
"test_matrix": '{include: [{config: "default"}, {config: "backward_compat"}]}',
"expected": '{"include": [{"config": "default"}, {"config": "backward_compat"}]}',
"description": "include an invalid job name in the disabled issue",
},
{
"workflow": "trunk",
"job_name": "mock-platform-8 / build",
"test_matrix": '{include: [{config: "default"}, {config: "backward_compat"}]}',
"expected": '{"include": [{"config": "default"}, {"config": "backward_compat"}]}',
"description": "include an invalid combination of build and test config",
},
{
"workflow": "inductor",
"job_name": "mock-platform-8 / build",
"test_matrix": '{include: [{config: "default"}, {config: "backward_compat"}]}',
"expected": '{"include": [{"config": "default"}, {"config": "backward_compat"}]}',
"description": "not disabled on this workflow",
},
{
"workflow": "pull",
"job_name": "mock-platform-9 / build",
"test_matrix": '{include: [{config: "default"}, {config: "backward_compat"}]}',
"expected": '{"include": [{"config": "default"}, {"config": "backward_compat"}]}',
"description": "not disabled on this platform",
},
{
"workflow": "linux-binary-libtorch-cxx11-abi",
"job_name": "libtorch-cpu-shared-with-deps-cxx11-abi-build / build",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": []}',
"description": "Build job is not needed when test job has been disabled",
},
{
"workflow": "linux-binary-libtorch-cxx11-abi",
"job_name": "libtorch-cpu-shared-with-deps-cxx11-abi-test / test",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": []}',
"description": "The binary test job is disabled on this platform",
},
{
"workflow": "linux-binary-manywheel",
"job_name": "manywheel-py3_8-cuda11_8-build / build",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": []}',
"description": "Both binary build and test jobs are disabled",
},
{
"workflow": "linux-binary-manywheel",
"job_name": "manywheel-py3_8-cuda11_8-test / test",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": []}',
"description": "Both binary build and test jobs are disabled",
},
]
for case in testcases:
workflow = case["workflow"]
job_name = case["job_name"]
test_matrix = yaml.safe_load(case["test_matrix"])
filtered_test_matrix = remove_disabled_jobs(workflow, job_name, test_matrix)
self.assertEqual(case["expected"], json.dumps(filtered_test_matrix))
@mock.patch("filter_test_configs.download_json")
def test_mark_unstable_jobs(self, mock_download_json: Any) -> None:
mock_download_json.return_value = MOCKED_DISABLED_UNSTABLE_JOBS
testcases = [
{
"workflow": "pull",
"job_name": "invalid job name",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": [{"config": "default"}]}',
"description": "invalid job name",
},
{
"workflow": "pull",
"job_name": "mock-platform-1 / build",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": [{"config": "default", "unstable": "unstable"}]}',
"description": "mark build and test jobs as unstable",
},
{
"workflow": "trunk",
"job_name": "mock-platform-2 / build",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": [{"config": "default", "unstable": "unstable"}]}',
"description": "mark build job as unstable",
},
{
"workflow": "periodic",
"job_name": "mock-platform-3 / test",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": [{"config": "default", "unstable": "unstable"}]}',
"description": "mark test job as unstable",
},
{
"workflow": "pull",
"job_name": "mock-platform-4 / build-and-test",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": [{"config": "default", "unstable": "unstable"}]}',
"description": "mark build-and-test job as unstable",
},
{
"workflow": "trunk",
"job_name": "mock-platform-5 / test",
"test_matrix": '{include: [{config: "default"}, {config: "backward_compat"}]}',
"expected": '{"include": [{"config": "default"}, {"config": "backward_compat", "unstable": "unstable"}]}',
"description": "mark a test config as unstable",
},
{
"workflow": "periodic",
"job_name": "mock-platform-6 / build-and-test",
"test_matrix": '{include: [{config: "default"}, {config: "backward_compat"}]}',
"expected": '{"include": [{"config": "default", "unstable": "unstable"}, {"config": "backward_compat"}]}',
"description": "mark a build-and-test config as unstable",
},
{
"workflow": "pull",
"job_name": "mock-platform-7 / test",
"test_matrix": '{include: [{config: "default"}, {config: "backward_compat"}]}',
"expected": '{"include": [{"config": "default"}, {"config": "backward_compat"}]}',
"description": "include an invalid job name in the unstable issue",
},
{
"workflow": "trunk",
"job_name": "mock-platform-8 / build",
"test_matrix": '{include: [{config: "default"}, {config: "backward_compat"}]}',
"expected": '{"include": [{"config": "default"}, {"config": "backward_compat"}]}',
"description": "include an invalid combination of build and test config",
},
{
"workflow": "inductor",
"job_name": "mock-platform-8 / build",
"test_matrix": '{include: [{config: "default"}, {config: "backward_compat"}]}',
"expected": '{"include": [{"config": "default"}, {"config": "backward_compat"}]}',
"description": "not target this workflow",
},
{
"workflow": "pull",
"job_name": "mock-platform-9 / build",
"test_matrix": '{include: [{config: "default"}, {config: "backward_compat"}]}',
"expected": '{"include": [{"config": "default"}, {"config": "backward_compat"}]}',
"description": "not target this platform",
},
{
"workflow": "linux-binary-libtorch-cxx11-abi",
"job_name": "libtorch-cpu-shared-with-deps-cxx11-abi-build / build",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": [{"config": "default", "unstable": "unstable"}]}',
"description": "Unstable binary build job",
},
{
"workflow": "linux-binary-libtorch-cxx11-abi",
"job_name": "libtorch-cpu-shared-with-deps-cxx11-abi-test / test",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": [{"config": "default", "unstable": "unstable"}]}',
"description": "Unstable binary test job",
},
{
"workflow": "linux-binary-manywheel",
"job_name": "manywheel-py3_8-cuda11_8-build / build",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": [{"config": "default", "unstable": "unstable"}]}',
"description": "Both binary build and test jobs are unstable",
},
{
"workflow": "linux-binary-manywheel",
"job_name": "manywheel-py3_8-cuda11_8-test / test",
"test_matrix": '{include: [{config: "default"}]}',
"expected": '{"include": [{"config": "default", "unstable": "unstable"}]}',
"description": "Both binary build and test jobs are unstable",
},
]
for case in testcases:
workflow = case["workflow"]
job_name = case["job_name"]
test_matrix = yaml.safe_load(case["test_matrix"])
filtered_test_matrix = mark_unstable_jobs(workflow, job_name, test_matrix)
self.assertEqual(json.loads(case["expected"]), filtered_test_matrix)
@mock.patch("subprocess.check_output")
def test_perform_misc_tasks(self, mocked_subprocess: Any) -> None:
def _gen_expected_string(
keep_going: bool = False,
ci_verbose_test_logs: bool = False,
ci_test_showlocals: bool = False,
ci_no_test_timeout: bool = False,
ci_no_td: bool = False,
ci_td_distributed: bool = False,
is_unstable: bool = False,
reenabled_issues: str = "",
) -> str:
return (
f"keep-going={keep_going}\n"
f"ci-verbose-test-logs={ci_verbose_test_logs}\n"
f"ci-test-showlocals={ci_test_showlocals}\n"
f"ci-no-test-timeout={ci_no_test_timeout}\n"
f"ci-no-td={ci_no_td}\n"
f"ci-td-distributed={ci_td_distributed}\n"
f"is-unstable={is_unstable}\n"
f"reenabled-issues={reenabled_issues}\n"
)
mocked_subprocess.return_value = b""
testcases: list[dict[str, Any]] = [
{
"labels": {},
"test_matrix": '{include: [{config: "default"}]}',
"job_name": "A job name",
"expected": _gen_expected_string(),
"description": "No keep-going, no is-unstable",
},
{
"labels": {"keep-going"},
"test_matrix": '{include: [{config: "default"}]}',
"job_name": "A job name",
"expected": _gen_expected_string(keep_going=True),
"description": "Has keep-going, no is-unstable",
},
{
"labels": {},
"test_matrix": '{include: [{config: "default"}]}',
"job_name": "A job name",
"pr_body": "[keep-going]",
"expected": _gen_expected_string(keep_going=True),
"description": "Keep-going in PR body",
},
{
"labels": {"ci-verbose-test-logs"},
"test_matrix": '{include: [{config: "default"}]}',
"job_name": "A job name",
"pr_body": "[ci-no-test-timeout]",
"expected": _gen_expected_string(
ci_verbose_test_logs=True, ci_no_test_timeout=True
),
"description": "No pipe logs label and no test timeout in PR body",
},
{
"labels": {"ci-test-showlocals"},
"test_matrix": '{include: [{config: "default"}]}',
"job_name": "A job name",
"expected": _gen_expected_string(ci_test_showlocals=True),
"description": "Has ci-test-showlocals",
},
{
"labels": {},
"test_matrix": '{include: [{config: "default"}]}',
"job_name": "A job name",
"pr_body": "[ci-test-showlocals]",
"expected": _gen_expected_string(ci_test_showlocals=True),
"description": "ci-test-showlocals in body",
},
{
"labels": {"ci-no-test-timeout"},
"test_matrix": '{include: [{config: "default"}]}',
"job_name": "A job name",
"pr_body": "[ci-verbose-test-logs]",
"expected": _gen_expected_string(
ci_verbose_test_logs=True, ci_no_test_timeout=True
),
"description": "No pipe logs in PR body and no test timeout in label (same as the above but swapped)",
},
{
"labels": {"ci-no-td"},
"test_matrix": '{include: [{config: "default"}]}',
"job_name": "A job name",
"pr_body": "",
"expected": _gen_expected_string(ci_no_td=True),
"description": "No pipe logs in PR body and no test timeout in label (same as the above but swapped)",
},
{
"labels": {},
"test_matrix": '{include: [{config: "default"}]}',
"job_name": None,
"expected": _gen_expected_string(),
"description": "No job name",
},
{
"labels": {},
"test_matrix": '{include: [{config: "default"}]}',
"job_name": "macos-12-py3-arm64 / test (default, 1, 3, macos-m1-stable, unstable)",
"expected": _gen_expected_string(is_unstable=True),
"description": "Unstable job",
},
{
"labels": {},
"test_matrix": '{include: [{config: "default"}]}',
"job_name": "macos-12-py3-arm64 / test (default, 1, 3, macos-m1-stable, unstable)",
"expected": _gen_expected_string(is_unstable=True),
"description": "Unstable job",
},
{
"labels": {},
"test_matrix": '{include: [{config: "1", unstable: "unstable"}, {config: "2", unstable: "unstable"}]}',
"job_name": "macos-12-py3-arm64 / build",
"expected": _gen_expected_string(is_unstable=True),
"description": "All configs are unstable",
},
{
"labels": {},
"test_matrix": '{include: [{config: "1", unstable: "unstable"}, {config: "2"}]}',
"job_name": "macos-12-py3-arm64 / build",
"expected": _gen_expected_string(is_unstable=False),
"description": "Only mark some configs as unstable",
},
{
"labels": {},
"test_matrix": '{include: [{config: "default"}]}',
"job_name": "A job name",
"pr_body": "resolves #123 fixes #234",
"expected": _gen_expected_string(reenabled_issues="123,234"),
"description": "Reenable some issues",
},
]
for case in testcases:
labels = case["labels"]
test_matrix = yaml.safe_load(case["test_matrix"])
job_name = case["job_name"]
pr_body = case.get("pr_body", "")
with tempfile.NamedTemporaryFile(delete=False) as fp:
os.environ["GITHUB_OUTPUT"] = fp.name
perform_misc_tasks(labels, test_matrix, job_name, pr_body)
with open(str(os.getenv("GITHUB_OUTPUT"))) as fp:
self.assertEqual(case["expected"], fp.read())
# test variations of close in PR_BODY
def test_parse_reenabled_issues(self) -> None:
pr_body = "closes #123 Close #143 ClOsE #345 closed #10283"
self.assertEqual(
parse_reenabled_issues(pr_body), ["123", "143", "345", "10283"]
)
# test variations of fix
pr_body = "fix #123 FixEd #143 fixes #345 FiXeD #10283"
self.assertEqual(
parse_reenabled_issues(pr_body), ["123", "143", "345", "10283"]
)
# test variations of resolve
pr_body = "resolve #123 resolveS #143 REsolved #345 RESOLVES #10283"
self.assertEqual(
parse_reenabled_issues(pr_body), ["123", "143", "345", "10283"]
)
# test links
pr_body = "closes https://github.com/pytorch/pytorch/issues/75198 fixes https://github.com/pytorch/pytorch/issues/75123"
self.assertEqual(parse_reenabled_issues(pr_body), ["75198", "75123"])
# test strange spacing
pr_body = (
"resolve #123,resolveS #143Resolved #345\nRESOLVES #10283 "
"Fixed #2348fixes https://github.com/pytorch/pytorch/issues/75123resolveS #2134"
)
self.assertEqual(
parse_reenabled_issues(pr_body),
["123", "143", "345", "10283", "2348", "75123", "2134"],
)
# test bad things
pr_body = (
"fixes189 fixeshttps://github.com/pytorch/pytorch/issues/75123 "
"closedhttps://githubcom/pytorch/pytorch/issues/75123" # @lint-ignore
"fix 234, fixes # 45, fixing #123, close 234, closes#45, closing #123 resolve 234, "
"resolves #45, resolving #123"
)
self.assertEqual(parse_reenabled_issues(pr_body), [])
pr_body = None
self.assertEqual(parse_reenabled_issues(pr_body), [])
if __name__ == "__main__":
main()
|
TestConfigFilter
|
python
|
getsentry__sentry
|
src/sentry/core/endpoints/team_projects.py
|
{
"start": 5330,
"end": 5488
}
|
class ____(TypedDict):
request: Request
organization: Organization
target_object: int
@extend_schema(tags=["Teams"])
@region_silo_endpoint
|
AuditData
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/core/converter_test.py
|
{
"start": 1111,
"end": 1157
}
|
class ____(converter.Base):
pass
|
TestConverter
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_array_formula03.py
|
{
"start": 315,
"end": 1572
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("array_formula03.xlsx")
self.ignore_files = [
"xl/calcChain.xml",
"[Content_Types].xml",
"xl/_rels/workbook.xml.rels",
]
def test_create_file_write_formula(self):
"""Test the creation of an XlsxWriter file with an array formula."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
data = [0, 0, 0]
worksheet.write_column("B1", data)
worksheet.write_column("C1", data)
worksheet.write_formula("A1", "{=SUM(B1:C1*B2:C2)}", None)
workbook.close()
self.assertExcelEqual()
def test_create_file_write(self):
"""Test the creation of an XlsxWriter file with an array formula."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
data = [0, 0, 0]
worksheet.write_column("B1", data)
worksheet.write_column("C1", data)
worksheet.write("A1", "{=SUM(B1:C1*B2:C2)}", None)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
openai__openai-python
|
tests/api_resources/uploads/test_parts.py
|
{
"start": 2114,
"end": 4032
}
|
class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
part = await async_client.uploads.parts.create(
upload_id="upload_abc123",
data=b"raw file contents",
)
assert_matches_type(UploadPart, part, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.uploads.parts.with_raw_response.create(
upload_id="upload_abc123",
data=b"raw file contents",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
part = response.parse()
assert_matches_type(UploadPart, part, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.uploads.parts.with_streaming_response.create(
upload_id="upload_abc123",
data=b"raw file contents",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
part = await response.parse()
assert_matches_type(UploadPart, part, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `upload_id` but received ''"):
await async_client.uploads.parts.with_raw_response.create(
upload_id="",
data=b"raw file contents",
)
|
TestAsyncParts
|
python
|
Pylons__pyramid
|
tests/test_viewderivers.py
|
{
"start": 70982,
"end": 71170
}
|
class ____:
def __init__(self, permitted=True):
self.permitted = permitted
def permits(self, request, context, permission):
return self.permitted
|
DummySecurityPolicy
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/workbook/test_overlap_exceptions.py
|
{
"start": 350,
"end": 3369
}
|
class ____(unittest.TestCase):
"""
Test overlapping merge and table ranges.
"""
def setUp(self):
fh = StringIO()
workbook = Workbook()
workbook._set_filehandle(fh)
self.workbook = workbook
def test_overlaps01(self):
"""Test Worksheet range overlap exceptions"""
worksheet = self.workbook.add_worksheet()
worksheet.merge_range("A1:G10", "")
with self.assertRaises(OverlappingRange):
worksheet.merge_range("A1:G10", "")
def test_overlaps02(self):
"""Test Worksheet range overlap exceptions"""
worksheet = self.workbook.add_worksheet()
worksheet.merge_range("A1:G10", "")
with self.assertRaises(OverlappingRange):
worksheet.merge_range("B3:C3", "")
def test_overlaps03(self):
"""Test Worksheet range overlap exceptions"""
worksheet = self.workbook.add_worksheet()
worksheet.merge_range("A1:G10", "")
with self.assertRaises(OverlappingRange):
worksheet.merge_range("G10:G11", "")
def test_overlaps04(self):
"""Test Worksheet range overlap exceptions"""
worksheet = self.workbook.add_worksheet()
worksheet.add_table("A1:G10")
with self.assertRaises(OverlappingRange):
worksheet.add_table("A1:G10")
def test_overlaps05(self):
"""Test Worksheet range overlap exceptions"""
worksheet = self.workbook.add_worksheet()
worksheet.add_table("A1:G10")
with self.assertRaises(OverlappingRange):
worksheet.add_table("B3:C3")
def test_overlaps06(self):
"""Test Worksheet range overlap exceptions"""
worksheet = self.workbook.add_worksheet()
worksheet.add_table("A1:G10")
with self.assertRaises(OverlappingRange):
worksheet.add_table("G1:G11")
def test_overlaps07(self):
"""Test Worksheet range overlap exceptions"""
worksheet = self.workbook.add_worksheet()
worksheet.merge_range("A1:G10", "")
with self.assertRaises(OverlappingRange):
worksheet.add_table("B3:C3")
def test_overlaps08(self):
"""Test Worksheet range overlap exceptions"""
worksheet = self.workbook.add_worksheet()
worksheet.add_table("A1:G10")
with self.assertRaises(OverlappingRange):
worksheet.merge_range("B3:C3", "")
def test_overlaps09(self):
"""Test Worksheet range overlap exceptions"""
worksheet = self.workbook.add_worksheet()
worksheet.add_table("A1:C5", {"autofilter": True})
with self.assertRaises(OverlappingRange):
worksheet.autofilter("A1:C5")
def test_overlaps10(self):
"""Test Worksheet range overlap exceptions"""
worksheet = self.workbook.add_worksheet()
worksheet.autofilter("A1:C5")
with self.assertRaises(OverlappingRange):
worksheet.add_table("A1:C5", {"autofilter": True})
|
TestOverlapRanges
|
python
|
apache__airflow
|
providers/fab/src/airflow/providers/fab/auth_manager/api_fastapi/services/login.py
|
{
"start": 1343,
"end": 2627
}
|
class ____:
"""Login Service for FABAuthManager."""
@classmethod
def create_token(
cls, body: LoginBody, expiration_time_in_seconds: int = conf.getint("api_auth", "jwt_expiration_time")
) -> LoginResponse:
"""Create a new token."""
if not body.username or not body.password:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST, detail="Username and password must be provided"
)
auth_manager = cast("FabAuthManager", get_auth_manager())
user: User | None = None
if auth_manager.security_manager.auth_type == AUTH_LDAP:
user = auth_manager.security_manager.auth_user_ldap(
body.username, body.password, rotate_session_id=False
)
if user is None:
user = auth_manager.security_manager.auth_user_db(
body.username, body.password, rotate_session_id=False
)
if not user:
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid credentials")
return LoginResponse(
access_token=auth_manager.generate_jwt(
user=user, expiration_time_in_seconds=expiration_time_in_seconds
)
)
|
FABAuthManagerLogin
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/dataproc.py
|
{
"start": 84777,
"end": 94144
}
|
class ____(GoogleCloudBaseOperator):
"""
Submit a job to a cluster.
:param project_id: Optional. The ID of the Google Cloud project that the job belongs to.
:param region: Required. The Cloud Dataproc region in which to handle the request.
:param job: Required. The job resource.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.dataproc_v1.types.Job`.
For the complete list of supported job types and their configurations please take a look here
https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
:param request_id: Optional. A unique id used to identify the request. If the server receives two
``SubmitJobRequest`` requests with the same id, then the second request will be ignored and the first
``Job`` created and stored in the backend is returned.
It is recommended to always set this value to a UUID.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id:
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param asynchronous: Flag to return after submitting the job to the Dataproc API.
This is useful for submitting long-running jobs and
waiting on them asynchronously using the DataprocJobSensor
:param deferrable: Run operator in the deferrable mode
:param polling_interval_seconds: time in seconds between polling for job completion.
The value is considered only when running in deferrable mode. Must be greater than 0.
:param cancel_on_kill: Flag which indicates whether cancel the hook's job or not, when on_kill is called
:param wait_timeout: How many seconds wait for job to be ready. Used only if ``asynchronous`` is False
"""
template_fields: Sequence[str] = (
"project_id",
"region",
"job",
"gcp_conn_id",
"impersonation_chain",
"request_id",
)
template_fields_renderers = {"job": "json"}
operator_extra_links = (DataprocJobLink(),)
def __init__(
self,
*,
job: dict,
region: str,
project_id: str = PROVIDE_PROJECT_ID,
request_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
asynchronous: bool = False,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
polling_interval_seconds: int = 10,
cancel_on_kill: bool = True,
wait_timeout: int | None = None,
openlineage_inject_parent_job_info: bool = conf.getboolean(
"openlineage", "spark_inject_parent_job_info", fallback=False
),
openlineage_inject_transport_info: bool = conf.getboolean(
"openlineage", "spark_inject_transport_info", fallback=False
),
**kwargs,
) -> None:
super().__init__(**kwargs)
if deferrable and polling_interval_seconds <= 0:
raise ValueError("Invalid value for polling_interval_seconds. Expected value greater than 0")
self.project_id = project_id
self.region = region
self.job = job
self.request_id = request_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.asynchronous = asynchronous
self.deferrable = deferrable
self.polling_interval_seconds = polling_interval_seconds
self.cancel_on_kill = cancel_on_kill
self.hook: DataprocHook | None = None
self.job_id: str | None = None
self.wait_timeout = wait_timeout
self.openlineage_inject_parent_job_info = openlineage_inject_parent_job_info
self.openlineage_inject_transport_info = openlineage_inject_transport_info
def execute(self, context: Context):
self.log.info("Submitting job")
self.hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
if self.openlineage_inject_parent_job_info or self.openlineage_inject_transport_info:
self.log.info("Automatic injection of OpenLineage information into Spark properties is enabled.")
self._inject_openlineage_properties_into_dataproc_job(context)
job_object = self.hook.submit_job(
project_id=self.project_id,
region=self.region,
job=self.job,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
new_job_id: str = job_object.reference.job_id
self.log.info("Job %s submitted successfully.", new_job_id)
# Save data required by extra links no matter what the job status will be
project_id = self.project_id or self.hook.project_id
if project_id:
DataprocJobLink.persist(
context=context,
job_id=new_job_id,
region=self.region,
project_id=project_id,
)
self.job_id = new_job_id
if self.deferrable:
job = self.hook.get_job(project_id=self.project_id, region=self.region, job_id=self.job_id)
state = job.status.state
if state == JobStatus.State.DONE:
return self.job_id
if state == JobStatus.State.ERROR:
raise AirflowException(f"Job failed:\n{job}")
if state == JobStatus.State.CANCELLED:
raise AirflowException(f"Job was cancelled:\n{job}")
self.defer(
trigger=DataprocSubmitTrigger(
job_id=self.job_id,
project_id=self.project_id,
region=self.region,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
polling_interval_seconds=self.polling_interval_seconds,
cancel_on_kill=self.cancel_on_kill,
),
method_name="execute_complete",
)
elif not self.asynchronous:
self.log.info("Waiting for job %s to complete", new_job_id)
self.hook.wait_for_job(
job_id=new_job_id, region=self.region, project_id=self.project_id, timeout=self.wait_timeout
)
self.log.info("Job %s completed successfully.", new_job_id)
return self.job_id
def execute_complete(self, context, event=None) -> None:
"""
Act as a callback for when the trigger fires.
This returns immediately. It relies on trigger to throw an exception,
otherwise it assumes execution was successful.
"""
job_state = event["job_state"]
job_id = event["job_id"]
job = event["job"]
if job_state == JobStatus.State.ERROR.name: # type: ignore
raise AirflowException(f"Job {job_id} failed:\n{job}")
if job_state == JobStatus.State.CANCELLED.name: # type: ignore
raise AirflowException(f"Job {job_id} was cancelled:\n{job}")
self.log.info("%s completed successfully.", self.task_id)
return job_id
def on_kill(self):
if self.job_id and self.cancel_on_kill:
self.hook.cancel_job(job_id=self.job_id, project_id=self.project_id, region=self.region)
def _inject_openlineage_properties_into_dataproc_job(self, context: Context) -> None:
try:
from airflow.providers.google.cloud.openlineage.utils import (
inject_openlineage_properties_into_dataproc_job,
)
self.job = inject_openlineage_properties_into_dataproc_job(
job=self.job,
context=context,
inject_parent_job_info=self.openlineage_inject_parent_job_info,
inject_transport_info=self.openlineage_inject_transport_info,
)
except Exception as e:
self.log.warning(
"An error occurred while trying to inject OpenLineage information. "
"Dataproc job has not been modified by OpenLineage.",
exc_info=e,
)
|
DataprocSubmitJobOperator
|
python
|
scikit-learn__scikit-learn
|
sklearn/gaussian_process/_gpc.py
|
{
"start": 1303,
"end": 20701
}
|
class ____(BaseEstimator):
"""Binary Gaussian process classification based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 from [RW2006]_.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel instance, default=None
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : 'fmin_l_bfgs_b' or callable, default='fmin_l_bfgs_b'
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, default=0
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict : int, default=100
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, default=False
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization. See :term:`the Glossary
<warm_start>`.
copy_X_train : bool, default=True
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, default=None
Determines random number generation used to initialize the centers.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
X_train_ : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data (also
required for prediction).
y_train_ : array-like of shape (n_samples,)
Target values in training data (also required for prediction)
classes_ : array-like of shape (n_classes,)
Unique class labels.
kernel_ : kernl instance
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like of shape (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in X_train_
pi_ : array-like of shape (n_samples,)
The probabilities of the positive class for the training points
X_train_
W_sr_ : array-like of shape (n_samples,)
Square root of W, the Hessian of log-likelihood of the latent function
values for the observed labels. Since W is diagonal, only the diagonal
of sqrt(W) is stored.
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
References
----------
.. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams,
"Gaussian Processes for Machine Learning",
MIT Press 2006 <https://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_
"""
def __init__(
self,
kernel=None,
*,
optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0,
max_iter_predict=100,
warm_start=False,
copy_X_train=True,
random_state=None,
):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process classification model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,)
Target values, must be binary.
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF(
1.0, length_scale_bounds="fixed"
)
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = np.copy(X) if self.copy_X_train else X
# Encode class labels and check that it is a binary classification
# problem
label_encoder = LabelEncoder()
self.y_train_ = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
if self.classes_.size > 2:
raise ValueError(
"%s supports only binary classification. y contains classes %s"
% (self.__class__.__name__, self.classes_)
)
elif self.classes_.size == 1:
raise ValueError(
"{0:s} requires 2 classes; got {1:d} class".format(
self.__class__.__name__, self.classes_.size
)
)
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True, clone_kernel=False
)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta, clone_kernel=False)
# First optimize starting from theta specified in kernel
optima = [
self._constrained_optimization(
obj_func, self.kernel_.theta, self.kernel_.bounds
)
]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite."
)
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = np.exp(self.rng.uniform(bounds[:, 0], bounds[:, 1]))
optima.append(
self._constrained_optimization(obj_func, theta_initial, bounds)
)
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.kernel_._check_bounds_params()
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(
self.kernel_.theta
)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
_, (self.pi_, self.W_sr_, self.L_, _, _) = self._posterior_mode(
K, return_temporaries=True
)
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self)
# As discussed on Section 3.4.2 of GPML, for making hard binary
# decisions, it is enough to compute the MAP of the posterior and
# pass it through the link function
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4
return np.where(f_star > 0, self.classes_[1], self.classes_[0])
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
"""
check_is_fitted(self)
# Compute the mean and variance of the latent function
# (Lines 4-6 of Algorithm 3.2 of GPML)
latent_mean, latent_var = self.latent_mean_and_variance(X)
# Line 7:
# Approximate \int log(z) * N(z | f_star, var_f_star)
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
alpha = 1 / (2 * latent_var)
gamma = LAMBDAS * latent_mean
integrals = (
np.sqrt(np.pi / alpha)
* erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2)))
/ (2 * np.sqrt(latent_var * 2 * np.pi))
)
pi_star = (COEFS * integrals).sum(axis=0) + 0.5 * COEFS.sum()
return np.vstack((1 - pi_star, pi_star)).T
def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like of shape (n_kernel_params,), default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified, but may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), \
optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when `eval_gradient` is True.
"""
if theta is None:
if eval_gradient:
raise ValueError("Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
if clone_kernel:
kernel = self.kernel_.clone_with_theta(theta)
else:
kernel = self.kernel_
kernel.theta = theta
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Compute log-marginal-likelihood Z and also store some temporaries
# which can be reused for computing Z's gradient
Z, (pi, W_sr, L, b, a) = self._posterior_mode(K, return_temporaries=True)
if not eval_gradient:
return Z
# Compute gradient based on Algorithm 5.1 of GPML
d_Z = np.empty(theta.shape[0])
# XXX: Get rid of the np.diag() in the next line
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7
C = solve(L, W_sr[:, np.newaxis] * K) # Line 8
# Line 9: (use einsum to compute np.diag(C.T.dot(C))))
s_2 = (
-0.5
* (np.diag(K) - np.einsum("ij, ij -> j", C, C))
* (pi * (1 - pi) * (1 - 2 * pi))
) # third derivative
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j] # Line 11
# Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
s_1 = 0.5 * a.T.dot(C).dot(a) - 0.5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi) # Line 13
s_3 = b - K.dot(R.dot(b)) # Line 14
d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15
return Z, d_Z
def latent_mean_and_variance(self, X):
"""Compute the mean and variance of the latent function values.
Based on algorithm 3.2 of [RW2006]_, this function returns the latent
mean (Line 4) and variance (Line 6) of the Gaussian process
classification model.
Note that this function is only supported for binary classification.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated for classification.
Returns
-------
latent_mean : array-like of shape (n_samples,)
Mean of the latent function values at the query points.
latent_var : array-like of shape (n_samples,)
Variance of the latent function values at the query points.
"""
check_is_fitted(self)
# Based on Algorithm 3.2 of GPML
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
latent_mean = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
# Line 6 (compute np.diag(v.T.dot(v)) via einsum)
latent_var = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
return latent_mean, latent_var
def _posterior_mode(self, K, return_temporaries=False):
"""Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
"""
# Based on Algorithm 3.1 of GPML
# If warm_start are enabled, we reuse the last solution for the
# posterior mode as initialization; otherwise, we initialize with 0
if (
self.warm_start
and hasattr(self, "f_cached")
and self.f_cached.shape == self.y_train_.shape
):
f = self.f_cached
else:
f = np.zeros_like(self.y_train_, dtype=np.float64)
# Use Newton's iteration method to find mode of Laplace approximation
log_marginal_likelihood = -np.inf
for _ in range(self.max_iter_predict):
# Line 4
pi = expit(f)
W = pi * (1 - pi)
# Line 5
W_sr = np.sqrt(W)
W_sr_K = W_sr[:, np.newaxis] * K
B = np.eye(W.shape[0]) + W_sr_K * W_sr
L = cholesky(B, lower=True)
# Line 6
b = W * f + (self.y_train_ - pi)
# Line 7
a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
# Line 8
f = K.dot(a)
# Line 10: Compute log marginal likelihood in loop and use as
# convergence criterion
lml = (
-0.5 * a.T.dot(f)
- np.log1p(np.exp(-(self.y_train_ * 2 - 1) * f)).sum()
- np.log(np.diag(L)).sum()
)
# Check if we have converged (log marginal likelihood does
# not decrease)
# XXX: more complex convergence criterion
if lml - log_marginal_likelihood < 1e-10:
break
log_marginal_likelihood = lml
self.f_cached = f # Remember solution for later warm-starts
if return_temporaries:
return log_marginal_likelihood, (pi, W_sr, L, b, a)
else:
return log_marginal_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
opt_res = scipy.optimize.minimize(
obj_func, initial_theta, method="L-BFGS-B", jac=True, bounds=bounds
)
_check_optimize_result("lbfgs", opt_res)
theta_opt, func_min = opt_res.x, opt_res.fun
elif callable(self.optimizer):
theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
|
_BinaryGaussianProcessClassifierLaplace
|
python
|
openai__openai-python
|
src/openai/types/realtime/realtime_response.py
|
{
"start": 1198,
"end": 3826
}
|
class ____(BaseModel):
id: Optional[str] = None
"""The unique ID of the response, will look like `resp_1234`."""
audio: Optional[Audio] = None
"""Configuration for audio output."""
conversation_id: Optional[str] = None
"""
Which conversation the response is added to, determined by the `conversation`
field in the `response.create` event. If `auto`, the response will be added to
the default conversation and the value of `conversation_id` will be an id like
`conv_1234`. If `none`, the response will not be added to any conversation and
the value of `conversation_id` will be `null`. If responses are being triggered
automatically by VAD the response will be added to the default conversation
"""
max_output_tokens: Union[int, Literal["inf"], None] = None
"""
Maximum number of output tokens for a single assistant response, inclusive of
tool calls, that was used in this response.
"""
metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
structured format, and querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
"""
object: Optional[Literal["realtime.response"]] = None
"""The object type, must be `realtime.response`."""
output: Optional[List[ConversationItem]] = None
"""The list of output items generated by the response."""
output_modalities: Optional[List[Literal["text", "audio"]]] = None
"""
The set of modalities the model used to respond, currently the only possible
values are `[\"audio\"]`, `[\"text\"]`. Audio output always include a text
transcript. Setting the output to mode `text` will disable audio output from the
model.
"""
status: Optional[Literal["completed", "cancelled", "failed", "incomplete", "in_progress"]] = None
"""
The final status of the response (`completed`, `cancelled`, `failed`, or
`incomplete`, `in_progress`).
"""
status_details: Optional[RealtimeResponseStatus] = None
"""Additional details about the status."""
usage: Optional[RealtimeResponseUsage] = None
"""Usage statistics for the Response, this will correspond to billing.
A Realtime API session will maintain a conversation context and append new Items
to the Conversation, thus output from previous turns (text and audio tokens)
will become the input for later turns.
"""
|
RealtimeResponse
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/links/test_dataplex.py
|
{
"start": 8647,
"end": 9769
}
|
class ____:
@pytest.mark.db_test
def test_get_link(self, create_task_instance_of_operator, session, mock_supervisor_comms):
expected_url = EXPECTED_DATAPLEX_CATALOG_ENTRY_GROUPS_LINK
link = DataplexCatalogEntryGroupsLink()
ti = create_task_instance_of_operator(
DataplexCatalogCreateEntryGroupOperator,
dag_id="test_link_dag",
task_id="test_link_task",
location=TEST_LOCATION,
entry_group_id=TEST_ENTRY_GROUP_ID,
entry_group_configuration=TEST_ENTRY_GROUP_ID_BODY,
project_id=TEST_PROJECT_ID,
)
session.add(ti)
session.commit()
if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms:
mock_supervisor_comms.send.return_value = XComResult(
key="key",
value={
"location": ti.task.location,
"project_id": ti.task.project_id,
},
)
actual_url = link.get_link(operator=ti.task, ti_key=ti.key)
assert actual_url == expected_url
|
TestDataplexCatalogEntryGroupsLink
|
python
|
apache__airflow
|
providers/presto/src/airflow/providers/presto/hooks/presto.py
|
{
"start": 2893,
"end": 11537
}
|
class ____(DbApiHook):
"""
Interact with Presto through prestodb.
>>> ph = PrestoHook()
>>> sql = "SELECT count(1) AS num FROM airflow.static_babynames"
>>> ph.get_records(sql)
[[340698]]
"""
conn_name_attr = "presto_conn_id"
default_conn_name = "presto_default"
conn_type = "presto"
hook_name = "Presto"
strip_semicolon = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._placeholder: str = "?"
def get_conn(self) -> Connection:
"""Return a connection object."""
db = self.get_connection(self.get_conn_id())
extra = db.extra_dejson
auth = None
if db.password and extra.get("auth") == "kerberos":
raise AirflowException("Kerberos authorization doesn't support password.")
if db.password:
auth = prestodb.auth.BasicAuthentication(db.login, db.password)
elif extra.get("auth") == "kerberos":
auth = prestodb.auth.KerberosAuthentication(
config=extra.get("kerberos__config", os.environ.get("KRB5_CONFIG")),
service_name=extra.get("kerberos__service_name"),
mutual_authentication=_boolify(extra.get("kerberos__mutual_authentication", False)),
force_preemptive=_boolify(extra.get("kerberos__force_preemptive", False)),
hostname_override=extra.get("kerberos__hostname_override"),
sanitize_mutual_error_response=_boolify(
extra.get("kerberos__sanitize_mutual_error_response", True)
),
principal=extra.get("kerberos__principal", conf.get("kerberos", "principal")),
delegate=_boolify(extra.get("kerberos__delegate", False)),
ca_bundle=extra.get("kerberos__ca_bundle"),
)
http_headers = {"X-Presto-Client-Info": generate_presto_client_info()}
presto_conn = prestodb.dbapi.connect(
host=db.host,
port=db.port,
user=db.login,
source=db.extra_dejson.get("source", "airflow"),
http_headers=http_headers,
http_scheme=db.extra_dejson.get("protocol", "http"),
catalog=db.extra_dejson.get("catalog", "hive"),
schema=db.schema,
auth=auth,
isolation_level=self.get_isolation_level(),
)
if extra.get("verify") is not None:
# Unfortunately verify parameter is available via public API.
# The PR is merged in the presto library, but has not been released.
# See: https://github.com/prestosql/presto-python-client/pull/31
presto_conn._http_session.verify = _boolify(extra["verify"])
return presto_conn
@property
def sqlalchemy_url(self) -> URL:
"""Return a `sqlalchemy.engine.URL` object constructed from the connection."""
conn = self.get_connection(self.get_conn_id())
extra = conn.extra_dejson or {}
required_attrs = ["host", "login", "port"]
for attr in required_attrs:
if getattr(conn, attr) is None:
raise ValueError(f"Presto connections error: '{attr}' is missing in the connection")
# adding only when **kwargs are given by user
query = {
k: v
for k, v in {
"schema": conn.schema,
"protocol": extra.get("protocol"),
"source": extra.get("source"),
"catalog": extra.get("catalog"),
}.items()
if v is not None
}
return URL.create(
drivername="presto",
username=conn.login,
password=conn.password or "",
host=str(conn.host),
port=conn.port,
database=extra.get("catalog"),
query=query,
)
def get_uri(self) -> str:
"""Return a SQLAlchemy engine URL as a string."""
return self.sqlalchemy_url.render_as_string(hide_password=False)
def get_isolation_level(self) -> Any:
"""Return an isolation level."""
db = self.get_connection(self.get_conn_id())
isolation_level = db.extra_dejson.get("isolation_level", "AUTOCOMMIT").upper()
return getattr(IsolationLevel, isolation_level, IsolationLevel.AUTOCOMMIT)
def get_records(
self,
sql: str | list[str] = "",
parameters: Iterable | Mapping[str, Any] | None = None,
) -> Any:
if not isinstance(sql, str):
raise ValueError(f"The sql in Presto Hook must be a string and is {sql}!")
try:
return super().get_records(self.strip_sql_string(sql), parameters)
except DatabaseError as e:
raise PrestoException(e)
def get_first(
self, sql: str | list[str] = "", parameters: Iterable | Mapping[str, Any] | None = None
) -> Any:
if not isinstance(sql, str):
raise ValueError(f"The sql in Presto Hook must be a string and is {sql}!")
try:
return super().get_first(self.strip_sql_string(sql), parameters)
except DatabaseError as e:
raise PrestoException(e)
def _get_pandas_df(self, sql: str = "", parameters=None, **kwargs):
try:
import pandas as pd
except ImportError:
raise AirflowOptionalProviderFeatureException(
"Pandas is not installed. Please install it with `pip install pandas`."
)
cursor = self.get_cursor()
try:
cursor.execute(self.strip_sql_string(sql), parameters)
data = cursor.fetchall()
except DatabaseError as e:
raise PrestoException(e)
column_descriptions = cursor.description
if data:
df = pd.DataFrame(data, **kwargs)
df.rename(columns={n: c[0] for n, c in zip(df.columns, column_descriptions)}, inplace=True)
else:
df = pd.DataFrame(**kwargs)
return df
def _get_polars_df(self, sql: str = "", parameters=None, **kwargs):
try:
import polars as pl
except ImportError:
raise AirflowOptionalProviderFeatureException(
"Polars is not installed. Please install it with `pip install polars`."
)
cursor = self.get_cursor()
try:
cursor.execute(self.strip_sql_string(sql), parameters)
data = cursor.fetchall()
except DatabaseError as e:
raise PrestoException(e)
column_descriptions = cursor.description
if data:
df = pl.DataFrame(
data,
schema=[c[0] for c in column_descriptions],
orient="row",
**kwargs,
)
else:
df = pl.DataFrame(**kwargs)
return df
@deprecated(
reason="Replaced by function `get_df`.",
category=AirflowProviderDeprecationWarning,
action="ignore",
)
def get_pandas_df(self, sql: str = "", parameters=None, **kwargs):
return self._get_pandas_df(sql, parameters, **kwargs)
def insert_rows(
self,
table: str,
rows: Iterable[tuple],
target_fields: Iterable[str] | None = None,
commit_every: int = 0,
replace: bool = False,
**kwargs,
) -> None:
"""
Insert a set of tuples into a table.
:param table: Name of the target table
:param rows: The rows to insert into the table
:param target_fields: The names of the columns to fill in the table
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:param replace: Whether to replace instead of insert
"""
if self.get_isolation_level() == IsolationLevel.AUTOCOMMIT:
self.log.info(
"Transactions are not enable in presto connection. "
"Please use the isolation_level property to enable it. "
"Falling back to insert all rows in one transaction."
)
commit_every = 0
super().insert_rows(table, rows, target_fields, commit_every)
@staticmethod
def _serialize_cell(cell: Any, conn: Connection | None = None) -> Any:
"""
Presto will adapt all execute() args internally, hence we return cell without any conversion.
:param cell: The cell to insert into the table
:param conn: The database connection
:return: The cell
"""
return cell
|
PrestoHook
|
python
|
instagram__MonkeyType
|
tests/test_typing.py
|
{
"start": 17660,
"end": 20175
}
|
class ____:
@pytest.mark.parametrize(
'typ, expected',
[
(TypedDict(DUMMY_TYPED_DICT_NAME, {'a': int, 'b': int}), True),
(Dict[str, int], False),
# Regression test.
(lambda x: x, False),
],
)
def test_is_typed_dict(self, typ, expected):
assert is_typed_dict(typ) == expected
@pytest.mark.parametrize(
'type1, type2, expected_value',
[
(
TypedDict(DUMMY_TYPED_DICT_NAME, {'a': int, 'b': int}),
TypedDict(DUMMY_TYPED_DICT_NAME, {'a': int, 'b': int}),
True,
),
(
TypedDict(DUMMY_TYPED_DICT_NAME, {'a': int, 'b': int}, total=False),
TypedDict(DUMMY_TYPED_DICT_NAME, {'a': int, 'b': int}),
False,
),
(
TypedDict(DUMMY_TYPED_DICT_NAME, {'a': int, 'b': int}),
Dict[str, int],
False,
),
(
Dict[str, int],
TypedDict(DUMMY_TYPED_DICT_NAME, {'a': int, 'b': int}),
False,
),
(Dict[str, int], Dict[str, int], True),
# Recursive equality checks.
(
TypedDict(DUMMY_TYPED_DICT_NAME,
{'a': TypedDict(DUMMY_TYPED_DICT_NAME, {'a': int, 'b': str})}),
TypedDict(DUMMY_TYPED_DICT_NAME,
{'a': TypedDict(DUMMY_TYPED_DICT_NAME, {'a': int, 'b': str})}),
True,
),
(
TypedDict(DUMMY_TYPED_DICT_NAME,
{'a': TypedDict(DUMMY_TYPED_DICT_NAME, {'a': int, 'b': str})}),
TypedDict(DUMMY_TYPED_DICT_NAME,
{'a': TypedDict(DUMMY_TYPED_DICT_NAME, {'a': str, 'b': str})}),
False,
),
],
)
def test_are_dict_types_equal(self, type1, type2, expected_value):
assert (type1 == type2) == expected_value
def helper() -> None:
pass
def generator() -> Iterator[int]:
yield 1
def get_default_dict(key, value):
m = defaultdict(lambda: 1)
m[key] += value
return m
def get_nested_default_dict(key, value):
m = defaultdict(lambda: defaultdict(lambda: 1))
m[key][key] += value
return m
def get_default_dict_with_dict(key, value):
m = defaultdict(lambda: {'a': 1, 'b': 2})
m[key]['a'] = value
return m
|
TestTypedDictHelpers
|
python
|
pytorch__pytorch
|
torch/ao/quantization/observer.py
|
{
"start": 64180,
"end": 64530
}
|
class ____(Granularity):
"""
Represents per-block granularity in quantization. See
:func:`~torchao.quantization.quant_primitives.quantize_affine` for docs for
`block_size`
Attributes:
block_size (Tuple[int, ...]): The size of each quantization group
"""
block_size: tuple[int, ...]
@dataclass(frozen=True)
|
PerBlock
|
python
|
encode__django-rest-framework
|
tests/test_routers.py
|
{
"start": 10579,
"end": 11105
}
|
class ____(TestCase):
def setUp(self):
class NoteViewSet(viewsets.ModelViewSet):
queryset = RouterTestModel.objects.all()
self.router = SimpleRouter()
self.router.register(r'notes', NoteViewSet)
self.urls = self.router.urls
def test_urls_have_trailing_slash_by_default(self):
expected = ['^notes/$', '^notes/(?P<pk>[^/.]+)/$']
for idx in range(len(expected)):
assert expected[idx] == self.urls[idx].pattern.regex.pattern
|
TestTrailingSlashIncluded
|
python
|
huggingface__transformers
|
src/transformers/models/wavlm/modeling_wavlm.py
|
{
"start": 62000,
"end": 63613
}
|
class ____(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]
self.out_conv_dim = config.tdnn_dim[layer_id]
self.kernel_size = config.tdnn_kernel[layer_id]
self.dilation = config.tdnn_dilation[layer_id]
self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)
self.activation = nn.ReLU()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
if is_peft_available():
from peft.tuners.lora import LoraLayer
if is_peft_available():
if isinstance(self.kernel, LoraLayer):
warnings.warn(
"Detected LoRA on TDNNLayer. LoRA weights won't be applied due to optimization. "
"You should exclude TDNNLayer from LoRA's target modules.",
)
# for backward compatibility, we keep nn.Linear but call F.conv1d for speed up
hidden_states = hidden_states.transpose(1, 2)
weight = self.kernel.weight.view(self.out_conv_dim, self.kernel_size, self.in_conv_dim).transpose(1, 2)
hidden_states = nn.functional.conv1d(hidden_states, weight, self.kernel.bias, dilation=self.dilation)
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.activation(hidden_states)
return hidden_states
@auto_docstring(
custom_intro="""
WavLM Model with an XVector feature extraction head on top for tasks like Speaker Verification.
"""
)
|
TDNNLayer
|
python
|
ansible__ansible
|
lib/ansible/module_utils/facts/hardware/darwin.py
|
{
"start": 5828,
"end": 5936
}
|
class ____(HardwareCollector):
_fact_class = DarwinHardware
_platform = 'Darwin'
|
DarwinHardwareCollector
|
python
|
numpy__numpy
|
benchmarks/benchmarks/bench_app.py
|
{
"start": 1436,
"end": 2669
}
|
class ____(Benchmark):
def setup(self):
np.random.seed(1)
nsubj = 5
nfeat = 100
ntime = 200
self.arrays = [np.random.normal(size=(ntime, nfeat))
for i in range(nsubj)]
def maxes_of_dots(self, arrays):
"""
A magical feature score for each feature in each dataset
:ref:`Haxby et al., Neuron (2011) <HGC+11>`.
If arrays are column-wise zscore-d before computation it
results in characterizing each column in each array with
sum of maximal correlations of that column with columns
in other arrays.
Arrays must agree only on the first dimension.
Numpy uses this as a simultaneous benchmark of 1) dot products
and 2) max(<array>, axis=<int>).
"""
feature_scores = ([0] * len(arrays))
for (i, sd) in enumerate(arrays):
for (j, sd2) in enumerate(arrays[(i + 1):]):
corr_temp = np.dot(sd.T, sd2)
feature_scores[i] += np.max(corr_temp, axis=1)
feature_scores[((j + i) + 1)] += np.max(corr_temp, axis=0)
return feature_scores
def time_it(self):
self.maxes_of_dots(self.arrays)
|
MaxesOfDots
|
python
|
sqlalchemy__sqlalchemy
|
test/ext/test_indexable.py
|
{
"start": 726,
"end": 4951
}
|
class ____(fixtures.TestBase):
def test_array(self):
Base = declarative_base()
class A(Base):
__tablename__ = "a"
id = Column("id", Integer, primary_key=True)
array = Column("_array", ARRAY(Integer), default=[])
first = index_property("array", 0)
tenth = index_property("array", 9)
a = A(array=[1, 2, 3])
eq_(a.first, 1)
assert_raises(AttributeError, lambda: a.tenth)
a.first = 100
eq_(a.first, 100)
eq_(a.array, [100, 2, 3])
del a.first
eq_(a.first, 2)
a2 = A(first=5)
eq_(a2.first, 5)
eq_(a2.array, [5])
def test_array_longinit(self):
Base = declarative_base()
class A(Base):
__tablename__ = "a"
id = Column("id", Integer, primary_key=True)
array = Column("_array", ARRAY(Integer), default=[])
first = index_property("array", 0)
fifth = index_property("array", 4)
a1 = A(fifth=10)
a2 = A(first=5)
eq_(a1.array, [None, None, None, None, 10])
eq_(a2.array, [5])
assert_raises(IndexError, setattr, a2, "fifth", 10)
def test_json(self):
Base = declarative_base()
class J(Base):
__tablename__ = "j"
id = Column("id", Integer, primary_key=True)
json = Column("_json", JSON, default={})
field = index_property("json", "field")
j = J(json={"a": 1, "b": 2})
assert_raises(AttributeError, lambda: j.field)
j.field = "test"
eq_(j.field, "test")
eq_(j.json, {"a": 1, "b": 2, "field": "test"})
j2 = J(field="test")
eq_(j2.json, {"field": "test"})
eq_(j2.field, "test")
def test_value_is_none_attributeerror(self):
Base = declarative_base()
class A(Base):
__tablename__ = "a"
id = Column("id", Integer, primary_key=True)
array = Column("_array", ARRAY(Integer))
first = index_property("array", 1)
a = A()
assert_raises(AttributeError, getattr, a, "first")
assert_raises(AttributeError, delattr, a, "first")
def test_get_attribute_error(self):
Base = declarative_base()
class A(Base):
__tablename__ = "a"
id = Column("id", Integer, primary_key=True)
array = Column("_array", ARRAY(Integer))
first = index_property("array", 1)
a = A(array=[])
assert_raises(AttributeError, lambda: a.first)
def test_set_immutable(self):
Base = declarative_base()
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
array = Column(ARRAY(Integer))
first = index_property("array", 1, mutable=False)
a = A()
def set_():
a.first = 10
assert_raises(AttributeError, set_)
def test_set_mutable_dict(self):
Base = declarative_base()
class J(Base):
__tablename__ = "j"
id = Column(Integer, primary_key=True)
json = Column(JSON, default={})
field = index_property("json", "field")
j = J()
j.field = 10
j.json = {}
assert_raises(AttributeError, lambda: j.field)
assert_raises(AttributeError, delattr, j, "field")
j.field = 10
eq_(j.field, 10)
def test_get_default_value(self):
Base = declarative_base()
class J(Base):
__tablename__ = "j"
id = Column(Integer, primary_key=True)
json = Column(JSON, default={})
default = index_property("json", "field", default="default")
none = index_property("json", "field", default=None)
j = J()
assert j.json is None
assert j.default == "default"
assert j.none is None
j.json = {}
assert j.default == "default"
assert j.none is None
j.default = None
assert j.default is None
assert j.none is None
j.none = 10
assert j.default == 10
assert j.none == 10
|
IndexPropertyTest
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_persistent_volume.py
|
{
"start": 383,
"end": 7376
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1PersistentVolumeSpec',
'status': 'V1PersistentVolumeStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1PersistentVolume - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1PersistentVolume. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1PersistentVolume. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1PersistentVolume.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1PersistentVolume. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1PersistentVolume. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1PersistentVolume. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1PersistentVolume.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1PersistentVolume. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1PersistentVolume. # noqa: E501
:return: The metadata of this V1PersistentVolume. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1PersistentVolume.
:param metadata: The metadata of this V1PersistentVolume. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1PersistentVolume. # noqa: E501
:return: The spec of this V1PersistentVolume. # noqa: E501
:rtype: V1PersistentVolumeSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1PersistentVolume.
:param spec: The spec of this V1PersistentVolume. # noqa: E501
:type: V1PersistentVolumeSpec
"""
self._spec = spec
@property
def status(self):
"""Gets the status of this V1PersistentVolume. # noqa: E501
:return: The status of this V1PersistentVolume. # noqa: E501
:rtype: V1PersistentVolumeStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1PersistentVolume.
:param status: The status of this V1PersistentVolume. # noqa: E501
:type: V1PersistentVolumeStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PersistentVolume):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PersistentVolume):
return True
return self.to_dict() != other.to_dict()
|
V1PersistentVolume
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/flags_test/packages/x/package.py
|
{
"start": 216,
"end": 480
}
|
class ____(Package):
version("1.1")
version("1.0")
variant("activatemultiflag", default=False)
depends_on('y cflags="-d1"', when="~activatemultiflag")
depends_on('y cflags="-d1 -d2"', when="+activatemultiflag")
depends_on("c", type="build")
|
X
|
python
|
sympy__sympy
|
sympy/printing/latex.py
|
{
"start": 4521,
"end": 124935
}
|
class ____(Printer):
printmethod = "_latex"
_default_settings: dict[str, Any] = {
"full_prec": False,
"fold_frac_powers": False,
"fold_func_brackets": False,
"fold_short_frac": None,
"inv_trig_style": "abbreviated",
"itex": False,
"ln_notation": False,
"long_frac_ratio": None,
"mat_delim": "[",
"mat_str": None,
"mode": "plain",
"mul_symbol": None,
"order": None,
"symbol_names": {},
"root_notation": True,
"mat_symbol_style": "plain",
"imaginary_unit": "i",
"gothic_re_im": False,
"decimal_separator": "period",
"perm_cyclic": True,
"parenthesize_super": True,
"min": None,
"max": None,
"diff_operator": "d",
"adjoint_style": "dagger",
"disable_split_super_sub": False,
}
def __init__(self, settings=None):
Printer.__init__(self, settings)
if 'mode' in self._settings:
valid_modes = ['inline', 'plain', 'equation',
'equation*']
if self._settings['mode'] not in valid_modes:
raise ValueError("'mode' must be one of 'inline', 'plain', "
"'equation' or 'equation*'")
if self._settings['fold_short_frac'] is None and \
self._settings['mode'] == 'inline':
self._settings['fold_short_frac'] = True
mul_symbol_table = {
None: r" ",
"ldot": r" \,.\, ",
"dot": r" \cdot ",
"times": r" \times "
}
try:
self._settings['mul_symbol_latex'] = \
mul_symbol_table[self._settings['mul_symbol']]
except KeyError:
self._settings['mul_symbol_latex'] = \
self._settings['mul_symbol']
try:
self._settings['mul_symbol_latex_numbers'] = \
mul_symbol_table[self._settings['mul_symbol'] or 'dot']
except KeyError:
if (self._settings['mul_symbol'].strip() in
['', ' ', '\\', '\\,', '\\:', '\\;', '\\quad']):
self._settings['mul_symbol_latex_numbers'] = \
mul_symbol_table['dot']
else:
self._settings['mul_symbol_latex_numbers'] = \
self._settings['mul_symbol']
self._delim_dict = {'(': ')', '[': ']'}
imaginary_unit_table = {
None: r"i",
"i": r"i",
"ri": r"\mathrm{i}",
"ti": r"\text{i}",
"j": r"j",
"rj": r"\mathrm{j}",
"tj": r"\text{j}",
}
imag_unit = self._settings['imaginary_unit']
self._settings['imaginary_unit_latex'] = imaginary_unit_table.get(imag_unit, imag_unit)
diff_operator_table = {
None: r"d",
"d": r"d",
"rd": r"\mathrm{d}",
"td": r"\text{d}",
}
diff_operator = self._settings['diff_operator']
self._settings["diff_operator_latex"] = diff_operator_table.get(diff_operator, diff_operator)
def _add_parens(self, s) -> str:
return r"\left({}\right)".format(s)
# TODO: merge this with the above, which requires a lot of test changes
def _add_parens_lspace(self, s) -> str:
return r"\left( {}\right)".format(s)
def parenthesize(self, item, level, is_neg=False, strict=False) -> str:
prec_val = precedence_traditional(item)
if is_neg and strict:
return self._add_parens(self._print(item))
if (prec_val < level) or ((not strict) and prec_val <= level):
return self._add_parens(self._print(item))
else:
return self._print(item)
def parenthesize_super(self, s):
"""
Protect superscripts in s
If the parenthesize_super option is set, protect with parentheses, else
wrap in braces.
"""
if "^" in s:
if self._settings['parenthesize_super']:
return self._add_parens(s)
else:
return "{{{}}}".format(s)
return s
def doprint(self, expr) -> str:
tex = Printer.doprint(self, expr)
if self._settings['mode'] == 'plain':
return tex
elif self._settings['mode'] == 'inline':
return r"$%s$" % tex
elif self._settings['itex']:
return r"$$%s$$" % tex
else:
env_str = self._settings['mode']
return r"\begin{%s}%s\end{%s}" % (env_str, tex, env_str)
def _needs_brackets(self, expr) -> bool:
"""
Returns True if the expression needs to be wrapped in brackets when
printed, False otherwise. For example: a + b => True; a => False;
10 => False; -10 => True.
"""
return not ((expr.is_Integer and expr.is_nonnegative)
or (expr.is_Atom and (expr is not S.NegativeOne
and expr.is_Rational is False)))
def _needs_function_brackets(self, expr) -> bool:
"""
Returns True if the expression needs to be wrapped in brackets when
passed as an argument to a function, False otherwise. This is a more
liberal version of _needs_brackets, in that many expressions which need
to be wrapped in brackets when added/subtracted/raised to a power do
not need them when passed to a function. Such an example is a*b.
"""
if not self._needs_brackets(expr):
return False
else:
# Muls of the form a*b*c... can be folded
if expr.is_Mul and not self._mul_is_clean(expr):
return True
# Pows which don't need brackets can be folded
elif expr.is_Pow and not self._pow_is_clean(expr):
return True
# Add and Function always need brackets
elif expr.is_Add or expr.is_Function:
return True
else:
return False
def _needs_mul_brackets(self, expr, first=False, last=False) -> bool:
"""
Returns True if the expression needs to be wrapped in brackets when
printed as part of a Mul, False otherwise. This is True for Add,
but also for some container objects that would not need brackets
when appearing last in a Mul, e.g. an Integral. ``last=True``
specifies that this expr is the last to appear in a Mul.
``first=True`` specifies that this expr is the first to appear in
a Mul.
"""
from sympy.concrete.products import Product
from sympy.concrete.summations import Sum
from sympy.integrals.integrals import Integral
if expr.is_Mul:
if not first and expr.could_extract_minus_sign():
return True
elif precedence_traditional(expr) < PRECEDENCE["Mul"]:
return True
elif expr.is_Relational:
return True
if expr.is_Piecewise:
return True
if any(expr.has(x) for x in (Mod,)):
return True
if (not last and
any(expr.has(x) for x in (Integral, Product, Sum))):
return True
return False
def _needs_add_brackets(self, expr) -> bool:
"""
Returns True if the expression needs to be wrapped in brackets when
printed as part of an Add, False otherwise. This is False for most
things.
"""
if expr.is_Relational:
return True
if any(expr.has(x) for x in (Mod,)):
return True
if expr.is_Add:
return True
return False
def _mul_is_clean(self, expr) -> bool:
for arg in expr.args:
if arg.is_Function:
return False
return True
def _pow_is_clean(self, expr) -> bool:
return not self._needs_brackets(expr.base)
def _do_exponent(self, expr: str, exp):
if exp is not None:
return r"\left(%s\right)^{%s}" % (expr, exp)
else:
return expr
def _print_Basic(self, expr):
name = self._deal_with_super_sub(expr.__class__.__name__)
if expr.args:
ls = [self._print(o) for o in expr.args]
s = r"\operatorname{{{}}}\left({}\right)"
return s.format(name, ", ".join(ls))
else:
return r"\text{{{}}}".format(name)
def _print_bool(self, e: bool | BooleanTrue | BooleanFalse):
return r"\text{%s}" % e
_print_BooleanTrue = _print_bool
_print_BooleanFalse = _print_bool
def _print_NoneType(self, e):
return r"\text{%s}" % e
def _print_Add(self, expr, order=None):
terms = self._as_ordered_terms(expr, order=order)
tex = ""
for i, term in enumerate(terms):
if i == 0:
pass
elif term.could_extract_minus_sign():
tex += " - "
term = -term
else:
tex += " + "
term_tex = self._print(term)
if self._needs_add_brackets(term):
term_tex = r"\left(%s\right)" % term_tex
tex += term_tex
return tex
def _print_Cycle(self, expr):
from sympy.combinatorics.permutations import Permutation
if expr.size == 0:
return r"\left( \right)"
expr = Permutation(expr)
expr_perm = expr.cyclic_form
siz = expr.size
if expr.array_form[-1] == siz - 1:
expr_perm = expr_perm + [[siz - 1]]
term_tex = ''
for i in expr_perm:
term_tex += str(i).replace(',', r"\;")
term_tex = term_tex.replace('[', r"\left( ")
term_tex = term_tex.replace(']', r"\right)")
return term_tex
def _print_Permutation(self, expr):
from sympy.combinatorics.permutations import Permutation
from sympy.utilities.exceptions import sympy_deprecation_warning
perm_cyclic = Permutation.print_cyclic
if perm_cyclic is not None:
sympy_deprecation_warning(
f"""
Setting Permutation.print_cyclic is deprecated. Instead use
init_printing(perm_cyclic={perm_cyclic}).
""",
deprecated_since_version="1.6",
active_deprecations_target="deprecated-permutation-print_cyclic",
stacklevel=8,
)
else:
perm_cyclic = self._settings.get("perm_cyclic", True)
if perm_cyclic:
return self._print_Cycle(expr)
if expr.size == 0:
return r"\left( \right)"
lower = [self._print(arg) for arg in expr.array_form]
upper = [self._print(arg) for arg in range(len(lower))]
row1 = " & ".join(upper)
row2 = " & ".join(lower)
mat = r" \\ ".join((row1, row2))
return r"\begin{pmatrix} %s \end{pmatrix}" % mat
def _print_AppliedPermutation(self, expr):
perm, var = expr.args
return r"\sigma_{%s}(%s)" % (self._print(perm), self._print(var))
def _print_Float(self, expr):
# Based off of that in StrPrinter
dps = prec_to_dps(expr._prec)
strip = False if self._settings['full_prec'] else True
low = self._settings["min"] if "min" in self._settings else None
high = self._settings["max"] if "max" in self._settings else None
str_real = mlib_to_str(expr._mpf_, dps, strip_zeros=strip, min_fixed=low, max_fixed=high)
# Must always have a mul symbol (as 2.5 10^{20} just looks odd)
# thus we use the number separator
separator = self._settings['mul_symbol_latex_numbers']
if 'e' in str_real:
(mant, exp) = str_real.split('e')
if exp[0] == '+':
exp = exp[1:]
if self._settings['decimal_separator'] == 'comma':
mant = mant.replace('.','{,}')
return r"%s%s10^{%s}" % (mant, separator, exp)
elif str_real == "+inf":
return r"\infty"
elif str_real == "-inf":
return r"- \infty"
else:
if self._settings['decimal_separator'] == 'comma':
str_real = str_real.replace('.','{,}')
return str_real
def _print_Cross(self, expr):
vec1 = expr._expr1
vec2 = expr._expr2
return r"%s \times %s" % (self.parenthesize(vec1, PRECEDENCE['Mul']),
self.parenthesize(vec2, PRECEDENCE['Mul']))
def _print_Curl(self, expr):
vec = expr._expr
return r"\nabla\times %s" % self.parenthesize(vec, PRECEDENCE['Mul'])
def _print_Divergence(self, expr):
vec = expr._expr
return r"\nabla\cdot %s" % self.parenthesize(vec, PRECEDENCE['Mul'])
def _print_Dot(self, expr):
vec1 = expr._expr1
vec2 = expr._expr2
return r"%s \cdot %s" % (self.parenthesize(vec1, PRECEDENCE['Mul']),
self.parenthesize(vec2, PRECEDENCE['Mul']))
def _print_Gradient(self, expr):
func = expr._expr
return r"\nabla %s" % self.parenthesize(func, PRECEDENCE['Mul'])
def _print_Laplacian(self, expr):
func = expr._expr
return r"\Delta %s" % self.parenthesize(func, PRECEDENCE['Mul'])
def _print_Mul(self, expr: Expr):
from sympy.simplify import fraction
separator: str = self._settings['mul_symbol_latex']
numbersep: str = self._settings['mul_symbol_latex_numbers']
def convert(expr) -> str:
if not expr.is_Mul:
return str(self._print(expr))
else:
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
args = list(expr.args)
# If there are quantities or prefixes, append them at the back.
units, nonunits = sift(args, lambda x: (hasattr(x, "_scale_factor") or hasattr(x, "is_physical_constant")) or
(isinstance(x, Pow) and
hasattr(x.base, "is_physical_constant")), binary=True)
prefixes, units = sift(units, lambda x: hasattr(x, "_scale_factor"), binary=True)
return convert_args(nonunits + prefixes + units)
def convert_args(args) -> str:
_tex = last_term_tex = ""
for i, term in enumerate(args):
term_tex = self._print(term)
if not (hasattr(term, "_scale_factor") or hasattr(term, "is_physical_constant")):
if self._needs_mul_brackets(term, first=(i == 0),
last=(i == len(args) - 1)):
term_tex = r"\left(%s\right)" % term_tex
if _between_two_numbers_p[0].search(last_term_tex) and \
_between_two_numbers_p[1].match(term_tex):
# between two numbers
_tex += numbersep
elif _tex:
_tex += separator
elif _tex:
_tex += separator
_tex += term_tex
last_term_tex = term_tex
return _tex
# Check for unevaluated Mul. In this case we need to make sure the
# identities are visible, multiple Rational factors are not combined
# etc so we display in a straight-forward form that fully preserves all
# args and their order.
# XXX: _print_Pow calls this routine with instances of Pow...
if isinstance(expr, Mul):
args = expr.args
if args[0] is S.One or any(isinstance(arg, Number) for arg in args[1:]):
return convert_args(args)
include_parens = False
if expr.could_extract_minus_sign():
expr = -expr
tex = "- "
if expr.is_Add:
tex += "("
include_parens = True
else:
tex = ""
numer, denom = fraction(expr, exact=True)
if denom is S.One and Pow(1, -1, evaluate=False) not in expr.args:
# use the original expression here, since fraction() may have
# altered it when producing numer and denom
tex += convert(expr)
else:
snumer = convert(numer)
sdenom = convert(denom)
ldenom = len(sdenom.split())
ratio = self._settings['long_frac_ratio']
if self._settings['fold_short_frac'] and ldenom <= 2 and \
"^" not in sdenom:
# handle short fractions
if self._needs_mul_brackets(numer, last=False):
tex += r"\left(%s\right) / %s" % (snumer, sdenom)
else:
tex += r"%s / %s" % (snumer, sdenom)
elif ratio is not None and \
len(snumer.split()) > ratio*ldenom:
# handle long fractions
if self._needs_mul_brackets(numer, last=True):
tex += r"\frac{1}{%s}%s\left(%s\right)" \
% (sdenom, separator, snumer)
elif numer.is_Mul:
# split a long numerator
a = S.One
b = S.One
for x in numer.args:
if self._needs_mul_brackets(x, last=False) or \
len(convert(a*x).split()) > ratio*ldenom or \
(b.is_commutative is x.is_commutative is False):
b *= x
else:
a *= x
if self._needs_mul_brackets(b, last=True):
tex += r"\frac{%s}{%s}%s\left(%s\right)" \
% (convert(a), sdenom, separator, convert(b))
else:
tex += r"\frac{%s}{%s}%s%s" \
% (convert(a), sdenom, separator, convert(b))
else:
tex += r"\frac{1}{%s}%s%s" % (sdenom, separator, snumer)
else:
tex += r"\frac{%s}{%s}" % (snumer, sdenom)
if include_parens:
tex += ")"
return tex
def _print_AlgebraicNumber(self, expr):
if expr.is_aliased:
return self._print(expr.as_poly().as_expr())
else:
return self._print(expr.as_expr())
def _print_PrimeIdeal(self, expr):
p = self._print(expr.p)
if expr.is_inert:
return rf'\left({p}\right)'
alpha = self._print(expr.alpha.as_expr())
return rf'\left({p}, {alpha}\right)'
def _print_Pow(self, expr: Pow):
# Treat x**Rational(1,n) as special case
if expr.exp.is_Rational:
p: int = expr.exp.p # type: ignore
q: int = expr.exp.q # type: ignore
if abs(p) == 1 and q != 1 and self._settings['root_notation']:
base = self._print(expr.base)
if q == 2:
tex = r"\sqrt{%s}" % base
elif self._settings['itex']:
tex = r"\root{%d}{%s}" % (q, base)
else:
tex = r"\sqrt[%d]{%s}" % (q, base)
if expr.exp.is_negative:
return r"\frac{1}{%s}" % tex
else:
return tex
elif self._settings['fold_frac_powers'] and q != 1:
base = self.parenthesize(expr.base, PRECEDENCE['Pow'])
# issue #12886: add parentheses for superscripts raised to powers
if expr.base.is_Symbol:
base = self.parenthesize_super(base)
if expr.base.is_Function:
return self._print(expr.base, exp="%s/%s" % (p, q))
return r"%s^{%s/%s}" % (base, p, q)
elif expr.exp.is_negative and expr.base.is_commutative:
# special case for 1^(-x), issue 9216
if expr.base == 1:
return r"%s^{%s}" % (expr.base, expr.exp)
# special case for (1/x)^(-y) and (-1/-x)^(-y), issue 20252
if expr.base.is_Rational:
base_p: int = expr.base.p # type: ignore
base_q: int = expr.base.q # type: ignore
if base_p * base_q == abs(base_q):
if expr.exp == -1:
return r"\frac{1}{\frac{%s}{%s}}" % (base_p, base_q)
else:
return r"\frac{1}{(\frac{%s}{%s})^{%s}}" % (base_p, base_q, abs(expr.exp))
# things like 1/x
return self._print_Mul(expr)
if expr.base.is_Function:
return self._print(expr.base, exp=self._print(expr.exp))
tex = r"%s^{%s}"
return self._helper_print_standard_power(expr, tex)
def _helper_print_standard_power(self, expr, template: str) -> str:
exp = self._print(expr.exp)
# issue #12886: add parentheses around superscripts raised
# to powers
base = self.parenthesize(expr.base, PRECEDENCE['Pow'])
if expr.base.is_Symbol:
base = self.parenthesize_super(base)
elif expr.base.is_Float:
base = r"{%s}" % base
elif (isinstance(expr.base, Derivative)
and base.startswith(r'\left(')
and re.match(r'\\left\(\\d?d?dot', base)
and base.endswith(r'\right)')):
# don't use parentheses around dotted derivative
base = base[6: -7] # remove outermost added parens
return template % (base, exp)
def _print_UnevaluatedExpr(self, expr):
return self._print(expr.args[0])
def _print_Sum(self, expr):
if len(expr.limits) == 1:
tex = r"\sum_{%s=%s}^{%s} " % \
tuple([self._print(i) for i in expr.limits[0]])
else:
def _format_ineq(l):
return r"%s \leq %s \leq %s" % \
tuple([self._print(s) for s in (l[1], l[0], l[2])])
tex = r"\sum_{\substack{%s}} " % \
str.join('\\\\', [_format_ineq(l) for l in expr.limits])
if isinstance(expr.function, Add):
tex += r"\left(%s\right)" % self._print(expr.function)
else:
tex += self._print(expr.function)
return tex
def _print_Product(self, expr):
if len(expr.limits) == 1:
tex = r"\prod_{%s=%s}^{%s} " % \
tuple([self._print(i) for i in expr.limits[0]])
else:
def _format_ineq(l):
return r"%s \leq %s \leq %s" % \
tuple([self._print(s) for s in (l[1], l[0], l[2])])
tex = r"\prod_{\substack{%s}} " % \
str.join('\\\\', [_format_ineq(l) for l in expr.limits])
if isinstance(expr.function, Add):
tex += r"\left(%s\right)" % self._print(expr.function)
else:
tex += self._print(expr.function)
return tex
def _print_BasisDependent(self, expr: 'BasisDependent'):
from sympy.vector import Vector
o1: list[str] = []
if expr == expr.zero:
return expr.zero._latex_form
if isinstance(expr, Vector):
items = expr.separate().items()
else:
items = [(0, expr)]
for system, vect in items:
inneritems = list(vect.components.items())
inneritems.sort(key=lambda x: x[0].__str__())
for k, v in inneritems:
if v == 1:
o1.append(' + ' + k._latex_form)
elif v == -1:
o1.append(' - ' + k._latex_form)
else:
arg_str = r'\left(' + self._print(v) + r'\right)'
o1.append(' + ' + arg_str + k._latex_form)
outstr = (''.join(o1))
if outstr[1] != '-':
outstr = outstr[3:]
else:
outstr = outstr[1:]
return outstr
def _print_Indexed(self, expr):
tex_base = self._print(expr.base)
tex = '{'+tex_base+'}'+'_{%s}' % ','.join(
map(self._print, expr.indices))
return tex
def _print_IndexedBase(self, expr):
return self._print(expr.label)
def _print_Idx(self, expr):
label = self._print(expr.label)
if expr.upper is not None:
upper = self._print(expr.upper)
if expr.lower is not None:
lower = self._print(expr.lower)
else:
lower = self._print(S.Zero)
interval = '{lower}\\mathrel{{..}}\\nobreak {upper}'.format(
lower = lower, upper = upper)
return '{{{label}}}_{{{interval}}}'.format(
label = label, interval = interval)
#if no bounds are defined this just prints the label
return label
def _print_Derivative(self, expr):
if requires_partial(expr.expr):
diff_symbol = r'\partial'
else:
diff_symbol = self._settings["diff_operator_latex"]
tex = ""
dim = 0
for x, num in reversed(expr.variable_count):
dim += num
if num == 1:
tex += r"%s %s" % (diff_symbol, self._print(x))
else:
tex += r"%s %s^{%s}" % (diff_symbol,
self.parenthesize_super(self._print(x)),
self._print(num))
if dim == 1:
tex = r"\frac{%s}{%s}" % (diff_symbol, tex)
else:
tex = r"\frac{%s^{%s}}{%s}" % (diff_symbol, self._print(dim), tex)
precedence = PRECEDENCE["Mul"]
if self._settings['mul_symbol']:
# Nudge up the precedence so d/dx (f(x) * g(x)) also gets parenthesized
precedence += 1
if any(i.could_extract_minus_sign() for i in expr.args):
return r"%s %s" % (tex, self.parenthesize(expr.expr,
precedence,
is_neg=True,
strict=True))
return r"%s %s" % (tex, self.parenthesize(expr.expr,
precedence,
is_neg=False,
strict=True))
def _print_Subs(self, subs):
expr, old, new = subs.args
latex_expr = self._print(expr)
latex_old = (self._print(e) for e in old)
latex_new = (self._print(e) for e in new)
latex_subs = r'\\ '.join(
e[0] + '=' + e[1] for e in zip(latex_old, latex_new))
return r'\left. %s \right|_{\substack{ %s }}' % (latex_expr,
latex_subs)
def _print_Integral(self, expr):
tex, symbols = "", []
diff_symbol = self._settings["diff_operator_latex"]
# Only up to \iiiint exists
if len(expr.limits) <= 4 and all(len(lim) == 1 for lim in expr.limits):
# Use len(expr.limits)-1 so that syntax highlighters don't think
# \" is an escaped quote
tex = r"\i" + "i"*(len(expr.limits) - 1) + "nt"
symbols = [r"\, %s%s" % (diff_symbol, self._print(symbol[0]))
for symbol in expr.limits]
else:
for lim in reversed(expr.limits):
symbol = lim[0]
tex += r"\int"
if len(lim) > 1:
if self._settings['mode'] != 'inline' \
and not self._settings['itex']:
tex += r"\limits"
if len(lim) == 3:
tex += "_{%s}^{%s}" % (self._print(lim[1]),
self._print(lim[2]))
if len(lim) == 2:
tex += "^{%s}" % (self._print(lim[1]))
symbols.insert(0, r"\, %s%s" % (diff_symbol, self._print(symbol)))
return r"%s %s%s" % (tex, self.parenthesize(expr.function,
PRECEDENCE["Mul"],
is_neg=any(i.could_extract_minus_sign() for i in expr.args),
strict=True),
"".join(symbols))
def _print_Limit(self, expr):
e, z, z0, dir = expr.args
tex = r"\lim_{%s \to " % self._print(z)
if str(dir) == '+-' or z0 in (S.Infinity, S.NegativeInfinity):
tex += r"%s}" % self._print(z0)
else:
tex += r"%s^%s}" % (self._print(z0), self._print(dir))
if isinstance(e, AssocOp):
return r"%s\left(%s\right)" % (tex, self._print(e))
else:
return r"%s %s" % (tex, self._print(e))
    def _hprint_Function(self, func: str) -> str:
        r'''
        Logic to decide how to render a function to latex
          - if it is a recognized latex name, use the appropriate latex command
          - if it is a single letter, excluding sub- and superscripts, just use that letter
          - if it is a longer name, then put \operatorname{} around it and be
            mindful of underscores in the name
        '''
        func = self._deal_with_super_sub(func)
        superscriptidx = func.find("^")
        subscriptidx = func.find("_")
        if func in accepted_latex_functions:
            name = r"\%s" % func
        elif len(func) == 1 or func.startswith('\\') or subscriptidx == 1 or superscriptidx == 1:
            # bare letter, existing latex macro, or one-letter base with
            # sub/superscript: render verbatim
            name = func
        else:
            # wrap only the base name in \operatorname{}, leaving any
            # sub/superscript tail outside it
            if superscriptidx > 0 and subscriptidx > 0:
                name = r"\operatorname{%s}%s" %(
                    func[:min(subscriptidx,superscriptidx)],
                    func[min(subscriptidx,superscriptidx):])
            elif superscriptidx > 0:
                name = r"\operatorname{%s}%s" %(
                    func[:superscriptidx],
                    func[superscriptidx:])
            elif subscriptidx > 0:
                name = r"\operatorname{%s}%s" %(
                    func[:subscriptidx],
                    func[subscriptidx:])
            else:
                name = r"\operatorname{%s}" % func
        return name
    def _print_Function(self, expr: Function, exp=None) -> str:
        r'''
        Render functions to LaTeX, handling functions that LaTeX knows about
        e.g., sin, cos, ... by using the proper LaTeX command (\sin, \cos, ...).
        For single-letter function names, render them as regular LaTeX math
        symbols. For multi-letter function names that LaTeX does not know
        about, (e.g., Li, sech) use \operatorname{} so that the function name
        is rendered in Roman font and LaTeX handles spacing properly.

        expr is the expression involving the function
        exp is an exponent
        '''
        func = expr.func.__name__
        # Dispatch to a dedicated printer if one exists (but never for
        # applied undefined functions, whose name may collide).
        if hasattr(self, '_print_' + func) and \
                not isinstance(expr, AppliedUndef):
            return getattr(self, '_print_' + func)(expr, exp)
        else:
            args = [str(self._print(arg)) for arg in expr.args]
            # How inverse trig functions should be displayed, formats are:
            # abbreviated: asin, full: arcsin, power: sin^-1
            inv_trig_style = self._settings['inv_trig_style']
            # If we are dealing with a power-style inverse trig function
            inv_trig_power_case = False
            # If it is applicable to fold the argument brackets
            can_fold_brackets = self._settings['fold_func_brackets'] and \
                len(args) == 1 and \
                not self._needs_function_brackets(expr.args[0])

            inv_trig_table = [
                "asin", "acos", "atan",
                "acsc", "asec", "acot",
                "asinh", "acosh", "atanh",
                "acsch", "asech", "acoth",
            ]

            # If the function is an inverse trig function, handle the style
            if func in inv_trig_table:
                if inv_trig_style == "abbreviated":
                    pass
                elif inv_trig_style == "full":
                    # hyperbolic inverses use the "ar" prefix, circular "arc"
                    func = ("ar" if func[-1] == "h" else "arc") + func[1:]
                elif inv_trig_style == "power":
                    func = func[1:]
                    inv_trig_power_case = True

                    # Can never fold brackets if we're raised to a power
                    if exp is not None:
                        can_fold_brackets = False

            if inv_trig_power_case:
                if func in accepted_latex_functions:
                    name = r"\%s^{-1}" % func
                else:
                    name = r"\operatorname{%s}^{-1}" % func
            elif exp is not None:
                func_tex = self._hprint_Function(func)
                func_tex = self.parenthesize_super(func_tex)
                name = r'%s^{%s}' % (func_tex, exp)
            else:
                name = self._hprint_Function(func)

            if can_fold_brackets:
                if func in accepted_latex_functions:
                    # Wrap argument safely to avoid parse-time conflicts
                    # with the function name itself
                    name += r" {%s}"
                else:
                    name += r"%s"
            else:
                name += r"{\left(%s \right)}"

            if inv_trig_power_case and exp is not None:
                name += r"^{%s}" % exp

            return name % ",".join(args)
    def _print_UndefinedFunction(self, expr):
        # An unapplied undefined function prints as its (possibly decorated) name.
        return self._hprint_Function(str(expr))
    def _print_ElementwiseApplyFunction(self, expr):
        # Elementwise application of a function to a matrix: f_\circ(M).
        return r"{%s}_{\circ}\left({%s}\right)" % (
            self._print(expr.function),
            self._print(expr.expr),
        )
    @property
    def _special_function_classes(self):
        """Map special-function classes to the LaTeX symbol used when the
        *class itself* (not an application) is printed."""
        from sympy.functions.special.tensor_functions import KroneckerDelta
        from sympy.functions.special.gamma_functions import gamma, lowergamma
        from sympy.functions.special.beta_functions import beta
        from sympy.functions.special.delta_functions import DiracDelta
        from sympy.functions.special.error_functions import Chi
        return {KroneckerDelta: r'\delta',
                gamma: r'\Gamma',
                lowergamma: r'\gamma',
                beta: r'\operatorname{B}',
                DiracDelta: r'\delta',
                Chi: r'\operatorname{Chi}'}
def _print_FunctionClass(self, expr):
for cls in self._special_function_classes:
if issubclass(expr, cls) and expr.__name__ == cls.__name__:
return self._special_function_classes[cls]
return self._hprint_Function(str(expr))
def _print_Lambda(self, expr):
symbols, expr = expr.args
if len(symbols) == 1:
symbols = self._print(symbols[0])
else:
symbols = self._print(tuple(symbols))
tex = r"\left( %s \mapsto %s \right)" % (symbols, self._print(expr))
return tex
    def _print_IdentityFunction(self, expr):
        # The identity map is always shown as (x |-> x).
        return r"\left( x \mapsto x \right)"
def _hprint_variadic_function(self, expr, exp=None) -> str:
args = sorted(expr.args, key=default_sort_key)
texargs = [r"%s" % self._print(symbol) for symbol in args]
tex = r"\%s\left(%s\right)" % (str(expr.func).lower(),
", ".join(texargs))
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
_print_Min = _print_Max = _hprint_variadic_function
def _print_floor(self, expr, exp=None):
tex = r"\left\lfloor{%s}\right\rfloor" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_ceiling(self, expr, exp=None):
tex = r"\left\lceil{%s}\right\rceil" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
    def _print_log(self, expr, exp=None):
        """Render log; two-argument form gets an explicit base subscript,
        and the ``ln_notation`` setting switches \\log to \\ln."""
        if len(expr.args) == 2:
            argument = self._print(expr.args[0])
            base = self._print(expr.args[1])
            # single-character bases don't need the extra brace group
            if len(base) == 1:
                tex = r"\log_%s{\left(%s \right)}" % (base, argument)
            else:
                tex = r"\log_{%s}{\left(%s \right)}" % (base, argument)
        elif not self._settings["ln_notation"]:
            tex = r"\log{\left(%s \right)}" % self._print(expr.args[0])
        else:
            tex = r"\ln{\left(%s \right)}" % self._print(expr.args[0])

        if exp is not None:
            return r"%s^{%s}" % (tex, exp)
        else:
            return tex
def _print_Abs(self, expr, exp=None):
tex = r"\left|{%s}\right|" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_re(self, expr, exp=None):
if self._settings['gothic_re_im']:
tex = r"\Re{%s}" % self.parenthesize(expr.args[0], PRECEDENCE['Atom'])
else:
tex = r"\operatorname{{re}}{{{}}}".format(self.parenthesize(expr.args[0], PRECEDENCE['Atom']))
return self._do_exponent(tex, exp)
def _print_im(self, expr, exp=None):
if self._settings['gothic_re_im']:
tex = r"\Im{%s}" % self.parenthesize(expr.args[0], PRECEDENCE['Atom'])
else:
tex = r"\operatorname{{im}}{{{}}}".format(self.parenthesize(expr.args[0], PRECEDENCE['Atom']))
return self._do_exponent(tex, exp)
    def _print_Not(self, e):
        """Logical negation.

        Negated Equivalent/Implies get slashed arrows; other Boolean
        arguments are parenthesized under \\neg; atoms are printed bare.
        """
        from sympy.logic.boolalg import (Equivalent, Implies)
        if isinstance(e.args[0], Equivalent):
            return self._print_Equivalent(e.args[0], r"\not\Leftrightarrow")
        if isinstance(e.args[0], Implies):
            return self._print_Implies(e.args[0], r"\not\Rightarrow")
        if (e.args[0].is_Boolean):
            return r"\neg \left(%s\right)" % self._print(e.args[0])
        else:
            return r"\neg %s" % self._print(e.args[0])
def _print_LogOp(self, args, char):
arg = args[0]
if arg.is_Boolean and not arg.is_Not:
tex = r"\left(%s\right)" % self._print(arg)
else:
tex = r"%s" % self._print(arg)
for arg in args[1:]:
if arg.is_Boolean and not arg.is_Not:
tex += r" %s \left(%s\right)" % (char, self._print(arg))
else:
tex += r" %s %s" % (char, self._print(arg))
return tex
def _print_And(self, e):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, r"\wedge")
def _print_Or(self, e):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, r"\vee")
def _print_Xor(self, e):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, r"\veebar")
def _print_Implies(self, e, altchar=None):
return self._print_LogOp(e.args, altchar or r"\Rightarrow")
def _print_Equivalent(self, e, altchar=None):
args = sorted(e.args, key=default_sort_key)
return self._print_LogOp(args, altchar or r"\Leftrightarrow")
def _print_conjugate(self, expr, exp=None):
tex = r"\overline{%s}" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_polar_lift(self, expr, exp=None):
func = r"\operatorname{polar\_lift}"
arg = r"{\left(%s \right)}" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}%s" % (func, exp, arg)
else:
return r"%s%s" % (func, arg)
    def _print_ExpBase(self, expr, exp=None):
        # exp and exp_polar both print as e^{arg}; an outer exponent wraps
        # the whole expression via _do_exponent.
        # TODO should exp_polar be printed differently?
        #      what about exp_polar(0), exp_polar(1)?
        tex = r"e^{%s}" % self._print(expr.args[0])
        return self._do_exponent(tex, exp)
    def _print_Exp1(self, expr, exp=None):
        # Euler's number is typeset as a plain "e"; exp is handled by callers.
        return "e"
def _print_elliptic_k(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"K^{%s}%s" % (exp, tex)
else:
return r"K%s" % tex
def _print_elliptic_f(self, expr, exp=None):
tex = r"\left(%s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]))
if exp is not None:
return r"F^{%s}%s" % (exp, tex)
else:
return r"F%s" % tex
def _print_elliptic_e(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"\left(%s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"E^{%s}%s" % (exp, tex)
else:
return r"E%s" % tex
def _print_elliptic_pi(self, expr, exp=None):
if len(expr.args) == 3:
tex = r"\left(%s; %s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]),
self._print(expr.args[2]))
else:
tex = r"\left(%s\middle| %s\right)" % \
(self._print(expr.args[0]), self._print(expr.args[1]))
if exp is not None:
return r"\Pi^{%s}%s" % (exp, tex)
else:
return r"\Pi%s" % tex
def _print_beta(self, expr, exp=None):
x = expr.args[0]
# Deal with unevaluated single argument beta
y = expr.args[0] if len(expr.args) == 1 else expr.args[1]
tex = rf"\left({x}, {y}\right)"
if exp is not None:
return r"\operatorname{B}^{%s}%s" % (exp, tex)
else:
return r"\operatorname{B}%s" % tex
def _print_betainc(self, expr, exp=None, operator='B'):
largs = [self._print(arg) for arg in expr.args]
tex = r"\left(%s, %s\right)" % (largs[0], largs[1])
if exp is not None:
return r"\operatorname{%s}_{(%s, %s)}^{%s}%s" % (operator, largs[2], largs[3], exp, tex)
else:
return r"\operatorname{%s}_{(%s, %s)}%s" % (operator, largs[2], largs[3], tex)
    def _print_betainc_regularized(self, expr, exp=None):
        # Regularized incomplete beta uses the same layout with "I".
        return self._print_betainc(expr, exp, operator='I')
def _print_uppergamma(self, expr, exp=None):
tex = r"\left(%s, %s\right)" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"\Gamma^{%s}%s" % (exp, tex)
else:
return r"\Gamma%s" % tex
def _print_lowergamma(self, expr, exp=None):
tex = r"\left(%s, %s\right)" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"\gamma^{%s}%s" % (exp, tex)
else:
return r"\gamma%s" % tex
def _hprint_one_arg_func(self, expr, exp=None) -> str:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}%s" % (self._print(expr.func), exp, tex)
else:
return r"%s%s" % (self._print(expr.func), tex)
_print_gamma = _hprint_one_arg_func
def _print_Chi(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\operatorname{Chi}^{%s}%s" % (exp, tex)
else:
return r"\operatorname{Chi}%s" % tex
def _print_expint(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[1])
nu = self._print(expr.args[0])
if exp is not None:
return r"\operatorname{E}_{%s}^{%s}%s" % (nu, exp, tex)
else:
return r"\operatorname{E}_{%s}%s" % (nu, tex)
def _print_fresnels(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"S^{%s}%s" % (exp, tex)
else:
return r"S%s" % tex
def _print_fresnelc(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"C^{%s}%s" % (exp, tex)
else:
return r"C%s" % tex
def _print_subfactorial(self, expr, exp=None):
tex = r"!%s" % self.parenthesize(expr.args[0], PRECEDENCE["Func"])
if exp is not None:
return r"\left(%s\right)^{%s}" % (tex, exp)
else:
return tex
def _print_factorial(self, expr, exp=None):
tex = r"%s!" % self.parenthesize(expr.args[0], PRECEDENCE["Func"])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_factorial2(self, expr, exp=None):
tex = r"%s!!" % self.parenthesize(expr.args[0], PRECEDENCE["Func"])
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_binomial(self, expr, exp=None):
tex = r"{\binom{%s}{%s}}" % (self._print(expr.args[0]),
self._print(expr.args[1]))
if exp is not None:
return r"%s^{%s}" % (tex, exp)
else:
return tex
def _print_RisingFactorial(self, expr, exp=None):
n, k = expr.args
base = r"%s" % self.parenthesize(n, PRECEDENCE['Func'])
tex = r"{%s}^{\left(%s\right)}" % (base, self._print(k))
return self._do_exponent(tex, exp)
def _print_FallingFactorial(self, expr, exp=None):
n, k = expr.args
sub = r"%s" % self.parenthesize(k, PRECEDENCE['Func'])
tex = r"{\left(%s\right)}_{%s}" % (self._print(n), sub)
return self._do_exponent(tex, exp)
    def _hprint_BesselBase(self, expr, exp, sym: str) -> str:
        """Shared renderer for Bessel-family functions: sym_order(argument).

        If ``sym`` already contains a superscript (e.g. Hankel H^{(1)}),
        the exponent cannot be merged into it, so _do_exponent wraps the
        whole expression instead.
        """
        tex = r"%s" % (sym)

        need_exp = False
        if exp is not None:
            if tex.find('^') == -1:
                tex = r"%s^{%s}" % (tex, exp)
            else:
                need_exp = True

        tex = r"%s_{%s}\left(%s\right)" % (tex, self._print(expr.order),
                                           self._print(expr.argument))

        if need_exp:
            tex = self._do_exponent(tex, exp)
        return tex
def _hprint_vec(self, vec) -> str:
if not vec:
return ""
s = ""
for i in vec[:-1]:
s += "%s, " % self._print(i)
s += self._print(vec[-1])
return s
    def _print_besselj(self, expr, exp=None):
        # Bessel function of the first kind: J_nu(z).
        return self._hprint_BesselBase(expr, exp, 'J')
    def _print_besseli(self, expr, exp=None):
        # Modified Bessel function of the first kind: I_nu(z).
        return self._hprint_BesselBase(expr, exp, 'I')
    def _print_besselk(self, expr, exp=None):
        # Modified Bessel function of the second kind: K_nu(z).
        return self._hprint_BesselBase(expr, exp, 'K')
    def _print_bessely(self, expr, exp=None):
        # Bessel function of the second kind: Y_nu(z).
        return self._hprint_BesselBase(expr, exp, 'Y')
    def _print_yn(self, expr, exp=None):
        # Spherical Bessel function of the second kind: y_n(z).
        return self._hprint_BesselBase(expr, exp, 'y')
    def _print_jn(self, expr, exp=None):
        # Spherical Bessel function of the first kind: j_n(z).
        return self._hprint_BesselBase(expr, exp, 'j')
    def _print_hankel1(self, expr, exp=None):
        # Hankel function of the first kind: H^{(1)}_nu(z).
        return self._hprint_BesselBase(expr, exp, 'H^{(1)}')
    def _print_hankel2(self, expr, exp=None):
        # Hankel function of the second kind: H^{(2)}_nu(z).
        return self._hprint_BesselBase(expr, exp, 'H^{(2)}')
    def _print_hn1(self, expr, exp=None):
        # Spherical Hankel function of the first kind: h^{(1)}_n(z).
        return self._hprint_BesselBase(expr, exp, 'h^{(1)}')
    def _print_hn2(self, expr, exp=None):
        # Spherical Hankel function of the second kind: h^{(2)}_n(z).
        return self._hprint_BesselBase(expr, exp, 'h^{(2)}')
def _hprint_airy(self, expr, exp=None, notation="") -> str:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"%s^{%s}%s" % (notation, exp, tex)
else:
return r"%s%s" % (notation, tex)
def _hprint_airy_prime(self, expr, exp=None, notation="") -> str:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"{%s^\prime}^{%s}%s" % (notation, exp, tex)
else:
return r"%s^\prime%s" % (notation, tex)
    def _print_airyai(self, expr, exp=None):
        # Airy function of the first kind: Ai(x).
        return self._hprint_airy(expr, exp, 'Ai')
    def _print_airybi(self, expr, exp=None):
        # Airy function of the second kind: Bi(x).
        return self._hprint_airy(expr, exp, 'Bi')
    def _print_airyaiprime(self, expr, exp=None):
        # Derivative of the Airy function of the first kind: Ai'(x).
        return self._hprint_airy_prime(expr, exp, 'Ai')
    def _print_airybiprime(self, expr, exp=None):
        # Derivative of the Airy function of the second kind: Bi'(x).
        return self._hprint_airy_prime(expr, exp, 'Bi')
    def _print_hyper(self, expr, exp=None):
        """Generalized hypergeometric function: {}_pF_q with numerator/
        denominator parameters stacked in a matrix before the argument."""
        tex = r"{{}_{%s}F_{%s}\left(\begin{matrix} %s \\ %s \end{matrix}" \
              r"\middle| {%s} \right)}" % \
            (self._print(len(expr.ap)), self._print(len(expr.bq)),
             self._hprint_vec(expr.ap), self._hprint_vec(expr.bq),
             self._print(expr.argument))

        if exp is not None:
            tex = r"{%s}^{%s}" % (tex, exp)
        return tex
    def _print_meijerg(self, expr, exp=None):
        """Meijer G-function: G^{m,n}_{p,q} with its four parameter groups
        laid out in a 2x2 matrix before the argument."""
        tex = r"{G_{%s, %s}^{%s, %s}\left(\begin{matrix} %s & %s \\" \
              r"%s & %s \end{matrix} \middle| {%s} \right)}" % \
            (self._print(len(expr.ap)), self._print(len(expr.bq)),
             self._print(len(expr.bm)), self._print(len(expr.an)),
             self._hprint_vec(expr.an), self._hprint_vec(expr.aother),
             self._hprint_vec(expr.bm), self._hprint_vec(expr.bother),
             self._print(expr.argument))

        if exp is not None:
            tex = r"{%s}^{%s}" % (tex, exp)
        return tex
def _print_dirichlet_eta(self, expr, exp=None):
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\eta^{%s}%s" % (exp, tex)
return r"\eta%s" % tex
def _print_zeta(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"\left(%s, %s\right)" % tuple(map(self._print, expr.args))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\zeta^{%s}%s" % (exp, tex)
return r"\zeta%s" % tex
    def _print_stieltjes(self, expr, exp=None):
        """Stieltjes constants: \\gamma_n, or generalized \\gamma_n(a)."""
        if len(expr.args) == 2:
            tex = r"_{%s}\left(%s\right)" % tuple(map(self._print, expr.args))
        else:
            tex = r"_{%s}" % self._print(expr.args[0])

        if exp is not None:
            return r"\gamma%s^{%s}" % (tex, exp)
        return r"\gamma%s" % tex
def _print_lerchphi(self, expr, exp=None):
tex = r"\left(%s, %s, %s\right)" % tuple(map(self._print, expr.args))
if exp is None:
return r"\Phi%s" % tex
return r"\Phi^{%s}%s" % (exp, tex)
def _print_polylog(self, expr, exp=None):
s, z = map(self._print, expr.args)
tex = r"\left(%s\right)" % z
if exp is None:
return r"\operatorname{Li}_{%s}%s" % (s, tex)
return r"\operatorname{Li}_{%s}^{%s}%s" % (s, exp, tex)
def _print_jacobi(self, expr, exp=None):
n, a, b, x = map(self._print, expr.args)
tex = r"P_{%s}^{\left(%s,%s\right)}\left(%s\right)" % (n, a, b, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_gegenbauer(self, expr, exp=None):
n, a, x = map(self._print, expr.args)
tex = r"C_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_chebyshevt(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"T_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_chebyshevu(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"U_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_legendre(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"P_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_assoc_legendre(self, expr, exp=None):
n, a, x = map(self._print, expr.args)
tex = r"P_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_hermite(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"H_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_laguerre(self, expr, exp=None):
n, x = map(self._print, expr.args)
tex = r"L_{%s}\left(%s\right)" % (n, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_assoc_laguerre(self, expr, exp=None):
n, a, x = map(self._print, expr.args)
tex = r"L_{%s}^{\left(%s\right)}\left(%s\right)" % (n, a, x)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_Ynm(self, expr, exp=None):
n, m, theta, phi = map(self._print, expr.args)
tex = r"Y_{%s}^{%s}\left(%s,%s\right)" % (n, m, theta, phi)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
def _print_Znm(self, expr, exp=None):
n, m, theta, phi = map(self._print, expr.args)
tex = r"Z_{%s}^{%s}\left(%s,%s\right)" % (n, m, theta, phi)
if exp is not None:
tex = r"\left(" + tex + r"\right)^{%s}" % (exp)
return tex
    def __print_mathieu_functions(self, character, args, prime=False, exp=None):
        """Shared renderer for Mathieu functions C/S and their derivatives."""
        a, q, z = map(self._print, args)
        sup = r"^{\prime}" if prime else ""
        # NOTE: a falsy exp (None or "") suppresses the exponent group
        exp = "" if not exp else "^{%s}" % exp
        return r"%s%s\left(%s, %s, %s\right)%s" % (character, sup, a, q, z, exp)
    def _print_mathieuc(self, expr, exp=None):
        # Even Mathieu function: C(a, q, z).
        return self.__print_mathieu_functions("C", expr.args, exp=exp)
    def _print_mathieus(self, expr, exp=None):
        # Odd Mathieu function: S(a, q, z).
        return self.__print_mathieu_functions("S", expr.args, exp=exp)
    def _print_mathieucprime(self, expr, exp=None):
        # Derivative of the even Mathieu function: C'(a, q, z).
        return self.__print_mathieu_functions("C", expr.args, prime=True, exp=exp)
    def _print_mathieusprime(self, expr, exp=None):
        # Derivative of the odd Mathieu function: S'(a, q, z).
        return self.__print_mathieu_functions("S", expr.args, prime=True, exp=exp)
def _print_Rational(self, expr):
if expr.q != 1:
sign = ""
p = expr.p
if expr.p < 0:
sign = "- "
p = -p
if self._settings['fold_short_frac']:
return r"%s%d / %d" % (sign, p, expr.q)
return r"%s\frac{%d}{%d}" % (sign, p, expr.q)
else:
return self._print(expr.p)
    def _print_Order(self, expr):
        """Big-O notation: O(expr), annotating the limit point when it is
        nonzero or when there are several variables."""
        s = self._print(expr.expr)
        # precedence: (point nonzero) OR (multivariate) triggers the arrow
        if expr.point and any(p != S.Zero for p in expr.point) or \
           len(expr.variables) > 1:
            s += '; '
            if len(expr.variables) > 1:
                s += self._print(expr.variables)
            elif expr.variables:
                s += self._print(expr.variables[0])
            s += r'\rightarrow '
            if len(expr.point) > 1:
                s += self._print(expr.point)
            else:
                s += self._print(expr.point[0])
        return r"O\left(%s\right)" % s
def _print_Symbol(self, expr: Symbol, style='plain'):
name: str = self._settings['symbol_names'].get(expr)
if name is not None:
return name
return self._deal_with_super_sub(expr.name, style=style)
_print_RandomSymbol = _print_Symbol
    def _split_super_sub(self, name: str) -> tuple[str, list[str], list[str]]:
        """Split a symbol name into (base, superscripts, subscripts).

        Names already containing braces are passed through untouched; with
        disable_split_super_sub the markers are escaped instead of parsed.
        """
        if name is None or '{' in name:
            return (name, [], [])
        elif self._settings["disable_split_super_sub"]:
            name, supers, subs = (name.replace('_', '\\_').replace('^', '\\^'), [], [])
        else:
            name, supers, subs = split_super_sub(name)

            # translate Greek names etc. to their LaTeX commands
            name = translate(name)
            supers = [translate(sup) for sup in supers]
            subs = [translate(sub) for sub in subs]
        return (name, supers, subs)
    def _deal_with_super_sub(self, string: str, style='plain') -> str:
        """Assemble a printable name from its base and sub/superscripts;
        ``style='bold'`` bolds only the base name."""
        name, supers, subs = self._split_super_sub(string)

        # apply the style only to the name
        if style == 'bold':
            name = "\\mathbf{{{}}}".format(name)

        # glue all items together:
        if supers:
            name += "^{%s}" % " ".join(supers)
        if subs:
            name += "_{%s}" % " ".join(subs)

        return name
def _print_Relational(self, expr):
if self._settings['itex']:
gt = r"\gt"
lt = r"\lt"
else:
gt = ">"
lt = "<"
charmap = {
"==": "=",
">": gt,
"<": lt,
">=": r"\geq",
"<=": r"\leq",
"!=": r"\neq",
}
return "%s %s %s" % (self._print(expr.lhs),
charmap[expr.rel_op], self._print(expr.rhs))
    def _print_Piecewise(self, expr):
        """Render Piecewise as a cases environment; a final True condition
        becomes the "otherwise" branch."""
        ecpairs = [r"%s & \text{for}\: %s" % (self._print(e), self._print(c))
                   for e, c in expr.args[:-1]]
        if expr.args[-1].cond == true:
            ecpairs.append(r"%s & \text{otherwise}" %
                           self._print(expr.args[-1].expr))
        else:
            ecpairs.append(r"%s & \text{for}\: %s" %
                           (self._print(expr.args[-1].expr),
                            self._print(expr.args[-1].cond)))
        tex = r"\begin{cases} %s \end{cases}"
        return tex % r" \\".join(ecpairs)
    def _print_matrix_contents(self, expr):
        """Render a matrix body (no delimiters) as a LaTeX matrix/array
        environment, chosen from the mat_str setting or matrix size."""
        lines = []

        for line in range(expr.rows):  # one "&"-joined row per matrix row
            lines.append(" & ".join([self._print(i) for i in expr[line, :]]))

        mat_str = self._settings['mat_str']
        if mat_str is None:
            if self._settings['mode'] == 'inline':
                mat_str = 'smallmatrix'
            else:
                # amsmath's matrix environment supports at most 10 columns
                if (expr.cols <= 10) is True:
                    mat_str = 'matrix'
                else:
                    mat_str = 'array'

        out_str = r'\begin{%MATSTR%}%s\end{%MATSTR%}'
        out_str = out_str.replace('%MATSTR%', mat_str)
        if mat_str == 'array':
            # array needs an explicit column-alignment spec
            out_str = out_str.replace('%s', '{' + 'c'*expr.cols + '}%s')
        return out_str % r"\\".join(lines)
def _print_MatrixBase(self, expr):
out_str = self._print_matrix_contents(expr)
if self._settings['mat_delim']:
left_delim = self._settings['mat_delim']
right_delim = self._delim_dict[left_delim]
out_str = r'\left' + left_delim + out_str + \
r'\right' + right_delim
return out_str
def _print_MatrixElement(self, expr):
matrix_part = self.parenthesize(expr.parent, PRECEDENCE['Atom'], strict=True)
index_part = f"{self._print(expr.i)},{self._print(expr.j)}"
return f"{{{matrix_part}}}_{{{index_part}}}"
    def _print_MatrixSlice(self, expr):
        """Matrix slice: M[a:b:c, d:e:f], eliding default start/stop/step."""
        def latexslice(x, dim):
            x = list(x)
            # unit step is implicit
            if x[2] == 1:
                del x[2]
            # start 0 and stop == dimension print as empty bounds
            if x[0] == 0:
                x[0] = None
            if x[1] == dim:
                x[1] = None
            return ':'.join(self._print(xi) if xi is not None else '' for xi in x)
        return (self.parenthesize(expr.parent, PRECEDENCE["Atom"], strict=True) + r'\left[' +
                latexslice(expr.rowslice, expr.parent.rows) + ', ' +
                latexslice(expr.colslice, expr.parent.cols) + r'\right]')
    def _print_BlockMatrix(self, expr):
        # A BlockMatrix prints as the matrix of its blocks.
        return self._print(expr.blocks)
    def _print_Transpose(self, expr):
        """Transpose: M^T, parenthesizing compound matrix expressions and
        bases that already carry a superscript."""
        mat = expr.arg
        from sympy.matrices import MatrixSymbol, BlockMatrix
        if (not isinstance(mat, MatrixSymbol) and
            not isinstance(mat, BlockMatrix) and mat.is_MatrixExpr):
            return r"\left(%s\right)^{T}" % self._print(mat)
        else:
            s = self.parenthesize(mat, precedence_traditional(expr), True)
            # avoid double superscripts like A^2^T
            if '^' in s:
                return r"\left(%s\right)^{T}" % s
            else:
                return "%s^{T}" % s
    def _print_Trace(self, expr):
        # Matrix trace: \operatorname{tr}(M).
        mat = expr.arg
        return r"\operatorname{tr}\left(%s \right)" % self._print(mat)
    def _print_Adjoint(self, expr):
        """Conjugate transpose; the superscript symbol is chosen by the
        adjoint_style setting (dagger/star/hermitian)."""
        style_to_latex = {
            "dagger"   : r"\dagger",
            "star"     : r"\ast",
            "hermitian": r"\mathsf{H}"
        }
        adjoint_style = style_to_latex.get(self._settings["adjoint_style"], r"\dagger")
        mat = expr.arg
        from sympy.matrices import MatrixSymbol, BlockMatrix
        if (not isinstance(mat, MatrixSymbol) and
            not isinstance(mat, BlockMatrix) and mat.is_MatrixExpr):
            return r"\left(%s\right)^{%s}" % (self._print(mat), adjoint_style)
        else:
            s = self.parenthesize(mat, precedence_traditional(expr), True)
            # avoid stacking two superscripts on the same base
            if '^' in s:
                return r"\left(%s\right)^{%s}" % (s, adjoint_style)
            else:
                return r"%s^{%s}" % (s, adjoint_style)
    def _print_MatMul(self, expr):
        """Matrix product as juxtaposition, extracting a leading minus sign
        rather than printing a (-1) coefficient."""
        from sympy import MatMul

        # Parenthesize nested MatMul but not other types of Mul objects:
        parens = lambda x: self._print(x) if isinstance(x, Mul) and not isinstance(x, MatMul) else \
            self.parenthesize(x, precedence_traditional(expr), False)

        args = list(expr.args)
        if expr.could_extract_minus_sign():
            if args[0] == -1:
                args = args[1:]
            else:
                args[0] = -args[0]
            return '- ' + ' '.join(map(parens, args))
        else:
            return ' '.join(map(parens, args))
def _print_DotProduct(self, expr):
level = precedence_traditional(expr)
left, right = expr.args
return rf"{self.parenthesize(left, level)} \cdot {self.parenthesize(right, level)}"
    def _print_Determinant(self, expr):
        """Determinant: |M|; explicit matrices and BlockMatrix print their
        bare contents between the bars."""
        mat = expr.arg
        if mat.is_MatrixExpr:
            from sympy.matrices.expressions.blockmatrix import BlockMatrix
            if isinstance(mat, BlockMatrix):
                return r"\left|{%s}\right|" % self._print_matrix_contents(mat.blocks)
            return r"\left|{%s}\right|" % self._print(mat)
        return r"\left|{%s}\right|" % self._print_matrix_contents(mat)
def _print_Mod(self, expr, exp=None):
if exp is not None:
return r'\left(%s \bmod %s\right)^{%s}' % \
(self.parenthesize(expr.args[0], PRECEDENCE['Mul'],
strict=True),
self.parenthesize(expr.args[1], PRECEDENCE['Mul'],
strict=True),
exp)
return r'%s \bmod %s' % (self.parenthesize(expr.args[0],
PRECEDENCE['Mul'],
strict=True),
self.parenthesize(expr.args[1],
PRECEDENCE['Mul'],
strict=True))
def _print_HadamardProduct(self, expr):
args = expr.args
prec = PRECEDENCE['Pow']
parens = self.parenthesize
return r' \circ '.join(
(parens(arg, prec, strict=True) for arg in args))
    def _print_HadamardPower(self, expr):
        """Elementwise power: A^{\\circ b}, parenthesizing low-precedence exponents."""
        if precedence_traditional(expr.exp) < PRECEDENCE["Mul"]:
            template = r"%s^{\circ \left({%s}\right)}"
        else:
            template = r"%s^{\circ {%s}}"
        return self._helper_print_standard_power(expr, template)
def _print_KroneckerProduct(self, expr):
args = expr.args
prec = PRECEDENCE['Pow']
parens = self.parenthesize
return r' \otimes '.join(
(parens(arg, prec, strict=True) for arg in args))
    def _print_MatPow(self, expr):
        """Matrix power: M^n, parenthesizing compound bases and bases that
        already carry a superscript."""
        base, exp = expr.base, expr.exp
        from sympy.matrices import MatrixSymbol
        if not isinstance(base, MatrixSymbol) and base.is_MatrixExpr:
            return "\\left(%s\\right)^{%s}" % (self._print(base),
                                               self._print(exp))
        else:
            base_str = self._print(base)
            # avoid double superscripts like A^T^2
            if '^' in base_str:
                return r"\left(%s\right)^{%s}" % (base_str, self._print(exp))
            else:
                return "%s^{%s}" % (base_str, self._print(exp))
    def _print_MatrixSymbol(self, expr):
        # Matrix symbols obey the mat_symbol_style (plain or bold) setting.
        return self._print_Symbol(expr, style=self._settings[
            'mat_symbol_style'])
    def _print_MatrixUnit(self, E):
        # Matrix unit E_{i,j}; calligraphic when mat_symbol_style is not plain.
        return "E_{%s,%s}" % (self._print(E._i), self._print(E._j)) \
            if self._settings['mat_symbol_style'] == 'plain' else r"\mathcal{E}_{%s,%s}" % (self._print(E._i), self._print(E._j))
    def _print_ZeroMatrix(self, Z):
        # Zero matrix: plain 0, or bold when mat_symbol_style is not plain.
        return "0" if self._settings[
            'mat_symbol_style'] == 'plain' else r"\mathbf{0}"
    def _print_OneMatrix(self, O):
        # All-ones matrix: plain 1, or bold when mat_symbol_style is not plain.
        return "1" if self._settings[
            'mat_symbol_style'] == 'plain' else r"\mathbf{1}"
    def _print_Identity(self, I):
        # Identity matrix: blackboard \mathbb{I}, or bold \mathbf{I}.
        return r"\mathbb{I}" if self._settings[
            'mat_symbol_style'] == 'plain' else r"\mathbf{I}"
def _print_PermutationMatrix(self, P):
perm_str = self._print(P.args[0])
return "P_{%s}" % perm_str
    def _print_NDimArray(self, expr: NDimArray):
        """Render an N-dimensional array by collapsing axes pairwise:
        alternating levels become matrix rows/columns, with odd extra ranks
        wrapped in brackets. Rank-0 arrays print their single element."""

        if expr.rank() == 0:
            return self._print(expr[()])

        mat_str = self._settings['mat_str']
        if mat_str is None:
            if self._settings['mode'] == 'inline':
                mat_str = 'smallmatrix'
            else:
                # amsmath's matrix environment supports at most 10 columns
                if (expr.rank() == 0) or (expr.shape[-1] <= 10):
                    mat_str = 'matrix'
                else:
                    mat_str = 'array'
        block_str = r'\begin{%MATSTR%}%s\end{%MATSTR%}'
        block_str = block_str.replace('%MATSTR%', mat_str)
        if mat_str == 'array':
            block_str = block_str.replace('%s', '{' + 'c'*expr.shape[0] + '}%s')
        if self._settings['mat_delim']:
            left_delim: str = self._settings['mat_delim']
            right_delim = self._delim_dict[left_delim]
            block_str = r'\left' + left_delim + block_str + \
                        r'\right' + right_delim

        if expr.rank() == 0:
            return block_str % ""

        level_str: list[list[str]] = [[] for i in range(expr.rank() + 1)]
        shape_ranges = [list(range(i)) for i in expr.shape]
        # fill the innermost level with every scalar entry in index order
        for outer_i in itertools.product(*shape_ranges):
            level_str[-1].append(self._print(expr[outer_i]))
            even = True
            # collapse completed inner levels outward, alternating between
            # "&"-joined rows (even) and stacked matrix blocks (odd)
            for back_outer_i in range(expr.rank()-1, -1, -1):
                if len(level_str[back_outer_i+1]) < expr.shape[back_outer_i]:
                    break
                if even:
                    level_str[back_outer_i].append(
                        r" & ".join(level_str[back_outer_i+1]))
                else:
                    level_str[back_outer_i].append(
                        block_str % (r"\\".join(level_str[back_outer_i+1])))
                    if len(level_str[back_outer_i+1]) == 1:
                        level_str[back_outer_i][-1] = r"\left[" + \
                            level_str[back_outer_i][-1] + r"\right]"
                even = not even
                level_str[back_outer_i+1] = []

        out_str = level_str[0][0]

        if expr.rank() % 2 == 1:
            out_str = block_str % out_str

        return out_str
    def _printer_tensor_indices(self, name, indices, index_map: dict):
        """Render a tensor with mixed upper/lower indices, grouping runs of
        indices with the same valence into one {}^{...}/{}_{...} group and
        showing substituted values from ``index_map`` as "index=value"."""
        out_str = self._print(name)
        last_valence = None
        prev_map = None
        for index in indices:
            new_valence = index.is_up
            # separate same-valence neighbors when either carries a value
            if ((index in index_map) or prev_map) and \
                    last_valence == new_valence:
                out_str += ","
            if last_valence != new_valence:
                if last_valence is not None:
                    out_str += "}"
                if index.is_up:
                    out_str += "{}^{"
                else:
                    out_str += "{}_{"
            out_str += self._print(index.args[0])
            if index in index_map:
                out_str += "="
                out_str += self._print(index_map[index])
                prev_map = True
            else:
                prev_map = False
            last_valence = new_valence
        if last_valence is not None:
            out_str += "}"
        return out_str
    def _print_Tensor(self, expr):
        # Applied tensor: the head name followed by its up/down indices.
        name = expr.args[0].args[0]
        indices = expr.get_indices()
        return self._printer_tensor_indices(name, indices, {})
    def _print_TensorElement(self, expr):
        # Tensor with some indices fixed to concrete values (index=value).
        name = expr.expr.args[0].args[0]
        indices = expr.expr.get_indices()
        index_map = expr.index_map
        return self._printer_tensor_indices(name, indices, index_map)
    def _print_TensMul(self, expr):
        # prints expressions like "A(a)", "3*A(a)", "(1+x)*A(a)"
        # sign and factor list come pre-split from the tensor machinery
        sign, args = expr._get_args_for_traditional_printer()
        return sign + "".join(
            [self.parenthesize(arg, precedence(expr)) for arg in args]
        )
def _print_TensAdd(self, expr):
a = []
args = expr.args
for x in args:
a.append(self.parenthesize(x, precedence(expr)))
a.sort()
s = ' + '.join(a)
s = s.replace('+ -', '- ')
return s
def _print_TensorIndex(self, expr):
return "{}%s{%s}" % (
"^" if expr.is_up else "_",
self._print(expr.args[0])
)
    def _print_PartialDerivative(self, expr):
        """Tensor partial derivative: \\partial/\\partial x (single variable)
        or \\partial^n over a product of \\partial terms (multiple)."""
        if len(expr.variables) == 1:
            return r"\frac{\partial}{\partial {%s}}{%s}" % (
                self._print(expr.variables[0]),
                self.parenthesize(expr.expr, PRECEDENCE["Mul"], False)
            )
        else:
            return r"\frac{\partial^{%s}}{%s}{%s}" % (
                len(expr.variables),
                " ".join([r"\partial {%s}" % self._print(i) for i in expr.variables]),
                self.parenthesize(expr.expr, PRECEDENCE["Mul"], False)
            )
    def _print_ArraySymbol(self, expr):
        # An array symbol prints as its (printed) name.
        return self._print(expr.name)
    def _print_ArrayElement(self, expr):
        # Array element: {A}_{i, j, ...} with comma-separated indices.
        return "{{%s}_{%s}}" % (
            self.parenthesize(expr.name, PRECEDENCE["Func"], True),
            ", ".join([f"{self._print(i)}" for i in expr.indices]))
    def _print_UniversalSet(self, expr):
        # The universal set is typeset as blackboard-bold U.
        return r"\mathbb{U}"
def _print_frac(self, expr, exp=None):
if exp is None:
return r"\operatorname{frac}{\left(%s\right)}" % self._print(expr.args[0])
else:
return r"\operatorname{frac}{\left(%s\right)}^{%s}" % (
self._print(expr.args[0]), exp)
def _print_tuple(self, expr):
if self._settings['decimal_separator'] == 'comma':
sep = ";"
elif self._settings['decimal_separator'] == 'period':
sep = ","
else:
raise ValueError('Unknown Decimal Separator')
if len(expr) == 1:
# 1-tuple needs a trailing separator
return self._add_parens_lspace(self._print(expr[0]) + sep)
else:
return self._add_parens_lspace(
(sep + r" \ ").join([self._print(i) for i in expr]))
    def _print_TensorProduct(self, expr):
        r"""Join the factors with the ``\otimes`` tensor-product symbol."""
        elements = [self._print(a) for a in expr.args]
        return r' \otimes '.join(elements)

    def _print_WedgeProduct(self, expr):
        r"""Join the factors with the ``\wedge`` exterior-product symbol."""
        elements = [self._print(a) for a in expr.args]
        return r' \wedge '.join(elements)

    def _print_Tuple(self, expr):
        """A SymPy ``Tuple`` prints exactly like a Python tuple."""
        return self._print_tuple(expr)
def _print_list(self, expr):
if self._settings['decimal_separator'] == 'comma':
return r"\left[ %s\right]" % \
r"; \ ".join([self._print(i) for i in expr])
elif self._settings['decimal_separator'] == 'period':
return r"\left[ %s\right]" % \
r", \ ".join([self._print(i) for i in expr])
else:
raise ValueError('Unknown Decimal Separator')
def _print_dict(self, d):
keys = sorted(d.keys(), key=default_sort_key)
items = []
for key in keys:
val = d[key]
items.append("%s : %s" % (self._print(key), self._print(val)))
return r"\left\{ %s\right\}" % r", \ ".join(items)
    def _print_Dict(self, expr):
        """A SymPy ``Dict`` prints exactly like a Python dict."""
        return self._print_dict(expr)
def _print_DiracDelta(self, expr, exp=None):
if len(expr.args) == 1 or expr.args[1] == 0:
tex = r"\delta\left(%s\right)" % self._print(expr.args[0])
else:
tex = r"\delta^{\left( %s \right)}\left( %s \right)" % (
self._print(expr.args[1]), self._print(expr.args[0]))
if exp:
tex = r"\left(%s\right)^{%s}" % (tex, exp)
return tex
def _print_SingularityFunction(self, expr, exp=None):
shift = self._print(expr.args[0] - expr.args[1])
power = self._print(expr.args[2])
tex = r"{\left\langle %s \right\rangle}^{%s}" % (shift, power)
if exp is not None:
tex = r"{\left({\langle %s \rangle}^{%s}\right)}^{%s}" % (shift, power, exp)
return tex
def _print_Heaviside(self, expr, exp=None):
pargs = ', '.join(self._print(arg) for arg in expr.pargs)
tex = r"\theta\left(%s\right)" % pargs
if exp:
tex = r"\left(%s\right)^{%s}" % (tex, exp)
return tex
def _print_KroneckerDelta(self, expr, exp=None):
i = self._print(expr.args[0])
j = self._print(expr.args[1])
if expr.args[0].is_Atom and expr.args[1].is_Atom:
tex = r'\delta_{%s %s}' % (i, j)
else:
tex = r'\delta_{%s, %s}' % (i, j)
if exp is not None:
tex = r'\left(%s\right)^{%s}' % (tex, exp)
return tex
def _print_LeviCivita(self, expr, exp=None):
indices = map(self._print, expr.args)
if all(x.is_Atom for x in expr.args):
tex = r'\varepsilon_{%s}' % " ".join(indices)
else:
tex = r'\varepsilon_{%s}' % ", ".join(indices)
if exp:
tex = r'\left(%s\right)^{%s}' % (tex, exp)
return tex
def _print_RandomDomain(self, d):
if hasattr(d, 'as_boolean'):
return '\\text{Domain: }' + self._print(d.as_boolean())
elif hasattr(d, 'set'):
return ('\\text{Domain: }' + self._print(d.symbols) + ' \\in ' +
self._print(d.set))
elif hasattr(d, 'symbols'):
return '\\text{Domain on }' + self._print(d.symbols)
else:
return self._print(None)
def _print_FiniteSet(self, s):
items = sorted(s.args, key=default_sort_key)
return self._print_set(items)
def _print_set(self, s):
items = sorted(s, key=default_sort_key)
if self._settings['decimal_separator'] == 'comma':
items = "; ".join(map(self._print, items))
elif self._settings['decimal_separator'] == 'period':
items = ", ".join(map(self._print, items))
else:
raise ValueError('Unknown Decimal Separator')
return r"\left\{%s\right\}" % items
_print_frozenset = _print_set
    def _print_Range(self, s):
        r"""Print a Range as an explicit, possibly elided, set of elements.

        Infinite or long ranges are abbreviated with ``\ldots``; a Range
        with unresolved symbolic arguments falls back to functional
        ``Range(...)`` notation.
        """
        def _print_symbolic_range():
            # Symbolic Range that cannot be resolved
            if s.args[0] == 0:
                if s.args[2] == 1:
                    cont = self._print(s.args[1])
                else:
                    cont = ", ".join(self._print(arg) for arg in s.args)
            else:
                if s.args[2] == 1:
                    cont = ", ".join(self._print(arg) for arg in s.args[:2])
                else:
                    cont = ", ".join(self._print(arg) for arg in s.args)

            return(f"\\text{{Range}}\\left({cont}\\right)")

        # Unique sentinel that renders as \ldots in the final join.
        dots = object()

        if s.start.is_infinite and s.stop.is_infinite:
            if s.step.is_positive:
                printset = dots, -1, 0, 1, dots
            else:
                printset = dots, 1, 0, -1, dots
        elif s.start.is_infinite:
            printset = dots, s[-1] - s.step, s[-1]
        elif s.stop.is_infinite:
            it = iter(s)
            printset = next(it), next(it), dots
        elif s.is_empty is not None:
            if (s.size < 4) == True:
                printset = tuple(s)
            elif s.is_iterable:
                it = iter(s)
                printset = next(it), next(it), dots, s[-1]
            else:
                return _print_symbolic_range()
        else:
            return _print_symbolic_range()
        return (r"\left\{" +
                r", ".join(self._print(el) if el is not dots else r'\ldots' for el in printset) +
                r"\right\}")
    def __print_number_polynomial(self, expr, letter, exp=None):
        """Shared printer for number sequences and their polynomials.

        With two args prints ``letter_{n}(x)``; with one arg prints
        ``letter_{n}``.  *exp*, if given, raises the symbol to a power.
        """
        if len(expr.args) == 2:
            if exp is not None:
                return r"%s_{%s}^{%s}\left(%s\right)" % (letter,
                            self._print(expr.args[0]), exp,
                            self._print(expr.args[1]))
            return r"%s_{%s}\left(%s\right)" % (letter,
                        self._print(expr.args[0]), self._print(expr.args[1]))
        tex = r"%s_{%s}" % (letter, self._print(expr.args[0]))
        if exp is not None:
            tex = r"%s^{%s}" % (tex, exp)
        return tex
    def _print_bernoulli(self, expr, exp=None):
        """Bernoulli numbers/polynomials print as ``B_n`` / ``B_n(x)``."""
        return self.__print_number_polynomial(expr, "B", exp)

    def _print_genocchi(self, expr, exp=None):
        """Genocchi numbers/polynomials print as ``G_n`` / ``G_n(x)``."""
        return self.__print_number_polynomial(expr, "G", exp)

    def _print_bell(self, expr, exp=None):
        """Bell numbers/polynomials print as ``B_n``; the three-argument
        (partial Bell polynomial) form prints as ``B_{n, k}(x1, ...)``."""
        if len(expr.args) == 3:
            tex1 = r"B_{%s, %s}" % (self._print(expr.args[0]),
                                    self._print(expr.args[1]))
            tex2 = r"\left(%s\right)" % r", ".join(self._print(el) for
                                                  el in expr.args[2])
            if exp is not None:
                tex = r"%s^{%s}%s" % (tex1, exp, tex2)
            else:
                tex = tex1 + tex2
            return tex
        return self.__print_number_polynomial(expr, "B", exp)

    def _print_fibonacci(self, expr, exp=None):
        """Fibonacci numbers/polynomials print as ``F_n`` / ``F_n(x)``."""
        return self.__print_number_polynomial(expr, "F", exp)

    def _print_lucas(self, expr, exp=None):
        """Lucas numbers print as ``L_n``, optionally raised to *exp*."""
        tex = r"L_{%s}" % self._print(expr.args[0])
        if exp is not None:
            tex = r"%s^{%s}" % (tex, exp)
        return tex

    def _print_tribonacci(self, expr, exp=None):
        """Tribonacci numbers/polynomials print as ``T_n`` / ``T_n(x)``."""
        return self.__print_number_polynomial(expr, "T", exp)

    def _print_mobius(self, expr, exp=None):
        r"""The Moebius function prints as ``\mu(n)``, optionally powered."""
        if exp is None:
            return r'\mu\left(%s\right)' % self._print(expr.args[0])
        return r'\mu^{%s}\left(%s\right)' % (exp, self._print(expr.args[0]))
    def _print_SeqFormula(self, s):
        r"""Print a sequence.

        Symbolic bounds use ``\{f\}_{k=a}^{b}`` notation; concrete
        sequences are shown as a bracketed sample of terms, elided
        with ``\ldots`` when infinite or longer than four terms.
        """
        # Unique sentinel that renders as \ldots in the final join.
        dots = object()
        if len(s.start.free_symbols) > 0 or len(s.stop.free_symbols) > 0:
            return r"\left\{%s\right\}_{%s=%s}^{%s}" % (
                self._print(s.formula),
                self._print(s.variables[0]),
                self._print(s.start),
                self._print(s.stop)
            )
        if s.start is S.NegativeInfinity:
            stop = s.stop
            printset = (dots, s.coeff(stop - 3), s.coeff(stop - 2),
                        s.coeff(stop - 1), s.coeff(stop))
        elif s.stop is S.Infinity or s.length > 4:
            printset = s[:4]
            printset.append(dots)
        else:
            printset = tuple(s)

        return (r"\left[" +
                r", ".join(self._print(el) if el is not dots else r'\ldots' for el in printset) +
                r"\right]")

    _print_SeqPer = _print_SeqFormula
    _print_SeqAdd = _print_SeqFormula
    _print_SeqMul = _print_SeqFormula
def _print_Interval(self, i):
if i.start == i.end:
return r"\left\{%s\right\}" % self._print(i.start)
else:
if i.left_open:
left = '('
else:
left = '['
if i.right_open:
right = ')'
else:
right = ']'
return r"\left%s%s, %s\right%s" % \
(left, self._print(i.start), self._print(i.end), right)
    def _print_AccumulationBounds(self, i):
        """Print accumulation bounds as an angle-bracketed pair."""
        return r"\left\langle %s, %s\right\rangle" % \
            (self._print(i.min), self._print(i.max))

    def _print_Union(self, u):
        r"""Join the members of a union with ``\cup``."""
        prec = precedence_traditional(u)
        args_str = [self.parenthesize(i, prec) for i in u.args]
        return r" \cup ".join(args_str)

    def _print_Complement(self, u):
        r"""Join the operands of a set difference with ``\setminus``."""
        prec = precedence_traditional(u)
        args_str = [self.parenthesize(i, prec) for i in u.args]
        return r" \setminus ".join(args_str)

    def _print_Intersection(self, u):
        r"""Join the members of an intersection with ``\cap``."""
        prec = precedence_traditional(u)
        args_str = [self.parenthesize(i, prec) for i in u.args]
        return r" \cap ".join(args_str)

    def _print_SymmetricDifference(self, u):
        r"""Join the operands of a symmetric difference with ``\triangle``."""
        prec = precedence_traditional(u)
        args_str = [self.parenthesize(i, prec) for i in u.args]
        return r" \triangle ".join(args_str)

    def _print_ProductSet(self, p):
        """Print a Cartesian product; identical factors collapse to a power."""
        prec = precedence_traditional(p)
        if len(p.sets) >= 1 and not has_variety(p.sets):
            return self.parenthesize(p.sets[0], prec) + "^{%d}" % len(p.sets)
        return r" \times ".join(
            self.parenthesize(set, prec) for set in p.sets)
    # Constant set symbols: each prints as a fixed LaTeX macro.
    def _print_EmptySet(self, e):
        return r"\emptyset"

    def _print_Naturals(self, n):
        return r"\mathbb{N}"

    def _print_Naturals0(self, n):
        return r"\mathbb{N}_0"

    def _print_Integers(self, i):
        return r"\mathbb{Z}"

    def _print_Rationals(self, i):
        return r"\mathbb{Q}"

    def _print_Reals(self, i):
        return r"\mathbb{R}"

    def _print_Complexes(self, i):
        return r"\mathbb{C}"
    def _print_ImageSet(self, s):
        """Print an ImageSet in set-builder notation."""
        expr = s.lamda.expr
        sig = s.lamda.signature
        xys = ((self._print(x), self._print(y)) for x, y in zip(sig, s.base_sets))
        xinys = r", ".join(r"%s \in %s" % xy for xy in xys)
        return r"\left\{%s\; \middle|\; %s\right\}" % (self._print(expr), xinys)

    def _print_ConditionSet(self, s):
        """Print a ConditionSet; a universal base set is left implicit."""
        vars_print = ', '.join([self._print(var) for var in Tuple(s.sym)])
        if s.base_set is S.UniversalSet:
            return r"\left\{%s\; \middle|\; %s \right\}" % \
                (vars_print, self._print(s.condition))

        return r"\left\{%s\; \middle|\; %s \in %s \wedge %s \right\}" % (
            vars_print,
            vars_print,
            self._print(s.base_set),
            self._print(s.condition))

    def _print_PowerSet(self, expr):
        r"""Print the power set as ``\mathcal{P}(...)``."""
        arg_print = self._print(expr.args[0])
        return r"\mathcal{{P}}\left({}\right)".format(arg_print)

    def _print_ComplexRegion(self, s):
        """Print a complex region in set-builder notation."""
        vars_print = ', '.join([self._print(var) for var in s.variables])
        return r"\left\{%s\; \middle|\; %s \in %s \right\}" % (
            self._print(s.expr),
            vars_print,
            self._print(s.sets))

    def _print_Contains(self, e):
        r"""Print a membership assertion ``x \in S``."""
        return r"%s \in %s" % tuple(self._print(a) for a in e.args)
    def _print_FourierSeries(self, s):
        """Print a truncated Fourier series followed by an ellipsis;
        a series with zero sine and cosine parts is just its constant."""
        if s.an.formula is S.Zero and s.bn.formula is S.Zero:
            return self._print(s.a0)
        return self._print_Add(s.truncate()) + r' + \ldots'

    def _print_FormalPowerSeries(self, s):
        """Print a formal power series via its infinite-sum representation."""
        return self._print_Add(s.infinite)
    def _print_FiniteField(self, expr):
        """GF(p) prints as blackboard-bold F with the modulus subscript."""
        return r"\mathbb{F}_{%s}" % expr.mod

    def _print_IntegerRing(self, expr):
        """The ring of integers prints as blackboard-bold Z."""
        return r"\mathbb{Z}"

    def _print_RationalField(self, expr):
        """The rational field prints as blackboard-bold Q."""
        return r"\mathbb{Q}"

    def _print_RealField(self, expr):
        """The real field prints as blackboard-bold R."""
        return r"\mathbb{R}"

    def _print_ComplexField(self, expr):
        """The complex field prints as blackboard-bold C."""
        return r"\mathbb{C}"

    def _print_PolynomialRing(self, expr):
        """A polynomial ring prints as ``domain[gens]``."""
        domain = self._print(expr.domain)
        symbols = ", ".join(map(self._print, expr.symbols))
        return r"%s\left[%s\right]" % (domain, symbols)

    def _print_FractionField(self, expr):
        """A rational-function field prints as ``domain(gens)``."""
        domain = self._print(expr.domain)
        symbols = ", ".join(map(self._print, expr.symbols))
        return r"%s\left(%s\right)" % (domain, symbols)

    def _print_PolynomialRingBase(self, expr):
        """Generic polynomial-ring base; non-Poly rings get a localization
        prefix ``S_<^{-1}``."""
        domain = self._print(expr.domain)
        symbols = ", ".join(map(self._print, expr.symbols))
        inv = ""
        if not expr.is_Poly:
            inv = r"S_<^{-1}"
        return r"%s%s\left[%s\right]" % (inv, domain, symbols)
    def _print_Poly(self, poly):
        """Print a Poly as ``Poly(expr, gens, domain=...)``.

        The expression part is assembled term by term (sign tokens and
        term strings interleaved) so unit coefficients can be elided.
        """
        cls = poly.__class__.__name__
        terms = []
        for monom, coeff in poly.terms():
            s_monom = ''
            for i, exp in enumerate(monom):
                if exp > 0:
                    if exp == 1:
                        s_monom += self._print(poly.gens[i])
                    else:
                        s_monom += self._print(pow(poly.gens[i], exp))

            if coeff.is_Add:
                # Parenthesize sums so the implied multiplication is clear.
                if s_monom:
                    s_coeff = r"\left(%s\right)" % self._print(coeff)
                else:
                    s_coeff = self._print(coeff)
            else:
                if s_monom:
                    # Unit coefficients are dropped from the printed term.
                    if coeff is S.One:
                        terms.extend(['+', s_monom])
                        continue

                    if coeff is S.NegativeOne:
                        terms.extend(['-', s_monom])
                        continue

                s_coeff = self._print(coeff)

            if not s_monom:
                s_term = s_coeff
            else:
                s_term = s_coeff + " " + s_monom

            if s_term.startswith('-'):
                terms.extend(['-', s_term[1:]])
            else:
                terms.extend(['+', s_term])

        # Fold the leading sign token into the first term.
        if terms[0] in ('-', '+'):
            modifier = terms.pop(0)

            if modifier == '-':
                terms[0] = '-' + terms[0]

        expr = ' '.join(terms)
        gens = list(map(self._print, poly.gens))
        domain = "domain=%s" % self._print(poly.get_domain())

        args = ", ".join([expr] + gens + [domain])
        if cls in accepted_latex_functions:
            tex = r"\%s {\left(%s \right)}" % (cls, args)
        else:
            tex = r"\operatorname{%s}{\left( %s \right)}" % (cls, args)

        return tex
    def _print_ComplexRootOf(self, root):
        """Print a CRootOf as an operator applied to (expr, index)."""
        cls = root.__class__.__name__
        if cls == "ComplexRootOf":
            cls = "CRootOf"
        expr = self._print(root.expr)
        index = root.index
        if cls in accepted_latex_functions:
            return r"\%s {\left(%s, %d\right)}" % (cls, expr, index)
        else:
            return r"\operatorname{%s} {\left(%s, %d\right)}" % (cls, expr,
                                                                 index)

    def _print_RootSum(self, expr):
        """Print a RootSum; the identity mapping function is elided."""
        cls = expr.__class__.__name__
        args = [self._print(expr.expr)]
        if expr.fun is not S.IdentityFunction:
            args.append(self._print(expr.fun))
        if cls in accepted_latex_functions:
            return r"\%s {\left(%s\right)}" % (cls, ", ".join(args))
        else:
            return r"\operatorname{%s} {\left(%s\right)}" % (cls,
                                                             ", ".join(args))
    def _print_OrdinalOmega(self, expr):
        r"""The first transfinite ordinal prints as ``\omega``."""
        return r"\omega"
def _print_OmegaPower(self, expr):
exp, mul = expr.args
if mul != 1:
if exp != 1:
return r"{} \omega^{{{}}}".format(mul, exp)
else:
return r"{} \omega".format(mul)
else:
if exp != 1:
return r"\omega^{{{}}}".format(exp)
else:
return r"\omega"
    def _print_Ordinal(self, expr):
        """Join the Cantor-normal-form terms of an ordinal with plus signs."""
        return " + ".join([self._print(arg) for arg in expr.args])

    def _print_PolyElement(self, poly):
        """Delegate to the element's own str(), supplying LaTeX fragments."""
        mul_symbol = self._settings['mul_symbol_latex']
        return poly.str(self, PRECEDENCE, "{%s}^{%d}", mul_symbol)

    def _print_FracElement(self, frac):
        """Print a fraction-field element; unit denominators are elided."""
        if frac.denom == 1:
            return self._print(frac.numer)
        else:
            numer = self._print(frac.numer)
            denom = self._print(frac.denom)
            return r"\frac{%s}{%s}" % (numer, denom)
    def _print_euler(self, expr, exp=None):
        """Euler numbers print as ``E_m``, Euler polynomials as ``E_m(x)``."""
        m, x = (expr.args[0], None) if len(expr.args) == 1 else expr.args
        tex = r"E_{%s}" % self._print(m)
        if exp is not None:
            tex = r"%s^{%s}" % (tex, exp)
        if x is not None:
            tex = r"%s\left(%s\right)" % (tex, self._print(x))
        return tex

    def _print_catalan(self, expr, exp=None):
        """Catalan numbers print as ``C_n``, optionally raised to *exp*."""
        tex = r"C_{%s}" % self._print(expr.args[0])
        if exp is not None:
            tex = r"%s^{%s}" % (tex, exp)
        return tex

    def _print_UnifiedTransform(self, expr, s, inverse=False):
        r"""Shared printer for the integral transforms: renders
        ``\mathcal{s}_{var}[f](target)``, with ``^{-1}`` for inverses."""
        return r"\mathcal{{{}}}{}_{{{}}}\left[{}\right]\left({}\right)".format(s, '^{-1}' if inverse else '', self._print(expr.args[1]), self._print(expr.args[0]), self._print(expr.args[2]))
    # Thin wrappers: each transform supplies its operator letter to
    # _print_UnifiedTransform; the True flag marks an inverse transform.
    def _print_MellinTransform(self, expr):
        return self._print_UnifiedTransform(expr, 'M')

    def _print_InverseMellinTransform(self, expr):
        return self._print_UnifiedTransform(expr, 'M', True)

    def _print_LaplaceTransform(self, expr):
        return self._print_UnifiedTransform(expr, 'L')

    def _print_InverseLaplaceTransform(self, expr):
        return self._print_UnifiedTransform(expr, 'L', True)

    def _print_FourierTransform(self, expr):
        return self._print_UnifiedTransform(expr, 'F')

    def _print_InverseFourierTransform(self, expr):
        return self._print_UnifiedTransform(expr, 'F', True)

    def _print_SineTransform(self, expr):
        return self._print_UnifiedTransform(expr, 'SIN')

    def _print_InverseSineTransform(self, expr):
        return self._print_UnifiedTransform(expr, 'SIN', True)

    def _print_CosineTransform(self, expr):
        return self._print_UnifiedTransform(expr, 'COS')

    def _print_InverseCosineTransform(self, expr):
        return self._print_UnifiedTransform(expr, 'COS', True)
    def _print_DMP(self, p):
        """Print a dense polynomial by converting it to a SymPy expression;
        falls back to its ``repr`` if conversion fails."""
        try:
            if p.ring is not None:
                # TODO incorporate order
                return self._print(p.ring.to_sympy(p))
        except SympifyError:
            pass
        return self._print(repr(p))

    def _print_DMF(self, p):
        """A DMF (dense rational function) prints like a DMP."""
        return self._print_DMP(p)

    def _print_Object(self, object):
        """A category-theory Object prints as its name."""
        return self._print(Symbol(object.name))
def _print_LambertW(self, expr, exp=None):
arg0 = self._print(expr.args[0])
exp = r"^{%s}" % (exp,) if exp is not None else ""
if len(expr.args) == 1:
result = r"W%s\left(%s\right)" % (exp, arg0)
else:
arg1 = self._print(expr.args[1])
result = "W{0}_{{{1}}}\\left({2}\\right)".format(exp, arg1, arg0)
return result
    def _print_Expectation(self, expr):
        """The expectation operator prints as ``E[...]``."""
        return r"\operatorname{{E}}\left[{}\right]".format(self._print(expr.args[0]))

    def _print_Variance(self, expr):
        """The variance operator prints as ``Var(...)``."""
        return r"\operatorname{{Var}}\left({}\right)".format(self._print(expr.args[0]))

    def _print_Covariance(self, expr):
        """The covariance operator prints as ``Cov(...)`` over all args."""
        return r"\operatorname{{Cov}}\left({}\right)".format(", ".join(self._print(arg) for arg in expr.args))

    def _print_Probability(self, expr):
        """The probability operator prints as ``P(...)``."""
        return r"\operatorname{{P}}\left({}\right)".format(self._print(expr.args[0]))
def _print_Morphism(self, morphism):
domain = self._print(morphism.domain)
codomain = self._print(morphism.codomain)
return "%s\\rightarrow %s" % (domain, codomain)
    def _print_TransferFunction(self, expr):
        """A continuous transfer function prints as a num/den fraction."""
        num, den = self._print(expr.num), self._print(expr.den)
        return r"\frac{%s}{%s}" % (num, den)

    def _print_DiscreteTransferFunction(self, expr):
        """A discrete transfer function also shows its sampling time."""
        num, den = self._print(expr.num), self._print(expr.den)
        sampling_time = self._print(expr.sampling_time)
        return r"\frac{%s}{%s} \text{ [st: } {%s} \text{]}" % \
            (num, den, sampling_time)

    def _print_Series(self, expr):
        """A series interconnection prints as juxtaposed factors."""
        args = list(expr.args)
        parens = lambda x: self.parenthesize(x, precedence_traditional(expr),
                                             False)
        return ' '.join(map(parens, args))

    def _print_MIMOSeries(self, expr):
        r"""A MIMO series prints with ``\cdot``; the stored argument order
        is reversed to match mathematical application order."""
        from sympy.physics.control.lti import MIMOParallel
        args = list(expr.args)[::-1]
        parens = lambda x: self.parenthesize(x, precedence_traditional(expr),
                                             False) if isinstance(x, MIMOParallel) else self._print(x)
        return r"\cdot".join(map(parens, args))

    def _print_Parallel(self, expr):
        """A parallel interconnection prints as a sum."""
        return ' + '.join(map(self._print, expr.args))

    def _print_MIMOParallel(self, expr):
        """A MIMO parallel interconnection prints as a sum."""
        return ' + '.join(map(self._print, expr.args))

    def _print_Feedback(self, expr):
        r"""Print a feedback loop as ``\frac{G}{1 \mp G H}``.

        The denominator's second term is the open-loop product of the
        forward path (sys1) and the feedback path (sys2); the case split
        below flattens Series factors and elides unit transfer functions.
        """
        from sympy.physics.control import TransferFunction, Series

        num, tf = expr.sys1, TransferFunction(1, 1, expr.var)
        num_arg_list = list(num.args) if isinstance(num, Series) else [num]
        den_arg_list = list(expr.sys2.args) if \
            isinstance(expr.sys2, Series) else [expr.sys2]
        den_term_1 = tf

        if isinstance(num, Series) and isinstance(expr.sys2, Series):
            den_term_2 = Series(*num_arg_list, *den_arg_list)
        elif isinstance(num, Series) and isinstance(expr.sys2, TransferFunction):
            if expr.sys2 == tf:
                den_term_2 = Series(*num_arg_list)
            else:
                den_term_2 = tf, Series(*num_arg_list, expr.sys2)
        elif isinstance(num, TransferFunction) and isinstance(expr.sys2, Series):
            if num == tf:
                den_term_2 = Series(*den_arg_list)
            else:
                den_term_2 = Series(num, *den_arg_list)
        else:
            if num == tf:
                den_term_2 = Series(*den_arg_list)
            elif expr.sys2 == tf:
                den_term_2 = Series(*num_arg_list)
            else:
                den_term_2 = Series(*num_arg_list, *den_arg_list)

        numer = self._print(num)
        denom_1 = self._print(den_term_1)
        denom_2 = self._print(den_term_2)
        # expr.sign == -1 is negative feedback, so the denominator adds.
        _sign = "+" if expr.sign == -1 else "-"

        return r"\frac{%s}{%s %s %s}" % (numer, denom_1, _sign, denom_2)

    def _print_MIMOFeedback(self, expr):
        r"""Print MIMO feedback as ``(I \mp G H)^{-1} \cdot G``."""
        from sympy.physics.control import MIMOSeries
        inv_mat = self._print(MIMOSeries(expr.sys2, expr.sys1))
        sys1 = self._print(expr.sys1)
        _sign = "+" if expr.sign == -1 else "-"
        return r"\left(I_{\tau} %s %s\right)^{-1} \cdot %s" % (_sign, inv_mat,
                                                              sys1)

    def _print_TransferFunctionMatrix(self, expr):
        """Print a transfer-function matrix, tagging discrete systems with
        their sampling time."""
        mat = self._print(expr._expr_mat)
        if expr.sampling_time == 0:
            print_mat = r"%s_\tau" % mat
        else:
            print_mat = r"\underset{[st:\ {%s}]}{%s_k}" %\
                (expr.sampling_time, mat)
        return print_mat

    def _print_DFT(self, expr):
        """DFT/IDFT matrices print as the class name with size subscript."""
        return r"\text{{{}}}_{{{}}}".format(expr.__class__.__name__, expr.n)
    _print_IDFT = _print_DFT
    def _print_NamedMorphism(self, morphism):
        """A named morphism prints as ``name:domain->codomain``."""
        pretty_name = self._print(Symbol(morphism.name))
        pretty_morphism = self._print_Morphism(morphism)
        return "%s:%s" % (pretty_name, pretty_morphism)

    def _print_IdentityMorphism(self, morphism):
        """An identity morphism prints as a named morphism called ``id``."""
        from sympy.categories import NamedMorphism
        return self._print_NamedMorphism(NamedMorphism(
            morphism.domain, morphism.codomain, "id"))

    def _print_CompositeMorphism(self, morphism):
        """Print a composite morphism; components compose right-to-left."""
        # All components of the morphism have names and it is thus
        # possible to build the name of the composite.
        component_names_list = [self._print(Symbol(component.name)) for
                                component in morphism.components]
        component_names_list.reverse()
        component_names = "\\circ ".join(component_names_list) + ":"

        pretty_morphism = self._print_Morphism(morphism)
        return component_names + pretty_morphism

    def _print_Category(self, morphism):
        """A category name prints in bold."""
        return r"\mathbf{{{}}}".format(self._print(Symbol(morphism.name)))

    def _print_Diagram(self, diagram):
        """Print a diagram as its premises, optionally implying conclusions."""
        if not diagram.premises:
            # This is an empty diagram.
            return self._print(S.EmptySet)

        latex_result = self._print(diagram.premises)
        if diagram.conclusions:
            latex_result += "\\Longrightarrow %s" % \
                self._print(diagram.conclusions)

        return latex_result

    def _print_DiagramGrid(self, grid):
        """Lay a diagram grid out as a LaTeX ``array`` environment."""
        latex_result = "\\begin{array}{%s}\n" % ("c" * grid.width)

        for i in range(grid.height):
            for j in range(grid.width):
                if grid[i, j]:
                    latex_result += latex(grid[i, j])
                latex_result += " "
                if j != grid.width - 1:
                    latex_result += "& "

            if i != grid.height - 1:
                latex_result += "\\\\"
            latex_result += "\n"

        latex_result += "\\end{array}\n"
        return latex_result
    def _print_FreeModule(self, M):
        """A free module prints as ``{ring}^{rank}``."""
        return '{{{}}}^{{{}}}'.format(self._print(M.ring), self._print(M.rank))

    def _print_FreeModuleElement(self, m):
        """Print a free-module element as a row vector."""
        # Print as row vector for convenience, for now.
        return r"\left[ {} \right]".format(",".join(
            '{' + self._print(x) + '}' for x in m))

    def _print_SubModule(self, m):
        """Print a submodule by its generators, each as a bracketed row."""
        gens = [[self._print(m.ring.to_sympy(x)) for x in g] for g in m.gens]
        curly = lambda o: r"{" + o + r"}"
        square = lambda o: r"\left[ " + o + r" \right]"
        gens_latex = ",".join(curly(square(",".join(curly(x) for x in g))) for g in gens)
        return r"\left\langle {} \right\rangle".format(gens_latex)

    def _print_SubQuotientModule(self, m):
        """Print a quotient submodule by its generators."""
        gens_latex = ",".join(["{" + self._print(g) + "}" for g in m.gens])
        return r"\left\langle {} \right\rangle".format(gens_latex)

    def _print_ModuleImplementedIdeal(self, m):
        """Print an ideal by its generators (one generator per module row)."""
        gens = [m.ring.to_sympy(x) for [x] in m._module.gens]
        gens_latex = ",".join('{' + self._print(x) + '}' for x in gens)
        return r"\left\langle {} \right\rangle".format(gens_latex)

    def _print_Quaternion(self, expr):
        """Print a quaternion as ``a + b i + c j + d k``."""
        # TODO: This expression is potentially confusing,
        # shall we print it as `Quaternion( ... )`?
        s = [self.parenthesize(i, PRECEDENCE["Mul"], strict=True)
             for i in expr.args]
        a = [s[0]] + [i+" "+j for i, j in zip(s[1:], "ijk")]
        return " + ".join(a)

    def _print_QuotientRing(self, R):
        """A quotient ring prints as ``ring / ideal``."""
        # TODO nicer fractions for few generators...
        return r"\frac{{{}}}{{{}}}".format(self._print(R.ring),
                                           self._print(R.base_ideal))

    def _print_QuotientRingElement(self, x):
        """A quotient-ring element prints as a coset ``x + I``."""
        x_latex = self._print(x.ring.to_sympy(x))
        return r"{{{}}} + {{{}}}".format(x_latex,
                                         self._print(x.ring.base_ideal))

    def _print_QuotientModuleElement(self, m):
        """A quotient-module element prints as a coset ``[v] + N``."""
        data = [m.module.ring.to_sympy(x) for x in m.data]
        data_latex = r"\left[ {} \right]".format(",".join(
            '{' + self._print(x) + '}' for x in data))
        return r"{{{}}} + {{{}}}".format(data_latex,
                                         self._print(m.module.killed_module))

    def _print_QuotientModule(self, M):
        """A quotient module prints as ``base / killed``."""
        # TODO nicer fractions for few generators...
        return r"\frac{{{}}}{{{}}}".format(self._print(M.base),
                                           self._print(M.killed_module))

    def _print_MatrixHomomorphism(self, h):
        """A matrix homomorphism prints as ``{M} : {dom} \\to {cod}``."""
        return r"{{{}}} : {{{}}} \to {{{}}}".format(self._print(h._sympy_matrix()),
            self._print(h.domain), self._print(h.codomain))
    def _print_Manifold(self, manifold):
        """Print a manifold name in text mode, with super/subscripts."""
        name, supers, subs = self._split_super_sub(manifold.name.name)

        name = r'\text{%s}' % name
        if supers:
            name += "^{%s}" % " ".join(supers)
        if subs:
            name += "_{%s}" % " ".join(subs)

        return name

    def _print_Patch(self, patch):
        """A patch prints as its name subscripted by its manifold."""
        return r'\text{%s}_{%s}' % (self._print(patch.name), self._print(patch.manifold))

    def _print_CoordSystem(self, coordsys):
        """A coordinate system shows its name, patch and manifold."""
        return r'\text{%s}^{\text{%s}}_{%s}' % (
            self._print(coordsys.name), self._print(coordsys.patch.name), self._print(coordsys.manifold)
        )

    def _print_CovarDerivativeOp(self, cvd):
        r"""The covariant derivative prints as ``\nabla`` subscripted by
        the direction it differentiates along."""
        return r'\mathbb{\nabla}_{%s}' % self._print(cvd._wrt)

    def _print_BaseScalarField(self, field):
        """A base scalar field prints as its coordinate symbol in bold."""
        string = field._coord_sys.symbols[field._index].name
        return r'\mathbf{{{}}}'.format(self._print(Symbol(string)))

    def _print_BaseVectorField(self, field):
        r"""A base vector field prints as ``\partial`` of its coordinate."""
        string = field._coord_sys.symbols[field._index].name
        return r'\partial_{{{}}}'.format(self._print(Symbol(string)))

    def _print_Differential(self, diff):
        """Print an exterior differential; coordinate one-forms print
        compactly as ``dx``, general fields as ``d(...)``."""
        field = diff._form_field
        if hasattr(field, '_coord_sys'):
            string = field._coord_sys.symbols[field._index].name
            return r'\operatorname{{d}}{}'.format(self._print(Symbol(string)))
        else:
            string = self._print(field)
            return r'\operatorname{{d}}\left({}\right)'.format(string)

    def _print_Tr(self, p):
        """The trace prints as ``tr(...)``."""
        # TODO: Handle indices
        contents = self._print(p.args[0])
        return r'\operatorname{{tr}}\left({}\right)'.format(contents)
    def _print_totient(self, expr, exp=None):
        r"""Euler's totient prints as ``\phi(n)``, optionally powered."""
        if exp is not None:
            return r'\left(\phi\left(%s\right)\right)^{%s}' % \
                (self._print(expr.args[0]), exp)
        return r'\phi\left(%s\right)' % self._print(expr.args[0])

    def _print_reduced_totient(self, expr, exp=None):
        r"""The Carmichael function prints as ``\lambda(n)``."""
        if exp is not None:
            return r'\left(\lambda\left(%s\right)\right)^{%s}' % \
                (self._print(expr.args[0]), exp)
        return r'\lambda\left(%s\right)' % self._print(expr.args[0])

    def _print_divisor_sigma(self, expr, exp=None):
        r"""The divisor function prints as ``\sigma_k(n)``."""
        if len(expr.args) == 2:
            tex = r"_%s\left(%s\right)" % tuple(map(self._print,
                                                (expr.args[1], expr.args[0])))
        else:
            tex = r"\left(%s\right)" % self._print(expr.args[0])
        if exp is not None:
            return r"\sigma^{%s}%s" % (exp, tex)
        return r"\sigma%s" % tex
def _print_udivisor_sigma(self, expr, exp=None):
if len(expr.args) == 2:
tex = r"_%s\left(%s\right)" % tuple(map(self._print,
(expr.args[1], expr.args[0])))
else:
tex = r"\left(%s\right)" % self._print(expr.args[0])
if exp is not None:
return r"\sigma^*^{%s}%s" % (exp, tex)
return r"\sigma^*%s" % tex
    def _print_primenu(self, expr, exp=None):
        r"""The count of distinct prime factors prints as ``\nu(n)``."""
        if exp is not None:
            return r'\left(\nu\left(%s\right)\right)^{%s}' % \
                (self._print(expr.args[0]), exp)
        return r'\nu\left(%s\right)' % self._print(expr.args[0])

    def _print_primeomega(self, expr, exp=None):
        r"""The prime factor count with multiplicity prints as ``\Omega(n)``."""
        if exp is not None:
            return r'\left(\Omega\left(%s\right)\right)^{%s}' % \
                (self._print(expr.args[0]), exp)
        return r'\Omega\left(%s\right)' % self._print(expr.args[0])

    def _print_Str(self, s):
        """A Str atom prints as its raw name."""
        return str(s.name)
    def _print_float(self, expr):
        """A Python float is routed through SymPy's Float printer."""
        return self._print(Float(expr))

    # Plain numeric literals (Python ints and the gmpy/flint wrapper
    # types) print via str() without any LaTeX decoration.
    def _print_int(self, expr):
        return str(expr)

    def _print_mpz(self, expr):
        return str(expr)

    def _print_mpq(self, expr):
        return str(expr)

    def _print_fmpz(self, expr):
        return str(expr)

    def _print_fmpq(self, expr):
        return str(expr)
    def _print_Predicate(self, expr):
        """A predicate prints as ``Q`` subscripted by its escaped name."""
        return r"\operatorname{{Q}}_{{\text{{{}}}}}".format(latex_escape(str(expr.name)))

    def _print_AppliedPredicate(self, expr):
        """An applied predicate prints as ``Q_name(args...)``."""
        pred = expr.function
        args = expr.arguments
        pred_latex = self._print(pred)
        args_latex = ', '.join([self._print(a) for a in args])
        return '%s(%s)' % (pred_latex, args_latex)
    def emptyPrinter(self, expr):
        """Fallback for objects that have no dedicated printer: the
        default string form is wrapped in escaped monospace text."""
        # default to just printing as monospace, like would normally be shown
        s = super().emptyPrinter(expr)

        return r"\mathtt{\text{%s}}" % latex_escape(s)
def translate(s: str) -> str:
    r'''
    Check for a modifier ending the string. If present, convert the
    modifier to latex and translate the rest recursively.

    Given a description of a Greek letter or other special character,
    return the appropriate latex.

    Let everything else pass as given.

    >>> from sympy.printing.latex import translate
    >>> translate('alphahatdotprime')
    "{\\dot{\\hat{\\alpha}}}'"
    '''
    # Exact dictionary hits win outright.
    tex = tex_greek_dictionary.get(s)
    if tex:
        return tex

    lowered = s.lower()
    if lowered in greek_letters_set:
        return "\\" + lowered
    if s in other_symbols:
        return "\\" + s

    # Otherwise strip the longest matching modifier suffix and recurse
    # on the remainder.
    for key in sorted(modifier_dict, key=len, reverse=True):
        if lowered.endswith(key) and len(s) > len(key):
            return modifier_dict[key](translate(s[:-len(key)]))
    return s
@print_function(LatexPrinter)
def latex(expr, **settings):
r"""Convert the given expression to LaTeX string representation.
Parameters
==========
full_prec: boolean, optional
If set to True, a floating point number is printed with full precision.
fold_frac_powers : boolean, optional
Emit ``^{p/q}`` instead of ``^{\frac{p}{q}}`` for fractional powers.
fold_func_brackets : boolean, optional
Fold function brackets where applicable.
fold_short_frac : boolean, optional
Emit ``p / q`` instead of ``\frac{p}{q}`` when the denominator is
simple enough (at most two terms and no powers). The default value is
``True`` for inline mode, ``False`` otherwise.
inv_trig_style : string, optional
How inverse trig functions should be displayed. Can be one of
``'abbreviated'``, ``'full'``, or ``'power'``. Defaults to
``'abbreviated'``.
itex : boolean, optional
Specifies if itex-specific syntax is used, including emitting
``$$...$$``.
ln_notation : boolean, optional
If set to ``True``, ``\ln`` is used instead of default ``\log``.
long_frac_ratio : float or None, optional
The allowed ratio of the width of the numerator to the width of the
denominator before the printer breaks off long fractions. If ``None``
(the default value), long fractions are not broken up.
mat_delim : string, optional
The delimiter to wrap around matrices. Can be one of ``'['``, ``'('``,
or the empty string ``''``. Defaults to ``'['``.
mat_str : string, optional
Which matrix environment string to emit. ``'smallmatrix'``,
``'matrix'``, ``'array'``, etc. Defaults to ``'smallmatrix'`` for
inline mode, ``'matrix'`` for matrices of no more than 10 columns, and
``'array'`` otherwise.
mode: string, optional
Specifies how the generated code will be delimited. ``mode`` can be one
of ``'plain'``, ``'inline'``, ``'equation'`` or ``'equation*'``. If
``mode`` is set to ``'plain'``, then the resulting code will not be
delimited at all (this is the default). If ``mode`` is set to
``'inline'`` then inline LaTeX ``$...$`` will be used. If ``mode`` is
set to ``'equation'`` or ``'equation*'``, the resulting code will be
enclosed in the ``equation`` or ``equation*`` environment (remember to
import ``amsmath`` for ``equation*``), unless the ``itex`` option is
set. In the latter case, the ``$$...$$`` syntax is used.
mul_symbol : string or None, optional
The symbol to use for multiplication. Can be one of ``None``,
``'ldot'``, ``'dot'``, or ``'times'``.
order: string, optional
Any of the supported monomial orderings (currently ``'lex'``,
``'grlex'``, or ``'grevlex'``), ``'old'``, and ``'none'``. This
parameter does nothing for `~.Mul` objects. Setting order to ``'old'``
uses the compatibility ordering for ``~.Add`` defined in Printer. For
very large expressions, set the ``order`` keyword to ``'none'`` if
speed is a concern.
symbol_names : dictionary of strings mapped to symbols, optional
Dictionary of symbols and the custom strings they should be emitted as.
root_notation : boolean, optional
If set to ``False``, exponents of the form 1/n are printed in fractional
form. Default is ``True``, to print exponent in root form.
mat_symbol_style : string, optional
Can be either ``'plain'`` (default) or ``'bold'``. If set to
``'bold'``, a `~.MatrixSymbol` A will be printed as ``\mathbf{A}``,
otherwise as ``A``.
imaginary_unit : string, optional
String to use for the imaginary unit. Defined options are ``'i'``
(default) and ``'j'``. Adding ``r`` or ``t`` in front gives ``\mathrm``
or ``\text``, so ``'ri'`` leads to ``\mathrm{i}`` which gives
`\mathrm{i}`.
gothic_re_im : boolean, optional
If set to ``True``, `\Re` and `\Im` is used for ``re`` and ``im``, respectively.
The default is ``False`` leading to `\operatorname{re}` and `\operatorname{im}`.
decimal_separator : string, optional
Specifies what separator to use to separate the whole and fractional parts of a
floating point number as in `2.5` for the default, ``period`` or `2{,}5`
when ``comma`` is specified. Lists, sets, and tuples are printed with semicolon
separating the elements when ``comma`` is chosen. For example, [1; 2; 3] when
``comma`` is chosen and [1,2,3] for when ``period`` is chosen.
parenthesize_super : boolean, optional
If set to ``False``, superscripted expressions will not be parenthesized when
powered. Default is ``True``, which parenthesizes the expression when powered.
min: Integer or None, optional
Sets the lower bound for the exponent to print floating point numbers in
fixed-point format.
max: Integer or None, optional
Sets the upper bound for the exponent to print floating point numbers in
fixed-point format.
diff_operator: string, optional
String to use for differential operator. Default is ``'d'``, to print in italic
form. ``'rd'``, ``'td'`` are shortcuts for ``\mathrm{d}`` and ``\text{d}``.
adjoint_style: string, optional
String to use for the adjoint symbol. Defined options are ``'dagger'``
(default),``'star'``, and ``'hermitian'``.
Notes
=====
Not using a print statement for printing, results in double backslashes for
latex commands since that's the way Python escapes backslashes in strings.
>>> from sympy import latex, Rational
>>> from sympy.abc import tau
>>> latex((2*tau)**Rational(7,2))
'8 \\sqrt{2} \\tau^{\\frac{7}{2}}'
>>> print(latex((2*tau)**Rational(7,2)))
8 \sqrt{2} \tau^{\frac{7}{2}}
Examples
========
>>> from sympy import latex, pi, sin, asin, Integral, Matrix, Rational, log
>>> from sympy.abc import x, y, mu, r, tau
Basic usage:
>>> print(latex((2*tau)**Rational(7,2)))
8 \sqrt{2} \tau^{\frac{7}{2}}
``mode`` and ``itex`` options:
>>> print(latex((2*mu)**Rational(7,2), mode='plain'))
8 \sqrt{2} \mu^{\frac{7}{2}}
>>> print(latex((2*tau)**Rational(7,2), mode='inline'))
$8 \sqrt{2} \tau^{7 / 2}$
>>> print(latex((2*mu)**Rational(7,2), mode='equation*'))
\begin{equation*}8 \sqrt{2} \mu^{\frac{7}{2}}\end{equation*}
>>> print(latex((2*mu)**Rational(7,2), mode='equation'))
\begin{equation}8 \sqrt{2} \mu^{\frac{7}{2}}\end{equation}
>>> print(latex((2*mu)**Rational(7,2), mode='equation', itex=True))
$$8 \sqrt{2} \mu^{\frac{7}{2}}$$
>>> print(latex((2*mu)**Rational(7,2), mode='plain'))
8 \sqrt{2} \mu^{\frac{7}{2}}
>>> print(latex((2*tau)**Rational(7,2), mode='inline'))
$8 \sqrt{2} \tau^{7 / 2}$
>>> print(latex((2*mu)**Rational(7,2), mode='equation*'))
\begin{equation*}8 \sqrt{2} \mu^{\frac{7}{2}}\end{equation*}
>>> print(latex((2*mu)**Rational(7,2), mode='equation'))
\begin{equation}8 \sqrt{2} \mu^{\frac{7}{2}}\end{equation}
>>> print(latex((2*mu)**Rational(7,2), mode='equation', itex=True))
$$8 \sqrt{2} \mu^{\frac{7}{2}}$$
Fraction options:
>>> print(latex((2*tau)**Rational(7,2), fold_frac_powers=True))
8 \sqrt{2} \tau^{7/2}
>>> print(latex((2*tau)**sin(Rational(7,2))))
\left(2 \tau\right)^{\sin{\left(\frac{7}{2} \right)}}
>>> print(latex((2*tau)**sin(Rational(7,2)), fold_func_brackets=True))
\left(2 \tau\right)^{\sin {\frac{7}{2}}}
>>> print(latex(3*x**2/y))
\frac{3 x^{2}}{y}
>>> print(latex(3*x**2/y, fold_short_frac=True))
3 x^{2} / y
>>> print(latex(Integral(r, r)/2/pi, long_frac_ratio=2))
\frac{\int r\, dr}{2 \pi}
>>> print(latex(Integral(r, r)/2/pi, long_frac_ratio=0))
\frac{1}{2 \pi} \int r\, dr
Multiplication options:
>>> print(latex((2*tau)**sin(Rational(7,2)), mul_symbol="times"))
\left(2 \times \tau\right)^{\sin{\left(\frac{7}{2} \right)}}
Trig options:
>>> print(latex(asin(Rational(7,2))))
\operatorname{asin}{\left(\frac{7}{2} \right)}
>>> print(latex(asin(Rational(7,2)), inv_trig_style="full"))
\arcsin{\left(\frac{7}{2} \right)}
>>> print(latex(asin(Rational(7,2)), inv_trig_style="power"))
\sin^{-1}{\left(\frac{7}{2} \right)}
Matrix options:
>>> print(latex(Matrix(2, 1, [x, y])))
\left[\begin{matrix}x\\y\end{matrix}\right]
>>> print(latex(Matrix(2, 1, [x, y]), mat_str = "array"))
\left[\begin{array}{c}x\\y\end{array}\right]
>>> print(latex(Matrix(2, 1, [x, y]), mat_delim="("))
\left(\begin{matrix}x\\y\end{matrix}\right)
Custom printing of symbols:
>>> print(latex(x**2, symbol_names={x: 'x_i'}))
x_i^{2}
Logarithms:
>>> print(latex(log(10)))
\log{\left(10 \right)}
>>> print(latex(log(10), ln_notation=True))
\ln{\left(10 \right)}
``latex()`` also supports the builtin container types :class:`list`,
:class:`tuple`, and :class:`dict`:
>>> print(latex([2/x, y], mode='inline'))
$\left[ 2 / x, \ y\right]$
Unsupported types are rendered as monospaced plaintext:
>>> print(latex(int))
\mathtt{\text{<class 'int'>}}
>>> print(latex("plain % text"))
\mathtt{\text{plain \% text}}
See :ref:`printer_method_example` for an example of how to override
this behavior for your own types by implementing ``_latex``.
.. versionchanged:: 1.7.0
Unsupported types no longer have their ``str`` representation treated as valid latex.
"""
return LatexPrinter(settings).doprint(expr)
def print_latex(expr, **settings):
    """Print the LaTeX representation of the given expression.

    Accepts the same keyword settings as ``latex()``.
    """
    rendered = latex(expr, **settings)
    print(rendered)
def multiline_latex(lhs, rhs, terms_per_line=1, environment="align*", use_dots=False, **settings):
    r"""
    This function generates a LaTeX equation with a multiline right-hand side
    in an ``align*``, ``eqnarray`` or ``IEEEeqnarray`` environment.

    Parameters
    ==========

    lhs : Expr
        Left-hand side of equation

    rhs : Expr
        Right-hand side of equation

    terms_per_line : integer, optional
        Number of terms per line to print. Default is 1.

    environment : "string", optional
        Which LaTeX environment to use for the output. Options are "align*"
        (default), "eqnarray", and "IEEEeqnarray".

    use_dots : boolean, optional
        If ``True``, ``\dots`` is added to the end of each line. Default is ``False``.

    Raises
    ======

    ValueError
        If ``environment`` is not one of the supported environment names.

    Examples
    ========

    >>> from sympy import multiline_latex, symbols, sin, cos, exp, log, I
    >>> x, y, alpha = symbols('x y alpha')
    >>> expr = sin(alpha*y) + exp(I*alpha) - cos(log(y))
    >>> print(multiline_latex(x, expr))
    \begin{align*}
    x = & e^{i \alpha} \\
    & + \sin{\left(\alpha y \right)} \\
    & - \cos{\left(\log{\left(y \right)} \right)}
    \end{align*}

    Using at most two terms per line:

    >>> print(multiline_latex(x, expr, 2))
    \begin{align*}
    x = & e^{i \alpha} + \sin{\left(\alpha y \right)} \\
    & - \cos{\left(\log{\left(y \right)} \right)}
    \end{align*}

    Using ``eqnarray`` and dots:

    >>> print(multiline_latex(x, expr, terms_per_line=2, environment="eqnarray", use_dots=True))
    \begin{eqnarray}
    x & = & e^{i \alpha} + \sin{\left(\alpha y \right)} \dots\nonumber\\
    & & - \cos{\left(\log{\left(y \right)} \right)}
    \end{eqnarray}

    Using ``IEEEeqnarray``:

    >>> print(multiline_latex(x, expr, environment="IEEEeqnarray"))
    \begin{IEEEeqnarray}{rCl}
    x & = & e^{i \alpha} \nonumber\\
    & & + \sin{\left(\alpha y \right)} \nonumber\\
    & & - \cos{\left(\log{\left(y \right)} \right)}
    \end{IEEEeqnarray}

    Notes
    =====

    All optional parameters from ``latex`` can also be used.

    """
    # Based on code from https://github.com/sympy/sympy/issues/3001
    l = LatexPrinter(**settings)
    # Each environment differs in its opening/closing strings, the alignment
    # marker used on the first line, whether \nonumber is emitted, and whether
    # continuation lines need a double alignment marker ("doubleet").
    if environment == "eqnarray":
        result = r'\begin{eqnarray}' + '\n'
        first_term = '& = &'
        nonumber = r'\nonumber'
        end_term = '\n\\end{eqnarray}'
        doubleet = True
    elif environment == "IEEEeqnarray":
        result = r'\begin{IEEEeqnarray}{rCl}' + '\n'
        first_term = '& = &'
        nonumber = r'\nonumber'
        end_term = '\n\\end{IEEEeqnarray}'
        doubleet = True
    elif environment == "align*":
        result = r'\begin{align*}' + '\n'
        first_term = '= &'
        nonumber = ''
        end_term = '\n\\end{align*}'
        doubleet = False
    else:
        raise ValueError("Unknown environment: {}".format(environment))
    dots = ''
    if use_dots:
        dots=r'\dots'
    # Emit the top-level terms of the RHS one by one so lines can be broken
    # after every ``terms_per_line`` terms.
    terms = rhs.as_ordered_terms()
    n_terms = len(terms)
    term_count = 1
    for i in range(n_terms):
        term = terms[i]
        term_start = ''
        term_end = ''
        sign = '+'
        if term_count > terms_per_line:
            # Start of a new continuation line.
            if doubleet:
                term_start = '& & '
            else:
                term_start = '& '
            term_count = 1
        if term_count == terms_per_line:
            # End of line
            if i < n_terms-1:
                # There are terms remaining
                term_end = dots + nonumber + r'\\' + '\n'
            else:
                term_end = ''
        # Pull a leading -1 factor out of the term so the sign is printed
        # explicitly between terms instead of inside them.
        if term.as_ordered_factors()[0] == -1:
            term = -1*term
            sign = r'-'
        if i == 0: # beginning
            if sign == '+':
                sign = ''
            result += r'{:s} {:s}{:s} {:s} {:s}'.format(l.doprint(lhs),
                first_term, sign, l.doprint(term), term_end)
        else:
            result += r'{:s}{:s} {:s} {:s}'.format(term_start, sign,
                l.doprint(term), term_end)
        term_count += 1
    result += end_term
    return result
|
LatexPrinter
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_collection.py
|
{
"start": 173404,
"end": 237513
}
|
class ____(FrameBase):
    """Scalar Expr Collection

    A lazy collection representing a single (zero-dimensional) value
    produced by an expression graph.
    """

    def __repr__(self):
        return f"<dask_expr.expr.Scalar: expr={self.expr}, dtype={self.dtype}>"

    def __bool__(self):
        # A lazy scalar has no concrete value yet, so truthiness is ambiguous;
        # fail loudly instead of guessing.
        raise TypeError(
            f"Trying to convert {self} to a boolean value. Because Dask objects are "
            "lazily evaluated, they cannot be converted to a boolean value or used "
            "in boolean conditions like if statements. Try calling .compute() to "
            "force computation prior to converting to a boolean value or using in "
            "a conditional statement."
        )

    def __dask_postcompute__(self):
        # The graph yields a single partition; unwrap it with ``first``.
        return first, ()

    def to_series(self, index=0) -> Series:
        """Convert this scalar into a single-element Series collection."""
        return new_collection(expr.ScalarToSeries(self, index=index))

    def __array__(self):
        # array interface is required to support pandas instance + Scalar
        # Otherwise, above op results in pd.Series of Scalar (object dtype)
        return np.asarray(self.compute())

    @functools.cached_property
    def dtype(self):
        # Derive the dtype by wrapping the meta object in a pandas Series.
        return pd.Series(self._meta).dtype

    def to_delayed(self, optimize_graph=True):
        # The base class returns one Delayed per partition; a scalar has
        # exactly one partition, so return that single Delayed directly.
        return super().to_delayed(optimize_graph=optimize_graph)[0]
def optimize(collection, fuse=True):
    """Return a new collection with an optimized expression graph.

    When *fuse* is true, adjacent operations are additionally fused.
    """
    optimized_expr = expr.optimize(collection.expr, fuse=fuse)
    return new_collection(optimized_expr)
def from_pandas(data, npartitions=None, sort=True, chunksize=None):
    """Create a Dask DataFrame/Series from an in-memory pandas object.

    The input is split into several parts so Dask can operate on them in
    parallel. By default the input is sorted by its index first, producing
    cleanly divided partitions with known divisions; to preserve the input
    ordering make sure the index is monotonically increasing, or pass
    ``sort=False`` (which avoids reordering but leaves all divisions
    unknown).

    Parameters
    ----------
    data : pandas.DataFrame or pandas.Series
        The DataFrame/Series with which to construct a Dask DataFrame/Series.
    npartitions : int, optional, default 1
        Number of partitions to create from the index. Duplicate index
        values or too few elements may yield fewer partitions than
        requested.
    sort : bool, default True
        Sort the input by index first to obtain known divisions.
    chunksize : int, optional
        Desired number of rows per partition; actual sizes may vary with
        the data. Mutually exclusive with ``npartitions``.

    Returns
    -------
    dask.DataFrame or dask.Series
        A dask collection partitioned along the index.

    Raises
    ------
    ValueError
        If both ``npartitions`` and ``chunksize`` are specified.
    TypeError
        If ``data`` is not a pandas DataFrame or Series, or if
        ``npartitions``/``chunksize`` is not an int.
    NotImplementedError
        If the index is non-numeric and contains null values.

    See Also
    --------
    from_array : Construct a dask.DataFrame from an array that has record dtype
    read_csv : Construct a dask.DataFrame from a CSV file
    """
    # Partitioning may be requested by count or by size, but not both;
    # when neither is given, fall back to a single partition.
    if chunksize is not None and npartitions is not None:
        raise ValueError("Exactly one of npartitions and chunksize must be specified.")
    if chunksize is None and npartitions is None:
        npartitions = 1

    if not has_parallel_type(data):
        raise TypeError("Input must be a pandas DataFrame or Series.")

    # Null entries in a non-numeric index break division computation.
    if data.index.isna().any() and not _is_any_real_numeric_dtype(data.index):
        raise NotImplementedError(
            "Index in passed data is non-numeric and contains nulls, which Dask does not entirely support.\n"
            "Consider passing `data.loc[~data.isna()]` instead."
        )

    if npartitions is not None and not isinstance(npartitions, int):
        raise TypeError(
            "Please provide npartitions as an int, or possibly as None if you specify chunksize."
        )
    if chunksize is not None and not isinstance(chunksize, int):
        raise TypeError(
            "Please provide chunksize as an int, or possibly as None if you specify npartitions."
        )

    from dask.dataframe.dask_expr.io.io import FromPandas

    # Copy the input so later mutations of the caller's object cannot leak
    # into the lazily-evaluated graph.
    frame = FromPandas(
        _BackendData(data.copy()),
        npartitions=npartitions,
        sort=sort,
        chunksize=chunksize,
        pyarrow_strings_enabled=pyarrow_strings_enabled(),
    )
    return new_collection(frame)
def from_array(arr, chunksize=50_000, columns=None, meta=None):
    """Read any sliceable array into a Dask DataFrame.

    The array is sliced with getitem syntax (``x[50000:100000]``), so it
    need not be a NumPy array — it only has to support slicing and either
    be two-dimensional or have a record dtype.

    Parameters
    ----------
    arr : array_like
        The array to read.
    chunksize : int, optional
        Number of rows per partition.
    columns : list or string, optional
        Column names if a DataFrame, a single string if a Series.
    meta : object, optional
        Concrete dataframe type to use for partitions; pandas DataFrame by
        default.

    Returns
    -------
    dask.DataFrame or dask.Series
        A dask DataFrame/Series.
    """
    import dask.array as da

    # Dask arrays already carry chunk structure; delegate to the dedicated path.
    if isinstance(arr, da.Array):
        return from_dask_array(arr, columns=columns, meta=meta)

    from dask.dataframe.dask_expr.io.io import FromArray

    frame = FromArray(arr, chunksize=chunksize, original_columns=columns, meta=meta)

    # Object/unicode columns are converted to Arrow-backed strings when the
    # global option is enabled.
    needs_string_conversion = pyarrow_strings_enabled() and arr.dtype.kind in "OU"
    if needs_string_conversion:
        frame = expr.ArrowStringConversion(frame)
    return new_collection(frame)
def from_graph(layer, _meta, divisions, keys, name_prefix):
    """Wrap a pre-built task graph as a Dask DataFrame collection."""
    from dask.dataframe.dask_expr.io.io import FromGraph

    wrapped = FromGraph(
        layer=layer,
        _meta=_meta,
        divisions=divisions,
        keys=keys,
        name_prefix=name_prefix,
    )
    return new_collection(wrapped)
@dataframe_creation_dispatch.register_inplace("pandas")
def from_dict(
    data,
    npartitions,
    orient="columns",
    dtype=None,
    columns=None,
    constructor=pd.DataFrame,
):
    """Construct a Dask DataFrame from a Python dictionary.

    Parameters
    ----------
    data : dict
        Of the form ``{field: array-like}`` or ``{field: dict}``.
    npartitions : int
        Number of partitions to create from the index; the output may have
        fewer partitions depending on the size and index of the dataframe.
    orient : {'columns', 'index', 'tight'}, default 'columns'
        Whether dict keys become columns ('columns', the default) or rows
        ('index'). 'tight' assumes keys
        ['index', 'columns', 'data', 'index_names', 'column_names'].
    dtype : bool
        Data type to force, otherwise infer.
    columns : string, optional
        Column labels for ``orient='index'``; raises a ValueError with
        ``orient='columns'`` or ``orient='tight'``.
    constructor : class, default pd.DataFrame
        Class whose ``from_dict`` is used to build the in-memory frame.

    Examples
    --------
    >>> import dask.dataframe as dd
    >>> ddf = dd.from_dict({"num1": [1, 2, 3, 4], "num2": [7, 8, 9, 10]}, npartitions=2)
    """
    # Dask collections cannot be embedded in the dict values; detect and
    # reject them up front with a clear error.
    collection_types = {type(v) for v in data.values() if is_dask_collection(v)}
    if collection_types:
        raise NotImplementedError(
            "from_dict doesn't currently support Dask collections as inputs. "
            f"Objects of type {collection_types} were given in the input dict."
        )

    frame = constructor.from_dict(data, orient, dtype, columns)
    return from_pandas(frame, npartitions)
def from_dask_array(x, columns=None, index=None, meta=None) -> DataFrame:
    """Create a Dask DataFrame from a Dask Array.

    A 2d array becomes a DataFrame; a 1d array becomes a Series.

    Parameters
    ----------
    x : da.Array
        The array to convert.
    columns : list or string
        Column names if a DataFrame, a single string if a Series.
    index : dask.dataframe.Index, optional
        An optional *dask* Index for the output. By default, if ``x`` has
        any unknown chunks the output has ``None`` divisions (one per
        chunk); otherwise a default index with known divisions is created.
        Useful for conforming the result to an existing dask Series or
        DataFrame so the indices match.
    meta : object, optional
        Concrete dataframe type for the result; pandas DataFrame by
        default.

    Examples
    --------
    >>> import dask.array as da
    >>> import dask.dataframe as dd
    >>> x = da.ones((4, 2), chunks=(2, 2))
    >>> df = dd.io.from_dask_array(x, columns=['a', 'b'])
    >>> df.compute()
         a    b
    0  1.0  1.0
    1  1.0  1.0
    2  1.0  1.0
    3  1.0  1.0

    See Also
    --------
    dask.bag.to_dataframe: from dask.bag
    dask.dataframe.DataFrame.values: Reverse conversion
    dask.dataframe.DataFrame.to_records: Reverse conversion
    """
    from dask.dataframe.io import from_dask_array

    # Normalize an empty list of column names to "no columns specified".
    if isinstance(columns, list) and not columns:
        columns = None
    return from_dask_array(x, columns=columns, index=index, meta=meta)
@dataframe_creation_dispatch.register_inplace("pandas")
def read_parquet(
    path=None,
    columns=None,
    filters=None,
    categories=None,
    index=None,
    storage_options=None,
    dtype_backend=None,
    calculate_divisions=False,
    ignore_metadata_file=False,
    metadata_task_size=None,
    split_row_groups="infer",
    blocksize="default",
    aggregate_files=None,
    parquet_file_extension=(".parq", ".parquet", ".pq"),
    filesystem="fsspec",
    engine=None,
    arrow_to_pandas=None,
    **kwargs,
):
    """
    Read a Parquet file into a Dask DataFrame

    This reads a directory of Parquet data into a Dask.dataframe, one file per
    partition.  It selects the index among the sorted columns if any exist.

    .. note::
        Dask automatically resizes partitions to ensure that each partition is of
        adequate size. The optimizer uses the ratio of selected columns to total
        columns to squash multiple files into one partition.

        Additionally, the Optimizer uses a minimum size per partition (default 75MB)
        to avoid too many small partitions. This configuration can be set with

        >>> dask.config.set({"dataframe.parquet.minimum-partition-size": "100MB"})  # doctest: +SKIP

    .. note::
        Specifying ``filesystem="arrow"`` leverages a complete reimplementation of
        the Parquet reader that is solely based on PyArrow. It is significantly faster
        than the legacy implementation, but doesn't yet support all features.

    Parameters
    ----------
    path : str or list
        Source directory for data, or path(s) to individual parquet files.
        Prefix with a protocol like ``s3://`` to read from alternative
        filesystems. To read from multiple files you can pass a globstring or a
        list of paths, with the caveat that they must all have the same
        protocol.
    columns : str or list, default None
        Field name(s) to read in as columns in the output. By default all
        non-index fields will be read (as determined by the pandas parquet
        metadata, if present). Provide a single field name instead of a list to
        read in the data as a Series.
    filters : Union[List[Tuple[str, str, Any]], List[List[Tuple[str, str, Any]]]], default None
        List of filters to apply, like ``[[('col1', '==', 0), ...], ...]``.
        Using this argument will result in row-wise filtering of the final partitions.

        Predicates can be expressed in disjunctive normal form (DNF). This means that
        the inner-most tuple describes a single column predicate. These inner predicates
        are combined with an AND conjunction into a larger predicate. The outer-most
        list then combines all of the combined filters with an OR disjunction.

        Predicates can also be expressed as a ``List[Tuple]``. These are evaluated
        as an AND conjunction. To express OR in predicates, one must use the
        (preferred for "pyarrow") ``List[List[Tuple]]`` notation.
    index : str, list or False, default None
        Field name(s) to use as the output frame index. By default will be
        inferred from the pandas parquet file metadata, if present. Use ``False``
        to read all fields as columns.
    categories : list or dict, default None
        For any fields listed here, if the parquet encoding is Dictionary,
        the column will be created with dtype category. Use only if it is
        guaranteed that the column is encoded as dictionary in all row-groups.
        If a list, assumes up to 2**16-1 labels; if a dict, specify the number
        of labels expected; if None, will load categories automatically for
        data written by dask, not otherwise.
    storage_options : dict, default None
        Key/value pairs to be passed on to the file-system backend, if any.
        Note that the default file-system backend can be configured with the
        ``filesystem`` argument, described below.
    open_file_options : dict, default None
        Key/value arguments to be passed along to ``AbstractFileSystem.open``
        when each parquet data file is open for reading. Experimental
        (optimized) "precaching" for remote file systems (e.g. S3, GCS) can
        be enabled by adding ``{"method": "parquet"}`` under the
        ``"precache_options"`` key. Also, a custom file-open function can be
        used (instead of ``AbstractFileSystem.open``), by specifying the
        desired function under the ``"open_file_func"`` key.
    dtype_backend : {'numpy_nullable', 'pyarrow'}, defaults to NumPy backed DataFrames
        Which dtype_backend to use, e.g. whether a DataFrame should have NumPy arrays,
        nullable dtypes are used for all dtypes that have a nullable implementation
        when 'numpy_nullable' is set, pyarrow is used for all dtypes if 'pyarrow'
        is set.

        ``dtype_backend="pyarrow"`` requires ``pandas`` 1.5+.
    calculate_divisions : bool, default False
        Whether to use min/max statistics from the footer metadata (or global
        ``_metadata`` file) to calculate divisions for the output DataFrame
        collection. Divisions will not be calculated if statistics are missing.
        This option will be ignored if ``index`` is not specified and there is
        no physical index column specified in the custom "pandas" Parquet
        metadata. Note that ``calculate_divisions=True`` may be extremely slow
        when no global ``_metadata`` file is present, especially when reading
        from remote storage. Set this to ``True`` only when known divisions
        are needed for your workload (see :ref:`dataframe-design-partitions`).
    ignore_metadata_file : bool, default False
        Whether to ignore the global ``_metadata`` file (when one is present).
        If ``True``, or if the global ``_metadata`` file is missing, the parquet
        metadata may be gathered and processed in parallel. Parallel metadata
        processing is currently supported for ``ArrowDatasetEngine`` only.
    metadata_task_size : int, default configurable
        If parquet metadata is processed in parallel (see ``ignore_metadata_file``
        description above), this argument can be used to specify the number of
        dataset files to be processed by each task in the Dask graph.  If this
        argument is set to ``0``, parallel metadata processing will be disabled.
        The default values for local and remote filesystems can be specified
        with the "metadata-task-size-local" and "metadata-task-size-remote"
        config fields, respectively (see "dataframe.parquet").
    split_row_groups : 'infer', 'adaptive', bool, or int, default 'infer'
        If True, then each output dataframe partition will correspond to a single
        parquet-file row-group. If False, each partition will correspond to a
        complete file.  If a positive integer value is given, each dataframe
        partition will correspond to that number of parquet row-groups (or fewer).
        If 'adaptive', the metadata of each file will be used to ensure that every
        partition satisfies ``blocksize``. If 'infer' (the default), the
        uncompressed storage-size metadata in the first file will be used to
        automatically set ``split_row_groups`` to either 'adaptive' or ``False``.
    blocksize : int or str, default 'default'
        The desired size of each output ``DataFrame`` partition in terms of total
        (uncompressed) parquet storage space. This argument is currently used to
        set the default value of ``split_row_groups`` (using row-group metadata
        from a single file), and will be ignored if ``split_row_groups`` is not
        set to 'infer' or 'adaptive'. Default is 256 MiB.
    aggregate_files : bool or str, default None
        WARNING: Passing a string argument to ``aggregate_files`` will result
        in experimental behavior. This behavior may change in the future.

        Whether distinct file paths may be aggregated into the same output
        partition. This parameter is only used when `split_row_groups` is set to
        'infer', 'adaptive' or to an integer >1. A setting of True means that any
        two file paths may be aggregated into the same output partition, while
        False means that inter-file aggregation is prohibited.

        For "hive-partitioned" datasets, a "partition"-column name can also be
        specified. In this case, we allow the aggregation of any two files
        sharing a file path up to, and including, the corresponding directory name.
        For example, if ``aggregate_files`` is set to ``"section"`` for the
        directory structure below, ``03.parquet`` and ``04.parquet`` may be
        aggregated together, but ``01.parquet`` and ``02.parquet`` cannot be.
        If, however, ``aggregate_files`` is set to ``"region"``, ``01.parquet``
        may be aggregated with ``02.parquet``, and ``03.parquet`` may be aggregated
        with ``04.parquet``::

            dataset-path/
            ├── region=1/
            │   ├── section=a/
            │   │   └── 01.parquet
            │   ├── section=b/
            │   └── └── 02.parquet
            └── region=2/
                ├── section=a/
                │   ├── 03.parquet
                └── └── 04.parquet

        Note that the default behavior of ``aggregate_files`` is ``False``.
    parquet_file_extension: str, tuple[str], or None, default (".parq", ".parquet", ".pq")
        A file extension or an iterable of extensions to use when discovering
        parquet files in a directory. Files that don't match these extensions
        will be ignored. This argument only applies when ``paths`` corresponds
        to a directory and no ``_metadata`` file is present (or
        ``ignore_metadata_file=True``). Passing in ``parquet_file_extension=None``
        will treat all files in the directory as parquet files.

        The purpose of this argument is to ensure that the engine will ignore
        unsupported metadata files (like Spark's '_SUCCESS' and 'crc' files).
        It may be necessary to change this argument if the data files in your
        parquet dataset do not end in ".parq", ".parquet", or ".pq".
    filesystem: "fsspec", "arrow", or fsspec.AbstractFileSystem backend to use.
    dataset: dict, default None
        Dictionary of options to use when creating a ``pyarrow.dataset.Dataset`` object.
        These options may include a "filesystem" key to configure the desired
        file-system backend. However, the top-level ``filesystem`` argument will always
        take precedence.

        **Note**: The ``dataset`` options may include a "partitioning" key.
        However, since ``pyarrow.dataset.Partitioning``
        objects cannot be serialized, the value can be a dict of key-word
        arguments for the ``pyarrow.dataset.partitioning`` API
        (e.g. ``dataset={"partitioning": {"flavor": "hive", "schema": ...}}``).
        Note that partitioned columns will not be converted to categorical
        dtypes when a custom partitioning schema is specified in this way.
    read: dict, default None
        Dictionary of options to pass through to ``engine.read_partitions``
        using the ``read`` key-word argument.
    arrow_to_pandas: dict, default None
        Dictionary of options to use when converting from ``pyarrow.Table`` to
        a pandas ``DataFrame`` object. Only used by the "arrow" engine.
    **kwargs: dict (of dicts)
        Options to pass through to ``engine.read_partitions`` as stand-alone
        key-word arguments. Note that these options will be ignored by the
        engines defined in ``dask.dataframe``, but may be used by other custom
        implementations.

    Examples
    --------
    >>> df = dd.read_parquet('s3://bucket/my-parquet-data')  # doctest: +SKIP

    See Also
    --------
    to_parquet
    pyarrow.parquet.ParquetDataset
    """
    from dask.dataframe.dask_expr.io.parquet import (
        ReadParquetFSSpec,
        ReadParquetPyarrowFS,
        _set_parquet_engine,
    )

    # Accept path-like objects (e.g. pathlib.Path) by converting to str.
    if not isinstance(path, str):
        path = stringify_path(path)

    kwargs["dtype_backend"] = dtype_backend
    if arrow_to_pandas:
        kwargs["arrow_to_pandas"] = arrow_to_pandas

    if filters is not None:
        # Validate each (column, op, value) predicate up front. Unpack the
        # tuple directly rather than binding it to a loop variable named
        # ``filter``, which would shadow the builtin.
        for col, op, val in flatten(filters, container=list):
            if op == "in" and not isinstance(val, (set, list, tuple)):
                raise TypeError("Value of 'in' filter must be a list, set or tuple.")

    # The pyarrow-native reader is selected either by passing a pyarrow
    # FileSystem instance or the strings "arrow"/"pyarrow".
    if isinstance(filesystem, pa_fs.FileSystem) or (
        isinstance(filesystem, str) and filesystem.lower() in ("arrow", "pyarrow")
    ):
        if parse_version(pa.__version__) < parse_version("15.0.0"):
            raise ValueError(
                "pyarrow>=15.0.0 is required to use the pyarrow filesystem."
            )
        # The pyarrow-based reader does not (yet) support several of the
        # fsspec-reader tuning knobs; reject any non-default values loudly.
        if metadata_task_size is not None:
            raise NotImplementedError(
                "metadata_task_size is not supported when using the pyarrow filesystem."
            )
        if split_row_groups != "infer":
            raise NotImplementedError(
                "split_row_groups is not supported when using the pyarrow filesystem."
            )
        if blocksize is not None and blocksize != "default":
            raise NotImplementedError(
                "blocksize is not supported when using the pyarrow filesystem."
            )
        if aggregate_files is not None:
            raise NotImplementedError(
                "aggregate_files is not supported when using the pyarrow filesystem."
            )
        if parquet_file_extension != (".parq", ".parquet", ".pq"):
            raise NotImplementedError(
                "parquet_file_extension is not supported when using the pyarrow filesystem."
            )
        if engine is not None:
            raise NotImplementedError(
                "engine is not supported when using the pyarrow filesystem."
            )

        return new_collection(
            ReadParquetPyarrowFS(
                path,
                columns=_convert_to_list(columns),
                filters=filters,
                categories=categories,
                index=index,
                calculate_divisions=calculate_divisions,
                storage_options=storage_options,
                filesystem=filesystem,
                ignore_metadata_file=ignore_metadata_file,
                arrow_to_pandas=arrow_to_pandas,
                pyarrow_strings_enabled=pyarrow_strings_enabled(),
                kwargs=kwargs,
                _series=isinstance(columns, str),
            )
        )

    # Default path: fsspec-based reader.
    return new_collection(
        ReadParquetFSSpec(
            path,
            columns=_convert_to_list(columns),
            filters=filters,
            categories=categories,
            index=index,
            storage_options=storage_options,
            calculate_divisions=calculate_divisions,
            ignore_metadata_file=ignore_metadata_file,
            metadata_task_size=metadata_task_size,
            split_row_groups=split_row_groups,
            blocksize=blocksize,
            aggregate_files=aggregate_files,
            parquet_file_extension=parquet_file_extension,
            filesystem=filesystem,
            engine=_set_parquet_engine(engine),
            kwargs=kwargs,
            _series=isinstance(columns, str),
        )
    )
def concat(
    dfs,
    axis=0,
    join="outer",
    ignore_unknown_divisions=False,
    ignore_order=False,
    interleave_partitions=False,
    **kwargs,
):
    """Concatenate DataFrames along rows.
    - When axis=0 (default), concatenate DataFrames row-wise:
    - If all divisions are known and ordered, concatenate DataFrames keeping
    divisions. When divisions are not ordered, specifying
    interleave_partitions=True allows concatenate divisions each by each.
    - If any of division is unknown, concatenate DataFrames resetting its
    division to unknown (None)
    - When axis=1, concatenate DataFrames column-wise:
    - Allowed if all divisions are known.
    - If any of division is unknown, it raises ValueError.
    Parameters
    ----------
    dfs : list
    List of dask.DataFrames to be concatenated
    axis : {0, 1, 'index', 'columns'}, default 0
    The axis to concatenate along
    join : {'inner', 'outer'}, default 'outer'
    How to handle indexes on other axis
    interleave_partitions : bool, default False
    Whether to concatenate DataFrames ignoring its order. If True, every
    divisions are concatenated each by each.
    ignore_unknown_divisions : bool, default False
    By default a warning is raised if any input has unknown divisions.
    Set to True to disable this warning.
    ignore_order : bool, default False
    Whether to ignore order when doing the union of categoricals.
    Notes
    -----
    This differs from ``pd.concat`` when concatenating Categoricals
    with different categories. Pandas currently coerces those to objects
    before concatenating. Coercing to objects is very expensive for large
    arrays, so dask preserves the Categoricals by taking the union of
    the categories.
    Examples
    --------
    If all divisions are known and ordered, divisions are kept.
    >>> import dask.dataframe as dd
    >>> a  # doctest: +SKIP
    dd.DataFrame<x, divisions=(1, 3, 5)>
    >>> b  # doctest: +SKIP
    dd.DataFrame<y, divisions=(6, 8, 10)>
    >>> dd.concat([a, b])  # doctest: +SKIP
    dd.DataFrame<concat-..., divisions=(1, 3, 6, 8, 10)>
    Unable to concatenate if divisions are not ordered.
    >>> a  # doctest: +SKIP
    dd.DataFrame<x, divisions=(1, 3, 5)>
    >>> b  # doctest: +SKIP
    dd.DataFrame<y, divisions=(2, 3, 6)>
    >>> dd.concat([a, b])  # doctest: +SKIP
    ValueError: All inputs have known divisions which cannot be concatenated
    in order. Specify interleave_partitions=True to ignore order
    Specify interleave_partitions=True to ignore the division order.
    >>> dd.concat([a, b], interleave_partitions=True)  # doctest: +SKIP
    dd.DataFrame<concat-..., divisions=(1, 2, 3, 5, 6)>
    If any of division is unknown, the result division will be unknown
    >>> a  # doctest: +SKIP
    dd.DataFrame<x, divisions=(None, None)>
    >>> b  # doctest: +SKIP
    dd.DataFrame<y, divisions=(1, 4, 10)>
    >>> dd.concat([a, b])  # doctest: +SKIP
    dd.DataFrame<concat-..., divisions=(None, None, None, None)>
    By default concatenating with unknown divisions will raise a warning.
    Set ``ignore_unknown_divisions=True`` to disable this:
    >>> dd.concat([a, b], ignore_unknown_divisions=True)# doctest: +SKIP
    dd.DataFrame<concat-..., divisions=(None, None, None, None)>
    Different categoricals are unioned
    >>> dd.concat([
    ...     dd.from_pandas(pd.Series(['a', 'b'], dtype='category'), 1),
    ...     dd.from_pandas(pd.Series(['a', 'c'], dtype='category'), 1),
    ... ], interleave_partitions=True).dtype
    CategoricalDtype(categories=['a', 'b', 'c'], ordered=False, categories_dtype=object)
    """
    if not isinstance(dfs, list):
        raise TypeError("dfs must be a list of DataFrames/Series objects")
    if len(dfs) == 0:
        raise ValueError("No objects to concatenate")
    if len(dfs) == 1:
        # Single-input fast path: nothing to concatenate. Column-wise concat
        # of a lone Series still promotes it to a one-column DataFrame.
        if axis == 1 and isinstance(dfs[0], Series):
            return dfs[0].to_frame()
        return dfs[0]
    if join not in ("inner", "outer"):
        raise ValueError("'join' must be 'inner' or 'outer'")
    # Wrap any plain pandas inputs into Dask collections.
    dfs = [from_pandas(df) if not isinstance(df, FrameBase) else df for df in dfs]
    if axis == 1:
        # Column-wise concat ignores empty DataFrames (no columns); Series are
        # always kept since they contribute exactly one column.
        dfs = [df for df in dfs if len(df.columns) > 0 or isinstance(df, Series)]
    # Defer the actual work to the Concat expression node.
    return new_collection(
        Concat(
            join,
            ignore_order,
            axis,
            ignore_unknown_divisions,
            interleave_partitions,
            kwargs,
            *dfs,
        )
    )
def melt(
    frame,
    id_vars=None,
    value_vars=None,
    var_name=None,
    value_name="value",
    col_level=None,
):
    """Unpivot ``frame`` from wide to long format, partition by partition.

    Thin wrapper that applies ``pandas.melt`` to every partition via
    :func:`map_partitions`. Divisions are cleared because melting does not
    preserve a meaningful index.
    """
    melt_kwargs = {
        "id_vars": id_vars,
        "value_vars": value_vars,
        "var_name": var_name,
        "value_name": value_name,
        "col_level": col_level,
    }
    return map_partitions(M.melt, frame, clear_divisions=True, **melt_kwargs)
@wraps(pd.merge)
def merge(
    left,
    right,
    how="inner",
    on=None,
    left_on=None,
    right_on=None,
    left_index=False,
    right_index=False,
    suffixes=("_x", "_y"),
    indicator=False,
    shuffle_method=None,
    npartitions=None,
    broadcast=None,
):
    # Merging *on* a Dask collection (rather than a column name) is not
    # supported; fail early with a clear signal.
    for o in [on, left_on, right_on]:
        if isinstance(o, FrameBase):
            raise NotImplementedError()
    if not on and not left_on and not right_on and not left_index and not right_index:
        # No join keys given: default to the pandas behavior of joining on
        # the intersection of column names, falling back to the index.
        on = [c for c in left.columns if c in right.columns]
        if not on:
            left_index = right_index = True
    if on and not left_on and not right_on:
        left_on = right_on = on
    # Normalize list-like keys to plain lists (but leave collections alone).
    if pd.api.types.is_list_like(left_on) and not isinstance(left_on, FrameBase):
        left_on = list(left_on)
    if pd.api.types.is_list_like(right_on) and not isinstance(right_on, FrameBase):
        right_on = list(right_on)
    supported_how = ("left", "right", "outer", "inner", "leftsemi")
    if how not in supported_how:
        raise ValueError(
            f"dask.dataframe.merge does not support how='{how}'. "
            f"Options are: {supported_how}."
        )
    if how == "leftsemi":
        # leftsemi is emulated as an inner join against the deduplicated join
        # columns of `right`; this only works when the keys are real columns.
        if right_index or any(
            o not in right.columns for o in _convert_to_list(right_on)
        ):
            raise NotImplementedError(
                "how='leftsemi' does not support right_index=True or on columns from the index"
            )
        else:
            right = right[_convert_to_list(right_on)].rename(
                columns=dict(zip(_convert_to_list(right_on), _convert_to_list(left_on)))
            )
            right_on = left_on
    # Transform pandas objects into dask.dataframe objects
    if not is_dask_collection(left):
        if right_index and left_on:  # change to join on index
            left = left.set_index(left[left_on])
            left_on = None
            left_index = True
        left = from_pandas(left, npartitions=1)
    if not is_dask_collection(right):
        if left_index and right_on:  # change to join on index
            right = right.set_index(right[right_on])
            right_on = None
            right_index = True
        right = from_pandas(right, npartitions=1)
    assert is_dataframe_like(right._meta)
    if left_on and right_on:
        # Warn when the join-key dtypes disagree (a common silent-miss cause).
        warn_dtype_mismatch(left, right, left_on, right_on)
    result = new_collection(
        Merge(
            left,
            right,
            how=how,
            left_on=left_on,
            right_on=right_on,
            left_index=left_index,
            right_index=right_index,
            suffixes=suffixes,
            indicator=indicator,
            shuffle_method=get_specified_shuffle(shuffle_method),
            _npartitions=npartitions,
            broadcast=broadcast,
        )
    )
    # When the two inputs disagree on the index name, normalize the result's
    # index axis name to whatever the merged meta reports.
    if left._meta.index.name != right._meta.index.name:
        return result.rename_axis(index=result._meta.index.name)
    else:
        return result
@wraps(pd.merge_asof)
def merge_asof(
    left,
    right,
    on=None,
    left_on=None,
    right_on=None,
    left_index=False,
    right_index=False,
    by=None,
    left_by=None,
    right_by=None,
    suffixes=("_x", "_y"),
    tolerance=None,
    allow_exact_matches=True,
    direction="backward",
):
    # Validate `direction` up front so every code path fails fast on typos.
    if direction not in ["backward", "forward", "nearest"]:
        raise ValueError(
            "Invalid merge_asof direction. Choose from 'backward',"
            " 'forward', or 'nearest'"
        )
    kwargs = {
        "on": on,
        "left_on": left_on,
        "right_on": right_on,
        "left_index": left_index,
        "right_index": right_index,
        "by": by,
        "left_by": left_by,
        "right_by": right_by,
        "suffixes": suffixes,
        "tolerance": tolerance,
        "allow_exact_matches": allow_exact_matches,
        "direction": direction,
    }
    if left is None or right is None:
        raise ValueError("Cannot merge_asof on None")
    # Fast path: two plain pandas frames are merged eagerly by pandas itself.
    if isinstance(left, pd.DataFrame) and isinstance(right, pd.DataFrame):
        return pd.merge_asof(left, right, **kwargs)
    # Normalize `on` into explicit left_on/right_on for the lazy expression.
    if on is not None:
        if left_on is not None or right_on is not None:
            raise ValueError(
                "Can only pass argument 'on' OR 'left_on' and 'right_on', not a "
                "combination of both."
            )
        left_on = right_on = on
        kwargs["left_on"] = left_on
        kwargs["right_on"] = right_on
        del kwargs["on"]
    for o in [left_on, right_on]:
        if isinstance(o, FrameBase):
            raise NotImplementedError(
                "Dask collections not currently allowed in merge columns"
            )
    # Promote pandas inputs to single-partition Dask collections.
    if not is_dask_collection(left):
        left = from_pandas(left, npartitions=1)
    if not is_dask_collection(right):
        right = from_pandas(right, npartitions=1)
    # Normalize `by` into explicit left_by/right_by, mirroring `on` above.
    if by is not None:
        if left_by is not None or right_by is not None:
            raise ValueError(
                "Can only pass argument 'by' OR 'left_by' and 'right_by', not a combination of both."
            )
        kwargs["left_by"] = kwargs["right_by"] = by
        del kwargs["by"]
    # Exactly one of left_by/right_by is invalid (the original message here
    # incorrectly referred to left_on/right_on).
    if (left_by is None) != (right_by is None):
        raise ValueError("Must specify both left_by and right_by if one is specified.")
    from dask.dataframe.dask_expr._merge_asof import MergeAsof
    return new_collection(MergeAsof(left, right, **kwargs))
def from_map(
    func,
    *iterables,
    args=None,
    meta=no_default,
    divisions=None,
    label=None,
    enforce_metadata=False,
    **kwargs,
):
    """Create a DataFrame collection from a custom function map.
    ``from_map`` is the preferred option when reading from data sources
    that are not natively supported by Dask or if the data source
    requires custom handling before handing things off to Dask DataFrames.
    Examples are things like binary files or other unstructured data that
    doesn't have an IO connector.
    ``from_map`` supports column projection by the optimizer. The optimizer
    tries to push column selections into the from_map call if the function
    supports a ``columns`` argument.
    Parameters
    ----------
    func : callable
    Function used to create each partition. Column projection will be
    enabled if the function has a ``columns`` keyword argument.
    *iterables : Iterable objects
    Iterable objects to map to each output partition. All iterables must
    be the same length. This length determines the number of partitions
    in the output collection (only one element of each iterable will
    be passed to ``func`` for each partition).
    args : list or tuple, optional
    Positional arguments to broadcast to each output partition. Note
    that these arguments will always be passed to ``func`` after the
    ``iterables`` positional arguments.
    $META
    divisions : tuple, str, optional
    Partition boundaries along the index.
    For tuple, see https://docs.dask.org/en/latest/dataframe-design.html#partitions
    For string 'sorted' will compute the delayed values to find index
    values. Assumes that the indexes are mutually sorted.
    If None, then won't use index information
    label : str, optional
    String to use as the function-name label in the output
    collection-key names.
    token : str, optional
    Not supported by this implementation; passing it raises
    ``NotImplementedError``.
    enforce_metadata : bool, default False
    Whether to enforce at runtime that the structure of the DataFrame
    produced by ``func`` actually matches the structure of ``meta``.
    This will rename and reorder columns for each partition,
    and will raise an error if this doesn't work,
    but it won't raise if dtypes don't match.
    **kwargs:
    Key-word arguments to broadcast to each output partition. These
    same arguments will be passed to ``func`` for every output partition.
    Examples
    --------
    >>> import pandas as pd
    >>> import dask.dataframe as dd
    >>> func = lambda x, size=0: pd.Series([x] * size)
    >>> inputs = ["A", "B"]
    >>> dd.from_map(func, inputs, size=2).compute()
    0    A
    1    A
    0    B
    1    B
    dtype: string
    The optimizer will identify a column selection that happens after from_map
    and push the columns argument into the actual map call to drop unnecessary
    columns as early as possible.
    >>> def map_function(x, columns=None):
    ...     df = pd.DataFrame({"a": [1, 2], "b": x})
    ...     if columns is not None:
    ...         df = df[columns]
    ...     return df
    >>> dd.from_map(map_function, [1, 2])["b"].compute()
    0    1
    1    1
    0    2
    1    2
    Name: b, dtype: int64
    This API can also be used as an alternative to other file-based
    IO functions, like ``read_csv`` (which are already just
    ``from_map`` wrapper functions):
    >>> import pandas as pd
    >>> import dask.dataframe as dd
    >>> paths = ["0.csv", "1.csv", "2.csv"]
    >>> dd.from_map(pd.read_csv, paths).head()  # doctest: +SKIP
    name
    timestamp
    2000-01-01 00:00:00   Laura
    2000-01-01 00:00:01  Oliver
    2000-01-01 00:00:02   Alice
    2000-01-01 00:00:03  Victor
    2000-01-01 00:00:04     Bob
    Since ``from_map`` allows you to map an arbitrary function
    to any number of iterable objects, it can be a very convenient
    means of implementing functionality that may be missing
    from other DataFrame-creation methods. For example, if you
    happen to have apriori knowledge about the number of rows
    in each of the files in a dataset, you can generate a
    DataFrame collection with a global RangeIndex:
    >>> import pandas as pd
    >>> import numpy as np
    >>> import dask.dataframe as dd
    >>> paths = ["0.csv", "1.csv", "2.csv"]
    >>> file_sizes = [86400, 86400, 86400]
    >>> def func(path, row_offset):
    ...     # Read parquet file and set RangeIndex offset
    ...     df = pd.read_csv(path)
    ...     return df.set_index(
    ...         pd.RangeIndex(row_offset, row_offset+len(df))
    ...     )
    >>> def get_ddf(paths, file_sizes):
    ...     offsets = [0] + list(np.cumsum(file_sizes))
    ...     return dd.from_map(
    ...         func, paths, offsets[:-1], divisions=offsets
    ...     )
    >>> ddf = get_ddf(paths, file_sizes)  # doctest: +SKIP
    >>> ddf.index  # doctest: +SKIP
    Dask Index Structure:
    npartitions=3
    0         int64
    86400       ...
    172800      ...
    259200      ...
    dtype: int64
    Dask Name: myfunc, 6 tasks
    """
    from dask.dataframe.dask_expr.io import FromMap, FromMapProjectable
    from dask.dataframe.io.utils import DataFrameIOFunction
    if "token" in kwargs:
        # This option doesn't really make sense in dask-expr
        raise NotImplementedError("dask_expr does not support a token argument.")
    # Validate that all iterables agree on a (non-zero) length; non-sized
    # iterables are materialized into lists so they can be measured.
    lengths = set()
    iterables = list(iterables)
    for i, iterable in enumerate(iterables):
        if not isinstance(iterable, Iterable):
            raise ValueError(
                f"All elements of `iterables` must be Iterable, got {type(iterable)}"
            )
        try:
            lengths.add(len(iterable))
        except (AttributeError, TypeError):
            iterables[i] = list(iterable)
            lengths.add(len(iterables[i]))
    if len(lengths) == 0:
        raise ValueError("`from_map` requires at least one Iterable input")
    elif len(lengths) > 1:
        raise ValueError("All `iterables` must have the same length")
    if lengths == {0}:
        raise ValueError("All `iterables` must have a non-zero length")
    # Check if `func` supports column projection
    allow_projection = False
    columns_arg_required = False
    if param := inspect.signature(func).parameters.get("columns", None):
        allow_projection = True
        columns_arg_required = param.default is param.empty
        if meta is no_default and columns_arg_required:
            # A mandatory `columns` parameter cannot be satisfied by meta
            # inference, so the caller must supply one or the other.
            raise TypeError(
                "Argument `func` of `from_map` has a required `columns` "
                "parameter and no `meta` was provided. "
                "Either provide `meta` yourself or make `columns` an optional argument."
            )
    elif isinstance(func, DataFrameIOFunction):
        warnings.warn(
            "dask_expr does not support the DataFrameIOFunction "
            "protocol for column projection. To enable column "
            "projection, please ensure that the signature of `func` "
            "includes a `columns=` keyword argument instead."
        )
    else:
        allow_projection = False
    args = [] if args is None else args
    kwargs = {} if kwargs is None else kwargs
    if allow_projection:
        columns = kwargs.pop("columns", None)
        result = new_collection(
            FromMapProjectable(
                func,
                iterables,
                columns,
                args,
                kwargs,
                columns_arg_required,
                meta,
                enforce_metadata,
                divisions,
                label,
            )
        )
    else:
        result = new_collection(
            FromMap(
                func,
                iterables,
                args,
                kwargs,
                meta,
                enforce_metadata,
                divisions,
                label,
            )
        )
    # Optionally convert object-dtype strings to Arrow-backed strings.
    if pyarrow_strings_enabled():
        return new_collection(expr.ArrowStringConversion(result))
    return result
def repartition(df, divisions, force=False):
    """Repartition dataframe along new divisions

    Build a collection holding the same data as ``df`` but partitioned on
    the boundary values in ``divisions``. Dask collections are re-divided
    lazily; plain pandas DataFrames/Series are wrapped into a Dask
    collection split along the requested divisions.

    Parameters
    ----------
    divisions : list
        List of partitions to be used
    force : bool, default False
        Allows the expansion of the existing divisions.
        If False then the new divisions lower and upper bounds must be
        the same as the old divisions.

    Examples
    --------
    >>> df = df.repartition([0, 5, 10, 20])  # doctest: +SKIP

    Also works on Pandas objects

    >>> ddf = dd.repartition(df, [0, 5, 10, 20])  # doctest: +SKIP
    """
    # Dask collections already know how to repartition themselves.
    if isinstance(df, FrameBase):
        return df.repartition(divisions=divisions, force=force)
    # Pandas-like inputs are wrapped and split along the given divisions.
    if is_dataframe_like(df) or is_series_like(df):
        backend_data = _BackendData(df)
        return new_collection(
            FromPandasDivisions(
                backend_data,
                divisions=divisions,
                pyarrow_strings_enabled=pyarrow_strings_enabled(),
            )
        )
    raise NotImplementedError(f"repartition is not implemented for {type(df)}.")
def pivot_table(df, index, columns, values, aggfunc="mean"):
    """
    Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``
    must have category dtype to infer result's ``columns``.
    ``index``, ``columns``, and ``aggfunc`` must be all scalar.
    ``values`` can be scalar or list-like.
    Parameters
    ----------
    df : DataFrame
    index : scalar
    column to be index
    columns : scalar
    column to be columns
    values : scalar or list(scalar)
    column(s) to aggregate
    aggfunc : {'mean', 'sum', 'count', 'first', 'last'}, default 'mean'
    Returns
    -------
    table : DataFrame
    See Also
    --------
    pandas.DataFrame.pivot_table
    """
    # All validation happens against the collection's meta, so no real data
    # is touched until the expression is computed.
    if not is_scalar(index) or index not in df._meta.columns:
        raise ValueError("'index' must be the name of an existing column")
    if not is_scalar(columns) or columns not in df._meta.columns:
        raise ValueError("'columns' must be the name of an existing column")
    # The output columns come from the categories of `columns`, so they must
    # be categorical AND fully known ahead of time.
    if not methods.is_categorical_dtype(df._meta[columns]):
        raise ValueError("'columns' must be category dtype")
    if not has_known_categories(df._meta[columns]):
        raise ValueError("'columns' must have known categories")
    # `values` may be a single column name or a list of column names; either
    # way every referenced column must exist.
    if not (
        is_scalar(values)
        and values in df._meta.columns
        or not is_scalar(values)
        and all(is_scalar(x) and x in df._meta.columns for x in values)
    ):
        raise ValueError("'values' must refer to an existing column or columns")
    available_aggfuncs = ["mean", "sum", "count", "first", "last"]
    if not is_scalar(aggfunc) or aggfunc not in available_aggfuncs:
        raise ValueError(
            "aggfunc must be either " + ", ".join(f"'{x}'" for x in available_aggfuncs)
        )
    return new_collection(
        PivotTable(df, index=index, columns=columns, values=values, aggfunc=aggfunc)
    )
@derived_from(pd, ua_args=["downcast"])
def to_numeric(arg, errors="raise", downcast=None, meta=None):
    """
    Return type depends on input. Delayed if scalar, otherwise same as input.
    For errors, only "raise" and "coerce" are allowed.
    """
    if errors not in ("raise", "coerce"):
        raise ValueError("invalid error value specified")
    # Scalars are converted lazily via a Delayed; `meta` is meaningless here.
    if pd_is_scalar(arg):
        if meta is not None:
            raise KeyError("``meta`` is not allowed when input is a scalar.")
        return delayed(pd.to_numeric, pure=True)(arg, errors=errors, downcast=downcast)
    # Array inputs round-trip through a Dask Series and back to a dask array.
    if is_arraylike(arg):
        return new_collection(
            ToNumeric(
                from_array(arg).astype(arg.dtype), errors=errors, downcast=downcast
            )
        ).to_dask_array(meta=meta)
    # Series inputs map directly onto the lazy ToNumeric expression.
    if is_series_like(arg):
        return new_collection(
            ToNumeric(frame=arg, errors=errors, downcast=downcast, meta=meta)
        )
    raise TypeError(
        "arg must be a list, tuple, dask.array.Array, or dask.dataframe.Series"
    )
@wraps(pd.to_datetime)
def to_datetime(arg, meta=None, **kwargs):
    # A UTC result dtype is implied by the `utc=True` keyword; thread the
    # timezone through the meta construction below.
    tz_kwarg = {"tz": "utc"} if kwargs.get("utc") else {}
    (arg,) = _maybe_from_pandas([arg])
    if meta is None:
        if isinstance(arg, Index):
            # Index input -> empty DatetimeIndex meta carrying the same name.
            meta = get_meta_library(arg).DatetimeIndex([], **tz_kwarg)
            meta.name = arg.name
            if PANDAS_GE_300:
                # pandas 3 preserves datetime resolution; match the unit that
                # to_datetime would actually infer from a non-empty sample.
                meta = meta.as_unit(
                    get_meta_library(arg).to_datetime(meta_nonempty(arg._meta)).unit
                )
        elif not (is_dataframe_like(arg) or is_series_like(arg)):
            raise NotImplementedError(
                "dask.dataframe.to_datetime does not support "
                "non-index-able arguments (like scalars)"
            )
        else:
            # Series/DataFrame input -> single-Timestamp Series meta whose
            # index matches the input's index dtype and name.
            meta = meta_series_constructor(arg)([pd.Timestamp("2000", **tz_kwarg)])
            meta.index = meta.index.astype(arg.index.dtype)
            meta.index.name = arg.index.name
    else:
        meta = make_meta(meta)
    # Silently drop the deprecated pandas keyword rather than forwarding it.
    kwargs.pop("infer_datetime_format", None)
    return new_collection(ToDatetime(frame=arg, kwargs=kwargs, meta=meta))
@wraps(pd.to_timedelta)
def to_timedelta(arg, unit=None, errors="raise"):
    # Lazy counterpart of pandas.to_timedelta; only Dask Series are accepted.
    if isinstance(arg, Series):
        return new_collection(ToTimedelta(frame=arg, unit=unit, errors=errors))
    raise TypeError("arg must be a Series")
def _from_scalars(scalars, meta, names):
    # Assemble a collection whose values come from individual scalar expressions.
    scalars_expr = FromScalars(meta, names, *scalars)
    return new_collection(scalars_expr)
@insert_meta_param_description
def map_partitions(
    func,
    *args,
    meta=no_default,
    enforce_metadata=True,
    transform_divisions=True,
    clear_divisions=False,
    align_dataframes=False,
    parent_meta=None,
    required_columns=None,
    **kwargs,
):
    """Apply Python function on each DataFrame partition.
    Parameters
    ----------
    func : function
    Function applied to each partition.
    args, kwargs :
    Arguments and keywords to pass to the function. At least one of the
    args should be a Dask.dataframe. Arguments and keywords may contain
    ``Scalar``, ``Delayed`` or regular python objects. DataFrame-like args
    (both dask and pandas) will be repartitioned to align (if necessary)
    before applying the function (see ``align_dataframes`` to control).
    enforce_metadata : bool, default True
    Whether to enforce at runtime that the structure of the DataFrame
    produced by ``func`` actually matches the structure of ``meta``.
    This will rename and reorder columns for each partition,
    and will raise an error if this doesn't work,
    but it won't raise if dtypes don't match.
    transform_divisions : bool, default True
    Whether to apply the function onto the divisions and apply those
    transformed divisions to the output.
    align_dataframes : bool, default False
    Whether to repartition DataFrame- or Series-like args
    (both dask and pandas) so their divisions align before applying
    the function. This requires all inputs to have known divisions.
    Single-partition inputs will be split into multiple partitions.
    If False, all inputs must have either the same number of partitions
    or a single partition. Single-partition inputs will be broadcast to
    every partition of multi-partition inputs.
    Note: ``True`` is not yet implemented here and raises
    ``NotImplementedError``.
    required_columns : list or None, default None
    List of columns that ``func`` requires for execution. These columns
    must belong to the first DataFrame argument (in ``args``). If None
    is specified (the default), the query optimizer will assume that
    all input columns are required.
    $META
    """
    if align_dataframes:
        # TODO: Handle alignment?
        # Perhaps we only handle the case that all `Expr` operands
        # have the same number of partitions or can be broadcasted
        # within `MapPartitions`. If so, the `map_partitions` API
        # will need to call `Repartition` on operands that are not
        # aligned with `self.expr`.
        raise NotImplementedError()
    # Wrap Delayed positional args so they participate in the expression graph.
    args = [_DelayedExpr(a) if isinstance(a, Delayed) else a for a in args]
    # Delayed keyword values are replaced by TaskRefs pointing at their keys;
    # the wrapped expressions are appended as extra operands so their graphs
    # are carried along.
    newkwargs = {}
    delayed_kwargs = []
    for k, v in kwargs.items():
        if isinstance(v, Delayed):
            dexpr = _DelayedExpr(v)
            delayed_kwargs.append(dexpr)
            newkwargs[k] = TaskRef(dexpr.__dask_keys__()[0])
        else:
            newkwargs[k] = v
    del kwargs
    new_expr = expr.MapPartitions(
        args[0],
        func,
        meta,
        enforce_metadata,
        transform_divisions,
        clear_divisions,
        align_dataframes,
        parent_meta,
        required_columns,
        newkwargs.pop("token", None),
        Dict(newkwargs),
        len(args) - 1,
        *args[1:],
        *delayed_kwargs,
    )
    return new_collection(new_expr)
@insert_meta_param_description
def map_overlap(
    func,
    df,
    before,
    after,
    *args,
    meta=no_default,
    enforce_metadata=True,
    transform_divisions=True,
    clear_divisions=False,
    align_dataframes=False,
    **kwargs,
):
    """Apply a function to each partition, sharing rows with adjacent partitions.
    Parameters
    ----------
    func : function
    The function applied to each partition. If this function accepts
    the special ``partition_info`` keyword argument, it will receive
    information on the partition's relative location within the
    dataframe.
    df: dd.DataFrame, dd.Series
    args, kwargs :
    Positional and keyword arguments to pass to the function.
    Positional arguments are computed on a per-partition basis, while
    keyword arguments are shared across all partitions. The partition
    itself will be the first positional argument, with all other
    arguments passed *after*. Arguments can be ``Scalar``, ``Delayed``,
    or regular Python objects. DataFrame-like args (both dask and
    pandas) will be repartitioned to align (if necessary) before
    applying the function; see ``align_dataframes`` to control this
    behavior.
    enforce_metadata : bool, default True
    Whether to enforce at runtime that the structure of the DataFrame
    produced by ``func`` actually matches the structure of ``meta``.
    This will rename and reorder columns for each partition,
    and will raise an error if this doesn't work,
    but it won't raise if dtypes don't match.
    before : int, timedelta or string timedelta
    The rows to prepend to partition ``i`` from the end of
    partition ``i - 1``.
    after : int, timedelta or string timedelta
    The rows to append to partition ``i`` from the beginning
    of partition ``i + 1``.
    transform_divisions : bool, default True
    Whether to apply the function onto the divisions and apply those
    transformed divisions to the output.
    align_dataframes : bool, default False
    Whether to repartition DataFrame- or Series-like args
    (both dask and pandas) so their divisions align before applying
    the function. This requires all inputs to have known divisions.
    Single-partition inputs will be split into multiple partitions.
    If False, all inputs must have either the same number of partitions
    or a single partition. Single-partition inputs will be broadcast to
    every partition of multi-partition inputs.
    $META
    See Also
    --------
    dd.DataFrame.map_overlap
    """
    # String offsets (e.g. "1h") become timedeltas for the window bounds.
    if isinstance(before, str):
        before = pd.to_timedelta(before)
    if isinstance(after, str):
        after = pd.to_timedelta(after)
    if isinstance(before, datetime.timedelta) or isinstance(after, datetime.timedelta):
        # Timedelta windows only make sense over a datetime-like index.
        if isinstance(df, FrameBase):
            inferred_type = df.index._meta_nonempty.inferred_type
        else:
            inferred_type = df.index.inferred_type
        # NOTE(review): `inferred_type` is a string here; presumably
        # is_datetime64_any_dtype treats it as a dtype spec — confirm.
        if not is_datetime64_any_dtype(inferred_type):
            raise TypeError(
                "Must have a `DatetimeIndex` when using string offset "
                "for `before` and `after`"
            )
    elif not (
        isinstance(before, Integral)
        and before >= 0
        and isinstance(after, Integral)
        and after >= 0
    ):
        raise ValueError("before and after must be positive integers")
    # Promote pandas inputs to Dask collections.
    df = _maybe_from_pandas([df])[0]
    args = _maybe_from_pandas(args)
    if align_dataframes:
        # When the Dask inputs are not already co-aligned, fall back to the
        # aligning variant of the expression.
        dfs = [df] + args
        dfs = [df for df in dfs if isinstance(df, FrameBase)]
        if len(dfs) > 1 and not expr.are_co_aligned(*dfs):
            return new_collection(
                expr.MapOverlapAlign(
                    df,
                    func,
                    before,
                    after,
                    meta,
                    enforce_metadata,
                    transform_divisions,
                    clear_divisions,
                    align_dataframes,
                    kwargs.pop("token", None),
                    kwargs,
                    *args,
                )
            )
    new_expr = expr.MapOverlap(
        df,
        func,
        before,
        after,
        meta,
        enforce_metadata,
        transform_divisions,
        clear_divisions,
        align_dataframes,
        kwargs.pop("token", None),
        kwargs,
        *args,
    )
    return new_collection(new_expr)
def isna(arg):
    """Return a like-shaped boolean object marking missing values in ``arg``.

    Dask collections dispatch to their own ``isna``; any other input is first
    wrapped via :func:`from_pandas`.
    """
    if not isinstance(arg, FrameBase):
        arg = from_pandas(arg)
    return arg.isna()
def elemwise(op, *args, meta=no_default, out=None, transform_divisions=True, **kwargs):
    """Elementwise operation for Dask dataframes
    Parameters
    ----------
    op: callable
    Function to apply across input dataframes
    *args: DataFrames, Series, Scalars, Arrays,
    The arguments of the operation
    meta: pd.DataFrame, pd.Series (optional)
    Valid metadata for the operation. Will evaluate on a small piece of
    data if not provided.
    transform_divisions: boolean
    If the input is a ``dask.dataframe.Index`` we normally will also apply
    the function onto the divisions and apply those transformed divisions
    to the output. You can pass ``transform_divisions=False`` to override
    this behavior
    out : ``dask.array`` or ``None``
    If out is a dask.DataFrame, dask.Series or dask.Scalar then
    this overwrites the contents of it with the result
    **kwargs: scalars
    Examples
    --------
    >>> elemwise(operator.add, df.x, df.y)  # doctest: +SKIP
    """
    # Promote pandas inputs to Dask collections before inspecting alignment.
    args = _maybe_from_pandas(args)
    dfs = [df for df in args if isinstance(df, FrameBase)]
    # NOTE(review): assumes at least one Dask collection among `args`;
    # `dfs[0]` would raise IndexError otherwise — confirm callers guarantee it.
    if len(dfs) <= 1 or expr.are_co_aligned(*dfs):
        # Inputs already share divisions (or there is only one): apply directly.
        result = new_collection(
            expr.UFuncElemwise(dfs[0], op, meta, transform_divisions, kwargs, *args)
        )
    else:
        # Misaligned inputs go through the aligning variant.
        result = new_collection(expr.UFuncAlign(dfs[0], op, meta, kwargs, *args))
    # Honor numpy-style `out=` semantics (in-place update of `out`).
    return handle_out(out, result)
def handle_out(out, result):
    """Handle out parameters
    If out is a dask.DataFrame, dask.Series or dask.Scalar then
    this overwrites the contents of it with the result. The method
    replaces the expression of the out parameter with the result
    from this operation to perform something akin to an inplace
    modification.

    Note: when ``out`` is provided and updated in place, this function
    returns ``None`` (implicitly); only when ``out`` is ``None`` is
    ``result`` returned.
    """
    # numpy-style ufuncs pass `out` as a 1-tuple; unwrap it.
    if isinstance(out, tuple):
        if len(out) == 1:
            out = out[0]
        elif len(out) > 1:
            raise NotImplementedError(
                "The `out` parameter with length > 1 is not supported"
            )
        else:
            out = None
    # Exact-class match is required (not isinstance): a Series `out` cannot
    # receive a DataFrame result, nor a subclass mismatch.
    if out is not None and out.__class__ != result.__class__:
        raise TypeError(
            "Mismatched types between result and out parameter. "
            f"out={type(out)}, result={type(result)}"
        )
    if isinstance(out, DataFrame):
        if len(out.columns) != len(result.columns):
            raise ValueError(
                "Mismatched columns count between result and out parameter. "
                f"out={len(out.columns)}, result={len(result.columns)}"
            )
    if isinstance(out, (Series, DataFrame, Scalar)):
        # In-place: graft the new expression onto `out`. Falls through and
        # returns None, mirroring an in-place mutation.
        out._expr = result._expr
    elif out is not None:
        msg = (
            "The out parameter is not fully supported."
            f" Received type {typename(type(out))}, expected {typename(type(result))} "
        )
        raise NotImplementedError(msg)
    else:
        return result
def _compute_partition_stats(
    column: Series, allow_overlap: bool = False
) -> tuple[list, list, list[int]]:
    """For a given column, compute the min, max, and len of each partition.

    And make sure that the partitions are sorted relative to each other.
    NOTE: this does not guarantee that every partition is internally sorted.

    Raises
    ------
    ValueError
        If the per-partition mins/maxes are not ascending.
    """
    # Compute all three per-partition statistics in one graph evaluation.
    mins = column.map_partitions(M.min, meta=column)
    maxes = column.map_partitions(M.max, meta=column)
    lens = column.map_partitions(len, meta=column)
    mins, maxes, lens = compute(mins, maxes, lens)
    # Empty partitions yield NaN min/max; backfill from the next partition so
    # the sortedness check below is not tripped by missing values.
    mins = mins.bfill().tolist()
    maxes = maxes.bfill().tolist()
    non_empty_mins = [m for m, length in zip(mins, lens) if length != 0]
    non_empty_maxes = [m for m, length in zip(maxes, lens) if length != 0]
    if (
        sorted(non_empty_mins) != non_empty_mins
        or sorted(non_empty_maxes) != non_empty_maxes
    ):
        # NOTE: the original passed two separate strings to ValueError (a
        # stray trailing comma), producing a two-arg exception; they are now
        # concatenated into a single message.
        raise ValueError(
            f"Partitions are not sorted ascending by {column.name or 'the index'}. "
            f"In your dataset the (min, max, len) values of {column.name or 'the index'} "
            f"for each partition are: {list(zip(mins, maxes, lens))}"
        )
    # Overlap: some partition's min is <= the previous partition's max.
    if not allow_overlap and any(
        a <= b for a, b in zip(non_empty_mins[1:], non_empty_maxes[:-1])
    ):
        warnings.warn(
            "Partitions have overlapping values, so divisions are non-unique. "
            "Use `set_index(sorted=True)` with no `divisions` to allow dask to fix the overlap. "
            f"In your dataset the (min, max, len) values of {column.name or 'the index'} "
            f"for each partition are : {list(zip(mins, maxes, lens))}",
            UserWarning,
        )
    lens = methods.tolist(lens)
    if not allow_overlap:
        return (mins, maxes, lens)
    else:
        return (non_empty_mins, non_empty_maxes, lens)
@get_parallel_type.register(FrameBase)
def get_parallel_type_frame(o):
    # Dispatch on the collection's meta (the underlying pandas-like object)
    # rather than the Dask collection wrapper itself.
    return get_parallel_type(o._meta)
|
Scalar
|
python
|
networkx__networkx
|
networkx/algorithms/centrality/tests/test_degree_centrality.py
|
{
"start": 82,
"end": 4101
}
|
class ____:
    """Tests for degree, in-degree, and out-degree centrality.

    Uses several canonical fixtures: the Krackhardt kite, a 3-node path,
    the complete graph K5, the Florentine-families social network, and a
    small star-like DiGraph.
    """
    def setup_method(self):
        # Standard fixtures with well-known centrality values.
        self.K = nx.krackhardt_kite_graph()
        self.P3 = nx.path_graph(3)
        self.K5 = nx.complete_graph(5)
        F = nx.Graph()  # Florentine families
        F.add_edge("Acciaiuoli", "Medici")
        F.add_edge("Castellani", "Peruzzi")
        F.add_edge("Castellani", "Strozzi")
        F.add_edge("Castellani", "Barbadori")
        F.add_edge("Medici", "Barbadori")
        F.add_edge("Medici", "Ridolfi")
        F.add_edge("Medici", "Tornabuoni")
        F.add_edge("Medici", "Albizzi")
        F.add_edge("Medici", "Salviati")
        F.add_edge("Salviati", "Pazzi")
        F.add_edge("Peruzzi", "Strozzi")
        F.add_edge("Peruzzi", "Bischeri")
        F.add_edge("Strozzi", "Ridolfi")
        F.add_edge("Strozzi", "Bischeri")
        F.add_edge("Ridolfi", "Tornabuoni")
        F.add_edge("Tornabuoni", "Guadagni")
        F.add_edge("Albizzi", "Ginori")
        F.add_edge("Albizzi", "Guadagni")
        F.add_edge("Bischeri", "Guadagni")
        F.add_edge("Guadagni", "Lamberteschi")
        self.F = F
        # Directed fixture: nodes 0-4 point at hub 5, which points at 6-8.
        G = nx.DiGraph()
        G.add_edge(0, 5)
        G.add_edge(1, 5)
        G.add_edge(2, 5)
        G.add_edge(3, 5)
        G.add_edge(4, 5)
        G.add_edge(5, 6)
        G.add_edge(5, 7)
        G.add_edge(5, 8)
        self.G = G
    def test_degree_centrality_1(self):
        # Every node in K5 touches all other nodes -> centrality 1.
        d = nx.degree_centrality(self.K5)
        exact = dict(zip(range(5), [1] * 5))
        for n, dc in d.items():
            assert exact[n] == pytest.approx(dc, abs=1e-7)
    def test_degree_centrality_2(self):
        d = nx.degree_centrality(self.P3)
        exact = {0: 0.5, 1: 1, 2: 0.5}
        for n, dc in d.items():
            assert exact[n] == pytest.approx(dc, abs=1e-7)
    def test_degree_centrality_3(self):
        # Reference values rounded to 3 decimal places.
        d = nx.degree_centrality(self.K)
        exact = {
            0: 0.444,
            1: 0.444,
            2: 0.333,
            3: 0.667,
            4: 0.333,
            5: 0.556,
            6: 0.556,
            7: 0.333,
            8: 0.222,
            9: 0.111,
        }
        for n, dc in d.items():
            assert exact[n] == pytest.approx(float(f"{dc:.3f}"), abs=1e-7)
    def test_degree_centrality_4(self):
        # Florentine network; reference values keyed by sorted family name.
        d = nx.degree_centrality(self.F)
        names = sorted(self.F.nodes())
        dcs = [
            0.071,
            0.214,
            0.143,
            0.214,
            0.214,
            0.071,
            0.286,
            0.071,
            0.429,
            0.071,
            0.214,
            0.214,
            0.143,
            0.286,
            0.214,
        ]
        exact = dict(zip(names, dcs))
        for n, dc in d.items():
            assert exact[n] == pytest.approx(float(f"{dc:.3f}"), abs=1e-7)
    def test_indegree_centrality(self):
        d = nx.in_degree_centrality(self.G)
        exact = {
            0: 0.0,
            1: 0.0,
            2: 0.0,
            3: 0.0,
            4: 0.0,
            5: 0.625,
            6: 0.125,
            7: 0.125,
            8: 0.125,
        }
        for n, dc in d.items():
            assert exact[n] == pytest.approx(dc, abs=1e-7)
    def test_outdegree_centrality(self):
        d = nx.out_degree_centrality(self.G)
        exact = {
            0: 0.125,
            1: 0.125,
            2: 0.125,
            3: 0.125,
            4: 0.125,
            5: 0.375,
            6: 0.0,
            7: 0.0,
            8: 0.0,
        }
        for n, dc in d.items():
            assert exact[n] == pytest.approx(dc, abs=1e-7)
    def test_small_graph_centrality(self):
        # Degenerate graphs: empty graph -> empty dict; a single isolated
        # node has centrality 1 by convention (n - 1 == 0 edge case).
        G = nx.empty_graph(create_using=nx.DiGraph)
        assert {} == nx.degree_centrality(G)
        assert {} == nx.out_degree_centrality(G)
        assert {} == nx.in_degree_centrality(G)
        G = nx.empty_graph(1, create_using=nx.DiGraph)
        assert {0: 1} == nx.degree_centrality(G)
        assert {0: 1} == nx.out_degree_centrality(G)
        assert {0: 1} == nx.in_degree_centrality(G)
|
TestDegreeCentrality
|
python
|
getsentry__sentry
|
tests/sentry/uptime/endpoints/test_project_uptime_alert_details.py
|
{
"start": 411,
"end": 550
}
|
class ____(UptimeAlertBaseEndpointTest):
endpoint = "sentry-api-0-project-uptime-alert-details"
|
ProjectUptimeAlertDetailsBaseEndpointTest
|
python
|
realpython__materials
|
wordcount/tests/realpython/resources.py
|
{
"start": 1650,
"end": 1797
}
|
class ____(Resource):
@property
def url(self) -> str:
return f"https://realpython.com/learning-paths/{self.slug_clean}/"
|
LearningPath
|
python
|
langchain-ai__langchain
|
libs/partners/prompty/langchain_prompty/core.py
|
{
"start": 346,
"end": 683
}
|
class ____(BaseModel):
"""Property settings for a prompty model."""
model_config = ConfigDict(arbitrary_types_allowed=True)
type: Literal["string", "number", "array", "object", "boolean"]
default: str | int | float | list | dict | bool | None = Field(default=None)
description: str = Field(default="")
|
PropertySettings
|
python
|
doocs__leetcode
|
solution/1800-1899/1805.Number of Different Integers in a String/Solution.py
|
{
"start": 0,
"end": 442
}
|
class ____:
def numDifferentIntegers(self, word: str) -> int:
s = set()
i, n = 0, len(word)
while i < n:
if word[i].isdigit():
while i < n and word[i] == '0':
i += 1
j = i
while j < n and word[j].isdigit():
j += 1
s.add(word[i:j])
i = j
i += 1
return len(s)
|
Solution
|
python
|
pydata__xarray
|
xarray/core/indexes.py
|
{
"start": 54523,
"end": 61803
}
|
class ____(Index):
"""Helper class for creating Xarray indexes based on coordinate transforms.
- wraps a :py:class:`CoordinateTransform` instance
- takes care of creating the index (lazy) coordinates
- supports point-wise label-based selection
- supports exact alignment only, by comparing indexes based on their transform
(not on their explicit coordinate labels)
.. caution::
This API is experimental and subject to change. Please report any bugs or surprising
behaviour you encounter.
"""
transform: CoordinateTransform
def __init__(
self,
transform: CoordinateTransform,
):
self.transform = transform
def create_variables(
self, variables: Mapping[Any, Variable] | None = None
) -> IndexVars:
from xarray.core.variable import Variable
new_variables = {}
for name in self.transform.coord_names:
# copy attributes, if any
attrs: Mapping[Hashable, Any] | None
if variables is not None and name in variables:
var = variables[name]
attrs = var.attrs
else:
attrs = None
data = CoordinateTransformIndexingAdapter(self.transform, name)
new_variables[name] = Variable(self.transform.dims, data, attrs=attrs)
return new_variables
def isel(
self, indexers: Mapping[Any, int | slice | np.ndarray | Variable]
) -> Index | None:
# TODO: support returning a new index (e.g., possible to re-calculate the
# the transform or calculate another transform on a reduced dimension space)
return None
def sel(
self, labels: dict[Any, Any], method=None, tolerance=None
) -> IndexSelResult:
from xarray.core.dataarray import DataArray
from xarray.core.variable import Variable
if method != "nearest":
raise ValueError(
"CoordinateTransformIndex only supports selection with method='nearest'"
)
labels_set = set(labels)
coord_names_set = set(self.transform.coord_names)
missing_labels = coord_names_set - labels_set
if missing_labels:
missing_labels_str = ",".join([f"{name}" for name in missing_labels])
raise ValueError(f"missing labels for coordinate(s): {missing_labels_str}.")
label0_obj = next(iter(labels.values()))
dim_size0 = getattr(label0_obj, "sizes", {})
is_xr_obj = [
isinstance(label, DataArray | Variable) for label in labels.values()
]
if not all(is_xr_obj):
raise TypeError(
"CoordinateTransformIndex only supports advanced (point-wise) indexing "
"with either xarray.DataArray or xarray.Variable objects."
)
dim_size = [getattr(label, "sizes", {}) for label in labels.values()]
if any(ds != dim_size0 for ds in dim_size):
raise ValueError(
"CoordinateTransformIndex only supports advanced (point-wise) indexing "
"with xarray.DataArray or xarray.Variable objects of matching dimensions."
)
coord_labels = {
name: labels[name].values for name in self.transform.coord_names
}
dim_positions = self.transform.reverse(coord_labels)
results: dict[str, Variable | DataArray] = {}
dims0 = tuple(dim_size0)
for dim, pos in dim_positions.items():
# TODO: rounding the decimal positions is not always the behavior we expect
# (there are different ways to represent implicit intervals)
# we should probably make this customizable.
pos = np.round(pos).astype("int")
if isinstance(label0_obj, Variable):
results[dim] = Variable(dims0, pos)
else:
# dataarray
results[dim] = DataArray(pos, dims=dims0)
return IndexSelResult(results)
def equals(
self, other: Index, *, exclude: frozenset[Hashable] | None = None
) -> bool:
if not isinstance(other, CoordinateTransformIndex):
return False
return self.transform.equals(other.transform, exclude=exclude)
def rename(
self,
name_dict: Mapping[Any, Hashable],
dims_dict: Mapping[Any, Hashable],
) -> Self:
coord_names = self.transform.coord_names
dims = self.transform.dims
dim_size = self.transform.dim_size
if not set(coord_names) & set(name_dict) and not set(dims) & set(dims_dict):
return self
new_transform = copy.deepcopy(self.transform)
new_transform.coord_names = tuple(name_dict.get(n, n) for n in coord_names)
new_transform.dims = tuple(str(dims_dict.get(d, d)) for d in dims)
new_transform.dim_size = {
str(dims_dict.get(d, d)): v for d, v in dim_size.items()
}
return type(self)(new_transform)
def create_default_index_implicit(
dim_variable: Variable,
all_variables: Mapping | Iterable[Hashable] | None = None,
) -> tuple[PandasIndex, IndexVars]:
"""Create a default index from a dimension variable.
Create a PandasMultiIndex if the given variable wraps a pandas.MultiIndex,
otherwise create a PandasIndex (note that this will become obsolete once we
depreciate implicitly passing a pandas.MultiIndex as a coordinate).
"""
if all_variables is None:
all_variables = {}
if not isinstance(all_variables, Mapping):
all_variables = dict.fromkeys(all_variables)
name = dim_variable.dims[0]
array = getattr(dim_variable._data, "array", None)
index: PandasIndex
if isinstance(array, pd.MultiIndex):
index = PandasMultiIndex(array, name)
index_vars = index.create_variables()
# check for conflict between level names and variable names
duplicate_names = [k for k in index_vars if k in all_variables and k != name]
if duplicate_names:
# dirty workaround for an edge case where both the dimension
# coordinate and the level coordinates are given for the same
# multi-index object => do not raise an error
# TODO: remove this check when removing the multi-index dimension coordinate
if len(duplicate_names) < len(index.index.names):
conflict = True
else:
duplicate_vars = [all_variables[k] for k in duplicate_names]
conflict = any(
v is None or not dim_variable.equals(v) for v in duplicate_vars
)
if conflict:
conflict_str = "\n".join(duplicate_names)
raise ValueError(
f"conflicting MultiIndex level / variable name(s):\n{conflict_str}"
)
else:
dim_var = {name: dim_variable}
index = PandasIndex.from_variables(dim_var, options={})
index_vars = index.create_variables(dim_var)
return index, index_vars
# generic type that represents either a pandas or an xarray index
T_PandasOrXarrayIndex = TypeVar("T_PandasOrXarrayIndex", Index, pd.Index)
|
CoordinateTransformIndex
|
python
|
PrefectHQ__prefect
|
src/prefect/events/actions.py
|
{
"start": 1928,
"end": 2823
}
|
class ____(DeploymentAction):
"""Runs the given deployment with the given parameters"""
type: Literal["run-deployment"] = "run-deployment"
parameters: Optional[Dict[str, Any]] = Field(
None,
description=(
"The parameters to pass to the deployment, or None to use the "
"deployment's default parameters"
),
)
job_variables: Optional[Dict[str, Any]] = Field(
None,
description=(
"The job variables to pass to the created flow run, or None "
"to use the deployment's default job variables"
),
)
schedule_after: NonNegativeTimeDelta = Field(
default_factory=lambda: timedelta(0),
description=(
"The amount of time to wait before running the deployment. "
"Defaults to running the deployment immediately."
),
)
|
RunDeployment
|
python
|
tensorflow__tensorflow
|
tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_algorithm.py
|
{
"start": 7308,
"end": 7995
}
|
class ____(_CalibrationAlgorithmBase):
"""MinMaxCalibrationAlgorithm for calculating min and max values of calibration result.
MinMax calibration calculates the global min and global max values.
global min = min of given sample inputs
global max = max of given sample inputs
"""
def get_min_max_value(self) -> tuple[float, float]:
"""Calculates the global min and max values.
Returns:
(min_value, max_value): Min and max calculated using MinMax
"""
return (
self._statistics.min_max_statistics.global_min,
self._statistics.min_max_statistics.global_max,
)
@_implements(_CalibrationMethod.CALIBRATION_METHOD_AVERAGE_MIN_MAX)
|
_MinMax
|
python
|
django__django
|
tests/lookup/test_lookups.py
|
{
"start": 1779,
"end": 2345
}
|
class ____(SimpleTestCase):
def test_get_bound_params(self):
look_up = YearLookup(
lhs=Value(datetime(2010, 1, 1, 0, 0, 0), output_field=DateTimeField()),
rhs=Value(datetime(2010, 1, 1, 23, 59, 59), output_field=DateTimeField()),
)
msg = "subclasses of YearLookup must provide a get_bound_params() method"
with self.assertRaisesMessage(NotImplementedError, msg):
look_up.get_bound_params(
datetime(2010, 1, 1, 0, 0, 0), datetime(2010, 1, 1, 23, 59, 59)
)
|
YearLookupTests
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-valid-pair-of-adjacent-digits-in-string.py
|
{
"start": 42,
"end": 461
}
|
class ____(object):
def findValidPair(self, s):
"""
:type s: str
:rtype: str
"""
cnt = [0]*9
for x in s:
cnt[ord(x)-ord('1')] += 1
for i in xrange(len(s)-1):
if s[i] != s[i+1] and cnt[ord(s[i])-ord('1')] == ord(s[i])-ord('0') and cnt[ord(s[i+1])-ord('1')] == ord(s[i+1])-ord('0'):
return s[i:i+2]
return ""
|
Solution
|
python
|
python-visualization__folium
|
folium/plugins/pattern.py
|
{
"start": 214,
"end": 2347
}
|
class ____(JSCSSMixin, MacroElement):
"""Fill Pattern for polygon composed of alternating lines.
Add these to the 'fillPattern' field in GeoJson style functions.
Parameters
----------
angle: float, default 0.5
Angle of the line pattern (degrees). Should be between -360 and 360.
weight: float, default 4
Width of the main lines (pixels).
space_weight: float
Width of the alternate lines (pixels).
color: string with hexadecimal, RGB, or named color, default "#000000"
Color of the main lines.
space_color: string with hexadecimal, RGB, or named color, default "#ffffff"
Color of the alternate lines.
opacity: float, default 0.75
Opacity of the main lines. Should be between 0 and 1.
space_opacity: float, default 0.0
Opacity of the alternate lines. Should be between 0 and 1.
See https://github.com/teastman/Leaflet.pattern for more information.
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
var {{ this.get_name() }} = new L.StripePattern(
{{ this.options|tojavascript }}
);
{{ this.get_name() }}.addTo({{ this.parent_map.get_name() }});
{% endmacro %}
"""
)
default_js = [
("pattern", "https://teastman.github.io/Leaflet.pattern/leaflet.pattern.js")
]
def __init__(
self,
angle=0.5,
weight=4,
space_weight=4,
color="#000000",
space_color="#ffffff",
opacity=0.75,
space_opacity=0.0,
**kwargs
):
super().__init__()
self._name = "StripePattern"
self.options = remove_empty(
angle=angle,
weight=weight,
space_weight=space_weight,
color=color,
space_color=space_color,
opacity=opacity,
space_opacity=space_opacity,
**kwargs
)
self.parent_map = None
def render(self, **kwargs):
self.parent_map = get_obj_in_upper_tree(self, Map)
super().render(**kwargs)
|
StripePattern
|
python
|
pytorch__pytorch
|
torch/package/package_importer.py
|
{
"start": 28948,
"end": 29600
}
|
class ____(_PathNode):
pass
# A private global registry of all modules that have been package-imported.
_package_imported_modules: WeakValueDictionary = WeakValueDictionary()
# `inspect` by default only looks in `sys.modules` to find source files for classes.
# Patch it to check our private registry of package-imported modules as well.
_orig_getfile = inspect.getfile
def _patched_getfile(object):
if inspect.isclass(object):
if object.__module__ in _package_imported_modules:
return _package_imported_modules[object.__module__].__file__
return _orig_getfile(object)
inspect.getfile = _patched_getfile
|
_ExternNode
|
python
|
apache__airflow
|
providers/celery/tests/unit/celery/cli/test_celery_command.py
|
{
"start": 10666,
"end": 16946
}
|
class ____:
@classmethod
def setup_class(cls):
with conf_vars({("core", "executor"): "CeleryExecutor"}):
importlib.reload(cli_parser)
cls.parser = cli_parser.get_parser()
@mock.patch("airflow.providers.celery.executors.celery_executor.app")
def test_run_command(self, mock_celery_app):
args = self.parser.parse_args(
[
"celery",
"flower",
"--basic-auth",
"admin:admin",
"--broker-api",
"http://username:password@rabbitmq-server-name:15672/api/",
"--flower-conf",
"flower_config",
"--hostname",
"my-hostname",
"--port",
"3333",
"--url-prefix",
"flower-monitoring",
]
)
celery_command.flower(args)
mock_celery_app.start.assert_called_once_with(
[
"flower",
conf.get("celery", "BROKER_URL"),
"--address=my-hostname",
"--port=3333",
"--broker-api=http://username:password@rabbitmq-server-name:15672/api/",
"--url-prefix=flower-monitoring",
"--basic-auth=admin:admin",
"--conf=flower_config",
]
)
def _test_run_command_daemon(self, mock_celery_app, mock_daemon, mock_setup_locations, mock_pid_file):
mock_setup_locations.return_value = (
mock.MagicMock(name="pidfile"),
mock.MagicMock(name="stdout"),
mock.MagicMock(name="stderr"),
mock.MagicMock(name="INVALID"),
)
args = self.parser.parse_args(
[
"celery",
"flower",
"--basic-auth",
"admin:admin",
"--broker-api",
"http://username:password@rabbitmq-server-name:15672/api/",
"--flower-conf",
"flower_config",
"--hostname",
"my-hostname",
"--log-file",
"/tmp/flower.log",
"--pid",
"/tmp/flower.pid",
"--port",
"3333",
"--stderr",
"/tmp/flower-stderr.log",
"--stdout",
"/tmp/flower-stdout.log",
"--url-prefix",
"flower-monitoring",
"--daemon",
]
)
mock_open = mock.mock_open()
with mock.patch("airflow.cli.commands.daemon_utils.open", mock_open):
celery_command.flower(args)
mock_celery_app.start.assert_called_once_with(
[
"flower",
conf.get("celery", "BROKER_URL"),
"--address=my-hostname",
"--port=3333",
"--broker-api=http://username:password@rabbitmq-server-name:15672/api/",
"--url-prefix=flower-monitoring",
"--basic-auth=admin:admin",
"--conf=flower_config",
]
)
assert mock_daemon.mock_calls[:3] == [
mock.call.DaemonContext(
pidfile=mock_pid_file.return_value,
files_preserve=None,
stdout=mock_open.return_value,
stderr=mock_open.return_value,
umask=0o077,
),
mock.call.DaemonContext().__enter__(),
mock.call.DaemonContext().__exit__(None, None, None),
]
assert mock_setup_locations.mock_calls == [
mock.call(
process="flower",
pid="/tmp/flower.pid",
stdout="/tmp/flower-stdout.log",
stderr="/tmp/flower-stderr.log",
log="/tmp/flower.log",
)
]
mock_pid_file.assert_has_calls([mock.call(mock_setup_locations.return_value[0], -1)])
if PY313:
assert mock_open.mock_calls == [
mock.call(mock_setup_locations.return_value[1], "a"),
mock.call().__enter__(),
mock.call(mock_setup_locations.return_value[2], "a"),
mock.call().__enter__(),
mock.call().truncate(0),
mock.call().truncate(0),
mock.call().__exit__(None, None, None),
mock.call().close(),
mock.call().__exit__(None, None, None),
mock.call().close(),
]
else:
assert mock_open.mock_calls == [
mock.call(mock_setup_locations.return_value[1], "a"),
mock.call().__enter__(),
mock.call(mock_setup_locations.return_value[2], "a"),
mock.call().__enter__(),
mock.call().truncate(0),
mock.call().truncate(0),
mock.call().__exit__(None, None, None),
mock.call().__exit__(None, None, None),
]
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Test requires Airflow 3.0-")
@mock.patch("airflow.cli.commands.daemon_utils.TimeoutPIDLockFile")
@mock.patch("airflow.cli.commands.daemon_utils.setup_locations")
@mock.patch("airflow.cli.commands.daemon_utils.daemon")
@mock.patch("airflow.providers.celery.executors.celery_executor.app")
def test_run_command_daemon_v_3_below(
self, mock_celery_app, mock_daemon, mock_setup_locations, mock_pid_file
):
self._test_run_command_daemon(mock_celery_app, mock_daemon, mock_setup_locations, mock_pid_file)
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Test requires Airflow 3.0+")
@mock.patch("airflow.cli.commands.daemon_utils.TimeoutPIDLockFile")
@mock.patch("airflow.cli.commands.daemon_utils.setup_locations")
@mock.patch("airflow.cli.commands.daemon_utils.daemon")
@mock.patch("airflow.providers.celery.executors.celery_executor.app")
def test_run_command_daemon_v3_above(
self, mock_celery_app, mock_daemon, mock_setup_locations, mock_pid_file
):
self._test_run_command_daemon(mock_celery_app, mock_daemon, mock_setup_locations, mock_pid_file)
|
TestFlowerCommand
|
python
|
django__django
|
tests/utils_tests/test_autoreload.py
|
{
"start": 17116,
"end": 18948
}
|
class ____(SimpleTestCase):
@mock.patch("django.utils.autoreload._exception", None)
def test_no_exception(self):
# Should raise no exception if _exception is None
autoreload.raise_last_exception()
def test_raises_exception(self):
class MyException(Exception):
pass
# Create an exception
try:
raise MyException("Test Message")
except MyException:
exc_info = sys.exc_info()
with mock.patch("django.utils.autoreload._exception", exc_info):
with self.assertRaisesMessage(MyException, "Test Message"):
autoreload.raise_last_exception()
def test_raises_custom_exception(self):
class MyException(Exception):
def __init__(self, msg, extra_context):
super().__init__(msg)
self.extra_context = extra_context
# Create an exception.
try:
raise MyException("Test Message", "extra context")
except MyException:
exc_info = sys.exc_info()
with mock.patch("django.utils.autoreload._exception", exc_info):
with self.assertRaisesMessage(MyException, "Test Message"):
autoreload.raise_last_exception()
def test_raises_exception_with_context(self):
try:
raise Exception(2)
except Exception as e:
try:
raise Exception(1) from e
except Exception:
exc_info = sys.exc_info()
with mock.patch("django.utils.autoreload._exception", exc_info):
with self.assertRaises(Exception) as cm:
autoreload.raise_last_exception()
self.assertEqual(cm.exception.args[0], 1)
self.assertEqual(cm.exception.__cause__.args[0], 2)
|
TestRaiseLastException
|
python
|
celery__celery
|
t/unit/utils/test_collections.py
|
{
"start": 9845,
"end": 11755
}
|
class ____:
def assert_size_and_first(self, buf, size, expected_first_item):
assert len(buf) == size
assert buf.take() == expected_first_item
def test_append_limited(self):
b = Messagebuffer(10)
for i in range(20):
b.put(i)
self.assert_size_and_first(b, 10, 10)
def test_append_unlimited(self):
b = Messagebuffer(None)
for i in range(20):
b.put(i)
self.assert_size_and_first(b, 20, 0)
def test_extend_limited(self):
b = Messagebuffer(10)
b.extend(list(range(20)))
self.assert_size_and_first(b, 10, 10)
def test_extend_unlimited(self):
b = Messagebuffer(None)
b.extend(list(range(20)))
self.assert_size_and_first(b, 20, 0)
def test_extend_eviction_time_limited(self):
b = Messagebuffer(3000)
b.extend(range(10000))
assert len(b) > 3000
b.evict()
assert len(b) == 3000
def test_pop_empty_with_default(self):
b = Messagebuffer(10)
sentinel = object()
assert b.take(sentinel) is sentinel
def test_pop_empty_no_default(self):
b = Messagebuffer(10)
with pytest.raises(b.Empty):
b.take()
def test_repr(self):
assert repr(Messagebuffer(10, [1, 2, 3]))
def test_iter(self):
b = Messagebuffer(10, list(range(10)))
assert len(b) == 10
for i, item in enumerate(b):
assert item == i
assert len(b) == 0
def test_contains(self):
b = Messagebuffer(10, list(range(10)))
assert 5 in b
def test_reversed(self):
assert (list(reversed(Messagebuffer(10, list(range(10))))) ==
list(reversed(range(10))))
def test_getitem(self):
b = Messagebuffer(10, list(range(10)))
for i in range(10):
assert b[i] == i
|
test_Messagebuffer
|
python
|
numba__numba
|
numba/tests/test_dyn_array.py
|
{
"start": 1642,
"end": 15631
}
|
class ____(NrtRefCtTest, TestCase):
def test_empty_0d(self):
@nrtjit
def foo():
arr = np.empty(())
arr[()] = 42
return arr
arr = foo()
self.assert_array_nrt_refct(arr, 1)
np.testing.assert_equal(42, arr)
self.assertEqual(arr.size, 1)
self.assertEqual(arr.shape, ())
self.assertEqual(arr.dtype, np.dtype(np.float64))
self.assertEqual(arr.strides, ())
arr.fill(123) # test writability
np.testing.assert_equal(123, arr)
del arr
def test_empty_1d(self):
@nrtjit
def foo(n):
arr = np.empty(n)
for i in range(n):
arr[i] = i
return arr
n = 3
arr = foo(n)
self.assert_array_nrt_refct(arr, 1)
np.testing.assert_equal(np.arange(n), arr)
self.assertEqual(arr.size, n)
self.assertEqual(arr.shape, (n,))
self.assertEqual(arr.dtype, np.dtype(np.float64))
self.assertEqual(arr.strides, (np.dtype(np.float64).itemsize,))
arr.fill(123) # test writability
np.testing.assert_equal(123, arr)
del arr
def test_empty_2d(self):
def pyfunc(m, n):
arr = np.empty((m, n), np.int32)
for i in range(m):
for j in range(n):
arr[i, j] = i + j
return arr
cfunc = nrtjit(pyfunc)
m = 4
n = 3
expected_arr = pyfunc(m, n)
got_arr = cfunc(m, n)
self.assert_array_nrt_refct(got_arr, 1)
np.testing.assert_equal(expected_arr, got_arr)
self.assertEqual(expected_arr.size, got_arr.size)
self.assertEqual(expected_arr.shape, got_arr.shape)
self.assertEqual(expected_arr.strides, got_arr.strides)
del got_arr
def test_empty_3d(self):
def pyfunc(m, n, p):
arr = np.empty((m, n, p), np.int32)
for i in range(m):
for j in range(n):
for k in range(p):
arr[i, j, k] = i + j + k
return arr
cfunc = nrtjit(pyfunc)
m = 4
n = 3
p = 2
expected_arr = pyfunc(m, n, p)
got_arr = cfunc(m, n, p)
self.assert_array_nrt_refct(got_arr, 1)
np.testing.assert_equal(expected_arr, got_arr)
self.assertEqual(expected_arr.size, got_arr.size)
self.assertEqual(expected_arr.shape, got_arr.shape)
self.assertEqual(expected_arr.strides, got_arr.strides)
del got_arr
def test_empty_2d_sliced(self):
def pyfunc(m, n, p):
arr = np.empty((m, n), np.int32)
for i in range(m):
for j in range(n):
arr[i, j] = i + j
return arr[p]
cfunc = nrtjit(pyfunc)
m = 4
n = 3
p = 2
expected_arr = pyfunc(m, n, p)
got_arr = cfunc(m, n, p)
self.assert_array_nrt_refct(got_arr, 1)
np.testing.assert_equal(expected_arr, got_arr)
self.assertEqual(expected_arr.size, got_arr.size)
self.assertEqual(expected_arr.shape, got_arr.shape)
self.assertEqual(expected_arr.strides, got_arr.strides)
del got_arr
def test_return_global_array(self):
y = np.ones(4, dtype=np.float32)
initrefct = sys.getrefcount(y)
def return_external_array():
return y
cfunc = nrtjit(return_external_array)
out = cfunc()
# out reference by cfunc
self.assertEqual(initrefct + 1, sys.getrefcount(y))
np.testing.assert_equal(y, out)
np.testing.assert_equal(y, np.ones(4, dtype=np.float32))
np.testing.assert_equal(out, np.ones(4, dtype=np.float32))
del out
gc.collect()
# out is only referenced by cfunc
self.assertEqual(initrefct + 1, sys.getrefcount(y))
del cfunc
gc.collect()
# y is no longer referenced by cfunc
self.assertEqual(initrefct, sys.getrefcount(y))
def test_return_global_array_sliced(self):
y = np.ones(4, dtype=np.float32)
def return_external_array():
return y[2:]
cfunc = nrtjit(return_external_array)
out = cfunc()
self.assertIsNone(out.base)
yy = y[2:]
np.testing.assert_equal(yy, out)
np.testing.assert_equal(yy, np.ones(2, dtype=np.float32))
np.testing.assert_equal(out, np.ones(2, dtype=np.float32))
def test_array_pass_through(self):
def pyfunc(y):
return y
arr = np.ones(4, dtype=np.float32)
cfunc = nrtjit(pyfunc)
expected = cfunc(arr)
got = pyfunc(arr)
np.testing.assert_equal(expected, arr)
np.testing.assert_equal(expected, got)
self.assertIs(expected, arr)
self.assertIs(expected, got)
def test_array_pass_through_sliced(self):
def pyfunc(y):
return y[y.size // 2:]
arr = np.ones(4, dtype=np.float32)
initrefct = sys.getrefcount(arr)
cfunc = nrtjit(pyfunc)
got = cfunc(arr)
self.assertEqual(initrefct + 1, sys.getrefcount(arr))
expected = pyfunc(arr)
self.assertEqual(initrefct + 2, sys.getrefcount(arr))
np.testing.assert_equal(expected, arr[arr.size // 2])
np.testing.assert_equal(expected, got)
del expected
self.assertEqual(initrefct + 1, sys.getrefcount(arr))
del got
self.assertEqual(initrefct, sys.getrefcount(arr))
def test_ufunc_with_allocated_output(self):
def pyfunc(a, b):
out = np.empty(a.shape)
np.add(a, b, out)
return out
cfunc = nrtjit(pyfunc)
# 1D case
arr_a = np.random.random(10)
arr_b = np.random.random(10)
np.testing.assert_equal(pyfunc(arr_a, arr_b),
cfunc(arr_a, arr_b))
self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1)
# 2D case
arr_a = np.random.random(10).reshape(2, 5)
arr_b = np.random.random(10).reshape(2, 5)
np.testing.assert_equal(pyfunc(arr_a, arr_b),
cfunc(arr_a, arr_b))
self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1)
# 3D case
arr_a = np.random.random(70).reshape(2, 5, 7)
arr_b = np.random.random(70).reshape(2, 5, 7)
np.testing.assert_equal(pyfunc(arr_a, arr_b),
cfunc(arr_a, arr_b))
self.assert_array_nrt_refct(cfunc(arr_a, arr_b), 1)
def test_allocation_mt(self):
"""
This test exercises the array allocation in multithreaded usecase.
This stress the freelist inside NRT.
"""
def pyfunc(inp):
out = np.empty(inp.size)
# Zero fill
for i in range(out.size):
out[i] = 0
for i in range(inp[0]):
# Allocate inside a loop
tmp = np.empty(inp.size)
# Write to tmp
for j in range(tmp.size):
tmp[j] = inp[j]
# out = tmp + i
for j in range(tmp.size):
out[j] += tmp[j] + i
return out
cfunc = nrtjit(pyfunc)
size = 10 # small array size so that the computation is short
arr = np.random.randint(1, 10, size)
frozen_arr = arr.copy()
np.testing.assert_equal(pyfunc(arr), cfunc(arr))
# Ensure we did not modify the input
np.testing.assert_equal(frozen_arr, arr)
workers = []
inputs = []
outputs = []
# Make wrapper to store the output
def wrapped(inp, out):
out[:] = cfunc(inp)
# Create a lot of worker threads to create contention
for i in range(100):
arr = np.random.randint(1, 10, size)
out = np.empty_like(arr)
thread = threading.Thread(target=wrapped,
args=(arr, out),
name="worker{0}".format(i))
workers.append(thread)
inputs.append(arr)
outputs.append(out)
# Launch worker threads
for thread in workers:
thread.start()
# Join worker threads
for thread in workers:
thread.join()
# Check result
for inp, out in zip(inputs, outputs):
np.testing.assert_equal(pyfunc(inp), out)
def test_refct_mt(self):
"""
This test exercises the refct in multithreaded code
"""
def pyfunc(n, inp):
out = np.empty(inp.size)
for i in range(out.size):
out[i] = inp[i] + 1
# Use swap to trigger many refct ops
for i in range(n):
out, inp = inp, out
return out
cfunc = nrtjit(pyfunc)
size = 10
input = np.arange(size, dtype=float)
expected_refct = sys.getrefcount(input)
swapct = random.randrange(1000)
expected = pyfunc(swapct, input)
np.testing.assert_equal(expected, cfunc(swapct, input))
# The following checks can discover a reference count error
del expected
self.assertEqual(expected_refct, sys.getrefcount(input))
workers = []
outputs = []
swapcts = []
# Make wrapper to store the output
def wrapped(n, input, out):
out[:] = cfunc(n, input)
# Create worker threads
for i in range(100):
out = np.empty(size)
# All thread shares the same input
swapct = random.randrange(1000)
thread = threading.Thread(target=wrapped,
args=(swapct, input, out),
name="worker{0}".format(i))
workers.append(thread)
outputs.append(out)
swapcts.append(swapct)
# Launch worker threads
for thread in workers:
thread.start()
# Join worker threads
for thread in workers:
thread.join()
# Check result
for swapct, out in zip(swapcts, outputs):
np.testing.assert_equal(pyfunc(swapct, input), out)
del outputs, workers
# The following checks can discover a reference count error
self.assertEqual(expected_refct, sys.getrefcount(input))
@skip_if_32bit
def test_invalid_size_array(self):
@njit
def foo(x):
np.empty(x)
# Exceptions leak references
self.disable_leak_check()
with self.assertRaises(MemoryError) as raises:
foo(types.size_t.maxval // 8 // 2)
self.assertIn("Allocation failed", str(raises.exception))
def test_swap(self):
def pyfunc(x, y, t):
"""Swap array x and y for t number of times
"""
for i in range(t):
x, y = y, x
return x, y
cfunc = nrtjit(pyfunc)
x = np.random.random(100)
y = np.random.random(100)
t = 100
initrefct = sys.getrefcount(x), sys.getrefcount(y)
expect, got = pyfunc(x, y, t), cfunc(x, y, t)
self.assertIsNone(got[0].base)
self.assertIsNone(got[1].base)
np.testing.assert_equal(expect, got)
del expect, got
self.assertEqual(initrefct, (sys.getrefcount(x), sys.getrefcount(y)))
def test_return_tuple_of_array(self):
def pyfunc(x):
y = np.empty(x.size)
for i in range(y.size):
y[i] = x[i] + 1
return x, y
cfunc = nrtjit(pyfunc)
x = np.random.random(5)
initrefct = sys.getrefcount(x)
expected_x, expected_y = pyfunc(x)
got_x, got_y = cfunc(x)
self.assertIs(x, expected_x)
self.assertIs(x, got_x)
np.testing.assert_equal(expected_x, got_x)
np.testing.assert_equal(expected_y, got_y)
del expected_x, got_x
self.assertEqual(initrefct, sys.getrefcount(x))
self.assertEqual(sys.getrefcount(expected_y), sys.getrefcount(got_y))
def test_return_tuple_of_array_created(self):
def pyfunc(x):
y = np.empty(x.size)
for i in range(y.size):
y[i] = x[i] + 1
out = y, y
return out
cfunc = nrtjit(pyfunc)
x = np.random.random(5)
expected_x, expected_y = pyfunc(x)
got_x, got_y = cfunc(x)
np.testing.assert_equal(expected_x, got_x)
np.testing.assert_equal(expected_y, got_y)
if PYVERSION in ((3, 14), ):
expected_refcount = 1
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
expected_refcount = 2
else:
raise NotImplementedError(PYVERSION)
self.assertEqual(expected_refcount, sys.getrefcount(got_y))
self.assertEqual(expected_refcount, sys.getrefcount(got_x))
def test_issue_with_return_leak(self):
"""
Dispatcher returns a new reference.
It need to workaround it for now.
"""
@nrtjit
def inner(out):
return out
def pyfunc(x):
return inner(x)
cfunc = nrtjit(pyfunc)
arr = np.arange(10)
old_refct = sys.getrefcount(arr)
if PYVERSION in ((3, 14), ):
self.assertEqual(2, sys.getrefcount(pyfunc(arr)))
self.assertEqual(2, sys.getrefcount(cfunc(arr)))
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
self.assertEqual(old_refct, sys.getrefcount(pyfunc(arr)))
self.assertEqual(old_refct, sys.getrefcount(cfunc(arr)))
else:
raise NotImplementedError(PYVERSION)
self.assertEqual(old_refct, sys.getrefcount(arr))
|
TestDynArray
|
python
|
langchain-ai__langchain
|
libs/partners/anthropic/tests/unit_tests/test_chat_models.py
|
{
"start": 40276,
"end": 44066
}
|
class ____(BaseModel):
"""Get the current weather in a given location."""
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
def test_anthropic_bind_tools_tool_choice() -> None:
chat_model = ChatAnthropic( # type: ignore[call-arg, call-arg]
model=MODEL_NAME,
anthropic_api_key="secret-api-key",
)
chat_model_with_tools = chat_model.bind_tools(
[GetWeather],
tool_choice={"type": "tool", "name": "GetWeather"},
)
assert cast("RunnableBinding", chat_model_with_tools).kwargs["tool_choice"] == {
"type": "tool",
"name": "GetWeather",
}
chat_model_with_tools = chat_model.bind_tools(
[GetWeather],
tool_choice="GetWeather",
)
assert cast("RunnableBinding", chat_model_with_tools).kwargs["tool_choice"] == {
"type": "tool",
"name": "GetWeather",
}
chat_model_with_tools = chat_model.bind_tools([GetWeather], tool_choice="auto")
assert cast("RunnableBinding", chat_model_with_tools).kwargs["tool_choice"] == {
"type": "auto",
}
chat_model_with_tools = chat_model.bind_tools([GetWeather], tool_choice="any")
assert cast("RunnableBinding", chat_model_with_tools).kwargs["tool_choice"] == {
"type": "any",
}
def test_optional_description() -> None:
llm = ChatAnthropic(model=MODEL_NAME)
class SampleModel(BaseModel):
sample_field: str
_ = llm.with_structured_output(SampleModel.model_json_schema())
def test_get_num_tokens_from_messages_passes_kwargs() -> None:
"""Test that get_num_tokens_from_messages passes kwargs to the model."""
llm = ChatAnthropic(model=MODEL_NAME)
with patch.object(anthropic, "Client") as _client:
llm.get_num_tokens_from_messages([HumanMessage("foo")], foo="bar")
assert _client.return_value.messages.count_tokens.call_args.kwargs["foo"] == "bar"
llm = ChatAnthropic(
model=MODEL_NAME,
betas=["context-management-2025-06-27"],
context_management={"edits": [{"type": "clear_tool_uses_20250919"}]},
)
with patch.object(anthropic, "Client") as _client:
llm.get_num_tokens_from_messages([HumanMessage("foo")])
call_args = _client.return_value.beta.messages.count_tokens.call_args.kwargs
assert call_args["betas"] == ["context-management-2025-06-27"]
assert call_args["context_management"] == {
"edits": [{"type": "clear_tool_uses_20250919"}]
}
def test_usage_metadata_standardization() -> None:
class UsageModel(BaseModel):
input_tokens: int = 10
output_tokens: int = 5
cache_read_input_tokens: int = 3
cache_creation_input_tokens: int = 2
# Happy path
usage = UsageModel()
result = _create_usage_metadata(usage)
assert result["input_tokens"] == 15 # 10 + 3 + 2
assert result["output_tokens"] == 5
assert result["total_tokens"] == 20
assert result.get("input_token_details") == {"cache_read": 3, "cache_creation": 2}
# Null input and output tokens
class UsageModelNulls(BaseModel):
input_tokens: int | None = None
output_tokens: int | None = None
cache_read_input_tokens: int | None = None
cache_creation_input_tokens: int | None = None
usage_nulls = UsageModelNulls()
result = _create_usage_metadata(usage_nulls)
assert result["input_tokens"] == 0
assert result["output_tokens"] == 0
assert result["total_tokens"] == 0
# Test missing fields
class UsageModelMissing(BaseModel):
pass
usage_missing = UsageModelMissing()
result = _create_usage_metadata(usage_missing)
assert result["input_tokens"] == 0
assert result["output_tokens"] == 0
assert result["total_tokens"] == 0
|
GetWeather
|
python
|
streamlit__streamlit
|
lib/streamlit/testing/v1/app_test.py
|
{
"start": 2600,
"end": 38351
}
|
class ____:
"""
A simulated Streamlit app to check the correctness of displayed\
elements and outputs.
An instance of ``AppTest`` simulates a running Streamlit app. This class
provides methods to set up, manipulate, and inspect the app contents via
API instead of a browser UI. It can be used to write automated tests of an
app in various scenarios. These can then be run using a tool like pytest.
``AppTest`` can be initialized by one of three class methods:
* |st.testing.v1.AppTest.from_file|_ (recommended)
* |st.testing.v1.AppTest.from_string|_
* |st.testing.v1.AppTest.from_function|_
Once initialized, Session State and widget values can be updated and the
script can be run. Unlike an actual live-running Streamlit app, you need to
call ``AppTest.run()`` explicitly to re-run the app after changing a widget
value. Switching pages also requires an explicit, follow-up call to
``AppTest.run()``.
``AppTest`` enables developers to build tests on their app as-is, in the
familiar python test format, without major refactoring or abstracting out
logic to be tested separately from the UI. Tests can run quickly with very
low overhead. A typical pattern is to build a suite of tests for an app
that ensure consistent functionality as the app evolves, and run the tests
locally and/or in a CI environment like Github Actions.
.. note::
``AppTest`` only supports testing a single page of an app per
instance. For multipage apps, each page will need to be tested
separately. ``AppTest`` is not yet compatible with multipage apps
using ``st.navigation`` and ``st.Page``.
.. |st.testing.v1.AppTest.from_file| replace:: ``st.testing.v1.AppTest.from_file``
.. _st.testing.v1.AppTest.from_file: #apptestfrom_file
.. |st.testing.v1.AppTest.from_string| replace:: ``st.testing.v1.AppTest.from_string``
.. _st.testing.v1.AppTest.from_string: #apptestfrom_string
.. |st.testing.v1.AppTest.from_function| replace:: ``st.testing.v1.AppTest.from_function``
.. _st.testing.v1.AppTest.from_function: #apptestfrom_function
Attributes
----------
secrets: dict[str, Any]
Dictionary of secrets to be used the simulated app. Use dict-like
syntax to set secret values for the simulated app.
session_state: SafeSessionState
Session State for the simulated app. SafeSessionState object supports
read and write operations as usual for Streamlit apps.
query_params: dict[str, Any]
Dictionary of query parameters to be used by the simluated app. Use
dict-like syntax to set ``query_params`` values for the simulated app.
"""
def __init__(
self,
script_path: str | Path,
*,
default_timeout: float,
args: tuple[Any, ...] | None = None,
kwargs: dict[str, Any] | None = None,
) -> None:
self._script_path = str(script_path)
self.default_timeout = default_timeout
session_state = SessionState()
session_state[TESTING_KEY] = {}
self.session_state = SafeSessionState(session_state, lambda: None)
self.query_params: dict[str, Any] = {}
self.secrets: dict[str, Any] = {}
self.args = args
self.kwargs = kwargs
self._page_hash = ""
tree = ElementTree()
tree._runner = self
self._tree = tree
@classmethod
def from_string(cls, script: str, *, default_timeout: float = 3) -> AppTest:
"""
Create an instance of ``AppTest`` to simulate an app page defined\
within a string.
This is useful for testing short scripts that fit comfortably as an
inline string in the test itself, without having to create a separate
file for it. The script must be executable on its own and so must
contain all necessary imports.
Parameters
----------
script: str
The string contents of the script to be run.
default_timeout: float
Default time in seconds before a script run is timed out. Can be
overridden for individual ``.run()`` calls.
Returns
-------
AppTest
A simulated Streamlit app for testing. The simulated app can be
executed via ``.run()``.
"""
return cls._from_string(script, default_timeout=default_timeout)
@classmethod
def _from_string(
cls,
script: str,
*,
default_timeout: float = 3,
args: tuple[Any, ...] | None = None,
kwargs: dict[str, Any] | None = None,
) -> AppTest:
script_name = calc_md5(bytes(script, "utf-8"))
path = Path(TMP_DIR.name, script_name)
aligned_script = textwrap.dedent(script)
path.write_text(aligned_script)
return AppTest(
str(path), default_timeout=default_timeout, args=args, kwargs=kwargs
)
@classmethod
def from_function(
cls,
script: Callable[..., Any],
*,
default_timeout: float = 3,
args: tuple[Any, ...] | None = None,
kwargs: dict[str, Any] | None = None,
) -> AppTest:
"""
Create an instance of ``AppTest`` to simulate an app page defined\
within a function.
This is similar to ``AppTest.from_string()``, but more convenient to
write with IDE assistance. The script must be executable on its own and
so must contain all necessary imports.
Parameters
----------
script: Callable
A function whose body will be used as a script. Must be runnable
in isolation, so it must include any necessary imports.
default_timeout: float
Default time in seconds before a script run is timed out. Can be
overridden for individual ``.run()`` calls.
args: tuple
An optional tuple of args to pass to the script function.
kwargs: dict
An optional dict of kwargs to pass to the script function.
Returns
-------
AppTest
A simulated Streamlit app for testing. The simulated app can be
executed via ``.run()``.
"""
source_lines, _ = inspect.getsourcelines(script)
source = textwrap.dedent("".join(source_lines))
module = (
source
+ f"\n{script.__name__ if hasattr(script, '__name__') else 'script'}(*__args, **__kwargs)"
)
return cls._from_string(
module, default_timeout=default_timeout, args=args, kwargs=kwargs
)
@classmethod
def from_file(
cls, script_path: str | Path, *, default_timeout: float = 3
) -> AppTest:
"""
Create an instance of ``AppTest`` to simulate an app page defined\
within a file.
This option is most convenient for CI workflows and testing of
published apps. The script must be executable on its own and so must
contain all necessary imports.
Parameters
----------
script_path: str | Path
Path to a script file. The path should be absolute or relative to
the file calling ``.from_file``.
default_timeout: float
Default time in seconds before a script run is timed out. Can be
overridden for individual ``.run()`` calls.
Returns
-------
AppTest
A simulated Streamlit app for testing. The simulated app can be
executed via ``.run()``.
"""
script_path = Path(script_path)
if script_path.is_file():
path = script_path
else:
# TODO: Make this not super fragile
# Attempt to find the test file calling this method, so the
# path can be relative to there.
stack = traceback.StackSummary.extract(traceback.walk_stack(None))
filepath = Path(stack[1].filename)
path = filepath.parent / script_path
return AppTest(path, default_timeout=default_timeout)
def _run(
self,
widget_state: WidgetStates | None = None,
timeout: float | None = None,
) -> AppTest:
"""Run the script, and parse the output messages for querying
and interaction.
Timeout is in seconds, or None to use the default timeout of the runner.
"""
# Have to import the streamlit module itself so replacing st.secrets
# is visible to other modules.
import streamlit as st
if timeout is None:
timeout = self.default_timeout
# setup
mock_runtime = MagicMock(spec=Runtime)
mock_runtime.media_file_mgr = MediaFileManager(
MemoryMediaFileStorage("/mock/media")
)
mock_runtime.cache_storage_manager = MemoryCacheStorageManager()
Runtime._instance = mock_runtime
script_cache = ScriptCache()
pages_manager = PagesManager(
self._script_path, script_cache, setup_watcher=False
)
saved_secrets: Secrets = st.secrets
# Only modify global secrets stuff if we have been given secrets
if self.secrets:
new_secrets = Secrets()
new_secrets._secrets = self.secrets
st.secrets = new_secrets
script_runner = LocalScriptRunner(
self._script_path,
self.session_state,
pages_manager,
args=self.args,
kwargs=self.kwargs,
)
with patch_config_options({"global.appTest": True}):
self._tree = script_runner.run(
widget_state, self.query_params, timeout, self._page_hash
)
self._tree._runner = self
# Last event is SHUTDOWN, so the corresponding data includes query string
query_string = script_runner.event_data[-1]["client_state"].query_string
self.query_params = parse.parse_qs(query_string)
if self.secrets:
if st.secrets._secrets is not None:
self.secrets = dict(st.secrets._secrets)
st.secrets = saved_secrets
Runtime._instance = None
return self
def run(self, *, timeout: float | None = None) -> AppTest:
"""Run the script from the current state.
This is equivalent to manually rerunning the app or the rerun that
occurs upon user interaction. ``AppTest.run()`` must be manually called
after updating a widget value or switching pages as script reruns do
not occur automatically as they do for live-running Streamlit apps.
Parameters
----------
timeout : float or None
The maximum number of seconds to run the script. If ``timeout`` is
``None`` (default), Streamlit uses the default timeout set for the
instance of ``AppTest``.
Returns
-------
AppTest
self
"""
return self._tree.run(timeout=timeout)
def switch_page(self, page_path: str) -> AppTest:
"""Switch to another page of the app.
This method does not automatically rerun the app. Use a follow-up call
to ``AppTest.run()`` to obtain the elements on the selected page.
Parameters
----------
page_path: str
Path of the page to switch to. The path must be relative to the
main script's location (e.g. ``"pages/my_page.py"``).
Returns
-------
AppTest
self
"""
main_dir = Path(self._script_path).parent
full_page_path = main_dir / page_path
if not full_page_path.is_file():
raise ValueError(
f"Unable to find script at {page_path}, make sure the page given is relative to the main script."
)
page_path_str = str(full_page_path.resolve())
_, page_name = page_icon_and_name(Path(page_path_str))
self._page_hash = calc_md5(page_name)
return self
@property
def main(self) -> Block:
"""Sequence of elements within the main body of the app.
Returns
-------
Block
A container of elements. Block can be queried for elements in the
same manner as ``AppTest``. For example, ``Block.checkbox`` will
return all ``st.checkbox`` within the associated container.
"""
return self._tree.main
@property
def sidebar(self) -> Block:
"""Sequence of all elements within ``st.sidebar``.
Returns
-------
Block
A container of elements. Block can be queried for elements in the
same manner as ``AppTest``. For example, ``Block.checkbox`` will
return all ``st.checkbox`` within the associated container.
"""
return self._tree.sidebar
@property
def button(self) -> WidgetList[Button]:
"""Sequence of all ``st.button`` and ``st.form_submit_button`` widgets.
Returns
-------
WidgetList of Button
Sequence of all ``st.button`` and ``st.form_submit_button``
widgets. Individual widgets can be accessed from a WidgetList by
index (order on the page) or key. For example, ``at.button[0]`` for
the first widget or ``at.button(key="my_key")`` for a widget with a
given key.
"""
return self._tree.button
@property
def button_group(self) -> WidgetList[ButtonGroup[Any]]:
"""Sequence of all ``st.feedback`` widgets.
Returns
-------
WidgetList of ButtonGroup
Sequence of all ``st.feedback`` widgets. Individual widgets can be
accessed from a WidgetList by index (order on the page) or key. For
example, ``at.button_group[0]`` for the first widget or
``at.button_group(key="my_key")`` for a widget with a given key.
"""
return self._tree.button_group
@property
def caption(self) -> ElementList[Caption]:
"""Sequence of all ``st.caption`` elements.
Returns
-------
ElementList of Caption
Sequence of all ``st.caption`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.caption[0]`` for the first element. Caption is an
extension of the Element class.
"""
return self._tree.caption
@property
def chat_input(self) -> WidgetList[ChatInput]:
"""Sequence of all ``st.chat_input`` widgets.
Returns
-------
WidgetList of ChatInput
Sequence of all ``st.chat_input`` widgets. Individual widgets can
be accessed from a WidgetList by index (order on the page) or key.
For example, ``at.chat_input[0]`` for the first widget or
``at.chat_input(key="my_key")`` for a widget with a given key.
"""
return self._tree.chat_input
@property
def chat_message(self) -> Sequence[ChatMessage]:
"""Sequence of all ``st.chat_message`` elements.
Returns
-------
Sequence of ChatMessage
Sequence of all ``st.chat_message`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.chat_message[0]`` for the first element. ChatMessage
is an extension of the Block class.
"""
return self._tree.chat_message
@property
def checkbox(self) -> WidgetList[Checkbox]:
"""Sequence of all ``st.checkbox`` widgets.
Returns
-------
WidgetList of Checkbox
Sequence of all ``st.checkbox`` widgets. Individual widgets can
be accessed from a WidgetList by index (order on the page) or key.
For example, ``at.checkbox[0]`` for the first widget or
``at.checkbox(key="my_key")`` for a widget with a given key.
"""
return self._tree.checkbox
@property
def code(self) -> ElementList[Code]:
"""Sequence of all ``st.code`` elements.
Returns
-------
ElementList of Code
Sequence of all ``st.code`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.code[0]`` for the first element. Code is an
extension of the Element class.
"""
return self._tree.code
@property
def color_picker(self) -> WidgetList[ColorPicker]:
"""Sequence of all ``st.color_picker`` widgets.
Returns
-------
WidgetList of ColorPicker
Sequence of all ``st.color_picker`` widgets. Individual widgets can
be accessed from a WidgetList by index (order on the page) or key.
For example, ``at.color_picker[0]`` for the first widget or
``at.color_picker(key="my_key")`` for a widget with a given key.
"""
return self._tree.color_picker
@property
def columns(self) -> Sequence[Column]:
"""Sequence of all columns within ``st.columns`` elements.
Each column within a single ``st.columns`` will be returned as a
separate Column in the Sequence.
Returns
-------
Sequence of Column
Sequence of all columns within ``st.columns`` elements. Individual
columns can be accessed from an ElementList by index (order on the
page). For example, ``at.columns[0]`` for the first column. Column
is an extension of the Block class.
"""
return self._tree.columns
@property
def dataframe(self) -> ElementList[Dataframe]:
"""Sequence of all ``st.dataframe`` elements.
Returns
-------
ElementList of Dataframe
Sequence of all ``st.dataframe`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.dataframe[0]`` for the first element. Dataframe is an
extension of the Element class.
"""
return self._tree.dataframe
@property
def date_input(self) -> WidgetList[DateInput]:
"""Sequence of all ``st.date_input`` widgets.
Returns
-------
WidgetList of DateInput
Sequence of all ``st.date_input`` widgets. Individual widgets can
be accessed from a WidgetList by index (order on the page) or key.
For example, ``at.date_input[0]`` for the first widget or
``at.date_input(key="my_key")`` for a widget with a given key.
"""
return self._tree.date_input
@property
def datetime_input(self) -> WidgetList[DateTimeInput]:
"""Sequence of all ``st.datetime_input`` widgets.
Returns
-------
WidgetList of DateTimeInput
Sequence of all ``st.datetime_input`` widgets. Individual widgets can
be accessed from a WidgetList by index (order on the page) or key.
For example, ``at.datetime_input[0]`` for the first widget or
``at.datetime_input(key="my_key")`` for a widget with a given key.
"""
return self._tree.datetime_input
@property
def divider(self) -> ElementList[Divider]:
"""Sequence of all ``st.divider`` elements.
Returns
-------
ElementList of Divider
Sequence of all ``st.divider`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.divider[0]`` for the first element. Divider is an
extension of the Element class.
"""
return self._tree.divider
@property
def error(self) -> ElementList[Error]:
"""Sequence of all ``st.error`` elements.
Returns
-------
ElementList of Error
Sequence of all ``st.error`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.error[0]`` for the first element. Error is an
extension of the Element class.
"""
return self._tree.error
@property
def exception(self) -> ElementList[Exception]:
"""Sequence of all ``st.exception`` elements.
Returns
-------
ElementList of Exception
Sequence of all ``st.exception`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.exception[0]`` for the first element. Exception is an
extension of the Element class.
"""
return self._tree.exception
@property
def expander(self) -> Sequence[Expander]:
"""Sequence of all ``st.expander`` elements.
Returns
-------
Sequence of Expandable
Sequence of all ``st.expander`` elements. Individual elements can be
accessed from a Sequence by index (order on the page). For
example, ``at.expander[0]`` for the first element. Expandable is an
extension of the Block class.
"""
return self._tree.expander
@property
def header(self) -> ElementList[Header]:
"""Sequence of all ``st.header`` elements.
Returns
-------
ElementList of Header
Sequence of all ``st.header`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.header[0]`` for the first element. Header is an
extension of the Element class.
"""
return self._tree.header
@property
def info(self) -> ElementList[Info]:
"""Sequence of all ``st.info`` elements.
Returns
-------
ElementList of Info
Sequence of all ``st.info`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.info[0]`` for the first element. Info is an
extension of the Element class.
"""
return self._tree.info
@property
def json(self) -> ElementList[Json]:
"""Sequence of all ``st.json`` elements.
Returns
-------
ElementList of Json
Sequence of all ``st.json`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.json[0]`` for the first element. Json is an
extension of the Element class.
"""
return self._tree.json
@property
def latex(self) -> ElementList[Latex]:
"""Sequence of all ``st.latex`` elements.
Returns
-------
ElementList of Latex
Sequence of all ``st.latex`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.latex[0]`` for the first element. Latex is an
extension of the Element class.
"""
return self._tree.latex
@property
def markdown(self) -> ElementList[Markdown]:
"""Sequence of all ``st.markdown`` elements.
Returns
-------
ElementList of Markdown
Sequence of all ``st.markdown`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.markdown[0]`` for the first element. Markdown is an
extension of the Element class.
"""
return self._tree.markdown
@property
def metric(self) -> ElementList[Metric]:
"""Sequence of all ``st.metric`` elements.
Returns
-------
ElementList of Metric
Sequence of all ``st.metric`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.metric[0]`` for the first element. Metric is an
extension of the Element class.
"""
return self._tree.metric
@property
def multiselect(self) -> WidgetList[Multiselect[Any]]:
"""Sequence of all ``st.multiselect`` widgets.
Returns
-------
WidgetList of Multiselect
Sequence of all ``st.multiselect`` widgets. Individual widgets can
be accessed from a WidgetList by index (order on the page) or key.
For example, ``at.multiselect[0]`` for the first widget or
``at.multiselect(key="my_key")`` for a widget with a given key.
"""
return self._tree.multiselect
@property
def number_input(self) -> WidgetList[NumberInput]:
"""Sequence of all ``st.number_input`` widgets.
Returns
-------
WidgetList of NumberInput
Sequence of all ``st.number_input`` widgets. Individual widgets can
be accessed from a WidgetList by index (order on the page) or key.
For example, ``at.number_input[0]`` for the first widget or
``at.number_input(key="my_key")`` for a widget with a given key.
"""
return self._tree.number_input
@property
def radio(self) -> WidgetList[Radio[Any]]:
"""Sequence of all ``st.radio`` widgets.
Returns
-------
WidgetList of Radio
Sequence of all ``st.radio`` widgets. Individual widgets can
be accessed from a WidgetList by index (order on the page) or key.
For example, ``at.radio[0]`` for the first widget or
``at.radio(key="my_key")`` for a widget with a given key.
"""
return self._tree.radio
@property
def select_slider(self) -> WidgetList[SelectSlider[Any]]:
"""Sequence of all ``st.select_slider`` widgets.
Returns
-------
WidgetList of SelectSlider
Sequence of all ``st.select_slider`` widgets. Individual widgets can
be accessed from a WidgetList by index (order on the page) or key.
For example, ``at.select_slider[0]`` for the first widget or
``at.select_slider(key="my_key")`` for a widget with a given key.
"""
return self._tree.select_slider
@property
def selectbox(self) -> WidgetList[Selectbox[Any]]:
"""Sequence of all ``st.selectbox`` widgets.
Returns
-------
WidgetList of Selectbox
Sequence of all ``st.selectbox`` widgets. Individual widgets can
be accessed from a WidgetList by index (order on the page) or key.
For example, ``at.selectbox[0]`` for the first widget or
``at.selectbox(key="my_key")`` for a widget with a given key.
"""
return self._tree.selectbox
@property
def slider(self) -> WidgetList[Slider[Any]]:
"""Sequence of all ``st.slider`` widgets.
Returns
-------
WidgetList of Slider
Sequence of all ``st.slider`` widgets. Individual widgets can
be accessed from a WidgetList by index (order on the page) or key.
For example, ``at.slider[0]`` for the first widget or
``at.slider(key="my_key")`` for a widget with a given key.
"""
return self._tree.slider
@property
def subheader(self) -> ElementList[Subheader]:
"""Sequence of all ``st.subheader`` elements.
Returns
-------
ElementList of Subheader
Sequence of all ``st.subheader`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.subheader[0]`` for the first element. Subheader is an
extension of the Element class.
"""
return self._tree.subheader
@property
def success(self) -> ElementList[Success]:
"""Sequence of all ``st.success`` elements.
Returns
-------
ElementList of Success
Sequence of all ``st.success`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.success[0]`` for the first element. Success is an
extension of the Element class.
"""
return self._tree.success
@property
def status(self) -> Sequence[Status]:
"""Sequence of all ``st.status`` elements.
Returns
-------
Sequence of Status
Sequence of all ``st.status`` elements. Individual elements can be
accessed from a Sequence by index (order on the page). For
example, ``at.status[0]`` for the first element. Status is an
extension of the Block class.
"""
return self._tree.status
@property
def table(self) -> ElementList[Table]:
"""Sequence of all ``st.table`` elements.
Returns
-------
ElementList of Table
Sequence of all ``st.table`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.table[0]`` for the first element. Table is an
extension of the Element class.
"""
return self._tree.table
@property
def tabs(self) -> Sequence[Tab]:
"""Sequence of all tabs within ``st.tabs`` elements.
Each tab within a single ``st.tabs`` will be returned as a separate Tab
in the Sequence. Additionally, the tab labels are forwarded to each
Tab element as a property. For example, ``st.tabs("A","B")`` will
yield two Tab objects, with ``Tab.label`` returning "A" and "B",
respectively.
Returns
-------
Sequence of Tab
Sequence of all tabs within ``st.tabs`` elements. Individual
tabs can be accessed from an ElementList by index (order on the
page). For example, ``at.tabs[0]`` for the first tab. Tab is an
extension of the Block class.
"""
return self._tree.tabs
@property
def text(self) -> ElementList[Text]:
"""Sequence of all ``st.text`` elements.
Returns
-------
ElementList of Text
Sequence of all ``st.text`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.text[0]`` for the first element. Text is an
extension of the Element class.
"""
return self._tree.text
@property
def text_area(self) -> WidgetList[TextArea]:
"""Sequence of all ``st.text_area`` widgets.
Returns
-------
WidgetList of TextArea
Sequence of all ``st.text_area`` widgets. Individual widgets can
be accessed from a WidgetList by index (order on the page) or key.
For example, ``at.text_area[0]`` for the first widget or
``at.text_area(key="my_key")`` for a widget with a given key.
"""
return self._tree.text_area
@property
def text_input(self) -> WidgetList[TextInput]:
"""Sequence of all ``st.text_input`` widgets.
Returns
-------
WidgetList of TextInput
Sequence of all ``st.text_input`` widgets. Individual widgets can
be accessed from a WidgetList by index (order on the page) or key.
For example, ``at.text_input[0]`` for the first widget or
``at.text_input(key="my_key")`` for a widget with a given key.
"""
return self._tree.text_input
@property
def time_input(self) -> WidgetList[TimeInput]:
"""Sequence of all ``st.time_input`` widgets.
Returns
-------
WidgetList of TimeInput
Sequence of all ``st.time_input`` widgets. Individual widgets can
be accessed from a WidgetList by index (order on the page) or key.
For example, ``at.time_input[0]`` for the first widget or
``at.time_input(key="my_key")`` for a widget with a given key.
"""
return self._tree.time_input
@property
def title(self) -> ElementList[Title]:
"""Sequence of all ``st.title`` elements.
Returns
-------
ElementList of Title
Sequence of all ``st.title`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.title[0]`` for the first element. Title is an
extension of the Element class.
"""
return self._tree.title
@property
def toast(self) -> ElementList[Toast]:
"""Sequence of all ``st.toast`` elements.
Returns
-------
ElementList of Toast
Sequence of all ``st.toast`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.toast[0]`` for the first element. Toast is an
extension of the Element class.
"""
return self._tree.toast
@property
def toggle(self) -> WidgetList[Toggle]:
"""Sequence of all ``st.toggle`` widgets.
Returns
-------
WidgetList of Toggle
Sequence of all ``st.toggle`` widgets. Individual widgets can
be accessed from a WidgetList by index (order on the page) or key.
For example, ``at.toggle[0]`` for the first widget or
``at.toggle(key="my_key")`` for a widget with a given key.
"""
return self._tree.toggle
@property
def warning(self) -> ElementList[Warning]:
"""Sequence of all ``st.warning`` elements.
Returns
-------
ElementList of Warning
Sequence of all ``st.warning`` elements. Individual elements can be
accessed from an ElementList by index (order on the page). For
example, ``at.warning[0]`` for the first element. Warning is an
extension of the Element class.
"""
return self._tree.warning
def __len__(self) -> int:
return len(self._tree)
def __iter__(self) -> Iterator[Node]:
yield from self._tree
def __getitem__(self, idx: int) -> Node:
return self._tree[idx]
def get(self, element_type: str) -> Sequence[Node]:
"""Get elements or widgets of the specified type.
This method returns the collection of all elements or widgets of
the specified type on the current page. Retrieve a specific element by
using its index (order on page) or key lookup.
Parameters
----------
element_type: str
An element attribute of ``AppTest``. For example, "button",
"caption", or "chat_input".
Returns
-------
Sequence of Elements
Sequence of elements of the given type. Individual elements can
be accessed from a Sequence by index (order on the page). When
getting and ``element_type`` that is a widget, individual widgets
can be accessed by key. For example, ``at.get("text")[0]`` for the
first ``st.text`` element or ``at.get("slider")(key="my_key")`` for
the ``st.slider`` widget with a given key.
"""
return self._tree.get(element_type)
def __repr__(self) -> str:
return repr_(self)
|
AppTest
|
python
|
django__django
|
tests/m2m_and_m2o/models.py
|
{
"start": 255,
"end": 581
}
|
class ____(models.Model):
num = models.IntegerField()
cc = models.ManyToManyField(User, blank=True, related_name="test_issue_cc")
client = models.ForeignKey(User, models.CASCADE, related_name="test_issue_client")
class Meta:
ordering = ("num",)
def __str__(self):
return str(self.num)
|
Issue
|
python
|
FactoryBoy__factory_boy
|
tests/djapp/models.py
|
{
"start": 949,
"end": 993
}
|
class ____(AbstractBase):
pass
|
ConcreteSon
|
python
|
tensorflow__tensorflow
|
tensorflow/compiler/tests/sharding_util_ops_test.py
|
{
"start": 2241,
"end": 13625
}
|
class ____(xla_test.XLATestCase, parameterized.TestCase):
@parameterized.named_parameters(('Tensor', create_tensor_split_graph),
('Resource', create_resource_split_graph))
def testSplitDimensionZero(self, graph_fn):
for dtype in self.numeric_types:
with self.session() as sess, self.device_scope():
split = graph_fn(
sess,
input_value=[[[0]]],
input_dtype=dtype,
num_outputs=1,
num_splits=[1, 1, 0])
with self.assertRaisesOpError('index 2 must be positive, but got 0'):
sess.run(split)
@parameterized.named_parameters(('Tensor', create_tensor_split_graph),
('Resource', create_resource_split_graph))
def testSplitDimensionNegative(self, graph_fn):
for dtype in self.numeric_types:
with self.session() as sess, self.device_scope():
split = graph_fn(
sess,
input_value=[[[0]]],
input_dtype=dtype,
num_outputs=1,
num_splits=[1, -1, 1])
with self.assertRaisesOpError('index 1 must be positive, but got -1'):
sess.run(split)
@parameterized.named_parameters(('Tensor', create_tensor_split_graph),
('Resource', create_resource_split_graph))
def testNumOutputsMismatch(self, graph_fn):
for dtype in self.numeric_types:
with self.session() as sess, self.device_scope():
split = graph_fn(
sess,
input_value=[0, 1],
input_dtype=dtype,
num_outputs=1,
num_splits=[2])
with self.assertRaisesOpError('\'N\' must match number of slices 2'):
sess.run(split)
@parameterized.named_parameters(('Tensor', create_tensor_split_graph),
('Resource', create_resource_split_graph))
def testPaddingsLengthMismatch(self, graph_fn):
for dtype in self.numeric_types:
with self.session() as sess, self.device_scope():
split = graph_fn(
sess,
input_value=[[0, 1], [2, 3]],
input_dtype=dtype,
num_outputs=4,
num_splits=[2, 2],
paddings=[0])
with self.assertRaisesOpError('length 2, but got 1'):
sess.run(split)
@parameterized.named_parameters(('Tensor', create_tensor_split_graph),
('Resource', create_resource_split_graph))
def testPaddingsNegative(self, graph_fn):
for dtype in self.numeric_types:
with self.session() as sess, self.device_scope():
split = graph_fn(
sess,
input_value=[[0, 1], [2, 3]],
input_dtype=dtype,
num_outputs=4,
num_splits=[2, 2],
paddings=[0, -1])
with self.assertRaisesOpError('non-negative, but got -1 at index 1'):
sess.run(split)
@parameterized.named_parameters(('Tensor', create_tensor_split_graph),
('Resource', create_resource_split_graph))
def testInputRankSplitMismatch(self, graph_fn):
for dtype in self.numeric_types:
with self.session() as sess, self.device_scope():
split = graph_fn(
sess,
input_value=[[0, 1], [2, 3]],
input_dtype=dtype,
num_outputs=8,
num_splits=[2, 2, 2])
with self.assertRaisesOpError(
'\'num_splits\' length 3, but got rank 2'):
sess.run(split)
@parameterized.named_parameters(('Tensor', create_tensor_split_graph),
('Resource', create_resource_split_graph))
def testDimNotEvenlySplit(self, graph_fn):
for dtype in self.numeric_types:
with self.session() as sess, self.device_scope():
split = graph_fn(
sess,
input_value=[[0, 1], [2, 3], [4, 5], [6, 7]],
input_dtype=dtype,
num_outputs=6,
num_splits=[3, 2])
with self.assertRaisesOpError('divisible by \'num_splits\' 3'):
sess.run(split)
@parameterized.named_parameters(('Tensor', create_tensor_split_graph),
('Resource', create_resource_split_graph))
def testDimWithPaddingNotEvenlySplit(self, graph_fn):
for dtype in self.numeric_types:
with self.session() as sess, self.device_scope():
split = graph_fn(
sess,
input_value=[[0, 1], [2, 3], [4, 5], [6, 7]],
input_dtype=dtype,
num_outputs=4,
num_splits=[2, 2],
paddings=[0, 1])
with self.assertRaisesOpError('divisible by \'num_splits\' 2'):
sess.run(split)
@parameterized.named_parameters(('Tensor', create_tensor_split_graph),
('Resource', create_resource_split_graph))
def testNoSplits(self, graph_fn):
for dtype in self.numeric_types:
with self.session() as sess, self.device_scope():
split = graph_fn(
sess,
input_value=[[[0, 1], [2, 3]], [[4, 5], [6, 7]]],
input_dtype=dtype,
num_outputs=1,
num_splits=[1, 1, 1])
results = sess.run(split)
self.assertLen(results, 1)
self.assertAllClose(results[0], [[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
@parameterized.named_parameters(('Tensor', create_tensor_split_graph),
('Resource', create_resource_split_graph))
def testNoSplitsWithPadding(self, graph_fn):
for dtype in self.numeric_types:
with self.session() as sess, self.device_scope():
split = graph_fn(
sess,
input_value=[[[0]], [[1]]],
input_dtype=dtype,
num_outputs=1,
num_splits=[1, 1, 1],
paddings=[0, 1, 1])
results = sess.run(split)
self.assertLen(results, 1)
self.assertAllClose(results[0], [[[0, 0], [0, 0]], [[1, 0], [0, 0]]])
@parameterized.named_parameters(('Tensor', create_tensor_split_graph),
('Resource', create_resource_split_graph))
def testSplitNoPadding(self, graph_fn):
for dtype in self.numeric_types:
with self.session() as sess, self.device_scope():
split = graph_fn(
sess,
input_value=[
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15],
],
input_dtype=dtype,
num_outputs=4,
num_splits=[2, 2])
results = sess.run(split)
self.assertLen(results, 4)
self.assertAllClose(results[0], [[0, 1], [4, 5]])
self.assertAllClose(results[1], [[2, 3], [6, 7]])
self.assertAllClose(results[2], [[8, 9], [12, 13]])
self.assertAllClose(results[3], [[10, 11], [14, 15]])
@parameterized.named_parameters(('Tensor', create_tensor_split_graph),
('Resource', create_resource_split_graph))
def testSplitPartialPadding(self, graph_fn):
for dtype in self.numeric_types:
with self.session() as sess, self.device_scope():
split = graph_fn(
sess,
input_value=[
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
],
input_dtype=dtype,
num_outputs=4,
num_splits=[2, 2],
paddings=[1, 1])
results = sess.run(split)
self.assertLen(results, 4)
self.assertAllClose(results[0], [[0, 1], [3, 4]])
self.assertAllClose(results[1], [[2, 0], [5, 0]])
self.assertAllClose(results[2], [[6, 7], [0, 0]])
self.assertAllClose(results[3], [[8, 0], [0, 0]])
@parameterized.named_parameters(('Tensor', create_tensor_split_graph),
('Resource', create_resource_split_graph))
def testSplitCompletePadding(self, graph_fn):
for dtype in self.numeric_types:
with self.session() as sess, self.device_scope():
split = graph_fn(
sess,
input_value=[[0], [1]],
input_dtype=dtype,
num_outputs=4,
num_splits=[2, 2],
paddings=[2, 3])
results = sess.run(split)
self.assertLen(results, 4)
self.assertAllClose(results[0], [[0, 0], [1, 0]])
self.assertAllClose(results[1], [[0, 0], [0, 0]])
self.assertAllClose(results[2], [[0, 0], [0, 0]])
self.assertAllClose(results[3], [[0, 0], [0, 0]])
@parameterized.named_parameters(
('1Tensor', create_tensor_split_graph, 1),
('2Tensor', create_tensor_split_graph, 2),
('3Tensor', create_tensor_split_graph, 3),
('4Tensor', create_tensor_split_graph, 4),
('5Tensor', create_tensor_split_graph, 5),
('6Tensor', create_tensor_split_graph, 6),
('7Tensor', create_tensor_split_graph, 7),
('8Tensor', create_tensor_split_graph, 8),
('1Resource', create_resource_split_graph, 1),
('2Resource', create_resource_split_graph, 2),
('3Resource', create_resource_split_graph, 3),
('4Resource', create_resource_split_graph, 4),
('5Resource', create_resource_split_graph, 5),
('6Resource', create_resource_split_graph, 6),
('7Resource', create_resource_split_graph, 7),
('8Resource', create_resource_split_graph, 8),
)
def testRanked(self, graph_fn, rank):
num_splits = [2] * rank
num_outputs = 2 << (rank - 1)
input_value = np.reshape(np.arange(np.prod(num_splits)), num_splits)
for dtype in self.numeric_types:
with self.session() as sess, self.device_scope():
split = graph_fn(
sess,
input_value=input_value,
input_dtype=dtype,
num_outputs=num_outputs,
num_splits=num_splits)
results = sess.run(split)
self.assertLen(results, num_outputs)
for i, result in enumerate(results):
expected_output = np.reshape(i, [1] * rank).astype(dtype)
self.assertAllClose(result, expected_output)
def create_tensor_concat_graph(
sess: Session,
input_values: List[Any],
input_dtype: Any,
num_concats: List[int],
paddings: Optional[List[int]] = None,
output_shape: Optional[List[int]] = None) -> Tensor:
del sess
del output_shape
const_input_ops = [
constant_op.constant(i, dtype=input_dtype) for i in input_values
]
return gen_tpu_ops.xla_concat_nd(const_input_ops, num_concats, paddings)
def create_resource_concat_graph(
sess: Session,
input_values: List[Any],
input_dtype: Any,
num_concats: List[int],
paddings: Optional[List[int]] = None,
output_shape: Optional[List[int]] = None) -> Tensor:
variable_shape = [] if output_shape is None else output_shape
variable = resource_variable_ops.ResourceVariable(
initial_value=np.zeros(variable_shape, dtype=input_dtype),
dtype=input_dtype)
sess.run(variables.variables_initializer([variable]))
const_input_ops = [
constant_op.constant(i, dtype=input_dtype) for i in input_values
]
concat = gen_tpu_ops.assign_variable_xla_concat_nd(variable.handle,
const_input_ops,
num_concats, paddings)
with control_dependencies([concat]):
return variable.read_value()
|
XlaSplitNDOpTest
|
python
|
mitmproxy__pdoc
|
pdoc/doc_ast.py
|
{
"start": 1973,
"end": 11345
}
|
class ____:
"""The information extracted from walking the syntax tree."""
var_docstrings: dict[str, str]
"""A qualname -> docstring mapping."""
func_docstrings: dict[str, str]
"""A qualname -> docstring mapping for functions."""
annotations: dict[str, str | type[pdoc.doc_types.empty]]
"""A qualname -> annotation mapping.
Annotations are not evaluated by this module and only returned as strings."""
def walk_tree(obj: types.ModuleType | type) -> AstInfo:
"""
Walks the abstract syntax tree for `obj` and returns the extracted information.
"""
return _walk_tree(parse(obj))
@cache
def _walk_tree(
tree: ast.Module | ast.ClassDef | ast.FunctionDef | ast.AsyncFunctionDef,
) -> AstInfo:
var_docstrings = {}
func_docstrings = {}
annotations = {}
for a, b in _pairwise_longest(_nodes(tree)):
if isinstance(a, ast_TypeAlias):
name = a.name.id
elif (
isinstance(a, ast.AnnAssign) and isinstance(a.target, ast.Name) and a.simple
):
name = a.target.id
annotations[name] = unparse(a.annotation)
elif (
isinstance(a, ast.Assign)
and len(a.targets) == 1
and isinstance(a.targets[0], ast.Name)
):
name = a.targets[0].id
# Make sure that all assignments are picked up, even is there is
# no annotation or docstring.
annotations.setdefault(name, pdoc.doc_types.empty)
elif isinstance(a, ast.FunctionDef) and a.body:
first = a.body[0]
if (
isinstance(first, ast.Expr)
and isinstance(first.value, ast.Constant)
and isinstance(first.value.value, str)
):
func_docstrings[a.name] = inspect.cleandoc(first.value.value).strip()
continue
else:
continue
if (
isinstance(b, ast.Expr)
and isinstance(b.value, ast.Constant)
and isinstance(b.value.value, str)
):
var_docstrings[name] = inspect.cleandoc(b.value.value).strip()
return AstInfo(
var_docstrings,
func_docstrings,
annotations,
)
T = TypeVar("T")
def sort_by_source(
obj: types.ModuleType | type, sorted: dict[str, T], unsorted: dict[str, T]
) -> tuple[dict[str, T], dict[str, T]]:
"""
Takes items from `unsorted` and inserts them into `sorted` in order of appearance in the source code of `obj`.
The only exception to this rule is `__init__`, which (if present) is always inserted first.
Some items may not be found, for example because they've been inherited from a superclass. They are returned as-is.
Returns a `(sorted, not found)` tuple.
"""
tree = parse(obj)
if "__init__" in unsorted:
sorted["__init__"] = unsorted.pop("__init__")
for a in _nodes(tree):
if (
isinstance(a, ast.Assign)
and len(a.targets) == 1
and isinstance(a.targets[0], ast.Name)
):
name = a.targets[0].id
elif (
isinstance(a, ast.AnnAssign) and isinstance(a.target, ast.Name) and a.simple
):
name = a.target.id
elif isinstance(a, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
name = a.name
elif isinstance(a, ast_TypeAlias):
name = a.name.id
else:
continue
if name in unsorted:
sorted[name] = unsorted.pop(name)
return sorted, unsorted
def type_checking_sections(mod: types.ModuleType) -> ast.Module:
"""
Walks the abstract syntax tree for `mod` and returns all statements guarded by TYPE_CHECKING blocks.
"""
ret = ast.Module(body=[], type_ignores=[])
tree = _parse_module(get_source(mod))
for node in tree.body:
if (
isinstance(node, ast.If)
and isinstance(node.test, ast.Name)
and node.test.id == "TYPE_CHECKING"
):
ret.body.extend(node.body)
if (
isinstance(node, ast.If)
and isinstance(node.test, ast.Attribute)
and isinstance(node.test.value, ast.Name)
# some folks do "import typing as t", the accuracy with just TYPE_CHECKING is good enough.
# and node.test.value.id == "typing"
and node.test.attr == "TYPE_CHECKING"
):
ret.body.extend(node.body)
return ret
@cache
def _parse_module(source: str) -> ast.Module:
"""
Parse the AST for the source code of a module and return the ast.Module.
Returns an empty ast.Module if source is empty.
"""
tree = _parse(source)
assert isinstance(tree, ast.Module)
return tree
@cache
def _parse_class(source: str) -> ast.ClassDef:
"""
Parse the AST for the source code of a class and return the ast.ClassDef.
Returns an empty ast.ClassDef if source is empty.
"""
tree = _parse(source)
if tree.body and len(tree.body) == 1:
t = tree.body[0]
if isinstance(t, ast.ClassDef):
return t
return ast.ClassDef(name="PdocStub", body=[], decorator_list=[]) # type: ignore
@cache
def _parse_function(source: str) -> ast.FunctionDef | ast.AsyncFunctionDef:
"""
Parse the AST for the source code of a (async) function and return the matching AST node.
Returns an empty ast.FunctionDef if source is empty.
"""
tree = _parse(source)
if tree.body and len(tree.body) == 1:
t = tree.body[0]
if isinstance(t, (ast.FunctionDef, ast.AsyncFunctionDef)):
return t
else:
# we have a lambda function,
# to simplify the API return the ast.FunctionDef stub.
pass
return ast.FunctionDef(
name="pdoc_stub", args=ast.arguments(), body=[], decorator_list=[]
) # type: ignore
def _parse(
source: str,
) -> ast.Module | ast.ClassDef | ast.FunctionDef | ast.AsyncFunctionDef:
try:
return ast.parse(_dedent(source))
except Exception as e:
warnings.warn(f"Error parsing source code: {e}\n===\n{source}\n===")
return ast.parse("")
@cache
def _dedent(source: str) -> str:
"""
Dedent the head of a function or class definition so that it can be parsed by `ast.parse`.
This is an alternative to `textwrap.dedent`, which does not dedent if there are docstrings
without indentation. For example, this is valid Python code but would not be dedented with `textwrap.dedent`:
class Foo:
def bar(self):
'''
this is a docstring
'''
"""
if not source or source[0] not in (" ", "\t"):
return source
source = source.lstrip()
# we may have decorators before our function definition, in which case we need to dedent a few more lines.
# the following heuristic should be good enough to detect if we have reached the definition.
# it's easy to produce examples where this fails, but this probably is not a problem in practice.
if not any(source.startswith(x) for x in ["async ", "def ", "class "]):
first_line, rest = source.split("\n", 1)
return first_line + "\n" + _dedent(rest)
else:
return source
@cache
def _nodes(tree: ast.Module | ast.ClassDef) -> list[ast.AST]:
"""
Returns the list of all nodes in tree's body, but also inlines the body of __init__.
This is useful to detect all declared variables in a class, even if they only appear in the constructor.
"""
return list(_nodes_iter(tree))
def _nodes_iter(tree: ast.Module | ast.ClassDef) -> Iterator[ast.AST]:
for a in tree.body:
yield a
if isinstance(a, ast.FunctionDef) and a.name == "__init__":
yield from _init_nodes(a)
def _init_nodes(tree: ast.FunctionDef) -> Iterator[ast.AST]:
"""
Transform attribute assignments like "self.foo = 42" to name assignments like "foo = 42",
keep all constant expressions, and no-op everything else.
This essentially allows us to inline __init__ when parsing a class definition.
"""
for a in tree.body:
if (
isinstance(a, ast.AnnAssign)
and isinstance(a.target, ast.Attribute)
and isinstance(a.target.value, ast.Name)
and a.target.value.id == "self"
):
yield ast.AnnAssign(
ast.Name(a.target.attr), a.annotation, a.value, simple=1
)
elif (
isinstance(a, ast.Assign)
and len(a.targets) == 1
and isinstance(a.targets[0], ast.Attribute)
and isinstance(a.targets[0].value, ast.Name)
and a.targets[0].value.id == "self"
):
yield ast.Assign(
[ast.Name(a.targets[0].attr)],
value=a.value,
type_comment=a.type_comment,
)
elif (
isinstance(a, ast.Expr)
and isinstance(a.value, ast.Constant)
and isinstance(a.value.value, str)
):
yield a
else:
yield ast.Pass()
def _pairwise_longest(iterable: Iterable[T]) -> Iterable[tuple[T, T]]:
"""s -> (s0,s1), (s1,s2), (s2, s3), ..., (sN, None)"""
a, b = tee(iterable)
next(b, None)
return zip_longest(a, b)
|
AstInfo
|
python
|
fluentpython__example-code-2e
|
03-dict-set/missing.py
|
{
"start": 2085,
"end": 2421
}
|
class ____(SimpleMappingSub):
def __getitem__(self, key):
try:
return self._data[key]
except KeyError:
return self[_upper(key)]
def get(self, key, default=None):
return self._data.get(key, default)
def __contains__(self, key):
return key in self._data
|
DictLikeMappingSub
|
python
|
pypa__hatch
|
backend/src/hatchling/builders/sdist.py
|
{
"start": 3102,
"end": 5496
}
|
class ____(BuilderConfig):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.__core_metadata_constructor: Callable[..., str] | None = None
self.__strict_naming: bool | None = None
self.__support_legacy: bool | None = None
@property
def core_metadata_constructor(self) -> Callable[..., str]:
if self.__core_metadata_constructor is None:
core_metadata_version = self.target_config.get("core-metadata-version", DEFAULT_METADATA_VERSION)
if not isinstance(core_metadata_version, str):
message = f"Field `tool.hatch.build.targets.{self.plugin_name}.core-metadata-version` must be a string"
raise TypeError(message)
constructors = get_core_metadata_constructors()
if core_metadata_version not in constructors:
message = (
f"Unknown metadata version `{core_metadata_version}` for field "
f"`tool.hatch.build.targets.{self.plugin_name}.core-metadata-version`. "
f"Available: {', '.join(sorted(constructors))}"
)
raise ValueError(message)
self.__core_metadata_constructor = constructors[core_metadata_version]
return self.__core_metadata_constructor
@property
def strict_naming(self) -> bool:
if self.__strict_naming is None:
if "strict-naming" in self.target_config:
strict_naming = self.target_config["strict-naming"]
if not isinstance(strict_naming, bool):
message = f"Field `tool.hatch.build.targets.{self.plugin_name}.strict-naming` must be a boolean"
raise TypeError(message)
else:
strict_naming = self.build_config.get("strict-naming", True)
if not isinstance(strict_naming, bool):
message = "Field `tool.hatch.build.strict-naming` must be a boolean"
raise TypeError(message)
self.__strict_naming = strict_naming
return self.__strict_naming
@property
def support_legacy(self) -> bool:
if self.__support_legacy is None:
self.__support_legacy = bool(self.target_config.get("support-legacy", False))
return self.__support_legacy
|
SdistBuilderConfig
|
python
|
openai__openai-python
|
tests/test_transform.py
|
{
"start": 2235,
"end": 2287
}
|
class ____(TypedDict):
foo: Union[Bar4, Baz4]
|
Foo4
|
python
|
google__jax
|
jax/_src/tpu/linalg/eigh.py
|
{
"start": 11156,
"end": 26537
}
|
class ____(NamedTuple):
"""Describes a subproblem of _eigh_work.
Each subproblem is a `size` x `size` Hermitian matrix, starting at `offset`
in the workspace.
"""
# The row offset of the block in the matrix of blocks.
offset: Array
# The size of the block.
size: Array
@api.jit(static_argnames=('termination_size', 'subset_by_index'))
def _eigh_work(H, n, termination_size, subset_by_index):
""" The main work loop performing the symmetric eigendecomposition of H.
Each step recursively computes a projector into the space of eigenvalues
above jnp.mean(jnp.diag(H)). The result of the projections into and out of
that space, along with the isometries accomplishing these, are then computed.
This is performed recursively until the projections have size 1, and thus
store an eigenvalue of the original input; the corresponding isometry is
the related eigenvector. The results are then composed.
This function cannot be Jitted because the internal split_spectrum cannot
be.
Args:
H: The Hermitian input.
n: The true (dynamic) shape of H.
Returns:
H, V: The result of the projection.
"""
# We turn what was originally a recursive algorithm into an iterative
# algorithm with an explicit stack.
N, _ = H.shape
n = jnp.asarray(n, np.int32)
agenda = Stack.create(
N + 1, _Subproblem(jnp.array(0, np.int32), jnp.array(0, np.int32)))
agenda = agenda.push(_Subproblem(offset=jnp.array(0, np.int32), size=n))
# eigenvectors is the array in which we build the output eigenvectors.
# We initialize it with the identity matrix so the initial matrix
# multiplications in_split_spectrum_jittable are the identity.
eigenvectors = jnp.eye(N, dtype=H.dtype)
# Keep a copy of the initial matrix Frobenius norm, so we know when to stop
# recursing. When the sub-matrix norm is less than eps*H0_norm, the contents are
# pure numerical noise, and we should just stop.
H0_norm = jnp_linalg.norm(_mask(H, (n, n)))
# blocks is an array representing a stack of Hermitian matrix blocks that we
# need to recursively decompose. Subproblems are different sizes, so the stack
# of blocks is ragged. Subproblems are left-aligned (i.e. starting at the 0th
# column). Here is an ASCII art picture of three blocks A, B, C, embedded
# in the larger `blocks` workspace (represented with trailing dots).
#
# A A A . . .
# A A A . . .
# A A A . . .
# B B . . . .
# B B . . . .
# C C C C . .
# C C C C . .
# C C C C . .
# C C C C . .
#
# Each step of the algorithm subdivides a block into two subblocks whose
# sizes sum to the original block size. We overwrite the original block with
# those two subblocks so we don't need any additional scratch space.
#
# At termination, "blocks" will contain 1x1 blocks (i.e., the eigenvalues) in
# its first column.
blocks = H
def base_case(B, offset, b, agenda, blocks, eigenvectors):
# Base case: for blocks under a minimum size, we cutoff the recursion
# and call the TPU Jacobi eigendecomposition implementation. The Jacobi
# algorithm works well for small matrices but scales poorly, so the two
# complement each other well.
H = _slice(blocks, (offset, 0), (b, b), (B, B))
V = _slice(eigenvectors, (0, offset), (n, b), (N, B))
# We replace the masked-out part of the matrix with the identity matrix.
# We know that the TPU Jacobi eigh implementation will not alter the order
# of the eigenvalues, so we know the eigendecomposition of the original
# matrix is in the top-left corner of the eigendecomposition of the padded
# matrix.
# It is very important that the underlying eigh implementation does not sort
# the eigenvalues for this reason! This is currently not true of JAX's CPU
# and GPU eigendecompositions, and for those platforms this algorithm will
# only do the right thing if termination_size == 1.
H = _mask(H, (b, b))
eig_vecs, eig_vals = lax_linalg.eigh(H, sort_eigenvalues=False)
eig_vecs = _mask(eig_vecs, (b, b))
eig_vals = _mask(eig_vals, (b,))
eig_vecs = tensor_contractions.dot(V, eig_vecs)
eig_vals = eig_vals.astype(eig_vecs.dtype)
blocks = _update_slice(blocks, eig_vals[:, None], (offset, 0), (b, 1))
eigenvectors = _update_slice(eigenvectors, eig_vecs, (0, offset), (n, b))
return agenda, blocks, eigenvectors
def recursive_case(B, offset, b, agenda, blocks, eigenvectors):
# The recursive case of the algorithm, specialized to a static block size
# of B.
H = _slice(blocks, (offset, 0), (b, b), (B, B))
def nearly_diagonal_case(agenda, blocks, eigenvectors):
blocks = _update_slice(blocks, jnp.diag(H)[:, None], (offset, 0), (b, 1))
return agenda, blocks, eigenvectors
def should_update_range(start, end, subset_by_index):
return (
True
if subset_by_index is None
else ((start < subset_by_index[1]) & (end > subset_by_index[0]))
)
def default_case(agenda, blocks, eigenvectors):
V = _slice(eigenvectors, (0, offset), (n, b), (N, B))
# TODO: Improve this?
split_point = reductions.nanmedian(_mask(jnp.diag(ufuncs.real(H)), (b,), np.nan))
H_minus, V_minus, H_plus, V_plus, rank = split_spectrum(
H, b, split_point, V0=V)
# Update state for *_minus.
updated_minus_state = (
_update_slice(blocks, H_minus, (offset, 0), (rank, rank)),
_update_slice(eigenvectors, V_minus, (0, offset), (n, rank)),
agenda.push(_Subproblem(offset, rank)),
)
should_update_minus = should_update_range(
offset, offset + rank, subset_by_index
)
blocks, eigenvectors, agenda = lax.cond(
should_update_minus,
lambda: updated_minus_state,
lambda: (blocks, eigenvectors, agenda),
)
# Update state for *_plus.
updated_plus_state = (
_update_slice(
blocks, H_plus, (offset + rank, 0), (b - rank, b - rank)
),
_update_slice(
eigenvectors, V_plus, (0, offset + rank), (n, b - rank)
),
agenda.push(_Subproblem(offset + rank, (b - rank))),
)
should_update_plus = should_update_range(
offset + rank, offset + b, subset_by_index
)
blocks, eigenvectors, agenda = lax.cond(
should_update_plus,
lambda: updated_plus_state,
lambda: (blocks, eigenvectors, agenda),
)
return agenda, blocks, eigenvectors
# If the matrix is nearly diagonal or has a tiny Frobenius norm compared to
# the original input matrix,, terminate the execution. This is necessary to
# handle matrices with clusters of eigenvalues, including rank deficient
# matrices. See Nakatsukasa and Higham section 5.2.
norm = jnp_linalg.norm(H)
eps = jnp.asarray(dtypes.finfo(H.dtype).eps, dtype=norm.dtype)
off_diag_norm = jnp_linalg.norm(
H - jnp.diag(jnp.diag(ufuncs.real(H)).astype(H.dtype)))
nearly_diagonal = off_diag_norm <= 5 * eps * norm
tiny = norm < eps * H0_norm
return lax.cond(
nearly_diagonal | tiny,
nearly_diagonal_case,
default_case,
agenda,
blocks,
eigenvectors,
)
def loop_cond(state):
agenda, _, _ = state
return ~agenda.empty()
# It would be wasteful to perform all computation padded up to the original
# matrix size. Instead, we form buckets of padded sizes e.g.,
# [N_0, N_1, ... N_k], aiming for a balance between compilation time
# and runtime.
cutoff = min(N, termination_size)
buckets = [cutoff]
branches = [partial(base_case, cutoff)]
if N > termination_size:
# If N > termination_size We use the following schedule:
# 1. N_0 = N,
# 2. N_i = _round_up(int(N_{i-1} / 1.98), 32), 0 < i < k
# 3. N_k = termination_size
# the rule for N_i is to avoid falling into the original large bucket
# when not splitting exactly at the half-way point during the recursion.
buckets.append(N)
branches.append(partial(recursive_case, N))
multiplier = 1.98
granularity = 32
i = int(N / multiplier)
while i > cutoff:
bucket_size = _round_up(i, granularity)
buckets.append(bucket_size)
branches.append(partial(recursive_case, bucket_size))
i = i // 2
buckets = jnp.array(buckets, dtype=np.int32)
def loop_body(state):
agenda, blocks, eigenvectors = state
(offset, b), agenda = agenda.pop()
which = jnp.where(buckets < b, dtypes.iinfo(np.int32).max, buckets)
choice = jnp.argmin(which)
return lax.switch(choice, branches, offset, b, agenda, blocks, eigenvectors)
_, blocks, eigenvectors = lax.while_loop(
loop_cond, loop_body, (agenda, blocks, eigenvectors))
return blocks[:, 0], eigenvectors
def eigh(
H,
*,
precision='float32',
termination_size=256,
n=None,
sort_eigenvalues=True,
subset_by_index=None,
):
"""Computes the eigendecomposition of the symmetric/Hermitian matrix H.
Args:
H: The `n x n` Hermitian input, padded to `N x N`.
precision: :class:`~jax.lax.Precision` object specifying the matmul
precision.
termination_size: Recursion ends once the blocks reach this linear size.
n: the true (dynamic) size of the matrix.
sort_eigenvalues: If `True`, the eigenvalues will be sorted from lowest to
highest.
subset_by_index: Optional 2-tuple [start, end] indicating the range of
indices of eigenvalues to compute. For example, is ``range_select`` =
[n-2,n], then ``eigh`` computes the two largest eigenvalues and their
eigenvectors.
Returns:
vals: The `n` eigenvalues of `H`.
vecs: A unitary matrix such that `vecs[:, i]` is a normalized eigenvector
of `H` corresponding to `vals[i]`. We have `H @ vecs = vals * vecs` up
to numerical error.
"""
M, N = H.shape
if M != N:
raise TypeError(f"Input H of shape {H.shape} must be square.")
if n is not None and n > N:
raise ValueError('Static size must be greater or equal to dynamic size.')
compute_slice = False
if not subset_by_index is None:
compute_slice = subset_by_index != (0, n)
if len(subset_by_index) != 2:
raise ValueError('subset_by_index must be a tuple of size 2.')
if subset_by_index[0] >= subset_by_index[1]:
raise ValueError('Got empty index range in subset_by_index.')
if subset_by_index[0] < 0:
raise ValueError('Indices in subset_by_index must be non-negative.')
range_max = N if n is None else n
if subset_by_index[1] > range_max:
raise ValueError('Index in subset_by_index[1] exceeds matrix size.')
if N <= termination_size:
if n is not None:
H = _mask(H, (n, n))
eig_vecs, eig_vals = lax_linalg.eigh(
H, lower=True, sort_eigenvalues=(sort_eigenvalues or compute_slice),
subset_by_index=None, symmetrize_input=False,
implementation=lax_linalg.EighImplementation.JACOBI,
)
if compute_slice:
eig_vals = eig_vals[subset_by_index[0] : subset_by_index[1]]
eig_vecs = eig_vecs[:, subset_by_index[0] : subset_by_index[1]]
return eig_vals, eig_vecs
n = N if n is None else n
with config.default_matmul_precision(precision):
eig_vals, eig_vecs = _eigh_work(
H, n, termination_size=termination_size, subset_by_index=subset_by_index
)
eig_vals = _mask(ufuncs.real(eig_vals), (n,), np.nan)
if sort_eigenvalues or compute_slice:
sort_idxs = jnp.argsort(eig_vals)
if compute_slice:
sort_idxs = sort_idxs[subset_by_index[0] : subset_by_index[1]]
eig_vals = eig_vals[sort_idxs]
eig_vecs = eig_vecs[:, sort_idxs]
return eig_vals, eig_vecs
def _T(x: Array) -> Array:
return lax.transpose(x, (*range(x.ndim - 2), x.ndim - 1, x.ndim - 2))
def _eigh_qdwh_impl(x, *, lower, sort_eigenvalues, subset_by_index):
"""QDWH-based eigendecomposition for TPU."""
*_, m, n = x.shape
assert m == n, (m, n)
termination_size = 256
if not core.is_constant_dim(m):
# TODO: maybe we can relax the check below for shape polymorphism?
raise NotImplementedError(
"Shape polymorphism for native lowering for eigh is implemented "
f"only for the batch dimensions: {x.shape}")
if m <= termination_size and (
subset_by_index is None or subset_by_index == (0, n)
):
return lax_linalg.eigh(
x, lower=lower, sort_eigenvalues=sort_eigenvalues,
symmetrize_input=False,
implementation=lax_linalg.EighImplementation.JACOBI
)
def eigh_qdwh(x):
if len(x.shape) > 2:
return control_flow.map(eigh_qdwh, x)
# We should only look at elements from the lower/upper triangle. Reflects
# that triangle into the other triangle to form a Hermitian matrix.
if lower:
mask = lax_internal._tri(bool, (n, n), 0)
else:
mask = lax.bitwise_not(lax_internal._tri(bool, (n, n), -1))
if dtypes.issubdtype(x.dtype, np.complexfloating):
re = lax.select(mask, lax.real(x), _T(lax.real(x)))
if lower:
im_mask = lax_internal._tri(bool, (n, n), -1)
else:
im_mask = lax.bitwise_not(lax_internal._tri(bool, (n, n), 0))
im = lax.imag(x)
im = lax.select(im_mask, im, lax.full_like(im, 0))
im = lax.select(mask, im, -_T(im))
x = lax.complex(re, im)
else:
x = lax.select(mask, x, _T(x))
return eigh(
x,
sort_eigenvalues=sort_eigenvalues,
termination_size=termination_size,
subset_by_index=subset_by_index,
)
eig_vals, eig_vecs = eigh_qdwh(x)
return eig_vecs, eig_vals
def _eigh_tpu_lowering(
ctx, operand, *, lower, sort_eigenvalues, subset_by_index, algorithm
):
if algorithm is None:
algorithm = lax_linalg.EighImplementation.QDWH
if algorithm == lax_linalg.EighImplementation.QR:
raise NotImplementedError("QR algorithm is not supported on TPU")
elif algorithm == lax_linalg.EighImplementation.JACOBI:
operand_aval, = ctx.avals_in
if operand_aval.shape[-1] == 0:
reshape_aval = operand_aval.update(shape=operand_aval.shape[:-1])
return [
operand,
hlo.real(mlir.reshape(ctx, operand, reshape_aval)),
]
v_aval, w_aval = ctx.avals_out
eigvecs_type = mlir.aval_to_ir_type(v_aval)
eigvals_type = mlir.aval_to_ir_type(w_aval)
result_types = [eigvecs_type, eigvals_type]
backend_config = f"{int(lower)},{int(sort_eigenvalues)},100,1e-6"
if any(not is_constant_shape(aval_out.shape)
for aval_out in ctx.avals_out):
result_shapes = [
mlir.eval_dynamic_shape_as_tensor(ctx, aval_out.shape)
for aval_out in ctx.avals_out
]
else:
result_shapes = None
op = mlir.custom_call(
"Eigh",
result_types=result_types,
operands=[operand],
backend_config=backend_config,
api_version=1,
result_shapes=result_shapes,
)
return op.results
elif algorithm == lax_linalg.EighImplementation.QDWH:
return mlir.lower_fun(_eigh_qdwh_impl, multiple_results=True)(
ctx, operand, lower=lower, sort_eigenvalues=sort_eigenvalues,
subset_by_index=subset_by_index)
else:
raise ValueError(f"Unknown algorithm: {algorithm}")
mlir.register_lowering(lax_linalg.eigh_p, _eigh_tpu_lowering, platform='tpu')
|
_Subproblem
|
python
|
Farama-Foundation__Gymnasium
|
gymnasium/vector/vector_env.py
|
{
"start": 23645,
"end": 24534
}
|
class ____(VectorWrapper):
"""Wraps the vectorized environment to allow a modular transformation of the reward.
Equivalent of :class:`gymnasium.RewardWrapper` for vectorized environments.
"""
def step(
self, actions: ActType
) -> tuple[ObsType, ArrayType, ArrayType, ArrayType, dict[str, Any]]:
"""Steps through the environment returning a reward modified by :meth:`reward`."""
observations, rewards, terminations, truncations, infos = self.env.step(actions)
return observations, self.rewards(rewards), terminations, truncations, infos
def rewards(self, rewards: ArrayType) -> ArrayType:
"""Transform the reward before returning it.
Args:
rewards (array): the reward to transform
Returns:
array: the transformed reward
"""
raise NotImplementedError
|
VectorRewardWrapper
|
python
|
doocs__leetcode
|
solution/0200-0299/0218.The Skyline Problem/Solution.py
|
{
"start": 34,
"end": 821
}
|
class ____:
def getSkyline(self, buildings: List[List[int]]) -> List[List[int]]:
skys, lines, pq = [], [], PriorityQueue()
for build in buildings:
lines.extend([build[0], build[1]])
lines.sort()
city, n = 0, len(buildings)
for line in lines:
while city < n and buildings[city][0] <= line:
pq.put([-buildings[city][2], buildings[city][0], buildings[city][1]])
city += 1
while not pq.empty() and pq.queue[0][2] <= line:
pq.get()
high = 0
if not pq.empty():
high = -pq.queue[0][0]
if len(skys) > 0 and skys[-1][1] == high:
continue
skys.append([line, high])
return skys
|
Solution
|
python
|
google__pytype
|
pytype/tests/test_base_test.py
|
{
"start": 5490,
"end": 6495
}
|
class ____(test_base.BaseTest):
@test_utils.skipUnlessPy((3, 10), reason="testing skipUnlessPy")
def test_skip_unless_py(self):
# This test will fail if run in a version other than 3.10.
self.Check("""
import sys
if sys.version_info.minor != 10:
name_error
""")
@test_utils.skipIfPy((3, 10), reason="testing skipIfPy")
def test_skip_if_py(self):
# This test will fail if run in 3.10.
self.Check("""
import sys
if sys.version_info.minor == 10:
name_error
""")
@test_utils.skipBeforePy((3, 10), reason="testing skipBeforePy")
def test_skip_before_py(self):
# This will fail before 3.10.
self.Check("""
import sys
if sys.version_info.minor < 10:
name_error
""")
@test_utils.skipFromPy((3, 10), reason="testing skipFromPy")
def test_skip_from_py(self):
# This will fail in 3.10+.
self.Check("""
import sys
if sys.version_info.minor >= 10:
name_error
""")
|
SkipTest
|
python
|
pytorch__pytorch
|
torch/_subclasses/_fake_tensor_utils.py
|
{
"start": 3114,
"end": 5458
}
|
class ____:
"""
Represents a SymInt in the cached key. Needed because SymInt doesn't
support __eq__ or __hash__ directly.
"""
# value can be:
# PySymType: This is the 'normal' SymInt value, wrapped so we can use
# hash/eq as value hash/eq (normally SymInt does object
# hash/eq).
# _DeconstructedSymType: This is used when storing the _PySymInputStub in
# the cache to avoid cyclic ShapeEnv references.
# _InputBackref: This is a back-reference to a previous _PySymInputStub in
# the key.
value: Union[PySymType, _DeconstructedSymType, _InputBackref]
def __init__(
self, value: Union[PySymType, _DeconstructedSymType, _InputBackref]
) -> None:
# For inputs (values in the `key`) we need to keep the PySymType intact
# - this way if we need to reuse it as an output we can properly copy
# the original value.
self.value = value
def strip_shape_env(self) -> None:
if isinstance(self.value, py_sym_types):
self.value = _DeconstructedSymType.from_sym_type(self.value)
def extract(self, shape_env: ShapeEnv) -> PySymType:
if isinstance(self.value, _DeconstructedSymType):
return self.value.extract(shape_env)
else:
# We should never see an _InputBackref here - anyone extracting a
# value should be pulling from the original entry (the one this
# backref points at).
assert not isinstance(self.value, _InputBackref)
return self.value
def __str__(self) -> str:
return str(self.value)
def __repr__(self) -> str:
return f"_PySymInputStub({self.value!r})"
def __eq__(self, other: object) -> bool:
if not isinstance(other, _PySymInputStub):
return False
elif isinstance(self.value, _InputBackref) or isinstance(
other.value, _InputBackref
):
return self.value == other.value
else:
return self.value.node._value_eq(other.value.node)
def __hash__(self) -> int:
if isinstance(self.value, _InputBackref):
return hash(self.value)
else:
return self.value.node._value_hash()
@dataclass(slots=True)
|
_PySymInputStub
|
python
|
buildout__buildout
|
src/zc/buildout/easy_install.py
|
{
"start": 66505,
"end": 67641
}
|
class ____(zc.buildout.UserError):
def __init__(self, req, ws):
ws = list(ws)
ws.sort()
self.data = req, ws
def __str__(self):
req, ws = self.data
return "Couldn't find a distribution for %r." % str(req)
def _constrained_requirement(constraint, requirement):
assert isinstance(requirement, pkg_resources.Requirement)
if constraint[0] not in '<>':
if constraint.startswith('='):
assert constraint.startswith('==')
version = constraint[2:]
else:
version = constraint
constraint = '==' + constraint
if version not in requirement:
msg = ("The requirement (%r) is not allowed by your [versions] "
"constraint (%s)" % (str(requirement), version))
raise IncompatibleConstraintError(msg)
specifier = specifiers.SpecifierSet(constraint)
else:
specifier = requirement.specifier & constraint
constrained = copy.deepcopy(requirement)
constrained.specifier = specifier
return pkg_resources.Requirement.parse(str(constrained))
|
MissingDistribution
|
python
|
MongoEngine__mongoengine
|
tests/queryset/test_pickable.py
|
{
"start": 185,
"end": 1315
}
|
class ____(MongoDBTestCase):
"""
Test for adding pickling support for QuerySet instances
See issue https://github.com/MongoEngine/mongoengine/issues/442
"""
def setUp(self):
super().setUp()
self.john = Person.objects.create(name="John", age=21)
def test_picke_simple_qs(self):
qs = Person.objects.all()
pickle.dumps(qs)
def _get_loaded(self, qs):
s = pickle.dumps(qs)
return pickle.loads(s)
def test_unpickle(self):
qs = Person.objects.all()
loadedQs = self._get_loaded(qs)
assert qs.count() == loadedQs.count()
# can update loadedQs
loadedQs.update(age=23)
# check
assert Person.objects.first().age == 23
def test_pickle_support_filtration(self):
Person.objects.create(name="Alice", age=22)
Person.objects.create(name="Bob", age=23)
qs = Person.objects.filter(age__gte=22)
assert qs.count() == 2
loaded = self._get_loaded(qs)
assert loaded.count() == 2
assert loaded.filter(name="Bob").first().age == 23
|
TestQuerysetPickable
|
python
|
google__jax
|
tests/cache_key_test.py
|
{
"start": 1266,
"end": 15285
}
|
class ____(jtu.JaxTestCase):
def test_serialized_compile_options(self):
compile_options = compiler.get_compile_options(
num_replicas=1, num_partitions=1
)
hash1 = self.get_hashed_value(
cache_key._hash_serialized_compile_options, compile_options
)
debug_options = compile_options.executable_build_options.debug_options
debug_options.xla_force_host_platform_device_count = 2
debug_options.xla_dump_to = "foo"
debug_options.xla_dump_hlo_module_re = "bar"
debug_options.xla_dump_hlo_pass_re = "baz"
debug_options.xla_dump_hlo_as_text = True
debug_options.xla_dump_hlo_as_proto = True
debug_options.xla_dump_hlo_as_dot = True
debug_options.xla_dump_hlo_as_url = True
debug_options.xla_dump_hlo_as_html = True
debug_options.xla_dump_fusion_visualization = True
debug_options.xla_dump_hlo_snapshots = True
debug_options.xla_dump_max_hlo_modules = True
debug_options.xla_dump_module_metadata = True
debug_options.xla_dump_compress_protos = True
debug_options.xla_dump_hlo_as_long_text = True
debug_options.xla_dump_disable_metadata = True
debug_options.xla_dump_hlo_pipeline_re = "xyzzy"
debug_options.xla_gpu_experimental_autotune_cache_mode = 2
hash2 = self.get_hashed_value(
cache_key._hash_serialized_compile_options, compile_options
)
self.assertEqual(hash1, hash2)
@jtu.skip_on_devices("cpu")
def test_hash_accelerator_devices(self):
devices = np.array([[jax.local_devices()[0]]])
dev_hash1 = self.get_hashed_value(cache_key._hash_devices, devices)
dev_hash2 = self.get_hashed_value(cache_key._hash_devices, devices)
self.assertEqual(dev_hash1, dev_hash2)
acc_hash1 = self.get_hashed_value(
cache_key._hash_accelerator_config, devices)
acc_hash2 = self.get_hashed_value(
cache_key._hash_accelerator_config, devices)
self.assertEqual(acc_hash1, acc_hash2)
def test_hash_platform(self):
hash1 = self.get_hashed_value(
cache_key._hash_platform, xla_bridge.get_backend()
)
hash2 = self.get_hashed_value(
cache_key._hash_platform, xla_bridge.get_backend()
)
self.assertEqual(hash1, hash2)
if xla_bridge.get_backend().platform != "cpu":
cpu_backend = xla_bridge.get_backend("cpu")
hash3 = self.get_hashed_value(cache_key._hash_platform, cpu_backend)
self.assertNotEqual(hash1, hash3)
def test_hash_string(self):
hash1 = self.get_hashed_value(cache_key._hash_string, "foo")
hash2 = self.get_hashed_value(cache_key._hash_string, "bar")
hash3 = self.get_hashed_value(cache_key._hash_string, "bar")
self.assertEqual(hash2, hash3)
self.assertNotEqual(hash1, hash2)
def test_same_key(self):
computation = jax.jit(lambda x, y: x + y).lower(1, 1).compiler_ir()
devices = np.array([[jax.local_devices()[0]]])
compile_options = compiler.get_compile_options(
num_replicas=1, num_partitions=1
)
backend = xla_bridge.get_backend()
self.assertEqual(
cache_key.get(computation, devices, compile_options, backend),
cache_key.get(computation, devices, compile_options, backend),
)
def test_different_key(self):
computation = jax.jit(lambda x, y: x + y).lower(1, 1).compiler_ir()
devices = np.array([[jax.local_devices()[0]]])
compile_options_not_filled = compiler.get_compile_options(
num_replicas=1, num_partitions=1
)
compile_options_filled = self.filled_compile_options()
backend = xla_bridge.get_backend()
self.assertNotEqual(
cache_key.get(
computation, devices, compile_options_not_filled, backend
),
cache_key.get(computation, devices, compile_options_filled, backend),
)
def test_custom_hook(self):
computation = jax.jit(lambda x, y: x + y).lower(1, 1).compiler_ir()
devices = np.array([[jax.local_devices()[0]]])
compile_options = compiler.get_compile_options(
num_replicas=1, num_partitions=1
)
backend = xla_bridge.get_backend()
original_custom_hook = cache_key.custom_hook
cache_key.custom_hook = lambda: "hook1"
key1 = cache_key.get(computation, devices, compile_options, backend)
cache_key.custom_hook = lambda: "hook2"
key2 = cache_key.get(computation, devices, compile_options, backend)
cache_key.custom_hook = original_custom_hook
self.assertNotEqual(key1, key2)
def test_different_computations(self):
computation1 = jax.jit(lambda x, y: x + y).lower(1, 1).compiler_ir()
computation2 = jax.jit(lambda x, y: x * y).lower(2, 2).compiler_ir()
devices = np.array([[jax.local_devices()[0]]])
compile_options = compiler.get_compile_options(
num_replicas=1, num_partitions=1
)
backend = xla_bridge.get_backend()
self.assertNotEqual(
cache_key.get(computation1, devices, compile_options, backend),
cache_key.get(computation2, devices, compile_options, backend),
)
# TODO(phawkins): this test flakes if test concurrency is enabled.
@jtu.thread_unsafe_test()
def test_custom_partitioning_ptr_removal(self):
def _partition(mesh, arg_shapes, result_shape):
arg_shardings = jax.tree.map(lambda x: x.sharding, arg_shapes)
result_shardings = NamedSharding(mesh, arg_shapes[0].sharding.spec)
return mesh, jax.numpy.add, result_shardings, arg_shardings
def _infer_sharding_from_operands(mesh, arg_shapes, result_shape):
return NamedSharding(mesh, arg_shapes[0].sharding.spec)
@custom_partitioning
def _cp_add(x, y):
return jax.numpy.add(x, y)
_cp_add.def_partition(
infer_sharding_from_operands=_infer_sharding_from_operands,
partition=_partition,
sharding_rule='..., ... -> ...')
devices = np.asarray(jax.devices())
with Mesh(devices, ('x',)) as m:
computation = jax.jit(
_cp_add,
in_shardings=(NamedSharding(m, P('x')),
NamedSharding(m, P('x'))),
out_shardings=NamedSharding(m, P('x'))
).lower(
jax.ShapeDtypeStruct([1024], dtype=jax.numpy.float32),
jax.ShapeDtypeStruct([1024], dtype=jax.numpy.float32),
).compiler_ir()
pattern = (
r'stablehlo\.custom_call @CustomSPMDPartitioning\('
r'(.*?)\) \{'
r'(.*?backend_config\s*=\s*"([^"]*)".*?)'
r'\}'
)
with computation.context:
updated_module = cache_key._remove_callbacks(
type_cast(ir.Module, computation.operation.clone()),
ignore_callbacks=cache_key.IgnoreCallbacks.ALL,
)
bcs = [
match[2]
for match in re.findall(pattern, str(updated_module), re.DOTALL)
]
for bc in bcs:
self.assertEqual(bc, "REMOVED")
compile_options = compiler.get_compile_options(
num_replicas=1, num_partitions=1
)
backend = xla_bridge.get_backend()
hash_without_callback_ptrs = cache_key.get(
computation,
devices,
compile_options,
backend,
ignore_callbacks=cache_key.IgnoreCallbacks.CUSTOM_PARTITIONING,
)
expected_hash = cache_key.get(
updated_module, devices, compile_options, backend
)
self.assertEqual(expected_hash, hash_without_callback_ptrs)
@jtu.skip_on_devices("cpu")
def test_host_callbacks_ptrs_removed(self):
def _host_callback(x, y):
jax.debug.print("x={x[0]} y={y[0]}", x=x, y=y)
computation = (
jax.jit(_host_callback)
.lower(
jax.ShapeDtypeStruct([1024], dtype=jax.numpy.float32),
jax.ShapeDtypeStruct([1024], dtype=jax.numpy.float32),
)
.compiler_ir()
)
pattern = r'(.*?backend_config\s*=\s*"([^"]*)".*?)'
with computation.context:
updated_module = cache_key._remove_callbacks(
type_cast(ir.Module, computation.operation.clone()),
ignore_callbacks=cache_key.IgnoreCallbacks.ALL,
)
bcs = [
match[1]
for match in re.findall(pattern, str(updated_module), re.DOTALL)
]
for bc in bcs:
self.assertEqual(bc, "REMOVED")
def test_different_device_assignment(self):
computation = jax.jit(lambda x, y: x + y).lower(1, 1).compiler_ir()
devices = np.array([[jax.local_devices()[0]]])
compile_options_1 = compiler.get_compile_options(
num_replicas=1, num_partitions=1, device_assignment=np.array([[0]])
)
compile_options_2 = compiler.get_compile_options(
num_replicas=1, num_partitions=1, device_assignment=np.array([[1]])
)
backend = xla_bridge.get_backend()
hash_1 = cache_key.get(computation, devices, compile_options_1, backend)
hash_2 = cache_key.get(computation, devices, compile_options_2, backend)
if backend.platform == "gpu":
self.assertEqual(hash_1, hash_2)
else:
self.assertNotEqual(hash_1, hash_2)
@parameterized.parameters([False, True])
@jtu.thread_unsafe_test() # env vars are not thread-safe
def test_identical_computations_different_metadata(self, include_metadata):
f = lambda x, y: lax.mul(lax.add(x, y), 2)
g = lambda x, y: lax.mul(lax.add(x, y), 2)
assert id(f) != id(g)
computation1 = jax.jit(f).lower(1, 1).compiler_ir()
computation2 = jax.jit(g).lower(2, 3).compiler_ir()
devices = np.array([[jax.local_devices()[0]]])
compile_options = compiler.get_compile_options(
num_replicas=1, num_partitions=1
)
backend = xla_bridge.get_backend()
with config.compilation_cache_include_metadata_in_key(include_metadata):
key1 = cache_key.get(computation1, devices, compile_options, backend)
key2 = cache_key.get(computation2, devices, compile_options, backend)
self.assertEqual(include_metadata, key1 != key2)
@jtu.thread_unsafe_test() # env vars are not thread-safe
def test_xla_flags(self):
if jtu.is_device_tpu(version=4):
raise unittest.SkipTest("TODO(b/240151176)")
computation = jax.jit(lambda x, y: x + y).lower(1, 1).compiler_ir()
devices = np.array([[jax.local_devices()[0]]])
compile_options = compiler.get_compile_options(
num_replicas=1, num_partitions=1
)
backend = xla_bridge.get_backend()
orig_xla_flags = os.getenv("XLA_FLAGS")
orig_argv = sys.argv
try:
os.environ["XLA_FLAGS"] = "--xla_gpu_autotune_level=0"
key1 = cache_key.get(computation, devices, compile_options, backend)
os.environ["XLA_FLAGS"] = "--xla_gpu_autotune_level=1"
key2 = cache_key.get(computation, devices, compile_options, backend)
self.assertNotEqual(key1, key2)
os.environ["XLA_FLAGS"] = "--xla_gpu_autotune_level=0"
key3 = cache_key.get(computation, devices, compile_options, backend)
self.assertEqual(key1, key3)
# Test flag in _xla_flags_to_exclude_from_cache_key
os.environ["XLA_FLAGS"] = (
"--xla_gpu_autotune_level=0 --xla_force_host_platform_device_count=8"
)
key4 = cache_key.get(computation, devices, compile_options, backend)
self.assertEqual(key1, key4)
# Test flags given on command line
del os.environ["XLA_FLAGS"]
sys.argv.append("--xla_gpu_autotune_level=0")
key5 = cache_key.get(computation, devices, compile_options, backend)
self.assertEqual(key1, key5)
sys.argv.append("--xla_force_host_platform_device_count=8")
self.assertEqual(key1, key5)
finally:
if orig_xla_flags is not None:
os.environ["XLA_FLAGS"] = orig_xla_flags
elif os.getenv("XLA_FLAGS") is not None:
del os.environ["XLA_FLAGS"]
sys.argv = orig_argv
@jtu.thread_unsafe_test() # env vars are not thread-safe
def test_libtpu_init_args(self):
if jtu.is_device_tpu(version=4):
raise unittest.SkipTest("TODO(b/240151176)")
computation = jax.jit(lambda x, y: x + y).lower(1, 1).compiler_ir()
devices = np.array([[jax.local_devices()[0]]])
compile_options = compiler.get_compile_options(
num_replicas=1, num_partitions=1
)
backend = xla_bridge.get_backend()
orig_libtpu_init_args = os.getenv("LIBTPU_INIT_ARGS")
orig_argv = sys.argv
try:
os.environ["LIBTPU_INIT_ARGS"] = (
"--xla_spmd_threshold_for_windowed_einsum_mib=0"
)
key1 = cache_key.get(computation, devices, compile_options, backend)
os.environ["LIBTPU_INIT_ARGS"] = (
"--xla_spmd_threshold_for_windowed_einsum_mib=1"
)
key2 = cache_key.get(computation, devices, compile_options, backend)
self.assertNotEqual(key1, key2)
finally:
if orig_libtpu_init_args is not None:
os.environ["LIBTPU_INIT_ARGS"] = orig_libtpu_init_args
elif os.getenv("LIBTPU_INIT_ARGS") is not None:
del os.environ["LIBTPU_INIT_ARGS"]
sys.argv = orig_argv
def filled_compile_options(self):
compile_options = xla_client.CompileOptions()
compile_options.num_replicas = 1
compile_options.num_partitions = 1
shape = xla_client.Shape.array_shape(np.dtype(np.float32), [2])
shape_array = [shape, shape]
compile_options.argument_layouts = shape_array
compile_options.executable_build_options.result_layout = shape
device_assignment = xla_client.DeviceAssignment.create(
np.arange(4).reshape(2, 2)
)
compile_options.device_assignment = device_assignment
compile_options.executable_build_options.device_assignment = (
device_assignment
)
compile_options.executable_build_options.fdo_profile = b"test_profile"
return compile_options
def get_hashed_value(
self, hash_function, hash_function_input1, hash_function_input2=None):
hash_obj = hashlib.sha256()
if hash_function_input2 is not None:
hash_function(hash_obj, hash_function_input1, hash_function_input2)
else:
hash_function(hash_obj, hash_function_input1)
return hash_obj.digest().hex()
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
CacheKeyTest
|
python
|
realpython__materials
|
asterioids-pygame-project/source_code_step_3/space_rocks/game.py
|
{
"start": 46,
"end": 856
}
|
class ____:
def __init__(self):
self._init_pygame()
self.screen = pygame.display.set_mode((800, 600))
self.background = load_sprite("space", False)
def main_loop(self):
while True:
self._handle_input()
self._process_game_logic()
self._draw()
def _init_pygame(self):
pygame.init()
pygame.display.set_caption("Space Rocks")
def _handle_input(self):
for event in pygame.event.get():
if event.type == pygame.QUIT or (
event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE
):
quit()
def _process_game_logic(self):
pass
def _draw(self):
self.screen.blit(self.background, (0, 0))
pygame.display.flip()
|
SpaceRocks
|
python
|
huggingface__transformers
|
src/transformers/models/mllama/modeling_mllama.py
|
{
"start": 28733,
"end": 31146
}
|
class ____(GradientCheckpointingLayer):
"""Cross-attention transformer block with tanh-gated attention and feedforward."""
def __init__(self, config: MllamaTextConfig, layer_idx: int) -> None:
super().__init__()
self.layer_idx = layer_idx
self.cross_attn = MllamaTextCrossAttention(config, layer_idx=layer_idx)
self.input_layernorm = MllamaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.cross_attn_attn_gate = torch.nn.Parameter(torch.zeros(1))
self.mlp = MllamaTextMLP(config)
self.post_attention_layernorm = MllamaTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.cross_attn_mlp_gate = torch.nn.Parameter(torch.zeros(1))
def forward(
self,
hidden_states: torch.Tensor,
cross_attention_states: torch.Tensor,
cross_attention_mask: torch.Tensor,
attention_mask: torch.Tensor,
full_text_row_masked_out_mask: tuple[torch.Tensor, torch.Tensor],
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[torch.Tensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor]:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, attn_weights = self.cross_attn(
hidden_states=hidden_states,
attention_mask=cross_attention_mask,
cross_attention_states=cross_attention_states,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
hidden_states = residual + self.cross_attn_attn_gate.tanh() * hidden_states
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
if full_text_row_masked_out_mask is not None:
hidden_states = full_text_row_masked_out_mask[:, 0] * hidden_states # type: ignore
hidden_states = residual + self.cross_attn_mlp_gate.tanh() * hidden_states
return hidden_states
# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with LlamaConfig->MllamaTextConfig,Llama->Mllama
|
MllamaCrossAttentionDecoderLayer
|
python
|
pyinstaller__pyinstaller
|
PyInstaller/utils/osx.py
|
{
"start": 12847,
"end": 12999
}
|
class ____(Exception):
"""
Exception raised by `get_binary_architectures` when it is passed an invalid binary.
"""
pass
|
InvalidBinaryError
|
python
|
encode__django-rest-framework
|
rest_framework/fields.py
|
{
"start": 46414,
"end": 48649
}
|
class ____(Field):
default_error_messages = {
'invalid': _('Time has wrong format. Use one of these formats instead: {format}.'),
}
datetime_parser = datetime.datetime.strptime
def __init__(self, format=empty, input_formats=None, **kwargs):
if format is not empty:
self.format = format
if input_formats is not None:
self.input_formats = input_formats
super().__init__(**kwargs)
def to_internal_value(self, value):
input_formats = getattr(self, 'input_formats', api_settings.TIME_INPUT_FORMATS)
if isinstance(value, datetime.time):
return value
for input_format in input_formats:
if input_format.lower() == ISO_8601:
try:
parsed = parse_time(value)
except (ValueError, TypeError):
pass
else:
if parsed is not None:
return parsed
else:
try:
parsed = self.datetime_parser(value, input_format)
except (ValueError, TypeError):
pass
else:
return parsed.time()
humanized_format = humanize_datetime.time_formats(input_formats)
self.fail('invalid', format=humanized_format)
def to_representation(self, value):
if value in (None, ''):
return None
output_format = getattr(self, 'format', api_settings.TIME_FORMAT)
if output_format is None or isinstance(value, str):
return value
# Applying a `TimeField` to a datetime value is almost always
# not a sensible thing to do, as it means naively dropping
# any explicit or implicit timezone info.
assert not isinstance(value, datetime.datetime), (
'Expected a `time`, but got a `datetime`. Refusing to coerce, '
'as this may mean losing timezone information. Use a custom '
'read-only field and deal with timezone issues explicitly.'
)
if output_format.lower() == ISO_8601:
return value.isoformat()
return value.strftime(output_format)
|
TimeField
|
python
|
facebook__pyre-check
|
scripts/callgraph_utilities.py
|
{
"start": 796,
"end": 2355
}
|
class ____(abc.ABC):
call_graph: Dict[str, Set[str]]
original_call_graph: Dict[str, JSON]
def __init__(self, call_graph: JSON) -> None:
self.original_call_graph = self.validate_top_level_dict(call_graph)
self.call_graph = self._to_call_graph()
@staticmethod
def validate_top_level_dict(call_graph: JSON) -> Dict[str, JSON]:
if not isinstance(call_graph, dict):
raise ValueError(
f"Call graph structure in call graph file is not a JSON dict: {type(call_graph)}"
)
return call_graph
@abc.abstractmethod
def extract_callee(self, callee: JSON) -> str: ...
def validate_callees(self, callees: List[JSON]) -> Set[str]:
return {self.extract_callee(callee) for callee in callees}
def _to_call_graph(self) -> Dict[str, Set[str]]:
result = {}
call_graph = self.original_call_graph
for caller, callees in call_graph.items():
if not isinstance(callees, list):
raise ValueError(
f"Expected value for caller {caller} to be list of callers with location, got {type(callees)}: {callees}"
)
formatted_qualifier = self.extract_caller(caller)
result[formatted_qualifier] = self.validate_callees(callees) - {
formatted_qualifier
}
return result
def get_keys(self) -> Set[str]:
return set(self.call_graph)
@abc.abstractmethod
def extract_caller(self, qualifier: str) -> str: ...
|
InputFormat
|
python
|
apache__airflow
|
airflow-ctl/src/airflowctl/api/datamodels/generated.py
|
{
"start": 70853,
"end": 71108
}
|
class ____(BaseModel):
"""
Schema for a collection of Human-in-the-loop details.
"""
hitl_details: Annotated[list[HITLDetail], Field(title="Hitl Details")]
total_entries: Annotated[int, Field(title="Total Entries")]
|
HITLDetailCollection
|
python
|
getsentry__sentry
|
src/sentry/web/forms/accounts.py
|
{
"start": 7552,
"end": 8676
}
|
class ____(PasswordlessRegistrationForm):
password = forms.CharField(
required=True, widget=forms.PasswordInput(attrs={"placeholder": "something super secret"})
)
def clean_password(self):
password = self.cleaned_data["password"]
user = (
User(username=self.cleaned_data["username"])
if "username" in self.cleaned_data
else None
)
password_validation.validate_password(password, user=user)
return password
def save(self, commit=True):
user = super().save(commit=False)
user.set_password(self.cleaned_data["password"])
if commit:
user.save()
if self.cleaned_data.get("subscribe"):
newsletter.backend.create_or_update_subscriptions(
user, list_ids=newsletter.backend.get_default_list_ids()
)
if self.cleaned_data.get("timezone"):
UserOption.objects.create(
user=user, key="timezone", value=self.cleaned_data.get("timezone")
)
return user
|
RegistrationForm
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/util/random_seed_test.py
|
{
"start": 3622,
"end": 4758
}
|
class ____(test_base.DatasetTestBase, parameterized.TestCase):
def _checkEqual(self, tinput, toutput):
random_seed.set_random_seed(tinput[0])
g_seed, op_seed = data_random_seed.get_seed(tinput[1])
g_seed = self.evaluate(g_seed)
op_seed = self.evaluate(op_seed)
msg = "test_case = {0}, got {1}, want {2}".format(tinput, (g_seed, op_seed),
toutput)
self.assertEqual((g_seed, op_seed), toutput, msg=msg)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_test_random_seed_combinations()))
def testRandomSeed(self, input_fn, output_fn):
tinput, toutput = input_fn(), output_fn()
self._checkEqual(tinput=tinput, toutput=toutput)
random_seed.set_random_seed(None)
@combinations.generate(test_base.graph_only_combinations())
def testIncrementalRandomSeed(self):
random_seed.set_random_seed(1)
for i in range(10):
tinput = (1, None)
toutput = (1, i)
self._checkEqual(tinput=tinput, toutput=toutput)
if __name__ == '__main__':
test.main()
|
RandomSeedTest
|
python
|
huggingface__transformers
|
src/transformers/models/dia/processing_dia.py
|
{
"start": 1073,
"end": 1239
}
|
class ____(AudioKwargs, total=False):
bos_token_id: int
eos_token_id: int
pad_token_id: int
delay_pattern: list[int]
generation: bool
|
DiaAudioKwargs
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_hashtag.py
|
{
"start": 1589,
"end": 3762
}
|
class ____(ColumnMapExpectation):
"""Expect column values to be valid #hashtags."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"well_formed_hashtag": [
"#Hello",
"#Photography_Daily",
"#42",
"#Code4all",
],
"malformed_hashtag": [
"",
"Hello",
"##Hello",
"#Hello!",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "well_formed_hashtag"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "malformed_hashtag"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_hashtag"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": ["experimental", "hackathon", "typed-entities"],
"contributors": [
"@voidforall",
],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidHashtag().print_diagnostic_checklist()
|
ExpectColumnValuesToBeValidHashtag
|
python
|
run-llama__llama_index
|
llama-index-integrations/embeddings/llama-index-embeddings-upstage/llama_index/embeddings/upstage/base.py
|
{
"start": 1148,
"end": 7455
}
|
class ____(OpenAIEmbedding):
"""
Class for Upstage embeddings.
"""
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the Upstage API."
)
api_key: str = Field(description="The Upstage API key.")
api_base: Optional[str] = Field(
default=DEFAULT_UPSTAGE_API_BASE, description="The base URL for Upstage API."
)
dimensions: Optional[int] = Field(
None,
description="Not supported yet. The number of dimensions the resulting output embeddings should have.",
)
def __init__(
self,
model: str = "embedding",
embed_batch_size: int = 100,
dimensions: Optional[int] = None,
additional_kwargs: Dict[str, Any] = None,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
max_retries: int = 10,
timeout: float = 60.0,
reuse_client: bool = True,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
**kwargs: Any,
) -> None:
additional_kwargs = additional_kwargs or {}
if dimensions is not None:
warnings.warn("Received dimensions argument. This is not supported yet.")
additional_kwargs["dimensions"] = dimensions
if embed_batch_size > MAX_EMBED_BATCH_SIZE:
raise ValueError(
f"embed_batch_size should be less than or equal to {MAX_EMBED_BATCH_SIZE}."
)
if "upstage_api_key" in kwargs:
api_key = kwargs.pop("upstage_api_key")
api_key, api_base = resolve_upstage_credentials(
api_key=api_key, api_base=api_base
)
if "model_name" in kwargs:
model = kwargs.pop("model_name")
# if model endswith with "-query" or "-passage", remove the suffix and print a warning
if model.endswith(("-query", "-passage")):
model = model.rsplit("-", 1)[0]
logger.warning(
f"Model name should not end with '-query' or '-passage'. The suffix has been removed. "
f"Model name: {model}"
)
default_headers = (default_headers or {}) | {"x-upstage-client": "llamaindex"}
super().__init__(
embed_batch_size=embed_batch_size,
dimensions=dimensions,
callback_manager=callback_manager,
model_name=model,
additional_kwargs=additional_kwargs,
api_key=api_key,
api_base=api_base,
max_retries=max_retries,
reuse_client=reuse_client,
timeout=timeout,
default_headers=default_headers,
**kwargs,
)
self._client = None
self._aclient = None
self._http_client = http_client
self._query_engine, self._text_engine = get_engine(model)
def class_name(cls) -> str:
return "UpstageEmbedding"
def _get_credential_kwargs(self, is_async: bool = False) -> Dict[str, Any]:
return {
"api_key": self.api_key,
"base_url": self.api_base,
"max_retries": self.max_retries,
"timeout": self.timeout,
"default_headers": self.default_headers,
"http_client": self._async_http_client if is_async else self._http_client,
}
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
client = self._get_client()
text = query.replace("\n", " ")
return (
client.embeddings.create(
input=text, model=self._query_engine, **self.additional_kwargs
)
.data[0]
.embedding
)
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
client = self._get_aclient()
text = query.replace("\n", " ")
return (
(
await client.embeddings.create(
input=text, model=self._query_engine, **self.additional_kwargs
)
)
.data[0]
.embedding
)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
client = self._get_client()
return (
client.embeddings.create(
input=text, model=self._text_engine, **self.additional_kwargs
)
.data[0]
.embedding
)
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
client = self._get_aclient()
return (
(
await client.embeddings.create(
input=text, model=self._text_engine, **self.additional_kwargs
)
)
.data[0]
.embedding
)
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
client = self._get_client()
batch_size = min(self.embed_batch_size, len(texts))
texts = [text.replace("\n", " ") for text in texts]
embeddings = []
for i in range(0, len(texts), batch_size):
batch = texts[i : i + batch_size]
response = client.embeddings.create(
input=batch, model=self._text_engine, **self.additional_kwargs
)
embeddings.extend([r.embedding for r in response.data])
return embeddings
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
client = self._get_aclient()
batch_size = min(self.embed_batch_size, len(texts))
texts = [text.replace("\n", " ") for text in texts]
embeddings = []
for i in range(0, len(texts), batch_size):
batch = texts[i : i + batch_size]
response = await client.embeddings.create(
input=batch, model=self._text_engine, **self.additional_kwargs
)
embeddings.extend([r.embedding for r in response.data])
return embeddings
|
UpstageEmbedding
|
python
|
tensorflow__tensorflow
|
tensorflow/python/feature_column/sequence_feature_column_test.py
|
{
"start": 8425,
"end": 9983
}
|
class ____(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': ('omar', 'stringer', 'marlo'),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
# Ignored to avoid hash dependence in test.
'values': np.array((0, 0, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': ('omar', 'stringer', 'marlo'),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
# Ignored to avoid hash dependence in test.
'values': np.array((0, 0, 0), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_hash_bucket(
'aaa', hash_bucket_size=10)
id_weight_pair = _get_sparse_tensors(column, {'aaa': inputs})
self.assertIsNone(id_weight_pair.weight_tensor)
_assert_sparse_tensor_indices_shape(
self, expected, self.evaluate(id_weight_pair.id_tensor))
@test_util.run_all_in_graph_and_eager_modes
|
SequenceCategoricalColumnWithHashBucketTest
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.