language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytoolz__toolz | tlz/_build_tlz.py | {
"start": 118,
"end": 3144
} | class ____:
""" Finds and loads ``tlz`` modules when added to sys.meta_path"""
def __init__(self):
self.always_from_toolz = {
toolz.pipe,
}
def _load_toolz(self, fullname):
rv = {}
package, dot, submodules = fullname.partition('.')
try:
module_name = ''.join(['cytoolz', dot, submodules])
rv['cytoolz'] = import_module(module_name)
except ImportError:
pass
try:
module_name = ''.join(['toolz', dot, submodules])
rv['toolz'] = import_module(module_name)
except ImportError:
pass
if not rv:
raise ImportError(fullname)
return rv
def find_module(self, fullname, path=None): # pragma: py3 no cover
package, dot, submodules = fullname.partition('.')
if package == 'tlz':
return self
def load_module(self, fullname): # pragma: py3 no cover
if fullname in sys.modules: # pragma: no cover
return sys.modules[fullname]
spec = ModuleSpec(fullname, self)
module = self.create_module(spec)
sys.modules[fullname] = module
self.exec_module(module)
return module
def find_spec(self, fullname, path, target=None): # pragma: no cover
package, dot, submodules = fullname.partition('.')
if package == 'tlz':
return ModuleSpec(fullname, self)
def create_module(self, spec):
return types.ModuleType(spec.name)
def exec_module(self, module):
toolz_mods = self._load_toolz(module.__name__)
fast_mod = toolz_mods.get('cytoolz') or toolz_mods['toolz']
slow_mod = toolz_mods.get('toolz') or toolz_mods['cytoolz']
module.__dict__.update(toolz.merge(fast_mod.__dict__, module.__dict__))
package = fast_mod.__package__
if package is not None:
package, dot, submodules = package.partition('.')
module.__package__ = ''.join(['tlz', dot, submodules])
if not module.__doc__:
module.__doc__ = fast_mod.__doc__
# show file from toolz during introspection
try:
module.__file__ = slow_mod.__file__
except AttributeError:
pass
for k, v in fast_mod.__dict__.items():
tv = slow_mod.__dict__.get(k)
try:
hash(tv)
except TypeError:
tv = None
if tv in self.always_from_toolz:
module.__dict__[k] = tv
elif (
isinstance(v, types.ModuleType)
and v.__package__ == fast_mod.__name__
):
package, dot, submodules = v.__name__.partition('.')
module_name = ''.join(['tlz', dot, submodules])
submodule = import_module(module_name)
module.__dict__[k] = submodule
tlz_loader = TlzLoader()
sys.meta_path.append(tlz_loader)
tlz_loader.exec_module(sys.modules['tlz'])
| TlzLoader |
python | ray-project__ray | python/ray/data/_internal/execution/operators/map_operator.py | {
"start": 1635,
"end": 2672
} | class ____(ABC):
"""Interface for the rebundling behavior of the MapOperator."""
@abstractmethod
def num_blocks(self) -> int:
"""Return the total number of blocks buffered inside the bundler."""
pass
@abstractmethod
def add_bundle(self, bundle: RefBundle):
"""Add a new input bundle to the bundler."""
pass
@abstractmethod
def has_bundle(self) -> bool:
"""Return whether the bundler currently holds a full bundle ready to emit."""
pass
@abstractmethod
def size_bytes(self) -> int:
"""Estimate the total size in bytes of buffered bundles."""
pass
@abstractmethod
def get_next_bundle(
self,
) -> Tuple[List[RefBundle], RefBundle]:
"""Pop and return the next bundled input ready for task submission."""
pass
@abstractmethod
def done_adding_bundles(self):
"""Signal that no additional bundles will be added to the bundler so the bundler can be finalized."""
pass
| BaseRefBundler |
python | pyqtgraph__pyqtgraph | pyqtgraph/opengl/GLViewWidget.py | {
"start": 21555,
"end": 22516
} | class ____(GLViewMixin, QtWidgets.QOpenGLWidget):
def __init__(self, *args, devicePixelRatio=None, **kwargs):
"""
Basic widget for displaying 3D data
- Rotation/scale controls
- Axis/grid display
- Export options
================ ==============================================================
**Arguments:**
parent (QObject, optional): Parent QObject. Defaults to None.
devicePixelRatio No longer in use. High-DPI displays should automatically
detect the correct resolution.
rotationMethod (str): Mechanism to drive the rotation method, options are
'euler' and 'quaternion'. Defaults to 'euler'.
================ ==============================================================
"""
super().__init__(*args, **kwargs)
self.setFocusPolicy(QtCore.Qt.FocusPolicy.ClickFocus)
| GLViewWidget |
python | giampaolo__psutil | tests/test_linux.py | {
"start": 85182,
"end": 88125
} | class ____(PsutilTestCase):
"""/proc/pid/stat and /proc/pid/status have many values in common.
Whenever possible, psutil uses /proc/pid/stat (it's faster).
For all those cases we check that the value found in
/proc/pid/stat (by psutil) matches the one found in
/proc/pid/status.
"""
@classmethod
def setUpClass(cls):
cls.proc = psutil.Process()
def read_status_file(self, linestart):
with psutil._psplatform.open_text(
f"/proc/{self.proc.pid}/status"
) as f:
for line in f:
line = line.strip()
if line.startswith(linestart):
value = line.partition('\t')[2]
try:
return int(value)
except ValueError:
return value
raise ValueError(f"can't find {linestart!r}")
def test_name(self):
value = self.read_status_file("Name:")
assert self.proc.name() == value
def test_status(self):
value = self.read_status_file("State:")
value = value[value.find('(') + 1 : value.rfind(')')]
value = value.replace(' ', '-')
assert self.proc.status() == value
def test_ppid(self):
value = self.read_status_file("PPid:")
assert self.proc.ppid() == value
def test_num_threads(self):
value = self.read_status_file("Threads:")
assert self.proc.num_threads() == value
def test_uids(self):
value = self.read_status_file("Uid:")
value = tuple(map(int, value.split()[1:4]))
assert self.proc.uids() == value
def test_gids(self):
value = self.read_status_file("Gid:")
value = tuple(map(int, value.split()[1:4]))
assert self.proc.gids() == value
@retry_on_failure()
def test_num_ctx_switches(self):
value = self.read_status_file("voluntary_ctxt_switches:")
assert self.proc.num_ctx_switches().voluntary == value
value = self.read_status_file("nonvoluntary_ctxt_switches:")
assert self.proc.num_ctx_switches().involuntary == value
def test_cpu_affinity(self):
value = self.read_status_file("Cpus_allowed_list:")
if '-' in str(value):
min_, max_ = map(int, value.split('-'))
assert self.proc.cpu_affinity() == list(range(min_, max_ + 1))
def test_cpu_affinity_eligible_cpus(self):
value = self.read_status_file("Cpus_allowed_list:")
with mock.patch("psutil._pslinux.per_cpu_times") as m:
self.proc._proc._get_eligible_cpus()
if '-' in str(value):
assert not m.called
else:
assert m.called
# =====================================================================
# --- test utils
# =====================================================================
@pytest.mark.skipif(not LINUX, reason="LINUX only")
| TestProcessAgainstStatus |
python | google__jax | tests/mosaic/gpu_test.py | {
"start": 42418,
"end": 95266
} | class ____(TestCase):
def setUp(self):
super().setUp()
capabilities = ("10.0", "10.1")
if not any(jtu.is_cuda_compute_capability_equal(sm) for sm in capabilities):
self.skipTest("Only works on GPU with capability sm_100a or sm_101a")
@parameterized.product(
jax_dtype_packing=[(jnp.float32, 1), (jnp.float16, 1), (jnp.float16, 2), (jnp.float8_e5m2, 4)],
reg_tmem_layout_m=[
(lambda _c, _p: tcgen05.LAYOUT, lambda _, p: tcgen05.tmem_default_layout(p), 128),
(lambda _c, _p: fa.WGMMA_LAYOUT, tcgen05.tmem_half_lane_layout, 64),
(
lambda c, _p: tcgen05.fa_m64_collective_layout(c),
tcgen05.tmem_m64_collective_layout,
64,
),
(
lambda c, p: tcgen05.tmem_m64_collective_layout(c, p).as_tiled_layout(),
tcgen05.tmem_m64_collective_layout,
64,
),
],
)
def test_load_store_tmem(self, jax_dtype_packing, reg_tmem_layout_m):
jax_dtype, packing = jax_dtype_packing
reg_layout_f, tmem_layout_f, m = reg_tmem_layout_m
n = 160
reg_layout = reg_layout_f(n, packing)
if tmem_layout_f is tcgen05.tmem_m64_collective_layout:
if jax_dtype == jnp.float16 and packing == 1:
self.skipTest("Not implemented yet")
is_native_transfer = tmem_layout_f(n, packing).as_tiled_layout() == reg_layout
if not is_native_transfer and jax_dtype == jnp.float8_e5m2:
self.skipTest("Not implemented yet")
def kernel(ctx, input, output, tmem):
del ctx
tmem.store(fa.FragmentedArray.load_untiled(input, layout=reg_layout, optimized=False))
tcgen05.commit_tmem()
tmem.load(reg_layout).store_untiled(output, optimized=False)
x = self.prng.uniform(-1, 1, (m, n)).astype(jax_dtype)
y = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), x, x,
mgpu.TMEM(x.shape, jax_dtype, layout=tmem_layout_f(n, packing)),
)(x)
np.testing.assert_array_equal(x, y)
@parameterized.parameters([
(jnp.float32, 1),
(jnp.float16, 1),
(jnp.float16, 2),
(jnp.float8_e5m2, 4),
(jnp.float4_e2m1fn, 8),
])
def test_load_store_tmem_native(self, jax_dtype, packing):
# TODO(bchetioui): add a test for int8 with a native layout with vector
# length equal to 4 once TMEM load is implemented for it.
def kernel(ctx, input, output, tmem):
del ctx
reg_layout = tcgen05.tmem_default_layout(max(packing, 2)).as_tiled_layout()
tmem.store(fa.FragmentedArray.load_untiled(input, layout=reg_layout, optimized=False))
tcgen05.commit_tmem()
tmem.load(reg_layout).store_untiled(output, optimized=False)
x = self.prng.uniform(-1, 1, (128, 128)).astype(jax_dtype)
y = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), x, x, mgpu.TMEM(x.shape, jax_dtype, packing=packing)
)(x)
np.testing.assert_array_equal(x, y)
def test_mixed_tmem_allocations_raise(self):
def body(ctx, out, scratch):
del ctx, out, scratch
with self.assertRaisesRegex(
ValueError,
"Can't mix collective and non-collective TMEM allocations within the"
" same kernel.",
):
mgpu.as_gpu_kernel(
body,
grid=(1, 1, 1),
block=(128, 1, 1),
in_shape=(),
out_shape=(jax.ShapeDtypeStruct((), jnp.int32),),
smem_scratch_shape=[
mgpu.TMEM((128, 128), jnp.float16, collective=True),
mgpu.TMEM((128, 128), jnp.float16, collective=False),
],
)
@parameterized.parameters([
(jnp.float32, 1, "130.0000"),
(jnp.float16, 1, "130.0000"),
(jnp.float16, 2, "[132.000000,133.000000]"),
])
@jtu.thread_unsafe_test()
def test_tmem_debug_print(self, jax_dtype, packing, expected):
def kernel(ctx, input, output, tmem):
del ctx, output
tmem.store(fa.FragmentedArray.load_untiled(input, layout=tcgen05.LAYOUT, optimized=False))
tcgen05.commit_tmem()
tmem.slice(slice(None), slice(0, 8))._debug_print()
x = jnp.arange(128 * 128, dtype=jax_dtype).reshape(128, 128)
with self.capture_stdout() as stdout:
mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), x, x, mgpu.TMEM(x.shape, jax_dtype, packing=packing),
)(x).block_until_ready()
self.assertIn("[1, 2]: " + expected, stdout())
@parameterized.product(
lhs_transpose=(False, True),
rhs_transpose=(False, True),
in_jax_dtype=(jnp.float16, jnp.bfloat16, jnp.float8_e5m2, jnp.float8_e4m3fn), # TODO(apaszke): f32
out_jax_dtype=(jnp.float16, jnp.float32,),
m=(64, 128,), # TODO(apaszke): 64, 192, 256
n=(64, 128, 192, 224, 256, 512),
swizzle=(32, 64, 128,),
)
def test_mma_basic_float(self, **kwargs):
in_bytewidth = jnp.dtype(kwargs["in_jax_dtype"]).itemsize
lhs_transpose = kwargs["lhs_transpose"]
swizzle = kwargs["swizzle"]
if lhs_transpose and kwargs["m"] * in_bytewidth < swizzle:
self.skipTest("swizzle too large for input (lhs)")
n_steps = 2 if kwargs["m"] == 64 else 1
n_instr_size = kwargs["n"] * in_bytewidth // n_steps
if n_instr_size < swizzle or n_instr_size % swizzle != 0:
self.skipTest("swizzle doesn't work with this instruction size")
if dtypes.itemsize_bits(kwargs["in_jax_dtype"]) <= 8 and kwargs["n"] == swizzle:
self.skipTest("Only 8-bit and larger inputs are supported for MMA")
self._basic_mma_test(
**kwargs,
k_steps=2, # Reducing to 1 can be helpful while debugging.
lhs_transpose_tiles=False,
rhs_transpose_tiles=False,
)
@parameterized.product(
lhs_transpose=(False, True),
rhs_transpose=(False, True),
in_jax_dtype=(jnp.int8,),
out_jax_dtype=(jnp.int32,),
m=(64, 128,), # TODO(apaszke): 192, 256
n=(64, 128, 160, 192, 256, 512),
swizzle=(32, 64, 128,),
)
def test_mma_basic_int(self, **kwargs):
in_bytewidth = jnp.dtype(kwargs["in_jax_dtype"]).itemsize
lhs_transpose = kwargs["lhs_transpose"]
swizzle = kwargs["swizzle"]
if lhs_transpose and kwargs["m"] * in_bytewidth < swizzle:
self.skipTest("swizzle too large for input (lhs)")
n_steps = 2 if kwargs["m"] == 64 else 1
n_instr_size = kwargs["n"] * in_bytewidth // n_steps
if n_instr_size < swizzle or n_instr_size % swizzle != 0:
self.skipTest("swizzle doesn't work with this instruction size")
if dtypes.itemsize_bits(kwargs["in_jax_dtype"]) <= 8 and kwargs["n"] == swizzle:
self.skipTest("Only 8-bit and larger inputs are supported for MMA")
self._basic_mma_test(
**kwargs,
k_steps=2, # Reducing to 1 can be helpful while debugging.
lhs_transpose_tiles=False,
rhs_transpose_tiles=False,
)
@parameterized.product(
lhs_transpose=(False, True),
rhs_transpose=(False, True),
in_jax_dtype=(jnp.float16,),
out_jax_dtype=(jnp.float32,),
m=(128,),
n=(128, 512),
swizzle=(32, 64, 128,),
lhs_transpose_tiles=(False, True),
rhs_transpose_tiles=(False, True),
)
def test_mma_transposed_tiles(self, **kwargs):
if not kwargs["lhs_transpose_tiles"] and not kwargs["rhs_transpose_tiles"]:
self.skipTest("This is already tested in test_mma_basic")
self._basic_mma_test(
**kwargs,
k_steps=2, # Reducing to 1 can be helpful while debugging.
)
@parameterized.product(
lhs_transpose=(False, True),
rhs_transpose=(False, True),
m=(64, 128,),
n=(128, 256, 512),
lhs_swizzle=(32, 64, 128,),
rhs_swizzle=(32, 64, 128,),
)
def test_mma_different_swizzle(self, **kwargs):
if kwargs["lhs_swizzle"] == kwargs["rhs_swizzle"]:
self.skipTest("Swizzle is equal")
self._basic_mma_test(
in_jax_dtype=jnp.float16,
out_jax_dtype=jnp.float32,
swizzle=None,
k_steps=2, # Reducing to 1 can be helpful while debugging.
**kwargs,
)
def _basic_mma_test(
self,
m,
n,
k_steps,
swizzle,
lhs_transpose,
rhs_transpose,
in_jax_dtype,
out_jax_dtype,
rhs_transpose_tiles=False,
lhs_transpose_tiles=False,
lhs_swizzle=None,
rhs_swizzle=None,
):
if lhs_swizzle is None:
lhs_swizzle = swizzle
if rhs_swizzle is None:
rhs_swizzle = swizzle
swizzle = max(lhs_swizzle, rhs_swizzle)
if out_jax_dtype != jnp.float32 and (
in_jax_dtype == jnp.float32 or in_jax_dtype == jnp.bfloat16
):
self.skipTest("Only f32 output is supported for f32 and bf16 input.")
in_mlir_dtype = utils.dtype_to_ir_type(in_jax_dtype)
swizzle_elems = swizzle // bytewidth(in_mlir_dtype)
k = swizzle_elems * k_steps
lhs_tiling = (8, lhs_swizzle // bytewidth(in_mlir_dtype))
rhs_tiling = (8, rhs_swizzle // bytewidth(in_mlir_dtype))
def kernel(ctx, lhs, rhs, out, scratch):
lhs_smem, rhs_smem, barriers, mma_barrier, acc = scratch
lhs_transform = (mgpu.TileTransform(lhs_tiling),)
if lhs_transpose_tiles:
lhs_transform += (mgpu.TransposeTransform((1, 0, 2, 3)),)
rhs_transform = (mgpu.TileTransform(rhs_tiling),)
if rhs_transpose_tiles:
rhs_transform += (mgpu.TransposeTransform((1, 0, 2, 3)),)
ctx.async_copy(
src_ref=lhs,
dst_ref=lhs_smem,
swizzle=lhs_swizzle,
gmem_transform=lhs_transform,
barrier=barriers[0],
)
ctx.async_copy(
src_ref=rhs,
dst_ref=rhs_smem,
swizzle=rhs_swizzle,
gmem_transform=rhs_transform,
barrier=barriers[1],
)
barriers[0].wait()
barriers[1].wait()
with mgpu.single_thread():
if lhs_transpose_tiles:
lhs_smem = memref_transpose(lhs_smem, (1, 0, 2, 3))
if lhs_transpose:
lhs_smem = memref_transpose(lhs_smem, (1, 0, 3, 2))
if rhs_transpose_tiles:
rhs_smem = memref_transpose(rhs_smem, (1, 0, 2, 3))
if rhs_transpose:
rhs_smem = memref_transpose(rhs_smem, (1, 0, 3, 2))
tcgen05.mma(
acc, lhs_smem, rhs_smem, a_swizzle=lhs_swizzle, b_swizzle=rhs_swizzle, accumulate=False,
)
tcgen05.commit_arrive(mma_barrier)
mma_barrier.wait(orders_tensor_core=True)
is_signed = True if jnp.issubdtype(in_jax_dtype, jnp.integer) else None
acc.load(is_signed=is_signed).store_untiled(out, optimized=False)
x_shape = (k, m) if lhs_transpose else (m, k)
x = self.prng.uniform(-1, 1, x_shape).astype(in_jax_dtype)
y_shape = (n, k) if rhs_transpose else (k, n)
y = self.prng.uniform(-1, 1, y_shape).astype(in_jax_dtype)
out_shape = jax.ShapeDtypeStruct((m, n), out_jax_dtype)
if y_shape[0] % rhs_tiling[0] != 0 or y_shape[1] % rhs_tiling[1] != 0:
self.skipTest("rhs tiling must divide y_shape")
rhs_smem_shape = tile_shape(y_shape, rhs_tiling)
if rhs_transpose_tiles:
rhs_smem_shape = (
rhs_smem_shape[1], rhs_smem_shape[0], *rhs_smem_shape[2:]
)
if x_shape[0] % lhs_tiling[0] != 0 or x_shape[1] % lhs_tiling[1] != 0:
self.skipTest("lhs tiling must divide x_shape")
lhs_smem_shape = tile_shape(x_shape, lhs_tiling)
if lhs_transpose_tiles:
lhs_smem_shape = (
lhs_smem_shape[1], lhs_smem_shape[0], *lhs_smem_shape[2:]
)
scratch_shape = [
jax.ShapeDtypeStruct(lhs_smem_shape, in_jax_dtype),
jax.ShapeDtypeStruct(rhs_smem_shape, in_jax_dtype),
mgpu.TMABarrier(2),
mgpu.Barrier(1),
mgpu.TMEM((m, n), out_jax_dtype),
]
z = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), (x, y), out_shape, scratch_shape
)(x, y)
x32, y32 = x.astype(np.float32), y.astype(np.float32)
ref = (x32.T if lhs_transpose else x32) @ (y32.T if rhs_transpose else y32)
atol = 2e-2 if out_jax_dtype == jnp.float16 else 2e-5
rtol = 8e-4 if out_jax_dtype == jnp.float16 else 1e-7
np.testing.assert_allclose(z, ref, atol=atol, rtol=rtol)
@parameterized.product(
in_jax_dtype=(jnp.float16, jnp.bfloat16), # TODO(apaszke): f32
out_jax_dtype=(jnp.float16, jnp.float32,),
m=(128,), # TODO(apaszke): 64, 192, 256
n=(64, 160, 128, 256),
)
def test_mma_lhs_tmem_float(self, m, n, in_jax_dtype, out_jax_dtype):
self._basic_mma_lhs_tmem_test(
m, n, in_jax_dtype, out_jax_dtype, tcgen05.LAYOUT, swizzle=128
)
@parameterized.product(
in_jax_dtype=(jnp.int8, jnp.uint8),
out_jax_dtype=(jnp.int32,),
m=(128,),
n=(64, 128, 256),
)
def test_mma_lhs_tmem_integer(self, m, n, in_jax_dtype, out_jax_dtype):
self._basic_mma_lhs_tmem_test(
m, n, in_jax_dtype, out_jax_dtype, fa.tmem_native_layout(vector_length=4),
swizzle=math.gcd(n, 128)
)
def _basic_mma_lhs_tmem_test(
self, m, n, in_jax_dtype, out_jax_dtype, lhs_layout, swizzle
):
k_steps = 2 # Reducing to 1 can be helpful while debugging.
if out_jax_dtype == jnp.float16 and in_jax_dtype != jnp.float16:
self.skipTest("Only f16 input is supported for f16 output.")
in_mlir_dtype = utils.dtype_to_ir_type(in_jax_dtype)
swizzle_elems = swizzle // bytewidth(in_mlir_dtype)
k = swizzle_elems * k_steps
rhs_tiling = (8, swizzle_elems)
def kernel(ctx, lhs, rhs, out, scratch):
rhs_smem, barrier, mma_barrier, acc, lhs_tmem = scratch
ctx.async_copy(
src_ref=rhs,
dst_ref=rhs_smem,
swizzle=swizzle,
gmem_transform=mgpu.TileTransform(rhs_tiling),
barrier=barrier,
)
barrier.wait()
if jnp.issubdtype(in_jax_dtype, jnp.integer):
is_signed = jnp.issubdtype(in_jax_dtype, jnp.signedinteger)
else:
is_signed = None
lhs_tmem.store(
fa.FragmentedArray.load_untiled(
lhs, layout=lhs_layout, is_signed=is_signed, optimized=False
)
)
tcgen05.commit_tmem()
with mgpu.single_thread():
tcgen05.mma(
acc, lhs_tmem, rhs_smem, a_swizzle=swizzle, b_swizzle=swizzle, accumulate=False,
)
tcgen05.commit_arrive(mma_barrier)
mma_barrier.wait(orders_tensor_core=True)
acc.load(is_signed=is_signed).store_untiled(out, optimized=False)
x_shape = (m, k)
x = self.prng.uniform(-1, 1, x_shape).astype(in_jax_dtype)
y_shape = (k, n)
y = self.prng.uniform(-1, 1, y_shape).astype(in_jax_dtype)
out_shape = jax.ShapeDtypeStruct((m, n), out_jax_dtype)
if y_shape[0] % rhs_tiling[0] != 0 or y_shape[1] % rhs_tiling[1] != 0:
self.skipTest("rhs tiling must divide y_shape")
scratch_shape = [
jax.ShapeDtypeStruct(tile_shape(y_shape, rhs_tiling), in_jax_dtype),
mgpu.TMABarrier(),
mgpu.Barrier(1),
mgpu.TMEM((128, n), out_jax_dtype),
mgpu.TMEM((128, k), in_jax_dtype, packing=4 // bytewidth(in_mlir_dtype)),
]
z = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), (x, y), out_shape, scratch_shape
)(x, y)
x32, y32 = x.astype(np.float32), y.astype(np.float32)
ref = x32 @ y32
atol = 2e-2 if out_jax_dtype == jnp.float16 else 2e-5
rtol = 8e-4 if out_jax_dtype == jnp.float16 else 1e-7
np.testing.assert_allclose(z, ref, atol=atol, rtol=rtol)
def test_tmem_copy_scales(self):
dtype = jnp.float8_e8m0fnu
def kernel(ctx, src, out, scratch):
smem, barrier, tmem = scratch
ctx.async_copy(src_ref=src, dst_ref=smem, barrier=barrier)
barrier.wait()
with mgpu.single_thread():
tcgen05.async_copy_scales_smem_to_tmem(smem, tmem)
tcgen05.commit_arrive(barrier)
barrier.wait(orders_tensor_core=True)
# We print as i32, because i8 seems to overflow the CUDA printf buffer and
# produce a truncated output.
tcgen05.TMEMRef(
tmem.address,
(128, 4),
ir.IntegerType.get_signless(32),
tcgen05.tmem_default_layout(),
)._debug_print()
copy(src, out)
shape = (1, 1, 32, 16)
x = jax.lax.bitcast_convert_type(
np.arange(math.prod(shape), dtype=np.uint8).reshape(shape), dtype
)
scratch_shape = [
x,
mgpu.TMABarrier(1),
mgpu.TMEM((128, 4), dtype, layout=tcgen05.scales_layout()),
]
with self.capture_stdout() as stdout:
mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), x, x, scratch_shape
)(x)
matches = 0
for l in stdout().splitlines():
if ":" not in l:
continue
idxs, value = l.split(":")
row, col = map(int, idxs[1:-1].split(","))
base = (row % 32) * 16 + col * 4
base %= 256 # int8 has very limited range
expected = base | (base + 1) << 8 | (base + 2) << 16 | (base + 3) << 24
self.assertEqual(int(value), expected)
matches += 1
self.assertEqual(matches, 128 * 4)
def _sample_scales(self, m, k, n, block_size, scale_jax_dtype):
ka, kb = jax.random.split(jax.random.key(1234), 2)
if scale_jax_dtype == jnp.float8_e8m0fnu:
a_scales = jax.lax.bitcast_convert_type(
jax.random.randint(ka, (m, k // block_size), 122, 132, dtype=jnp.uint8),
scale_jax_dtype
)
b_scales = jax.lax.bitcast_convert_type(
jax.random.randint(kb, (n, k // block_size), 122, 132, dtype=jnp.uint8),
scale_jax_dtype
)
elif scale_jax_dtype == jnp.float8_e4m3fn:
a_scales = jnp.abs(
jax.random.normal(ka, (m, k // block_size), dtype=jnp.float32).astype(
scale_jax_dtype
)
)
b_scales = jnp.abs(
jax.random.normal(kb, (n, k // block_size), dtype=jnp.float32).astype(
scale_jax_dtype
)
)
else:
raise ValueError(f"Unsupported scale dtype: {scale_jax_dtype}")
return a_scales, b_scales
@parameterized.product(
in_jax_dtype=(jnp.float8_e5m2, jnp.float8_e4m3fn, jnp.float4_e2m1fn),
scale_jax_dtype=(jnp.float8_e8m0fnu, jnp.float8_e4m3fn),
m=(128,), # TODO(apaszke): 256
n=(128, 256), # TODO(apaszke): 192, other non-power-of-2
swizzle=(32, 128),
)
def test_mma_block_scaled_basic(self, m, n, in_jax_dtype, scale_jax_dtype, swizzle):
out_jax_dtype = jnp.float32
# When swizzle is small, we need to take many steps to make it large enough
# to make the scale count a multiple of 4.
k_steps = 4 if swizzle == 32 else 2
if scale_jax_dtype == jnp.float8_e8m0fnu:
block_size = 32
elif scale_jax_dtype == jnp.float8_e4m3fn:
if in_jax_dtype != jnp.float4_e2m1fn:
self.skipTest("Only float4_e2m1fn input is supported for e4m3fn scale.")
block_size = 16
else:
raise ValueError(f"Unsupported scale dtype: {scale_jax_dtype}")
if out_jax_dtype == jnp.float16 and in_jax_dtype != jnp.float16:
self.skipTest("Only f16 input is supported for f16 output.")
in_mlir_dtype = utils.dtype_to_ir_type(in_jax_dtype)
swizzle_elems = 8 * swizzle // bitwidth(in_mlir_dtype)
k = swizzle_elems * k_steps
lhs_tiling = rhs_tiling = (8, swizzle_elems)
def kernel(ctx, lhs, rhs, lhs_scales_gmem, rhs_scales_gmem, out, scratch):
lhs_smem, rhs_smem, lhs_scales_smem, rhs_scales_smem, barriers, mma_barrier, acc, lhs_scales, rhs_scales = scratch
operand_kwargs = dict(
swizzle=swizzle,
gmem_transform=mgpu.TileTransform(lhs_tiling),
)
ctx.async_copy(src_ref=lhs, dst_ref=lhs_smem, barrier=barriers[0], **operand_kwargs)
ctx.async_copy(src_ref=rhs, dst_ref=rhs_smem, barrier=barriers[1], **operand_kwargs)
ctx.async_copy(src_ref=lhs_scales_gmem, dst_ref=lhs_scales_smem, barrier=barriers[2])
ctx.async_copy(src_ref=rhs_scales_gmem, dst_ref=rhs_scales_smem, barrier=barriers[3])
for i in range(4):
barriers[i].wait()
with mgpu.single_thread():
tcgen05.async_copy_scales_smem_to_tmem(lhs_scales_smem, lhs_scales)
tcgen05.async_copy_scales_smem_to_tmem(rhs_scales_smem, rhs_scales)
tcgen05.mma(
acc,
lhs_smem,
mgpu.memref_transpose(rhs_smem, (1, 0, 3, 2)),
a_swizzle=swizzle,
b_swizzle=swizzle,
a_scale=lhs_scales,
b_scale=rhs_scales,
accumulate=False,
)
tcgen05.commit_arrive(mma_barrier)
mma_barrier.wait(orders_tensor_core=True)
acc.load().store_untiled(out, optimized=False)
x_shape = (m, k)
x = self.prng.uniform(-1, 1, x_shape).astype(in_jax_dtype)
y_shape = (n, k)
y = self.prng.uniform(-1, 1, y_shape).astype(in_jax_dtype)
out_shape = jax.ShapeDtypeStruct((m, n), out_jax_dtype)
scratch_shape = [
jax.ShapeDtypeStruct(tile_shape(x_shape, lhs_tiling), in_jax_dtype),
jax.ShapeDtypeStruct(tile_shape(y_shape, rhs_tiling), in_jax_dtype),
jax.ShapeDtypeStruct((m // 128, k // (block_size * 4), 32, 16), scale_jax_dtype),
jax.ShapeDtypeStruct((n // 128, k // (block_size * 4), 32, 16), scale_jax_dtype),
mgpu.TMABarrier(4),
mgpu.Barrier(1),
mgpu.TMEM((m, n), out_jax_dtype),
mgpu.TMEM((m, k // block_size), scale_jax_dtype, layout=tcgen05.scales_layout()),
mgpu.TMEM((n, k // block_size), scale_jax_dtype, layout=tcgen05.scales_layout()),
]
a_scales, b_scales = self._sample_scales(m, k, n, block_size, scale_jax_dtype)
def format_scales(scales):
mn, k = scales.shape
assert mn % 128 == 0 and k % 4 == 0, scales.shape
return (
scales.reshape(mn // 128, 4, 32, k // 4, 4)
.transpose(0, 3, 2, 1, 4)
.reshape(mn // 128, k // 4, 32, 16)
)
a_gpu_scales, b_gpu_scales = map(format_scales, (a_scales, b_scales))
args = (x, y, a_gpu_scales, b_gpu_scales)
z = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), args, out_shape, scratch_shape
)(*args)
x32, y32 = x.astype(np.float32), y.astype(np.float32)
a_logical_scales = jnp.repeat(a_scales, block_size, axis=1).astype(jnp.float32)
b_logical_scales = jnp.repeat(b_scales, block_size, axis=1).astype(jnp.float32)
ref = (x32 * a_logical_scales) @ (y32 * b_logical_scales).T
np.testing.assert_allclose(z, ref, atol=2e-4, rtol=5e-6)
@parameterized.product(
m=(256,),
n=(64, 128, 256),
scale_jax_dtype=(jnp.float8_e8m0fnu, jnp.float8_e4m3fn),
)
def test_mma_block_scaled_collective(self, m, n, scale_jax_dtype):
m, n = 256, 256
in_jax_dtype = jnp.float4_e2m1fn
out_jax_dtype = jnp.float32
scale_block = 32 if scale_jax_dtype == jnp.float8_e8m0fnu else 16
swizzle = 128
k_steps = 2
in_mlir_dtype = utils.dtype_to_ir_type(in_jax_dtype)
swizzle_elems = 8 * swizzle // bitwidth(in_mlir_dtype)
k = swizzle_elems * k_steps
lhs_tiling = rhs_tiling = (8, swizzle_elems)
def kernel(ctx, lhs, rhs, lhs_scales_gmem, rhs_scales_gmem, out, scratch):
(
lhs_smem, rhs_smem, lhs_scales_smem, rhs_scales_smem,
barriers, mma_barrier, acc, lhs_scales, rhs_scales
) = scratch
ctx.async_copy(
src_ref=lhs,
dst_ref=lhs_smem,
barrier=barriers[0],
swizzle=swizzle,
gmem_transform=mgpu.TileTransform(lhs_tiling),
collective=gpu.Dimension.x,
partitioned=0,
)
ctx.async_copy(
src_ref=rhs,
dst_ref=rhs_smem,
barrier=barriers[1],
swizzle=swizzle,
gmem_transform=mgpu.TileTransform(rhs_tiling),
collective=gpu.Dimension.x,
partitioned=0,
)
ctx.async_copy(
src_ref=lhs_scales_gmem,
dst_ref=lhs_scales_smem,
barrier=barriers[2],
collective=gpu.Dimension.x,
partitioned=0,
)
# B scales are replicated! Note that this does not use 2CTA TMA and will
# need to be awaited in the non-leader CTA or else we will double arrive.
ctx.async_copy(
src_ref=rhs_scales_gmem,
dst_ref=rhs_scales_smem,
barrier=barriers[3],
collective=gpu.Dimension.x,
)
is_leader_thread = single_thread_predicate()
index = ir.IndexType.get()
block_id = gpu.cluster_block_id(gpu.Dimension.x)
is_first_block = arith.cmpi(arith.CmpIPredicate.eq, block_id, c(0, index))
with when(arith.andi(is_first_block, is_leader_thread)):
for i in range(4):
barriers[i].wait()
tcgen05.async_copy_scales_smem_to_tmem(lhs_scales_smem, lhs_scales, collective=True)
tcgen05.async_copy_scales_smem_to_tmem(rhs_scales_smem, rhs_scales, collective=True)
tcgen05.mma(
acc,
lhs_smem,
mgpu.memref_transpose(rhs_smem, (1, 0, 3, 2)),
a_swizzle=swizzle,
b_swizzle=swizzle,
a_scale=lhs_scales,
b_scale=rhs_scales,
accumulate=False,
collective=True,
)
tcgen05.commit_arrive(mma_barrier, collective=True, ctx=ctx)
mma_barrier.wait(orders_tensor_core=True)
m_block_tile = m // 2
m_slice = ds(arith.muli(block_id, c(m_block_tile, index)), m_block_tile)
acc.load().store_untiled(memref_slice(out, m_slice), optimized=False)
x_shape = (m, k)
x = self.prng.uniform(-1, 1, x_shape).astype(in_jax_dtype)
y_shape = (n, k)
y = self.prng.uniform(-1, 1, y_shape).astype(in_jax_dtype)
out_shape = jax.ShapeDtypeStruct((m, n), out_jax_dtype)
m_block = m // 2
n_block = n // 2
scratch_shape = [
jax.ShapeDtypeStruct(
tile_shape((m_block, k), lhs_tiling), in_jax_dtype
),
jax.ShapeDtypeStruct(
tile_shape((n_block, k), rhs_tiling), in_jax_dtype
),
jax.ShapeDtypeStruct(
(m_block // 128, k // (scale_block * 4), 32, 16), scale_jax_dtype
),
jax.ShapeDtypeStruct(
(n // 128, k // (scale_block * 4), 32, 16), scale_jax_dtype
),
mgpu.TMABarrier(4),
mgpu.Barrier(1),
mgpu.TMEM((m_block, n), out_jax_dtype, collective=True),
mgpu.TMEM(
(m_block, k // scale_block),
scale_jax_dtype,
layout=tcgen05.scales_layout(),
collective=True,
),
mgpu.TMEM(
(n, k // scale_block),
scale_jax_dtype,
layout=tcgen05.scales_layout(),
collective=True,
),
]
a_scales, b_scales = self._sample_scales(m, k, n, scale_block, scale_jax_dtype)
def format_scales(scales):
mn, k = scales.shape
assert mn % 128 == 0 and k % 4 == 0, scales.shape
return (
scales.reshape(mn // 128, 4, 32, k // 4, 4)
.transpose(0, 3, 2, 1, 4)
.reshape(mn // 128, k // 4, 32, 16)
)
a_gpu_scales = format_scales(a_scales)
b_gpu_scales = format_scales(b_scales)
args = (x, y, a_gpu_scales, b_gpu_scales)
z = mgpu.as_gpu_kernel(
kernel, (2, 1, 1), (128, 1, 1), args, out_shape, scratch_shape, cluster=(2, 1, 1),
)(*args)
x32, y32 = x.astype(np.float32), y.astype(np.float32)
a_logical_scales = jnp.repeat(a_scales, scale_block, axis=1).astype(jnp.float32)
b_logical_scales = jnp.repeat(b_scales, scale_block, axis=1).astype(jnp.float32)
ref = (x32 * a_logical_scales) @ (y32 * b_logical_scales).T
np.testing.assert_allclose(z, ref, atol=2e-4, rtol=5e-6)
@parameterized.product(
lhs_transpose=(False, True),
rhs_transpose=(False, True),
in_jax_dtype=(jnp.float16, jnp.bfloat16, jnp.int8, jnp.float8_e4m3fn),
m=(128,), # TODO(apaszke): 256
n=(128, 256), # TODO(apaszke): other non-power-of-2
lhs_swizzle=(32, 64, 128),
rhs_swizzle=(64, 128), # 32 is too small and unsuported.
)
def test_mma_sparse(self, m, n, in_jax_dtype, lhs_swizzle, rhs_swizzle, lhs_transpose, rhs_transpose):
if jnp.issubdtype(in_jax_dtype, jnp.floating):
out_jax_dtype = jnp.float32
else:
out_jax_dtype = jnp.int32
sparse_meta_dtype = jnp.uint2
in_mlir_dtype = utils.dtype_to_ir_type(in_jax_dtype)
k = 256
lhs_tiling = (8, 8 * lhs_swizzle // bitwidth(in_mlir_dtype))
rhs_tiling = (8, 8 * rhs_swizzle // bitwidth(in_mlir_dtype))
def kernel(ctx, lhs, rhs, lhs_sparse_gmem, out, scratch):
lhs_smem, rhs_smem, lhs_sparse_smem, barriers, mma_barrier, acc, lhs_sparse = scratch
ctx.async_copy(src_ref=lhs, dst_ref=lhs_smem, barrier=barriers[0], swizzle=lhs_swizzle, gmem_transform=mgpu.TileTransform(lhs_tiling))
ctx.async_copy(src_ref=rhs, dst_ref=rhs_smem, barrier=barriers[1], swizzle=rhs_swizzle, gmem_transform=mgpu.TileTransform(rhs_tiling))
ctx.async_copy(src_ref=lhs_sparse_gmem, dst_ref=lhs_sparse_smem, barrier=barriers[2])
for i in range(3):
barriers[i].wait()
with mgpu.single_thread():
tcgen05.async_copy_sparse_metadata_smem_to_tmem(lhs_sparse_smem, lhs_sparse)
if lhs_transpose:
lhs_smem = mgpu.memref_transpose(lhs_smem, (1, 0, 3, 2))
if rhs_transpose:
rhs_smem = mgpu.memref_transpose(rhs_smem, (1, 0, 3, 2))
tcgen05.mma(
acc,
lhs_smem,
rhs_smem,
a_swizzle=lhs_swizzle,
b_swizzle=rhs_swizzle,
a_sparse_metadata=lhs_sparse,
accumulate=False,
)
tcgen05.commit_arrive(mma_barrier)
mma_barrier.wait(orders_tensor_core=True)
is_signed = True if jnp.issubdtype(in_jax_dtype, jnp.integer) else None
acc.load(is_signed=is_signed).store_untiled(out, optimized=False)
x_shape = (k // 2, m) if lhs_transpose else (m, k // 2)
y_shape = (n, k) if rhs_transpose else (k, n)
if jnp.issubdtype(in_jax_dtype, jnp.integer):
x = jax.random.randint(jax.random.key(1234), x_shape, -64, 64, dtype=in_jax_dtype)
y = jax.random.randint(jax.random.key(2567), y_shape, -64, 64, dtype=in_jax_dtype)
else:
x = self.prng.uniform(-1, 1, x_shape).astype(in_jax_dtype)
y = self.prng.uniform(-1, 1, y_shape).astype(in_jax_dtype)
out_shape = jax.ShapeDtypeStruct((m, n), out_jax_dtype)
scratch_shape = [
jax.ShapeDtypeStruct(tile_shape(x_shape, lhs_tiling), in_jax_dtype),
jax.ShapeDtypeStruct(tile_shape(y_shape, rhs_tiling), in_jax_dtype),
jax.ShapeDtypeStruct((m // 128, k // 128, 128, 64), sparse_meta_dtype),
mgpu.TMABarrier(3),
mgpu.Barrier(1),
mgpu.TMEM((m, n), out_jax_dtype),
mgpu.TMEM((m, k // 2), sparse_meta_dtype, layout=tcgen05.sparse_meta_layout()),
]
index_pairs = np.asarray(np.meshgrid(range(4), range(4))).T.reshape(-1, 2)
valid_pairs = index_pairs[index_pairs[:, 0] < index_pairs[:, 1]]
assert len(valid_pairs) == 6
x_pairs = jax.random.randint(jax.random.key(1234), (m, k // 4), 0, 6, dtype=jnp.uint8)
x_sparse = valid_pairs[x_pairs]
assert x_sparse.shape == (m, k // 4, 2)
def format_sparse_meta(meta):
mn, k, _2 = meta.shape
assert _2 == 2
k *= 2
if jnp.dtype(in_jax_dtype).itemsize == 1:
meta_tiled = (
meta.reshape(mn // 128, 128, k // 64, 64).transpose(0, 2, 1, 3)
)
else:
meta_tiled = (
meta.reshape(mn // 128, 8, 2, 8, k // 64, 4, 2, 8)
.transpose(0, 4, 1, 6, 3, 5, 2, 7)
)
return (
meta_tiled.reshape(mn // 128, k // 64, 128, 64)
.astype(sparse_meta_dtype)
)
x_gpu_sparse = format_sparse_meta(x_sparse)
args = (x, y, x_gpu_sparse)
z = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), args, out_shape, scratch_shape
)(*args)
if lhs_transpose:
x = x.T
if rhs_transpose:
y = y.T
x_logical = np.zeros_like(x, shape=(m, k // 4, 4))
np.put_along_axis(x_logical, x_sparse, x.reshape(x_sparse.shape), axis=-1)
x_logical = x_logical.reshape(m, k)
ref = x_logical.astype(jnp.float32) @ y.astype(jnp.float32)
atol = 2e-2 if out_jax_dtype == jnp.float16 else 7e-5
rtol = 8e-4 if out_jax_dtype == jnp.float16 else 5e-6
np.testing.assert_allclose(z, ref, atol=atol, rtol=rtol)
@parameterized.product(
in_jax_dtype=(jnp.float16, jnp.bfloat16),
m=(128,), # TODO(apaszke): 256
n=(128, 256), # TODO(apaszke): other non-power-of-2
lhs_swizzle=(32, 64, 128),
rhs_swizzle=(64, 128), # 32 is too small and unsuported.
)
def test_mma_sparse_lhs_tmem(
self, m, n, in_jax_dtype, lhs_swizzle, rhs_swizzle
):
out_jax_dtype = jnp.float32
sparse_meta_dtype = jnp.uint2
in_mlir_dtype = utils.dtype_to_ir_type(in_jax_dtype)
k = 256
rhs_tiling = (8, 8 * rhs_swizzle // bitwidth(in_mlir_dtype))
def kernel(ctx, lhs, rhs, lhs_sparse_gmem, out, scratch):
(
rhs_smem,
lhs_sparse_smem,
barriers,
mma_barrier,
acc,
lhs_tmem,
lhs_sparse,
) = scratch
ctx.async_copy(
src_ref=rhs,
dst_ref=rhs_smem,
barrier=barriers[0],
swizzle=rhs_swizzle,
gmem_transform=mgpu.TileTransform(rhs_tiling),
)
ctx.async_copy(
src_ref=lhs_sparse_gmem, dst_ref=lhs_sparse_smem, barrier=barriers[1]
)
barriers[0].wait()
barriers[1].wait()
lhs_tmem.store(
fa.FragmentedArray.load_untiled(
lhs, layout=tcgen05.LAYOUT, optimized=False
)
)
tcgen05.commit_tmem()
with mgpu.single_thread():
tcgen05.async_copy_sparse_metadata_smem_to_tmem(
lhs_sparse_smem, lhs_sparse
)
tcgen05.mma(
acc,
lhs_tmem,
rhs_smem,
a_swizzle=lhs_swizzle,
b_swizzle=rhs_swizzle,
a_sparse_metadata=lhs_sparse,
accumulate=False,
)
tcgen05.commit_arrive(mma_barrier)
mma_barrier.wait(orders_tensor_core=True)
acc.load().store_untiled(out, optimized=False)
x_shape = (m, k // 2)
x = self.prng.uniform(-1, 1, x_shape).astype(in_jax_dtype)
y_shape = (k, n)
y = self.prng.uniform(-1, 1, y_shape).astype(in_jax_dtype)
out_shape = jax.ShapeDtypeStruct((m, n), out_jax_dtype)
scratch_shape = [
jax.ShapeDtypeStruct(tile_shape(y_shape, rhs_tiling), in_jax_dtype),
jax.ShapeDtypeStruct((m // 128, k // 128, 128, 64), sparse_meta_dtype),
mgpu.TMABarrier(2),
mgpu.Barrier(1),
mgpu.TMEM((m, n), out_jax_dtype),
mgpu.TMEM((m, k // 2), in_jax_dtype, packing=2),
mgpu.TMEM(
(m, k // 2), sparse_meta_dtype, layout=tcgen05.sparse_meta_layout()
),
]
index_pairs = np.asarray(np.meshgrid(range(4), range(4))).T.reshape(-1, 2)
valid_pairs = index_pairs[index_pairs[:, 0] < index_pairs[:, 1]]
assert len(valid_pairs) == 6
x_pairs = jax.random.randint(
jax.random.key(1234), (m, k // 4), 0, 6, dtype=jnp.uint8
)
x_sparse = valid_pairs[x_pairs]
assert x_sparse.shape == (m, k // 4, 2)
def format_sparse_meta(meta):
mn, k, _2 = meta.shape
assert _2 == 2
k *= 2
return (
meta.reshape(mn // 128, 8, 2, 8, k // 64, 4, 2, 8)
.transpose(0, 4, 1, 6, 3, 5, 2, 7)
.reshape(mn // 128, k // 64, 128, 64)
.astype(sparse_meta_dtype)
)
x_gpu_sparse = format_sparse_meta(x_sparse)
args = (x, y, x_gpu_sparse)
z = mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), args, out_shape, scratch_shape
)(*args)
x_logical = np.zeros_like(x, shape=(m, k // 4, 4))
np.put_along_axis(x_logical, x_sparse, x.reshape(x_sparse.shape), axis=-1)
x_logical = x_logical.reshape(m, k)
ref = x_logical.astype(jnp.float32) @ y.astype(jnp.float32)
atol = 2e-2 if out_jax_dtype == jnp.float16 else 7e-5
rtol = 8e-4 if out_jax_dtype == jnp.float16 else 5e-6
np.testing.assert_allclose(z, ref, atol=atol, rtol=rtol)
@parameterized.product(
in_jax_dtype=(jnp.float16, jnp.float8_e4m3fn),
m=(256,), # TODO(apaszke): 256
n=(128, 256), # TODO(apaszke): other non-power-of-2
lhs_swizzle=(32, 64, 128),
rhs_swizzle=(64, 128), # 32 is too small and unsupported.
)
def test_mma_sparse_collective(self, m, n, in_jax_dtype, lhs_swizzle, rhs_swizzle):
out_jax_dtype = jnp.float32
sparse_meta_dtype = jnp.uint2
in_mlir_dtype = utils.dtype_to_ir_type(in_jax_dtype)
k = 256
lhs_tiling = (8, 8 * lhs_swizzle // bitwidth(in_mlir_dtype))
rhs_tiling = (8, 8 * rhs_swizzle // bitwidth(in_mlir_dtype))
if m // 2 < lhs_tiling[1]:
self.skipTest("LHS too small for this swizzle")
if n // 2 < rhs_tiling[1]:
self.skipTest("RHS too small for this swizzle")
def kernel(ctx, lhs, rhs, lhs_sparse_gmem, out, scratch):
lhs_smem, rhs_smem, lhs_sparse_smem, barriers, mma_barrier, acc, lhs_sparse = scratch
ctx.async_copy(
src_ref=lhs,
dst_ref=lhs_smem,
barrier=barriers[0],
swizzle=lhs_swizzle,
gmem_transform=mgpu.TileTransform(lhs_tiling),
collective=gpu.Dimension.x,
partitioned=0,
)
ctx.async_copy(
src_ref=rhs,
dst_ref=rhs_smem,
barrier=barriers[1],
swizzle=rhs_swizzle,
gmem_transform=mgpu.TileTransform(rhs_tiling),
collective=gpu.Dimension.x,
partitioned=1,
)
ctx.async_copy(
src_ref=lhs_sparse_gmem,
dst_ref=lhs_sparse_smem,
barrier=barriers[2],
collective=gpu.Dimension.x,
partitioned=0,
)
index = ir.IndexType.get()
block_id = gpu.cluster_block_id(gpu.Dimension.x)
is_first_block = arith.cmpi(arith.CmpIPredicate.eq, block_id, c(0, index))
is_leader_thread = single_thread_predicate()
with when(arith.andi(is_first_block, is_leader_thread)):
for i in range(3):
barriers[i].wait()
tcgen05.async_copy_sparse_metadata_smem_to_tmem(lhs_sparse_smem, lhs_sparse, collective=True)
tcgen05.mma(
acc,
lhs_smem,
rhs_smem,
a_swizzle=lhs_swizzle,
b_swizzle=rhs_swizzle,
a_sparse_metadata=lhs_sparse,
accumulate=False,
collective=True,
)
tcgen05.commit_arrive(mma_barrier, collective=True, ctx=ctx)
mma_barrier.wait(orders_tensor_core=True)
m_block_tile = m // 2
m_slice = ds(arith.muli(block_id, c(m_block_tile, index)), m_block_tile)
acc.load().store_untiled(memref_slice(out, m_slice), optimized=False)
x_shape = (m, k // 2)
y_shape = (k, n)
x = self.prng.uniform(-1, 1, x_shape).astype(in_jax_dtype)
y = self.prng.uniform(-1, 1, y_shape).astype(in_jax_dtype)
out_shape = jax.ShapeDtypeStruct((m, n), out_jax_dtype)
m_block = m // 2
n_block = n // 2
scratch_shape = [
jax.ShapeDtypeStruct(tile_shape((m_block, k // 2), lhs_tiling), in_jax_dtype),
jax.ShapeDtypeStruct(tile_shape((k, n_block), rhs_tiling), in_jax_dtype),
jax.ShapeDtypeStruct((m_block // 128, k // 128, 128, 64), sparse_meta_dtype),
mgpu.TMABarrier(3),
mgpu.Barrier(1),
mgpu.TMEM((m_block, n), out_jax_dtype, collective=True),
mgpu.TMEM((m_block, k // 2), sparse_meta_dtype, layout=tcgen05.sparse_meta_layout(), collective=True),
]
index_pairs = np.asarray(np.meshgrid(range(4), range(4))).T.reshape(-1, 2)
valid_pairs = index_pairs[index_pairs[:, 0] < index_pairs[:, 1]]
assert len(valid_pairs) == 6
x_pairs = jax.random.randint(jax.random.key(1234), (m, k // 4), 0, 6, dtype=jnp.uint8)
x_sparse = valid_pairs[x_pairs]
assert x_sparse.shape == (m, k // 4, 2)
def format_sparse_meta(meta):
mn, k, _2 = meta.shape
assert _2 == 2
k *= 2
if jnp.dtype(in_jax_dtype).itemsize == 1:
meta_tiled = (
meta.reshape(mn // 128, 128, k // 64, 64).transpose(0, 2, 1, 3)
)
else:
meta_tiled = (
meta.reshape(mn // 128, 8, 2, 8, k // 64, 4, 2, 8)
.transpose(0, 4, 1, 6, 3, 5, 2, 7)
)
return (
meta_tiled.reshape(mn // 128, k // 64, 128, 64)
.astype(sparse_meta_dtype)
)
x_gpu_sparse = format_sparse_meta(x_sparse)
args = (x, y, x_gpu_sparse)
z = mgpu.as_gpu_kernel(
kernel, (2, 1, 1), (128, 1, 1), args, out_shape, scratch_shape, cluster=(2, 1, 1)
)(*args)
x_logical = np.zeros_like(x, shape=(m, k // 4, 4))
np.put_along_axis(x_logical, x_sparse, x.reshape(x_sparse.shape), axis=-1)
x_logical = x_logical.reshape(m, k)
ref = x_logical.astype(jnp.float32) @ y.astype(jnp.float32)
atol = 2e-2 if out_jax_dtype == jnp.float16 else 7e-5
rtol = 8e-4 if out_jax_dtype == jnp.float16 else 5e-6
np.testing.assert_allclose(z, ref, atol=atol, rtol=rtol)
@parameterized.product(
lhs_transpose=(False, True),
rhs_transpose=(False, True),
in_jax_dtype=(jnp.float16,),
out_jax_dtype=(jnp.float32,),
m=(128, 256), # TODO(apaszke): 192, 256
n=(128, 160, 256),
swizzle=(32, 64, 128,),
)
def test_mma_collective(
self,
m,
n,
swizzle,
lhs_transpose,
rhs_transpose,
in_jax_dtype,
out_jax_dtype,
):
k_steps = 2 # Reducing to 1 can be helpful while debugging.
if out_jax_dtype == jnp.float16 and in_jax_dtype != jnp.float16:
raise self.skipTest("Only f16 input is supported for f16 output.")
in_mlir_dtype = utils.dtype_to_ir_type(in_jax_dtype)
m_block_tile = m // 2
n_block_tile = n // 2
swizzle_elems = swizzle // bytewidth(in_mlir_dtype)
k = swizzle_elems * k_steps
index = ir.IndexType.get()
tiling = (8, swizzle_elems)
def kernel(ctx, lhs, rhs, out, scratch):
lhs_smem, rhs_smem, barriers, mma_barrier, acc = scratch
block_id = gpu.cluster_block_id(gpu.Dimension.x)
ctx.async_copy(
src_ref=lhs,
dst_ref=lhs_smem,
swizzle=swizzle,
gmem_transform=mgpu.TileTransform(tiling),
barrier=barriers[0],
collective=gpu.Dimension.x,
partitioned=1 if lhs_transpose else 0, # Split non-contracting dim.
)
ctx.async_copy(
src_ref=rhs,
dst_ref=rhs_smem,
swizzle=swizzle,
gmem_transform=mgpu.TileTransform(tiling),
barrier=barriers[1],
collective=gpu.Dimension.x,
partitioned=0 if rhs_transpose else 1, # Split non-contracting dim.
)
is_leader_thread = single_thread_predicate()
is_first_block = arith.cmpi(arith.CmpIPredicate.eq, block_id, c(0, index))
with when(arith.andi(is_first_block, is_leader_thread)):
barriers[0].wait()
barriers[1].wait()
if lhs_transpose:
lhs_smem = memref_transpose(lhs_smem, (1, 0, 3, 2))
if rhs_transpose:
rhs_smem = memref_transpose(rhs_smem, (1, 0, 3, 2))
tcgen05.mma(
acc, lhs_smem, rhs_smem, a_swizzle=swizzle, b_swizzle=swizzle, accumulate=False, collective=True
)
tcgen05.commit_arrive(mma_barrier, collective=True, ctx=ctx)
mma_barrier.wait(orders_tensor_core=True)
m_slice = ds(arith.muli(block_id, c(m_block_tile, index)), m_block_tile)
acc.load().store_untiled(memref_slice(out, m_slice), optimized=False)
in_finfo = jnp.finfo(in_jax_dtype)
exponent_bits, mantissa_bits = in_finfo.nexp, in_finfo.nmant
def quantize(x):
# Quantize the input to avoid rounding when feeding the TensorCore
return jax.lax.reduce_precision(x, exponent_bits, mantissa_bits)
x_shape = (k, m) if lhs_transpose else (m, k)
x_block_shape = (k, m_block_tile) if lhs_transpose else (m_block_tile, k)
x = quantize(self.prng.uniform(-1, 1, x_shape)).astype(in_jax_dtype)
y_shape = (n, k) if rhs_transpose else (k, n)
y_block_shape = (n_block_tile, k) if rhs_transpose else (k, n_block_tile)
y = quantize(self.prng.uniform(-1, 1, y_shape)).astype(in_jax_dtype)
out_shape = jax.ShapeDtypeStruct((m, n), out_jax_dtype)
if any(s % t for s, t in zip(x_block_shape, tiling)):
self.skipTest("LHS block shape not divisible by tiling.")
if any(s % t for s, t in zip(y_block_shape, tiling)):
self.skipTest("RHS block shape not divisible by tiling.")
scratch_shape = [
jax.ShapeDtypeStruct(tile_shape(x_block_shape, tiling), in_jax_dtype),
jax.ShapeDtypeStruct(tile_shape(y_block_shape, tiling), in_jax_dtype),
mgpu.TMABarrier(2),
mgpu.Barrier(1),
mgpu.TMEM((m_block_tile, n), out_jax_dtype, collective=True),
]
z = mgpu.as_gpu_kernel(
kernel, (2, 1, 1), (128, 1, 1), (x, y), out_shape, scratch_shape, cluster=(2, 1, 1)
)(x, y)
x32, y32 = x.astype(np.float32), y.astype(np.float32)
ref = (x32.T if lhs_transpose else x32) @ (y32.T if rhs_transpose else y32)
atol = 2e-2 if out_jax_dtype == jnp.float16 else 5e-6
np.testing.assert_allclose(z, ref, atol=atol)
@parameterized.product(
in_jax_dtype=(jnp.float16,),
out_jax_dtype=(jnp.float32,),
m=(256,), # TODO(apaszke): 64, 192, 256
n=(128, 192, 224, 256,),
k_steps=(2,), # Note: reducing to 1 can be useful for debugging.
swizzle=(32, 64, 128,),
)
def test_mma_collective_lhs_tmem(
self,
m,
n,
k_steps,
swizzle,
in_jax_dtype,
out_jax_dtype,
):
if out_jax_dtype == jnp.float16 and in_jax_dtype != jnp.float16:
raise self.skipTest("Only f16 input is supported for f16 output.")
in_mlir_dtype = utils.dtype_to_ir_type(in_jax_dtype)
m_block_tile = m // 2
n_block_tile = n // 2
swizzle_elems = swizzle // bytewidth(in_mlir_dtype)
k = swizzle_elems * k_steps
index = ir.IndexType.get()
tiling = (8, swizzle_elems)
def kernel(ctx, lhs, rhs, out, scratch):
lhs_smem, rhs_smem, barriers, mma_barrier, cluster_barrier, acc, lhs_tmem = scratch
block_id = gpu.cluster_block_id(gpu.Dimension.x)
ctx.async_copy(
src_ref=lhs,
dst_ref=lhs_smem,
swizzle=swizzle,
gmem_transform=mgpu.TileTransform(tiling),
barrier=barriers[0],
collective=gpu.Dimension.x,
partitioned=0, # Split non-contracting dim.
)
ctx.async_copy(
src_ref=rhs,
dst_ref=rhs_smem,
swizzle=swizzle,
gmem_transform=mgpu.TileTransform(tiling),
barrier=barriers[1],
collective=gpu.Dimension.x,
partitioned=1, # Split non-contracting dim.
)
is_leader_thread = single_thread_predicate()
is_first_block = arith.cmpi(arith.CmpIPredicate.eq, block_id, c(0, index))
with when(arith.andi(is_first_block, is_leader_thread)):
barriers[0].wait()
gpu.barrier()
# Because only block 1 waits on the TMA, we need a cluster barrier so
# that the SMEM updates are visible on block 2.
cluster_barrier.arrive(orders_tensor_core=True)
cluster_barrier.wait(orders_tensor_core=True)
lhs_tmem.store(
fa.FragmentedArray.load_tiled(
lhs_smem, swizzle, layout=tcgen05.LAYOUT
)
)
tcgen05.commit_tmem()
# Make sure TMEM has been loaded on both blocks.
cluster_barrier.arrive(orders_tensor_core=True)
cluster_barrier.wait(orders_tensor_core=True)
with when(arith.andi(is_first_block, is_leader_thread)):
barriers[1].wait()
tcgen05.mma(
acc,
lhs_tmem,
rhs_smem,
a_swizzle=swizzle,
b_swizzle=swizzle,
accumulate=False,
collective=True,
)
tcgen05.commit_arrive(mma_barrier, collective=True, ctx=ctx)
mma_barrier.wait(orders_tensor_core=True)
m_slice = ds(arith.muli(block_id, c(m_block_tile, index)), m_block_tile)
acc.load().store_untiled(memref_slice(out, m_slice), optimized=False)
in_finfo = jnp.finfo(in_jax_dtype)
exponent_bits, mantissa_bits = in_finfo.nexp, in_finfo.nmant
def quantize(x):
# Quantize the input to avoid rounding when feeding the TensorCore
return jax.lax.reduce_precision(x, exponent_bits, mantissa_bits)
x_shape = (m, k)
x_block_shape = (m_block_tile, k)
x = quantize(self.prng.uniform(-1, 1, x_shape)).astype(in_jax_dtype)
y_shape = (k, n)
y_block_shape = (k, n_block_tile)
y = quantize(self.prng.uniform(-1, 1, y_shape)).astype(in_jax_dtype)
out_shape = jax.ShapeDtypeStruct((m, n), out_jax_dtype)
if any(s % t for s, t in zip(x_block_shape, tiling)):
self.skipTest("LHS block shape not divisible by tiling.")
if any(s % t for s, t in zip(y_block_shape, tiling)):
self.skipTest("RHS block shape not divisible by tiling.")
scratch_shape = [
jax.ShapeDtypeStruct(tile_shape(x_block_shape, tiling), in_jax_dtype),
jax.ShapeDtypeStruct(tile_shape(y_block_shape, tiling), in_jax_dtype),
mgpu.TMABarrier(2),
mgpu.Barrier(1),
mgpu.ClusterBarrier(collective_dims=(gpu.Dimension.x,)),
mgpu.TMEM((128, n), out_jax_dtype, collective=True),
mgpu.TMEM((128, k), in_jax_dtype, collective=True, packing=2),
]
z = mgpu.as_gpu_kernel(
kernel,
(2, 1, 1),
(128, 1, 1),
(x, y),
out_shape,
scratch_shape,
cluster=(2, 1, 1),
)(x, y)
x32, y32 = x.astype(np.float32), y.astype(np.float32)
ref = x32 @ y32
atol = 2e-2 if out_jax_dtype == jnp.float16 else 5e-6
np.testing.assert_allclose(z, ref, atol=atol)
def test_raises_error_if_tmem_oom(self):
def kernel(ctx, input, output, scratch):
del ctx, input, output, scratch
x = jnp.arange(128 * 128, dtype=jnp.float32).reshape(128, 128)
scratch_shape = [
mgpu.TMEM((128, 384), jnp.float32), # Should round up to 512 columns.
mgpu.TMEM((128, 64), jnp.float32), # Will trigger OOM.
]
with self.assertRaisesRegex(ValueError,
"Total TMEM allocation exceeds memory limit."):
mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), x, x, scratch_shape
)(x).block_until_ready()
def test_raises_error_if_collective_tmem_without_cluster(self):
def kernel(ctx, input, output, scratch):
del ctx, input, output, scratch
x = jnp.arange(128 * 128, dtype=jnp.float32).reshape(128, 128)
scratch_shape = [mgpu.TMEM((128, 384), jnp.float32, collective=True)]
with self.assertRaisesRegex(
ValueError,
"Collective TMEM allocations are only supported for clusters with an"
" even number of blocks in them.",
):
mgpu.as_gpu_kernel(
kernel, (1, 1, 1), (128, 1, 1), x, x, scratch_shape
)(x).block_until_ready()
@parameterized.parameters((0,), (1,), (2,))
def test_cluster_launch_control(self, dim):
# Let's say we have 148 SMs in our gpu. We attempt to schedule 149 blocks on
# 148 SMs. Only one SM will succeed in stealing the 149th block, and the
# others will fail. Therefore we test that there is exactly 1 stolen block
# and the others fail and return -1.
num_sms = jax.devices()[0].core_count
num_blocks = num_sms + 1
grid = [1, 1, 1]
grid[dim] = num_blocks
def kernel(ctx, out, scratch):
del ctx
cancel_result_ref, barrier, _ = scratch
is_leader_thread = single_thread_predicate()
barrier.arrive_expect_tx(16, predicate=is_leader_thread)
mgpu.try_cluster_cancel(cancel_result_ref, barrier, is_leader_thread)
barrier.wait()
*cta_ids, cancelled_launch = mgpu.query_cluster_cancel(cancel_result_ref)
cta_id = arith.addi(cta_ids[0], arith.addi(cta_ids[1], cta_ids[2]))
# Store a sentinel value if no work can be scheduled.
idx = arith.index_cast(ir.IndexType.get(), utils.block_idx())
sentinel_val = arith.constant(ir.IntegerType.get_signless(32), -1)
value = arith.select(cancelled_launch, cta_id, sentinel_val)
memref.store(value, out, [idx])
cancel_result_ref = jax.ShapeDtypeStruct((16,), jnp.int8) # 128 bits
out_ty = jax.ShapeDtypeStruct((num_sms,), jnp.int32)
scratch = (
cancel_result_ref,
mgpu.Barrier(1),
# Requesting SMEM close to the 228kb limit to ensure that each SM only
# schedules 1 block.
jax.ShapeDtypeStruct((220 * 1024,), jnp.int8),
)
out = mgpu.as_gpu_kernel(kernel, grid, (128, 1, 1), (), out_ty, scratch)()
out = np.sort(out)
out_ref = np.array([-1] * (num_sms - 1) + [num_sms])
np.testing.assert_array_equal(out, out_ref)
| TCGen05Test |
python | numba__numba | numba/core/typing/npydecl.py | {
"start": 21561,
"end": 24251
} | class ____(object):
def matmul_typer(self, a, b, out=None):
"""
Typer function for Numpy matrix multiplication.
"""
if not isinstance(a, types.Array) or not isinstance(b, types.Array):
return
if not all(x.ndim in (1, 2) for x in (a, b)):
raise TypingError("%s only supported on 1-D and 2-D arrays"
% (self.func_name, ))
# Output dimensionality
ndims = set([a.ndim, b.ndim])
if ndims == set([2]):
# M * M
out_ndim = 2
elif ndims == set([1, 2]):
# M* V and V * M
out_ndim = 1
elif ndims == set([1]):
# V * V
out_ndim = 0
if out is not None:
if out_ndim == 0:
raise TypingError(
"explicit output unsupported for vector * vector")
elif out.ndim != out_ndim:
raise TypingError(
"explicit output has incorrect dimensionality")
if not isinstance(out, types.Array) or out.layout != 'C':
raise TypingError("output must be a C-contiguous array")
all_args = (a, b, out)
else:
all_args = (a, b)
if not (config.DISABLE_PERFORMANCE_WARNINGS or
all(x.layout in 'CF' for x in (a, b))):
msg = ("%s is faster on contiguous arrays, called on %s" %
(self.func_name, (a, b)))
warnings.warn(NumbaPerformanceWarning(msg))
if not all(x.dtype == a.dtype for x in all_args):
raise TypingError("%s arguments must all have "
"the same dtype" % (self.func_name,))
if not isinstance(a.dtype, (types.Float, types.Complex)):
raise TypingError("%s only supported on "
"float and complex arrays"
% (self.func_name,))
if out:
return out
elif out_ndim > 0:
return types.Array(a.dtype, out_ndim, 'C')
else:
return a.dtype
def _check_linalg_matrix(a, func_name):
if not isinstance(a, types.Array):
return
if not a.ndim == 2:
raise TypingError("np.linalg.%s() only supported on 2-D arrays"
% func_name)
if not isinstance(a.dtype, (types.Float, types.Complex)):
raise TypingError("np.linalg.%s() only supported on "
"float and complex arrays" % func_name)
# -----------------------------------------------------------------------------
# Miscellaneous functions
@infer_global(np.ndenumerate)
| MatMulTyperMixin |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/refurb/FURB180.py | {
"start": 411,
"end": 485
} | class ____(B0, before_metaclass=1, metaclass=abc.ABCMeta):
pass
# OK
| A3 |
python | apache__airflow | airflow-core/tests/unit/dag_processing/bundles/test_base.py | {
"start": 6458,
"end": 9011
} | class ____:
@pytest.mark.parametrize(
("threshold_hours", "min_versions", "when_hours", "expected_remaining"),
[
(3, 0, 3, 5),
(3, 0, 6, 2),
(10, 0, 3, 5),
(10, 0, 6, 5),
(0, 0, 3, 2), # two of them are in future
(0, 0, 6, 0), # all of them are in past
(0, 5, 3, 5), # keep all no matter what
(0, 5, 6, 5), # keep all no matter what
(0, 4, 3, 4), # keep 4 no matter what
(0, 4, 6, 4), # keep 4 no matter what
],
)
@patch("airflow.dag_processing.bundles.base.get_bundle_tracking_dir")
def test_that_stale_bundles_are_removed(
self, mock_get_dir, threshold_hours, min_versions, when_hours, expected_remaining
):
age_threshold = threshold_hours * 60 * 60
with (
conf_vars(
{
("dag_processor", "stale_bundle_cleanup_age_threshold"): str(age_threshold),
("dag_processor", "stale_bundle_cleanup_min_versions"): str(min_versions),
}
),
tempfile.TemporaryDirectory() as td,
):
bundle_tracking_dir = Path(td)
mock_get_dir.return_value = bundle_tracking_dir
h0 = tz.datetime(2025, 1, 1, 0)
bundle_name = "abc"
for num in range(5):
with time_machine.travel(h0 + timedelta(hours=num), tick=False):
version = f"hour-{num}"
b = FakeBundle(version=version, name=bundle_name)
b.path.mkdir(exist_ok=True, parents=True)
with BundleVersionLock(
bundle_name=bundle_name,
bundle_version=version,
):
print(version)
lock_files = list(bundle_tracking_dir.iterdir())
assert len(lock_files) == 5
bundle_folders = list(b.versions_dir.iterdir())
assert len(bundle_folders) == 5
num += 1
with time_machine.travel(h0 + timedelta(hours=when_hours), tick=False):
BundleUsageTrackingManager()._remove_stale_bundle_versions_for_bundle(bundle_name=bundle_name)
lock_files = list(bundle_tracking_dir.iterdir())
assert len(lock_files) == expected_remaining
bundle_folders = list(b.versions_dir.iterdir())
assert len(bundle_folders) == expected_remaining
| TestBundleUsageTrackingManager |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 222275,
"end": 223124
} | class ____(Operation):
def call(self, x):
x = backend.convert_to_tensor(x)
return backend.numpy.sqrt(x)
def compute_output_spec(self, x):
dtype = (
backend.floatx()
if backend.standardize_dtype(x.dtype) == "int64"
else dtypes.result_type(x.dtype, float)
)
sparse = getattr(x, "sparse", False)
return KerasTensor(x.shape, dtype=dtype, sparse=sparse)
@keras_export(["keras.ops.sqrt", "keras.ops.numpy.sqrt"])
def sqrt(x):
"""Return the non-negative square root of a tensor, element-wise.
Args:
x: Input tensor.
Returns:
Output tensor, the non-negative square root of `x`.
"""
if any_symbolic_tensors((x,)):
return Sqrt().symbolic_call(x)
x = backend.convert_to_tensor(x)
return backend.numpy.sqrt(x)
| Sqrt |
python | PrefectHQ__prefect | src/integrations/prefect-email/tests/conftest.py | {
"start": 227,
"end": 661
} | class ____:
def __enter__(self):
return self
def __exit__(self, *exc):
return False
def send_message(self, message):
return message
@pytest.fixture
def email_server_credentials():
email_server_credentials = MagicMock(username="someone@email.com")
email_server_credentials.get_server.side_effect = lambda: EmailServerMethodsMock()
return email_server_credentials
| EmailServerMethodsMock |
python | ZoranPandovski__al-go-rithms | search/ternary_search/python/ternary_sr.py | {
"start": 0,
"end": 148
} | class ____:
def __init__(self, data=None):
self.data = data
self.right = None
self.left = None
self.eq = None
| Node |
python | apache__avro | lang/py/avro/test/test_protocol.py | {
"start": 14974,
"end": 15919
} | class ____(unittest.TestCase):
"""Enable generating error protocol test cases across all the valid test protocols."""
def __init__(self, test_proto):
"""Ignore the normal signature for unittest.TestCase because we are generating
many test cases from this one class. This is safe as long as the autoloader
ignores this class. The autoloader will ignore this class as long as it has
no methods starting with `test_`.
"""
super().__init__("check_error_protocol_exists")
self.test_proto = test_proto
def check_error_protocol_exists(self):
"""Protocol messages should always have at least a string error protocol."""
p = self.test_proto.parse()
if p.messages is not None:
for k, m in p.messages.items():
self.assertIsNotNone(m.errors, f"Message {k} did not have the expected implicit string error protocol.")
| ErrorProtocolTestCase |
python | spyder-ide__spyder | spyder/plugins/statusbar/widgets/status.py | {
"start": 1115,
"end": 2087
} | class ____(BaseTimerStatus):
""""Add clock to statusbar in a fullscreen mode."""
ID = 'clock_status'
def get_value(self):
"""Return the time."""
from time import localtime, strftime
text = strftime("%H:%M", localtime())
return text.rjust(3)
def get_tooltip(self):
"""Return the widget tooltip text."""
return _('Clock')
def test():
from qtpy.QtWidgets import QMainWindow
from spyder.utils.qthelpers import qapplication
app = qapplication(test_time=5)
win = QMainWindow()
win.setWindowTitle("Status widgets test")
win.resize(900, 300)
status_widgets = []
statusbar = win.statusBar()
for status_class in (MemoryStatus, CPUStatus, ClockStatus):
status_widget = status_class(win)
statusbar.insertPermanentWidget(0, status_widget)
status_widgets.append(status_widget)
win.show()
app.exec_()
if __name__ == "__main__":
test()
| ClockStatus |
python | modin-project__modin | modin/tests/pandas/dataframe/test_map_metadata.py | {
"start": 19829,
"end": 64285
} | class ____:
"""This class contains test and test usilities for the ``LazyProxyCategoricalDtype`` class."""
@staticmethod
def _get_lazy_proxy():
"""
Build a dataframe containing a column that has a proxy type and return
this proxy together with an original dtype that this proxy is emulating.
Returns
-------
(LazyProxyCategoricalDtype, pandas.CategoricalDtype, modin.pandas.DataFrame)
"""
nchunks = 3
pandas_df = pandas.DataFrame({"a": [1, 1, 2, 2, 3, 2], "b": [1, 2, 3, 4, 5, 6]})
original_dtype = pandas_df.astype({"a": "category"}).dtypes["a"]
chunks = split_result_of_axis_func_pandas(
axis=0,
num_splits=nchunks,
result=pandas_df,
min_block_size=MinRowPartitionSize.get(),
length_list=[2, 2, 2],
)
if StorageFormat.get() == "Pandas":
df = pd.concat([pd.DataFrame(chunk) for chunk in chunks])
assert df._query_compiler._modin_frame._partitions.shape == (nchunks, 1)
df = df.astype({"a": "category"})
return df.dtypes["a"], original_dtype, df
else:
raise NotImplementedError()
def test_update_proxy(self):
"""Verify that ``LazyProxyCategoricalDtype._update_proxy`` method works as expected."""
lazy_proxy, _, _ = self._get_lazy_proxy()
new_parent = pd.DataFrame({"a": [10, 20, 30]})._query_compiler._modin_frame
assert isinstance(lazy_proxy, LazyProxyCategoricalDtype)
# When we try to create a new proxy from the same arguments it should return itself
assert (
lazy_proxy._update_proxy(lazy_proxy._parent, lazy_proxy._column_name)
is lazy_proxy
)
# When any of the arguments is changing we should create a new proxy
proxy_with_new_column = lazy_proxy._update_proxy(
lazy_proxy._parent, "other_column"
)
assert proxy_with_new_column is not lazy_proxy and isinstance(
proxy_with_new_column, LazyProxyCategoricalDtype
)
# When any of the arguments is changing we should create a new proxy
proxy_with_new_parent = lazy_proxy._update_proxy(
new_parent, lazy_proxy._column_name
)
assert proxy_with_new_parent is not lazy_proxy and isinstance(
proxy_with_new_parent, LazyProxyCategoricalDtype
)
lazy_proxy.categories # trigger materialization
# `._update_proxy` now should produce pandas Categoricals instead of a proxy as it already has materialized data
assert (
type(lazy_proxy._update_proxy(lazy_proxy._parent, lazy_proxy._column_name))
== pandas.CategoricalDtype
)
def test_update_proxy_implicit(self):
"""
Verify that a lazy proxy correctly updates its parent when passed from one parent to another.
"""
lazy_proxy, _, parent = self._get_lazy_proxy()
parent_frame = parent._query_compiler._modin_frame
if StorageFormat.get() == "Pandas":
assert lazy_proxy._parent is parent_frame
else:
raise NotImplementedError(
f"The test is not implemented for {StorageFormat.get()} storage format"
)
# Making a copy of the dataframe, the new proxy should now start pointing to the new parent
new_parent = parent.copy()
new_parent_frame = new_parent._query_compiler._modin_frame
new_lazy_proxy = new_parent_frame.dtypes[lazy_proxy._column_name]
if StorageFormat.get() == "Pandas":
# Make sure that the old proxy still pointing to the old parent
assert lazy_proxy._parent is parent_frame
assert new_lazy_proxy._parent is new_parent_frame
else:
raise NotImplementedError(
f"The test is not implemented for {StorageFormat.get()} storage format"
)
    def test_if_proxy_lazy(self):
        """Verify that proxy is able to pass simple comparison checks without triggering materialization."""
        lazy_proxy, actual_dtype, _ = self._get_lazy_proxy()
        assert isinstance(lazy_proxy, LazyProxyCategoricalDtype)
        assert not lazy_proxy._is_materialized
        # The checks below are "cheap": none of them should require the
        # actual category values, so the proxy must stay unmaterialized.
        assert lazy_proxy == "category"
        assert isinstance(lazy_proxy, pd.CategoricalDtype)
        assert isinstance(lazy_proxy, pandas.CategoricalDtype)
        assert str(lazy_proxy) == "category"
        assert str(lazy_proxy) == str(actual_dtype)
        assert not lazy_proxy.ordered
        assert not lazy_proxy._is_materialized
        # Further, there are all checks that materialize categories
        assert lazy_proxy == actual_dtype
        assert actual_dtype == lazy_proxy
        assert repr(lazy_proxy) == repr(actual_dtype)
        assert lazy_proxy.categories.equals(actual_dtype.categories)
        assert lazy_proxy._is_materialized
    def test_proxy_as_dtype(self):
        """Verify that proxy can be used as an actual dtype."""
        lazy_proxy, actual_dtype, _ = self._get_lazy_proxy()
        assert isinstance(lazy_proxy, LazyProxyCategoricalDtype)
        assert not lazy_proxy._is_materialized
        modin_df2, pandas_df2 = create_test_dfs({"c": [2, 2, 3, 4, 5, 6]})
        # `astype` with the proxy on the modin side must behave exactly like
        # `astype` with the real pandas CategoricalDtype on the pandas side.
        eval_general(
            (modin_df2, lazy_proxy),
            (pandas_df2, actual_dtype),
            lambda args: args[0].astype({"c": args[1]}),
        )
    def test_proxy_with_pandas_constructor(self):
        """Verify that users still can use pandas' constructor using `type(cat)(...)` notation."""
        lazy_proxy, _, _ = self._get_lazy_proxy()
        assert isinstance(lazy_proxy, LazyProxyCategoricalDtype)
        new_cat_values = pandas.Index([3, 4, 5])
        # Calling `type(proxy)(...)` should build a regular (materialized)
        # categorical dtype without touching the original proxy.
        new_category_dtype = type(lazy_proxy)(categories=new_cat_values, ordered=True)
        assert not lazy_proxy._is_materialized
        assert new_category_dtype._is_materialized
        assert new_category_dtype.categories.equals(new_cat_values)
        assert new_category_dtype.ordered
def test_infer_objects_single_partition():
    """`infer_objects` on a sliced single-partition frame matches pandas, dtypes included."""
    raw = {"a": ["s", 2, 3]}
    # Slicing off the string row lets `infer_objects` promote the column.
    md_inferred = pd.DataFrame(raw).iloc[1:].infer_objects()
    pd_inferred = pandas.DataFrame(raw).iloc[1:].infer_objects()
    df_equals(md_inferred, pd_inferred)
    assert md_inferred.dtypes.equals(pd_inferred.dtypes)
@pytest.mark.parametrize(
    "infer_objects", bool_arg_values, ids=arg_keys("infer_objects", bool_arg_keys)
)
@pytest.mark.parametrize(
    "convert_string", bool_arg_values, ids=arg_keys("convert_string", bool_arg_keys)
)
@pytest.mark.parametrize(
    "convert_integer", bool_arg_values, ids=arg_keys("convert_integer", bool_arg_keys)
)
@pytest.mark.parametrize(
    "convert_boolean", bool_arg_values, ids=arg_keys("convert_boolean", bool_arg_keys)
)
@pytest.mark.parametrize(
    "convert_floating", bool_arg_values, ids=arg_keys("convert_floating", bool_arg_keys)
)
@pytest.mark.exclude_in_sanity
def test_convert_dtypes_single_partition(
    infer_objects, convert_string, convert_integer, convert_boolean, convert_floating
):
    """`convert_dtypes` produces the same dtypes as pandas for every flag combination."""
    # Sanity check, copied from pandas documentation:
    # https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.convert_dtypes.html
    data = {
        "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
        "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
        "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")),
        "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")),
        "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")),
        "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
    }
    kwargs = {
        "infer_objects": infer_objects,
        "convert_string": convert_string,
        "convert_integer": convert_integer,
        "convert_boolean": convert_boolean,
        "convert_floating": convert_floating,
    }
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    modin_result = modin_df.convert_dtypes(**kwargs)
    pandas_result = pandas_df.convert_dtypes(**kwargs)
    # Only the resulting dtypes are compared here
    assert modin_result.dtypes.equals(pandas_result.dtypes)
@pytest.mark.parametrize("dtype_backend", ["numpy_nullable", "pyarrow"])
def test_convert_dtypes_dtype_backend(dtype_backend):
    """`convert_dtypes(dtype_backend=...)` matches pandas in both values and dtypes."""
    data = {
        "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
        "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
        "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")),
        "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")),
        "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")),
        "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
    }

    def comparator(df1, df2):
        # Compare the values as well as the resulting dtypes
        df_equals(df1, df2)
        df_equals(df1.dtypes, df2.dtypes)

    eval_general(
        *create_test_dfs(data),
        lambda df: df.convert_dtypes(dtype_backend=dtype_backend),
        comparator=comparator,
    )
@pytest.mark.skipif(
    current_execution_is_native(),
    reason="NativeQueryCompiler does not contain partitions.",
)
def test_convert_dtypes_multiple_row_partitions():
    """`convert_dtypes` on a frame built from differently-typed row partitions matches pandas."""
    # Column 0 should have string dtype
    modin_part1 = pd.DataFrame(["a"]).convert_dtypes()
    # Column 0 should have an int dtype
    modin_part2 = pd.DataFrame([1]).convert_dtypes()
    modin_df = pd.concat([modin_part1, modin_part2])
    if StorageFormat.get() == "Pandas":
        # The two parts must land in two separate row partitions
        assert modin_df._query_compiler._modin_frame._partitions.shape == (2, 1)
    pandas_df = pandas.DataFrame(["a", 1], index=[0, 0])
    # The initial dataframes should be the same
    df_equals(modin_df, pandas_df)
    # TODO(https://github.com/modin-project/modin/pull/3805): delete
    # this assert once df_equals checks dtypes
    assert modin_df.dtypes.equals(pandas_df.dtypes)
    modin_result = modin_df.convert_dtypes()
    pandas_result = pandas_df.convert_dtypes()
    df_equals(modin_result, pandas_result)
    assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_convert_dtypes_5653():
    """
    Regression test for https://github.com/modin-project/modin/issues/5653.

    `convert_dtypes` must infer a `string` dtype even when one row partition
    consists entirely of None values.
    """
    modin_part1 = pd.DataFrame({"col1": ["a", "b", "c", "d"]})
    modin_part2 = pd.DataFrame({"col1": [None, None, None, None]})
    modin_df = pd.concat([modin_part1, modin_part2])
    if StorageFormat.get() == "Pandas":
        # Ensure the all-None part really is a separate partition
        assert modin_df._query_compiler._modin_frame._partitions.shape == (2, 1)
    modin_df = modin_df.convert_dtypes()
    assert len(modin_df.dtypes) == 1
    assert modin_df.dtypes.iloc[0] == "string"
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("bound_type", ["list", "series"], ids=["list", "series"])
@pytest.mark.exclude_in_sanity
def test_clip(request, data, axis, bound_type):
    """`clip` matches pandas for scalar and list/Series bounds on either axis."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    if name_contains(request.node.name, numeric_dfs):
        # Vector bounds must have the length of the clipped axis
        ind_len = (
            len(modin_df.index)
            if not pandas.DataFrame()._get_axis_number(axis)
            else len(modin_df.columns)
        )
        # set bounds
        lower, upper = np.sort(random_state.randint(RAND_LOW, RAND_HIGH, 2))
        # test only upper scalar bound
        modin_result = modin_df.clip(None, upper, axis=axis)
        pandas_result = pandas_df.clip(None, upper, axis=axis)
        df_equals(modin_result, pandas_result)
        # test lower and upper scalar bound
        modin_result = modin_df.clip(lower, upper, axis=axis)
        pandas_result = pandas_df.clip(lower, upper, axis=axis)
        df_equals(modin_result, pandas_result)
        lower = random_state.randint(RAND_LOW, RAND_HIGH, ind_len)
        upper = random_state.randint(RAND_LOW, RAND_HIGH, ind_len)
        if bound_type == "series":
            modin_lower = pd.Series(lower)
            pandas_lower = pandas.Series(lower)
            modin_upper = pd.Series(upper)
            pandas_upper = pandas.Series(upper)
        else:
            modin_lower = pandas_lower = lower
            modin_upper = pandas_upper = upper
        # test lower and upper list bound on each column
        modin_result = modin_df.clip(modin_lower, modin_upper, axis=axis)
        pandas_result = pandas_df.clip(pandas_lower, pandas_upper, axis=axis)
        df_equals(modin_result, pandas_result)
        # test only upper list bound on each column
        modin_result = modin_df.clip(np.nan, modin_upper, axis=axis)
        pandas_result = pandas_df.clip(np.nan, pandas_upper, axis=axis)
        df_equals(modin_result, pandas_result)
    # A list-like bound with no axis is ambiguous and must raise
    with pytest.raises(ValueError):
        modin_df.clip(lower=[1, 2, 3], axis=None)
def test_clip_4485():
    """Regression test for https://github.com/modin-project/modin/issues/4485:
    list-like lower bound on a one-column frame."""
    df_equals(
        pd.DataFrame([1]).clip([3]),
        pandas.DataFrame([1]).clip([3]),
    )
def test_drop():
    """`drop` matches pandas across label/axis combinations, `errors='ignore'`,
    non-unique labels, index-based drops, and MultiIndex level drops."""
    frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
    simple = pandas.DataFrame(frame_data)
    modin_simple = pd.DataFrame(frame_data)
    df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
    df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
    df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
    df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
    # Missing labels raise KeyError by default
    pytest.raises(KeyError, modin_simple.drop, 5)
    pytest.raises(KeyError, modin_simple.drop, "C", axis=1)
    pytest.raises(KeyError, modin_simple.drop, [1, 5])
    pytest.raises(KeyError, modin_simple.drop, ["A", "C"], axis=1)
    # errors = 'ignore'
    df_equals(modin_simple.drop(5, errors="ignore"), simple)
    df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
    df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
    df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
    # non-unique
    nu_df = pandas.DataFrame(
        zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
    )
    modin_nu_df = pd.DataFrame(nu_df)
    df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
    df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
    df_equals(modin_nu_df.drop([]), nu_df)
    # non-unique index labels
    nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
    nu_df.columns = list("abc")
    modin_nu_df = pd.DataFrame(nu_df)
    df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
    df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
    # inplace cache issue
    frame_data = random_state.randn(10, 3)
    df = pandas.DataFrame(frame_data, columns=list("abc"))
    modin_df = pd.DataFrame(frame_data, columns=list("abc"))
    expected = df[~(df.b > 0)]
    modin_df.drop(labels=df[df.b > 0].index, inplace=True)
    df_equals(modin_df, expected)
    # Dropping by MultiIndex level currently defaults to pandas
    midx = pd.MultiIndex(
        levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
        codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
    )
    df = pd.DataFrame(
        index=midx,
        columns=["big", "small"],
        data=[
            [45, 30],
            [200, 100],
            [1.5, 1],
            [30, 20],
            [250, 150],
            [1.5, 0.8],
            [320, 250],
            [1, 0.8],
            [0.3, 0.2],
        ],
    )
    with warns_that_defaulting_to_pandas_if(
        not df_or_series_using_native_execution(df)
    ):
        df.drop(index="length", level=1)
def test_drop_api_equivalence():
    """The labels/axis API and the index/columns API of `drop` are interchangeable,
    and mixing the two at once raises ValueError."""
    # equivalence of the labels/axis and index/columns API's
    frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
    modin_df = pd.DataFrame(frame_data, index=["a", "b", "c"], columns=["d", "e", "f"])
    modin_df1 = modin_df.drop("a")
    modin_df2 = modin_df.drop(index="a")
    df_equals(modin_df1, modin_df2)
    modin_df1 = modin_df.drop("d", axis=1)
    modin_df2 = modin_df.drop(columns="d")
    df_equals(modin_df1, modin_df2)
    modin_df1 = modin_df.drop(labels="e", axis=1)
    modin_df2 = modin_df.drop(columns="e")
    df_equals(modin_df1, modin_df2)
    modin_df1 = modin_df.drop(["a"], axis=0)
    modin_df2 = modin_df.drop(index=["a"])
    df_equals(modin_df1, modin_df2)
    modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
    modin_df2 = modin_df.drop(index=["a"], columns=["d"])
    df_equals(modin_df1, modin_df2)
    # Combining `labels` with `index`/`columns` is ambiguous
    with pytest.raises(ValueError):
        modin_df.drop(labels="a", index="b")
    with pytest.raises(ValueError):
        modin_df.drop(labels="a", columns="b")
    # `axis` alone says nothing about what to drop
    with pytest.raises(ValueError):
        modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(data):
    """`drop` on a transposed frame matches pandas for columns, index, and both."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    for kwargs in (
        {"columns": [0, 1, 2]},
        {"index": ["col3", "col1"]},
        {"columns": [0, 1, 2], "index": ["col3", "col1"]},
    ):
        df_equals(modin_df.T.drop(**kwargs), pandas_df.T.drop(**kwargs))
def test_droplevel():
    """Smoke-test `droplevel` on both axes of a frame with MultiIndex rows and columns."""
    frame = (
        pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
        .set_index([0, 1])
        .rename_axis(["a", "b"])
    )
    frame.columns = pd.MultiIndex.from_tuples(
        [("c", "e"), ("d", "f")], names=["level_1", "level_2"]
    )
    # The results are discarded: this only checks that neither call raises.
    frame.droplevel("a")
    frame.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
    "data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
    "keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
    "subset",
    [None, "col1", "name", ("col1", "col3"), ["col1", "col3", "col7"]],
    ids=["None", "string", "name", "tuple", "list"],
)
@pytest.mark.parametrize("ignore_index", [True, False], ids=["True", "False"])
@pytest.mark.exclude_in_sanity
def test_drop_duplicates(data, keep, subset, ignore_index):
    """`drop_duplicates` matches pandas for all keep/subset/ignore_index combos,
    both non-inplace and inplace, mirroring pandas' exceptions when they occur."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # Non-inplace path: if pandas raises, modin must raise the same exception type
    try:
        pandas_df.drop_duplicates(
            keep=keep, inplace=False, subset=subset, ignore_index=ignore_index
        )
    except Exception as err:
        with pytest.raises(type(err)):
            modin_df.drop_duplicates(
                keep=keep, inplace=False, subset=subset, ignore_index=ignore_index
            )
    else:
        sort_if_range_partitioning(
            pandas_df.drop_duplicates(
                keep=keep, inplace=False, subset=subset, ignore_index=ignore_index
            ),
            modin_df.drop_duplicates(
                keep=keep, inplace=False, subset=subset, ignore_index=ignore_index
            ),
        )
    # Inplace path: same exception mirroring, then compare the mutated frames
    try:
        pandas_df.drop_duplicates(
            keep=keep, inplace=True, subset=subset, ignore_index=ignore_index
        )
    except Exception as err:
        with pytest.raises(type(err)):
            modin_df.drop_duplicates(
                keep=keep, inplace=True, subset=subset, ignore_index=ignore_index
            )
    else:
        modin_df.drop_duplicates(
            keep=keep, inplace=True, subset=subset, ignore_index=ignore_index
        )
        sort_if_range_partitioning(modin_df, pandas_df)
def test_drop_duplicates_with_missing_index_values():
    """`sort_values` followed by `drop_duplicates` matches pandas on a frame
    whose integer index has gaps (non-contiguous labels)."""
    data = {
        "columns": ["value", "time", "id"],
        # Note the gaps in the labels (…15, 20… and …27, 32…)
        "index": [
            4,
            5,
            6,
            7,
            8,
            9,
            10,
            11,
            12,
            13,
            14,
            15,
            20,
            21,
            22,
            23,
            24,
            25,
            26,
            27,
            32,
            33,
            34,
            35,
            36,
            37,
            38,
            39,
            40,
            41,
        ],
        "data": [
            ["3", 1279213398000.0, 88.0],
            ["3", 1279204682000.0, 88.0],
            ["0", 1245772835000.0, 448.0],
            ["0", 1270564258000.0, 32.0],
            ["0", 1267106669000.0, 118.0],
            ["7", 1300621123000.0, 5.0],
            ["0", 1251130752000.0, 957.0],
            ["0", 1311683506000.0, 62.0],
            ["9", 1283692698000.0, 89.0],
            ["9", 1270234253000.0, 64.0],
            ["0", 1285088818000.0, 50.0],
            ["0", 1218212725000.0, 695.0],
            ["2", 1383933968000.0, 348.0],
            ["0", 1368227625000.0, 257.0],
            ["1", 1454514093000.0, 446.0],
            ["1", 1428497427000.0, 134.0],
            ["1", 1459184936000.0, 568.0],
            ["1", 1502293302000.0, 599.0],
            ["1", 1491833358000.0, 829.0],
            ["1", 1485431534000.0, 806.0],
            ["8", 1351800505000.0, 101.0],
            ["0", 1357247721000.0, 916.0],
            ["0", 1335804423000.0, 370.0],
            ["24", 1327547726000.0, 720.0],
            ["0", 1332334140000.0, 415.0],
            ["0", 1309543100000.0, 30.0],
            ["18", 1309541141000.0, 30.0],
            ["0", 1298979435000.0, 48.0],
            ["14", 1276098160000.0, 59.0],
            ["0", 1233936302000.0, 109.0],
        ],
    }
    pandas_df = pandas.DataFrame(
        data["data"], index=data["index"], columns=data["columns"]
    )
    modin_df = pd.DataFrame(data["data"], index=data["index"], columns=data["columns"])
    modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
    pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
    sort_if_range_partitioning(modin_result, pandas_result)
def test_drop_duplicates_after_sort():
    """`drop_duplicates` applied right after `sort_values` matches pandas."""
    records = [
        {"value": 1, "time": 2},
        {"value": 1, "time": 1},
        {"value": 2, "time": 1},
        {"value": 2, "time": 2},
    ]
    md_df, pd_df = pd.DataFrame(records), pandas.DataFrame(records)
    md_result = md_df.sort_values(["value", "time"]).drop_duplicates(["value"])
    pd_result = pd_df.sort_values(["value", "time"]).drop_duplicates(["value"])
    sort_if_range_partitioning(md_result, pd_result)
def test_drop_duplicates_with_repeated_index_values():
    # This tests for issue #4467: https://github.com/modin-project/modin/issues/4467
    # All rows share the same index label; de-duplication must still match pandas.
    frames = create_test_dfs([[0], [1], [0]], index=[0, 0, 0])
    eval_general(
        *frames,
        lambda df: df.drop_duplicates(),
        comparator=sort_if_range_partitioning,
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(data, axis, how):
    """`dropna` matches pandas and rejects invalid `how`, conflicting
    `how`/`thresh`, and unknown subset labels."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # Invalid `how` value
    with pytest.raises(ValueError):
        modin_df.dropna(axis=axis, how="invalid")
    # `how=None` together with `thresh=None` is not allowed
    with pytest.raises(TypeError):
        modin_df.dropna(axis=axis, how=None, thresh=None)
    # Unknown subset label
    with pytest.raises(KeyError):
        modin_df.dropna(axis=axis, subset=["NotExists"], how=how)
    modin_result = modin_df.dropna(axis=axis, how=how)
    pandas_result = pandas_df.dropna(axis=axis, how=how)
    df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(data):
    """`dropna(inplace=True)` matches pandas with default args, `thresh`, and axis=1."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # Default args: compare inplace modin result to non-inplace pandas result
    pandas_result = pandas_df.dropna()
    modin_df.dropna(inplace=True)
    df_equals(modin_df, pandas_result)
    # thresh-based dropping, both inplace
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    pandas_df.dropna(thresh=2, inplace=True)
    modin_df.dropna(thresh=2, inplace=True)
    df_equals(modin_df, pandas_df)
    # column-wise dropping, both inplace
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    pandas_df.dropna(axis=1, how="any", inplace=True)
    modin_df.dropna(axis=1, how="any", inplace=True)
    df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(data):
    """Passing multiple axes to `dropna` must raise TypeError, as in pandas."""
    modin_df = pd.DataFrame(data)
    for bad_axis in ([0, 1], (0, 1)):
        with pytest.raises(TypeError):
            modin_df.dropna(how="all", axis=bad_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(request, data):
    """`dropna(subset=...)` matches pandas for column subsets (axis=0)
    and row subsets (axis=1)."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # Empty frames have no columns/rows to build a subset from
    if "empty_data" not in request.node.name:
        column_subset = modin_df.columns[0:2]
        df_equals(
            modin_df.dropna(how="all", subset=column_subset),
            pandas_df.dropna(how="all", subset=column_subset),
        )
        df_equals(
            modin_df.dropna(how="any", subset=column_subset),
            pandas_df.dropna(how="any", subset=column_subset),
        )
        row_subset = modin_df.index[0:2]
        df_equals(
            modin_df.dropna(how="all", axis=1, subset=row_subset),
            pandas_df.dropna(how="all", axis=1, subset=row_subset),
        )
        df_equals(
            modin_df.dropna(how="any", axis=1, subset=row_subset),
            pandas_df.dropna(how="any", axis=1, subset=row_subset),
        )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis,subset", [(0, list("EF")), (1, [4, 5])])
def test_dropna_subset_error(data, axis, subset):
    """`dropna` with subset labels absent from the frame raises the same KeyError as pandas."""
    eval_general(
        *create_test_dfs(data),
        lambda df: df.dropna(axis=axis, subset=subset),
        expected_exception=KeyError(["E", "F"]),
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("astype", ["category", "int32", "float"])
def test_insert_dtypes(data, astype, request):
    """Inserting a column cast to another dtype matches pandas."""
    modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
    # categories with NaN works incorrect for now
    if astype == "category" and pandas_df.iloc[:, 0].isnull().any():
        return
    expected_exception = None
    if "int32-float_nan_data" in request.node.callspec.id:
        pytest.xfail(reason="https://github.com/modin-project/modin/issues/7026")
    eval_insert(
        modin_df,
        pandas_df,
        col="TypeSaver",
        value=lambda df: df.iloc[:, 0].astype(astype),
        expected_exception=expected_exception,
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("loc", [-3, 0, 3])
def test_insert_loc(data, loc):
    """`insert` at various positions matches pandas; negative loc raises like pandas."""
    modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
    expected_exception = None
    if loc == -3:
        # pandas rejects negative insert positions
        expected_exception = ValueError("unbounded slice")
    eval_insert(
        modin_df,
        pandas_df,
        loc=loc,
        value=lambda df: df.iloc[:, 0],
        expected_exception=expected_exception,
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_insert(data):
    """`insert` matches pandas for valid values (Series, scalar, DataFrame,
    misaligned index, 2d list) and raises pandas-identical errors otherwise."""
    modin_df, pandas_df = pd.DataFrame(data), pandas.DataFrame(data)
    eval_insert(
        modin_df, pandas_df, col="Duplicate", value=lambda df: df[df.columns[0]]
    )
    eval_insert(modin_df, pandas_df, col="Scalar", value=100)
    # Inserting a Series into an empty frame
    eval_insert(
        pd.DataFrame(columns=list("ab")),
        pandas.DataFrame(columns=list("ab")),
        col="Series insert",
        value=lambda df: df[df.columns[0]],
    )
    eval_insert(
        modin_df,
        pandas_df,
        col="DataFrame insert",
        value=lambda df: df[[df.columns[0]]],
    )
    # A one-column frame with a reversed index should be realigned on insert
    eval_insert(
        modin_df,
        pandas_df,
        col="Different indices",
        value=lambda df: df[[df.columns[0]]].set_index(df.index[::-1]),
    )
    eval_insert(
        modin_df,
        pandas_df,
        col="2d list insert",
        value=lambda df: [[1, 2]] * len(df),
    )
    # Bad inserts
    eval_insert(
        modin_df,
        pandas_df,
        col="Bad Column",
        value=lambda df: df,
        expected_exception=ValueError(
            f"Expected a one-dimensional object, got a DataFrame with {len(pandas_df.columns)} columns instead."
        ),
    )
    eval_insert(
        modin_df,
        pandas_df,
        col="Too Short",
        value=lambda df: list(df[df.columns[0]])[:-1],
        expected_exception=ValueError(
            f"Length of values ({len(pandas_df)-1}) does not match length of index ({len(pandas_df)})"
        ),
    )
    # Inserting under an already existing column name
    eval_insert(
        modin_df,
        pandas_df,
        col=lambda df: df.columns[0],
        value=lambda df: df[df.columns[0]],
        expected_exception=ValueError("cannot insert 2d list insert, already exists"),
    )
    # Insert position past the end of the columns
    eval_insert(
        modin_df,
        pandas_df,
        loc=lambda df: len(df.columns) + 100,
        col="Bad Loc",
        value=100,
        expected_exception=IndexError(
            f"index {len(pandas_df.columns) + 100} is out of bounds for axis 0 with size {len(pandas_df.columns)}"
        ),
    )
def test_insert_4407():
    """
    Regression test for https://github.com/modin-project/modin/issues/4407.

    Inserting numpy arrays of various shapes must match pandas both in the
    resulting values and in the resulting dtypes (or raise the same error).
    """
    data = {"col1": [1, 2, 3], "col2": [2, 3, 4]}
    modin_df, pandas_df = create_test_dfs(data)

    def comparator(df1, df2):
        # Compare dtypes positionally (insert may change index type) before
        # comparing the values themselves.
        assert_series_equal(df1.dtypes, df2.dtypes, check_index=False)
        return df_equals(df1, df2)

    for idx, value in enumerate(
        (pandas_df.to_numpy(), np.array([[1]] * 3), np.array([[1, 2, 3], [4, 5, 6]]))
    ):
        expected_exception = None
        if idx == 0:
            # 2D array with multiple columns is rejected by pandas
            expected_exception = ValueError(
                "Expected a 1D array, got an array with shape (3, 2)"
            )
        elif idx == 2:
            # FIXME: https://github.com/modin-project/modin/issues/7080
            expected_exception = False
        eval_insert(
            modin_df,
            pandas_df,
            loc=0,
            col=f"test_col{idx}",
            value=value,
            # Pass the comparator directly; the original wrapped it in a
            # `lambda df1, df2: comparator(df1, df2)` that added nothing but
            # an extra call frame.
            comparator=comparator,
            expected_exception=expected_exception,
        )
def test_insert_modin_array():
    """Inserting a modin.numpy array behaves the same as inserting the
    equivalent plain numpy array."""
    from modin.numpy import array

    data = {"col1": [1, 2, 3], "col2": [2, 3, 4]}
    modin_df1, modin_df2 = pd.DataFrame(data), pd.DataFrame(data)
    np_value = np.array([7, 7, 7])
    md_np_value = array(np_value)
    modin_df1.insert(1, "new_col", np_value)
    modin_df2.insert(1, "new_col", md_np_value)
    df_equals(modin_df1, modin_df2)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ndim(data):
    """`DataFrame.ndim` matches pandas."""
    assert pd.DataFrame(data).ndim == pandas.DataFrame(data).ndim
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(data):
    """`DataFrame.notna` matches pandas elementwise."""
    df_equals(pd.DataFrame(data).notna(), pandas.DataFrame(data).notna())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(data):
    """`DataFrame.notnull` (alias of `notna`) matches pandas elementwise."""
    df_equals(pd.DataFrame(data).notnull(), pandas.DataFrame(data).notnull())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_round(data):
    """`DataFrame.round` matches pandas with both default and explicit precision."""
    md_df, pd_df = pd.DataFrame(data), pandas.DataFrame(data)
    for args in ((), (1,)):
        df_equals(md_df.round(*args), pd_df.round(*args))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_set_axis(data, axis):
    """
    `set_axis` matches pandas on both axes.

    Also verifies that after `set_axis(..., copy=False)` the relabeled frame
    differs from a copy taken before the call.
    """
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    x = pandas.DataFrame()._get_axis_number(axis)
    index = modin_df.columns if x else modin_df.index
    labels = [f"{index[i]}_{i}" for i in range(modin_df.shape[x])]
    eval_general(
        modin_df, pandas_df, lambda df: df.set_axis(labels, axis=axis, copy=True)
    )
    modin_df_copy = modin_df.copy()
    modin_df = modin_df.set_axis(labels, axis=axis, copy=False)
    # Check that the copy and original are different: `df_equals` raises
    # AssertionError on mismatch, which is exactly what is expected here.
    # (Replaces the old try/except/else "assert True / assert False" pattern.)
    with pytest.raises(AssertionError):
        df_equals(modin_df, modin_df_copy)
    pandas_df = pandas_df.set_axis(labels, axis=axis)
    df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("drop", bool_arg_values, ids=arg_keys("drop", bool_arg_keys))
@pytest.mark.parametrize(
    "append", bool_arg_values, ids=arg_keys("append", bool_arg_keys)
)
def test_set_index(request, data, drop, append):
    """
    `set_index` matches pandas for all drop/append combinations, both when it
    returns a new frame and when applied in place.
    """
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    # Empty frames have no column to set as the index
    if "empty_data" not in request.node.name:
        key = modin_df.columns[0]
        modin_result = modin_df.set_index(key, drop=drop, append=append, inplace=False)
        pandas_result = pandas_df.set_index(
            key, drop=drop, append=append, inplace=False
        )
        df_equals(modin_result, pandas_result)
        modin_df_copy = modin_df.copy()
        modin_df.set_index(key, drop=drop, append=append, inplace=True)
        # Check that the copy and original are different: `df_equals` raises
        # AssertionError on mismatch, which is exactly what is expected here.
        # (Replaces the old try/except/else "assert True / assert False" pattern.)
        with pytest.raises(AssertionError):
            df_equals(modin_df, modin_df_copy)
        pandas_df.set_index(key, drop=drop, append=append, inplace=True)
        df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_shape(data):
    """`DataFrame.shape` matches pandas."""
    assert pd.DataFrame(data).shape == pandas.DataFrame(data).shape
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_size(data):
    """`DataFrame.size` matches pandas."""
    assert pd.DataFrame(data).size == pandas.DataFrame(data).size
def test_squeeze():
    """`squeeze` matches pandas for multi-column, single-column, single-row,
    and single-cell frames, plus `iloc` on a datetime-indexed frame."""
    # Multi-column, multi-row: squeeze is a no-op
    frame_data = {
        "col1": [0, 1, 2, 3],
        "col2": [4, 5, 6, 7],
        "col3": [8, 9, 10, 11],
        "col4": [12, 13, 14, 15],
        "col5": [0, 0, 0, 0],
    }
    # Single column -> Series
    frame_data_2 = {"col1": [0, 1, 2, 3]}
    # Single row -> Series
    frame_data_3 = {
        "col1": [0],
        "col2": [4],
        "col3": [8],
        "col4": [12],
        "col5": [0],
    }
    # Single cell -> scalar
    frame_data_4 = {"col1": [2]}
    frame_data_5 = {"col1": ["string"]}
    # Different data for different cases
    pandas_df = pandas.DataFrame(frame_data).squeeze()
    modin_df = pd.DataFrame(frame_data).squeeze()
    df_equals(modin_df, pandas_df)
    pandas_df_2 = pandas.DataFrame(frame_data_2).squeeze()
    modin_df_2 = pd.DataFrame(frame_data_2).squeeze()
    df_equals(modin_df_2, pandas_df_2)
    pandas_df_3 = pandas.DataFrame(frame_data_3).squeeze()
    modin_df_3 = pd.DataFrame(frame_data_3).squeeze()
    df_equals(modin_df_3, pandas_df_3)
    pandas_df_4 = pandas.DataFrame(frame_data_4).squeeze()
    modin_df_4 = pd.DataFrame(frame_data_4).squeeze()
    df_equals(modin_df_4, pandas_df_4)
    pandas_df_5 = pandas.DataFrame(frame_data_5).squeeze()
    modin_df_5 = pd.DataFrame(frame_data_5).squeeze()
    df_equals(modin_df_5, pandas_df_5)
    # Single-row selection on a transposed, datetime-indexed frame
    data = [
        [
            pd.Timestamp("2019-01-02"),
            pd.Timestamp("2019-01-03"),
            pd.Timestamp("2019-01-04"),
            pd.Timestamp("2019-01-05"),
        ],
        [1, 1, 1, 2],
    ]
    df = pd.DataFrame(data, index=["date", "value"]).T
    pf = pandas.DataFrame(data, index=["date", "value"]).T
    df.set_index("date", inplace=True)
    pf.set_index("date", inplace=True)
    df_equals(df.iloc[0], pf.iloc[0])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_transpose(data):
    """`T`/`transpose` match pandas, including follow-up operations on the result."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    df_equals(modin_df.T, pandas_df.T)
    df_equals(modin_df.transpose(), pandas_df.transpose())
    # Test for map across full axis for select indices
    df_equals(modin_df.T.dropna(), pandas_df.T.dropna())
    # Test for map across full axis
    df_equals(modin_df.T.nunique(), pandas_df.T.nunique())
    # Test for map across blocks
    df_equals(modin_df.T.notna(), pandas_df.T.notna())
@pytest.mark.parametrize(
    "data, other_data",
    [
        ({"A": [1, 2, 3], "B": [400, 500, 600]}, {"B": [4, 5, 6], "C": [7, 8, 9]}),
        ({"C": [1, 2, 3], "B": [400, 500, 600]}, {"B": [4, 5, 6], "A": [7, 8, 9]}),
        (
            {"A": ["a", "b", "c"], "B": ["x", "y", "z"]},
            {"B": ["d", "e", "f", "g", "h", "i"]},
        ),
        ({"A": [1, 2, 3], "B": [400, 500, 600]}, {"B": [4, np.nan, 6]}),
    ],
)
@pytest.mark.parametrize("errors", ["raise", "ignore"])
def test_update(data, other_data, errors):
    """In-place `update` matches pandas, including the overlap ValueError
    raised when `errors='raise'`."""
    modin_df, pandas_df = create_test_dfs(data)
    other_modin_df, other_pandas_df = create_test_dfs(other_data)
    expected_exception = None
    if errors == "raise":
        expected_exception = ValueError("Data overlaps.")
    eval_general(
        modin_df,
        pandas_df,
        # Update each frame with the `other` frame of the matching flavor
        lambda df: (
            df.update(other_modin_df, errors=errors)
            if isinstance(df, pd.DataFrame)
            else df.update(other_pandas_df, errors=errors)
        ),
        __inplace__=True,
        expected_exception=expected_exception,
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___neg__(request, data):
    """`__neg__` matches pandas, mirroring pandas' exception type when it raises."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    try:
        pandas_result = pandas_df.__neg__()
    except Exception as err:
        # If pandas raises, modin must raise the same exception type
        with pytest.raises(type(err)):
            modin_df.__neg__()
    else:
        modin_result = modin_df.__neg__()
        df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___invert__(data, request):
    """Bitwise NOT (`~df`) matches pandas."""
    expected_exception = None
    if "float_nan_data" in request.node.callspec.id:
        # FIXME: https://github.com/modin-project/modin/issues/7081
        expected_exception = False
    eval_general(
        *create_test_dfs(data), lambda df: ~df, expected_exception=expected_exception
    )
def test___invert___bool():
    """Bitwise NOT on a boolean frame matches pandas."""
    raw = [False]
    df_equals(~pd.DataFrame(raw), ~pandas.DataFrame(raw))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___delitem__(request, data):
    """`del df[col]` matches pandas for the first and the last column."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    if "empty_data" not in request.node.name:
        key = pandas_df.columns[0]
        modin_df = modin_df.copy()
        pandas_df = pandas_df.copy()
        modin_df.__delitem__(key)
        pandas_df.__delitem__(key)
        df_equals(modin_df, pandas_df)
        # Issue 2027
        last_label = pandas_df.iloc[:, -1].name
        modin_df.__delitem__(last_label)
        pandas_df.__delitem__(last_label)
        df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___nonzero__(data):
    """The truth value of a DataFrame is ambiguous: `__nonzero__` always raises."""
    frame = pd.DataFrame(data)
    with pytest.raises(ValueError):
        frame.__nonzero__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___abs__(request, data):
    """`abs(df)` matches pandas, mirroring pandas' exception type when it raises."""
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    try:
        pandas_result = abs(pandas_df)
    except Exception as err:
        # If pandas raises, modin must raise the same exception type
        with pytest.raises(type(err)):
            abs(modin_df)
    else:
        modin_result = abs(modin_df)
        df_equals(modin_result, pandas_result)
def test___round__():
    """`round(df)` (the `__round__` protocol) matches pandas."""
    sample = test_data_values[0]
    eval_general(pd.DataFrame(sample), pandas.DataFrame(sample), lambda df: round(df))
@pytest.mark.parametrize(
    "get_index",
    [
        pytest.param(lambda idx: None, id="None_idx"),
        pytest.param(lambda idx: ["a", "b", "c"], id="No_intersection_idx"),
        pytest.param(lambda idx: idx, id="Equal_idx"),
        pytest.param(lambda idx: idx[::-1], id="Reversed_idx"),
    ],
)
@pytest.mark.parametrize(
    "get_columns",
    [
        pytest.param(lambda idx: None, id="None_idx"),
        pytest.param(lambda idx: ["a", "b", "c"], id="No_intersection_idx"),
        pytest.param(lambda idx: idx, id="Equal_idx"),
        pytest.param(lambda idx: idx[::-1], id="Reversed_idx"),
    ],
)
@pytest.mark.parametrize("dtype", [None, "str"])
@pytest.mark.exclude_in_sanity
def test_constructor_from_modin_series(get_index, get_columns, dtype):
    """Constructing a DataFrame from a dict of modin Series matches pandas for
    various index/columns overrides and dtypes."""
    modin_df, pandas_df = create_test_dfs(test_data_values[0])
    # Build parallel dicts of Series drawn column-by-column from each frame
    modin_data = {f"new_col{i}": modin_df.iloc[:, i] for i in range(modin_df.shape[1])}
    pandas_data = {
        f"new_col{i}": pandas_df.iloc[:, i] for i in range(pandas_df.shape[1])
    }
    index = get_index(modin_df.index)
    columns = get_columns(list(modin_data.keys()))
    new_modin = pd.DataFrame(modin_data, index=index, columns=columns, dtype=dtype)
    new_pandas = pandas.DataFrame(
        pandas_data, index=index, columns=columns, dtype=dtype
    )
    df_equals(new_modin, new_pandas)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_constructor(data):
    """Constructing from raw data and from dicts of Series matches pandas."""
    df_equals(pandas.DataFrame(data), pd.DataFrame(data))
    pd_from_series = pandas.DataFrame({k: pandas.Series(v) for k, v in data.items()})
    md_from_series = pd.DataFrame({k: pd.Series(v) for k, v in data.items()})
    df_equals(pd_from_series, md_from_series)
def test_pyarrow_constructor():
    """Constructing with a pyarrow-backed ArrowDtype matches pandas
    (skipped when pyarrow is not installed)."""
    pa = pytest.importorskip("pyarrow")
    data = [[Decimal("3.19"), None], [None, Decimal("-1.23")]]
    df_equals(*create_test_dfs(data, dtype=pd.ArrowDtype(pa.decimal128(3, scale=2))))
@pytest.mark.parametrize(
"data",
[
np.arange(1, 10000, dtype=np.float32),
[
pd.Series([1, 2, 3], dtype="int32"),
pandas.Series([4, 5, 6], dtype="int64"),
np.array([7, 8, 9], dtype=np.float32),
],
pandas.Categorical([1, 2, 3, 4, 5]),
],
)
def test_constructor_dtypes(data):
modin_df, pandas_df = create_test_dfs(data)
df_equals(modin_df, pandas_df)
def test_constructor_columns_and_index():
modin_df = pd.DataFrame(
[[1, 1, 10], [2, 4, 20], [3, 7, 30]],
index=[1, 2, 3],
columns=["id", "max_speed", "health"],
)
pandas_df = pandas.DataFrame(
[[1, 1, 10], [2, 4, 20], [3, 7, 30]],
index=[1, 2, 3],
columns=["id", "max_speed", "health"],
)
df_equals(modin_df, pandas_df)
df_equals(pd.DataFrame(modin_df), pandas.DataFrame(pandas_df))
df_equals(
pd.DataFrame(modin_df, columns=["max_speed", "health"]),
pandas.DataFrame(pandas_df, columns=["max_speed", "health"]),
)
df_equals(
pd.DataFrame(modin_df, index=[1, 2]),
pandas.DataFrame(pandas_df, index=[1, 2]),
)
df_equals(
pd.DataFrame(modin_df, index=[1, 2], columns=["health"]),
pandas.DataFrame(pandas_df, index=[1, 2], columns=["health"]),
)
df_equals(
pd.DataFrame(modin_df.iloc[:, 0], index=[1, 2, 3]),
pandas.DataFrame(pandas_df.iloc[:, 0], index=[1, 2, 3]),
)
df_equals(
pd.DataFrame(modin_df.iloc[:, 0], columns=["NO_EXIST"]),
pandas.DataFrame(pandas_df.iloc[:, 0], columns=["NO_EXIST"]),
)
with pytest.raises(NotImplementedError):
pd.DataFrame(modin_df, index=[1, 2, 99999])
with pytest.raises(NotImplementedError):
pd.DataFrame(modin_df, columns=["NO_EXIST"])
def test_constructor_from_index():
data = pd.Index([1, 2, 3], name="pricing_date")
modin_df, pandas_df = create_test_dfs(data)
df_equals(modin_df, pandas_df)
def test_insert_datelike_string_issue_7371():
# When a new value is inserted into a frame, we call pandas.api.types.pandas_dtype(value) to
# extract the dtype of an object like a pandas Series or numpy array. When a scalar value is passed,
# this usually raises a TypeError, so we construct a local pandas Series from the object and
# extract the dtype from there.
# When the passed value is a date-like string, pandas will instead raise a ValueError because
# it tries to parse it as a numpy structured dtype. After fixing GH#7371, we now catch
# ValueError in addition to TypeError to handle this case.
modin_df = pd.DataFrame({"a": [0]})
modin_df["c"] = "2020-01-01"
pandas_df = pandas.DataFrame({"a": [0]})
pandas_df["c"] = "2020-01-01"
df_equals(modin_df, pandas_df)
| TestCategoricalProxyDtype |
python | apache__airflow | providers/apache/pig/tests/unit/apache/pig/operators/test_pig.py | {
"start": 1075,
"end": 2615
} | class ____:
def test_prepare_template(self):
pig = "sh echo $DATE;"
task_id = TEST_TASK_ID
operator = PigOperator(pig=pig, task_id=task_id)
operator.prepare_template()
assert pig == operator.pig
# converts when pigparams_jinja_translate = true
operator = PigOperator(pig=pig, task_id=task_id, pigparams_jinja_translate=True)
operator.prepare_template()
assert operator.pig == "sh echo {{ DATE }};"
@pytest.mark.db_test
@mock.patch.object(PigCliHook, "run_cli")
def test_execute(self, mock_run_cli):
pig_opts = "-x mapreduce"
operator = PigOperator(pig=PIG, pig_opts=pig_opts, task_id=TEST_TASK_ID)
operator.execute(context=TEST_CONTEXT_ID)
mock_run_cli.assert_called_once_with(pig=PIG, pig_opts=pig_opts)
@pytest.mark.db_test
@mock.patch.object(PigCliHook, "run_cli")
def test_execute_default_pig_opts_to_none(self, mock_run_cli):
operator = PigOperator(pig=PIG, task_id=TEST_TASK_ID)
operator.execute(context=TEST_CONTEXT_ID)
mock_run_cli.assert_called_once_with(pig=PIG, pig_opts=None)
@pytest.mark.db_test
@mock.patch.object(PigCliHook, "run_cli")
@mock.patch.object(PigCliHook, "kill")
def test_on_kill(self, mock_kill, mock_rul_cli):
operator = PigOperator(pig=PIG, task_id=TEST_TASK_ID)
operator.execute(context=TEST_CONTEXT_ID)
operator.on_kill()
mock_rul_cli.assert_called()
mock_kill.assert_called()
| TestPigOperator |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 59150,
"end": 64768
} | class ____(PyObjectType):
#
# A Python extension type.
#
# name string
# scope CClassScope Attribute namespace
# typedef_flag boolean
# base_type PyExtensionType or None
# module_name string or None Qualified name of defining module
# objstruct_cname string Name of PyObject struct
# objtypedef_cname string Name of PyObject struct typedef
# typeobj_cname string or None C code fragment referring to type object
# typeptr_cname string or None Name of pointer to external type object
# vtabslot_cname string Name of C method table member
# vtabstruct_cname string Name of C method table struct
# vtabptr_cname string Name of pointer to C method table
# vtable_cname string Name of C method table definition
# early_init boolean Whether to initialize early (as opposed to during module execution).
# defered_declarations [thunk] Used to declare class hierarchies in order
# is_external boolean Defined in a extern block
# check_size 'warn', 'error', 'ignore' What to do if tp_basicsize does not match
# dataclass_fields OrderedDict nor None Used for inheriting from dataclasses
# multiple_bases boolean Does this class have multiple bases
# has_sequence_flag boolean Set Py_TPFLAGS_SEQUENCE
is_extension_type = 1
has_attributes = 1
early_init = 1
objtypedef_cname = None
dataclass_fields = None
multiple_bases = False
has_sequence_flag = False
def __init__(self, name, typedef_flag, base_type, is_external=0, check_size=None):
self.name = name
self.scope = None
self.typedef_flag = typedef_flag
if base_type is not None:
base_type.is_subclassed = True
self.base_type = base_type
self.module_name = None
self.objstruct_cname = None
self.typeobj_cname = None
self.typeptr_cname = None
self.vtabslot_cname = None
self.vtabstruct_cname = None
self.vtabptr_cname = None
self.vtable_cname = None
self.is_external = is_external
self.check_size = check_size or 'warn'
self.defered_declarations = []
def set_scope(self, scope):
self.scope = scope
if scope:
scope.parent_type = self
def needs_nonecheck(self):
return True
def subtype_of_resolved_type(self, other_type):
if other_type.is_extension_type or other_type.is_builtin_type:
return self is other_type or (
self.base_type and self.base_type.subtype_of(other_type))
else:
return other_type is py_object_type
def typeobj_is_available(self):
# Do we have a pointer to the type object?
return self.typeptr_cname
def typeobj_is_imported(self):
# If we don't know the C name of the type object but we do
# know which module it's defined in, it will be imported.
return self.typeobj_cname is None and self.module_name is not None
def assignable_from(self, src_type):
if self == src_type:
return True
if isinstance(src_type, PyExtensionType):
if src_type.base_type is not None:
return self.assignable_from(src_type.base_type)
if isinstance(src_type, BuiltinObjectType):
# FIXME: This is an ugly special case that we currently
# keep supporting. It allows users to specify builtin
# types as external extension types, while keeping them
# compatible with the real builtin types. We already
# generate a warning for it. Big TODO: remove!
return (self.module_name == '__builtin__' and
self.name == src_type.name)
return False
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0, deref = 0):
if pyrex or for_display:
base_code = self.name
else:
if self.typedef_flag:
objstruct = self.objstruct_cname
else:
objstruct = "struct %s" % self.objstruct_cname
base_code = public_decl(objstruct, dll_linkage)
if deref:
assert not entity_code
else:
entity_code = "*%s" % entity_code
return self.base_declaration_code(base_code, entity_code)
def type_test_code(self, scope, py_arg, allow_none=True, exact=False):
assert not exact, "exact extension type tests are not currently implemented here"
typeptr_cname = scope.name_in_module_state(self.typeptr_cname)
type_check = f"likely(__Pyx_TypeTest({py_arg}, {typeptr_cname}))"
scope.use_utility_code(UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
if allow_none:
type_check = f"likely((({py_arg}) == Py_None) || {type_check})"
return type_check
def attributes_known(self):
return self.scope is not None
def __str__(self):
return self.name
def __repr__(self):
return "<PyExtensionType %s%s>" % (self.scope.class_name,
("", " typedef")[self.typedef_flag])
def py_type_name(self):
if not self.module_name:
return self.name
return "__import__(%r, None, None, ['']).%s" % (self.module_name,
self.name)
| PyExtensionType |
python | pytorch__pytorch | torch/_numpy/_dtypes.py | {
"start": 2414,
"end": 2513
} | class ____(floating):
name = "float64"
typecode = "d"
torch_dtype = torch.float64
| float64 |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 24503,
"end": 24645
} | class ____(TIRetryStatePayload):
"""Update a task instance state to up_for_retry."""
type: Literal["RetryTask"] = "RetryTask"
| RetryTask |
python | pypa__warehouse | tests/unit/admin/views/test_organizations.py | {
"start": 950,
"end": 2877
} | class ____:
def test_validate_success(self):
form_data = MultiDict(
{
"display_name": "My Organization",
"link_url": "https://example.com",
"description": "A test organization",
"orgtype": "Company",
}
)
form = views.OrganizationForm(formdata=form_data)
assert form.validate(), str(form.errors)
def test_validate_invalid_url(self):
form_data = MultiDict(
{
"display_name": "My Organization",
"link_url": "not-a-url",
"description": "A test organization",
"orgtype": "Company",
}
)
form = views.OrganizationForm(formdata=form_data)
assert not form.validate()
assert "Organization URL must start with http:// or https://" in str(
form.link_url.errors
)
def test_validate_missing_required_fields(self):
form_data = MultiDict({})
form = views.OrganizationForm(formdata=form_data)
assert not form.validate()
assert form.display_name.errors
assert form.link_url.errors
assert form.description.errors
assert form.orgtype.errors
def test_validate_field_too_long(self):
form_data = MultiDict(
{
"display_name": "x" * 101, # Max is 100
"link_url": "https://example.com/" + "x" * 381, # Max is 400
"description": "x" * 401, # Max is 400
"orgtype": "Company",
}
)
form = views.OrganizationForm(formdata=form_data)
assert not form.validate()
assert "100 characters or less" in str(form.display_name.errors)
assert "400 characters or less" in str(form.link_url.errors)
assert "400 characters or less" in str(form.description.errors)
| TestOrganizationForm |
python | huggingface__transformers | tests/models/zoedepth/test_modeling_zoedepth.py | {
"start": 1398,
"end": 4926
} | class ____:
def __init__(
self,
parent,
batch_size=2,
num_channels=3,
image_size=32,
patch_size=16,
use_labels=True,
num_labels=3,
is_training=True,
hidden_size=4,
num_hidden_layers=2,
num_attention_heads=2,
intermediate_size=8,
out_features=["stage1", "stage2"],
apply_layernorm=False,
reshape_hidden_states=False,
neck_hidden_sizes=[2, 2],
fusion_hidden_size=6,
bottleneck_features=6,
num_out_features=[6, 6, 6, 6],
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.patch_size = patch_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.out_features = out_features
self.apply_layernorm = apply_layernorm
self.reshape_hidden_states = reshape_hidden_states
self.use_labels = use_labels
self.num_labels = num_labels
self.is_training = is_training
self.neck_hidden_sizes = neck_hidden_sizes
self.fusion_hidden_size = fusion_hidden_size
self.bottleneck_features = bottleneck_features
self.num_out_features = num_out_features
# ZoeDepth's sequence length
self.seq_length = (self.image_size // self.patch_size) ** 2 + 1
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return ZoeDepthConfig(
backbone_config=self.get_backbone_config(),
backbone=None,
neck_hidden_sizes=self.neck_hidden_sizes,
fusion_hidden_size=self.fusion_hidden_size,
bottleneck_features=self.bottleneck_features,
num_out_features=self.num_out_features,
)
def get_backbone_config(self):
return Dinov2Config(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
is_training=self.is_training,
out_features=self.out_features,
reshape_hidden_states=self.reshape_hidden_states,
)
def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
config.num_labels = self.num_labels
model = ZoeDepthForDepthEstimation(config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| ZoeDepthModelTester |
python | scipy__scipy | scipy/optimize/_trustregion_constr/tests/test_qp_subproblem.py | {
"start": 15707,
"end": 18754
} | class ____(TestCase):
def test_cauchypoint_equalsto_newtonpoint(self):
A = np.array([[1, 8]])
b = np.array([-16])
_, _, Y = projections(A)
newton_point = np.array([0.24615385, 1.96923077])
# Newton point inside boundaries
x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [np.inf, np.inf])
assert_array_almost_equal(x, newton_point)
# Spherical constraint active
x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf], [np.inf, np.inf])
assert_array_almost_equal(x, newton_point/np.linalg.norm(newton_point))
# Box constraints active
x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf], [0.1, np.inf])
assert_array_almost_equal(x, (newton_point/newton_point[0]) * 0.1)
def test_3d_example(self):
A = np.array([[1, 8, 1],
[4, 2, 2]])
b = np.array([-16, 2])
Z, LS, Y = projections(A)
newton_point = np.array([-1.37090909, 2.23272727, -0.49090909])
cauchy_point = np.array([0.11165723, 1.73068711, 0.16748585])
origin = np.zeros_like(newton_point)
# newton_point inside boundaries
x = modified_dogleg(A, Y, b, 3, [-np.inf, -np.inf, -np.inf],
[np.inf, np.inf, np.inf])
assert_array_almost_equal(x, newton_point)
# line between cauchy_point and newton_point contains best point
# (spherical constraint is active).
x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf],
[np.inf, np.inf, np.inf])
z = cauchy_point
d = newton_point-cauchy_point
t = ((x-z)/(d))
assert_array_almost_equal(t, np.full(3, 0.40807330))
assert_array_almost_equal(np.linalg.norm(x), 2)
# line between cauchy_point and newton_point contains best point
# (box constraint is active).
x = modified_dogleg(A, Y, b, 5, [-1, -np.inf, -np.inf],
[np.inf, np.inf, np.inf])
z = cauchy_point
d = newton_point-cauchy_point
t = ((x-z)/(d))
assert_array_almost_equal(t, np.full(3, 0.7498195))
assert_array_almost_equal(x[0], -1)
# line between origin and cauchy_point contains best point
# (spherical constraint is active).
x = modified_dogleg(A, Y, b, 1, [-np.inf, -np.inf, -np.inf],
[np.inf, np.inf, np.inf])
z = origin
d = cauchy_point
t = ((x-z)/(d))
assert_array_almost_equal(t, np.full(3, 0.573936265))
assert_array_almost_equal(np.linalg.norm(x), 1)
# line between origin and newton_point contains best point
# (box constraint is active).
x = modified_dogleg(A, Y, b, 2, [-np.inf, -np.inf, -np.inf],
[np.inf, 1, np.inf])
z = origin
d = newton_point
t = ((x-z)/(d))
assert_array_almost_equal(t, np.full(3, 0.4478827364))
assert_array_almost_equal(x[1], 1)
| TestModifiedDogleg |
python | encode__django-rest-framework | rest_framework/fields.py | {
"start": 23330,
"end": 24922
} | class ____(Field):
default_error_messages = {
'invalid': _('Must be a valid boolean.')
}
default_empty_html = False
initial = False
TRUE_VALUES = {
't',
'y',
'yes',
'true',
'on',
'1',
1,
True,
}
FALSE_VALUES = {
'f',
'n',
'no',
'false',
'off',
'0',
0,
0.0,
False,
}
NULL_VALUES = {'null', '', None}
def __init__(self, **kwargs):
if kwargs.get('allow_null', False):
self.default_empty_html = None
self.initial = None
super().__init__(**kwargs)
@staticmethod
def _lower_if_str(value):
if isinstance(value, str):
return value.lower()
return value
def to_internal_value(self, data):
with contextlib.suppress(TypeError):
if self._lower_if_str(data) in self.TRUE_VALUES:
return True
elif self._lower_if_str(data) in self.FALSE_VALUES:
return False
elif self._lower_if_str(data) in self.NULL_VALUES and self.allow_null:
return None
self.fail("invalid", input=data)
def to_representation(self, value):
if self._lower_if_str(value) in self.TRUE_VALUES:
return True
elif self._lower_if_str(value) in self.FALSE_VALUES:
return False
if self._lower_if_str(value) in self.NULL_VALUES and self.allow_null:
return None
return bool(value)
# String types...
| BooleanField |
python | ansible__ansible | test/units/utils/test_serialization_profiles.py | {
"start": 10204,
"end": 15013
} | class ____:
def __init__(self, profile_name: str) -> None:
self.profile_name = profile_name
profile = _serialization.get_serialization_profile(profile_name)
supported_tags = {obj: None for obj in profile.serialize_map if issubclass(obj, AnsibleDatatagBase)}
if supported_tags:
self.supported_tag_values = tuple(tag_value for tag_type, tag_value in tag_values.items() if tag_type in supported_tags)
if not self.supported_tag_values:
raise Exception(f'Profile {profile} supports tags {supported_tags}, but no supported tag value is available.')
else:
self.supported_tag_values = tuple()
self.unsupported_tag_value = next((tag_value for tag_type, tag_value in tag_values.items() if tag_type not in supported_tags), None)
if not self.unsupported_tag_value and profile.profile_name != _cache_persistence._Profile.profile_name:
raise Exception(f'Profile {profile} supports tags {supported_tags}, but no unsupported tag value is available.')
def create_parameters_from_values(self, *values: t.Any) -> list[_TestParameters]:
return list(itertools.chain.from_iterable(self.create_parameters_from_value(value) for value in values))
def create_parameters_from_value(self, value: t.Any) -> list[_TestParameters]:
test_parameters: list[_TestParameters] = [
_TestParameters(
profile_name=self.profile_name,
value=value,
)
]
if self.supported_tag_values:
test_parameters.append(_TestParameters(
profile_name=self.profile_name,
value=value,
tags=self.supported_tag_values,
))
if self.unsupported_tag_value:
test_parameters.append(_TestParameters(
profile_name=self.profile_name,
value=value,
tags=(self.unsupported_tag_value,),
))
# test lazy containers on all non m2c profiles
if not self.profile_name.endswith("_m2c") and isinstance(value, (list, dict)):
test_parameters.extend([dataclasses.replace(p, lazy=True) for p in test_parameters])
return test_parameters
additional_test_parameters: list[_TestParameters] = []
# DTFIX5: need better testing for containers, especially for tagged values in containers
additional_test_parameters.extend(ProfileHelper(_fallback_to_str._Profile.profile_name).create_parameters_from_values(
b'\x00', # valid utf-8 strict, JSON escape sequence required
b'\x80', # utf-8 strict decoding fails, forcing the use of an error handler such as surrogateescape, JSON escape sequence required
'\udc80', # same as above, but already a string (verify that the string version is handled the same as the bytes version)
{1: "1"}, # integer key
{b'hi': "1"}, # bytes key
{TrustedAsTemplate().tag(b'hi'): "2"}, # tagged bytes key
{(b'hi',): 3}, # tuple[bytes] key
))
_generate = False
"""Set to True to regenerate all test data; a test failure will occur until it is set back to False."""
def get_test_cases() -> list[_TestCase]:
data_set = DataSet(generate=_generate)
data_set.load()
test_parameters: list[_TestParameters] = []
for profile_name in get_profile_names():
helper = ProfileHelper(profile_name)
for value in basic_values:
test_parameters.extend(helper.create_parameters_from_value(value))
test_parameters.extend(additional_test_parameters)
test_cases = [_TestCase(parameters=parameters, expected=data_set.fetch_or_create_expected(parameters)) for parameters in test_parameters]
data_set.save()
return test_cases
@pytest.mark.parametrize("test_case", get_test_cases(), ids=str)
def test_profile(test_case: _TestCase) -> None:
output = test_case.parameters.get_test_output()
if isinstance(output.payload, Exception):
if type(output.payload) is not type(test_case.expected.payload):
raise Exception('unexpected exception') from output.payload
assert str(output.payload) == str(test_case.expected.payload)
else:
assert output.payload == test_case.expected.payload
assert type(output.round_trip) is type(test_case.expected.round_trip)
if isinstance(output.round_trip, AnsibleRuntimeError):
assert str(output.round_trip._original_message) == str(test_case.expected.round_trip._original_message)
else:
assert output.round_trip == test_case.expected.round_trip
assert not set(output.tags).symmetric_difference(test_case.expected.tags)
def test_not_generate_mode():
assert not _generate, "set _generate=False to statically test expected behavior"
| ProfileHelper |
python | kamyu104__LeetCode-Solutions | Python/count-number-of-possible-root-nodes.py | {
"start": 1408,
"end": 2495
} | class ____(object):
def rootCount(self, edges, guesses, k):
"""
:type edges: List[List[int]]
:type guesses: List[List[int]]
:type k: int
:rtype: int
"""
def dfs(u, p):
cnt = int((p, u) in lookup)
for v in adj[u]:
if v == p:
continue
cnt += dfs(v, u)
return cnt
def dfs2(u, p, curr):
if (p, u) in lookup:
curr -= 1
if (u, p) in lookup:
curr += 1
cnt = int(curr >= k)
for v in adj[u]:
if v == p:
continue
cnt += dfs2(v, u, curr)
return cnt
adj = collections.defaultdict(list)
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
lookup = {(u, v) for u, v in guesses}
curr = dfs(0, -1)
return dfs2(0, -1, curr)
# Time: O(n) ~ O(n^2), worst case in star tree
# Space: O(n)
import collections
# memoization
| Solution2 |
python | arrow-py__arrow | arrow/locales.py | {
"start": 115497,
"end": 117215
} | class ____(Locale):
names = ["se", "se-fi", "se-no", "se-se"]
past = "{0} dassái"
future = "{0} " # NOTE: couldn't find preposition for Sami here, none needed?
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "dál",
"second": "sekunda",
"seconds": "{0} sekundda",
"minute": "minuhta",
"minutes": "{0} minuhta",
"hour": "diimmu",
"hours": "{0} diimmu",
"day": "beaivvi",
"days": "{0} beaivvi",
"week": "vahku",
"weeks": "{0} vahku",
"month": "mánu",
"months": "{0} mánu",
"year": "jagi",
"years": "{0} jagi",
}
month_names = [
"",
"Ođđajagimánnu",
"Guovvamánnu",
"Njukčamánnu",
"Cuoŋománnu",
"Miessemánnu",
"Geassemánnu",
"Suoidnemánnu",
"Borgemánnu",
"Čakčamánnu",
"Golggotmánnu",
"Skábmamánnu",
"Juovlamánnu",
]
month_abbreviations = [
"",
"Ođđajagimánnu",
"Guovvamánnu",
"Njukčamánnu",
"Cuoŋománnu",
"Miessemánnu",
"Geassemánnu",
"Suoidnemánnu",
"Borgemánnu",
"Čakčamánnu",
"Golggotmánnu",
"Skábmamánnu",
"Juovlamánnu",
]
day_names = [
"",
"Mánnodat",
"Disdat",
"Gaskavahkku",
"Duorastat",
"Bearjadat",
"Lávvordat",
"Sotnabeaivi",
]
day_abbreviations = [
"",
"Mánnodat",
"Disdat",
"Gaskavahkku",
"Duorastat",
"Bearjadat",
"Lávvordat",
"Sotnabeaivi",
]
| SamiLocale |
python | pytorch__pytorch | torch/testing/_internal/common_pruning.py | {
"start": 1527,
"end": 2214
} | class ____(nn.Module):
r"""Model with only Linear layers without biases, some wrapped in a Sequential,
some following the Sequential. Used to test basic pruned Linear-Linear fusion."""
def __init__(self) -> None:
super().__init__()
self.seq = nn.Sequential(
nn.Linear(7, 5, bias=False),
nn.Linear(5, 6, bias=False),
nn.Linear(6, 4, bias=False),
)
self.linear1 = nn.Linear(4, 4, bias=False)
self.linear2 = nn.Linear(4, 10, bias=False)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.seq(x)
x = self.linear1(x)
x = self.linear2(x)
return x
| SimpleLinear |
python | jazzband__django-waffle | waffle/tests/test_testutils.py | {
"start": 12115,
"end": 12440
} | class ____(OverrideSampleOnClassTestCase):
"""
Extend ``OverrideSampleOnClassTestCase``
and make sure ``override_sample`` change still works.
"""
def test_child_undecorated_method_is_set_properly_for_sample(self):
self.assertFalse(waffle.sample_is_active('foo'))
| InheritanceOverrideSampleOnClassTests |
python | tensorflow__tensorflow | tensorflow/python/autograph/operators/logical_test.py | {
"start": 928,
"end": 3138
} | class ____(test.TestCase):
def assertNotCalled(self):
self.fail('this should not be called')
def _tf_true(self):
return constant_op.constant(True)
def _tf_false(self):
return constant_op.constant(False)
def test_and_python(self):
self.assertTrue(logical.and_(lambda: True, lambda: True))
self.assertTrue(logical.and_(lambda: [1], lambda: True))
self.assertListEqual(logical.and_(lambda: True, lambda: [1]), [1])
self.assertFalse(logical.and_(lambda: False, lambda: True))
self.assertFalse(logical.and_(lambda: False, self.assertNotCalled))
@test_util.run_deprecated_v1
def test_and_tf(self):
with self.cached_session() as sess:
t = logical.and_(self._tf_true, self._tf_true)
self.assertEqual(self.evaluate(t), True)
t = logical.and_(self._tf_true, lambda: True)
self.assertEqual(self.evaluate(t), True)
t = logical.and_(self._tf_false, lambda: True)
self.assertEqual(self.evaluate(t), False)
# TODO(mdan): Add a test for ops with side effects.
def test_or_python(self):
self.assertFalse(logical.or_(lambda: False, lambda: False))
self.assertFalse(logical.or_(lambda: [], lambda: False))
self.assertListEqual(logical.or_(lambda: False, lambda: [1]), [1])
self.assertTrue(logical.or_(lambda: False, lambda: True))
self.assertTrue(logical.or_(lambda: True, self.assertNotCalled))
@test_util.run_deprecated_v1
def test_or_tf(self):
with self.cached_session() as sess:
t = logical.or_(self._tf_false, self._tf_true)
self.assertEqual(self.evaluate(t), True)
t = logical.or_(self._tf_false, lambda: True)
self.assertEqual(self.evaluate(t), True)
t = logical.or_(self._tf_true, lambda: True)
self.assertEqual(self.evaluate(t), True)
# TODO(mdan): Add a test for ops with side effects.
def test_not_python(self):
self.assertFalse(logical.not_(True))
self.assertFalse(logical.not_([1]))
self.assertTrue(logical.not_([]))
def test_not_tf(self):
with self.cached_session() as sess:
t = logical.not_(self._tf_false())
self.assertEqual(self.evaluate(t), True)
if __name__ == '__main__':
test.main()
| LogicalOperatorsTest |
python | PyCQA__pylint | pylint/typing.py | {
"start": 1347,
"end": 1540
} | class ____(TypedDict):
"""Represents data about errors collected during checking of a module."""
key: Literal["fatal"]
mod: str
ex: ImportError | SyntaxError
| ErrorDescriptionDict |
python | pydantic__pydantic | pydantic-core/tests/validators/test_dataclasses.py | {
"start": 47019,
"end": 47113
} | class ____(FooDataclassSlots):
c: str
@dataclasses.dataclass(**kwargs)
| FooDataclassMoreSlots |
python | sympy__sympy | sympy/utilities/matchpy_connector.py | {
"start": 3949,
"end": 5710
} | class ____(Wildcard, Symbol):
min_length: int # abstract field required in subclasses
fixed_size: bool # abstract field required in subclasses
def __init__(self, variable_name=None, optional=None, **assumptions):
min_length = self.min_length
fixed_size = self.fixed_size
if optional is not None:
optional = _sympify(optional)
Wildcard.__init__(self, min_length, fixed_size, str(variable_name), optional)
def __getstate__(self):
return {
"min_length": self.min_length,
"fixed_size": self.fixed_size,
"min_count": self.min_count,
"variable_name": self.variable_name,
"optional": self.optional,
}
def __new__(cls, variable_name=None, optional=None, **assumptions):
cls._sanitize(assumptions, cls)
return _WildAbstract.__xnew__(cls, variable_name, optional, **assumptions)
def __getnewargs__(self):
return self.variable_name, self.optional
@staticmethod
def __xnew__(cls, variable_name=None, optional=None, **assumptions):
obj = Symbol.__xnew__(cls, variable_name, **assumptions)
return obj
def _hashable_content(self):
if self.optional:
return super()._hashable_content() + (self.min_count, self.fixed_size, self.variable_name, self.optional)
else:
return super()._hashable_content() + (self.min_count, self.fixed_size, self.variable_name)
def __copy__(self) -> '_WildAbstract':
return type(self)(variable_name=self.variable_name, optional=self.optional)
def __repr__(self):
return str(self)
def __str__(self):
return self.name
@doctest_depends_on(modules=('matchpy',))
| _WildAbstract |
python | huggingface__transformers | src/transformers/pipelines/zero_shot_image_classification.py | {
"start": 629,
"end": 7956
} | class ____(Pipeline):
"""
Zero shot image classification pipeline using `CLIPModel`. This pipeline predicts the class of an image when you
provide an image and a set of `candidate_labels`.
Example:
```python
>>> from transformers import pipeline
>>> classifier = pipeline(model="google/siglip-so400m-patch14-384")
>>> classifier(
... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
... candidate_labels=["animals", "humans", "landscape"],
... )
[{'score': 0.965, 'label': 'animals'}, {'score': 0.03, 'label': 'humans'}, {'score': 0.005, 'label': 'landscape'}]
>>> classifier(
... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
... candidate_labels=["black and white", "photorealist", "painting"],
... )
[{'score': 0.996, 'label': 'black and white'}, {'score': 0.003, 'label': 'photorealist'}, {'score': 0.0, 'label': 'painting'}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This image classification pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"zero-shot-image-classification"`.
See the list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=zero-shot-image-classification).
"""
_load_processor = False
_load_image_processor = True
_load_feature_extractor = False
_load_tokenizer = True
def __init__(self, **kwargs):
super().__init__(**kwargs)
requires_backends(self, "vision")
self.check_model_type(MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES)
@overload
def __call__(
self, image: Union[str, "Image.Image"], candidate_labels: list[str], **kwargs: Any
) -> list[dict[str, Any]]: ...
@overload
def __call__(
self, image: list[str] | list["Image.Image"], candidate_labels: list[str], **kwargs: Any
) -> list[list[dict[str, Any]]]: ...
def __call__(
self,
image: Union[str, list[str], "Image.Image", list["Image.Image"]],
candidate_labels: list[str],
**kwargs: Any,
) -> list[dict[str, Any]] | list[list[dict[str, Any]]]:
"""
Assign labels to the image(s) passed as inputs.
Args:
image (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`):
The pipeline handles three types of images:
- A string containing a http link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
candidate_labels (`list[str]`):
The candidate labels for this image. They will be formatted using *hypothesis_template*.
hypothesis_template (`str`, *optional*, defaults to `"This is a photo of {}"`):
The format used in conjunction with *candidate_labels* to attempt the image classification by
replacing the placeholder with the candidate_labels. Pass "{}" if *candidate_labels* are
already formatted.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
the call may block forever.
Return:
A list of dictionaries containing one entry per proposed label. Each dictionary contains the
following keys:
- **label** (`str`) -- One of the suggested *candidate_labels*.
- **score** (`float`) -- The score attributed by the model to that label. It is a value between
0 and 1, computed as the `softmax` of `logits_per_image`.
"""
# After deprecation of this is completed, remove the default `None` value for `image`
if "images" in kwargs:
image = kwargs.pop("images")
if image is None:
raise ValueError("Cannot call the zero-shot-image-classification pipeline without an images argument!")
return super().__call__(image, candidate_labels=candidate_labels, **kwargs)
def _sanitize_parameters(self, tokenizer_kwargs=None, **kwargs):
preprocess_params = {}
if "candidate_labels" in kwargs:
preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
if "timeout" in kwargs:
preprocess_params["timeout"] = kwargs["timeout"]
if "hypothesis_template" in kwargs:
preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
if tokenizer_kwargs is not None:
warnings.warn(
"The `tokenizer_kwargs` argument is deprecated and will be removed in version 5 of Transformers",
FutureWarning,
)
preprocess_params["tokenizer_kwargs"] = tokenizer_kwargs
return preprocess_params, {}, {}
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors="pt")
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors="pt", **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
def _forward(self, model_inputs):
candidate_labels = model_inputs.pop("candidate_labels")
text_inputs = model_inputs.pop("text_inputs")
if isinstance(text_inputs[0], UserDict):
text_inputs = text_inputs[0]
else:
# Batching case.
text_inputs = text_inputs[0][0]
outputs = self.model(**text_inputs, **model_inputs)
model_outputs = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def postprocess(self, model_outputs):
candidate_labels = model_outputs.pop("candidate_labels")
logits = model_outputs["logits"][0]
if "siglip" in self.model.config.model_type:
probs = torch.sigmoid(logits).squeeze(-1)
scores = probs.tolist()
if not isinstance(scores, list):
scores = [scores]
else:
probs = logits.softmax(dim=-1).squeeze(-1)
scores = probs.tolist()
if not isinstance(scores, list):
scores = [scores]
result = [
{"score": score, "label": candidate_label}
for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
]
return result
| ZeroShotImageClassificationPipeline |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassNamedTuple1.py | {
"start": 119,
"end": 197
} | class ____:
pass
def standalone(obj: object) -> None:
print(obj)
| Other |
python | kamyu104__LeetCode-Solutions | Python/count-the-number-of-inversions.py | {
"start": 137,
"end": 1266
} | class ____(object):
def numberOfPermutations(self, n, requirements):
"""
:type n: int
:type requirements: List[List[int]]
:rtype: int
"""
MOD = 10**9+7
lookup = [-1]*n
for i, c in requirements:
lookup[i] = c
dp = [1]
prev = 0
for i in xrange(n):
if lookup[i] != -1: # optimized
dp = [reduce(lambda total, i: (total+dp[i])%MOD, xrange(max((lookup[i]-i)-prev, 0), min((lookup[i]+1)-prev, len(dp))), 0)]
prev = lookup[i]
continue
new_dp = [0]*min(len(dp)+((i+1)-1), (lookup[-1]+1)-prev)
for j in xrange(len(new_dp)):
new_dp[j] = dp[j] if j < len(dp) else 0
if j-1 >= 0:
new_dp[j] = (new_dp[j]+new_dp[j-1])%MOD
if j-(i+1) >= 0:
new_dp[j] = (new_dp[j]-dp[j-(i+1)])%MOD
dp = new_dp
return dp[-1]
# Time: O(n * k), k = max(cnt for _, cnt in requirements)
# Space: O(n + k)
# knapsack dp, combinatorics, sliding window, two pointers
| Solution |
python | ethereum__web3.py | web3/exceptions.py | {
"start": 1774,
"end": 1886
} | class ____(Web3Exception):
"""
Raised when unable to connect to a provider
"""
| ProviderConnectionError |
python | tensorflow__tensorflow | tensorflow/python/framework/convert_to_constants_test.py | {
"start": 3080,
"end": 6440
} | class ____(object):
"""GraphDef merging methods for testing purposes."""
@staticmethod
def merge_any(x1, x2, empty_fn):
"""Merges two values using the message's CopyFrom/MergeFrom methods."""
merged = empty_fn()
merged.CopyFrom(x1)
merged.MergeFrom(x2)
return merged
@staticmethod
def merge_nodes(node1, node2):
"""Merges two NodeDef messages."""
merged = _GraphMerger.merge_any(node1, node2, node_def_pb2.NodeDef)
merged_inputs = node1.input[:]
merged_inputs.extend([i for i in node2.input[:] if i not in merged_inputs])
merged.input[:] = merged_inputs
return merged
@staticmethod
def merge_lists(repeated1, repeated2, empty_fn, key_fn, merge_fn):
"""Merges two lists representing maps."""
merged = {}
xs1 = {key_fn(x): x for x in repeated1}
xs2 = {key_fn(x): x for x in repeated2}
for name in set().union(xs1.keys(), xs2.keys()):
x1 = empty_fn() if name not in xs1 else xs1[name]
x2 = empty_fn() if name not in xs2 else xs2[name]
merged[name] = merge_fn(x1, x2)
return sorted(merged.values(), key=key_fn)
@staticmethod
def merge_node_lists(repeated_nodes1, repeated_nodes2):
"""Merges two repeated node fields."""
return _GraphMerger.merge_lists(repeated_nodes1, repeated_nodes2,
node_def_pb2.NodeDef, lambda n: n.name,
_GraphMerger.merge_nodes)
@staticmethod
def merge_functions(fn1, fn2):
"""Merges two FunctionDefs."""
merged = _GraphMerger.merge_any(fn1, fn2, function_pb2.FunctionDef)
del merged.signature.input_arg[:]
merged.signature.input_arg.extend(
_GraphMerger.merge_lists(
fn1.signature.input_arg[:], fn2.signature.input_arg[:],
op_def_pb2.OpDef.ArgDef, lambda a: a.name,
lambda x, y: _GraphMerger.merge_any(x, y, op_def_pb2.OpDef.ArgDef)))
del merged.signature.output_arg[:]
merged.signature.output_arg.extend(
_GraphMerger.merge_lists(
fn1.signature.output_arg[:], fn2.signature.output_arg[:],
op_def_pb2.OpDef.ArgDef, lambda a: a.name,
lambda x, y: _GraphMerger.merge_any(x, y, op_def_pb2.OpDef.ArgDef)))
del merged.node_def[:]
merged.node_def.extend(
_GraphMerger.merge_node_lists(fn1.node_def[:], fn2.node_def[:]))
return merged
@staticmethod
def merge_graphs(graph1, graph2):
"""Merges two GraphDef messages."""
merged = graph_pb2.GraphDef()
merged.node.extend(
_GraphMerger.merge_node_lists(graph1.node[:], graph2.node[:]))
merged.library.function.extend(
_GraphMerger.merge_lists(graph1.library.function,
graph2.library.function,
function_pb2.FunctionDef,
lambda f: f.signature.name,
_GraphMerger.merge_functions))
return merged
def has_stateful_partitioned_call_op(graph_def):
"""Determines if a StatefulPartitionedCall op exists in the graph."""
for node in graph_def.node:
if node.op == "StatefulPartitionedCall":
return True
return False
def get_num_variables(graph_def):
"""Returns the number of ReadVariableOp in the graph."""
return sum(node.op == "ReadVariableOp" for node in graph_def.node)
| _GraphMerger |
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/device_test.py | {
"start": 1934,
"end": 16711
} | class ____(test_util.DTensorBaseTest, parameterized.TestCase):
def setUp(self):
super(DTensorDeviceTest, self).setUp()
device_ids = test_util.create_device_ids_array((2,))
local_device_ids = np.ravel(device_ids).tolist()
mesh_dict = { # pylint: disable=g-complex-comprehension
device: Mesh(
[_BATCH_DIM],
device_ids,
local_device_ids,
test_util.create_device_list((2,), device),
)
for device in ("CPU", "GPU", "TPU")
}
self.mesh = self.configTestMesh(mesh_dict)
def testInvalidLayout(self):
a = api.copy_to_mesh(
constant_op.constant([1.0]), Layout.replicated(self.mesh, rank=1)
)
b = array_ops.identity(a)
with self.assertRaises(ValueError):
api.check_layout(b, Layout.batch_sharded(self.mesh, _BATCH_DIM, rank=1))
@parameterized.parameters(True, False)
def testAsyncOption(self, is_async):
try:
# There isn't a great way to test whether something actually executed
# synchronously; this test just exercises the option.
api.reset_dtensor_device(is_async=is_async)
with api._dtensor_device()._experimental_default_mesh(self.mesh):
with ops.device_v2(api.device_name()):
a = api.copy_to_mesh(
constant_op.constant([1.0]), Layout.replicated(self.mesh, rank=1)
)
b = array_ops.identity(a)
self.assertEqual([1.0], b.numpy())
finally:
api._reset() # pylint: disable=protected-access
def testBasicTypeBasedDispatch(self):
# Tests for b = Op(a).
a = constant_op.constant([1.0, 2.0, 3.0, 4.0])
a = api.copy_to_mesh(a, Layout.replicated(self.mesh, rank=1))
# __getitem__
b = a[2:-2]
api.check_layout(b, Layout.replicated(self.mesh, rank=1))
c = a * 2
api.check_layout(b, Layout.replicated(self.mesh, rank=1))
self.assertAllEqual(a.numpy(), [1., 2., 3., 4.])
self.assertAllEqual(c.numpy(), [2., 4., 6., 8.])
@mock.patch.dict(os.environ, {"DTENSOR_ENABLE_MULTI_DEVICE_EXPANSION": "1"})
def testMultiDeviceExpansion(self):
# Tests for b = Op(a).
a = constant_op.constant([1.0, 2.0, 3.0, 4.0])
a = api.copy_to_mesh(a, Layout.replicated(self.mesh, rank=1))
# __getitem__
b = a[2:-2]
api.check_layout(b, Layout.replicated(self.mesh, rank=1))
c = a * 2
api.check_layout(b, Layout.replicated(self.mesh, rank=1))
self.assertAllEqual(a.numpy(), [1.0, 2.0, 3.0, 4.0])
self.assertAllEqual(c.numpy(), [2.0, 4.0, 6.0, 8.0])
def testNoImplicitCopyOnForLargeIntegerTensors(self):
a = array_ops.ones([10, 10], dtype=dtypes.int32)
a = api.copy_to_mesh(a, Layout.replicated(self.mesh, rank=2))
big = array_ops.ones([10, 10], dtype=dtypes.int32)
small = array_ops.ones([10], dtype=dtypes.int32)
with self.assertRaises(errors_impl.UnimplementedError):
a + big # pylint:disable=pointless-statement
a + small # pylint:disable=pointless-statement
def testConcurrentExecute(self):
results = {}
def func(thread_id):
@polymorphic_function.function
def update_variable(initial_value, num_round):
y = math_ops.multiply(initial_value, num_round)
return math_ops.add(initial_value, y)
for n in range(10):
with api._dtensor_device()._experimental_default_mesh(self.mesh):
x = stateless_random_ops.stateless_random_uniform(
[10], seed=(1, 2), minval=0, maxval=255
)
y = api.copy_to_mesh(x, Layout.replicated(self.mesh, rank=1))
y = update_variable(y, n + 1)
results[thread_id] = y
threads = {}
for a in range(10):
t = threading.Thread(target=func, args=(a,))
threads[a] = t
t.start()
for thrad_id, thread in threads.items():
thread.join()
self.assertIsNotNone(results[thrad_id])
def testNoImplicitCopyOnForScalarVariableOnNonCPUMesh(self):
self.skipForTfrt("b/235088250")
self.skipForDeviceType(["CPU"], "CPU mesh implicit copy is allowed.")
init_value = api.call_with_layout(
array_ops.ones, shape=(1), layout=Layout.replicated(self.mesh, rank=1)
)
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
r"Using a non-DTensor variable with DTensor is only supported for..*\n"
r".*Shape: \[1\].*\n"
".*device_test.py.*",
):
api.copy_to_mesh(
variables.Variable(init_value), Layout.replicated(self.mesh, rank=1)
)
@parameterized.named_parameters(
test_util.product(
[
("Int32", dtypes.int32),
("Float32", dtypes.float32),
("Int64", dtypes.int64),
("Float64", dtypes.float64),
],
[
(
"Scalar",
[],
),
(
"RankOne",
[1],
),
("RankTwo", [2, 2]),
],
)
)
def testImplicitCopyVariableOnCPUMesh(self, dtype, shape):
self.skipForTfrt("b/235088250")
self.skipForDeviceType(
["GPU", "TPU"], "Variable implicit copy is only allowed for CPU mesh.")
variable = variables.Variable(array_ops.ones(shape=shape, dtype=dtype))
new_value = array_ops.zeros(shape=shape, dtype=dtype)
@polymorphic_function.function
def assign_function(v, new_value):
return v.assign(new_value)
layout = Layout.replicated(self.mesh, rank=len(shape))
# Run explicitly on the dtensor device with a default mesh since
# we do not have any registered mesh to broadcast the inputs to.
with api.default_mesh(self.mesh):
assign_function(variable, api.pack([new_value] * self.mesh.size, layout))
read_value = variable.read_value()
self.assertDTensorEqual(new_value, layout, read_value)
def testNumpyCallWithReplicatedInput(self):
a = constant_op.constant([1.0, 2.0, 3.0, 4.0])
a = api.copy_to_mesh(a, Layout.replicated(self.mesh, rank=1))
b = a.numpy()
self.assertAllEqual(b, [1., 2., 3., 4.])
def testTensorIteration(self):
a = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
a = api.copy_to_mesh(a, Layout.replicated(self.mesh, rank=1))
iterator = iter(a)
self.assertAllClose(1., next(iterator))
def testCopyToMeshWithSameLayout(self):
a = constant_op.constant([1.0, 2.0, 3.0, 4.0])
a = api.copy_to_mesh(a, Layout.replicated(self.mesh, rank=1))
a = api.copy_to_mesh(a, Layout.replicated(self.mesh, rank=1))
api.check_layout(a, Layout.replicated(self.mesh, rank=1))
def testSetDefaultLayoutEager(self):
tensor = constant_op.constant([[1.0], [1.0]])
tensor = api.copy_to_mesh(tensor, Layout.replicated(self.mesh, rank=2))
with api._dtensor_device()._default_layout(
Layout.replicated(self.mesh, rank=1)):
tensor = array_ops.reshape(tensor, [-1])
api.check_layout(tensor, Layout.replicated(self.mesh, rank=1))
self.assertAllClose([1., 1.], tensor.numpy())
def testSetDefaultLayoutFunction(self):
@polymorphic_function.function
def func():
tensor = constant_op.constant([[1.0], [1.0]])
return array_ops.reshape(tensor, [-1]), array_ops.reshape(tensor, [-1])
with api._dtensor_device()._default_layout(
Layout.batch_sharded(self.mesh, batch_dim=_BATCH_DIM, rank=1)
):
tensor1, tensor2 = func()
api.check_layout(
tensor1, Layout.batch_sharded(self.mesh, batch_dim=_BATCH_DIM, rank=1)
)
api.check_layout(tensor2, Layout.replicated(self.mesh, rank=1))
tensor1 = api.relayout(tensor1, Layout.replicated(self.mesh, rank=1))
self.assertAllClose([1.0, 1.0], tensor1.numpy())
self.assertAllClose([1.0, 1.0], tensor2.numpy())
@parameterized.named_parameters(
# pylint: disable=unnecessary-lambda
# Needed for the DVariable monkey patch to work.
("Variable", lambda x: d_variable.DVariable(x)),
# pylint: enable=unnecessary-lambda
("Tensor", lambda x: x),
)
def testStringRepresentation(self, transform):
replicated = api.copy_to_mesh(
constant_op.constant(8.0), Layout.replicated(self.mesh, rank=0)
)
replicated = transform(replicated)
replicated_str = str(replicated)
self.assertIn("8", replicated_str)
self.assertIn("layout", replicated_str)
sharded = api.pack(
[constant_op.constant([8.0]), constant_op.constant([9.0])],
layout=Layout([_BATCH_DIM], self.mesh),
)
sharded = transform(sharded)
sharded_str = str(sharded)
self.assertIn("8", sharded_str)
self.assertIn("9", sharded_str)
self.assertIn("layout", sharded_str)
@parameterized.named_parameters(("Async", True), ("Sync", False))
def testCancellation(self, is_async):
self.skipForTfrt("b/181368626: support cancellation in tfrt.")
self.skipForDeviceType(["TPU"], "b/195552283: Fix cancellation on TPU.")
device = dtensor_device.DTensorDevice(meshes=[self.mesh], is_async=is_async)
@polymorphic_function.function
def f(x):
# Integer division by 0 on one device, which returns a bad status.
x = math_ops.cast(gen_math_ops.div(x=x, y=x), dtypes.float32)
# A reduction requiring a collective, which would normally deadlock with
# one of its participants missing.
return math_ops.reduce_sum(x, axis=0)
a = constant_op.constant([[1, 2]])
b = constant_op.constant([[0, 1]])
x = device.pack([a, b], layout=Layout([_BATCH_DIM, UNSHARDED], self.mesh))
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError, "Integer division by zero"
):
y = f(x)
y.numpy()
z = array_ops.identity(x)
self.assertAllClose([[[1, 2]], [[0, 1]]], device.unpack(z))
def testCopyToMeshShapeFn(self):
@polymorphic_function.function
def f():
c = constant_op.constant([1.0, 2.0])
on_mesh = api.copy_to_mesh(c, Layout.replicated(self.mesh, rank=1))
return on_mesh
output, = f.get_concrete_function().outputs
self.assertEqual([2], output.shape)
def testUnpackInvalidInput(self):
# Test for b/255629824
with self.assertRaisesRegex(TypeError, "Expecting a Tensor"):
api.unpack(
**{
"tensor": [[
41.8684053521925,
731.610023060566,
356.0701500440248,
9.62928117100512,
185.0041559439026,
225.87663065861508,
450.2403652750002,
268.7273627027147,
]]
}
)
def testIsDTensorInvalidInput(self):
# Test for b/272381211
self.assertFalse(api.fetch_layout(**{"tensor": -1024}))
def testFetchLayoutInvalidInput(self):
# Test for b/272381211
self.assertIsNone(api.fetch_layout(**{"tensor": -1024}))
def testFetchLayoutForDVariablesReturnsCorrectLayout(self):
layout = Layout.replicated(self.mesh, 2)
with api._dtensor_device()._experimental_default_mesh(self.mesh):
dvariable = d_variable.DVariable(
api.call_with_layout(
array_ops.ones, shape=[2, 3], dtype=dtypes.float32, layout=layout
)
)
self.assertEqual(layout, api.fetch_layout(dvariable))
def testFetchLayoutForDTensorReturnsCorrectLayout(self):
layout = Layout.replicated(self.mesh, 2)
tensor = api.call_with_layout(
array_ops.ones, shape=[2, 3], dtype=dtypes.float32, layout=layout
)
self.assertEqual(layout, api.fetch_layout(tensor))
def testFetchLayoutForRegularTensorsThrowsError(self):
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
"FetchLayout expects a tensor placed on the layout device.",
):
api.fetch_layout(constant_op.constant([2, 3]))
def testFetchLayoutNotEagerlyRaisesRuntimeError(self):
@polymorphic_function.function
def f(dtensor_input):
api.fetch_layout(dtensor_input)
with self.assertRaisesRegex(RuntimeError,
"`fetch_layout` must be called eagerly."):
f(
api.copy_to_mesh(
constant_op.constant(1.0), Layout.replicated(self.mesh, rank=0)
)
)
def testIsDTensor(self):
normal_tensor = array_ops.zeros(shape=[10, 10])
self.assertFalse(api.is_dtensor(normal_tensor))
layout = Layout.replicated(self.mesh, rank=1)
d_tensor = api.call_with_layout(array_ops.zeros, layout=layout, shape=[10])
self.assertTrue(api.is_dtensor(d_tensor))
var = d_variable.DVariable(d_tensor)
self.assertTrue(api.is_dtensor(var))
self.assertFalse(api.is_dtensor([0, 1]))
self.assertFalse(api.is_dtensor({False: True}))
self.assertFalse(api.is_dtensor(1))
class C:
pass
self.assertFalse(api.is_dtensor(C()))
def testIsDTensorNotEagerlyRaisesRuntimeError(self):
@polymorphic_function.function
def f(dtensor_input):
api.is_dtensor(dtensor_input)
with self.assertRaisesRegex(
RuntimeError, "`is_dtensor` must be called eagerly."):
f(
api.copy_to_mesh(
constant_op.constant(1.0), Layout.replicated(self.mesh, 0)
)
)
def testSingleDeviceMesh(self):
# FIXME(b/274647196): Add a mesh_util API that takes CPU:0.
cpu0_mesh = Mesh.from_device("/job:localhost/replica:0/task:0/device:CPU:0")
with api.default_mesh(cpu0_mesh):
a = constant_op.constant(1.0)
self.assertFalse(api.is_dtensor(a))
self.assertIn("CPU:0", a.device)
with api.default_mesh(cpu0_mesh):
b = array_ops.ones(shape=(3, 3))
self.assertTrue(api.is_dtensor(b))
self.assertEqual(api.fetch_layout(b).mesh, cpu0_mesh)
def testUnsupportedOpReplicatedInput(self):
with api.default_mesh(self.mesh):
t = array_ops.ones(shape=(8, 3))
a = gen_collective_ops.collective_reduce_v2(
t,
group_size=1,
group_key=1030,
instance_key=1,
merge_op="Add",
final_op="Id",
ordering_token=[],
)
self.assertFalse(api.is_dtensor(a))
self.assertAllClose(a.numpy(), t.numpy())
def testUnsupportedOpShardedInput(self):
with api.default_mesh(self.mesh):
t = array_ops.ones(shape=(8, 3))
t = api.relayout(
t, Layout.batch_sharded(mesh=self.mesh, batch_dim=_BATCH_DIM, rank=2)
)
with self.assertRaisesRegex(
errors_impl.UnimplementedError, "not supported"
):
# This is an Op that we don't have a SPMD expander.
gen_collective_ops.collective_reduce_v2(
t,
group_size=1,
group_key=1030,
instance_key=1,
merge_op="Add",
final_op="Id",
ordering_token=[],
)
| DTensorDeviceTest |
python | django__django | django/db/models/fields/related_lookups.py | {
"start": 4109,
"end": 5682
} | class ____:
def get_prep_lookup(self):
if not isinstance(self.lhs, ColPairs) and not hasattr(
self.rhs, "resolve_expression"
):
# If we get here, we are dealing with single-column relations.
self.rhs = get_normalized_value(self.rhs, self.lhs)[0]
# We need to run the related field's get_prep_value(). Consider
# case ForeignKey to IntegerField given value 'abc'. The ForeignKey
# itself doesn't have validation for non-integers, so we must run
# validation using the target field.
if self.prepare_rhs and hasattr(self.lhs.output_field, "path_infos"):
# Get the target field. We can safely assume there is only one
# as we don't get to the direct value branch otherwise.
target_field = self.lhs.output_field.path_infos[-1].target_fields[-1]
self.rhs = target_field.get_prep_value(self.rhs)
return super().get_prep_lookup()
def as_sql(self, compiler, connection):
if isinstance(self.lhs, ColPairs):
if not self.rhs_is_direct_value():
raise ValueError(
f"'{self.lookup_name}' doesn't support multi-column subqueries."
)
self.rhs = get_normalized_value(self.rhs, self.lhs)
lookup_class = tuple_lookups[self.lookup_name]
lookup = lookup_class(self.lhs, self.rhs)
return compiler.compile(lookup)
return super().as_sql(compiler, connection)
| RelatedLookupMixin |
python | altair-viz__altair | sphinxext/code_ref.py | {
"start": 9407,
"end": 12089
} | class ____(SphinxDirective):
"""
Formatted code block, referencing the contents of a function definition.
Options:
.. altair-code-ref::
:output: [code, plot]
:fold: flag
:summary: str
Examples
--------
Reference a function, generating a code block:
.. altair-code-ref:: package.module.function
Wrap the code block in a collapsible `details`_ tag:
.. altair-code-ref:: package.module.function
:fold:
Override default ``"Show code"`` `details`_ summary:
.. altair-code-ref:: package.module.function
:fold:
:summary: Look here!
Use `altair-plot`_ instead of a code block:
.. altair-code-ref:: package.module.function
:output: plot
.. note::
Using `altair-plot`_ currently ignores the other options.
.. _details:
https://developer.mozilla.org/en-US/docs/Web/HTML/Element/details
.. _altair-plot:
https://github.com/vega/sphinxext-altair
"""
has_content: ClassVar[bool] = False
required_arguments: ClassVar[int] = 1
option_spec: ClassVar[dict[_Option, Callable[[str], Any]]] = { # pyright: ignore[reportIncompatibleVariableOverride]
"output": validate_output,
"fold": directives.flag,
"summary": directives.unchanged_required,
}
def __init__(
self,
name: str,
arguments: list[str],
options: dict[_Option, Any],
content: StringList,
lineno: int,
content_offset: int,
block_text: str,
state: RSTState,
state_machine: RSTStateMachine,
) -> None:
super().__init__(name, arguments, options, content, lineno, content_offset, block_text, state, state_machine) # fmt: skip # pyright: ignore[reportArgumentType]
self.options: dict[_Option, Any] # pyright: ignore[reportIncompatibleVariableOverride]
def run(self) -> Sequence[nodes.Node]:
qual_name = self.arguments[0]
module_name, func_name = qual_name.rsplit(".", 1)
output: _OutputLong = self.options.get("output", "code-block")
content = extract_func_def(module_name, func_name, output=output)
parsed = nested_parse_to_nodes(self.state, content)
return maybe_details(parsed, self.options, default_summary="Show code")
def setup(app: Sphinx) -> None:
app.add_directive_to_domain("py", "altair-code-ref", CodeRefDirective)
app.add_js_file(_PYSCRIPT_URL, loading_method="defer", type="module")
# app.add_directive("altair-pyscript", PyScriptDirective)
app.add_directive("altair-theme", ThemeDirective)
| CodeRefDirective |
python | getsentry__sentry | tests/sentry/integrations/slack/notifications/test_deploy.py | {
"start": 339,
"end": 2898
} | class ____(SlackActivityNotificationTest):
def test_deploy_block(self) -> None:
"""
Test that a Slack message is sent with the expected payload when a deploy happens.
and block kit is enabled.
"""
release = self.create_release(
version="meow" * 10,
date_released=timezone.now(),
)
# The projects can appear out of order.
projects = (self.project, self.create_project(name="battlesnake"))
SLUGS_TO_PROJECT = {project.slug: project for project in projects}
for project in projects:
release.add_project(project)
deploy = Deploy.objects.create(
release=release,
organization_id=self.organization.id,
environment_id=self.environment.id,
)
notification = ReleaseActivityNotification(
Activity(
project=self.project,
user_id=self.user.id,
type=ActivityType.RELEASE.value,
data={"version": release.version, "deploy_id": deploy.id},
)
)
with self.tasks():
notification.send()
blocks = orjson.loads(self.mock_post.call_args.kwargs["blocks"])
fallback_text = self.mock_post.call_args.kwargs["text"]
assert (
fallback_text
== f"Release {release.version} was deployed to {self.environment.name} for these projects"
)
assert blocks[0]["text"]["text"] == fallback_text
first_project = None
for i in range(len(projects)):
project = SLUGS_TO_PROJECT[blocks[2]["elements"][i]["text"]["text"]]
if not first_project:
first_project = project
assert (
blocks[2]["elements"][i]["url"]
== f"http://testserver/organizations/{self.organization.slug}/releases/"
f"{release.version}/?project={project.id}&unselectedSeries=Healthy&referrer=release_activity¬ification_uuid={notification.notification_uuid}"
)
assert blocks[2]["elements"][i]["value"] == "link_clicked"
assert first_project is not None
# footer project is the first project in the actions list
assert (
blocks[1]["elements"][0]["text"]
== f"{first_project.slug} | <http://testserver/settings/account/notifications/deploy/?referrer=release_activity-slack-user¬ification_uuid={notification.notification_uuid}|Notification Settings>"
)
| SlackDeployNotificationTest |
python | doocs__leetcode | solution/0900-0999/0906.Super Palindromes/Solution.py | {
"start": 148,
"end": 533
} | class ____:
def superpalindromesInRange(self, left: str, right: str) -> int:
def is_palindrome(x: int) -> bool:
y, t = 0, x
while t:
y = y * 10 + t % 10
t //= 10
return x == y
l, r = int(left), int(right)
return sum(l <= x <= r and is_palindrome(x) for x in map(lambda x: x * x, ps))
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_textbox40.py | {
"start": 315,
"end": 1612
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("textbox40.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [79264000, 79486976]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
worksheet.insert_image(
"E25", self.image_dir + "red.png", {"url": "https://github.com/jmcnamara"}
)
worksheet.insert_textbox(
"G25", "This is some text", {"url": "https://github.com/jmcnamara"}
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | spack__spack | lib/spack/spack/vendor/pyrsistent/_plist.py | {
"start": 107,
"end": 883
} | class ____(object):
"""
Helper class to allow construction of a list without
having to reverse it in the end.
"""
__slots__ = ('_head', '_tail')
def __init__(self):
self._head = _EMPTY_PLIST
self._tail = _EMPTY_PLIST
def _append(self, elem, constructor):
if not self._tail:
self._head = constructor(elem)
self._tail = self._head
else:
self._tail.rest = constructor(elem)
self._tail = self._tail.rest
return self._head
def append_elem(self, elem):
return self._append(elem, lambda e: PList(e, _EMPTY_PLIST))
def append_plist(self, pl):
return self._append(pl, lambda l: l)
def build(self):
return self._head
| _PListBuilder |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_superfences.py | {
"start": 7211,
"end": 8209
} | class ____(util.MdCase):
"""Test highlight line wraps."""
extension = ['pymdownx.highlight', 'pymdownx.superfences']
extension_configs = {
'pymdownx.highlight': {
'line_spans': '__my_span',
'linenums_style': 'table'
}
}
def test_linespans(self):
"""Test wrapping a line in line spans."""
self.check_markdown(
r'''
```python linenums="2"
import test
```
''',
r'''
<div class="highlight"><table class="highlighttable"><tr><td class="linenos"><div class="linenodiv"><pre><span></span><span class="normal">2</span></pre></div></td><td class="code"><div><pre><span></span><code><span id="__my_span-0-2"><span class="kn">import</span><span class="w"> </span><span class="nn">test</span>
</span></code></pre></div></td></tr></table></div>
''', # noqa: E501
True
)
| TestHighlightLineWrapsPymdownsTable |
python | huggingface__transformers | src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py | {
"start": 130173,
"end": 144297
} | class ____(SeamlessM4Tv2PreTrainedModel, GenerationMixin):
input_modalities = "audio"
_keys_to_ignore_on_load_missing = ["text_encoder", "t2u_model", "vocoder"]
main_input_name = "input_features"
_tied_weights_keys = {
"lm_head.weight": "shared.weight",
"text_decoder.embed_tokens.weight": "shared.weight",
}
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.__init__ with SeamlessM4T->SeamlessM4Tv2
def __init__(self, config: SeamlessM4Tv2Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
self.speech_encoder = SeamlessM4Tv2SpeechEncoder(config)
self.text_decoder = SeamlessM4Tv2Decoder(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.get_encoder
def get_encoder(self):
return self.speech_encoder
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.get_decoder
def get_decoder(self):
return self.text_decoder
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.get_input_embeddings
def get_input_embeddings(self):
return self.text_decoder.embed_tokens
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.set_input_embeddings
def set_input_embeddings(self, value):
self.text_decoder.embed_tokens = value
@auto_docstring(custom_args=SEAMLESS_M4T_V2_COMMON_CUSTOM_ARGS)
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.forward
def forward(
self,
input_features: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
if labels is not None:
if use_cache:
logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.speech_encoder(
input_features=input_features,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
encoder_attention_mask = attention_mask
if attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
encoder_outputs[0].device
)
encoder_attention_mask = _compute_new_attention_mask(
hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths
)
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
decoder_outputs = self.text_decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
lm_logits = self.lm_head(decoder_outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
outputs = decoder_outputs + encoder_outputs
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForSpeechToText.generate
def generate(
self,
input_features=None,
tgt_lang=None,
generation_config=None,
logits_processor=None,
stopping_criteria=None,
prefix_allowed_tokens_fn=None,
synced_gpus=False,
**kwargs,
):
"""
Generates sequences of token ids.
<Tip warning={true}>
Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
model's default generation configuration. You can override any `generation_config` by passing the corresponding
parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Parameters:
input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`):
Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
[`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
tgt_lang (`str`, *optional*):
The language to use as target language for translation.
generation_config (`~generation.GenerationConfig`, *optional*):
The generation configuration to be used as base parametrization for the generation call. `**kwargs`
passed to generate matching the attributes of `generation_config` will override them. If
`generation_config` is not provided, the default will be used, which had the following loading
priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
default values, whose documentation should be checked to parameterize generation.
logits_processor (`LogitsProcessorList`, *optional*):
Custom logits processors that complement the default logits processors built from arguments and
generation config. If a logit processor is passed that is already created with the arguments or a
generation config an error is thrown. This feature is intended for advanced users.
stopping_criteria (`StoppingCriteriaList`, *optional*):
Custom stopping criteria that complement the default stopping criteria built from arguments and a
generation config. If a stopping criteria is passed that is already created with the arguments or a
generation config an error is thrown. This feature is intended for advanced users.
prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], list[int]]`, *optional*):
If provided, this function constraints the beam search to allowed tokens only at each step. If not
provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
`input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned
on the batch ID `batch_id` and the previously generated tokens `inputs_ids`. This argument is useful
for constrained generation conditioned on the prefix, as described in [Autoregressive Entity
Retrieval](https://huggingface.co/papers/2010.00904).
synced_gpus (`bool`, *optional*, defaults to `False`):
Whether to continue running the while loop until max_length (needed to avoid deadlocking with
`FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
kwargs (`dict[str, Any]`, *optional*):
Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
forwarded to the `forward` function of the model.
Return:
[`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. The possible
[`~utils.ModelOutput`] types are:
- [`~generation.GenerateEncoderDecoderOutput`],
- [`~generation.GenerateBeamEncoderDecoderOutput`]
"""
text_decoder_input_ids = kwargs.pop("decoder_input_ids", None)
# overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids.
input_features = input_features if input_features is not None else kwargs.pop("inputs")
if tgt_lang is not None:
inputs = kwargs.get("input_embeds") if input_features is None else input_features
inputs = (
inputs
if inputs is not None
else kwargs.get("encoder_outputs", {"last_hidden_state": None})["last_hidden_state"]
)
batch_size = len(inputs)
if hasattr(self.generation_config, "text_decoder_lang_to_code_id"):
# also accept __xxx__
tgt_lang = tgt_lang.replace("__", "")
if tgt_lang not in self.generation_config.text_decoder_lang_to_code_id:
raise ValueError(
f"""`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in
{", ".join(self.generation_config.text_decoder_lang_to_code_id.keys())}"""
)
# tgt_lang gets priority over decoder input ids
text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size, device=self.device)
else:
raise ValueError(
"""This model generation config doesn't have a `text_decoder_lang_to_code_id` key which maps
the target language to the right token id. Make sure to load the right generation config."""
)
else:
# only a warning, otherwise errors appear in the tests
logger.warning(
"""You must either specify a `tgt_lang` or pass a correct `text_decoder_input_ids` to get
a correct generation, otherwise the generation will probably make no sense."""
)
return super().generate(
input_features,
generation_config,
logits_processor,
stopping_criteria,
prefix_allowed_tokens_fn,
synced_gpus,
decoder_input_ids=text_decoder_input_ids,
**kwargs,
)
@auto_docstring(
custom_intro="""
The text-to-speech SeamlessM4Tv2 Model transformer which can be used for T2ST.
"""
)
| SeamlessM4Tv2ForSpeechToText |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 43849,
"end": 44206
} | class ____(sgqlc.types.Enum):
"""The permissions available to members on an Organization.
Enumeration Choices:
* `ADMIN`: Can read, clone, push, and add collaborators to
repositories.
* `READ`: Can read and clone repositories.
"""
__schema__ = github_schema
__choices__ = ("ADMIN", "READ")
| OrgAddMemberAuditEntryPermission |
python | pandas-dev__pandas | pandas/tests/series/methods/test_matmul.py | {
"start": 132,
"end": 2767
} | class ____:
def test_matmul(self):
# matmul test is for GH#10259
a = Series(
np.random.default_rng(2).standard_normal(4), index=["p", "q", "r", "s"]
)
b = DataFrame(
np.random.default_rng(2).standard_normal((3, 4)),
index=["1", "2", "3"],
columns=["p", "q", "r", "s"],
).T
# Series @ DataFrame -> Series
result = operator.matmul(a, b)
expected = Series(np.dot(a.values, b.values), index=["1", "2", "3"])
tm.assert_series_equal(result, expected)
# DataFrame @ Series -> Series
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
tm.assert_series_equal(result, expected)
# Series @ Series -> scalar
result = operator.matmul(a, a)
expected = np.dot(a.values, a.values)
tm.assert_almost_equal(result, expected)
# GH#21530
# vector (1D np.array) @ Series (__rmatmul__)
result = operator.matmul(a.values, a)
expected = np.dot(a.values, a.values)
tm.assert_almost_equal(result, expected)
# GH#21530
# vector (1D list) @ Series (__rmatmul__)
result = operator.matmul(a.values.tolist(), a)
expected = np.dot(a.values, a.values)
tm.assert_almost_equal(result, expected)
# GH#21530
# matrix (2D np.array) @ Series (__rmatmul__)
result = operator.matmul(b.T.values, a)
expected = np.dot(b.T.values, a.values)
tm.assert_almost_equal(result, expected)
# GH#21530
# matrix (2D nested lists) @ Series (__rmatmul__)
result = operator.matmul(b.T.values.tolist(), a)
expected = np.dot(b.T.values, a.values)
tm.assert_almost_equal(result, expected)
# mixed dtype DataFrame @ Series
a["p"] = int(a.p)
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
tm.assert_series_equal(result, expected)
# different dtypes DataFrame @ Series
a = a.astype(int)
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
tm.assert_series_equal(result, expected)
msg = r"Dot product shape mismatch, \(4,\) vs \(3,\)"
# exception raised is of type Exception
with pytest.raises(Exception, match=msg):
a.dot(a.values[:3])
msg = "matrices are not aligned"
with pytest.raises(ValueError, match=msg):
a.dot(b.T)
| TestMatmul |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/resources.py | {
"start": 8323,
"end": 8488
} | class ____(graphene.ObjectType):
results = non_null_list(GrapheneResourceDetails)
class Meta:
name = "ResourceDetailsList"
| GrapheneResourceDetailsList |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/images/api/blobstore.py | {
"start": 881,
"end": 1633
} | class ____(webapp2.RequestHandler):
def get(self):
blob_key = self.request.get("blob_key")
if blob_key:
blob_info = blobstore.get(blob_key)
if blob_info:
img = images.Image(blob_key=blob_key)
img.resize(width=80, height=100)
img.im_feeling_lucky()
thumbnail = img.execute_transforms(output_encoding=images.JPEG)
self.response.headers["Content-Type"] = "image/jpeg"
self.response.out.write(thumbnail)
return
# Either "blob_key" wasn't provided, or there was no value with that ID
# in the Blobstore.
self.error(404)
# [END gae_images_api_blobstore_thumbnailer]
| Thumbnailer |
python | walkccc__LeetCode | solutions/2905. Find Indices With Index and Value Difference II/2905.py | {
"start": 0,
"end": 788
} | class ____:
def findIndices(
self,
nums: list[int],
indexDifference: int,
valueDifference: int,
) -> list[int]:
# nums[minIndex] := the minimum number with enough index different from the current number
minIndex = 0
# nums[maxIndex] := the maximum number with enough index different from the current number
maxIndex = 0
for i in range(indexDifference, len(nums)):
if nums[i - indexDifference] < nums[minIndex]:
minIndex = i - indexDifference
if nums[i - indexDifference] > nums[maxIndex]:
maxIndex = i - indexDifference
if nums[i] - nums[minIndex] >= valueDifference:
return [i, minIndex]
if nums[maxIndex] - nums[i] >= valueDifference:
return [i, maxIndex]
return [-1, -1]
| Solution |
python | walkccc__LeetCode | solutions/791. Custom Sort String/791.py | {
"start": 0,
"end": 397
} | class ____:
def customSortString(self, order: str, s: str) -> str:
ans = ""
count = [0] * 26
for c in s:
count[ord(c) - ord('a')] += 1
for c in order:
while count[ord(c) - ord('a')] > 0:
ans += c
count[ord(c) - ord('a')] -= 1
for c in string.ascii_lowercase:
for _ in range(count[ord(c) - ord('a')]):
ans += c
return ans
| Solution |
python | crytic__slither | slither/tools/upgradeability/checks/variables_order.py | {
"start": 1849,
"end": 4749
} | class ____(AbstractCheck):
ARGUMENT = "order-vars-proxy"
IMPACT = CheckClassification.HIGH
HELP = "Incorrect vars order with the proxy"
WIKI = "https://github.com/crytic/slither/wiki/Upgradeability-Checks#incorrect-variables-with-the-proxy"
WIKI_TITLE = "Incorrect variables with the proxy"
# region wiki_description
WIKI_DESCRIPTION = """
Detect variables that are different between the contract and the proxy.
"""
# endregion wiki_description
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract Contract{
uint variable1;
}
contract Proxy{
address variable1;
}
```
`Contract` and `Proxy` do not have the same storage layout. As a result the storage of both contracts can be corrupted.
"""
# endregion wiki_exploit_scenario
# region wiki_recommendation
WIKI_RECOMMENDATION = """
Avoid variables in the proxy. If a variable is in the proxy, ensure it has the same layout than in the contract.
"""
# endregion wiki_recommendation
REQUIRE_CONTRACT = True
REQUIRE_PROXY = True
def _contract1(self) -> Contract:
return self.contract
def _contract2(self) -> Contract:
assert self.proxy
return self.proxy
def _check(self) -> List[Output]:
contract1 = self._contract1()
contract2 = self._contract2()
results: List[Output] = []
def _check_internal(
contract1: Contract, contract2: Contract, results: List[Output], is_transient: bool
):
if is_transient:
order1 = contract1.transient_variables_ordered
order2 = contract2.transient_variables_ordered
else:
order1 = contract1.storage_variables_ordered
order2 = contract2.storage_variables_ordered
for idx, _ in enumerate(order1):
if len(order2) <= idx:
# Handle by MissingVariable
return
variable1 = order1[idx]
variable2 = order2[idx]
if (variable1.name != variable2.name) or (variable1.type != variable2.type):
info: CHECK_INFO = [
"Different variables between ",
contract1,
" and ",
contract2,
"\n",
]
info += ["\t ", variable1, "\n"]
info += ["\t ", variable2, "\n"]
json = self.generate_result(info)
results.append(json)
# Checking state variables with storage location
_check_internal(contract1, contract2, results, False)
# Checking state variables with transient location
_check_internal(contract1, contract2, results, True)
return results
| DifferentVariableContractProxy |
python | pytransitions__transitions | transitions/extensions/nesting.py | {
"start": 6137,
"end": 6484
} | class ____(EventData):
"""Collection of relevant data related to the ongoing nested transition attempt."""
def __init__(self, state, event, machine, model, args, kwargs):
super(NestedEventData, self).__init__(state, event, machine, model, args, kwargs)
self.source_path = None
self.source_name = None
| NestedEventData |
python | huggingface__transformers | src/transformers/models/falcon_mamba/modeling_falcon_mamba.py | {
"start": 30275,
"end": 31233
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
cache_params (`FalconMambaCache`):
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
avoid providing the old `input_ids`.
Includes both the State space model state matrices after the selective scan, and the Convolutional states
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
cache_params: Optional[FalconMambaCache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
@auto_docstring
| FalconMambaCausalLMOutput |
python | PrefectHQ__prefect | tests/test_tasks.py | {
"start": 72931,
"end": 74356
} | class ____:
async def test_task_timeouts_actually_timeout(self, timeout_test_flow):
flow_state = timeout_test_flow(return_state=True)
timed_out, _, _ = await flow_state.result(raise_on_failure=False)
assert timed_out.name == "TimedOut"
assert timed_out.is_failed()
async def test_task_timeouts_are_not_task_crashes(self, timeout_test_flow):
flow_state = timeout_test_flow(return_state=True)
timed_out, _, _ = await flow_state.result(raise_on_failure=False)
assert timed_out.is_crashed() is False
async def test_task_timeouts_do_not_crash_flow_runs(self, timeout_test_flow):
flow_state = timeout_test_flow(return_state=True)
timed_out, _, _ = await flow_state.result(raise_on_failure=False)
assert timed_out.name == "TimedOut"
assert timed_out.is_failed()
assert flow_state.is_failed()
assert flow_state.is_crashed() is False
async def test_task_timeouts_do_not_timeout_prematurely(self):
@task(timeout_seconds=100)
def my_task():
time.sleep(1)
return 42
@flow
def my_flow():
x = my_task.submit()
return x
flow_state = my_flow(return_state=True)
assert flow_state.type == StateType.COMPLETED
task_res = await flow_state.result()
assert task_res.type == StateType.COMPLETED
| TestTaskTimeouts |
python | sympy__sympy | sympy/stats/drv_types.py | {
"start": 18577,
"end": 19865
} | class ____(SingleDiscreteDistribution):
_argnames = ('s',)
set = S.Naturals
@staticmethod
def check(s):
_value_check(s > 1, 's should be greater than 1')
def pdf(self, k):
s = self.s
return 1 / (k**s * zeta(s))
def _characteristic_function(self, t):
return polylog(self.s, exp(I*t)) / zeta(self.s)
def _moment_generating_function(self, t):
return polylog(self.s, exp(t)) / zeta(self.s)
def Zeta(name, s):
r"""
Create a discrete random variable with a Zeta distribution.
Explanation
===========
The density of the Zeta distribution is given by
.. math::
f(k) := \frac{1}{k^s \zeta{(s)}}
Parameters
==========
s : A value greater than 1
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import Zeta, density, E, variance
>>> from sympy import Symbol
>>> s = 5
>>> z = Symbol("z")
>>> X = Zeta("x", s)
>>> density(X)(z)
1/(z**5*zeta(5))
>>> E(X)
pi**4/(90*zeta(5))
>>> variance(X)
-pi**8/(8100*zeta(5)**2) + zeta(3)/zeta(5)
References
==========
.. [1] https://en.wikipedia.org/wiki/Zeta_distribution
"""
return rv(name, ZetaDistribution, s)
| ZetaDistribution |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_icd_ten_category_or_subcategory.py | {
"start": 2008,
"end": 5194
} | class ____(ColumnMapExpectation):
"""Expect column values to consist only of ICD-10 categories or subcategories."""
examples = [
{
"data": {
"all_valid_categories_or_subcategories": [
"C00",
"C00.1",
"C002",
"C01",
"C00.4",
],
"mostly_valid_categories_or_subcategories": [
"D10",
"D17.1",
"D171",
"C00.3",
"INVALID",
],
"some_invalid_categories_or_subcategories": [
"C00",
"C00.1",
"C00-C14",
"DOG",
"Z999",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_valid_categories_or_subcategories"},
"out": {
"success": True,
},
},
{
"title": "mostly_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "mostly_valid_categories_or_subcategories",
"mostly": 0.8,
},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "some_invalid_categories_or_subcategories",
"mostly": 1,
},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.match_valid_icd_ten_category_or_subcategory"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"tags": [
"hackathon",
"semantic-type",
"experimental",
"typed-entities",
],
"contributors": [
"@andyjessen",
],
"requirements": ["simple_icd_10"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeIcdTenCategoryOrSubcategory().print_diagnostic_checklist()
| ExpectColumnValuesToBeIcdTenCategoryOrSubcategory |
python | pennersr__django-allauth | allauth/mfa/recovery_codes/views.py | {
"start": 1997,
"end": 3030
} | class ____(TemplateView):
template_name = "mfa/recovery_codes/download.txt"
content_type = "text/plain"
def dispatch(self, request, *args, **kwargs):
self.authenticator = flows.view_recovery_codes(self.request)
if not self.authenticator:
raise Http404()
self.unused_codes = self.authenticator.wrap().get_unused_codes()
if not self.unused_codes:
return Http404()
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ret = super().get_context_data(**kwargs)
ret["unused_codes"] = self.unused_codes
return ret
def render_to_response(self, context, **response_kwargs):
response = super().render_to_response(context, **response_kwargs)
response["Content-Disposition"] = 'attachment; filename="recovery-codes.txt"'
return response
download_recovery_codes = DownloadRecoveryCodesView.as_view()
@method_decorator(login_required, name="dispatch")
| DownloadRecoveryCodesView |
python | coleifer__peewee | tests/mysql_ext.py | {
"start": 1112,
"end": 2692
} | class ____(ModelTestCase):
database = mysql_ext_db
requires = [Person, Note]
def test_basic_operations(self):
with self.database.atomic():
charlie, huey, zaizee = [Person.create(first=f, last='leifer')
for f in ('charlie', 'huey', 'zaizee')]
# Use nested-transaction.
with self.database.atomic():
data = (
(charlie, ('foo', 'bar', 'zai')),
(huey, ('meow', 'purr', 'hiss')),
(zaizee, ()))
for person, notes in data:
for note in notes:
Note.create(person=person, content=note)
with self.database.atomic() as sp:
Person.create(first='x', last='y')
sp.rollback()
people = Person.select().order_by(Person.first)
self.assertEqual([person.first for person in people],
['charlie', 'huey', 'zaizee'])
with self.assertQueryCount(1):
notes = (Note
.select(Note, Person)
.join(Person)
.order_by(Note.content))
self.assertEqual([(n.person.first, n.content) for n in notes], [
('charlie', 'bar'),
('charlie', 'foo'),
('huey', 'hiss'),
('huey', 'meow'),
('huey', 'purr'),
('charlie', 'zai')])
@requires_mysql
@skip_if(mariadb is None, 'mariadb connector not installed')
| TestMySQLConnector |
python | django__django | django/contrib/auth/password_validation.py | {
"start": 9127,
"end": 9629
} | class ____:
"""
Validate that the password is not entirely numeric.
"""
def validate(self, password, user=None):
if password.isdigit():
raise ValidationError(
self.get_error_message(),
code="password_entirely_numeric",
)
def get_error_message(self):
return _("This password is entirely numeric.")
def get_help_text(self):
return _("Your password can’t be entirely numeric.")
| NumericPasswordValidator |
python | openai__openai-python | src/openai/_exceptions.py | {
"start": 2237,
"end": 2724
} | class ____(APIError):
"""Raised when an API response has a status code of 4xx or 5xx."""
response: httpx.Response
status_code: int
request_id: str | None
def __init__(self, message: str, *, response: httpx.Response, body: object | None) -> None:
super().__init__(message, response.request, body=body)
self.response = response
self.status_code = response.status_code
self.request_id = response.headers.get("x-request-id")
| APIStatusError |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column.py | {
"start": 116545,
"end": 122945
} | class ____(_DenseColumn, _SequenceDenseColumn,
collections.namedtuple('_IndicatorColumn',
['categorical_column'])):
"""Represents a one-hot column for use in deep networks.
Args:
categorical_column: A `_CategoricalColumn` which is created by
`categorical_column_with_*` function.
"""
@property
def name(self):
return '{}_indicator'.format(self.categorical_column.name)
def _transform_feature(self, inputs):
"""Returns dense `Tensor` representing feature.
Args:
inputs: A `_LazyBuilder` object to access inputs.
Returns:
Transformed feature `Tensor`.
Raises:
ValueError: if input rank is not known at graph building time.
"""
id_weight_pair = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
id_tensor = id_weight_pair.id_tensor
weight_tensor = id_weight_pair.weight_tensor
# If the underlying column is weighted, return the input as a dense tensor.
if weight_tensor is not None:
weighted_column = sparse_ops.sparse_merge(
sp_ids=id_tensor,
sp_values=weight_tensor,
vocab_size=int(self._variable_shape[-1]))
# Remove (?, -1) index.
weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0],
weighted_column.dense_shape)
# Use scatter_nd to merge duplicated indices if existed,
# instead of sparse_tensor_to_dense.
return array_ops.scatter_nd(weighted_column.indices,
weighted_column.values,
weighted_column.dense_shape)
dense_id_tensor = sparse_ops.sparse_tensor_to_dense(
id_tensor, default_value=-1)
# One hot must be float for tf.concat reasons since all other inputs to
# input_layer are float32.
one_hot_id_tensor = array_ops.one_hot(
dense_id_tensor,
depth=self._variable_shape[-1],
on_value=1.0,
off_value=0.0)
# Reduce to get a multi-hot per example.
return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])
@property
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
@property
def _variable_shape(self):
"""Returns a `TensorShape` representing the shape of the dense `Tensor`."""
return tensor_shape.TensorShape([1, self.categorical_column._num_buckets]) # pylint: disable=protected-access
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
"""Returns dense `Tensor` representing feature.
Args:
inputs: A `_LazyBuilder` object to access inputs.
weight_collections: Unused `weight_collections` since no variables are
created in this function.
trainable: Unused `trainable` bool since no variables are created in this
function.
Returns:
Dense `Tensor` created within `_transform_feature`.
Raises:
ValueError: If `categorical_column` is a `_SequenceCategoricalColumn`.
"""
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
if isinstance(self.categorical_column, _SequenceCategoricalColumn):
raise ValueError(
'In indicator_column: {}. '
'categorical_column must not be of type _SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use input_layer, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'sequence_input_layer instead of input_layer. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
return inputs.get(self)
def _get_sequence_dense_tensor(self,
inputs,
weight_collections=None,
trainable=None):
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
if not isinstance(self.categorical_column, _SequenceCategoricalColumn):
raise ValueError(
'In indicator_column: {}. '
'categorical_column must be of type _SequenceCategoricalColumn '
'to use sequence_input_layer. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
dense_tensor = inputs.get(self)
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return _SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
def _verify_static_batch_size_equality(tensors, columns):
"""Validates that the first dim (batch size) of all tensors are equal or None.
Args:
tensors: list of tensors to check.
columns: list of feature columns matching tensors. Will be used for error
messaging.
Raises:
ValueError: if one of the tensors has a variant batch size
"""
# bath_size is a tf.compat.v1.Dimension object.
expected_batch_size = None
for i in range(0, len(tensors)):
if tensors[i].shape.dims[0].value is not None:
if expected_batch_size is None:
bath_size_column_index = i
expected_batch_size = tensors[i].shape.dims[0]
elif not expected_batch_size.is_compatible_with(tensors[i].shape.dims[0]):
raise ValueError(
'Batch size (first dimension) of each feature must be same. '
'Batch size of columns ({}, {}): ({}, {})'.format(
columns[bath_size_column_index].name, columns[i].name,
expected_batch_size, tensors[i].shape.dims[0]))
| _IndicatorColumn |
python | openai__openai-python | src/openai/resources/fine_tuning/checkpoints/checkpoints.py | {
"start": 2980,
"end": 3284
} | class ____:
def __init__(self, checkpoints: Checkpoints) -> None:
self._checkpoints = checkpoints
@cached_property
def permissions(self) -> PermissionsWithStreamingResponse:
return PermissionsWithStreamingResponse(self._checkpoints.permissions)
| CheckpointsWithStreamingResponse |
python | sqlalchemy__sqlalchemy | test/orm/test_query.py | {
"start": 207117,
"end": 216628
} | class ____(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_no_strings(self):
User = self.classes.User
sess = fixture_session()
q = sess.query(User)
u1 = q.filter_by(name="jack").one()
with expect_raises_message(
sa_exc.ArgumentError,
r"with_parent\(\) accepts class-bound mapped "
"attributes, not strings",
):
with_parent(u1, "orders")
def test_o2m(self):
User, Order = (
self.classes.User,
self.classes.Order,
)
sess = fixture_session()
q = sess.query(User)
u1 = q.filter_by(name="jack").one()
o = sess.query(Order).filter(with_parent(u1, User.orders)).all()
assert [
Order(description="order 1"),
Order(description="order 3"),
Order(description="order 5"),
] == o
def test_invalid_property(self):
"""Test if with_parent is passed a non-relationship
found_during_type_annotation
"""
User = self.classes.User
sess = fixture_session()
u1 = sess.get(User, 7)
with expect_raises_message(
sa_exc.ArgumentError,
r"Expected relationship property for with_parent\(\), "
"got User.name",
):
with_parent(u1, User.name)
def test_select_from(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
u1 = sess.get(User, 7)
q = (
sess.query(Address)
.select_from(Address)
.filter(with_parent(u1, User.addresses))
)
self.assert_compile(
q,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id",
{"param_1": 7},
)
def test_from_entity_standalone_fn(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
u1 = sess.get(User, 7)
q = sess.query(User, Address).filter(
with_parent(u1, User.addresses, from_entity=Address)
)
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name, "
"addresses.id AS addresses_id, addresses.user_id "
"AS addresses_user_id, "
"addresses.email_address AS addresses_email_address "
"FROM users, addresses "
"WHERE :param_1 = addresses.user_id",
{"param_1": 7},
)
def test_select_from_alias(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
u1 = sess.get(User, 7)
a1 = aliased(Address)
q = sess.query(a1).filter(with_parent(u1, User.addresses.of_type(a1)))
self.assert_compile(
q,
"SELECT addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM addresses AS addresses_1 "
"WHERE :param_1 = addresses_1.user_id",
{"param_1": 7},
)
def test_select_from_alias_from_entity(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
u1 = sess.get(User, 7)
a1 = aliased(Address)
a2 = aliased(Address)
q = sess.query(a1, a2).filter(
with_parent(u1, User.addresses, from_entity=a2)
)
self.assert_compile(
q,
"SELECT addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address, "
"addresses_2.id AS addresses_2_id, "
"addresses_2.user_id AS addresses_2_user_id, "
"addresses_2.email_address AS addresses_2_email_address "
"FROM addresses AS addresses_1, "
"addresses AS addresses_2 WHERE :param_1 = addresses_2.user_id",
{"param_1": 7},
)
def test_select_from_alias_of_type(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
u1 = sess.get(User, 7)
a1 = aliased(Address)
a2 = aliased(Address)
q = sess.query(a1, a2).filter(
with_parent(u1, User.addresses.of_type(a2))
)
self.assert_compile(
q,
"SELECT addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address, "
"addresses_2.id AS addresses_2_id, "
"addresses_2.user_id AS addresses_2_user_id, "
"addresses_2.email_address AS addresses_2_email_address "
"FROM addresses AS addresses_1, "
"addresses AS addresses_2 WHERE :param_1 = addresses_2.user_id",
{"param_1": 7},
)
def test_noparent(self):
Item, User = self.classes.Item, self.classes.User
sess = fixture_session()
q = sess.query(User)
u1 = q.filter_by(name="jack").one()
# TODO: this can perhaps raise an error, then again it's doing what's
# asked...
q = sess.query(Item).filter(with_parent(u1, User.orders))
self.assert_compile(
q,
"SELECT items.id AS items_id, "
"items.description AS items_description "
"FROM items, orders WHERE :param_1 = orders.user_id",
)
def test_m2m(self):
Item, Keyword = self.classes.Item, self.classes.Keyword
sess = fixture_session()
i1 = sess.query(Item).filter_by(id=2).one()
k = sess.query(Keyword).filter(with_parent(i1, Item.keywords)).all()
eq_(
k,
[
Keyword(name="red"),
Keyword(name="small"),
Keyword(name="square"),
],
)
def test_with_transient(self):
User, Order = self.classes.User, self.classes.Order
sess = fixture_session()
q = sess.query(User)
u1 = q.filter_by(name="jack").one()
utrans = User(id=u1.id)
o = sess.query(Order).filter(with_parent(utrans, User.orders))
eq_(
[
Order(description="order 1"),
Order(description="order 3"),
Order(description="order 5"),
],
o.all(),
)
def test_with_pending_autoflush(self):
Order, User = self.classes.Order, self.classes.User
sess = fixture_session()
o1 = sess.query(Order).first()
opending = Order(id=20, user_id=o1.user_id)
sess.add(opending)
eq_(
sess.query(User).filter(with_parent(opending, Order.user)).one(),
User(id=o1.user_id),
)
def test_with_pending_no_autoflush(self):
Order, User = self.classes.Order, self.classes.User
sess = fixture_session(autoflush=False)
o1 = sess.query(Order).first()
opending = Order(user_id=o1.user_id)
sess.add(opending)
eq_(
sess.query(User).filter(with_parent(opending, Order.user)).one(),
User(id=o1.user_id),
)
def test_unique_binds_union(self):
"""bindparams used in the 'parent' query are unique"""
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
u1, u2 = sess.query(User).order_by(User.id)[0:2]
q1 = sess.query(Address).filter(with_parent(u1, User.addresses))
q2 = sess.query(Address).filter(with_parent(u2, User.addresses))
self.assert_compile(
q1.union(q2),
"SELECT anon_1.addresses_id AS anon_1_addresses_id, "
"anon_1.addresses_user_id AS anon_1_addresses_user_id, "
"anon_1.addresses_email_address AS "
"anon_1_addresses_email_address FROM (SELECT addresses.id AS "
"addresses_id, addresses.user_id AS addresses_user_id, "
"addresses.email_address AS addresses_email_address FROM "
"addresses WHERE :param_1 = addresses.user_id UNION SELECT "
"addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address "
"AS addresses_email_address "
"FROM addresses WHERE :param_2 = addresses.user_id) AS anon_1",
checkparams={"param_1": 7, "param_2": 8},
)
def test_unique_binds_or(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
u1, u2 = sess.query(User).order_by(User.id)[0:2]
self.assert_compile(
sess.query(Address).filter(
or_(
with_parent(u1, User.addresses),
with_parent(u2, User.addresses),
)
),
"SELECT addresses.id AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address AS "
"addresses_email_address FROM addresses WHERE "
":param_1 = addresses.user_id OR :param_2 = addresses.user_id",
checkparams={"param_1": 7, "param_2": 8},
)
| ParentTest |
python | geekcomputers__Python | nitkarshchourasia/to_sort/one_rep_max_calculator/one_rep_max_calculator_gui.py | {
"start": 23,
"end": 2356
} | class ____:
"""
A class used to calculate the estimated one-repetition maximum (1RM) for a weightlifting exercise.
Attributes
----------
window : tk.Tk
The main window of the application.
weight_entry : tk.Entry
Entry field to input the weight lifted.
rep_entry : tk.Entry
Entry field to input the number of reps performed.
result_value_label : tk.Label
Label to display the calculated 1RM.
Methods
-------
calculate_1rm():
Calculates the estimated 1RM based on the Epley formula.
display_result():
Displays the calculated 1RM in the application window.
run():
Runs the application.
"""
def __init__(self):
"""Initializes the OneRepMaxCalculator with a window and widgets."""
self.window = tk.Tk()
self.window.title("One-Rep Max Calculator")
self.window.geometry("300x150")
# Create and pack widgets
tk.Label(self.window, text="Enter the weight you lifted (in kg):").pack()
self.weight_entry = tk.Entry(self.window)
self.weight_entry.pack()
tk.Label(self.window, text="Enter the number of reps you performed:").pack()
self.rep_entry = tk.Entry(self.window)
self.rep_entry.pack()
tk.Button(self.window, text="Calculate", command=self.display_result).pack()
tk.Label(self.window, text="Your estimated one-rep max (1RM):").pack()
self.result_value_label = tk.Label(self.window)
self.result_value_label.pack()
def calculate_1rm(self):
"""Calculates and returns the estimated 1RM."""
weight = int(self.weight_entry.get())
reps = int(self.rep_entry.get())
return (weight * reps * 0.0333) + weight
def display_result(self):
"""Calculates the 1RM and updates result_value_label with it."""
one_rep_max = self.calculate_1rm()
self.result_value_label.config(text=f"{one_rep_max} kg")
def run(self):
"""Runs the Tkinter event loop."""
self.window.mainloop()
# Usage
if __name__ == "__main__":
calculator = OneRepMaxCalculator()
calculator.run()
# Improve the program.
# Make the fonts, bigger.
# - Use text formatting...
# Use dark mode.
# Have an option to use dark mode and light mode.
| OneRepMaxCalculator |
python | numpy__numpy | numpy/_core/_internal.py | {
"start": 17416,
"end": 29437
} | class ____:
def __init__(self, s):
self.s = s
self.byteorder = '@'
def advance(self, n):
res = self.s[:n]
self.s = self.s[n:]
return res
def consume(self, c):
if self.s[:len(c)] == c:
self.advance(len(c))
return True
return False
def consume_until(self, c):
if callable(c):
i = 0
while i < len(self.s) and not c(self.s[i]):
i = i + 1
return self.advance(i)
else:
i = self.s.index(c)
res = self.advance(i)
self.advance(len(c))
return res
@property
def next(self):
return self.s[0]
def __bool__(self):
return bool(self.s)
def _dtype_from_pep3118(spec):
stream = _Stream(spec)
dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
return dtype
def __dtype_from_pep3118(stream, is_subdtype):
field_spec = {
'names': [],
'formats': [],
'offsets': [],
'itemsize': 0
}
offset = 0
common_alignment = 1
is_padding = False
# Parse spec
while stream:
value = None
# End of structure, bail out to upper level
if stream.consume('}'):
break
# Sub-arrays (1)
shape = None
if stream.consume('('):
shape = stream.consume_until(')')
shape = tuple(map(int, shape.split(',')))
# Byte order
if stream.next in ('@', '=', '<', '>', '^', '!'):
byteorder = stream.advance(1)
if byteorder == '!':
byteorder = '>'
stream.byteorder = byteorder
# Byte order characters also control native vs. standard type sizes
if stream.byteorder in ('@', '^'):
type_map = _pep3118_native_map
type_map_chars = _pep3118_native_typechars
else:
type_map = _pep3118_standard_map
type_map_chars = _pep3118_standard_typechars
# Item sizes
itemsize_str = stream.consume_until(lambda c: not c.isdigit())
if itemsize_str:
itemsize = int(itemsize_str)
else:
itemsize = 1
# Data types
is_padding = False
if stream.consume('T{'):
value, align = __dtype_from_pep3118(
stream, is_subdtype=True)
elif stream.next in type_map_chars:
if stream.next == 'Z':
typechar = stream.advance(2)
else:
typechar = stream.advance(1)
is_padding = (typechar == 'x')
dtypechar = type_map[typechar]
if dtypechar in 'USV':
dtypechar += '%d' % itemsize
itemsize = 1
numpy_byteorder = {'@': '=', '^': '='}.get(
stream.byteorder, stream.byteorder)
value = dtype(numpy_byteorder + dtypechar)
align = value.alignment
elif stream.next in _pep3118_unsupported_map:
desc = _pep3118_unsupported_map[stream.next]
raise NotImplementedError(
f"Unrepresentable PEP 3118 data type {stream.next!r} ({desc})")
else:
raise ValueError(
f"Unknown PEP 3118 data type specifier {stream.s!r}"
)
#
# Native alignment may require padding
#
# Here we assume that the presence of a '@' character implicitly
# implies that the start of the array is *already* aligned.
#
extra_offset = 0
if stream.byteorder == '@':
start_padding = (-offset) % align
intra_padding = (-value.itemsize) % align
offset += start_padding
if intra_padding != 0:
if itemsize > 1 or (shape is not None and _prod(shape) > 1):
# Inject internal padding to the end of the sub-item
value = _add_trailing_padding(value, intra_padding)
else:
# We can postpone the injection of internal padding,
# as the item appears at most once
extra_offset += intra_padding
# Update common alignment
common_alignment = _lcm(align, common_alignment)
# Convert itemsize to sub-array
if itemsize != 1:
value = dtype((value, (itemsize,)))
# Sub-arrays (2)
if shape is not None:
value = dtype((value, shape))
# Field name
if stream.consume(':'):
name = stream.consume_until(':')
else:
name = None
if not (is_padding and name is None):
if name is not None and name in field_spec['names']:
raise RuntimeError(
f"Duplicate field name '{name}' in PEP3118 format"
)
field_spec['names'].append(name)
field_spec['formats'].append(value)
field_spec['offsets'].append(offset)
offset += value.itemsize
offset += extra_offset
field_spec['itemsize'] = offset
# extra final padding for aligned types
if stream.byteorder == '@':
field_spec['itemsize'] += (-offset) % common_alignment
# Check if this was a simple 1-item type, and unwrap it
if (field_spec['names'] == [None]
and field_spec['offsets'][0] == 0
and field_spec['itemsize'] == field_spec['formats'][0].itemsize
and not is_subdtype):
ret = field_spec['formats'][0]
else:
_fix_names(field_spec)
ret = dtype(field_spec)
# Finished
return ret, common_alignment
def _fix_names(field_spec):
""" Replace names which are None with the next unused f%d name """
names = field_spec['names']
for i, name in enumerate(names):
if name is not None:
continue
j = 0
while True:
name = f'f{j}'
if name not in names:
break
j = j + 1
names[i] = name
def _add_trailing_padding(value, padding):
"""Inject the specified number of padding bytes at the end of a dtype"""
if value.fields is None:
field_spec = {
'names': ['f0'],
'formats': [value],
'offsets': [0],
'itemsize': value.itemsize
}
else:
fields = value.fields
names = value.names
field_spec = {
'names': names,
'formats': [fields[name][0] for name in names],
'offsets': [fields[name][1] for name in names],
'itemsize': value.itemsize
}
field_spec['itemsize'] += padding
return dtype(field_spec)
def _prod(a):
p = 1
for x in a:
p *= x
return p
def _gcd(a, b):
"""Calculate the greatest common divisor of a and b"""
if not (math.isfinite(a) and math.isfinite(b)):
raise ValueError('Can only find greatest common divisor of '
f'finite arguments, found "{a}" and "{b}"')
while b:
a, b = b, a % b
return a
def _lcm(a, b):
return a // _gcd(a, b) * b
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
""" Format the error message for when __array_ufunc__ gives up. """
args_string = ', '.join([f'{arg!r}' for arg in inputs] +
[f'{k}={v!r}'
for k, v in kwargs.items()])
args = inputs + kwargs.get('out', ())
types_string = ', '.join(repr(type(arg).__name__) for arg in args)
return ('operand type(s) all returned NotImplemented from '
f'__array_ufunc__({ufunc!r}, {method!r}, {args_string}): {types_string}'
)
def array_function_errmsg_formatter(public_api, types):
""" Format the error message for when __array_ufunc__ gives up. """
func_name = f'{public_api.__module__}.{public_api.__name__}'
return (f"no implementation found for '{func_name}' on types that implement "
f'__array_function__: {list(types)}')
def _ufunc_doc_signature_formatter(ufunc):
"""
Builds a signature string which resembles PEP 457
This is used to construct the first line of the docstring
Keep in sync with `_ufunc_inspect_signature_builder`.
"""
# input arguments are simple
if ufunc.nin == 1:
in_args = 'x'
else:
in_args = ', '.join(f'x{i + 1}' for i in range(ufunc.nin))
# output arguments are both keyword or positional
if ufunc.nout == 0:
out_args = ', /, out=()'
elif ufunc.nout == 1:
out_args = ', /, out=None'
else:
out_args = '[, {positional}], / [, out={default}]'.format(
positional=', '.join(
f'out{i + 1}' for i in range(ufunc.nout)),
default=repr((None,) * ufunc.nout)
)
# keyword only args depend on whether this is a gufunc
kwargs = (
", casting='same_kind'"
", order='K'"
", dtype=None"
", subok=True"
)
# NOTE: gufuncs may or may not support the `axis` parameter
if ufunc.signature is None:
kwargs = f", where=True{kwargs}[, signature]"
else:
kwargs += "[, signature, axes, axis]"
# join all the parts together
return f'{ufunc.__name__}({in_args}{out_args}, *{kwargs})'
def _ufunc_inspect_signature_builder(ufunc):
"""
Builds a ``__signature__`` string.
Should be kept in sync with `_ufunc_doc_signature_formatter`.
"""
from inspect import Parameter, Signature
params = []
# positional-only input parameters
if ufunc.nin == 1:
params.append(Parameter("x", Parameter.POSITIONAL_ONLY))
else:
params.extend(
Parameter(f"x{i}", Parameter.POSITIONAL_ONLY)
for i in range(1, ufunc.nin + 1)
)
# for the sake of simplicity, we only consider a single output parameter
if ufunc.nout == 1:
out_default = None
else:
out_default = (None,) * ufunc.nout
params.append(
Parameter("out", Parameter.POSITIONAL_OR_KEYWORD, default=out_default),
)
if ufunc.signature is None:
params.append(Parameter("where", Parameter.KEYWORD_ONLY, default=True))
else:
# NOTE: not all gufuncs support the `axis` parameters
params.append(Parameter("axes", Parameter.KEYWORD_ONLY, default=_NoValue))
params.append(Parameter("axis", Parameter.KEYWORD_ONLY, default=_NoValue))
params.append(Parameter("keepdims", Parameter.KEYWORD_ONLY, default=False))
params.extend((
Parameter("casting", Parameter.KEYWORD_ONLY, default='same_kind'),
Parameter("order", Parameter.KEYWORD_ONLY, default='K'),
Parameter("dtype", Parameter.KEYWORD_ONLY, default=None),
Parameter("subok", Parameter.KEYWORD_ONLY, default=True),
Parameter("signature", Parameter.KEYWORD_ONLY, default=None),
))
return Signature(params)
def npy_ctypes_check(cls):
# determine if a class comes from ctypes, in order to work around
# a bug in the buffer protocol for those objects, bpo-10746
try:
# ctypes class are new-style, so have an __mro__. This probably fails
# for ctypes classes with multiple inheritance.
if IS_PYPY:
# (..., _ctypes.basics._CData, Bufferable, object)
ctype_base = cls.__mro__[-3]
else:
# # (..., _ctypes._CData, object)
ctype_base = cls.__mro__[-2]
# right now, they're part of the _ctypes module
return '_ctypes' in ctype_base.__module__
except Exception:
return False
# used to handle the _NoValue default argument for na_object
# in the C implementation of the __reduce__ method for stringdtype
def _convert_to_stringdtype_kwargs(coerce, na_object=_NoValue):
if na_object is _NoValue:
return StringDType(coerce=coerce)
return StringDType(coerce=coerce, na_object=na_object)
| _Stream |
python | pypa__pipenv | pipenv/vendor/dotenv/variables.py | {
"start": 1104,
"end": 2348
} | class ____(Atom):
def __init__(self, name: str, default: Optional[str]) -> None:
self.name = name
self.default = default
def __repr__(self) -> str:
return f"Variable(name={self.name}, default={self.default})"
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return NotImplemented
return (self.name, self.default) == (other.name, other.default)
def __hash__(self) -> int:
return hash((self.__class__, self.name, self.default))
def resolve(self, env: Mapping[str, Optional[str]]) -> str:
default = self.default if self.default is not None else ""
result = env.get(self.name, default)
return result if result is not None else ""
def parse_variables(value: str) -> Iterator[Atom]:
cursor = 0
for match in _posix_variable.finditer(value):
(start, end) = match.span()
name = match["name"]
default = match["default"]
if start > cursor:
yield Literal(value=value[cursor:start])
yield Variable(name=name, default=default)
cursor = end
length = len(value)
if cursor < length:
yield Literal(value=value[cursor:length])
| Variable |
python | tensorflow__tensorflow | tensorflow/python/module/module_test.py | {
"start": 18801,
"end": 19500
} | class ____(module.Module):
def __init__(self):
super().__init__()
self._trainable_variables = [
variables.Variable(1., name="a"),
variables.Variable(2., name="b"),
]
self._non_trainable_variables = [
variables.Variable(3., name="c"),
variables.Variable(4., name="d"),
]
self._bonus = variables.Variable(5., name="e")
@property
def variables(self):
def key_function(name):
indexes = {"_trainable_variables": 0, "_non_trainable_variables": 1}
return indexes.get(name, 2), name
return list(
self._flatten(
predicate=module._is_variable,
attribute_traversal_key=key_function))
| LayerModule |
python | encode__httpx | httpx/_models.py | {
"start": 3936,
"end": 12179
} | class ____(typing.MutableMapping[str, str]):
"""
HTTP headers, as a case-insensitive multi-dict.
"""
def __init__(
self,
headers: HeaderTypes | None = None,
encoding: str | None = None,
) -> None:
self._list = [] # type: typing.List[typing.Tuple[bytes, bytes, bytes]]
if isinstance(headers, Headers):
self._list = list(headers._list)
elif isinstance(headers, Mapping):
for k, v in headers.items():
bytes_key = _normalize_header_key(k, encoding)
bytes_value = _normalize_header_value(v, encoding)
self._list.append((bytes_key, bytes_key.lower(), bytes_value))
elif headers is not None:
for k, v in headers:
bytes_key = _normalize_header_key(k, encoding)
bytes_value = _normalize_header_value(v, encoding)
self._list.append((bytes_key, bytes_key.lower(), bytes_value))
self._encoding = encoding
@property
def encoding(self) -> str:
"""
Header encoding is mandated as ascii, but we allow fallbacks to utf-8
or iso-8859-1.
"""
if self._encoding is None:
for encoding in ["ascii", "utf-8"]:
for key, value in self.raw:
try:
key.decode(encoding)
value.decode(encoding)
except UnicodeDecodeError:
break
else:
# The else block runs if 'break' did not occur, meaning
# all values fitted the encoding.
self._encoding = encoding
break
else:
# The ISO-8859-1 encoding covers all 256 code points in a byte,
# so will never raise decode errors.
self._encoding = "iso-8859-1"
return self._encoding
@encoding.setter
def encoding(self, value: str) -> None:
self._encoding = value
@property
def raw(self) -> list[tuple[bytes, bytes]]:
"""
Returns a list of the raw header items, as byte pairs.
"""
return [(raw_key, value) for raw_key, _, value in self._list]
def keys(self) -> typing.KeysView[str]:
return {key.decode(self.encoding): None for _, key, value in self._list}.keys()
def values(self) -> typing.ValuesView[str]:
values_dict: dict[str, str] = {}
for _, key, value in self._list:
str_key = key.decode(self.encoding)
str_value = value.decode(self.encoding)
if str_key in values_dict:
values_dict[str_key] += f", {str_value}"
else:
values_dict[str_key] = str_value
return values_dict.values()
def items(self) -> typing.ItemsView[str, str]:
"""
Return `(key, value)` items of headers. Concatenate headers
into a single comma separated value when a key occurs multiple times.
"""
values_dict: dict[str, str] = {}
for _, key, value in self._list:
str_key = key.decode(self.encoding)
str_value = value.decode(self.encoding)
if str_key in values_dict:
values_dict[str_key] += f", {str_value}"
else:
values_dict[str_key] = str_value
return values_dict.items()
def multi_items(self) -> list[tuple[str, str]]:
"""
Return a list of `(key, value)` pairs of headers. Allow multiple
occurrences of the same key without concatenating into a single
comma separated value.
"""
return [
(key.decode(self.encoding), value.decode(self.encoding))
for _, key, value in self._list
]
def get(self, key: str, default: typing.Any = None) -> typing.Any:
"""
Return a header value. If multiple occurrences of the header occur
then concatenate them together with commas.
"""
try:
return self[key]
except KeyError:
return default
def get_list(self, key: str, split_commas: bool = False) -> list[str]:
"""
Return a list of all header values for a given key.
If `split_commas=True` is passed, then any comma separated header
values are split into multiple return strings.
"""
get_header_key = key.lower().encode(self.encoding)
values = [
item_value.decode(self.encoding)
for _, item_key, item_value in self._list
if item_key.lower() == get_header_key
]
if not split_commas:
return values
split_values = []
for value in values:
split_values.extend([item.strip() for item in value.split(",")])
return split_values
def update(self, headers: HeaderTypes | None = None) -> None: # type: ignore
headers = Headers(headers)
for key in headers.keys():
if key in self:
self.pop(key)
self._list.extend(headers._list)
def copy(self) -> Headers:
return Headers(self, encoding=self.encoding)
def __getitem__(self, key: str) -> str:
"""
Return a single header value.
If there are multiple headers with the same key, then we concatenate
them with commas. See: https://tools.ietf.org/html/rfc7230#section-3.2.2
"""
normalized_key = key.lower().encode(self.encoding)
items = [
header_value.decode(self.encoding)
for _, header_key, header_value in self._list
if header_key == normalized_key
]
if items:
return ", ".join(items)
raise KeyError(key)
def __setitem__(self, key: str, value: str) -> None:
"""
Set the header `key` to `value`, removing any duplicate entries.
Retains insertion order.
"""
set_key = key.encode(self._encoding or "utf-8")
set_value = value.encode(self._encoding or "utf-8")
lookup_key = set_key.lower()
found_indexes = [
idx
for idx, (_, item_key, _) in enumerate(self._list)
if item_key == lookup_key
]
for idx in reversed(found_indexes[1:]):
del self._list[idx]
if found_indexes:
idx = found_indexes[0]
self._list[idx] = (set_key, lookup_key, set_value)
else:
self._list.append((set_key, lookup_key, set_value))
def __delitem__(self, key: str) -> None:
"""
Remove the header `key`.
"""
del_key = key.lower().encode(self.encoding)
pop_indexes = [
idx
for idx, (_, item_key, _) in enumerate(self._list)
if item_key.lower() == del_key
]
if not pop_indexes:
raise KeyError(key)
for idx in reversed(pop_indexes):
del self._list[idx]
def __contains__(self, key: typing.Any) -> bool:
header_key = key.lower().encode(self.encoding)
return header_key in [key for _, key, _ in self._list]
def __iter__(self) -> typing.Iterator[typing.Any]:
return iter(self.keys())
def __len__(self) -> int:
return len(self._list)
def __eq__(self, other: typing.Any) -> bool:
try:
other_headers = Headers(other)
except ValueError:
return False
self_list = [(key, value) for _, key, value in self._list]
other_list = [(key, value) for _, key, value in other_headers._list]
return sorted(self_list) == sorted(other_list)
def __repr__(self) -> str:
class_name = self.__class__.__name__
encoding_str = ""
if self.encoding != "ascii":
encoding_str = f", encoding={self.encoding!r}"
as_list = list(_obfuscate_sensitive_headers(self.multi_items()))
as_dict = dict(as_list)
no_duplicate_keys = len(as_dict) == len(as_list)
if no_duplicate_keys:
return f"{class_name}({as_dict!r}{encoding_str})"
return f"{class_name}({as_list!r}{encoding_str})"
| Headers |
python | tensorflow__tensorflow | tensorflow/python/saved_model/load_optimizer_test.py | {
"start": 878,
"end": 1339
} | class ____(test.TestCase):
def test_load_optimizer_without_keras(self):
# Make sure that a SavedModel w/ optimizer can be loaded without the Keras
# module imported.
save_path = test.test_src_dir_path(
"cc/saved_model/testdata/OptimizerSlotVariableModule")
loaded = load.load(save_path)
self.assertIsInstance(
loaded.opt.get_slot(loaded.v, "v"), variables.Variable)
if __name__ == "__main__":
test.main()
| LoadOptimizerTest |
python | ijl__orjson | test/test_fake.py | {
"start": 372,
"end": 1108
} | class ____:
@pytest.mark.skipif(Faker is None, reason="faker not available")
def test_faker(self):
fake = Faker(FAKER_LOCALES)
profile_keys = list(
set(fake.profile().keys()) - {"birthdate", "current_location"},
)
for _ in range(NUM_LOOPS):
data = [
{
"person": fake.profile(profile_keys),
"emoji": fake.emoji(),
"text": fake.paragraphs(),
}
for _ in range(NUM_ENTRIES)
]
for _ in range(NUM_SHUFFLES):
random.shuffle(data)
output = orjson.dumps(data)
assert orjson.loads(output) == data
| TestFaker |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 795005,
"end": 795552
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("actor", "created_at", "lock_reason", "lockable")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
lock_reason = sgqlc.types.Field(LockReason, graphql_name="lockReason")
lockable = sgqlc.types.Field(
sgqlc.types.non_null(Lockable), graphql_name="lockable"
)
| LockedEvent |
python | django-extensions__django-extensions | tests/testapp/models.py | {
"start": 14237,
"end": 14447
} | class ____(models.Model):
field_to_update = models.BooleanField(default=True)
modified = ModificationDateTimeField()
class Meta:
app_label = "django_extensions"
| ModelModificationDateTimeField |
python | spyder-ide__spyder | spyder/api/plugin_registration/registry.py | {
"start": 1493,
"end": 27053
} | class ____(QObject, PreferencesAdapter):
"""
Global plugin registry.
This class handles a plugin initialization/teardown lifetime, including
notifications when a plugin is available or not.
This registry alleviates the limitations of a topological sort-based
plugin initialization by enabling plugins to have bidirectional
dependencies instead of unidirectional ones.
Notes
-----
1. This class should be instantiated as a singleton.
2. A plugin should not depend on other plugin to perform its
initialization since it could cause deadlocks.
"""
sig_plugin_ready = Signal(str, bool)
"""
This signal is used to let the main window know that a plugin is ready.
Parameters
----------
plugin_name: str
Name of the plugin that is available.
omit_conf: bool
True if the plugin configuration does not need to be written.
"""
def __init__(self):
super().__init__()
PreferencesAdapter.__init__(self)
# Reference to the main window
self.main = None
# Dictionary that maps a plugin name to a list of the plugin names
# that depend on it.
self.plugin_dependents = {} # type: Dict[str, Dict[str, List[str]]]
# Dictionary that maps a plugin name to a list of the plugin names
# that the plugin depends on.
self.plugin_dependencies = {} # type: Dict[str, Dict[str, List[str]]]
# Plugin dictionary mapped by their names
self.plugin_registry = {} # type: Dict[str, SpyderPluginClass]
# Dictionary that maps a plugin name to its availability.
self.plugin_availability = {} # type: Dict[str, bool]
# Set that stores the plugin names of all Spyder 4 plugins.
self.old_plugins = set({}) # type: set[str]
# Set that stores the names of the plugins that are enabled
self.enabled_plugins = set({}) # type: set[str]
# Set that stores the names of the internal plugins
self.internal_plugins = set({}) # type: set[str]
# Set that stores the names of the external plugins
self.external_plugins = set({}) # type: set[str]
# Dictionary that contains all the internal plugins (enabled or not)
self.all_internal_plugins = {} # type: Dict[str, Tuple[str, Type[SpyderPluginClass]]]
# Dictionary that contains all the external plugins (enabled or not)
self.all_external_plugins = {} # type: Dict[str, Tuple[str, Type[SpyderPluginClass]]]
# This is used to allow disabling external plugins through Preferences
self._external_plugins_conf_section = "external_plugins"
# ------------------------- PRIVATE API -----------------------------------
def _update_dependents(self, plugin: str, dependent_plugin: str, key: str):
"""Add `dependent_plugin` to the list of dependents of `plugin`."""
plugin_dependents = self.plugin_dependents.get(plugin, {})
plugin_strict_dependents = plugin_dependents.get(key, [])
plugin_strict_dependents.append(dependent_plugin)
plugin_dependents[key] = plugin_strict_dependents
self.plugin_dependents[plugin] = plugin_dependents
def _update_dependencies(self, plugin: str, required_plugin: str,
key: str):
"""Add `required_plugin` to the list of dependencies of `plugin`."""
plugin_dependencies = self.plugin_dependencies.get(plugin, {})
plugin_strict_dependencies = plugin_dependencies.get(key, [])
plugin_strict_dependencies.append(required_plugin)
plugin_dependencies[key] = plugin_strict_dependencies
self.plugin_dependencies[plugin] = plugin_dependencies
def _update_plugin_info(self, plugin_name: str,
required_plugins: List[str],
optional_plugins: List[str]):
"""Update the dependencies and dependents of `plugin_name`."""
for plugin in required_plugins:
self._update_dependencies(plugin_name, plugin, 'requires')
self._update_dependents(plugin, plugin_name, 'requires')
for plugin in optional_plugins:
self._update_dependencies(plugin_name, plugin, 'optional')
self._update_dependents(plugin, plugin_name, 'optional')
    def _instantiate_spyder_plugin(
        self,
        main_window: Any,
        PluginClass: Type[SpyderPluginClass],
        external: bool,
    ) -> SpyderPluginClass:
        """
        Instantiate and register a Spyder 5+ plugin.

        Parameters
        ----------
        main_window: spyder.app.mainwindow.MainWindow
            Reference to Spyder's main window, forwarded to the plugin
            constructor.
        PluginClass: Type[SpyderPluginClass]
            Class of the plugin to instantiate.
        external: bool
            True if the plugin is an external (third-party) plugin, False
            if it ships with Spyder.

        Returns
        -------
        plugin_instance: SpyderPluginClass
            The created instance, already stored in `self.plugin_registry`.
        """
        # Deduplicate the declared dependency lists.
        required_plugins = list(set(PluginClass.REQUIRES))
        optional_plugins = list(set(PluginClass.OPTIONAL))
        plugin_name = PluginClass.NAME

        logger.debug(f'Registering plugin {plugin_name} - {PluginClass}')

        # Register plugin configuration before instantiation, so the plugin
        # can read its options from its own conf file in __init__.
        if PluginClass.CONF_FILE:
            CONF.register_plugin(PluginClass)

        # Expand the `Plugins.All` sentinel into the full plugin list.
        # NOTE(review): the sentinel itself is not removed from the expanded
        # list — presumably harmless downstream; confirm.
        for plugin in list(required_plugins):
            if plugin == Plugins.All:
                required_plugins = list(set(required_plugins + ALL_PLUGINS))

        for plugin in list(optional_plugins):
            if plugin == Plugins.All:
                optional_plugins = list(set(optional_plugins + ALL_PLUGINS))

        # Update plugin dependency information
        self._update_plugin_info(plugin_name, required_plugins,
                                 optional_plugins)

        # Create and store plugin instance
        plugin_instance = PluginClass(main_window, configuration=CONF)
        self.plugin_registry[plugin_name] = plugin_instance

        # Connect plugin availability signal to notification system.
        # omit_conf is truthy when the plugin has its own conf file, i.e.
        # the main configuration does not need to be rewritten for it.
        plugin_instance.sig_plugin_ready.connect(
            lambda: self.notify_plugin_availability(
                plugin_name, omit_conf=PluginClass.CONF_FILE))

        # Initialize plugin instance
        plugin_instance.initialize()

        # Register plugins that are already available
        self._notify_plugin_dependencies(plugin_name)

        # Register the plugin name under the external or internal
        # plugin set
        if external:
            self.external_plugins |= {plugin_name}
        else:
            self.internal_plugins |= {plugin_name}

        if external:
            # These attributes come from spyder.app.find_plugins
            module = PluginClass._spyder_module_name
            package_name = PluginClass._spyder_package_name
            version = PluginClass._spyder_version
            description = plugin_instance.get_description()
            # Show the plugin in Spyder's dependencies dialog.
            dependencies.add(module, package_name, description,
                             version, None, kind=dependencies.PLUGIN)

        return plugin_instance
def _notify_plugin_dependencies(self, plugin_name: str):
"""Notify a plugin of its available dependencies."""
plugin_instance = self.plugin_registry[plugin_name]
plugin_dependencies = self.plugin_dependencies.get(plugin_name, {})
required_plugins = plugin_dependencies.get('requires', [])
optional_plugins = plugin_dependencies.get('optional', [])
for plugin in required_plugins + optional_plugins:
if plugin in self.plugin_registry:
if self.plugin_availability.get(plugin, False):
logger.debug(f'Plugin {plugin} has already loaded')
plugin_instance._on_plugin_available(plugin)
def _notify_plugin_teardown(self, plugin_name: str):
"""Notify dependents of a plugin that is going to be unavailable."""
plugin_dependents = self.plugin_dependents.get(plugin_name, {})
required_plugins = plugin_dependents.get('requires', [])
optional_plugins = plugin_dependents.get('optional', [])
for plugin in required_plugins + optional_plugins:
if plugin in self.plugin_registry:
if self.plugin_availability.get(plugin, False):
logger.debug(f'Notifying plugin {plugin} that '
f'{plugin_name} is going to be turned off')
plugin_instance = self.plugin_registry[plugin]
plugin_instance._on_plugin_teardown(plugin_name)
def _teardown_plugin(self, plugin_name: str):
"""Disconnect a plugin from its dependencies."""
plugin_instance = self.plugin_registry[plugin_name]
plugin_dependencies = self.plugin_dependencies.get(plugin_name, {})
required_plugins = plugin_dependencies.get('requires', [])
optional_plugins = plugin_dependencies.get('optional', [])
for plugin in required_plugins + optional_plugins:
if plugin in self.plugin_registry:
if self.plugin_availability.get(plugin, False):
logger.debug(f'Disconnecting {plugin_name} from {plugin}')
plugin_instance._on_plugin_teardown(plugin)
# -------------------------- PUBLIC API -----------------------------------
def register_plugin(
self, main_window: Any,
PluginClass: Type[SpyderPluginClass],
*args: tuple, external: bool = False,
**kwargs: dict) -> SpyderPluginClass:
"""
Register a plugin into the Spyder registry.
Parameters
----------
main_window: spyder.app.mainwindow.MainWindow
Reference to Spyder's main window.
PluginClass: type[SpyderPluginClass]
The plugin class to register and create. It must be one of
`spyder.app.registry.SpyderPluginClass`.
*args: tuple
Positional arguments used to initialize the plugin
instance.
external: bool
If True, then the plugin is stored as a external plugin. Otherwise
it will be marked as an internal plugin. Default: False
**kwargs: dict
Optional keyword arguments used to initialize the plugin instance.
Returns
-------
plugin: SpyderPluginClass
The instance of the registered plugin.
Raises
------
TypeError
If the `PluginClass` does not inherit from any of
`spyder.app.registry.SpyderPluginClass`
Notes
-----
The optional `*args` and `**kwargs` will be removed once all
plugins are migrated.
"""
if not issubclass(PluginClass, SpyderPluginV2):
raise TypeError(
f"{PluginClass} does not inherit from SpyderPluginV2"
)
instance = None
if issubclass(PluginClass, SpyderPluginV2):
# Register a Spyder 5+ plugin
instance = self._instantiate_spyder_plugin(
main_window, PluginClass, external
)
return instance
def notify_plugin_availability(self, plugin_name: str,
notify_main: bool = True,
omit_conf: bool = False):
"""
Notify dependent plugins of a given plugin of its availability.
Parameters
----------
plugin_name: str
Name of the plugin that is available.
notify_main: bool
If True, then a signal is emitted to the main window to perform
further registration steps.
omit_conf: bool
If True, then the main window is instructed to not write the
plugin configuration into the Spyder configuration file.
"""
logger.debug(f'Plugin {plugin_name} has finished loading, '
'sending notifications')
# Set plugin availability to True
self.plugin_availability[plugin_name] = True
# Notify the main window that the plugin is ready
if notify_main:
self.sig_plugin_ready.emit(plugin_name, omit_conf)
# Notify plugin dependents
plugin_dependents = self.plugin_dependents.get(plugin_name, {})
required_plugins = plugin_dependents.get('requires', [])
optional_plugins = plugin_dependents.get('optional', [])
for plugin in required_plugins + optional_plugins:
if plugin in self.plugin_registry:
plugin_instance = self.plugin_registry[plugin]
plugin_instance._on_plugin_available(plugin_name)
if plugin_name == Plugins.Preferences and not running_under_pytest():
plugin_instance = self.plugin_registry[plugin_name]
plugin_instance.register_plugin_preferences(self)
def can_delete_plugin(self, plugin_name: str) -> bool:
"""
Check if a plugin from the registry can be deleted by its name.
Paremeters
----------
plugin_name: str
Name of the plugin to check for deletion.
Returns
-------
can_close: bool
True if the plugin can be closed. False otherwise.
"""
plugin_instance = self.plugin_registry[plugin_name]
# Determine if plugin can be closed
return plugin_instance.can_close()
def dock_undocked_plugin(
self, plugin_name: str, save_undocked: bool = False):
"""
Dock plugin if undocked and save undocked state if requested
Parameters
----------
plugin_name: str
Name of the plugin to check for deletion.
save_undocked : bool, optional
True if the undocked state needs to be saved. The default is False.
Returns
-------
None.
"""
plugin_instance = self.plugin_registry[plugin_name]
if isinstance(plugin_instance, SpyderDockablePlugin):
# Close undocked plugin if needed and save undocked state
plugin_instance.close_window(save_undocked=save_undocked)
    def delete_plugin(self, plugin_name: str, teardown: bool = True,
                      check_can_delete: bool = True) -> bool:
        """
        Remove and delete a plugin from the registry by its name.

        Parameters
        ----------
        plugin_name: str
            Name of the plugin to delete.
        teardown: bool
            True if the teardown notification to other plugins should be sent
            when deleting the plugin, False otherwise.
        check_can_delete: bool
            True if the plugin should validate if it can be closed when this
            method is called, False otherwise.

        Returns
        -------
        plugin_deleted: bool
            True if the registry was able to teardown and remove the plugin.
            False otherwise.
        """
        logger.debug(f'Deleting plugin {plugin_name}')
        plugin_instance = self.plugin_registry[plugin_name]

        # Determine if plugin can be closed
        if check_can_delete:
            can_delete = self.can_delete_plugin(plugin_name)
            if not can_delete:
                return False

        if isinstance(plugin_instance, SpyderPluginV2):
            # Cleanly delete plugin widgets. This avoids segfaults with
            # PyQt 5.15
            if isinstance(plugin_instance, SpyderDockablePlugin):
                try:
                    plugin_instance.get_widget().close()
                    plugin_instance.get_widget().deleteLater()
                except RuntimeError:
                    # Qt already deleted the underlying C++ object.
                    pass
            else:
                container = plugin_instance.get_container()
                if container:
                    try:
                        container.close()
                        container.deleteLater()
                    except RuntimeError:
                        pass

            # Delete plugin
            try:
                plugin_instance.deleteLater()
            except RuntimeError:
                pass

            if teardown:
                # Disconnect plugin from other plugins
                self._teardown_plugin(plugin_name)

                # Disconnect depending plugins from the plugin to delete
                self._notify_plugin_teardown(plugin_name)

            # Perform plugin closure tasks
            try:
                plugin_instance.on_close(True)
            except RuntimeError:
                pass

        # Delete plugin from the registry and auxiliary structures
        self.plugin_dependents.pop(plugin_name, None)
        self.plugin_dependencies.pop(plugin_name, None)

        if plugin_instance.CONF_FILE:
            # This must be done after on_close() so that plugins can modify
            # their (external) config therein.
            CONF.unregister_plugin(plugin_instance)

        # Drop this plugin from every other plugin's dependency lists.
        for plugin in self.plugin_dependents:
            all_plugin_dependents = self.plugin_dependents[plugin]
            for key in {'requires', 'optional'}:
                plugin_dependents = all_plugin_dependents.get(key, [])
                if plugin_name in plugin_dependents:
                    plugin_dependents.remove(plugin_name)

        for plugin in self.plugin_dependencies:
            all_plugin_dependencies = self.plugin_dependencies[plugin]
            for key in {'requires', 'optional'}:
                plugin_dependencies = all_plugin_dependencies.get(key, [])
                if plugin_name in plugin_dependencies:
                    plugin_dependencies.remove(plugin_name)

        # NOTE(review): pop() without a default raises KeyError if the plugin
        # never signaled availability — confirm deletion only happens for
        # plugins that finished loading.
        self.plugin_availability.pop(plugin_name)
        self.old_plugins -= {plugin_name}
        self.enabled_plugins -= {plugin_name}
        self.internal_plugins -= {plugin_name}
        self.external_plugins -= {plugin_name}

        # Remove the plugin from the registry
        self.plugin_registry.pop(plugin_name)

        return True
def dock_all_undocked_plugins(self, save_undocked: bool = False):
"""
Dock undocked plugins and save undocked state if required.
Parameters
----------
save_undocked : bool, optional
True if the undocked state needs to be saved. The default is False.
Returns
-------
None.
"""
for plugin_name in (
set(self.external_plugins) | set(self.internal_plugins)):
self.dock_undocked_plugin(
plugin_name, save_undocked=save_undocked)
def can_delete_all_plugins(self,
excluding: Optional[Set[str]] = None) -> bool:
"""
Determine if all plugins can be deleted except the ones to exclude.
Parameters
----------
excluding: Optional[Set[str]]
A set that lists plugins (by name) that will not be deleted.
Returns
-------
bool
True if all plugins can be closed. False otherwise.
"""
excluding = excluding or set({})
can_close = True
# Check external plugins
for plugin_name in (
set(self.external_plugins) | set(self.internal_plugins)):
if plugin_name not in excluding:
can_close &= self.can_delete_plugin(plugin_name)
if not can_close:
break
return can_close
def delete_all_plugins(self, excluding: Optional[Set[str]] = None,
close_immediately: bool = False) -> bool:
"""
Remove all plugins from the registry.
The teardown mechanism will remove external plugins first and then
internal ones, where the Spyder 4 plugins will be removed first and
then the Spyder 5 ones.
Parameters
----------
excluding: Optional[Set[str]]
A set that lists plugins (by name) that will not be deleted.
close_immediately: bool
If true, then the `can_close` status will be ignored.
Returns
-------
all_deleted: bool
True if all the plugins were closed and deleted. False otherwise.
"""
excluding = excluding or set({})
can_close = True
# Check if all the plugins can be closed
can_close = self.can_delete_all_plugins(excluding=excluding)
if not can_close and not close_immediately:
return False
# Delete Spyder 5+ external plugins
for plugin_name in set(self.external_plugins):
if plugin_name not in excluding:
plugin_instance = self.plugin_registry[plugin_name]
if isinstance(plugin_instance, SpyderPluginV2):
can_close &= self.delete_plugin(
plugin_name, teardown=False, check_can_delete=False)
if not can_close and not close_immediately:
break
if not can_close and not close_immediately:
return False
# Delete Spyder 5+ internal plugins
for plugin_name in set(self.internal_plugins):
if plugin_name not in excluding:
plugin_instance = self.plugin_registry[plugin_name]
if isinstance(plugin_instance, SpyderPluginV2):
can_close &= self.delete_plugin(
plugin_name, teardown=False, check_can_delete=False)
if not can_close and not close_immediately:
break
return can_close
def get_plugin(self, plugin_name: str) -> SpyderPluginClass:
"""
Get a reference to a plugin instance by its name.
Parameters
----------
plugin_name: str
Name of the plugin to retrieve.
Returns
-------
plugin: SpyderPluginClass
The instance of the requested plugin.
Raises
------
SpyderAPIError
If the plugin name was not found in the registry.
"""
if plugin_name in self.plugin_registry:
plugin_instance = self.plugin_registry[plugin_name]
return plugin_instance
else:
raise SpyderAPIError(f'Plugin {plugin_name} was not found in '
'the registry')
def set_plugin_enabled(self, plugin_name: str):
"""
Add a plugin name to the set of enabled plugins.
Parameters
----------
plugin_name: str
Name of the plugin to add.
"""
self.enabled_plugins |= {plugin_name}
def is_plugin_enabled(self, plugin_name: str) -> bool:
"""
Determine if a given plugin is enabled and is going to be
loaded.
Parameters
----------
plugin_name: str
Name of the plugin to query.
Returns
-------
plugin_enabled: bool
True if the plugin is enabled and False if not.
"""
return plugin_name in self.enabled_plugins
def is_plugin_available(self, plugin_name: str) -> bool:
"""
Determine if a given plugin was loaded and is available.
Parameters
----------
plugin_name: str
Name of the plugin to query.
Returns
-------
plugin_available: bool
True if the plugin is available and False if not.
"""
return self.plugin_availability.get(plugin_name, False)
def reset(self):
"""Reset and empty the plugin registry."""
# Dictionary that maps a plugin name to a list of the plugin names
# that depend on it.
self.plugin_dependents = {} # type: Dict[str, Dict[str, List[str]]]
# Dictionary that maps a plugin name to a list of the plugin names
# that the plugin depends on.
self.plugin_dependencies = {} # type: Dict[str, Dict[str, List[str]]]
# Plugin dictionary mapped by their names
self.plugin_registry = {} # type: Dict[str, SpyderPluginClass]
# Dictionary that maps a plugin name to its availability.
self.plugin_availability = {} # type: Dict[str, bool]
# Set that stores the plugin names of all Spyder 4 plugins.
self.old_plugins = set({}) # type: set[str]
# Set that stores the names of the plugins that are enabled
self.enabled_plugins = set({})
# Set that stores the names of the internal plugins
self.internal_plugins = set({})
# Set that stores the names of the external plugins
self.external_plugins = set({})
try:
self.sig_plugin_ready.disconnect()
except (TypeError, RuntimeError):
# Omit failures if there are no slots connected
pass
dependencies.DEPENDENCIES = []
    def set_all_internal_plugins(
            self, all_plugins: Dict[str, Type[SpyderPluginClass]]):
        """Store the dict of all discovered internal plugins (enabled or not)."""
        self.all_internal_plugins = all_plugins
    def set_all_external_plugins(
            self, all_plugins: Dict[str, Type[SpyderPluginClass]]):
        """Store the dict of all discovered external plugins (enabled or not)."""
        self.all_external_plugins = all_plugins
    def set_main(self, main):
        """Store a reference to Spyder's main window."""
        self.main = main
    def get_icon(self):
        """Return the 'plugins' icon (presumably shown in Preferences)."""
        return ima.icon('plugins')
    def get_name(self):
        """Return the localized display name of this registry entry."""
        return _('Plugins')
def __contains__(self, plugin_name: str) -> bool:
"""
Determine if a plugin name is contained in the registry.
Parameters
----------
plugin_name: str
Name of the plugin to seek.
Returns
-------
is_contained: bool
If True, the plugin name is contained on the registry, False
otherwise.
"""
return plugin_name in self.plugin_registry
    def __iter__(self):
        """Iterate over the names of the registered plugins."""
        return iter(self.plugin_registry)
# Module-level singleton instance, shared by all registry users.
PLUGIN_REGISTRY = SpyderPluginRegistry()
| SpyderPluginRegistry |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/nocover/test_stateful.py | {
"start": 2282,
"end": 2654
} | class ____(RuleBasedStateMachine):
charges = Bundle("charges")
@rule(targets=(charges,), child=charges)
def charge(self, child):
return DepthCharge(child)
@rule(targets=(charges,))
def none_charge(self):
return DepthCharge(None)
@rule(check=charges)
def is_not_too_deep(self, check):
assert check.depth < 3
| DepthMachine |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pydocstyle/D208.py | {
"start": 771,
"end": 887
} | class ____:
"""Over indented last line with content
Args:
Some content on the last line
"""
| Platform |
python | pytorch__pytorch | torch/_inductor/memory.py | {
"start": 680,
"end": 801
} | class ____:
order: list[BaseSchedulerNode]
peak_memory: int
method: str
@dataclasses.dataclass
| PeakMemoryResult |
python | PrefectHQ__prefect | src/prefect/server/utilities/messaging/__init__.py | {
"start": 4029,
"end": 5825
} | class ____(Protocol):
Publisher: type[Publisher]
Consumer: type[Consumer]
ephemeral_subscription: Callable[
[str], AbstractAsyncContextManager[Mapping[str, Any]]
]
# Used for testing: a context manager that breaks the topic in a way that raises
# a ValueError("oops") when attempting to publish a message.
break_topic: Callable[[], AbstractAsyncContextManager[None]]
def create_publisher(
topic: str, cache: Optional[Cache] = None, deduplicate_by: Optional[str] = None
) -> Publisher:
"""
Creates a new publisher with the applications default settings.
Args:
topic: the topic to publish to
Returns:
a new Consumer instance
"""
cache = cache or create_cache()
module = importlib.import_module(PREFECT_MESSAGING_BROKER.value())
assert isinstance(module, BrokerModule)
return module.Publisher(topic, cache, deduplicate_by=deduplicate_by)
@asynccontextmanager
async def ephemeral_subscription(topic: str) -> AsyncGenerator[Mapping[str, Any], Any]:
"""
Creates an ephemeral subscription to the given source, removing it when the context
exits.
"""
module = importlib.import_module(PREFECT_MESSAGING_BROKER.value())
assert isinstance(module, BrokerModule)
async with module.ephemeral_subscription(topic) as consumer_create_kwargs:
yield consumer_create_kwargs
def create_consumer(topic: str, **kwargs: Any) -> Consumer:
"""
Creates a new consumer with the applications default settings.
Args:
topic: the topic to consume from
Returns:
a new Consumer instance
"""
module = importlib.import_module(PREFECT_MESSAGING_BROKER.value())
assert isinstance(module, BrokerModule)
return module.Consumer(topic, **kwargs)
| BrokerModule |
python | django__django | tests/requests_tests/test_data_upload_settings.py | {
"start": 3503,
"end": 4379
} | class ____(SimpleTestCase):
def setUp(self):
self.request = WSGIRequest(
{
"REQUEST_METHOD": "GET",
"wsgi.input": BytesIO(b""),
"CONTENT_LENGTH": 3,
}
)
def test_data_upload_max_memory_size_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=2):
with self.assertRaisesMessage(RequestDataTooBig, TOO_MUCH_DATA_MSG):
self.request.body
def test_size_not_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=3):
self.request.body
def test_no_limit(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=None):
self.request.body
def test_empty_content_length(self):
self.request.environ["CONTENT_LENGTH"] = ""
self.request.body
| DataUploadMaxMemorySizeGetTests |
python | getsentry__sentry | src/sentry/grouping/component.py | {
"start": 8657,
"end": 8752
} | class ____(BaseGroupingComponent[str]):
id: str = "context_line"
| ContextLineGroupingComponent |
python | allegroai__clearml | clearml/backend_api/services/v2_23/dataviews.py | {
"start": 145238,
"end": 148373
} | class ____(Response):
"""
Response of dataviews.unarchive_many endpoint.
:param succeeded:
:type succeeded: Sequence[dict]
:param failed:
:type failed: Sequence[dict]
"""
_service = "dataviews"
_action = "unarchive_many"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"failed": {
"items": {
"properties": {
"error": {
"description": "Error info",
"properties": {
"codes": {
"items": {"type": "integer"},
"type": "array",
},
"data": {
"additionalProperties": True,
"type": "object",
},
"msg": {"type": "string"},
},
"type": "object",
},
"id": {
"description": "ID of the failed entity",
"type": "string",
},
},
"type": "object",
},
"type": ["array", "null"],
},
"succeeded": {
"items": {
"properties": {
"id": {
"description": "ID of the succeeded entity",
"type": "string",
},
"unarchived": {
"description": "Indicates whether the dataview was unarchived",
"type": "boolean",
},
},
"type": "object",
},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(self, succeeded=None, failed=None, **kwargs):
super(UnarchiveManyResponse, self).__init__(**kwargs)
self.succeeded = succeeded
self.failed = failed
@schema_property("succeeded")
def succeeded(self):
return self._property_succeeded
@succeeded.setter
def succeeded(self, value):
if value is None:
self._property_succeeded = None
return
self.assert_isinstance(value, "succeeded", (list, tuple))
self.assert_isinstance(value, "succeeded", (dict,), is_array=True)
self._property_succeeded = value
@schema_property("failed")
def failed(self):
return self._property_failed
@failed.setter
def failed(self, value):
if value is None:
self._property_failed = None
return
self.assert_isinstance(value, "failed", (list, tuple))
self.assert_isinstance(value, "failed", (dict,), is_array=True)
self._property_failed = value
| UnarchiveManyResponse |
python | astropy__astropy | astropy/io/ascii/ecsv.py | {
"start": 1600,
"end": 9979
} | class ____(basic.BasicHeader):
"""Header class for which the column definition line starts with the
comment character. See the :class:`CommentedHeader` class for an example.
"""
splitter_class = ECSVHeaderSplitter
def process_lines(self, lines):
"""Return only non-blank lines that start with the comment regexp. For these
lines strip out the matching characters and leading/trailing whitespace.
"""
re_comment = re.compile(self.comment)
for line in lines:
line = line.strip()
if not line:
continue
match = re_comment.match(line)
if match:
out = line[match.end() :]
if out:
yield out
else:
# Stop iterating on first failed match for a non-blank line
return
def write(self, lines):
"""
Write header information in the ECSV ASCII format.
This function is called at the point when preprocessing has been done to
convert the input table columns to `self.cols` which is a list of
`astropy.io.ascii.core.Column` objects. In particular `col.str_vals`
is available for each column with the string representation of each
column item for output.
This format starts with a delimiter separated list of the column names
in order to make this format readable by humans and simple csv-type
readers. It then encodes the full table meta and column attributes and
meta as YAML and pretty-prints this in the header. Finally the
delimited column names are repeated again, for humans and readers that
look for the *last* comment line as defining the column names.
"""
if self.splitter.delimiter not in DELIMITERS:
raise ValueError(
"only space and comma are allowed for delimiter in ECSV format"
)
# Now assemble the header dict that will be serialized by the YAML dumper
header = {"cols": self.cols, "schema": "astropy-2.0"}
if self.table_meta:
header["meta"] = OrderedDict(self.table_meta)
# Set the delimiter only for the non-default option(s)
if self.splitter.delimiter != " ":
header["delimiter"] = self.splitter.delimiter
header_yaml_lines = [
f"%ECSV {ECSV_VERSION}",
"---",
] + meta.get_yaml_from_header(header)
lines.extend([self.write_comment + line for line in header_yaml_lines])
names = [col.info.name for col in self.cols]
# If first col name looks like ECSV header start (r"\s*#") or any have leading
# or trailing whitespace then quote all fields in the header line.
if (names and re.match(self.comment, names[0])) or any(
name.strip() != name for name in names
):
splitter = ECSVHeaderSplitterQuoteAll()
splitter.delimiter = self.splitter.delimiter
else:
splitter = self.splitter # use default splitter
lines.append(splitter.join(names))
def write_comments(self, lines, meta):
"""
WRITE: Override the default write_comments to do nothing since this is handled
in the custom write method.
"""
def update_meta(self, lines, meta):
"""
READ: Override the default update_meta to do nothing. This process is done
in get_cols() for this reader.
"""
def get_cols(self, lines):
"""
READ: Initialize the header Column objects from the table ``lines``.
Parameters
----------
lines : list
List of table lines
"""
# Cache a copy of the original input lines before processing below
raw_lines = lines
# Extract non-blank comment (header) lines with comment character stripped
lines = list(self.process_lines(lines))
# Validate that this is a ECSV file
ecsv_header_re = r"""%ECSV [ ]
(?P<major> \d+)
\. (?P<minor> \d+)
\.? (?P<bugfix> \d+)? $"""
no_header_msg = (
'ECSV header line like "# %ECSV <version>" not found as first line.'
" This is required for a ECSV file."
)
if not lines:
raise core.InconsistentTableError(no_header_msg)
match = re.match(ecsv_header_re, lines[0].strip(), re.VERBOSE)
if not match:
raise core.InconsistentTableError(no_header_msg)
try:
header = meta.get_header_from_yaml(lines)
except meta.YamlParseError as e:
raise core.InconsistentTableError(
"unable to parse yaml in meta header"
) from e
if "meta" in header:
self.table_meta = header["meta"]
if "delimiter" in header:
delimiter = header["delimiter"]
if delimiter not in DELIMITERS:
raise ValueError(
"only space and comma are allowed for delimiter in ECSV format"
)
self.splitter.delimiter = delimiter
self.data.splitter.delimiter = delimiter
# Create the list of io.ascii column objects from `header`
header_cols = {x["name"]: x for x in header["datatype"]}
self.names = [x["name"] for x in header["datatype"]]
# Read the first non-commented line of table and split to get the CSV
# header column names. This is essentially what the Basic reader does.
try:
header_line = next(super().process_lines(raw_lines))
header_names = next(self.splitter([header_line]))
except StopIteration:
# there are no non-commented lines
header_line = ""
header_names = []
# Check for consistency of the ECSV vs. CSV header column names
if header_names != self.names:
raise core.InconsistentTableError(
f"column names from ECSV header {self.names} do not "
f"match names from header line of CSV data {header_names}"
)
# BaseHeader method to create self.cols, which is a list of
# io.ascii.core.Column objects (*not* Table Column objects).
self._set_cols_from_names()
# Transfer attributes from the column descriptor stored in the input
# header YAML metadata to the new columns to create this table.
for col in self.cols:
for attr in ("description", "format", "unit", "meta", "subtype"):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
col.dtype = header_cols[col.name]["datatype"]
# Warn if col dtype is not a valid ECSV datatype, but allow reading for
# back-compatibility with existing older files that have numpy datatypes
# like datetime64 or object or python str, which are not in the ECSV standard.
if col.dtype not in ECSV_DATATYPES:
msg = (
f"unexpected datatype {col.dtype!r} of column {col.name!r} "
f"is not in allowed ECSV datatypes {ECSV_DATATYPES}. "
"Using anyway as a numpy dtype but beware since unexpected "
"results are possible."
)
warnings.warn(msg, category=InvalidEcsvDatatypeWarning)
# Subtype is written like "int64[2,null]" and we want to split this
# out to "int64" and [2, None].
subtype = col.subtype
if subtype and "[" in subtype:
idx = subtype.index("[")
col.subtype = subtype[:idx]
col.shape = json.loads(subtype[idx:])
# Convert ECSV "string" to numpy "str"
for attr in ("dtype", "subtype"):
if getattr(col, attr) == "string":
setattr(col, attr, "str")
# ECSV subtype of 'json' maps to numpy 'object' dtype
if col.subtype == "json":
col.subtype = "object"
def _check_dtype_is_str(col):
if col.dtype != "str":
raise ValueError(f'datatype of column {col.name!r} must be "string"')
| EcsvHeader |
python | pytorch__pytorch | benchmarks/dynamo/genai_layers/kernels.py | {
"start": 20337,
"end": 23406
} | class ____(BenchmarkKernel):
def __init__(self, script_args):
super().__init__(script_args)
self.available_backends = ["eager", "compiled", "liger"]
def get_shapes(self) -> tuple[tuple[int, ...], ...]:
# OOM for (16384, 131072), (8192, 262144)
return (
(32768, 256),
(32768, 512),
(32768, 1024),
(32768, 2048),
(32768, 4096),
(32768, 8192),
(32768, 16384),
(32768, 32768),
(32768, 65536),
) + extra_shapes_for_norm
def get_memory_bytes(self, args, kwargs) -> int:
x, w, dy = args
M, N = x.shape
# Read x ([M, N]), w ([N]), dy ([M, N]), write dx ([M, N]), dw ([N])
return (
2 * M * N * x.dtype.itemsize
+ 2 * N * w.dtype.itemsize
+ M * N * dy.dtype.itemsize
)
def layernorm_ref(self, x: torch.Tensor, w: torch.Tensor, eps: float = 1e-6):
x_f32 = x.float()
return F.layer_norm(x_f32, w.shape, w, None, eps).to(x.dtype)
def eager(self, args, kwargs=None) -> Any:
assert kwargs is None
x, w, dy = args
y = self.layernorm_ref(x, w)
return lambda: torch.autograd.grad(
y, [x, w], grad_outputs=dy, retain_graph=True
)
def compiled(self, args, kwargs=None) -> Any:
assert kwargs is None
x, w, dy = args
compiled_layernorm = torch.compile(
self.layernorm_ref, mode=self.compile_mode, fullgraph=True
)
y = compiled_layernorm(x, w)
return lambda: torch.autograd.grad(
y, [x, w], grad_outputs=dy, retain_graph=True
)
def compute_mean_rstd(self, x, eps):
x = x.float()
var, mean = torch.var_mean(x, dim=-1, keepdim=True, correction=0)
rstd = torch.rsqrt(var + eps)
return mean, rstd
def liger(self, args, kwargs) -> Any:
"""
Call layer_norm_backward directly rather than calling
liger_kernel.transformers.layer_norm.LigerLayerNorm and
torch.autograd.grad.
The latter fashion saves mean/rstd in x.dtype which can fail
accuracy test. We call layer_norm_backward with fp32 mean and
rstd.
"""
from liger_kernel.ops.layer_norm import layer_norm_backward
x, w, dy = args
eps = 1e-6
mean, rstd = self.compute_mean_rstd(x, eps)
M, N = x.shape
return lambda: layer_norm_backward(dy, x, w, None, mean, rstd)[0:2]
def benchmark(self):
for M, N in self.get_shapes():
print(f"Tensor dimensions: [{M}, {N}]")
torch_dtype = cutlass_torch.dtype(cutlass.BFloat16)
x = torch.randn(M, N, device="cuda", dtype=torch_dtype, requires_grad=True)
w = torch.randn(N, device="cuda", dtype=torch.float32, requires_grad=True)
dy = torch.randn(M, N, device="cuda", dtype=torch_dtype)
self.benchmark_single_shape((x, w, dy), setting=f"shape: [{M}, {N}]")
| LayerNormBackward |
python | huggingface__transformers | src/transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py | {
"start": 16802,
"end": 17690
} | class ____(PreTrainedModel):
config: HunYuanMoEV1Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["HunYuanMoEV1DecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": HunYuanMoEV1DecoderLayer,
"attentions": HunYuanMoEV1Attention,
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, HunYuanMoEV1Experts):
init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
| HunYuanMoEV1PreTrainedModel |
python | getsentry__sentry | src/sentry/releases/endpoints/organization_release_file_details.py | {
"start": 694,
"end": 4200
} | class ____(
OrganizationReleasesBaseEndpoint, ReleaseFileDetailsMixin
):
publish_status = {
"DELETE": ApiPublishStatus.UNKNOWN,
"GET": ApiPublishStatus.UNKNOWN,
"PUT": ApiPublishStatus.UNKNOWN,
}
def get(self, request: Request, organization, version, file_id) -> Response:
"""
Retrieve an Organization Release's File
```````````````````````````````````````
Return details on an individual file within a release. This does
not actually return the contents of the file, just the associated
metadata.
:pparam string organization_id_or_slug: the id or slug of the organization the
release belongs to.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to retrieve.
:auth: required
"""
try:
release = Release.objects.get(organization_id=organization.id, version=version)
except Release.DoesNotExist:
raise ResourceDoesNotExist
if not self.has_release_permission(request, organization, release):
raise ResourceDoesNotExist
return self.get_releasefile(
request,
release,
file_id,
check_permission_fn=lambda: request.access.has_scope("project:write"),
)
def put(self, request: Request, organization: Organization, version, file_id) -> Response:
"""
Update an Organization Release's File
`````````````````````````````````````
Update metadata of an existing file. Currently only the name of
the file can be changed.
:pparam string organization_id_or_slug: the id or slug of the organization the
release belongs to.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to update.
:param string name: the new name of the file.
:param string dist: the name of the dist.
:auth: required
"""
try:
release = Release.objects.get(organization_id=organization.id, version=version)
except Release.DoesNotExist:
raise ResourceDoesNotExist
if not self.has_release_permission(request, organization, release):
raise ResourceDoesNotExist
return self.update_releasefile(request, release, file_id)
def delete(self, request: Request, organization, version, file_id) -> Response:
"""
Delete an Organization Release's File
`````````````````````````````````````
Permanently remove a file from a release.
This will also remove the physical file from storage.
:pparam string organization_id_or_slug: the id or slug of the organization the
release belongs to.
:pparam string version: the version identifier of the release.
:pparam string file_id: the ID of the file to delete.
:auth: required
"""
try:
release = Release.objects.get(organization_id=organization.id, version=version)
except Release.DoesNotExist:
raise ResourceDoesNotExist
if not self.has_release_permission(request, organization, release):
raise ResourceDoesNotExist
return self.delete_releasefile(release, file_id)
| OrganizationReleaseFileDetailsEndpoint |
python | spack__spack | lib/spack/spack/util/environment.py | {
"start": 7545,
"end": 8326
} | class ____:
"""Base class for modifiers that act on the environment variable as a whole, and thus
store just its name
"""
__slots__ = ("name", "separator", "trace")
def __init__(self, name: str, *, separator: str = os.pathsep, trace: Optional[Trace] = None):
self.name = name.upper() if sys.platform == "win32" else name
self.separator = separator
self.trace = trace
def __eq__(self, other: object):
if not isinstance(other, NameModifier):
return NotImplemented
return self.name == other.name
def execute(self, env: MutableMapping[str, str]):
"""Apply the modification to the mapping passed as input"""
raise NotImplementedError("must be implemented by derived classes")
| NameModifier |
python | rq__rq | tests/test_spawn_worker.py | {
"start": 2257,
"end": 2757
} | class ____:
def setUp(self):
# we want tests to fail if signal are ignored and the work remain
# running, so set a signal to kill them after X seconds
self.killtimeout = 15
signal.signal(signal.SIGALRM, self._timeout)
signal.alarm(self.killtimeout)
def _timeout(self, signal, frame):
raise AssertionError(
"test still running after %i seconds, likely the worker wasn't shutdown correctly" % self.killtimeout
)
| TimeoutTestCase |
python | cloudpipe__cloudpickle | tests/cloudpickle_test.py | {
"start": 2959,
"end": 112176
} | class ____(unittest.TestCase):
protocol = cloudpickle.DEFAULT_PROTOCOL
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix="tmp_cloudpickle_test_")
def tearDown(self):
shutil.rmtree(self.tmpdir)
@pytest.mark.skipif(
platform.python_implementation() != "CPython" or sys.version_info < (3, 8, 2),
reason="Underlying bug fixed upstream starting Python 3.8.2",
)
def test_reducer_override_reference_cycle(self):
# Early versions of Python 3.8 introduced a reference cycle between a
# Pickler and it's reducer_override method. Because a Pickler
# object references every object it has pickled through its memo, this
# cycle prevented the garbage-collection of those external pickled
# objects. See #327 as well as https://bugs.python.org/issue39492
# This bug was fixed in Python 3.8.2, but is still present using
# cloudpickle and Python 3.8.0/1, hence the skipif directive.
class MyClass:
pass
my_object = MyClass()
wr = weakref.ref(my_object)
cloudpickle.dumps(my_object)
del my_object
assert wr() is None, "'del'-ed my_object has not been collected"
def test_itemgetter(self):
d = range(10)
getter = itemgetter(1)
getter2 = pickle_depickle(getter, protocol=self.protocol)
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = pickle_depickle(getter, protocol=self.protocol)
self.assertEqual(getter(d), getter2(d))
def test_attrgetter(self):
class C:
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = pickle_depickle(getter, protocol=self.protocol)
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = pickle_depickle(getter, protocol=self.protocol)
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = pickle_depickle(getter, protocol=self.protocol)
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = pickle_depickle(getter, protocol=self.protocol)
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
def test_pickling_file_handles(self):
out1 = sys.stderr
out2 = pickle.loads(cloudpickle.dumps(out1, protocol=self.protocol))
self.assertEqual(out1, out2)
def test_func_globals(self):
class Unpicklable:
def __reduce__(self):
raise Exception("not picklable")
global exit
exit = Unpicklable()
self.assertRaises(
Exception, lambda: cloudpickle.dumps(exit, protocol=self.protocol)
)
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.__code__.co_names)
cloudpickle.dumps(foo)
def test_memoryview(self):
buffer_obj = memoryview(b"Hello")
self.assertEqual(
pickle_depickle(buffer_obj, protocol=self.protocol), buffer_obj.tobytes()
)
def test_dict_keys(self):
keys = {"a": 1, "b": 2}.keys()
results = pickle_depickle(keys)
self.assertEqual(results, keys)
assert isinstance(results, _collections_abc.dict_keys)
def test_dict_values(self):
values = {"a": 1, "b": 2}.values()
results = pickle_depickle(values)
self.assertEqual(sorted(results), sorted(values))
assert isinstance(results, _collections_abc.dict_values)
def test_dict_items(self):
items = {"a": 1, "b": 2}.items()
results = pickle_depickle(items)
self.assertEqual(results, items)
assert isinstance(results, _collections_abc.dict_items)
def test_odict_keys(self):
keys = collections.OrderedDict([("a", 1), ("b", 2)]).keys()
results = pickle_depickle(keys)
self.assertEqual(results, keys)
assert type(keys) is type(results)
def test_odict_values(self):
values = collections.OrderedDict([("a", 1), ("b", 2)]).values()
results = pickle_depickle(values)
self.assertEqual(list(results), list(values))
assert type(values) is type(results)
def test_odict_items(self):
items = collections.OrderedDict([("a", 1), ("b", 2)]).items()
results = pickle_depickle(items)
self.assertEqual(results, items)
assert type(items) is type(results)
def test_sliced_and_non_contiguous_memoryview(self):
buffer_obj = memoryview(b"Hello!" * 3)[2:15:2]
self.assertEqual(
pickle_depickle(buffer_obj, protocol=self.protocol), buffer_obj.tobytes()
)
def test_large_memoryview(self):
buffer_obj = memoryview(b"Hello!" * int(1e7))
self.assertEqual(
pickle_depickle(buffer_obj, protocol=self.protocol), buffer_obj.tobytes()
)
def test_lambda(self):
self.assertEqual(pickle_depickle(lambda: 1, protocol=self.protocol)(), 1)
def test_nested_lambdas(self):
a, b = 1, 2
f1 = lambda x: x + a # noqa: E731
f2 = lambda x: f1(x) // b # noqa: E731
self.assertEqual(pickle_depickle(f2, protocol=self.protocol)(1), 1)
def test_recursive_closure(self):
def f1():
def g():
return g
return g
def f2(base):
def g(n):
return base if n <= 1 else n * g(n - 1)
return g
g1 = pickle_depickle(f1(), protocol=self.protocol)
self.assertEqual(g1(), g1)
g2 = pickle_depickle(f2(2), protocol=self.protocol)
self.assertEqual(g2(5), 240)
def test_closure_none_is_preserved(self):
def f():
"""A function with no closure cells"""
self.assertTrue(
f.__closure__ is None,
msg="f actually has closure cells!",
)
g = pickle_depickle(f, protocol=self.protocol)
self.assertTrue(
g.__closure__ is None,
msg="g now has closure cells even though f does not",
)
def test_empty_cell_preserved(self):
def f():
if False: # pragma: no cover
cell = None
def g():
cell # NameError, unbound free variable
return g
g1 = f()
with pytest.raises(NameError):
g1()
g2 = pickle_depickle(g1, protocol=self.protocol)
with pytest.raises(NameError):
g2()
def test_unhashable_closure(self):
def f():
s = {1, 2} # mutable set is unhashable
def g():
return len(s)
return g
g = pickle_depickle(f(), protocol=self.protocol)
self.assertEqual(g(), 2)
def test_class_no_firstlineno_deletion_(self):
# `__firstlineno__` is a new attribute of classes introduced in Python 3.13.
# This attribute used to be automatically deleted when unpickling a class as a
# consequence of cloudpickle setting a class's `__module__` attribute at
# unpickling time (see https://github.com/python/cpython/blob/73c152b346a18ed8308e469bdd232698e6cd3a63/Objects/typeobject.c#L1353-L1356).
# This deletion would cause tests like
# `test_deterministic_dynamic_class_attr_ordering_for_chained_pickling` to fail.
# This test makes sure that the attribute `__firstlineno__` is preserved
# across a cloudpickle roundtrip.
class A:
pass
if hasattr(A, "__firstlineno__"):
A_roundtrip = pickle_depickle(A, protocol=self.protocol)
assert hasattr(A_roundtrip, "__firstlineno__")
assert A_roundtrip.__firstlineno__ == A.__firstlineno__
def test_dynamically_generated_class_that_uses_super(self):
class Base:
def method(self):
return 1
class Derived(Base):
"Derived Docstring"
def method(self):
return super().method() + 1
self.assertEqual(Derived().method(), 2)
# Pickle and unpickle the class.
UnpickledDerived = pickle_depickle(Derived, protocol=self.protocol)
self.assertEqual(UnpickledDerived().method(), 2)
# We have special logic for handling __doc__ because it's a readonly
# attribute on PyPy.
self.assertEqual(UnpickledDerived.__doc__, "Derived Docstring")
# Pickle and unpickle an instance.
orig_d = Derived()
d = pickle_depickle(orig_d, protocol=self.protocol)
self.assertEqual(d.method(), 2)
def test_cycle_in_classdict_globals(self):
class C:
def it_works(self):
return "woohoo!"
C.C_again = C
C.instance_of_C = C()
depickled_C = pickle_depickle(C, protocol=self.protocol)
depickled_instance = pickle_depickle(C())
# Test instance of depickled class.
self.assertEqual(depickled_C().it_works(), "woohoo!")
self.assertEqual(depickled_C.C_again().it_works(), "woohoo!")
self.assertEqual(depickled_C.instance_of_C.it_works(), "woohoo!")
self.assertEqual(depickled_instance.it_works(), "woohoo!")
def test_locally_defined_function_and_class(self):
LOCAL_CONSTANT = 42
def some_function(x, y):
# Make sure the __builtins__ are not broken (see #211)
sum(range(10))
return (x + y) / LOCAL_CONSTANT
# pickle the function definition
result = pickle_depickle(some_function, protocol=self.protocol)(41, 1)
assert result == 1
result = pickle_depickle(some_function, protocol=self.protocol)(81, 3)
assert result == 2
hidden_constant = lambda: LOCAL_CONSTANT # noqa: E731
class SomeClass:
"""Overly complicated class with nested references to symbols"""
def __init__(self, value):
self.value = value
def one(self):
return LOCAL_CONSTANT / hidden_constant()
def some_method(self, x):
return self.one() + some_function(x, 1) + self.value
# pickle the class definition
clone_class = pickle_depickle(SomeClass, protocol=self.protocol)
self.assertEqual(clone_class(1).one(), 1)
self.assertEqual(clone_class(5).some_method(41), 7)
clone_class = subprocess_pickle_echo(SomeClass, protocol=self.protocol)
self.assertEqual(clone_class(5).some_method(41), 7)
# pickle the class instances
self.assertEqual(pickle_depickle(SomeClass(1)).one(), 1)
self.assertEqual(pickle_depickle(SomeClass(5)).some_method(41), 7)
new_instance = subprocess_pickle_echo(SomeClass(5), protocol=self.protocol)
self.assertEqual(new_instance.some_method(41), 7)
# pickle the method instances
self.assertEqual(pickle_depickle(SomeClass(1).one)(), 1)
self.assertEqual(pickle_depickle(SomeClass(5).some_method)(41), 7)
new_method = subprocess_pickle_echo(
SomeClass(5).some_method, protocol=self.protocol
)
self.assertEqual(new_method(41), 7)
def test_partial(self):
partial_obj = functools.partial(min, 1)
partial_clone = pickle_depickle(partial_obj, protocol=self.protocol)
self.assertEqual(partial_clone(4), 1)
@pytest.mark.skipif(
platform.python_implementation() == "PyPy",
reason="Skip numpy and scipy tests on PyPy",
)
def test_ufunc(self):
# test a numpy ufunc (universal function), which is a C-based function
# that is applied on a numpy array
if np:
# simple ufunc: np.add
self.assertEqual(pickle_depickle(np.add, protocol=self.protocol), np.add)
else: # skip if numpy is not available
pass
if spp:
# custom ufunc: scipy.special.iv
self.assertEqual(pickle_depickle(spp.iv, protocol=self.protocol), spp.iv)
else: # skip if scipy is not available
pass
def test_loads_namespace(self):
obj = 1, 2, 3, 4
returned_obj = cloudpickle.loads(cloudpickle.dumps(obj, protocol=self.protocol))
self.assertEqual(obj, returned_obj)
def test_load_namespace(self):
obj = 1, 2, 3, 4
bio = io.BytesIO()
cloudpickle.dump(obj, bio)
bio.seek(0)
returned_obj = cloudpickle.load(bio)
self.assertEqual(obj, returned_obj)
def test_generator(self):
def some_generator(cnt):
yield from range(cnt)
gen2 = pickle_depickle(some_generator, protocol=self.protocol)
assert isinstance(gen2(3), type(some_generator(3)))
assert list(gen2(3)) == list(range(3))
def test_classmethod(self):
class A:
@staticmethod
def test_sm():
return "sm"
@classmethod
def test_cm(cls):
return "cm"
sm = A.__dict__["test_sm"]
cm = A.__dict__["test_cm"]
A.test_sm = pickle_depickle(sm, protocol=self.protocol)
A.test_cm = pickle_depickle(cm, protocol=self.protocol)
self.assertEqual(A.test_sm(), "sm")
self.assertEqual(A.test_cm(), "cm")
def test_bound_classmethod(self):
class A:
@classmethod
def test_cm(cls):
return "cm"
A.test_cm = pickle_depickle(A.test_cm, protocol=self.protocol)
self.assertEqual(A.test_cm(), "cm")
def test_method_descriptors(self):
f = pickle_depickle(str.upper)
self.assertEqual(f("abc"), "ABC")
def test_instancemethods_without_self(self):
class F:
def f(self, x):
return x + 1
g = pickle_depickle(F.f, protocol=self.protocol)
self.assertEqual(g.__name__, F.f.__name__)
# self.assertEqual(g(F(), 1), 2) # still fails
def test_module(self):
pickle_clone = pickle_depickle(pickle, protocol=self.protocol)
self.assertEqual(pickle, pickle_clone)
def _check_dynamic_module(self, mod):
mod = types.ModuleType("mod")
code = """
x = 1
def f(y):
return x + y
class Foo:
def method(self, x):
return f(x)
"""
exec(textwrap.dedent(code), mod.__dict__)
mod2 = pickle_depickle(mod, protocol=self.protocol)
self.assertEqual(mod.x, mod2.x)
self.assertEqual(mod.f(5), mod2.f(5))
self.assertEqual(mod.Foo().method(5), mod2.Foo().method(5))
if platform.python_implementation() != "PyPy":
# XXX: this fails with excessive recursion on PyPy.
mod3 = subprocess_pickle_echo(mod, protocol=self.protocol)
self.assertEqual(mod.x, mod3.x)
self.assertEqual(mod.f(5), mod3.f(5))
self.assertEqual(mod.Foo().method(5), mod3.Foo().method(5))
# Test dynamic modules when imported back are singletons
mod1, mod2 = pickle_depickle([mod, mod])
self.assertEqual(id(mod1), id(mod2))
# Ensure proper pickling of mod's functions when module "looks" like a
# file-backed module even though it is not:
try:
sys.modules["mod"] = mod
depickled_f = pickle_depickle(mod.f, protocol=self.protocol)
self.assertEqual(mod.f(5), depickled_f(5))
finally:
sys.modules.pop("mod", None)
def test_dynamic_module(self):
mod = types.ModuleType("mod")
assert mod.__package__ is None
self._check_dynamic_module(mod)
def test_dynamic_module_no_package(self):
# non-regression test for #116
mod = types.ModuleType("mod")
del mod.__package__
assert not hasattr(mod, "__package__")
self._check_dynamic_module(mod)
def test_module_locals_behavior(self):
# Makes sure that a local function defined in another module is
# correctly serialized. This notably checks that the globals are
# accessible and that there is no issue with the builtins (see #211)
pickled_func_path = os.path.join(self.tmpdir, "local_func_g.pkl")
child_process_script = """
import pickle
import gc
with open("{pickled_func_path}", 'rb') as f:
func = pickle.load(f)
assert func(range(10)) == 45
"""
child_process_script = child_process_script.format(
pickled_func_path=_escape(pickled_func_path)
)
try:
from .testutils import make_local_function
g = make_local_function()
with open(pickled_func_path, "wb") as f:
cloudpickle.dump(g, f, protocol=self.protocol)
assert_run_python_script(textwrap.dedent(child_process_script))
finally:
os.unlink(pickled_func_path)
def test_dynamic_module_with_unpicklable_builtin(self):
# Reproducer of https://github.com/cloudpipe/cloudpickle/issues/316
# Some modules such as scipy inject some unpicklable objects into the
# __builtins__ module, which appears in every module's __dict__ under
# the '__builtins__' key. In such cases, cloudpickle used to fail
# when pickling dynamic modules.
class UnpickleableObject:
def __reduce__(self):
raise ValueError("Unpicklable object")
mod = types.ModuleType("mod")
exec("f = lambda x: abs(x)", mod.__dict__)
assert mod.f(-1) == 1
assert "__builtins__" in mod.__dict__
unpicklable_obj = UnpickleableObject()
with pytest.raises(ValueError):
cloudpickle.dumps(unpicklable_obj)
# Emulate the behavior of scipy by injecting an unpickleable object
# into mod's builtins.
# The __builtins__ entry of mod's __dict__ can either be the
# __builtins__ module, or the __builtins__ module's __dict__. #316
# happens only in the latter case.
if isinstance(mod.__dict__["__builtins__"], dict):
mod.__dict__["__builtins__"]["unpickleable_obj"] = unpicklable_obj
elif isinstance(mod.__dict__["__builtins__"], types.ModuleType):
mod.__dict__["__builtins__"].unpickleable_obj = unpicklable_obj
depickled_mod = pickle_depickle(mod, protocol=self.protocol)
assert "__builtins__" in depickled_mod.__dict__
if isinstance(depickled_mod.__dict__["__builtins__"], dict):
assert "abs" in depickled_mod.__builtins__
elif isinstance(depickled_mod.__dict__["__builtins__"], types.ModuleType):
assert hasattr(depickled_mod.__builtins__, "abs")
assert depickled_mod.f(-1) == 1
# Additional check testing that the issue #425 is fixed: without the
# fix for #425, `mod.f` would not have access to `__builtins__`, and
# thus calling `mod.f(-1)` (which relies on the `abs` builtin) would
# fail.
assert mod.f(-1) == 1
def test_load_dynamic_module_in_grandchild_process(self):
# Make sure that when loaded, a dynamic module preserves its dynamic
# property. Otherwise, this will lead to an ImportError if pickled in
# the child process and reloaded in another one.
# We create a new dynamic module
mod = types.ModuleType("mod")
code = """
x = 1
"""
exec(textwrap.dedent(code), mod.__dict__)
# This script will be ran in a separate child process. It will import
# the pickled dynamic module, and then re-pickle it under a new name.
# Finally, it will create a child process that will load the re-pickled
# dynamic module.
parent_process_module_file = os.path.join(
self.tmpdir, "dynamic_module_from_parent_process.pkl"
)
child_process_module_file = os.path.join(
self.tmpdir, "dynamic_module_from_child_process.pkl"
)
child_process_script = """
import pickle
import textwrap
import cloudpickle
from testutils import assert_run_python_script
child_of_child_process_script = {child_of_child_process_script}
with open('{parent_process_module_file}', 'rb') as f:
mod = pickle.load(f)
with open('{child_process_module_file}', 'wb') as f:
cloudpickle.dump(mod, f, protocol={protocol})
assert_run_python_script(textwrap.dedent(child_of_child_process_script))
"""
# The script ran by the process created by the child process
child_of_child_process_script = """ '''
import pickle
with open('{child_process_module_file}','rb') as fid:
mod = pickle.load(fid)
''' """
# Filling the two scripts with the pickled modules filepaths and,
# for the first child process, the script to be executed by its
# own child process.
child_of_child_process_script = child_of_child_process_script.format(
child_process_module_file=child_process_module_file
)
child_process_script = child_process_script.format(
parent_process_module_file=_escape(parent_process_module_file),
child_process_module_file=_escape(child_process_module_file),
child_of_child_process_script=_escape(child_of_child_process_script),
protocol=self.protocol,
)
try:
with open(parent_process_module_file, "wb") as fid:
cloudpickle.dump(mod, fid, protocol=self.protocol)
assert_run_python_script(textwrap.dedent(child_process_script))
finally:
# Remove temporary created files
if os.path.exists(parent_process_module_file):
os.unlink(parent_process_module_file)
if os.path.exists(child_process_module_file):
os.unlink(child_process_module_file)
def test_correct_globals_import(self):
def nested_function(x):
return x + 1
def unwanted_function(x):
return math.exp(x)
def my_small_function(x, y):
return nested_function(x) + y
b = cloudpickle.dumps(my_small_function, protocol=self.protocol)
# Make sure that the pickle byte string only includes the definition
# of my_small_function and its dependency nested_function while
# extra functions and modules such as unwanted_function and the math
# module are not included so as to keep the pickle payload as
# lightweight as possible.
assert b"my_small_function" in b
assert b"nested_function" in b
assert b"unwanted_function" not in b
assert b"math" not in b
def test_module_importability(self):
import pickle
import os.path
import collections
import collections.abc
assert _should_pickle_by_reference(pickle)
assert _should_pickle_by_reference(os.path) # fake (aliased) module
assert _should_pickle_by_reference(collections) # package
assert _should_pickle_by_reference(collections.abc) # module in package
dynamic_module = types.ModuleType("dynamic_module")
assert not _should_pickle_by_reference(dynamic_module)
if platform.python_implementation() == "PyPy":
import _codecs
assert _should_pickle_by_reference(_codecs)
# #354: Check that modules created dynamically during the import of
# their parent modules are considered importable by cloudpickle.
# See the mod_with_dynamic_submodule documentation for more
# details of this use case.
m = pytest.importorskip(
"_cloudpickle_testpkg.mod.dynamic_submodule"
) # noqa F841
assert _should_pickle_by_reference(m)
assert pickle_depickle(m, protocol=self.protocol) is m
# Check for similar behavior for a module that cannot be imported by
# attribute lookup.
from _cloudpickle_testpkg.mod import dynamic_submodule_two as m2
assert _should_pickle_by_reference(m2)
assert pickle_depickle(m2, protocol=self.protocol) is m2
# Submodule_three is a dynamic module only importable via module lookup
with pytest.raises(ImportError):
import _cloudpickle_testpkg.mod.submodule_three # noqa
from _cloudpickle_testpkg.mod import submodule_three as m3
assert not _should_pickle_by_reference(m3)
# This module cannot be pickled using attribute lookup (as it does not
# have a `__module__` attribute like classes and functions.
assert not hasattr(m3, "__module__")
depickled_m3 = pickle_depickle(m3, protocol=self.protocol)
assert depickled_m3 is not m3
assert m3.f(1) == depickled_m3.f(1)
# Do the same for an importable dynamic submodule inside a dynamic
# module inside a file-backed module.
import _cloudpickle_testpkg.mod.dynamic_submodule.dynamic_subsubmodule as sm # noqa
assert _should_pickle_by_reference(sm)
assert pickle_depickle(sm, protocol=self.protocol) is sm
expected = "cannot check importability of object instances"
with pytest.raises(TypeError, match=expected):
_should_pickle_by_reference(object())
def test_Ellipsis(self):
self.assertEqual(Ellipsis, pickle_depickle(Ellipsis, protocol=self.protocol))
def test_NotImplemented(self):
ExcClone = pickle_depickle(NotImplemented, protocol=self.protocol)
self.assertEqual(NotImplemented, ExcClone)
def test_NoneType(self):
res = pickle_depickle(type(None), protocol=self.protocol)
self.assertEqual(type(None), res)
def test_EllipsisType(self):
res = pickle_depickle(type(Ellipsis), protocol=self.protocol)
self.assertEqual(type(Ellipsis), res)
def test_NotImplementedType(self):
res = pickle_depickle(type(NotImplemented), protocol=self.protocol)
self.assertEqual(type(NotImplemented), res)
def test_builtin_function(self):
# Note that builtin_function_or_method are special-cased by cloudpickle
# only in python2.
# builtin function from the __builtin__ module
assert pickle_depickle(zip, protocol=self.protocol) is zip
from os import mkdir
# builtin function from a "regular" module
assert pickle_depickle(mkdir, protocol=self.protocol) is mkdir
def test_builtin_type_constructor(self):
# This test makes sure that cloudpickling builtin-type
# constructors works for all python versions/implementation.
# pickle_depickle some builtin methods of the __builtin__ module
for t in list, tuple, set, frozenset, dict, object:
cloned_new = pickle_depickle(t.__new__, protocol=self.protocol)
assert isinstance(cloned_new(t), t)
# The next 4 tests cover all cases into which builtin python methods can
# appear.
# There are 4 kinds of method: 'classic' methods, classmethods,
# staticmethods and slotmethods. They will appear under different types
# depending on whether they are called from the __dict__ of their
# class, their class itself, or an instance of their class. This makes
# 12 total combinations.
# This discussion and the following tests are relevant for the CPython
# implementation only. In PyPy, there is no builtin method or builtin
# function types/flavours. The only way into which a builtin method can be
# identified is with it's builtin-code __code__ attribute.
def test_builtin_classicmethod(self):
obj = 1.5 # float object
bound_classicmethod = obj.hex # builtin_function_or_method
unbound_classicmethod = type(obj).hex # method_descriptor
clsdict_classicmethod = type(obj).__dict__["hex"] # method_descriptor
assert unbound_classicmethod is clsdict_classicmethod
depickled_bound_meth = pickle_depickle(
bound_classicmethod, protocol=self.protocol
)
depickled_unbound_meth = pickle_depickle(
unbound_classicmethod, protocol=self.protocol
)
depickled_clsdict_meth = pickle_depickle(
clsdict_classicmethod, protocol=self.protocol
)
# No identity on the bound methods they are bound to different float
# instances
assert depickled_bound_meth() == bound_classicmethod()
assert depickled_unbound_meth is unbound_classicmethod
assert depickled_clsdict_meth is clsdict_classicmethod
def test_builtin_classmethod(self):
obj = 1.5 # float object
bound_clsmethod = obj.fromhex # builtin_function_or_method
unbound_clsmethod = type(obj).fromhex # builtin_function_or_method
depickled_bound_meth = pickle_depickle(bound_clsmethod, protocol=self.protocol)
depickled_unbound_meth = pickle_depickle(
unbound_clsmethod, protocol=self.protocol
)
# float.fromhex takes a string as input.
arg = "0x1"
# Identity on both the bound and the unbound methods cannot be
# tested: the bound methods are bound to different objects, and the
# unbound methods are actually recreated at each call.
assert depickled_bound_meth(arg) == bound_clsmethod(arg)
assert depickled_unbound_meth(arg) == unbound_clsmethod(arg)
@pytest.mark.skipif(
(
sys.version_info >= (3, 10, 8)
and platform.python_implementation() == "CPython"
),
reason=(
"CPython dropped support for pickling classmethod_descriptor,"
"https://github.com/python/cpython/issues/95196"
),
)
def test_builtin_classmethod_descriptor(self):
# `classmethod_descriptor` is the analogue `classmethod` (used for
# pure Python classes) for builtin types. Until CPython 3.10.8,
# `classmethod_descriptor` implemented an (incorrect) reducer. After
# https://github.com/python/cpython/issues/95196 revealed its
# incorrectness, this reducer was dropped (and not fixed), on the
# ground that pickling its Pythonic equivalent, `classmethod`,
# was never supported in the first place.
# Note that cloudpickle supports pickling `classmethod` objects,
# but never patched pickle's incorrect `classmethod_descriptor`
# reducer: pickling `classmethod_descriptor` objects using cloudpickle
# has always been broken.
obj = 1.5 # float object
clsdict_clsmethod = type(obj).__dict__["fromhex"] # classmethod_descriptor
depickled_clsdict_meth = pickle_depickle(
clsdict_clsmethod, protocol=self.protocol
)
# float.fromhex takes a string as input.
arg = "0x1"
if platform.python_implementation() == "CPython":
# Roundtripping a classmethod_descriptor results in a
# builtin_function_or_method (CPython upstream issue).
assert depickled_clsdict_meth(arg) == clsdict_clsmethod(float, arg)
if platform.python_implementation() == "PyPy":
# builtin-classmethods are simple classmethod in PyPy (not
# callable). We test equality of types and the functionality of the
# __func__ attribute instead. We do not test the the identity of
# the functions as __func__ attributes of classmethods are not
# pickleable and must be reconstructed at depickling time.
assert type(depickled_clsdict_meth) is type(clsdict_clsmethod)
assert depickled_clsdict_meth.__func__(
float, arg
) == clsdict_clsmethod.__func__(float, arg)
def test_builtin_slotmethod(self):
obj = 1.5 # float object
bound_slotmethod = obj.__repr__ # method-wrapper
unbound_slotmethod = type(obj).__repr__ # wrapper_descriptor
clsdict_slotmethod = type(obj).__dict__["__repr__"] # ditto
depickled_bound_meth = pickle_depickle(bound_slotmethod, protocol=self.protocol)
depickled_unbound_meth = pickle_depickle(
unbound_slotmethod, protocol=self.protocol
)
depickled_clsdict_meth = pickle_depickle(
clsdict_slotmethod, protocol=self.protocol
)
# No identity tests on the bound slotmethod are they are bound to
# different float instances
assert depickled_bound_meth() == bound_slotmethod()
assert depickled_unbound_meth is unbound_slotmethod
assert depickled_clsdict_meth is clsdict_slotmethod
@pytest.mark.skipif(
platform.python_implementation() == "PyPy",
reason="No known staticmethod example in the pypy stdlib",
)
def test_builtin_staticmethod(self):
obj = "foo" # str object
bound_staticmethod = obj.maketrans # builtin_function_or_method
unbound_staticmethod = type(obj).maketrans # ditto
clsdict_staticmethod = type(obj).__dict__["maketrans"] # staticmethod
assert bound_staticmethod is unbound_staticmethod
depickled_bound_meth = pickle_depickle(
bound_staticmethod, protocol=self.protocol
)
depickled_unbound_meth = pickle_depickle(
unbound_staticmethod, protocol=self.protocol
)
depickled_clsdict_meth = pickle_depickle(
clsdict_staticmethod, protocol=self.protocol
)
assert depickled_bound_meth is bound_staticmethod
assert depickled_unbound_meth is unbound_staticmethod
# staticmethod objects are recreated at depickling time, but the
# underlying __func__ object is pickled by attribute.
assert depickled_clsdict_meth.__func__ is clsdict_staticmethod.__func__
type(depickled_clsdict_meth) is type(clsdict_staticmethod)
    def test_tornado_coroutine(self):
        """Locally defined tornado coroutines (and their mutual references)
        must pickle as a single memoized unit and still run on an IOLoop."""
        # Pickling a locally defined coroutine function
        gen = pytest.importorskip("tornado.gen")
        ioloop = pytest.importorskip("tornado.ioloop")
        @gen.coroutine
        def f(x, y):
            yield gen.sleep(x)
            raise gen.Return(y + 1)
        @gen.coroutine
        def g(y):
            res = yield f(0.01, y)  # noqa: F821
            raise gen.Return(res + 1)
        # is_tornado_coroutine is deprecated; the call must still work but warn.
        with pytest.warns(DeprecationWarning):
            assert cloudpickle.is_tornado_coroutine(g)
        data = cloudpickle.dumps([g, g], protocol=self.protocol)
        del f, g
        # Pickling the same function twice in one payload must deduplicate.
        g2, g3 = pickle.loads(data)
        assert g2 is g3
        loop = ioloop.IOLoop(make_current=False)
        res = loop.run_sync(functools.partial(g2, 5))
        # g adds 1 on top of f's +1: 5 -> 6 -> 7
        assert res == 7
    @pytest.mark.skipif(
        (3, 11, 0, "beta") <= sys.version_info < (3, 11, 0, "beta", 4),
        reason="https://github.com/python/cpython/issues/92932",
    )
    def test_extended_arg(self):
        """Functions whose bytecode needs EXTENDED_ARG (because they reference
        more than 65535 globals) must survive a pickle round-trip unchanged."""
        # Functions with more than 65535 global vars prefix some global
        # variable references with the EXTENDED_ARG opcode.
        nvars = 65537 + 258
        names = ["g%d" % i for i in range(1, nvars)]
        r = random.Random(42)
        d = {name: r.randrange(100) for name in names}
        # def f(x):
        # x = g1, g2, ...
        # return zlib.crc32(bytes(bytearray(x)))
        code = """
        import zlib
        def f():
            x = {tup}
            return zlib.crc32(bytes(bytearray(x)))
        """.format(
            tup=", ".join(names)
        )
        exec(textwrap.dedent(code), d, d)
        f = d["f"]
        res = f()
        data = cloudpickle.dumps([f, f], protocol=self.protocol)
        # Drop local references so the unpickled copies cannot alias them.
        d = f = None
        f2, f3 = pickle.loads(data)
        self.assertTrue(f2 is f3)
        self.assertEqual(f2(), res)
    def test_submodule(self):
        """A function referring to a package's sub-module by attribute must
        re-import that sub-module when unpickled after it was unloaded."""
        # Function that refers (by attribute) to a sub-module of a package.
        # Choose any module NOT imported by __init__ of its parent package
        # examples in standard library include:
        # http.cookies, unittest.mock, curses.textpad, xml.etree.ElementTree
        import xml
        import xml.etree.ElementTree
        def example():
            _ = xml.etree.ElementTree.Comment  # noqa: F821
        example()  # smoke test
        s = cloudpickle.dumps(example, protocol=self.protocol)
        # refresh the environment, i.e., unimport the dependency
        del xml
        for item in list(sys.modules):
            if item.split(".")[0] == "xml":
                del sys.modules[item]
        # deserialise
        f = pickle.loads(s)
        f()  # smoke test
    def test_submodule_closure(self):
        """Same as test_submodule, but the sub-module reference lives in a
        closure instead of the function's globals."""
        # Same as test_submodule except the xml package has not been imported
        def scope():
            import xml.etree.ElementTree
            def example():
                _ = xml.etree.ElementTree.Comment  # potential AttributeError
            return example
        example = scope()
        example()  # smoke test
        s = cloudpickle.dumps(example, protocol=self.protocol)
        # refresh the environment (unimport dependency)
        for item in list(sys.modules):
            if item.split(".")[0] == "xml":
                del sys.modules[item]
        f = cloudpickle.loads(s)
        f()  # smoke test
    def test_multiprocess(self):
        """A pickled dynamic function must load and run in a completely fresh
        interpreter (no fork, nothing inherited from this process)."""
        # running a function pickled by another process (a la dask.distributed)
        def scope():
            def example():
                _ = xml.etree.ElementTree.Comment
            return example
        global xml
        import xml.etree.ElementTree
        example = scope()
        s = cloudpickle.dumps(example, protocol=self.protocol)
        # choose "subprocess" rather than "multiprocessing" because the latter
        # library uses fork to preserve the parent environment.
        command = (
            "import base64; import pickle; pickle.loads(base64.b32decode('"
            + base64.b32encode(s).decode("ascii")
            + "'))()"
        )
        # subprocess.call returns 0 (falsy) on success.
        assert not subprocess.call([sys.executable, "-c", command])
    def test_import(self):
        """Like test_multiprocess, but the function references sub-package
        modules directly via a global and a closure alias."""
        # like test_multiprocess except subpackage modules referenced directly
        # (unlike test_submodule)
        global etree
        def scope():
            import xml.etree as foobar
            def example():
                _ = etree.Comment
                _ = foobar.ElementTree
            return example
        example = scope()
        import xml.etree.ElementTree as etree
        s = cloudpickle.dumps(example, protocol=self.protocol)
        command = (
            "import base64; from pickle import loads; loads(base64.b32decode('"
            + base64.b32encode(s).decode("ascii")
            + "'))()"
        )
        # subprocess.call returns 0 (falsy) on success.
        assert not subprocess.call([sys.executable, "-c", command])
def test_multiprocessing_lock_raises(self):
lock = multiprocessing.Lock()
with pytest.raises(
RuntimeError, match="only be shared between processes through inheritance"
):
cloudpickle.dumps(lock)
def test_cell_manipulation(self):
cell = _make_empty_cell()
with pytest.raises(ValueError):
cell.cell_contents
ob = object()
cell.cell_contents = ob
assert cell.cell_contents is ob
    def check_logger(self, name):
        """Helper: a logger must round-trip to the same singleton in-process,
        and remain functional when unpickled in a fresh interpreter."""
        logger = logging.getLogger(name)
        pickled = pickle_depickle(logger, protocol=self.protocol)
        # Loggers are process-level singletons: identity must be preserved.
        self.assertTrue(pickled is logger, (pickled, logger))
        dumped = cloudpickle.dumps(logger)
        code = """if 1:
            import base64, cloudpickle, logging
            logging.basicConfig(level=logging.INFO)
            logger = cloudpickle.loads(base64.b32decode(b'{}'))
            logger.info('hello')
        """.format(
            base64.b32encode(dumped).decode("ascii")
        )
        proc = subprocess.Popen(
            [sys.executable, "-W ignore", "-c", code],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        out, _ = proc.communicate()
        self.assertEqual(proc.wait(), 0)
        self.assertEqual(out.strip().decode(), f"INFO:{logger.name}:hello")
def test_logger(self):
# logging.RootLogger object
self.check_logger(None)
# logging.Logger object
self.check_logger("cloudpickle.dummy_test_logger")
def test_getset_descriptor(self):
assert isinstance(float.real, types.GetSetDescriptorType)
depickled_descriptor = pickle_depickle(float.real)
self.assertIs(depickled_descriptor, float.real)
    def test_abc_cache_not_pickled(self):
        """ABCMeta subclass-check caches must be excluded from the pickle."""
        # cloudpickle issue #302: make sure that cloudpickle does not pickle
        # the caches populated during instance/subclass checks of abc.ABCMeta
        # instances.
        MyClass = abc.ABCMeta("MyClass", (), {})
        class MyUnrelatedClass:
            pass
        class MyRelatedClass:
            pass
        MyClass.register(MyRelatedClass)
        # These issubclass calls populate ABCMeta's negative/positive caches.
        assert not issubclass(MyUnrelatedClass, MyClass)
        assert issubclass(MyRelatedClass, MyClass)
        s = cloudpickle.dumps(MyClass)
        # The negative-cache entry must not leak into the payload, while the
        # explicitly registered virtual subclass must be serialized.
        assert b"MyUnrelatedClass" not in s
        assert b"MyRelatedClass" in s
        depickled_class = cloudpickle.loads(s)
        assert not issubclass(MyUnrelatedClass, depickled_class)
        assert issubclass(MyRelatedClass, depickled_class)
    def test_abc(self):
        """ABCs, concrete subclasses and instances round-trip; the depickled
        base keeps its virtual-subclass registry, stays abstract, and remains
        subclassable with the usual abstract-method enforcement."""
        class AbstractClass(abc.ABC):
            @abc.abstractmethod
            def some_method(self):
                """A method"""
            @classmethod
            @abc.abstractmethod
            def some_classmethod(cls):
                """A classmethod"""
            @staticmethod
            @abc.abstractmethod
            def some_staticmethod():
                """A staticmethod"""
            @property
            @abc.abstractmethod
            def some_property():
                """A property"""
        class ConcreteClass(AbstractClass):
            def some_method(self):
                return "it works!"
            @classmethod
            def some_classmethod(cls):
                assert cls == ConcreteClass
                return "it works!"
            @staticmethod
            def some_staticmethod():
                return "it works!"
            @property
            def some_property(self):
                return "it works!"
        # This abstract class is locally defined so we can safely register
        # tuple in it to verify the unpickled class also register tuple.
        AbstractClass.register(tuple)
        concrete_instance = ConcreteClass()
        depickled_base = pickle_depickle(AbstractClass, protocol=self.protocol)
        depickled_class = pickle_depickle(ConcreteClass, protocol=self.protocol)
        depickled_instance = pickle_depickle(concrete_instance)
        # The virtual-subclass registration must survive the round-trip.
        assert issubclass(tuple, AbstractClass)
        assert issubclass(tuple, depickled_base)
        self.assertEqual(depickled_class().some_method(), "it works!")
        self.assertEqual(depickled_instance.some_method(), "it works!")
        self.assertEqual(depickled_class.some_classmethod(), "it works!")
        self.assertEqual(depickled_instance.some_classmethod(), "it works!")
        self.assertEqual(depickled_class().some_staticmethod(), "it works!")
        self.assertEqual(depickled_instance.some_staticmethod(), "it works!")
        self.assertEqual(depickled_class().some_property, "it works!")
        self.assertEqual(depickled_instance.some_property, "it works!")
        # The depickled base must still be abstract (not instantiable).
        self.assertRaises(TypeError, depickled_base)
        class DepickledBaseSubclass(depickled_base):
            def some_method(self):
                return "it works for realz!"
            @classmethod
            def some_classmethod(cls):
                assert cls == DepickledBaseSubclass
                return "it works for realz!"
            @staticmethod
            def some_staticmethod():
                return "it works for realz!"
            @property
            def some_property():
                return "it works for realz!"
        self.assertEqual(DepickledBaseSubclass().some_method(), "it works for realz!")
        # A subclass missing some abstract methods must stay abstract.
        class IncompleteBaseSubclass(depickled_base):
            def some_method(self):
                return "this class lacks some concrete methods"
        self.assertRaises(TypeError, IncompleteBaseSubclass)
    def test_abstracts(self):
        """Same as `test_abc` but using the deprecated `abc.abstract*`
        decorator aliases (abstractclassmethod & co)."""
        # Same as `test_abc` but using deprecated `abc.abstract*` methods.
        # See https://github.com/cloudpipe/cloudpickle/issues/367
        class AbstractClass(abc.ABC):
            @abc.abstractmethod
            def some_method(self):
                """A method"""
            @abc.abstractclassmethod
            def some_classmethod(cls):
                """A classmethod"""
            @abc.abstractstaticmethod
            def some_staticmethod():
                """A staticmethod"""
            @abc.abstractproperty
            def some_property(self):
                """A property"""
        class ConcreteClass(AbstractClass):
            def some_method(self):
                return "it works!"
            @classmethod
            def some_classmethod(cls):
                assert cls == ConcreteClass
                return "it works!"
            @staticmethod
            def some_staticmethod():
                return "it works!"
            @property
            def some_property(self):
                return "it works!"
        # This abstract class is locally defined so we can safely register
        # tuple in it to verify the unpickled class also register tuple.
        AbstractClass.register(tuple)
        concrete_instance = ConcreteClass()
        depickled_base = pickle_depickle(AbstractClass, protocol=self.protocol)
        depickled_class = pickle_depickle(ConcreteClass, protocol=self.protocol)
        depickled_instance = pickle_depickle(concrete_instance)
        # The virtual-subclass registration must survive the round-trip.
        assert issubclass(tuple, AbstractClass)
        assert issubclass(tuple, depickled_base)
        self.assertEqual(depickled_class().some_method(), "it works!")
        self.assertEqual(depickled_instance.some_method(), "it works!")
        self.assertEqual(depickled_class.some_classmethod(), "it works!")
        self.assertEqual(depickled_instance.some_classmethod(), "it works!")
        self.assertEqual(depickled_class().some_staticmethod(), "it works!")
        self.assertEqual(depickled_instance.some_staticmethod(), "it works!")
        self.assertEqual(depickled_class().some_property, "it works!")
        self.assertEqual(depickled_instance.some_property, "it works!")
        # The depickled base must still be abstract (not instantiable).
        self.assertRaises(TypeError, depickled_base)
        class DepickledBaseSubclass(depickled_base):
            def some_method(self):
                return "it works for realz!"
            @classmethod
            def some_classmethod(cls):
                assert cls == DepickledBaseSubclass
                return "it works for realz!"
            @staticmethod
            def some_staticmethod():
                return "it works for realz!"
            @property
            def some_property(self):
                return "it works for realz!"
        self.assertEqual(DepickledBaseSubclass().some_method(), "it works for realz!")
        # A subclass missing some abstract methods must stay abstract.
        class IncompleteBaseSubclass(depickled_base):
            def some_method(self):
                return "this class lacks some concrete methods"
        self.assertRaises(TypeError, IncompleteBaseSubclass)
def test_weakset_identity_preservation(self):
# Test that weaksets don't lose all their inhabitants if they're
# pickled in a larger data structure that includes other references to
# their inhabitants.
class SomeClass:
def __init__(self, x):
self.x = x
obj1, obj2, obj3 = SomeClass(1), SomeClass(2), SomeClass(3)
things = [weakref.WeakSet([obj1, obj2]), obj1, obj2, obj3]
result = pickle_depickle(things, protocol=self.protocol)
weakset, depickled1, depickled2, depickled3 = result
self.assertEqual(depickled1.x, 1)
self.assertEqual(depickled2.x, 2)
self.assertEqual(depickled3.x, 3)
self.assertEqual(len(weakset), 2)
self.assertEqual(set(weakset), {depickled1, depickled2})
def test_non_module_object_passing_whichmodule_test(self):
# https://github.com/cloudpipe/cloudpickle/pull/326: cloudpickle should
# not try to instrospect non-modules object when trying to discover the
# module of a function/class. This happenened because codecov injects
# tuples (and not modules) into sys.modules, but type-checks were not
# carried out on the entries of sys.modules, causing cloupdickle to
# then error in unexpected ways
def func(x):
return x**2
# Trigger a loop during the execution of whichmodule(func) by
# explicitly setting the function's module to None
func.__module__ = None
class NonModuleObject:
def __ini__(self):
self.some_attr = None
def __getattr__(self, name):
# We whitelist func so that a _whichmodule(func, None) call
# returns the NonModuleObject instance if a type check on the
# entries of sys.modules is not carried out, but manipulating
# this instance thinking it really is a module later on in the
# pickling process of func errors out
if name == "func":
return func
else:
raise AttributeError
non_module_object = NonModuleObject()
assert func(2) == 4
assert func is non_module_object.func
# Any manipulation of non_module_object relying on attribute access
# will raise an Exception
with pytest.raises(AttributeError):
_ = non_module_object.some_attr
try:
sys.modules["NonModuleObject"] = non_module_object
func_module_name = _whichmodule(func, "func")
assert func_module_name != "NonModuleObject"
assert func_module_name is None
depickled_func = pickle_depickle(func, protocol=self.protocol)
assert depickled_func(2) == 4
finally:
sys.modules.pop("NonModuleObject")
    def test_importing_multiprocessing_does_not_impact_whichmodule(self):
        """_whichmodule must find cloudpickle.cloudpickle for `dumps` even
        after multiprocessing has been imported (non-regression for #528)."""
        # non-regression test for #528
        script = textwrap.dedent(
            """
            import multiprocessing
            import cloudpickle
            from cloudpickle.cloudpickle import dumps
            # Trigger a loop during the execution of whichmodule() by
            # explicitly setting the function's module to None
            dumps.__module__ = None
            print(cloudpickle.cloudpickle._whichmodule(dumps, dumps.__name__))
            """
        )
        script_path = Path(self.tmpdir) / "whichmodule_and_multiprocessing.py"
        with open(script_path, mode="w") as f:
            f.write(script)
        proc = subprocess.Popen(
            [sys.executable, str(script_path)],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        out, _ = proc.communicate()
        self.assertEqual(proc.wait(), 0, msg="Stdout: " + str(out))
        self.assertEqual(out.strip(), b"cloudpickle.cloudpickle")
    def test_unrelated_faulty_module(self):
        """Pickling must not fail just because sys.modules contains unrelated
        modules whose attribute access raises."""
        # Check that pickling a dynamically defined function or class does not
        # fail when introspecting the currently loaded modules in sys.modules
        # as long as those faulty modules are unrelated to the class or
        # function we are currently pickling.
        for base_class in (object, types.ModuleType):
            for module_name in ["_missing_module", None]:
                class FaultyModule(base_class):
                    def __getattr__(self, name):
                        # This throws an exception while looking up within
                        # pickle.whichmodule or getattr(module, name, None)
                        raise Exception()
                class Foo:
                    __module__ = module_name
                    def foo(self):
                        return "it works!"
                def foo():
                    return "it works!"
                foo.__module__ = module_name
                if base_class is types.ModuleType:  # noqa
                    faulty_module = FaultyModule("_faulty_module")
                else:
                    faulty_module = FaultyModule()
                sys.modules["_faulty_module"] = faulty_module
                try:
                    # Test whichmodule in save_global.
                    self.assertEqual(pickle_depickle(Foo()).foo(), "it works!")
                    # Test whichmodule in save_function.
                    cloned = pickle_depickle(foo, protocol=self.protocol)
                    self.assertEqual(cloned(), "it works!")
                finally:
                    sys.modules.pop("_faulty_module", None)
def test_function_module_name(self):
def local_func(x):
return x
for func in [local_func, lambda x: x]:
cloned = pickle_depickle(func, protocol=self.protocol)
self.assertEqual(cloned.__module__, func.__module__)
def test_function_qualname(self):
def func(x):
return x
# Default __qualname__ attribute (Python 3 only)
if hasattr(func, "__qualname__"):
cloned = pickle_depickle(func, protocol=self.protocol)
self.assertEqual(cloned.__qualname__, func.__qualname__)
# Mutated __qualname__ attribute
func.__qualname__ = "<modifiedlambda>"
cloned = pickle_depickle(func, protocol=self.protocol)
self.assertEqual(cloned.__qualname__, func.__qualname__)
def test_property(self):
# Note that the @property decorator only has an effect on new-style
# classes.
class MyObject:
_read_only_value = 1
_read_write_value = 1
@property
def read_only_value(self):
"A read-only attribute"
return self._read_only_value
@property
def read_write_value(self):
return self._read_write_value
@read_write_value.setter
def read_write_value(self, value):
self._read_write_value = value
my_object = MyObject()
assert my_object.read_only_value == 1
assert MyObject.read_only_value.__doc__ == "A read-only attribute"
with pytest.raises(AttributeError):
my_object.read_only_value = 2
my_object.read_write_value = 2
depickled_obj = pickle_depickle(my_object)
assert depickled_obj.read_only_value == 1
assert depickled_obj.read_write_value == 2
# make sure the depickled read_only_value attribute is still read-only
with pytest.raises(AttributeError):
my_object.read_only_value = 2
# make sure the depickled read_write_value attribute is writeable
depickled_obj.read_write_value = 3
assert depickled_obj.read_write_value == 3
type(depickled_obj).read_only_value.__doc__ == "A read-only attribute"
def test_namedtuple(self):
MyTuple = collections.namedtuple("MyTuple", ["a", "b", "c"])
t1 = MyTuple(1, 2, 3)
t2 = MyTuple(3, 2, 1)
depickled_t1, depickled_MyTuple, depickled_t2 = pickle_depickle(
[t1, MyTuple, t2], protocol=self.protocol
)
assert isinstance(depickled_t1, MyTuple)
assert depickled_t1 == t1
assert depickled_MyTuple is MyTuple
assert isinstance(depickled_t2, MyTuple)
assert depickled_t2 == t2
def test_NamedTuple(self):
class MyTuple(typing.NamedTuple):
a: int
b: int
c: int
t1 = MyTuple(1, 2, 3)
t2 = MyTuple(3, 2, 1)
depickled_t1, depickled_MyTuple, depickled_t2 = pickle_depickle(
[t1, MyTuple, t2], protocol=self.protocol
)
assert isinstance(depickled_t1, MyTuple)
assert depickled_t1 == t1
assert depickled_MyTuple is MyTuple
assert isinstance(depickled_t2, MyTuple)
assert depickled_t2 == t2
    def test_interactively_defined_function(self):
        """Callables defined in a script's __main__ module (class, instance,
        free and mutually-recursive functions) must pickle, travel to a
        subprocess and execute correctly there."""
        # Check that callables defined in the __main__ module of a Python
        # script (or jupyter kernel) can be pickled / unpickled / executed.
        code = """\
        from testutils import subprocess_pickle_echo
        CONSTANT = 42
        class Foo(object):
            def method(self, x):
                return x
        foo = Foo()
        def f0(x):
            return x ** 2
        def f1():
            return Foo
        def f2(x):
            return Foo().method(x)
        def f3():
            return Foo().method(CONSTANT)
        def f4(x):
            return foo.method(x)
        def f5(x):
            # Recursive call to a dynamically defined function.
            if x <= 0:
                return f4(x)
            return f5(x - 1) + 1
        cloned = subprocess_pickle_echo(lambda x: x**2, protocol={protocol})
        assert cloned(3) == 9
        cloned = subprocess_pickle_echo(f0, protocol={protocol})
        assert cloned(3) == 9
        cloned = subprocess_pickle_echo(Foo, protocol={protocol})
        assert cloned().method(2) == Foo().method(2)
        cloned = subprocess_pickle_echo(Foo(), protocol={protocol})
        assert cloned.method(2) == Foo().method(2)
        cloned = subprocess_pickle_echo(f1, protocol={protocol})
        assert cloned()().method('a') == f1()().method('a')
        cloned = subprocess_pickle_echo(f2, protocol={protocol})
        assert cloned(2) == f2(2)
        cloned = subprocess_pickle_echo(f3, protocol={protocol})
        assert cloned() == f3()
        cloned = subprocess_pickle_echo(f4, protocol={protocol})
        assert cloned(2) == f4(2)
        cloned = subprocess_pickle_echo(f5, protocol={protocol})
        assert cloned(7) == f5(7) == 7
        """.format(
            protocol=self.protocol
        )
        assert_run_python_script(textwrap.dedent(code))
    def test_interactively_defined_global_variable(self):
        """Functions pickled from __main__ in a single payload must share one
        isolated global namespace; separate payloads get separate namespaces."""
        # Check that callables defined in the __main__ module of a Python
        # script (or jupyter kernel) correctly retrieve global variables.
        code_template = """\
        from testutils import subprocess_pickle_echo
        from cloudpickle import dumps, loads
        def local_clone(obj, protocol=None):
            return loads(dumps(obj, protocol=protocol))
        VARIABLE = "default_value"
        def f0():
            global VARIABLE
            VARIABLE = "changed_by_f0"
        def f1():
            return VARIABLE
        assert f0.__globals__ is f1.__globals__
        # pickle f0 and f1 inside the same pickle_string
        cloned_f0, cloned_f1 = {clone_func}([f0, f1], protocol={protocol})
        # cloned_f0 and cloned_f1 now share a global namespace that is isolated
        # from any previously existing namespace
        assert cloned_f0.__globals__ is cloned_f1.__globals__
        assert cloned_f0.__globals__ is not f0.__globals__
        # pickle f1 another time, but in a new pickle string
        pickled_f1 = dumps(f1, protocol={protocol})
        # Change the value of the global variable in f0's new global namespace
        cloned_f0()
        # thanks to cloudpickle isolation, depickling and calling f0 and f1
        # should not affect the globals of already existing modules
        assert VARIABLE == "default_value", VARIABLE
        # Ensure that cloned_f1 and cloned_f0 share the same globals, as f1 and
        # f0 shared the same globals at pickling time, and cloned_f1 was
        # depickled from the same pickle string as cloned_f0
        shared_global_var = cloned_f1()
        assert shared_global_var == "changed_by_f0", shared_global_var
        # f1 is unpickled another time, but because it comes from another
        # pickle string than pickled_f1 and pickled_f0, it will not share the
        # same globals as the latter two.
        new_cloned_f1 = loads(pickled_f1)
        assert new_cloned_f1.__globals__ is not cloned_f1.__globals__
        assert new_cloned_f1.__globals__ is not f1.__globals__
        # get the value of new_cloned_f1's VARIABLE
        new_global_var = new_cloned_f1()
        assert new_global_var == "default_value", new_global_var
        """
        # Exercise both an in-process round-trip and a subprocess round-trip.
        for clone_func in ["local_clone", "subprocess_pickle_echo"]:
            code = code_template.format(protocol=self.protocol, clone_func=clone_func)
            assert_run_python_script(textwrap.dedent(code))
    def test_closure_interacting_with_a_global_variable(self):
        """Same global-namespace isolation checks as the previous test, but
        run in-process against this test module's own global variable."""
        global _TEST_GLOBAL_VARIABLE
        assert _TEST_GLOBAL_VARIABLE == "default_value"
        orig_value = _TEST_GLOBAL_VARIABLE
        try:
            def f0():
                global _TEST_GLOBAL_VARIABLE
                _TEST_GLOBAL_VARIABLE = "changed_by_f0"
            def f1():
                return _TEST_GLOBAL_VARIABLE
            # pickle f0 and f1 inside the same pickle_string
            cloned_f0, cloned_f1 = pickle_depickle([f0, f1], protocol=self.protocol)
            # cloned_f0 and cloned_f1 now share a global namespace that is
            # isolated from any previously existing namespace
            assert cloned_f0.__globals__ is cloned_f1.__globals__
            assert cloned_f0.__globals__ is not f0.__globals__
            # pickle f1 another time, but in a new pickle string
            pickled_f1 = cloudpickle.dumps(f1, protocol=self.protocol)
            # Change the global variable's value in f0's new global namespace
            cloned_f0()
            # depickling f0 and f1 should not affect the globals of already
            # existing modules
            assert _TEST_GLOBAL_VARIABLE == "default_value"
            # Ensure that cloned_f1 and cloned_f0 share the same globals, as f1
            # and f0 shared the same globals at pickling time, and cloned_f1
            # was depickled from the same pickle string as cloned_f0
            shared_global_var = cloned_f1()
            assert shared_global_var == "changed_by_f0", shared_global_var
            # f1 is unpickled another time, but because it comes from another
            # pickle string than pickled_f1 and pickled_f0, it will not share
            # the same globals as the latter two.
            new_cloned_f1 = pickle.loads(pickled_f1)
            assert new_cloned_f1.__globals__ is not cloned_f1.__globals__
            assert new_cloned_f1.__globals__ is not f1.__globals__
            # get the value of new_cloned_f1's VARIABLE
            new_global_var = new_cloned_f1()
            assert new_global_var == "default_value", new_global_var
        finally:
            # Restore the module-level global even if an assertion failed.
            _TEST_GLOBAL_VARIABLE = orig_value
    def test_interactive_remote_function_calls(self):
        """A worker process must always see the *current* definition of an
        interactively (re)defined function, not a stale cached one."""
        code = """if __name__ == "__main__":
        from testutils import subprocess_worker
        def interactive_function(x):
            return x + 1
        with subprocess_worker(protocol={protocol}) as w:
            assert w.run(interactive_function, 41) == 42
            # Define a new function that will call an updated version of
            # the previously called function:
            def wrapper_func(x):
                return interactive_function(x)
            def interactive_function(x):
                return x - 1
            # The change in the definition of interactive_function in the main
            # module of the main process should be reflected transparently
            # in the worker process: the worker process does not recall the
            # previous definition of `interactive_function`:
            assert w.run(wrapper_func, 41) == 40
    """.format(
            protocol=self.protocol
        )
        assert_run_python_script(code)
    def test_interactive_remote_function_calls_no_side_effect(self):
        """Running interactively defined callables on a worker must not leak
        state into either the caller's or the worker's __main__ module."""
        code = """if __name__ == "__main__":
        from testutils import subprocess_worker
        import sys
        with subprocess_worker(protocol={protocol}) as w:
            GLOBAL_VARIABLE = 0
            class CustomClass(object):
                def mutate_globals(self):
                    global GLOBAL_VARIABLE
                    GLOBAL_VARIABLE += 1
                    return GLOBAL_VARIABLE
            custom_object = CustomClass()
            assert w.run(custom_object.mutate_globals) == 1
            # The caller global variable is unchanged in the main process.
            assert GLOBAL_VARIABLE == 0
            # Calling the same function again starts again from zero. The
            # worker process is stateless: it has no memory of the past call:
            assert w.run(custom_object.mutate_globals) == 1
            # The symbols defined in the main process __main__ module are
            # not set in the worker process main module to leave the worker
            # as stateless as possible:
            def is_in_main(name):
                return hasattr(sys.modules["__main__"], name)
            assert is_in_main("CustomClass")
            assert not w.run(is_in_main, "CustomClass")
            assert is_in_main("GLOBAL_VARIABLE")
            assert not w.run(is_in_main, "GLOBAL_VARIABLE")
    """.format(
            protocol=self.protocol
        )
        assert_run_python_script(code)
    def test_interactive_dynamic_type_and_remote_instances(self):
        """Instances of an interactively defined class returned by a worker
        must be matched back to the original class via provenance tracking."""
        code = """if __name__ == "__main__":
        from testutils import subprocess_worker
        with subprocess_worker(protocol={protocol}) as w:
            class CustomCounter:
                def __init__(self):
                    self.count = 0
                def increment(self):
                    self.count += 1
                    return self
            counter = CustomCounter().increment()
            assert counter.count == 1
            returned_counter = w.run(counter.increment)
            assert returned_counter.count == 2, returned_counter.count
            # Check that the class definition of the returned instance was
            # matched back to the original class definition living in __main__.
            assert isinstance(returned_counter, CustomCounter)
            # Check that memoization does not break provenance tracking:
            def echo(*args):
                return args
            C1, C2, c1, c2 = w.run(echo, CustomCounter, CustomCounter,
                                   CustomCounter(), returned_counter)
            assert C1 is CustomCounter
            assert C2 is CustomCounter
            assert isinstance(c1, CustomCounter)
            assert isinstance(c2, CustomCounter)
    """.format(
            protocol=self.protocol
        )
        assert_run_python_script(code)
    def test_interactive_dynamic_type_and_stored_remote_instances(self):
        """Simulate objects stored on workers to check isinstance semantics
        Such instances stored in the memory of running worker processes are
        similar to dask-distributed futures for instance.
        """
        code = """if __name__ == "__main__":
        import cloudpickle, uuid
        from testutils import subprocess_worker
        with subprocess_worker(protocol={protocol}) as w:
            class A:
                '''Original class definition'''
                pass
            def store(x):
                storage = getattr(cloudpickle, "_test_storage", None)
                if storage is None:
                    storage = cloudpickle._test_storage = dict()
                obj_id = uuid.uuid4().hex
                storage[obj_id] = x
                return obj_id
            def lookup(obj_id):
                return cloudpickle._test_storage[obj_id]
            id1 = w.run(store, A())
            # The stored object on the worker is matched to a singleton class
            # definition thanks to provenance tracking:
            assert w.run(lambda obj_id: isinstance(lookup(obj_id), A), id1)
            # Retrieving the object from the worker yields a local copy that
            # is matched back the local class definition this instance
            # originally stems from.
            assert isinstance(w.run(lookup, id1), A)
            # Changing the local class definition should be taken into account
            # in all subsequent calls. In particular the old instances on the
            # worker do not map back to the new class definition, neither on
            # the worker itself, nor locally on the main program when the old
            # instance is retrieved:
            class A:
                '''Updated class definition'''
            assert not w.run(lambda obj_id: isinstance(lookup(obj_id), A), id1)
            retrieved1 = w.run(lookup, id1)
            assert not isinstance(retrieved1, A)
            assert retrieved1.__class__ is not A
            assert retrieved1.__class__.__doc__ == "Original class definition"
            # New instances on the other hand are proper instances of the new
            # class definition everywhere:
            a = A()
            id2 = w.run(store, a)
            assert w.run(lambda obj_id: isinstance(lookup(obj_id), A), id2)
            assert isinstance(w.run(lookup, id2), A)
            # Monkeypatch the class defintion in the main process to a new
            # class method:
            A.echo = lambda cls, x: x
            # Calling this method on an instance will automatically update
            # the remote class definition on the worker to propagate the monkey
            # patch dynamically.
            assert w.run(a.echo, 42) == 42
            # The stored instance can therefore also access the new class
            # method:
            assert w.run(lambda obj_id: lookup(obj_id).echo(43), id2) == 43
    """.format(
            protocol=self.protocol
        )
        assert_run_python_script(code)
    def test_dynamic_func_deterministic_roundtrip(self):
        """The pickle bytes of a dynamic function must be identical whether
        produced in this process or in a fresh worker process."""
        # Check that the pickle serialization for a dynamic func is the same
        # in two processes.
        def get_dynamic_func_pickle():
            def test_method(arg_1, arg_2):
                pass
            return cloudpickle.dumps(test_method)
        with subprocess_worker(protocol=self.protocol) as w:
            A_dump = w.run(get_dynamic_func_pickle)
            check_deterministic_pickle(A_dump, get_dynamic_func_pickle())
    def test_dynamic_class_deterministic_roundtrip(self):
        """Cross-process pickle determinism for a dynamic class (known broken:
        the tracker_id differs per process, hence the unconditional xfail)."""
        # Check that the pickle serialization for a dynamic class is the same
        # in two processes.
        # NOTE: pytest.xfail() raises immediately, so the code below this line
        # never actually runs; it documents the intended check.
        pytest.xfail("This test fails due to different tracker_id.")
        def get_dynamic_class_pickle():
            class A:
                """Class with potential string interning issues."""
                arg_1 = "class_value"
                def join(self):
                    pass
                def test_method(self, arg_1, join):
                    pass
            return cloudpickle.dumps(A)
        with subprocess_worker(protocol=self.protocol) as w:
            A_dump = w.run(get_dynamic_class_pickle)
            check_deterministic_pickle(A_dump, get_dynamic_class_pickle())
    def test_deterministic_dynamic_class_attr_ordering_for_chained_pickling(self):
        """Re-pickling a reconstructed class in a worker must yield the same
        bytes as pickling the original class (stable attribute ordering)."""
        # Check that the pickle produced by pickling a reconstructed class definition
        # in a remote process matches the pickle produced by pickling the original
        # class definition.
        # In particular, this test checks that the order of the class attributes is
        # deterministic.
        with subprocess_worker(protocol=self.protocol) as w:
            class A:
                """Simple class definition"""
                pass
            A_dump = w.run(cloudpickle.dumps, A)
            check_deterministic_pickle(A_dump, cloudpickle.dumps(A))
            # If the `__doc__` attribute is defined after some other class
            # attribute, this can cause class attribute ordering changes due to
            # the way we reconstruct the class definition in
            # `_make_skeleton_class`, which creates the class and thus its
            # `__doc__` attribute before populating the class attributes.
            class A:
                name = "A"
                __doc__ = "Updated class definition"
            A_dump = w.run(cloudpickle.dumps, A)
            check_deterministic_pickle(A_dump, cloudpickle.dumps(A))
            # If a `__doc__` is defined on the `__init__` method, this can
            # cause ordering changes due to the way we reconstruct the class
            # with `_make_skeleton_class`.
            class A:
                def __init__(self):
                    """Class definition with explicit __init__"""
                    pass
            A_dump = w.run(cloudpickle.dumps, A)
            check_deterministic_pickle(A_dump, cloudpickle.dumps(A))
def test_deterministic_str_interning_for_chained_dynamic_class_pickling(self):
# Check that the pickle produced by the unpickled instance is the same.
# This checks that there is no issue related to the string interning of
# the names of attributes of class definitions and names of attributes
# of the `__code__` objects of the methods.
with subprocess_worker(protocol=self.protocol) as w:
# Due to interning of class attributes, check that this does not
# create issues with dynamic function definition.
class A:
"""Class with potential string interning issues."""
arg_1 = "class_value"
def join(self):
pass
def test_method(self, arg_1, join):
pass
A_dump = w.run(cloudpickle.dumps, A)
check_deterministic_pickle(A_dump, cloudpickle.dumps(A))
# Also check that memoization of string value inside the class does
# not cause non-deterministic pickle with interned method names.
class A:
"""Class with potential string interning issues."""
arg_1 = "join"
def join(self, arg_1):
pass
# Set a custom method attribute that can potentially trigger
# undeterministic memoization depending on the interning state of
# the string used for the attribute name.
A.join.arg_1 = "join"
A_dump = w.run(cloudpickle.dumps, A)
check_deterministic_pickle(A_dump, cloudpickle.dumps(A))
def test_dynamic_class_determinist_subworker_tuple_memoization(self):
# Check that the pickle produced by the unpickled instance is the same.
# This highlights some issues with tuple memoization.
with subprocess_worker(protocol=self.protocol) as w:
# Arguments' tuple is memoized in the main process but not in the
# subprocess as the tuples do not share the same id in the loaded
# class.
class A:
"""Class with potential tuple memoization issues."""
def func1(self):
pass
def func2(self):
pass
A_dump = w.run(cloudpickle.dumps, A)
check_deterministic_pickle(A_dump, cloudpickle.dumps(A))
@pytest.mark.skipif(
platform.python_implementation() == "PyPy",
reason="Skip PyPy because memory grows too much",
)
def test_interactive_remote_function_calls_no_memory_leak(self):
code = """if __name__ == "__main__":
from testutils import subprocess_worker
import struct
with subprocess_worker(protocol={protocol}) as w:
reference_size = w.memsize()
assert reference_size > 0
def make_big_closure(i):
# Generate a byte string of size 1MB
itemsize = len(struct.pack("l", 1))
data = struct.pack("l", i) * (int(1e6) // itemsize)
def process_data():
return len(data)
return process_data
for i in range(100):
func = make_big_closure(i)
result = w.run(func)
assert result == int(1e6), result
import gc
w.run(gc.collect)
# By this time the worker process has processed 100MB worth of data
# passed in the closures. The worker memory size should not have
# grown by more than a few MB as closures are garbage collected at
# the end of each remote function call.
growth = w.memsize() - reference_size
# For some reason, the memory growth after processing 100MB of
# data is ~50MB on MacOS, and ~1MB on Linux, so the upper bound on
# memory growth we use is only tight for MacOS. However,
# - 50MB is still 2x lower than the expected memory growth in case
# of a leak (which would be the total size of the processed data,
# 100MB)
# - the memory usage growth does not increase if using 10000
# iterations instead of 100 as used now (100x more data)
assert growth < 5e7, growth
""".format(
protocol=self.protocol
)
assert_run_python_script(code)
def test_pickle_reraise(self):
for exc_type in [Exception, ValueError, TypeError, RuntimeError]:
obj = RaiserOnPickle(exc_type("foo"))
with pytest.raises((exc_type, pickle.PicklingError)):
cloudpickle.dumps(obj, protocol=self.protocol)
def test_unhashable_function(self):
d = {"a": 1}
depickled_method = pickle_depickle(d.get, protocol=self.protocol)
self.assertEqual(depickled_method("a"), 1)
self.assertEqual(depickled_method("b"), None)
@unittest.skipIf(
sys.version_info >= (3, 14),
"itertools.count() doesn't support pickle on Python 3.14+",
)
def test_itertools_count(self):
counter = itertools.count(1, step=2)
# advance the counter a bit
next(counter)
next(counter)
new_counter = pickle_depickle(counter, protocol=self.protocol)
self.assertTrue(counter is not new_counter)
for _ in range(10):
self.assertEqual(next(counter), next(new_counter))
def test_wraps_preserves_function_name(self):
from functools import wraps
def f():
pass
@wraps(f)
def g():
f()
f2 = pickle_depickle(g, protocol=self.protocol)
self.assertEqual(f2.__name__, f.__name__)
def test_wraps_preserves_function_doc(self):
from functools import wraps
def f():
"""42"""
pass
@wraps(f)
def g():
f()
f2 = pickle_depickle(g, protocol=self.protocol)
self.assertEqual(f2.__doc__, f.__doc__)
def test_wraps_preserves_function_annotations(self):
def f(x: int) -> float:
pass
@wraps(f)
def g(x):
f(x)
f2 = pickle_depickle(g, protocol=self.protocol)
self.assertEqual(f2.__annotations__, f.__annotations__)
def test_type_hint(self):
t = typing.Union[list, int]
assert pickle_depickle(t) == t
def test_instance_with_slots(self):
for slots in [["registered_attribute"], "registered_attribute"]:
class ClassWithSlots:
__slots__ = slots
def __init__(self):
self.registered_attribute = 42
initial_obj = ClassWithSlots()
depickled_obj = pickle_depickle(initial_obj, protocol=self.protocol)
assert depickled_obj.__class__.__slots__ == slots
for obj in [initial_obj, depickled_obj]:
self.assertEqual(obj.registered_attribute, 42)
with pytest.raises(AttributeError):
obj.non_registered_attribute = 1
class SubclassWithSlots(ClassWithSlots):
def __init__(self):
self.unregistered_attribute = 1
obj = SubclassWithSlots()
s = cloudpickle.dumps(obj, protocol=self.protocol)
del SubclassWithSlots
depickled_obj = cloudpickle.loads(s)
assert depickled_obj.unregistered_attribute == 1
@unittest.skipIf(
not hasattr(types, "MappingProxyType"),
"Old versions of Python do not have this type.",
)
def test_mappingproxy(self):
mp = types.MappingProxyType({"some_key": "some value"})
assert mp == pickle_depickle(mp, protocol=self.protocol)
def test_dataclass(self):
dataclasses = pytest.importorskip("dataclasses")
DataClass = dataclasses.make_dataclass("DataClass", [("x", int)])
data = DataClass(x=42)
pickle_depickle(DataClass, protocol=self.protocol)
assert data.x == pickle_depickle(data, protocol=self.protocol).x == 42
def test_locally_defined_enum(self):
class StringEnum(str, enum.Enum):
"""Enum when all members are also (and must be) strings"""
class Color(StringEnum):
"""3-element color space"""
RED = "1"
GREEN = "2"
BLUE = "3"
def is_green(self):
return self is Color.GREEN
green1, green2, ClonedColor = pickle_depickle(
[Color.GREEN, Color.GREEN, Color], protocol=self.protocol
)
assert green1 is green2
assert green1 is ClonedColor.GREEN
assert green1 is not ClonedColor.BLUE
assert isinstance(green1, str)
assert green1.is_green()
# cloudpickle systematically tracks provenance of class definitions
# and ensure reconciliation in case of round trips:
assert green1 is Color.GREEN
assert ClonedColor is Color
green3 = pickle_depickle(Color.GREEN, protocol=self.protocol)
assert green3 is Color.GREEN
def test_locally_defined_intenum(self):
# Try again with a IntEnum defined with the functional API
DynamicColor = enum.IntEnum("Color", {"RED": 1, "GREEN": 2, "BLUE": 3})
green1, green2, ClonedDynamicColor = pickle_depickle(
[DynamicColor.GREEN, DynamicColor.GREEN, DynamicColor],
protocol=self.protocol,
)
assert green1 is green2
assert green1 is ClonedDynamicColor.GREEN
assert green1 is not ClonedDynamicColor.BLUE
assert ClonedDynamicColor is DynamicColor
def test_interactively_defined_enum(self):
code = """if __name__ == "__main__":
from enum import Enum
from testutils import subprocess_worker
with subprocess_worker(protocol={protocol}) as w:
class Color(Enum):
RED = 1
GREEN = 2
def check_positive(x):
return Color.GREEN if x >= 0 else Color.RED
result = w.run(check_positive, 1)
# Check that the returned enum instance is reconciled with the
# locally defined Color enum type definition:
assert result is Color.GREEN
# Check that changing the definition of the Enum class is taken
# into account on the worker for subsequent calls:
class Color(Enum):
RED = 1
BLUE = 2
def check_positive(x):
return Color.BLUE if x >= 0 else Color.RED
result = w.run(check_positive, 1)
assert result is Color.BLUE
""".format(
protocol=self.protocol
)
assert_run_python_script(code)
def test_relative_import_inside_function(self):
# Make sure relative imports inside round-tripped functions is not
# broken. This was a bug in cloudpickle versions <= 0.5.3 and was
# re-introduced in 0.8.0.
_cloudpickle_testpkg = pytest.importorskip("_cloudpickle_testpkg")
relative_imports_factory = _cloudpickle_testpkg.relative_imports_factory
f, g = relative_imports_factory()
for func, source in zip([f, g], ["module", "package"]):
# Make sure relative imports are initially working
assert func() == f"hello from a {source}!"
# Make sure relative imports still work after round-tripping
cloned_func = pickle_depickle(func, protocol=self.protocol)
assert cloned_func() == f"hello from a {source}!"
def test_interactively_defined_func_with_keyword_only_argument(self):
# fixes https://github.com/cloudpipe/cloudpickle/issues/263
def f(a, *, b=1):
return a + b
depickled_f = pickle_depickle(f, protocol=self.protocol)
for func in (f, depickled_f):
assert func(2) == 3
assert func.__kwdefaults__ == {"b": 1}
@pytest.mark.skipif(
not hasattr(types.CodeType, "co_posonlyargcount"),
reason="Requires positional-only argument syntax",
)
def test_interactively_defined_func_with_positional_only_argument(self):
# Fixes https://github.com/cloudpipe/cloudpickle/issues/266
# The source code of this test is bundled in a string and is ran from
# the __main__ module of a subprocess in order to avoid a SyntaxError
# in versions of python that do not support positional-only argument
# syntax.
code = """
import pytest
from cloudpickle import loads, dumps
def f(a, /, b=1):
return a + b
depickled_f = loads(dumps(f, protocol={protocol}))
for func in (f, depickled_f):
assert func(2) == 3
assert func.__code__.co_posonlyargcount == 1
with pytest.raises(TypeError):
func(a=2)
""".format(
protocol=self.protocol
)
assert_run_python_script(textwrap.dedent(code))
def test___reduce___returns_string(self):
# Non regression test for objects with a __reduce__ method returning a
# string, meaning "save by attribute using save_global"
_cloudpickle_testpkg = pytest.importorskip("_cloudpickle_testpkg")
some_singleton = _cloudpickle_testpkg.some_singleton
assert some_singleton.__reduce__() == "some_singleton"
depickled_singleton = pickle_depickle(some_singleton, protocol=self.protocol)
assert depickled_singleton is some_singleton
def test_cloudpickle_extract_nested_globals(self):
def function_factory():
def inner_function():
global _TEST_GLOBAL_VARIABLE
return _TEST_GLOBAL_VARIABLE
return inner_function
globals_ = set(
cloudpickle.cloudpickle._extract_code_globals(
function_factory.__code__
).keys()
)
assert globals_ == {"_TEST_GLOBAL_VARIABLE"}
depickled_factory = pickle_depickle(function_factory, protocol=self.protocol)
inner_func = depickled_factory()
assert inner_func() == _TEST_GLOBAL_VARIABLE
# TODO: remove this xfail when we drop support for Python 3.8. We don't
# plan to fix it because Python 3.8 is EOL.
@pytest.mark.skipif(
sys.version_info < (3, 9),
reason="Can cause CPython 3.8 to segfault",
)
def test_recursion_during_pickling(self):
class A:
def __getattribute__(self, name):
return getattr(self, name)
a = A()
with pytest.raises(pickle.PicklingError, match="deep recursion"):
cloudpickle.dumps(a)
def test_out_of_band_buffers(self):
if self.protocol < 5:
pytest.skip("Need Pickle Protocol 5 or later")
np = pytest.importorskip("numpy")
class LocallyDefinedClass:
data = np.zeros(10)
data_instance = LocallyDefinedClass()
buffers = []
pickle_bytes = cloudpickle.dumps(
data_instance, protocol=self.protocol, buffer_callback=buffers.append
)
assert len(buffers) == 1
reconstructed = pickle.loads(pickle_bytes, buffers=buffers)
np.testing.assert_allclose(reconstructed.data, data_instance.data)
def test_pickle_dynamic_typevar(self):
T = typing.TypeVar("T")
depickled_T = pickle_depickle(T, protocol=self.protocol)
attr_list = [
"__name__",
"__bound__",
"__constraints__",
"__covariant__",
"__contravariant__",
]
for attr in attr_list:
assert getattr(T, attr) == getattr(depickled_T, attr)
def test_pickle_dynamic_typevar_tracking(self):
T = typing.TypeVar("T")
T2 = subprocess_pickle_echo(T, protocol=self.protocol)
assert T is T2
def test_pickle_dynamic_typevar_memoization(self):
T = typing.TypeVar("T")
depickled_T1, depickled_T2 = pickle_depickle((T, T), protocol=self.protocol)
assert depickled_T1 is depickled_T2
def test_pickle_importable_typevar(self):
_cloudpickle_testpkg = pytest.importorskip("_cloudpickle_testpkg")
T1 = pickle_depickle(_cloudpickle_testpkg.T, protocol=self.protocol)
assert T1 is _cloudpickle_testpkg.T
# Standard Library TypeVar
from typing import AnyStr
assert AnyStr is pickle_depickle(AnyStr, protocol=self.protocol)
def test_generic_type(self):
T = typing.TypeVar("T")
class C(typing.Generic[T]):
pass
assert pickle_depickle(C, protocol=self.protocol) is C
# Identity is not part of the typing contract: only test for
# equality instead.
assert pickle_depickle(C[int], protocol=self.protocol) == C[int]
with subprocess_worker(protocol=self.protocol) as worker:
def check_generic(generic, origin, type_value):
assert generic.__origin__ is origin
assert len(origin.__orig_bases__) == 1
ob = origin.__orig_bases__[0]
assert ob.__origin__ is typing.Generic
assert len(generic.__args__) == 1
assert generic.__args__[0] is type_value
assert len(ob.__parameters__) == 1
return "ok"
assert check_generic(C[int], C, int) == "ok"
assert worker.run(check_generic, C[int], C, int) == "ok"
def test_generic_subclass(self):
T = typing.TypeVar("T")
class Base(typing.Generic[T]):
pass
class DerivedAny(Base):
pass
class LeafAny(DerivedAny):
pass
class DerivedInt(Base[int]):
pass
class LeafInt(DerivedInt):
pass
class DerivedT(Base[T]):
pass
class LeafT(DerivedT[T]):
pass
klasses = [Base, DerivedAny, LeafAny, DerivedInt, LeafInt, DerivedT, LeafT]
for klass in klasses:
assert pickle_depickle(klass, protocol=self.protocol) is klass
with subprocess_worker(protocol=self.protocol) as worker:
def check_mro(klass, expected_mro):
assert klass.mro() == expected_mro
return "ok"
for klass in klasses:
mro = klass.mro()
assert check_mro(klass, mro)
assert worker.run(check_mro, klass, mro) == "ok"
def test_locally_defined_class_with_type_hints(self):
with subprocess_worker(protocol=self.protocol) as worker:
for type_ in _all_types_to_test():
class MyClass:
def method(self, arg: type_) -> type_:
return arg
MyClass.__annotations__ = {"attribute": type_}
def check_annotations(obj, expected_type, expected_type_str):
# On Python 3.14, it's no longer possible to access class
# annotations from an instance, so use type().
assert type(obj).__annotations__["attribute"] == expected_type
assert obj.method.__annotations__["arg"] == expected_type
assert obj.method.__annotations__["return"] == expected_type
return "ok"
obj = MyClass()
assert check_annotations(obj, type_, "type_") == "ok"
assert worker.run(check_annotations, obj, type_, "type_") == "ok"
def test_class_annotations(self):
class C:
pass
C.__annotations__ = {"a": int}
C1 = pickle_depickle(C, protocol=self.protocol)
assert C1.__annotations__ == C.__annotations__
def test_class_annotations_abstractclass(self):
# see https://github.com/cloudpipe/cloudpickle/issues/572
class C(abc.ABC):
a: int
C1 = pickle_depickle(C, protocol=self.protocol)
assert C1.__annotations__ == C.__annotations__
C2 = pickle_depickle(C1, protocol=self.protocol)
if sys.version_info >= (3, 14):
# check that __annotate_func__ is created by Python
assert hasattr(C2, "__annotate_func__")
assert C2.__annotations__ == C1.__annotations__
c2 = C2()
assert isinstance(c2, C2)
def test_function_annotations(self):
def f(a: int) -> str:
pass
f1 = pickle_depickle(f, protocol=self.protocol)
assert f1.__annotations__ == f.__annotations__
def test_always_use_up_to_date_copyreg(self):
# test that updates of copyreg.dispatch_table are taken in account by
# cloudpickle
import copyreg
try:
class MyClass:
pass
def reduce_myclass(x):
return MyClass, (), {"custom_reduce": True}
copyreg.dispatch_table[MyClass] = reduce_myclass
my_obj = MyClass()
depickled_myobj = pickle_depickle(my_obj, protocol=self.protocol)
assert hasattr(depickled_myobj, "custom_reduce")
finally:
copyreg.dispatch_table.pop(MyClass)
def test_literal_misdetection(self):
# see https://github.com/cloudpipe/cloudpickle/issues/403
class MyClass:
@property
def __values__(self):
return ()
o = MyClass()
pickle_depickle(o, protocol=self.protocol)
def test_final_or_classvar_misdetection(self):
# see https://github.com/cloudpipe/cloudpickle/issues/403
class MyClass:
@property
def __type__(self):
return int
o = MyClass()
pickle_depickle(o, protocol=self.protocol)
def test_pickle_constructs_from_module_registered_for_pickling_by_value(
self,
): # noqa
_prev_sys_path = sys.path.copy()
try:
# We simulate an interactive session that:
# - we start from the /path/to/cloudpickle/tests directory, where a
# local .py file (mock_local_file) is located.
# - uses constructs from mock_local_file in remote workers that do
# not have access to this file. This situation is
# the justification behind the
# (un)register_pickle_by_value(module) api that cloudpickle
# exposes.
_mock_interactive_session_cwd = os.path.dirname(__file__)
# First, remove sys.path entries that could point to
# /path/to/cloudpickle/tests and be in inherited by the worker
_maybe_remove(sys.path, "")
_maybe_remove(sys.path, _mock_interactive_session_cwd)
# Add the desired session working directory
sys.path.insert(0, _mock_interactive_session_cwd)
with subprocess_worker(protocol=self.protocol) as w:
# Make the module unavailable in the remote worker
w.run(lambda p: sys.path.remove(p), _mock_interactive_session_cwd)
# Import the actual file after starting the module since the
# worker is started using fork on Linux, which will inherits
# the parent sys.modules. On Python>3.6, the worker can be
# started using spawn using mp_context in ProcessPoolExectutor.
# TODO Once Python 3.6 reaches end of life, rely on mp_context
# instead.
import mock_local_folder.mod as mod
# The constructs whose pickling mechanism is changed using
# register_pickle_by_value are functions, classes, TypeVar and
# modules.
from mock_local_folder.mod import local_function, LocalT, LocalClass
# Make sure the module/constructs are unimportable in the
# worker.
with pytest.raises(ImportError):
w.run(lambda: __import__("mock_local_folder.mod"))
with pytest.raises(ImportError):
w.run(lambda: __import__("mock_local_folder.subfolder.mod"))
for o in [mod, local_function, LocalT, LocalClass]:
with pytest.raises(ImportError):
w.run(lambda: o)
register_pickle_by_value(mod)
# function
assert w.run(lambda: local_function()) == local_function()
# typevar
assert w.run(lambda: LocalT.__name__) == LocalT.__name__
# classes
assert w.run(lambda: LocalClass().method()) == LocalClass().method()
# modules
assert w.run(lambda: mod.local_function()) == local_function()
# Constructs from modules inside subfolders should be pickled
# by value if a namespace module pointing to some parent folder
# was registered for pickling by value. A "mock_local_folder"
# namespace module falls into that category, but a
# "mock_local_folder.mod" one does not.
from mock_local_folder.subfolder.submod import (
LocalSubmodClass,
LocalSubmodT,
local_submod_function,
)
# Shorter aliases to comply with line-length limits
_t, _func, _class = (
LocalSubmodT,
local_submod_function,
LocalSubmodClass,
)
with pytest.raises(ImportError):
w.run(lambda: __import__("mock_local_folder.subfolder.mod"))
with pytest.raises(ImportError):
w.run(lambda: local_submod_function)
unregister_pickle_by_value(mod)
with pytest.raises(ImportError):
w.run(lambda: local_function)
with pytest.raises(ImportError):
w.run(lambda: __import__("mock_local_folder.mod"))
# Test the namespace folder case
import mock_local_folder
register_pickle_by_value(mock_local_folder)
assert w.run(lambda: local_function()) == local_function()
assert w.run(lambda: _func()) == _func()
unregister_pickle_by_value(mock_local_folder)
with pytest.raises(ImportError):
w.run(lambda: local_function)
with pytest.raises(ImportError):
w.run(lambda: local_submod_function)
# Test the case of registering a single module inside a
# subfolder.
import mock_local_folder.subfolder.submod
register_pickle_by_value(mock_local_folder.subfolder.submod)
assert w.run(lambda: _func()) == _func()
assert w.run(lambda: _t.__name__) == _t.__name__
assert w.run(lambda: _class().method()) == _class().method()
# Registering a module from a subfolder for pickling by value
# should not make constructs from modules from the parent
# folder pickleable
with pytest.raises(ImportError):
w.run(lambda: local_function)
with pytest.raises(ImportError):
w.run(lambda: __import__("mock_local_folder.mod"))
unregister_pickle_by_value(mock_local_folder.subfolder.submod)
with pytest.raises(ImportError):
w.run(lambda: local_submod_function)
# Test the subfolder namespace module case
import mock_local_folder.subfolder
register_pickle_by_value(mock_local_folder.subfolder)
assert w.run(lambda: _func()) == _func()
assert w.run(lambda: _t.__name__) == _t.__name__
assert w.run(lambda: _class().method()) == _class().method()
unregister_pickle_by_value(mock_local_folder.subfolder)
finally:
_fname = "mock_local_folder"
sys.path = _prev_sys_path
for m in [
_fname,
f"{_fname}.mod",
f"{_fname}.subfolder",
f"{_fname}.subfolder.submod",
]:
mod = sys.modules.pop(m, None)
if mod and mod.__name__ in list_registry_pickle_by_value():
unregister_pickle_by_value(mod)
def test_pickle_constructs_from_installed_packages_registered_for_pickling_by_value( # noqa
self,
):
for package_or_module in ["package", "module"]:
if package_or_module == "package":
m = pytest.importorskip("_cloudpickle_testpkg")
f = m.package_function_with_global
_original_global = m.global_variable
elif package_or_module == "module":
m = pytest.importorskip("_cloudpickle_testpkg.mod")
f = m.module_function_with_global
_original_global = m.global_variable
try:
with subprocess_worker(protocol=self.protocol) as w:
assert w.run(lambda: f()) == _original_global
# Test that f is pickled by value by modifying a global
# variable that f uses, and making sure that this
# modification shows up when calling the function remotely
register_pickle_by_value(m)
assert w.run(lambda: f()) == _original_global
m.global_variable = "modified global"
assert m.global_variable != _original_global
assert w.run(lambda: f()) == "modified global"
unregister_pickle_by_value(m)
finally:
m.global_variable = _original_global
if m.__name__ in list_registry_pickle_by_value():
unregister_pickle_by_value(m)
def test_pickle_various_versions_of_the_same_function_with_different_pickling_method( # noqa
self,
):
# Make sure that different versions of the same function (possibly
# pickled in a different way - by value and/or by reference) can
# peacefully co-exist (e.g. without globals interaction) in a remote
# worker.
_cloudpickle_testpkg = pytest.importorskip("_cloudpickle_testpkg")
f = _cloudpickle_testpkg.package_function_with_global
_original_global = _cloudpickle_testpkg.global_variable
def _create_registry():
_main = __import__("sys").modules["__main__"]
_main._cloudpickle_registry = {}
# global _cloudpickle_registry
def _add_to_registry(v, k):
_main = __import__("sys").modules["__main__"]
_main._cloudpickle_registry[k] = v
def _call_from_registry(k):
_main = __import__("sys").modules["__main__"]
return _main._cloudpickle_registry[k]()
try:
with subprocess_worker(protocol=self.protocol) as w:
w.run(_create_registry)
w.run(_add_to_registry, f, "f_by_ref")
register_pickle_by_value(_cloudpickle_testpkg)
_cloudpickle_testpkg.global_variable = "modified global"
w.run(_add_to_registry, f, "f_by_val")
assert w.run(_call_from_registry, "f_by_ref") == _original_global
assert w.run(_call_from_registry, "f_by_val") == "modified global"
finally:
_cloudpickle_testpkg.global_variable = _original_global
if "_cloudpickle_testpkg" in list_registry_pickle_by_value():
unregister_pickle_by_value(_cloudpickle_testpkg)
def test_deterministic_pickle_bytes_for_function(self):
# Ensure that functions with references to several global names are
# pickled to fixed bytes that do not depend on the PYTHONHASHSEED of
# the Python process.
vals = set()
def func_with_globals():
return _TEST_GLOBAL_VARIABLE + _TEST_GLOBAL_VARIABLE2
for i in range(5):
vals.add(
subprocess_pickle_string(
func_with_globals,
protocol=self.protocol,
add_env={"PYTHONHASHSEED": str(i)},
)
)
if len(vals) > 1:
# Print additional debug info on stdout with dis:
for val in vals:
pickletools.dis(val)
pytest.fail("Expected a single deterministic payload, got %d/5" % len(vals))
def test_dataclass_fields_are_preserved(self):
@dataclasses.dataclass
class SampleDataclass:
x: int
y: dataclasses.InitVar[int]
z: typing.ClassVar[int]
PickledSampleDataclass = pickle_depickle(
SampleDataclass, protocol=self.protocol
)
found_fields = list(PickledSampleDataclass.__dataclass_fields__.values())
assert set(f.name for f in found_fields) == {"x", "y", "z"}
expected_ftypes = {
"x": dataclasses._FIELD,
"y": dataclasses._FIELD_INITVAR,
"z": dataclasses._FIELD_CLASSVAR,
}
for f in found_fields:
assert f._field_type is expected_ftypes[f.name]
def test_interactively_defined_dataclass_with_initvar_and_classvar(self):
code = """if __name__ == "__main__":
import dataclasses
from testutils import subprocess_worker
import typing
with subprocess_worker(protocol={protocol}) as w:
@dataclasses.dataclass
class SampleDataclass:
x: int
y: dataclasses.InitVar[int] = None
z: typing.ClassVar[int] = 42
def __post_init__(self, y=0):
self.x += y
def large_enough(self):
return self.x > self.z
value = SampleDataclass(2, y=2)
def check_dataclass_instance(value):
assert isinstance(value, SampleDataclass)
assert value.x == 4
assert value.z == 42
expected_dict = dict(x=4)
assert dataclasses.asdict(value) == expected_dict
assert not value.large_enough()
try:
SampleDataclass.z = 0
assert value.z == 0
assert value.large_enough()
finally:
SampleDataclass.z = 42
return "ok"
assert check_dataclass_instance(value) == "ok"
# Check that this instance of an interactively defined dataclass
# behavesconsistently in a remote worker process:
assert w.run(check_dataclass_instance, value) == "ok"
# Check class provenance tracking is not impacted by the
# @dataclass decorator:
def echo(*args):
return args
cloned_value, cloned_type = w.run(echo, value, SampleDataclass)
assert cloned_type is SampleDataclass
assert isinstance(cloned_value, SampleDataclass)
""".format(
protocol=self.protocol
)
assert_run_python_script(code)
| CloudPickleTest |
python | sqlalchemy__sqlalchemy | test/base/test_events.py | {
"start": 593,
"end": 946
} | class ____:
def teardown_test(self):
classes = set()
for entry in event.base._registrars.values():
for evt_cls in entry:
if evt_cls.__module__ == __name__:
classes.add(evt_cls)
for evt_cls in classes:
event.base._remove_dispatcher(evt_cls)
| TearDownLocalEventsFixture |
python | realpython__materials | python-class/counter.py | {
"start": 0,
"end": 106
} | class ____:
num_instances = 0
def __init__(self):
type(self).num_instances += 1
| ObjectCounter |
python | django__django | tests/postgres_tests/test_array.py | {
"start": 54552,
"end": 57741
} | class ____(PostgreSQLWidgetTestCase):
def test_get_context(self):
self.assertEqual(
SplitArrayWidget(forms.TextInput(), size=2).get_context(
"name", ["val1", "val2"]
),
{
"widget": {
"name": "name",
"is_hidden": False,
"required": False,
"value": "['val1', 'val2']",
"attrs": {},
"template_name": "postgres/widgets/split_array.html",
"subwidgets": [
{
"name": "name_0",
"is_hidden": False,
"required": False,
"value": "val1",
"attrs": {},
"template_name": "django/forms/widgets/text.html",
"type": "text",
},
{
"name": "name_1",
"is_hidden": False,
"required": False,
"value": "val2",
"attrs": {},
"template_name": "django/forms/widgets/text.html",
"type": "text",
},
],
}
},
)
def test_checkbox_get_context_attrs(self):
context = SplitArrayWidget(
forms.CheckboxInput(),
size=2,
).get_context("name", [True, False])
self.assertEqual(context["widget"]["value"], "[True, False]")
self.assertEqual(
[subwidget["attrs"] for subwidget in context["widget"]["subwidgets"]],
[{"checked": True}, {}],
)
def test_render(self):
self.check_html(
SplitArrayWidget(forms.TextInput(), size=2),
"array",
None,
"""
<input name="array_0" type="text">
<input name="array_1" type="text">
""",
)
def test_render_attrs(self):
self.check_html(
SplitArrayWidget(forms.TextInput(), size=2),
"array",
["val1", "val2"],
attrs={"id": "foo"},
html=(
"""
<input id="foo_0" name="array_0" type="text" value="val1">
<input id="foo_1" name="array_1" type="text" value="val2">
"""
),
)
def test_value_omitted_from_data(self):
widget = SplitArrayWidget(forms.TextInput(), size=2)
self.assertIs(widget.value_omitted_from_data({}, {}, "field"), True)
self.assertIs(
widget.value_omitted_from_data({"field_0": "value"}, {}, "field"), False
)
self.assertIs(
widget.value_omitted_from_data({"field_1": "value"}, {}, "field"), False
)
self.assertIs(
widget.value_omitted_from_data(
{"field_0": "value", "field_1": "value"}, {}, "field"
),
False,
)
| TestSplitFormWidget |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/expression2.py | {
"start": 51,
"end": 149
} | class ____:
def do_something1(self):
pass
def do_something2(self):
pass
| Foo |
python | ansible__ansible | test/units/module_utils/facts/test_ansible_collector.py | {
"start": 17099,
"end": 17390
} | class ____(TestCollectedFacts):
expected_facts = []
min_fact_count = 0
def _collectors(self, module,
all_collector_classes=None,
minimal_gather_subset=None):
return [NoneReturningCollector(namespace='ansible')]
| TestOnlyNoneCollector |
python | django-debug-toolbar__django-debug-toolbar | tests/panels/test_custom.py | {
"start": 322,
"end": 1532
} | class ____(IntegrationTestCase):
def test_escapes_panel_title(self):
response = self.client.get("/regular/basic/")
self.assertContains(
response,
"""
<li id="djdt-CustomPanel" class="djDebugPanelButton">
<input type="checkbox" checked title="Disable for next and successive requests" data-cookie="djdtCustomPanel">
<a class="CustomPanel" href="#" title="Title with special chars &"'<>">
Title with special chars &"'<>
</a>
</li>
""",
html=True,
)
self.assertContains(
response,
"""
<div id="CustomPanel" class="djdt-panelContent djdt-hidden">
<div class="djDebugPanelTitle">
<h3>Title with special chars &"'<></h3>
<button type="button" class="djDebugClose">×</button>
</div>
<div class="djDebugPanelContent">
<div class="djdt-loader"></div>
<div class="djdt-scroll"></div>
</div>
</div>
""",
html=True,
)
| CustomPanelTestCase |
python | bokeh__bokeh | tests/unit/bokeh/embed/test_util__embed.py | {
"start": 2227,
"end": 2570
} | class ____:
def __init__(self) -> None:
self.last_name = None
self.last_old = None
self.last_new = None
def __call__(self, event):
self.method(event)
def method(self, event):
self.event = event
def partially_good(self, arg, event):
pass
# Taken from test_model
| _GoodEventCallback |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-pinecone/destination_pinecone/destination.py | {
"start": 891,
"end": 3746
} | class ____(Destination):
indexer: Indexer
embedder: Embedder
def _init_indexer(self, config: ConfigModel):
try:
self.embedder = create_from_config(config.embedding, config.processing)
self.indexer = PineconeIndexer(config.indexing, self.embedder.embedding_dimensions)
except Exception as e:
return AirbyteConnectionStatus(status=Status.FAILED, message=str(e))
def write(
self, config: Mapping[str, Any], configured_catalog: ConfiguredAirbyteCatalog, input_messages: Iterable[AirbyteMessage]
) -> Iterable[AirbyteMessage]:
try:
config_model = ConfigModel.parse_obj(config)
self._init_indexer(config_model)
writer = Writer(
config_model.processing, self.indexer, self.embedder, batch_size=BATCH_SIZE, omit_raw_text=config_model.omit_raw_text
)
yield from writer.write(configured_catalog, input_messages)
except Exception as e:
log_message = AirbyteLogMessage(level=Level.ERROR, message=str(e))
yield AirbyteMessage(type="LOG", message=log_message)
def check(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
try:
parsed_config = ConfigModel.parse_obj(config)
init_status = self._init_indexer(parsed_config)
if init_status and init_status.status == Status.FAILED:
logger.error(f"Initialization failed with message: {init_status.message}")
return init_status # Return the failure status immediately if initialization fails
checks = [self.embedder.check(), self.indexer.check(), DocumentProcessor.check_config(parsed_config.processing)]
errors = [error for error in checks if error is not None]
if len(errors) > 0:
error_message = "\n".join(errors)
logger.error(f"Configuration check failed: {error_message}")
return AirbyteConnectionStatus(status=Status.FAILED, message=error_message)
else:
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
except Exception as e:
logger.error(f"Exception during configuration check: {str(e)}")
return AirbyteConnectionStatus(status=Status.FAILED, message=str(e))
def spec(self, *args: Any, **kwargs: Any) -> ConnectorSpecification:
return ConnectorSpecification(
documentationUrl="https://docs.airbyte.com/integrations/destinations/pinecone",
supportsIncremental=True,
supported_destination_sync_modes=[DestinationSyncMode.overwrite, DestinationSyncMode.append, DestinationSyncMode.append_dedup],
connectionSpecification=ConfigModel.schema(), # type: ignore[attr-defined]
)
| DestinationPinecone |
python | tiangolo__fastapi | docs_src/custom_request_and_route/tutorial001.py | {
"start": 139,
"end": 451
} | class ____(Request):
async def body(self) -> bytes:
if not hasattr(self, "_body"):
body = await super().body()
if "gzip" in self.headers.getlist("Content-Encoding"):
body = gzip.decompress(body)
self._body = body
return self._body
| GzipRequest |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/core.py | {
"start": 26394,
"end": 39135
} | class ____(Layer):
"""Wraps arbitrary expressions as a `Layer` object.
The `Lambda` layer exists so that arbitrary expressions can be used
as a `Layer` when constructing `Sequential`
and Functional API models. `Lambda` layers are best suited for simple
operations or quick experimentation. For more advanced use cases, follow
[this guide](https://www.tensorflow.org/guide/keras/custom_layers_and_models)
for subclassing `tf.keras.layers.Layer`.
WARNING: `tf.keras.layers.Lambda` layers have (de)serialization limitations!
The main reason to subclass `tf.keras.layers.Layer` instead of using a
`Lambda` layer is saving and inspecting a Model. `Lambda` layers
are saved by serializing the Python bytecode, which is fundamentally
non-portable. They should only be loaded in the same environment where
they were saved. Subclassed layers can be saved in a more portable way
by overriding their `get_config` method. Models that rely on
subclassed Layers are also often easier to visualize and reason about.
Examples:
```python
# add a x -> x^2 layer
model.add(Lambda(lambda x: x ** 2))
```
```python
# add a layer that returns the concatenation
# of the positive part of the input and
# the opposite of the negative part
def antirectifier(x):
x -= K.mean(x, axis=1, keepdims=True)
x = K.l2_normalize(x, axis=1)
pos = K.relu(x)
neg = K.relu(-x)
return K.concatenate([pos, neg], axis=1)
model.add(Lambda(antirectifier))
```
Variables:
While it is possible to use Variables with Lambda layers, this practice is
discouraged as it can easily lead to bugs. For instance, consider the
following layer:
```python
scale = tf.Variable(1.)
scale_layer = tf.keras.layers.Lambda(lambda x: x * scale)
```
Because scale_layer does not directly track the `scale` variable, it will
not appear in `scale_layer.trainable_weights` and will therefore not be
trained if `scale_layer` is used in a Model.
A better pattern is to write a subclassed Layer:
```python
class ScaleLayer(tf.keras.layers.Layer):
def __init__(self):
super(ScaleLayer, self).__init__()
self.scale = tf.Variable(1.)
def call(self, inputs):
return inputs * self.scale
```
In general, Lambda layers can be convenient for simple stateless
computation, but anything more complex should use a subclass Layer instead.
Args:
function: The function to be evaluated. Takes input tensor as first
argument.
output_shape: Expected output shape from function. This argument can be
inferred if not explicitly provided. Can be a tuple or function. If a
tuple, it only specifies the first dimension onward;
sample dimension is assumed either the same as the input: `output_shape =
(input_shape[0], ) + output_shape` or, the input is `None` and
the sample dimension is also `None`: `output_shape = (None, ) +
output_shape` If a function, it specifies the entire shape as a function
of the
input shape: `output_shape = f(input_shape)`
mask: Either None (indicating no masking) or a callable with the same
signature as the `compute_mask` layer method, or a tensor that will be
returned as output mask regardless of what the input is.
arguments: Optional dictionary of keyword arguments to be passed to the
function.
Input shape:
Arbitrary. Use the keyword argument input_shape (tuple of
integers, does not include the samples axis) when using this layer as the
first layer in a model.
Output shape:
Specified by `output_shape` argument
"""
@trackable.no_automatic_dependency_tracking
def __init__(self, function, output_shape=None, mask=None, arguments=None,
**kwargs):
super(Lambda, self).__init__(**kwargs)
self.arguments = arguments or {}
self.function = function
if mask is not None:
self.supports_masking = True
self.mask = mask
self._output_shape = output_shape
# Warning on every invocation will be quite irksome in Eager mode.
self._already_warned = False
function_args = tf_inspect.getfullargspec(function).args
self._fn_expects_training_arg = 'training' in function_args
self._fn_expects_mask_arg = 'mask' in function_args
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self._output_shape is None:
# Make use of existing autocomputation but provide Lambda-specific
# error message. This is always safe to run even when the outer context
# is Graph mode because Lambda layers don't have side effects such as
# `add_loss`.
with context.eager_mode():
try:
return super(Lambda, self).compute_output_shape(input_shape)
except NotImplementedError:
raise NotImplementedError(
'We could not automatically infer the shape of the Lambda\'s '
'output. Please specify `output_shape` for this Lambda.')
if callable(self._output_shape):
output_shapes = self._output_shape(input_shape)
return tf_utils.convert_shapes(output_shapes, to_tuples=False)
# Output shapes are passed directly and don't include batch dimension.
input_tensor_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
batch_size = nest.flatten(input_tensor_shape)[0][0] if input_shape else None
def _add_batch(shape):
return tensor_shape.TensorShape([batch_size] + shape.as_list())
output_shapes = tf_utils.convert_shapes(self._output_shape, to_tuples=False)
return nest.map_structure(_add_batch, output_shapes)
def call(self, inputs, mask=None, training=None):
# We must copy for thread safety, but it only needs to be a shallow copy.
kwargs = {k: v for k, v in self.arguments.items()}
if self._fn_expects_mask_arg:
kwargs['mask'] = mask
if self._fn_expects_training_arg:
kwargs['training'] = training
created_variables = []
def _variable_creator(next_creator, **kwargs):
var = next_creator(**kwargs)
created_variables.append(var)
return var
with backprop.GradientTape(watch_accessed_variables=True) as tape,\
variable_scope.variable_creator_scope(_variable_creator):
result = self.function(inputs, **kwargs)
self._check_variables(created_variables, tape.watched_variables())
return result
def _check_variables(self, created_variables, accessed_variables):
if not created_variables and not accessed_variables:
# In the common case that a Lambda layer does not touch a Variable, we
# don't want to incur the runtime cost of assembling any state used for
# checking only to immediately discard it.
return
tracked_weights = set(v.ref() for v in self.weights)
untracked_new_vars = [
v for v in created_variables if v.ref() not in tracked_weights
]
if untracked_new_vars:
variable_str = '\n'.join(' {}'.format(i) for i in untracked_new_vars)
error_str = textwrap.dedent("""
The following Variables were created within a Lambda layer ({name})
but are not tracked by said layer:
{variable_str}
The layer cannot safely ensure proper Variable reuse across multiple
calls, and consequently this behavior is disallowed for safety. Lambda
layers are not well suited to stateful computation; instead, writing a
subclassed Layer is the recommend way to define layers with
Variables.""").format(name=self.name, variable_str=variable_str)
raise ValueError(error_str)
untracked_used_vars = [
v for v in accessed_variables if v.ref() not in tracked_weights
]
if untracked_used_vars and not self._already_warned:
variable_str = '\n'.join(' {}'.format(i) for i in untracked_used_vars)
self._warn(textwrap.dedent(
'''
The following Variables were used a Lambda layer's call ({name}), but
are not present in its tracked objects:
{variable_str}
It is possible that this is intended behavior, but it is more likely
an omission. This is a strong indication that this layer should be
formulated as a subclassed Layer rather than a Lambda layer.'''
).format(name=self.name, variable_str=variable_str))
self._already_warned = True
def _warn(self, msg):
# This method will be overridden in a unit test to raise an error, because
# self.assertWarns is not universally implemented.
return tf_logging.warning(msg)
def compute_mask(self, inputs, mask=None):
if callable(self.mask):
return self.mask(inputs, mask)
return self.mask
def get_config(self):
function_config = self._serialize_function_to_config(self.function)
output_shape_config = self._serialize_function_to_config(self._output_shape,
allow_raw=True)
config = {
'function': function_config[0],
'function_type': function_config[1],
'module': function_config[2],
'output_shape': output_shape_config[0],
'output_shape_type': output_shape_config[1],
'output_shape_module': output_shape_config[2],
}
if self.mask is not None:
mask_config = self._serialize_function_to_config(self.mask)
config.update({
'mask': mask_config[0],
'mask_type': mask_config[1],
'mask_module': mask_config[2]
})
config['arguments'] = self.arguments
base_config = super(Lambda, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _serialize_function_to_config(self, inputs, allow_raw=False):
if isinstance(inputs, python_types.LambdaType):
output = generic_utils.func_dump(inputs)
output_type = 'lambda'
module = inputs.__module__
elif callable(inputs):
output = inputs.__name__
output_type = 'function'
module = inputs.__module__
elif allow_raw:
output = inputs
output_type = 'raw'
module = None
else:
raise ValueError(
'Invalid input for serialization, type: %s ' % type(inputs))
return output, output_type, module
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy()
function = cls._parse_function_from_config(
config, custom_objects, 'function', 'module', 'function_type')
output_shape = cls._parse_function_from_config(
config, custom_objects, 'output_shape', 'output_shape_module',
'output_shape_type')
if 'mask' in config:
mask = cls._parse_function_from_config(
config, custom_objects, 'mask', 'mask_module', 'mask_type')
else:
mask = None
config['function'] = function
config['output_shape'] = output_shape
config['mask'] = mask
# If arguments were numpy array, they have been saved as
# list. We need to recover the ndarray
if 'arguments' in config:
for key in config['arguments']:
if isinstance(config['arguments'][key], dict):
arg_dict = config['arguments'][key]
if 'type' in arg_dict and arg_dict['type'] == 'ndarray':
# Overwrite the argument with its numpy translation
config['arguments'][key] = np.array(arg_dict['value'])
return cls(**config)
@classmethod
def _parse_function_from_config(
cls, config, custom_objects, func_attr_name, module_attr_name,
func_type_attr_name):
globs = globals().copy()
module = config.pop(module_attr_name, None)
if module in sys.modules:
globs.update(sys.modules[module].__dict__)
elif module is not None:
# Note: we don't know the name of the function if it's a lambda.
warnings.warn('{} is not loaded, but a Lambda layer uses it. '
'It may cause errors.'.format(module)
, UserWarning)
if custom_objects:
globs.update(custom_objects)
function_type = config.pop(func_type_attr_name)
if function_type == 'function':
# Simple lookup in custom objects
function = generic_utils.deserialize_keras_object(
config[func_attr_name],
custom_objects=custom_objects,
printable_module_name='function in Lambda layer')
elif function_type == 'lambda':
# Unsafe deserialization from bytecode
function = generic_utils.func_load(
config[func_attr_name], globs=globs)
elif function_type == 'raw':
function = config[func_attr_name]
else:
raise TypeError('Unknown function type:', function_type)
return function
| Lambda |
python | tornadoweb__tornado | tornado/test/routing_test.py | {
"start": 3939,
"end": 4997
} | class ____(AsyncHTTPTestCase):
def get_app(self):
router = CustomRouter()
class CustomApplication(Application):
def reverse_url(self, name, *args):
return router.reverse_url(name, *args)
app1 = CustomApplication(app_name="app1")
app2 = CustomApplication(app_name="app2")
router.add_routes(
{
"/first_handler": (app1, FirstHandler),
"/second_handler": (app2, SecondHandler),
"/first_handler_second_app": (app2, FirstHandler),
}
)
return router
def test_custom_router(self):
response = self.fetch("/first_handler")
self.assertEqual(response.body, b"app1: first_handler: /first_handler")
response = self.fetch("/second_handler")
self.assertEqual(response.body, b"app2: second_handler: /second_handler")
response = self.fetch("/first_handler_second_app")
self.assertEqual(response.body, b"app2: first_handler: /first_handler")
| CustomRouterTestCase |
python | encode__django-rest-framework | tests/test_pagination.py | {
"start": 36098,
"end": 38817
} | class ____(CursorPaginationTestsMixin):
"""
Unit tests for `pagination.CursorPagination`.
"""
def setup_method(self):
class MockObject:
def __init__(self, idx):
self.created = idx
class MockQuerySet:
def __init__(self, items):
self.items = items
def filter(self, created__gt=None, created__lt=None):
if created__gt is not None:
return MockQuerySet([
item for item in self.items
if item.created > int(created__gt)
])
assert created__lt is not None
return MockQuerySet([
item for item in self.items
if item.created < int(created__lt)
])
def order_by(self, *ordering):
if ordering[0].startswith('-'):
return MockQuerySet(list(reversed(self.items)))
return self
def __getitem__(self, sliced):
return self.items[sliced]
class ExamplePagination(pagination.CursorPagination):
page_size = 5
page_size_query_param = 'page_size'
max_page_size = 20
ordering = 'created'
self.pagination = ExamplePagination()
self.queryset = MockQuerySet([
MockObject(idx) for idx in [
1, 1, 1, 1, 1,
1, 2, 3, 4, 4,
4, 4, 5, 6, 7,
7, 7, 7, 7, 7,
7, 7, 7, 8, 9,
9, 9, 9, 9, 9
]
])
def get_pages(self, url):
"""
Given a URL return a tuple of:
(previous page, current page, next page, previous url, next url)
"""
request = Request(factory.get(url))
queryset = self.pagination.paginate_queryset(self.queryset, request)
current = [item.created for item in queryset]
next_url = self.pagination.get_next_link()
previous_url = self.pagination.get_previous_link()
if next_url is not None:
request = Request(factory.get(next_url))
queryset = self.pagination.paginate_queryset(self.queryset, request)
next = [item.created for item in queryset]
else:
next = None
if previous_url is not None:
request = Request(factory.get(previous_url))
queryset = self.pagination.paginate_queryset(self.queryset, request)
previous = [item.created for item in queryset]
else:
previous = None
return (previous, current, next, previous_url, next_url)
| TestCursorPagination |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.