language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pypa__warehouse | warehouse/organizations/models.py | {
"start": 9240,
"end": 9752
} | class ____:
def __init__(self, request):
self.request = request
def __getitem__(self, organization_application_id):
# Try returning organization application with matching id.
try:
return (
self.request.db.query(OrganizationApplication)
.filter(OrganizationApplication.id == organization_application_id)
.one()
)
except NoResultFound:
raise KeyError from None
| OrganizationApplicationFactory |
python | pytorch__pytorch | test/jit/test_save_load.py | {
"start": 26001,
"end": 40232
} | class ____(JitTestCase):
def test_different_modules(self):
"""
Exercise the situation where we have the same qualified name
in two different CompilationUnits on save/load.
"""
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.foo = torch.nn.Linear(2, 2)
self.bar = torch.nn.Linear(2, 2)
def forward(self, x):
x = self.foo(x)
x = self.bar(x)
return x
first_script_module = torch.jit.script(Foo())
first_saved_module = script_module_to_buffer(first_script_module)
clear_class_registry()
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.foo = torch.nn.Linear(2, 2)
def forward(self, x):
x = self.foo(x)
return x
second_script_module = torch.jit.script(Foo())
second_saved_module = script_module_to_buffer(second_script_module)
clear_class_registry()
self.assertEqual(
first_script_module._c.qualified_name,
second_script_module._c.qualified_name,
)
class ContainsBoth(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.add_module("second", torch.jit.load(second_saved_module))
self.add_module("first", torch.jit.load(first_saved_module))
def forward(self, x):
x = self.first(x)
x = self.second(x)
return x
sm = torch.jit.script(ContainsBoth())
contains_both = script_module_to_buffer(sm)
sm = torch.jit.load(contains_both)
def test_different_functions(self):
"""
Exercise the situation where we have the same qualified name
in two different CompilationUnits on save/load.
"""
def lol(x):
return x
class Foo(torch.nn.Module):
def forward(self, x):
return lol(x)
first_script_module = torch.jit.script(Foo())
first_saved_module = script_module_to_buffer(first_script_module)
clear_class_registry()
def lol(x): # noqa: F811
return "hello"
class Foo(torch.nn.Module):
def forward(self, x):
return lol(x)
second_script_module = torch.jit.script(Foo())
second_saved_module = script_module_to_buffer(second_script_module)
clear_class_registry()
self.assertEqual(
first_script_module._c.qualified_name,
second_script_module._c.qualified_name,
)
class ContainsBoth(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.add_module("second", torch.jit.load(second_saved_module))
self.add_module("first", torch.jit.load(first_saved_module))
def forward(self, x):
x = self.first(x)
x = self.second(x)
return x
sm = torch.jit.script(ContainsBoth())
contains_both = script_module_to_buffer(sm)
sm = torch.jit.load(contains_both)
def test_different_interfaces(self):
"""
Exercise the situation where we have the same qualified name
in two different CompilationUnits on save/load.
"""
@torch.jit.interface
class MyInterface:
def bar(self, x: Tensor) -> Tensor:
pass
@torch.jit.script
class ImplementInterface:
def __init__(self) -> None:
pass
def bar(self, x):
return x
class Foo(torch.nn.Module):
__annotations__ = {"interface": MyInterface}
def __init__(self) -> None:
super().__init__()
self.interface = ImplementInterface()
def forward(self, x):
return self.interface.bar(x)
first_script_module = torch.jit.script(Foo())
first_saved_module = script_module_to_buffer(first_script_module)
clear_class_registry()
@torch.jit.interface
class MyInterface:
def not_bar(self, x: Tensor) -> Tensor:
pass
@torch.jit.script # noqa: F811
class ImplementInterface: # noqa: F811
def __init__(self) -> None:
pass
def not_bar(self, x):
return x
class Foo(torch.nn.Module):
__annotations__ = {"interface": MyInterface}
def __init__(self) -> None:
super().__init__()
self.interface = ImplementInterface()
def forward(self, x):
return self.interface.not_bar(x)
second_script_module = torch.jit.script(Foo())
second_saved_module = script_module_to_buffer(second_script_module)
clear_class_registry()
self.assertEqual(
first_script_module._c.qualified_name,
second_script_module._c.qualified_name,
)
class ContainsBoth(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.add_module("second", torch.jit.load(second_saved_module))
self.add_module("first", torch.jit.load(first_saved_module))
def forward(self, x):
x = self.first(x)
x = self.second(x)
return x
sm = torch.jit.script(ContainsBoth())
contains_both = script_module_to_buffer(sm)
sm = torch.jit.load(contains_both)
def test_many_collisions(self):
class MyCoolNamedTuple(NamedTuple):
a: int
@torch.jit.interface
class MyInterface:
def bar(self, x: Tensor) -> Tensor:
pass
@torch.jit.script
class ImplementInterface:
def __init__(self) -> None:
pass
def bar(self, x):
return x
def lol(x):
return x
class Foo(torch.nn.Module):
interface: MyInterface
def __init__(self) -> None:
super().__init__()
self.foo = torch.nn.Linear(2, 2)
self.bar = torch.nn.Linear(2, 2)
self.interface = ImplementInterface()
def forward(self, x):
x = self.foo(x)
x = self.bar(x)
x = lol(x)
x = self.interface.bar(x)
return x, MyCoolNamedTuple(a=5)
first_script_module = torch.jit.script(Foo())
first_saved_module = script_module_to_buffer(first_script_module)
clear_class_registry()
@torch.jit.interface
class MyInterface:
def not_bar(self, x: Tensor) -> Tensor:
pass
@torch.jit.script # noqa: F811
class ImplementInterface: # noqa: F811
def __init__(self) -> None:
pass
def not_bar(self, x):
return x
def lol(x): # noqa: F811
return "asdofij"
class MyCoolNamedTuple(NamedTuple): # noqa: F811
a: str
class Foo(torch.nn.Module):
interface: MyInterface
def __init__(self) -> None:
super().__init__()
self.foo = torch.nn.Linear(2, 2)
self.interface = ImplementInterface()
def forward(self, x):
x = self.foo(x)
self.interface.not_bar(x)
x = lol(x)
return x, MyCoolNamedTuple(a="hello")
second_script_module = torch.jit.script(Foo())
second_saved_module = script_module_to_buffer(second_script_module)
clear_class_registry()
self.assertEqual(
first_script_module._c.qualified_name,
second_script_module._c.qualified_name,
)
class ContainsBoth(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.add_module("second", torch.jit.load(second_saved_module))
self.add_module("first", torch.jit.load(first_saved_module))
def forward(self, x):
x, named_tuple_1 = self.first(x)
x, named_tuple_2 = self.second(x)
return len(x + named_tuple_2.a) + named_tuple_1.a
sm = torch.jit.script(ContainsBoth())
contains_both = script_module_to_buffer(sm)
sm = torch.jit.load(contains_both)
def test_save_load_using_pathlib(self):
class MyMod(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return 2 * a
m = MyMod()
# Save then load.
with TemporaryFileName() as fname:
path = Path(fname)
torch.jit.save_jit_module_to_flatbuffer(m, path)
m2 = torch.jit.load(path)
x = torch.tensor([1.0, 2.0, 3.0, 4.0])
self.assertTrue(torch.equal(m(x), m2(x)))
def test_save_namedtuple_input_only(self):
"""
Even if a NamedTuple is only used as an input argument, saving and
loading should work correctly.
"""
global FooTuple # see [local resolution in python]
class FooTuple(NamedTuple):
a: int
class MyModule(torch.nn.Module):
def forward(self, x: FooTuple) -> torch.Tensor:
return torch.tensor(3)
m_loaded = self.getExportImportCopy(torch.jit.script(MyModule()))
output = m_loaded(FooTuple(a=5))
self.assertEqual(output, torch.tensor(3))
def test_save_namedtuple_output_only(self):
"""
Even if a NamedTuple is only used as an output argument, saving and
loading should work correctly.
"""
global FooTuple # see [local resolution in python]
class FooTuple(NamedTuple):
a: int
class MyModule(torch.nn.Module):
def forward(self) -> Optional[FooTuple]:
return None
m_loaded = self.getExportImportCopy(torch.jit.script(MyModule()))
output = m_loaded()
self.assertEqual(output, None)
def test_module_info_flatbuffer(self):
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.foo = torch.nn.Linear(2, 2)
self.bar = torch.nn.Linear(2, 2)
def forward(self, x):
x = self.foo(x)
x = self.bar(x)
return x
first_script_module = torch.jit.script(Foo())
first_saved_module = io.BytesIO()
torch.jit.save_jit_module_to_flatbuffer(first_script_module, first_saved_module)
first_saved_module.seek(0)
ff_info = torch.jit._serialization.get_flatbuffer_module_info(
first_saved_module
)
self.assertEqual(ff_info["bytecode_version"], 9)
self.assertEqual(ff_info["operator_version"], 1)
self.assertEqual(ff_info["type_names"], set())
self.assertEqual(ff_info["opname_to_num_args"], {"aten::linear": 3})
self.assertEqual(len(ff_info["function_names"]), 1)
self.assertTrue(next(iter(ff_info["function_names"])).endswith("forward"))
def test_save_load_params_buffers_submodules(self):
"""
Check that parameters, buffers, and submodules are the same after loading.
"""
class Submodule(torch.nn.Module):
pass
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.add_module("submodule_a", Submodule())
self.register_parameter(
"parameter_a", torch.nn.Parameter(torch.randn(4))
)
self.buffer = torch.nn.Buffer(torch.randn(4))
self.t = torch.rand(4) # not buffer
self.parameter_b = torch.nn.Parameter(torch.randn(4))
self.submodule_b = Submodule()
self.buffer_b = torch.nn.Buffer(torch.randn(4))
m = TestModule()
m_loaded = self.getExportImportCopy(torch.jit.script(m))
# Check submodules.
self.assertEqual(
len(list(m.named_modules())), len(list(m_loaded.named_modules()))
)
for m_s, loaded_s in zip(m.named_modules(), m_loaded.named_modules()):
m_name, _ = m_s
loaded_name, _ = loaded_s
self.assertEqual(m_name, loaded_name)
# Check parameters.
self.assertEqual(len(list(m.parameters())), len(list(m_loaded.parameters())))
for m_p, loaded_p in zip(m.parameters(), m_loaded.parameters()):
self.assertEqual(m_p, loaded_p)
# Check buffers.
self.assertEqual(
len(list(m.named_buffers())), len(list(m_loaded.named_buffers()))
)
for m_b, loaded_b in zip(m.named_buffers(), m_loaded.named_buffers()):
m_name, m_buffer = m_b
loaded_name, loaded_buffer = loaded_b
self.assertEqual(m_name, loaded_name)
self.assertEqual(m_buffer, loaded_buffer)
def test_save_load_with_extra_files(self):
"""
Check that parameters, buffers, and submodules are the same after loading.
"""
class Module(torch.nn.Module):
def forward(self, x: Tensor):
return x
module = Module()
script_module = torch.jit.script(module)
extra_files = {"abc.json": b"[1,2,3]"}
script_module_io = script_module._save_to_buffer_for_lite_interpreter(
_extra_files=extra_files, _use_flatbuffer=True
)
re_extra_files = {}
torch._C._get_model_extra_files_from_buffer(script_module_io, re_extra_files)
self.assertEqual(extra_files, re_extra_files)
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestSaveLoadFlatbuffer |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 18895,
"end": 19547
} | class ____(PrevSuccessfulDagRunResponse):
type: Literal["PrevSuccessfulDagRunResult"] = "PrevSuccessfulDagRunResult"
@classmethod
def from_dagrun_response(cls, prev_dag_run: PrevSuccessfulDagRunResponse) -> PrevSuccessfulDagRunResult:
"""
Get a result object from response object.
PrevSuccessfulDagRunResponse is autogenerated from the API schema, so we need to convert it to
PrevSuccessfulDagRunResult for communication between the Supervisor and the task process.
"""
return cls(**prev_dag_run.model_dump(exclude_defaults=True), type="PrevSuccessfulDagRunResult")
| PrevSuccessfulDagRunResult |
python | walkccc__LeetCode | solutions/2487. Remove Nodes From Linked List/2487.py | {
"start": 0,
"end": 235
} | class ____:
def removeNodes(self, head: ListNode | None) -> ListNode | None:
if not head:
return None
head.next = self.removeNodes(head.next)
return head.next if head.next and head.val < head.next.val else head
| Solution |
python | ansible__ansible | lib/ansible/module_utils/facts/hardware/hurd.py | {
"start": 890,
"end": 1599
} | class ____(LinuxHardware):
"""
GNU Hurd specific subclass of Hardware. Define memory and mount facts
based on procfs compatibility translator mimicking the interface of
the Linux kernel.
"""
platform = 'GNU'
def populate(self, collected_facts=None):
hardware_facts = {}
uptime_facts = self.get_uptime_facts()
memory_facts = self.get_memory_facts()
mount_facts = {}
try:
mount_facts = self.get_mount_facts()
except TimeoutError:
pass
hardware_facts.update(uptime_facts)
hardware_facts.update(memory_facts)
hardware_facts.update(mount_facts)
return hardware_facts
| HurdHardware |
python | kamyu104__LeetCode-Solutions | Python/1-bit-and-2-bit-characters.py | {
"start": 30,
"end": 344
} | class ____(object):
def isOneBitCharacter(self, bits):
"""
:type bits: List[int]
:rtype: bool
"""
parity = 0
for i in reversed(xrange(len(bits)-1)):
if bits[i] == 0:
break
parity ^= bits[i]
return parity == 0
| Solution |
python | pytorch__pytorch | test/test_foreach.py | {
"start": 1188,
"end": 2072
} | class ____:
def __init__(self, func):
self.func = func
def __call__(self, inputs, scalars=None, **kwargs):
if scalars is not None:
assert len(inputs) == 3
# We need to distribute each scalar to the regular func and it needs
# special consideration as it is a keyword only argument to the
# regular func. (Strangely, it is not a keyword only argument to the
# foreach func)
return [
self.func(*i, value=scalars[idx], **kwargs)
for idx, i in enumerate(zip(*inputs))
]
if len(inputs) == 2 and isinstance(inputs[1], (Number, torch.Tensor)):
# binary op with tensorlist and scalar.
inputs[1] = [inputs[1] for _ in range(len(inputs[0]))]
return [self.func(*i, **kwargs) for i in zip(*inputs)]
| RegularFuncWrapper |
python | google__jax | jax/_src/pallas/mosaic/lowering.py | {
"start": 42148,
"end": 57774
} | class ____(Exception):
pass
def _compute_name_stack_updates(
old_name_stack: list[str],
new_name_stack: list[str]
) -> tuple[list[str], list[str]]:
"""Computes the popped/pushed items to the name stack after an update.
Args:
old_name_stack: The name stack prior to the update.
new_name_stack: The name stack after the update.
Returns:
popped: A list of names popped from the name stack as part of the update.
pushed: A list of names pushed to the name stack as part of the update.
"""
common_prefix_idx = 0
for i, (old, new) in enumerate(unsafe_zip(old_name_stack, new_name_stack)):
if old == new:
common_prefix_idx = i+1
else:
break
return old_name_stack[common_prefix_idx:], new_name_stack[common_prefix_idx:]
def jaxpr_subcomp(
ctx: LoweringContext, jaxpr: jax_core.Jaxpr, *args: ir.Value
) -> list[ir.Value]:
assert not jaxpr.constvars
env = {}
block_shape_env = {}
def read_block_shape(atom: jax_core.Atom):
if isinstance(atom, jax_core.Literal):
return None
return block_shape_env.get(atom, None)
def read_env(atom: jax_core.Atom):
return atom.val if isinstance(atom, jax_core.Literal) else env[atom]
def write_env(var: jax_core.Var, val):
is_valid_type = isinstance(val, (ir.Value, KeyScalarBundle))
assert is_valid_type, type(val)
env[var] = val
for invar, bs in zip(jaxpr.invars, ctx.block_shapes):
block_shape_env[invar] = bs
foreach(write_env, jaxpr.invars, args)
initial_name_stack = [scope.name for scope in ctx.name_stack.stack]
current_name_stack: list[str] = []
# TODO(justinfu): Handle transform scopes.
current_name_stack.extend(initial_name_stack)
for eqn in jaxpr.eqns:
invals = map(read_env, eqn.invars)
eqn_name_stack = ctx.name_stack + eqn.source_info.name_stack
loc = mlir.source_info_to_location( # pytype: disable=wrong-arg-types
ctx, eqn.primitive, eqn_name_stack, eqn.source_info.traceback
)
with (source_info_util.user_context(eqn.source_info.traceback), loc,
eqn.ctx.manager):
if eqn.primitive in lowering_rules[ctx.kernel_type]:
if (eqn.primitive, ctx.kernel_type) not in skip_mlir_conversions:
invals = [
_ensure_mlir_value(x, cast(ShapedAbstractValue, v.aval))
for x, v in zip(invals, eqn.invars)
]
block_shapes = map(read_block_shape, eqn.invars)
rule_context = LoweringRuleContext(
ctx,
cast(Sequence[ShapedAbstractValue], [v.aval for v in eqn.invars]),
cast(Sequence[ShapedAbstractValue], [v.aval for v in eqn.outvars]),
block_shapes,
)
# Insert trace_start and trace_stop ops on named_scope boundaries.
name_stack = [scope.name for scope in eqn_name_stack.stack]
popped, pushed = _compute_name_stack_updates(
current_name_stack, name_stack)
current_name_stack = name_stack
for _ in popped:
tpu.trace_stop()
for name in pushed:
tpu.trace_start(message=name, level=10)
try:
ans = lowering_rules[ctx.kernel_type][eqn.primitive](
rule_context, *invals, **eqn.params
)
except LoweringException:
raise # We only add the extra info to the innermost exception.
except Exception as e:
if not config.jax_pallas_verbose_errors.value:
raise
msg = (f"{type(e).__name__}: {e}\n" +
"Additional diagnostics: \n" +
f"Failing jaxpr equation: {eqn}\n")
new_error = LoweringException(msg)
# We insert the traceback here so that the user code shows
# up in the traceback for the post-transform error.
if eqn.source_info.traceback is not None:
tb = eqn.source_info.traceback.as_python_traceback()
new_error.__traceback__ = traceback_util.filter_traceback(tb)
raise new_error from e
else:
raise NotImplementedError(
"Unimplemented primitive in Pallas TPU lowering for"
f" {ctx.kernel_type}: {eqn.primitive.name}. Please file an issue on"
" https://github.com/jax-ml/jax/issues."
)
if eqn.primitive.multiple_results:
foreach(write_env, eqn.outvars, ans)
else:
write_env(eqn.outvars[0], ans)
# Drain the name stack at the end of a jaxpr and insert trace_stop ops.
popped, pushed = _compute_name_stack_updates(
current_name_stack, initial_name_stack)
for _ in popped:
tpu.trace_stop()
assert len(pushed) == 0
outvals = map(read_env, jaxpr.outvars)
outvals = [
ir_constant(x) if isinstance(var, jax_core.Literal) else x
for x, var in zip(outvals, jaxpr.outvars)
]
return outvals
def _ensure_mlir_value(val: object, aval: ShapedAbstractValue) -> Any:
if isinstance(val, ir.Value):
return val
if isinstance(val, KeyScalarBundle):
# TODO(slebedev): Drop this branch and change the return type to ir.Value.
return val
elif isinstance(val, (np.generic, np.ndarray, int, float,
literals.TypedNdArray)):
return ir_constant(val, _dtype_to_ir_type(aval.dtype))
else:
raise RuntimeError(
f"Unsupported argument to a JAX primitive of type: {type(val)}"
)
@register_lowering_rule(state_primitives.get_p, ensure_mlir_values=False)
def _get_lowering_rule(
ctx: LoweringRuleContext, ref, *idx, tree,
):
indexers = tree_util.tree_unflatten(tree, idx)
indexers_avals = tree_util.tree_unflatten(tree, ctx.avals_in[1:])
# Call _load_lowering_rule (since it's more general)
ref_aval, *_ = ctx.avals_in
args_flat, args_tree = tree_util.tree_flatten((ref, indexers, None, None))
avals_flat = tree_util.tree_leaves((ref_aval, indexers_avals, None, None))
ctx = ctx.replace(
avals_in=avals_flat,
block_shapes=[ctx.block_shapes[0], *[None] * (len(avals_flat) - 1)],
)
return _load_lowering_rule(ctx, *args_flat, args_tree=args_tree)
@register_lowering_rule(state_primitives.swap_p, ensure_mlir_values=False)
def _swap_lowering_rule(
ctx: LoweringRuleContext,
ref,
val,
*idx,
tree
):
indexers = tree_util.tree_unflatten(tree, idx)
indexers_avals = tree_util.tree_unflatten(tree, ctx.avals_in[2:])
# Call _masked_swap_lowering_rule (since it's more general)
ref_aval, val_aval, *_ = ctx.avals_in
args_flat, args_tree = tree_util.tree_flatten((ref, indexers, val, None))
avals_flat = tree_util.tree_leaves(
(ref_aval, indexers_avals, val_aval, None)
)
ctx = ctx.replace(
avals_in=avals_flat,
block_shapes=[ctx.block_shapes[0], *[None] * (len(avals_flat) - 1)],
)
return _masked_swap_lowering_rule(ctx, *args_flat, args_tree=args_tree)
def _make_index(s):
if isinstance(s, (int, np.ndarray, literals.TypedNdArray)):
return ir_constant(s, ir.IndexType.get())
if s.type == ir.IndexType.get():
return s
return arith.index_cast(ir.IndexType.get(), s)
def _maybe_cast_to_index(cast_to_index, x):
if cast_to_index:
return _make_index(x)
return _ensure_mlir_value(x, aval=pallas_core.index_map_grid_aval)
def _index_to_start_size_stride(
idx: tuple[indexing.Slice | int | ir.Value, ...], cast_to_index: bool
) -> tuple[ir.Value, int | ir.Value, int, bool]:
assert not isinstance(idx, slice)
if isinstance(idx, indexing.Slice):
start = _maybe_cast_to_index(cast_to_index, idx.start)
size = idx.size
stride = idx.stride
squeeze = False
elif isinstance(idx, int):
start = _maybe_cast_to_index(cast_to_index, idx)
size = 1
stride = 1
squeeze = True
else:
if np.shape(idx):
raise ValueError(f"Can only use ()-shaped and slice indexing: {idx}")
start = _maybe_cast_to_index(cast_to_index, idx)
size = 1
stride = 1
squeeze = True
return start, size, stride, squeeze
def _indexer_to_start_size_stride(
indexer: NDIndexer,
ref_block_shape: tuple[int | pallas_core.Squeezed, ...],
*,
cast_to_index: bool,
) -> tuple[
tuple[ir.Value, ...],
tuple[int | ir.Value, ...],
tuple[int, ...],
tuple[bool, ...],
tuple[int | pallas_core.Squeezed, ...],
]:
indices_iter = iter(indexer.indices)
starts, sizes, strides, squeeze_dims = [], [], [], []
for s in ref_block_shape:
match s:
case pallas_core.Squeezed():
start = _maybe_cast_to_index(cast_to_index, 0)
size = 1
stride = 1
squeeze_dim = True
case _:
start, size, stride, squeeze_dim = _index_to_start_size_stride(
next(indices_iter), cast_to_index # pytype: disable=wrong-arg-types
)
starts.append(start)
sizes.append(size)
strides.append(stride)
squeeze_dims.append(squeeze_dim)
next_index = next(indices_iter, None)
assert next_index is None, (indexer.indices, ref_block_shape)
new_ref_block_shape = tuple(s for s, squeeze in zip(sizes, squeeze_dims)
if not squeeze)
return (
tuple(starts),
tuple(sizes),
tuple(strides),
tuple(squeeze_dims),
new_ref_block_shape,
)
def _compute_squeezed_dims(source_shape: Sequence[int], target_shape: Sequence[int]) -> Sequence[bool]:
# This function only exists to align the ``tpu.memref_squeeze`` layout
# inference logic between Python and MLIR.
result = []
source_index = len(source_shape) - 1
target_index = len(target_shape) - 1
while source_index >= 0 or target_index >= 0:
target_dim = target_shape[target_index] if target_index >= 0 else -1
assert source_index >= 0
if source_shape[source_index] == target_dim:
result.append(False)
source_index -= 1
target_index -= 1
else:
assert source_shape[source_index] == 1
result.append(True)
source_index -= 1
result.reverse()
return result
def _slice_memref(
ref: ir.Value,
indexer: NDIndexer,
ref_dtype: DTypeLike,
ref_block_shape: tuple[int | pallas_core.Squeezed, ...],
) -> tuple[ir.Value, tuple[int | pallas_core.Squeezed, ...]]:
assert ref_block_shape is not None
starts, sizes, strides, squeeze_dims, ref_block_shape = (
_indexer_to_start_size_stride(
indexer,
ref_block_shape,
cast_to_index=False,
)
)
if not all((s is None or s == 1) for s in strides):
raise NotImplementedError("Strided slices of references are unsupported.")
ir_dynamic_size = ir.ShapedType.get_dynamic_size()
static_starts = []
for s in starts:
if not isinstance(s, ir.Value):
static_starts.append(s)
elif (v := _fold_and_get_constant_value(s)) is not None:
static_starts.append(v)
else:
static_starts.append(ir_dynamic_size)
static_sizes = []
dynamic_sizes = []
for s in sizes:
if not isinstance(s, ir.Value):
static_sizes.append(s)
elif (v := _fold_and_get_constant_value(s)) is not None:
static_sizes.append(v)
else:
static_sizes.append(ir_dynamic_size)
dynamic_sizes.append(s)
ref_ty = ir.MemRefType(ref.type)
ref_strides, ref_offset = ref_ty.get_strides_and_offset()
if ref_offset == ir_dynamic_size or ir_dynamic_size in static_starts:
target_offset = ir_dynamic_size
else:
target_offset = sum(
map(operator.mul, static_starts, ref_strides), ref_offset
)
out_layout = ir.StridedLayoutAttr.get(target_offset, ref_strides)
out_ty = ir.MemRefType.get(
static_sizes, ref_ty.element_type, out_layout, ref_ty.memory_space
)
out = tpu.memref_slice(out_ty, ref, starts, dynamic_sizes)
if any(squeeze_dims):
# We need to squeeze out some dimensions.
ref_ty = out_ty
del out_ty
ref_strides, ref_offset = ref_ty.get_strides_and_offset()
target_sizes = [dim for i, dim in enumerate(ref_ty.shape) if not squeeze_dims[i]]
del squeeze_dims
# We re-infer the squeezed dimensions to align with the tpu.memref_squeeze
# verification logic in MLIR in ambiguous cases, e.g. when squeezing
# from [1, 1, 128] to [1, 128].
squeeze_dims = _compute_squeezed_dims(ref_ty.shape, target_sizes)
target_strides = [s for i, s in enumerate(ref_strides) if not squeeze_dims[i]]
out_layout = ir.StridedLayoutAttr.get(ref_offset, target_strides)
out_ty = ir.MemRefType.get(
target_sizes,
ref_ty.element_type,
out_layout,
ref_ty.memory_space,
)
out = tpu.memref_squeeze(out_ty, out)
return out, ref_block_shape
def _bitcast_memref(
ref: ir.Value,
bitcaster: RefBitcaster,
ref_dtype: DTypeLike,
ref_block_shape: tuple[int | pallas_core.Squeezed, ...],
) -> tuple[ir.Value, DTypeLike, tuple[int | pallas_core.Squeezed, ...]]:
src_bitwidth = dtypes.itemsize_bits(ref_dtype)
dst_bitwidth = dtypes.itemsize_bits(bitcaster.dtype)
if src_bitwidth != dst_bitwidth:
if len(ref_block_shape) < 2:
raise NotImplementedError(
"Bitcast 1D ref with bitwidth change is not supported."
)
if ref_block_shape[-2] is pallas_core.squeezed:
raise NotImplementedError(
"Bitcast a ref whose 2nd minormost dimension is squeezed when"
" bitwidth changes."
)
new_ref_dtype = bitcaster.dtype
ref_ty = ir.MemRefType(ref.type)
target_ref_ty = ir.MemRefType.get(
bitcaster.shape,
_dtype_to_ir_type(new_ref_dtype),
memory_space=ref_ty.memory_space,
)
new_ref_block_shape = list(ref_block_shape)
if (
len(new_ref_block_shape) >= 2
and new_ref_block_shape[-2] is not pallas_core.squeezed
):
new_ref_block_shape[-2] = (
new_ref_block_shape[-2] * src_bitwidth // dst_bitwidth
)
return (
tpu.memref_bitcast(target_ref_ty, ref),
new_ref_dtype,
tuple(new_ref_block_shape),
)
def _reshape_memref(
ref: ir.Value,
reshaper: RefReshaper,
ref_dtype: DTypeLike,
ref_block_shape: tuple[int | pallas_core.Squeezed, ...],
) -> tuple[ir.Value, tuple[int, ...]]:
if ref_dtype != reshaper.dtype:
raise ValueError(
f"Reshape a ref with dtype change: {reshaper.dtype} vs {ref_dtype}"
)
if len(ref_block_shape) < 2:
raise NotImplementedError("Reshape 1D ref is not supported.")
if (
ref_block_shape[-2] is pallas_core.squeezed
or ref_block_shape[-1] is pallas_core.squeezed
):
raise NotImplementedError(
"Reshape a ref with squeezed dimension on last two dimensions."
)
if np.prod(ref_block_shape) != np.prod(reshaper.shape):
raise ValueError(
f"Reshape a ref with different number of elements: {ref_block_shape} "
f"vs {reshaper.shape}"
)
ref_ty = ir.MemRefType(ref.type)
target_ref_ty = ir.MemRefType.get(
reshaper.shape,
_dtype_to_ir_type(reshaper.dtype),
memory_space=ref_ty.memory_space,
)
return (
tpu.memref_reshape(target_ref_ty, ref),
reshaper.shape,
)
def _transform_ref(ref, ref_dtype, ref_block_shape, transforms):
for transform in transforms:
match transform:
case NDIndexer():
ref, ref_block_shape = _slice_memref(
ref, transform, ref_dtype, ref_block_shape
)
case RefBitcaster():
ref, ref_dtype, ref_block_shape = _bitcast_memref(
ref, transform, ref_dtype, ref_block_shape
)
case RefReshaper():
ref, ref_block_shape = _reshape_memref(
ref, transform, ref_dtype, ref_block_shape
)
case _:
raise NotImplementedError(f"Unsupported transform: {transform}")
return ref, ref_block_shape
@dataclasses.dataclass(frozen=True)
| LoweringException |
python | getsentry__sentry | src/sentry/dashboards/endpoints/organization_dashboards_starred.py | {
"start": 911,
"end": 1103
} | class ____(OrganizationPermission):
scope_map = {
"GET": ["member:read", "member:write"],
"PUT": ["member:read", "member:write"],
}
@region_silo_endpoint
| MemberPermission |
python | giampaolo__psutil | scripts/internal/print_dist.py | {
"start": 389,
"end": 2064
} | class ____:
def __init__(self, path):
self._path = path
self._name = os.path.basename(path)
def __repr__(self):
return "<{}(name={}, plat={}, arch={}, pyver={})>".format(
self.__class__.__name__,
self.name,
self.platform(),
self.arch(),
self.pyver(),
)
__str__ = __repr__
@property
def name(self):
return self._name
def platform(self):
plat = self.name.split('-')[-1]
pyimpl = self.name.split('-')[3]
ispypy = 'pypy' in pyimpl
if 'linux' in plat:
if ispypy:
return 'pypy_on_linux'
else:
return 'linux'
elif 'win' in plat:
if ispypy:
return 'pypy_on_windows'
else:
return 'windows'
elif 'macosx' in plat:
if ispypy:
return 'pypy_on_macos'
else:
return 'macos'
else:
raise ValueError(f"unknown platform {self.name!r}")
def arch(self):
if self.name.endswith(('x86_64.whl', 'amd64.whl')):
return '64-bit'
if self.name.endswith(("i686.whl", "win32.whl")):
return '32-bit'
if self.name.endswith("arm64.whl"):
return 'arm64'
if self.name.endswith("aarch64.whl"):
return 'aarch64'
return '?'
def pyver(self):
pyver = 'pypy' if self.name.split('-')[3].startswith('pypy') else 'py'
pyver += self.name.split('-')[2][2:]
return pyver
def size(self):
return os.path.getsize(self._path)
| Wheel |
python | ray-project__ray | python/ray/dag/tests/experimental/test_dag_error_handling.py | {
"start": 23796,
"end": 24490
} | class ____:
def sleep(self, x):
time.sleep(x)
a = Actor.remote()
with InputNode() as inp:
dag = a.sleep.bind(inp)
compiled_dag = dag.experimental_compile()
ref = compiled_dag.execute(100)
print("executing", flush=True)
ray.get(ref)
"""
driver_proc = run_string_as_driver_nonblocking(
driver_script, env={"RAY_CGRAPH_teardown_timeout": "0"}
)
# wait for graph execution to start
assert driver_proc.stdout.readline() == b"executing\n"
driver_proc.send_signal(signal.SIGINT) # ctrl+c
# teardown will kill actors after timeout
wait_for_pid_to_exit(driver_proc.pid, 10)
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| Actor |
python | plotly__plotly.py | plotly/graph_objs/scatter3d/legendgrouptitle/_font.py | {
"start": 233,
"end": 9937
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter3d.legendgrouptitle"
_path_str = "scatter3d.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatter3d.lege
ndgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter3d.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | pytorch__pytorch | test/dynamo/test_torchrec.py | {
"start": 2367,
"end": 6704
} | class ____(TestCase):
def test_pooled(self):
tables = [
(nn.EmbeddingBag(2000, 8), ["a0", "b0"]),
(nn.EmbeddingBag(2000, 8), ["a1", "b1"]),
(nn.EmbeddingBag(2000, 8), ["b2"]),
]
embedding_groups = {
"a": ["a0", "a1"],
"b": ["b0", "b1", "b2"],
}
counter = CompileCounter()
@torch.compile(backend=counter, fullgraph=True, dynamic=True)
def f(id_list_features: KeyedJaggedTensor):
id_list_jt_dict: dict[str, JaggedTensor] = id_list_features.to_dict()
pooled_embeddings = {}
# TODO: run feature processor
for emb_module, feature_names in tables:
features_dict = id_list_jt_dict
for feature_name in feature_names:
f = features_dict[feature_name]
pooled_embeddings[feature_name] = emb_module(
f.values(), f.offsets()
)
pooled_embeddings_by_group = {}
for group_name, group_embedding_names in embedding_groups.items():
group_embeddings = [
pooled_embeddings[name] for name in group_embedding_names
]
pooled_embeddings_by_group[group_name] = torch.cat(
group_embeddings, dim=1
)
return pooled_embeddings_by_group
dataset = RandomRecDataset(
keys=["a0", "a1", "b0", "b1", "b2"],
batch_size=4,
hash_size=2000,
ids_per_feature=3,
num_dense=0,
)
di = iter(dataset)
# unsync should work
d1 = next(di).sparse_features.unsync()
d2 = next(di).sparse_features.unsync()
d3 = next(di).sparse_features.unsync()
r1 = f(d1)
r2 = f(d2)
r3 = f(d3)
self.assertEqual(counter.frame_count, 1)
counter.frame_count = 0
# sync should work too
d1 = next(di).sparse_features.sync()
d2 = next(di).sparse_features.sync()
d3 = next(di).sparse_features.sync()
r1 = f(d1)
r2 = f(d2)
r3 = f(d3)
self.assertEqual(counter.frame_count, 1)
# export only works with unsync
gm = torch._dynamo.export(f)(next(di).sparse_features.unsync()).graph_module
gm.print_readable()
self.assertEqual(gm(d1), r1)
self.assertEqual(gm(d2), r2)
self.assertEqual(gm(d3), r3)
def test_bucketize(self):
mod = BucketizeMod({"f1": [0.0, 0.5, 1.0]})
features = KeyedJaggedTensor.from_lengths_sync(
keys=["f1"],
values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
lengths=torch.tensor([2, 0, 1, 1, 1, 3]),
weights=torch.tensor([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]),
).unsync()
def f(x):
# This is a trick to populate the computed cache and instruct
# ShapeEnv that they're all sizey
x.to_dict()
return mod(x)
torch._dynamo.export(f, aten_graph=True)(features).graph_module.print_readable()
@unittest.expectedFailure
def test_simple(self):
jag_tensor1 = KeyedJaggedTensor(
values=torch.tensor([3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
keys=["index_0", "index_1"],
lengths=torch.tensor([0, 0, 1, 1, 1, 3]),
).sync()
# ordinarily, this would trigger one specialization
self.assertEqual(jag_tensor1.length_per_key(), [1, 5])
counter = CompileCounter()
@torch.compile(backend=counter, fullgraph=True)
def f(jag_tensor):
# The indexing here requires more symbolic reasoning
# and doesn't work right now
return jag_tensor["index_0"].values().sum()
f(jag_tensor1)
self.assertEqual(counter.frame_count, 1)
jag_tensor2 = KeyedJaggedTensor(
values=torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
keys=["index_0", "index_1"],
lengths=torch.tensor([2, 0, 1, 1, 1, 3]),
).sync()
f(jag_tensor2)
self.assertEqual(counter.frame_count, 1)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| TorchRecTests |
python | huggingface__transformers | tests/models/xlm/test_modeling_xlm.py | {
"start": 12252,
"end": 17875
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if (
pipeline_test_case_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
# XLM has 2 QA models -> need to manually set the correct labels for one of them here
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
inputs_dict["start_positions"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
inputs_dict["end_positions"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = XLMModelTester(self)
self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_xlm_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*config_and_inputs)
# Copied from tests/models/distilbert/test_modeling_distilbert.py with Distilbert->XLM
def test_xlm_model_with_sinusoidal_encodings(self):
config = XLMConfig(sinusoidal_embeddings=True)
model = XLMModel(config=config)
sinusoidal_pos_embds = torch.empty((config.max_position_embeddings, config.emb_dim), dtype=torch.float32)
create_sinusoidal_embeddings(config.max_position_embeddings, config.emb_dim, sinusoidal_pos_embds)
self.model_tester.parent.assertTrue(torch.equal(model.position_embeddings.weight, sinusoidal_pos_embds))
def test_xlm_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)
def test_xlm_simple_qa(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)
def test_xlm_qa(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*config_and_inputs)
def test_xlm_sequence_classif(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)
def test_xlm_token_classif(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)
def test_xlm_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
def _check_attentions_for_generate(
self, batch_size, attentions, prompt_length, output_length, config, decoder_past_key_values
):
# adds PAD dummy token, expected shape is off by 1
prompt_length += 1
output_length += 1
super()._check_attentions_for_generate(
batch_size, attentions, prompt_length, output_length, config, decoder_past_key_values
)
def _check_hidden_states_for_generate(
self, batch_size, hidden_states, prompt_length, output_length, config, use_cache=False
):
# adds PAD dummy token, expected shape is off by 1
prompt_length += 1
output_length += 1
super()._check_hidden_states_for_generate(
batch_size, hidden_states, prompt_length, output_length, config, use_cache
)
@slow
def test_model_from_pretrained(self):
model_name = "FacebookAI/xlm-mlm-en-2048"
model = XLMModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
| XLMModelTest |
python | spack__spack | lib/spack/spack/cmd/create.py | {
"start": 16415,
"end": 17788
} | class ____(PackageTemplate):
"""Provides appropriate overrides for R extensions"""
base_class_name = "RPackage"
package_class_import = "from spack_repo.builtin.build_systems.r import RPackage"
dependencies = """\
# FIXME: Add dependencies if required.
# depends_on("r-foo", type=("build", "run"))"""
body_def = """\
def configure_args(self):
# FIXME: Add arguments to pass to install via --configure-args
# FIXME: If not needed delete this function
args = []
return args"""
def __init__(self, name, url, versions, languages: List[str]):
# If the user provided `--name r-rcpp`, don't rename it r-r-rcpp
if not name.startswith("r-"):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to r-{0}".format(name))
name = "r-{0}".format(name)
r_name = parse_name(url)
cran = re.search(r"(?:r-project|rstudio)[^/]+/src" + "/([^/]+)" * 2, url)
if cran:
url = r_name
self.url_line = ' cran = "{url}"'
bioc = re.search(r"(?:bioconductor)[^/]+/packages" + "/([^/]+)" * 5, url)
if bioc:
self.url_line = ' url = "{0}"\n' ' bioc = "{1}"'.format(url, r_name)
super().__init__(name, url, versions, languages)
| RPackageTemplate |
python | wandb__wandb | wandb/vendor/pygments/lexers/robotframework.py | {
"start": 14025,
"end": 14252
} | class ____(TestCaseTable):
_tokenizer_class = KeywordCall
_setting_class = KeywordSetting
def _is_template(self, value):
return False
# Following code copied directly from Robot Framework 2.7.5.
| KeywordTable |
python | huggingface__transformers | src/transformers/models/oneformer/modeling_oneformer.py | {
"start": 116994,
"end": 119096
} | class ____(nn.Module):
def __init__(self, config: OneFormerConfig):
super().__init__()
self.text_encoder = OneFormerTextEncoder(
context_length=config.text_encoder_context_length,
width=config.text_encoder_width,
layers=config.text_encoder_num_layers,
vocab_size=config.text_encoder_vocab_size,
layer_norm_eps=config.layer_norm_eps,
)
self.text_projector = OneFormerMLPPredictionHead(
config.text_encoder_width,
config.hidden_dim,
config.hidden_dim,
config.text_encoder_proj_layers,
)
if config.text_encoder_n_ctx > 0:
self.prompt_ctx = nn.Embedding(
config.text_encoder_n_ctx,
config.text_encoder_width,
)
else:
self.prompt_ctx = None
def forward(
self,
inputs: Tensor,
) -> Tensor:
text_queries = self.encode_text(inputs)
return text_queries
def encode_text(self, text):
if text.ndim is None:
raise ValueError("text must not be NoneType")
if text.ndim not in [2, 3]:
raise ValueError("Number of dimensions in text must be 2 or 3")
squeeze_dim = False
num_text = 1
if text.ndim == 3:
num_text = text.shape[1]
batch_size, num_text, hidden_dim = text.shape
text = text.reshape(batch_size * num_text, hidden_dim)
squeeze_dim = True
# [batch_size, num_channels]
encoded_text = self.text_encoder(text)
text_queries = self.text_projector(encoded_text)
if squeeze_dim:
_, hidden_dim = text_queries.shape
text_queries = text_queries.reshape(batch_size, num_text, hidden_dim)
if self.prompt_ctx is not None:
text_queries_ctx = self.prompt_ctx.weight.unsqueeze(0).repeat(text_queries.shape[0], 1, 1)
text_queries = torch.cat([text_queries, text_queries_ctx], dim=1)
return text_queries
| OneFormerTextMapper |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_cloud_composer.py | {
"start": 11984,
"end": 14454
} | class ____:
@mock.patch(COMPOSER_STRING.format("ExecuteAirflowCommandResponse.to_dict"))
@mock.patch(COMPOSER_STRING.format("CloudComposerHook"))
def test_execute(self, mock_hook, to_dict_mode) -> None:
mock_hook.return_value.wait_command_execution_result.return_value = {
"exit_info": {"exit_code": 0},
"output": [
{"content": "test"},
],
}
op = CloudComposerRunAirflowCLICommandOperator(
task_id=TASK_ID,
project_id=TEST_GCP_PROJECT,
region=TEST_GCP_REGION,
environment_id=TEST_ENVIRONMENT_ID,
command=TEST_USER_COMMAND,
gcp_conn_id=TEST_GCP_CONN_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
op.execute(mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.execute_airflow_command.assert_called_once_with(
project_id=TEST_GCP_PROJECT,
region=TEST_GCP_REGION,
environment_id=TEST_ENVIRONMENT_ID,
command=TEST_COMMAND,
subcommand=TEST_SUBCOMMAND,
parameters=TEST_PARAMETERS,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(COMPOSER_STRING.format("ExecuteAirflowCommandResponse.to_dict"))
@mock.patch(COMPOSER_STRING.format("CloudComposerHook"))
@mock.patch(COMPOSER_TRIGGERS_STRING.format("CloudComposerAsyncHook"))
def test_execute_deferrable(self, mock_trigger_hook, mock_hook, to_dict_mode):
op = CloudComposerRunAirflowCLICommandOperator(
task_id=TASK_ID,
project_id=TEST_GCP_PROJECT,
region=TEST_GCP_REGION,
environment_id=TEST_ENVIRONMENT_ID,
command=TEST_USER_COMMAND,
gcp_conn_id=TEST_GCP_CONN_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
deferrable=True,
)
with pytest.raises(TaskDeferred) as exc:
op.execute(mock.MagicMock())
assert isinstance(exc.value.trigger, CloudComposerAirflowCLICommandTrigger)
assert exc.value.method_name == GOOGLE_DEFAULT_DEFERRABLE_METHOD_NAME
| TestCloudComposerRunAirflowCLICommandOperator |
python | boto__boto3 | tests/unit/dynamodb/test_types.py | {
"start": 709,
"end": 1733
} | class ____(unittest.TestCase):
def test_bytes_input(self):
data = Binary(b'\x01')
assert b'\x01' == data
assert b'\x01' == data.value
def test_non_ascii_bytes_input(self):
# Binary data that is out of ASCII range
data = Binary(b'\x88')
assert b'\x88' == data
assert b'\x88' == data.value
def test_bytearray_input(self):
data = Binary(bytearray([1]))
assert b'\x01' == data
assert b'\x01' == data.value
def test_unicode_throws_error(self):
with pytest.raises(TypeError):
Binary('\u00e9')
def test_integer_throws_error(self):
with pytest.raises(TypeError):
Binary(1)
def test_not_equal(self):
assert Binary(b'\x01') != b'\x02'
def test_str(self):
assert Binary(b'\x01').__str__() == b'\x01'
def test_bytes(self):
self.assertEqual(bytes(Binary(b'\x01')), b'\x01')
def test_repr(self):
assert 'Binary' in repr(Binary(b'1'))
| TestBinary |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/stateful.py | {
"start": 26881,
"end": 37696
} | class ____(Iterable[Ex]):
values: tuple[Ex, ...]
def __iter__(self):
return iter(self.values)
def multiple(*args: T) -> MultipleResults[T]:
"""This function can be used to pass multiple results to the target(s) of
a rule. Just use ``return multiple(result1, result2, ...)`` in your rule.
It is also possible to use ``return multiple()`` with no arguments in
order to end a rule without passing any result.
"""
return MultipleResults(args)
def _convert_targets(targets, target):
"""Single validator and converter for target arguments."""
if target is not None:
if targets:
raise InvalidArgument(
f"Passing both targets={targets!r} and target={target!r} is "
f"redundant - pass targets={(*targets, target)!r} instead."
)
targets = (target,)
converted_targets = []
for t in targets:
if not isinstance(t, Bundle):
msg = "Got invalid target %r of type %r, but all targets must be Bundles."
if isinstance(t, OneOfStrategy):
msg += (
"\nIt looks like you passed `one_of(a, b)` or `a | b` as "
"a target. You should instead pass `targets=(a, b)` to "
"add the return value of this rule to both the `a` and "
"`b` bundles, or define a rule for each target if it "
"should be added to exactly one."
)
raise InvalidArgument(msg % (t, type(t)))
while isinstance(t, Bundle):
if isinstance(t, BundleConsumer):
note_deprecation(
f"Using consumes({t.name}) doesn't makes sense in this context. "
"This will be an error in a future version of Hypothesis.",
since="2021-09-08",
has_codemod=False,
stacklevel=2,
)
t = t.name
converted_targets.append(t)
return tuple(converted_targets)
RULE_MARKER = "hypothesis_stateful_rule"
INITIALIZE_RULE_MARKER = "hypothesis_stateful_initialize_rule"
PRECONDITIONS_MARKER = "hypothesis_stateful_preconditions"
INVARIANT_MARKER = "hypothesis_stateful_invariant"
_RuleType = Callable[..., MultipleResults[Ex] | Ex]
_RuleWrapper = Callable[[_RuleType[Ex]], _RuleType[Ex]]
def _rule_qualname(f: Any) -> str:
# we define rules / invariants / initializes inside of wrapper functions, which
# makes f.__qualname__ look like:
# test_precondition.<locals>.BadStateMachine.has_precondition_but_no_rule
# which is not ideal. This function returns just
# BadStateMachine.has_precondition_but_no_rule
# instead.
return f.__qualname__.rsplit("<locals>.")[-1]
# We cannot exclude `target` or `targets` from any of these signatures because
# otherwise they would be matched against the `kwargs`, either leading to
# overlapping overloads of incompatible return types, or a concrete
# implementation that does not accept all overloaded variant signatures.
# Although it is possible to reorder the variants to fix the former, it will
# always lead to the latter, as then the omitted parameter could be typed as
# a `SearchStrategy`, which the concrete implementation does not accept.
#
# Omitted `targets` parameters, where the default value is used, are typed with
# a special `_OmittedArgument` type. We cannot type them as `tuple[()]`, because
# `tuple[()]` is a subtype of `Sequence[Bundle[Ex]]`, leading to signature
# overlaps with incompatible return types. The `_OmittedArgument` type will never be
# encountered at runtime, and exists solely to annotate the default of `targets`.
# PEP 661 (Sentinel Values) might provide a more elegant alternative in the future.
#
# We could've also annotated `targets` as `tuple[_OmittedArgument]`, but then when
# both `target` and `targets` are provided, mypy describes the type error as an
# invalid argument type for `targets` (expected `tuple[_OmittedArgument]`, got ...).
# By annotating it as a bare `_OmittedArgument` type, mypy's error will warn that
# there is no overloaded signature matching the call, which is more descriptive.
#
# When `target` xor `targets` is provided, the function to decorate must return
# a value whose type matches the one stored in the bundle. When neither are
# provided, the function to decorate must return nothing. There is no variant
# for providing `target` and `targets`, as these parameters are mutually exclusive.
@overload
def rule(
*,
targets: Sequence[Bundle[Ex]],
target: None = ...,
**kwargs: SearchStrategy,
) -> _RuleWrapper[Ex]: # pragma: no cover
...
@overload
def rule(
*, target: Bundle[Ex], targets: _OmittedArgument = ..., **kwargs: SearchStrategy
) -> _RuleWrapper[Ex]: # pragma: no cover
...
@overload
def rule(
*,
target: None = ...,
targets: _OmittedArgument = ...,
**kwargs: SearchStrategy,
) -> Callable[[Callable[..., None]], Callable[..., None]]: # pragma: no cover
...
def rule(
*,
targets: Sequence[Bundle[Ex]] | _OmittedArgument = (),
target: Bundle[Ex] | None = None,
**kwargs: SearchStrategy,
) -> _RuleWrapper[Ex] | Callable[[Callable[..., None]], Callable[..., None]]:
"""Decorator for RuleBasedStateMachine. Any Bundle present in ``target`` or
``targets`` will define where the end result of this function should go. If
both are empty then the end result will be discarded.
``target`` must be a Bundle, or if the result should be replicated to multiple
bundles you can pass a tuple of them as the ``targets`` argument.
It is invalid to use both arguments for a single rule. If the result
should go to exactly one of several bundles, define a separate rule for
each case.
kwargs then define the arguments that will be passed to the function
invocation. If their value is a Bundle, or if it is ``consumes(b)``
where ``b`` is a Bundle, then values that have previously been produced
for that bundle will be provided. If ``consumes`` is used, the value
will also be removed from the bundle.
Any other kwargs should be strategies and values from them will be
provided.
"""
converted_targets = _convert_targets(targets, target)
for k, v in kwargs.items():
check_strategy(v, name=k)
def accept(f):
if getattr(f, INVARIANT_MARKER, None):
raise InvalidDefinition(
f"{_rule_qualname(f)} is used with both @rule and @invariant, "
"which is not allowed. A function may be either a rule or an "
"invariant, but not both."
)
existing_rule = getattr(f, RULE_MARKER, None)
existing_initialize_rule = getattr(f, INITIALIZE_RULE_MARKER, None)
if existing_rule is not None:
raise InvalidDefinition(
f"{_rule_qualname(f)} has been decorated with @rule twice, which is "
"not allowed."
)
if existing_initialize_rule is not None:
raise InvalidDefinition(
f"{_rule_qualname(f)} has been decorated with both @rule and "
"@initialize, which is not allowed."
)
preconditions = getattr(f, PRECONDITIONS_MARKER, ())
rule = Rule(
targets=converted_targets,
arguments=kwargs,
function=f,
preconditions=preconditions,
)
@proxies(f)
def rule_wrapper(*args, **kwargs):
return f(*args, **kwargs)
setattr(rule_wrapper, RULE_MARKER, rule)
return rule_wrapper
return accept
# See also comments of `rule`'s overloads.
@overload
def initialize(
*,
targets: Sequence[Bundle[Ex]],
target: None = ...,
**kwargs: SearchStrategy,
) -> _RuleWrapper[Ex]: # pragma: no cover
...
@overload
def initialize(
*, target: Bundle[Ex], targets: _OmittedArgument = ..., **kwargs: SearchStrategy
) -> _RuleWrapper[Ex]: # pragma: no cover
...
@overload
def initialize(
*,
target: None = ...,
targets: _OmittedArgument = ...,
**kwargs: SearchStrategy,
) -> Callable[[Callable[..., None]], Callable[..., None]]: # pragma: no cover
...
def initialize(
*,
targets: Sequence[Bundle[Ex]] | _OmittedArgument = (),
target: Bundle[Ex] | None = None,
**kwargs: SearchStrategy,
) -> _RuleWrapper[Ex] | Callable[[Callable[..., None]], Callable[..., None]]:
"""Decorator for RuleBasedStateMachine.
An initialize decorator behaves like a rule, but all ``@initialize()`` decorated
methods will be called before any ``@rule()`` decorated methods, in an arbitrary
order. Each ``@initialize()`` method will be called exactly once per run, unless
one raises an exception - after which only the ``.teardown()`` method will be run.
``@initialize()`` methods may not have preconditions.
"""
converted_targets = _convert_targets(targets, target)
for k, v in kwargs.items():
check_strategy(v, name=k)
def accept(f):
if getattr(f, INVARIANT_MARKER, None):
raise InvalidDefinition(
f"{_rule_qualname(f)} is used with both @initialize and @invariant, "
"which is not allowed. A function may be either an initialization "
"rule or an invariant, but not both."
)
existing_rule = getattr(f, RULE_MARKER, None)
existing_initialize_rule = getattr(f, INITIALIZE_RULE_MARKER, None)
if existing_rule is not None:
raise InvalidDefinition(
f"{_rule_qualname(f)} has been decorated with both @rule and "
"@initialize, which is not allowed."
)
if existing_initialize_rule is not None:
raise InvalidDefinition(
f"{_rule_qualname(f)} has been decorated with @initialize twice, "
"which is not allowed."
)
preconditions = getattr(f, PRECONDITIONS_MARKER, ())
if preconditions:
raise InvalidDefinition(
f"{_rule_qualname(f)} has been decorated with both @initialize and "
"@precondition, which is not allowed. An initialization rule "
"runs unconditionally and may not have a precondition."
)
rule = Rule(
targets=converted_targets,
arguments=kwargs,
function=f,
preconditions=preconditions,
)
@proxies(f)
def rule_wrapper(*args, **kwargs):
return f(*args, **kwargs)
setattr(rule_wrapper, INITIALIZE_RULE_MARKER, rule)
return rule_wrapper
return accept
@dataclass(slots=True, frozen=True)
| MultipleResults |
python | pytorch__pytorch | test/test_fx_passes.py | {
"start": 21124,
"end": 21652
} | class ____:
@staticmethod
def forward(x):
x = x + 1
# target subgraph to match
y1 = x.relu()
y2 = x.sigmoid()
return y1, y2
@staticmethod
def pattern(a):
b1 = a.relu()
b2 = a.sigmoid()
return b1, b2
test_cases = [
# match_output, match_placeholder, num_matches
TestCase(False, False, 1),
TestCase(True, False, 1),
TestCase(False, True, 0),
TestCase(True, True, 0)
]
| MultipleOutputsHorizontalPattern |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/view_ignore_conflict/package.py | {
"start": 228,
"end": 684
} | class ____(Package):
"""Installs a file in <prefix>/bin/x, conflicting with the file <dep>/bin/x in a view. In
a view, we should find this package's file, not the dependency's file."""
has_code = False
version("0.1.0")
depends_on("view-file")
def install(self, spec, prefix):
os.mkdir(os.path.join(prefix, "bin"))
with open(os.path.join(prefix, "bin", "x"), "wb") as f:
f.write(b"file")
| ViewIgnoreConflict |
python | pytorch__pytorch | torch/_higher_order_ops/auto_functionalize.py | {
"start": 1911,
"end": 2501
} | class ____(ViewInfo):
size: Sequence[Union[int, torch.SymInt]]
stride: Sequence[Union[int, torch.SymInt]]
storage_offset: int
def __init__(self, base_index, size, stride, storage_offset):
super().__init__(base_index)
self.size = size
self.stride = stride
self.storage_offset = storage_offset
def regenerate_view(self, bases_list: list[Tensor]):
return torch.as_strided(
bases_list[self.base_index],
self.size,
self.stride,
self.storage_offset,
)
@dataclass
| AsStridedViewInfo |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-opendal/llama_index/readers/opendal/azblob/base.py | {
"start": 327,
"end": 2244
} | class ____(BaseReader):
"""General reader for any Azblob file or directory."""
def __init__(
self,
container: str,
path: str = "/",
endpoint: str = "",
account_name: str = "",
account_key: str = "",
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
) -> None:
"""
Initialize Azblob container, along with credentials if needed.
If key is not set, the entire bucket (filtered by prefix) is parsed.
Args:
container (str): the name of your azblob bucket
path (str): the path of the data. If none is provided,
this loader will iterate through the entire bucket. If path is endswith `/`, this loader will iterate through the entire dir. Otherwise, this loeader will load the file.
endpoint Optional[str]: the endpoint of the azblob service.
account_name (Optional[str]): provide azblob access key directly.
account_key (Optional[str]): provide azblob access key directly.
file_extractor (Optional[Dict[str, BaseReader]]): A mapping of file
extension to a BaseReader class that specifies how to convert that file
to text. See `SimpleDirectoryReader` for more details.
"""
super().__init__()
self.path = path
self.file_extractor = file_extractor
# opendal service related config.
self.options = {
"container": container,
"endpoint": endpoint,
"account_name": account_name,
"account_key": account_key,
}
def load_data(self) -> List[Document]:
"""Load file(s) from OpenDAL."""
loader = OpendalReader(
scheme="azblob",
path=self.path,
file_extractor=self.file_extractor,
**self.options,
)
return loader.load_data()
| OpendalAzblobReader |
python | keras-team__keras | keras/src/constraints/constraints.py | {
"start": 3865,
"end": 4150
} | class ____(Constraint):
"""Constrains the weights to be non-negative."""
def __call__(self, w):
w = backend.convert_to_tensor(w)
return ops.multiply(w, ops.greater_equal(w, 0.0))
@keras_export(["keras.constraints.UnitNorm", "keras.constraints.unit_norm"])
| NonNeg |
python | yandexdataschool__Practical_RL | week07_seq2seq/voc.py | {
"start": 21,
"end": 2581
} | class ____:
def __init__(self, tokens, bos="__BOS__", eos="__EOS__", sep=''):
"""
A special class that handles tokenizing and detokenizing
"""
assert bos in tokens, eos in tokens
self.tokens = tokens
self.token_to_ix = {t: i for i, t in enumerate(tokens)}
self.bos = bos
self.bos_ix = self.token_to_ix[bos]
self.eos = eos
self.eos_ix = self.token_to_ix[eos]
self.sep = sep
def __len__(self):
return len(self.tokens)
@staticmethod
def from_lines(lines, bos="__BOS__", eos="__EOS__", sep=''):
flat_lines = sep.join(list(lines))
flat_lines = list(flat_lines.split(sep)) if sep else list(flat_lines)
tokens = sorted(set(sep.join(flat_lines)))
tokens = [t for t in tokens if t not in (bos, eos) and len(t) != 0]
tokens = [bos, eos] + tokens
return Vocab(tokens, bos, eos, sep)
def tokenize(self, string):
"""converts string to a list of tokens"""
tokens = list(filter(len, string.split(self.sep))) \
if self.sep != '' else list(string)
return [self.bos] + tokens + [self.eos]
def to_matrix(self, lines, max_len=None):
"""
convert variable length token sequences into fixed size matrix
example usage:
>>>print( as_matrix(words[:3],source_to_ix))
[[15 22 21 28 27 13 -1 -1 -1 -1 -1]
[30 21 15 15 21 14 28 27 13 -1 -1]
[25 37 31 34 21 20 37 21 28 19 13]]
"""
max_len = max_len or max(map(len, lines)) + 2 # 2 for bos and eos
matrix = np.zeros((len(lines), max_len), dtype='int32') + self.eos_ix
for i, seq in enumerate(lines):
tokens = self.tokenize(seq)
row_ix = list(map(self.token_to_ix.get, tokens))[:max_len]
matrix[i, :len(row_ix)] = row_ix
return matrix
def to_lines(self, matrix, crop=True):
"""
Convert matrix of token ids into strings
:param matrix: matrix of tokens of int32, shape=[batch,time]
:param crop: if True, crops BOS and EOS from line
:return:
"""
lines = []
for line_ix in map(list, matrix):
if crop:
if line_ix[0] == self.bos_ix:
line_ix = line_ix[1:]
if self.eos_ix in line_ix:
line_ix = line_ix[:line_ix.index(self.eos_ix)]
line = self.sep.join(self.tokens[i] for i in line_ix)
lines.append(line)
return lines
| Vocab |
python | joke2k__faker | tests/providers/test_geo.py | {
"start": 4324,
"end": 4967
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("pl_PL")
Faker.seed(0)
def test_location_on_land(self):
loc = self.fake.location_on_land()
assert isinstance(loc, tuple)
assert len(loc) == 5
assert Decimal(loc[0]) # Should be able to cast first two elements of tuple to Decimal
assert Decimal(loc[1])
assert isinstance(loc[2], str) # Place is a string
assert isinstance(loc[3], str) # Country code is a string
assert len(loc[3]) == 2 # Country code is two letters
assert isinstance(loc[4], str) # Timezone is a string
| TestPlPl |
python | huggingface__transformers | src/transformers/models/mistral/modeling_mistral.py | {
"start": 21351,
"end": 21647
} | class ____(GenericForQuestionAnswering, MistralPreTrainedModel): ...
__all__ = [
"MistralForCausalLM",
"MistralForQuestionAnswering",
"MistralModel",
"MistralPreTrainedModel",
"MistralForSequenceClassification",
"MistralForTokenClassification",
]
| MistralForQuestionAnswering |
python | pandas-dev__pandas | pandas/tests/frame/indexing/test_getitem.py | {
"start": 6887,
"end": 7803
} | class ____:
def test_getitem_callable(self, float_frame):
# GH#12533
result = float_frame[lambda x: "A"]
expected = float_frame.loc[:, "A"]
tm.assert_series_equal(result, expected)
result = float_frame[lambda x: ["A", "B"]]
expected = float_frame.loc[:, ["A", "B"]]
tm.assert_frame_equal(result, float_frame.loc[:, ["A", "B"]])
df = float_frame[:3]
result = df[lambda x: [True, False, True]]
expected = float_frame.iloc[[0, 2], :]
tm.assert_frame_equal(result, expected)
def test_loc_multiindex_columns_one_level(self):
# GH#29749
df = DataFrame([[1, 2]], columns=[["a", "b"]])
expected = DataFrame([1], columns=[["a"]])
result = df["a"]
tm.assert_frame_equal(result, expected)
result = df.loc[:, "a"]
tm.assert_frame_equal(result, expected)
| TestGetitemCallable |
python | google__jax | tests/lax_scipy_spectral_dac_test.py | {
"start": 850,
"end": 2091
} | class ____(jtu.JaxTestCase):
@jtu.sample_product(
linear_size=linear_sizes,
dtype=jtu.dtypes.floating + jtu.dtypes.complex,
termination_size=[1, 19],
)
def test_spectral_dac_eigh(self, linear_size, dtype, termination_size):
if not jtu.test_device_matches(["tpu"]) and termination_size != 1:
raise unittest.SkipTest(
"Termination sizes greater than 1 only work on TPU")
rng = self.rng()
H = rng.randn(linear_size, linear_size)
H = jnp.array(0.5 * (H + H.conj().T)).astype(dtype)
if jnp.dtype(dtype).name in ("bfloat16", "float16"):
self.assertRaises(
NotImplementedError, lax_eigh.eigh, H)
return
evs, V = lax_eigh.eigh(H, termination_size=termination_size)
ev_exp, _ = jnp.linalg.eigh(H)
HV = jnp.dot(H, V, precision=lax.Precision.HIGHEST)
vV = evs.astype(V.dtype)[None, :] * V
eps = jnp.finfo(H.dtype).eps
atol = jnp.linalg.norm(H) * eps
self.assertAllClose(ev_exp, jnp.sort(evs), atol=20 * atol)
self.assertAllClose(
HV, vV, atol=atol * (140 if jnp.issubdtype(dtype, jnp.complexfloating)
else 40))
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| LaxScipySpectralDacTest |
python | Pylons__pyramid | src/pyramid/events.py | {
"start": 5587,
"end": 6309
} | class ____:
"""
An instance of this class is emitted as an :term:`event` after the
:app:`Pyramid` :term:`router` has attempted to find a :term:`route` object
but before any traversal or view code is executed. The instance has an
attribute, ``request``, which is the request object generated by
:app:`Pyramid`.
Notably, the request object **may** have an attribute named
``matched_route``, which is the matched route if found. If no route
matched, this attribute is not available.
This class implements the :class:`pyramid.interfaces.IBeforeTraversal`
interface.
"""
def __init__(self, request):
self.request = request
@implementer(IContextFound)
| BeforeTraversal |
python | pytorch__pytorch | torchgen/gen_functionalization_type.py | {
"start": 31238,
"end": 47241
} | class ____:
g: NativeFunctionsViewGroup
f: NativeFunction
@property
def is_multi_output(self) -> bool:
return functionalization.is_multi_output(self.f.func)
@property
def is_as_strided(self) -> bool:
return str(self.f.func.name) == "as_strided"
@property
def out_index(self) -> str:
if self.is_multi_output:
return functionalization.out_index_binding.name
return "0"
@property
def classname(self) -> str:
return functionalization.classname(self.f.func)
def decl(self) -> list[str]:
base_ctor_arguments = functionalization.base_ctor_arguments(self.f.func)
extra_ctor_arguments = functionalization.extra_ctor_arguments(self.f.func)
attributes = functionalization.attributes(self.f.func)
# List of types for declaring the `SerializableTuple` type.
serializable_tuple_args = ",\n".join(
f" {binding.type} /* {binding.name} */"
for binding in (base_ctor_arguments + attributes)
)
# Arguments used for forwarding the tuple elements to the constructor.
destructure_tuple_args = ", ".join(
f"std::get<{i}>(tpl)"
for i in range(len(base_ctor_arguments) + len(extra_ctor_arguments))
)
# List of constructor parameters
ctor_parameters = ", ".join(
binding.decl() for binding in (base_ctor_arguments + extra_ctor_arguments)
)
# Call the base class `ViewMeta` constructor.
#
# Both of `is_multi_output` and `is_as_strided` are known values, given the
# operation schema.
is_multi_output_str = str(self.is_multi_output).lower()
is_as_strided_str = str(self.is_as_strided).lower()
base_ctor_bindings = ", ".join(
[
# `has_symbolic_inputs` is always taken as parameter.
functionalization.has_symbolic_inputs_binding.name,
f"/*is_multi_output=*/{is_multi_output_str}",
f"/*is_as_strided=*/{is_as_strided_str}",
# `out_index` is know if the operation returns only one value. Otherwise,
# we also take it as parameter.
f"/*out_index=*/{self.out_index}",
]
)
# Assignments of `extra_ctor_arguments` to their corresponding fields.
# These are extra fields to-be-declared in this specialization.
#
# We need to set `allow_expensive_conversions`, since we are storing owned versions
# of the non-owning arguments.
ctor_assignments = ",\n".join(
f" {e.type.name}({e.expr})"
for e in translate(
extra_ctor_arguments,
attributes,
method=False,
allow_expensive_conversions=True,
)
)
# List of arguments for constructing the `SerializableTuple` from an instance.
tuple_arguments = ", ".join(
binding.name for binding in (base_ctor_arguments + attributes)
)
# List of field declarations.
attr_declarations = "\n".join(f" {binding.decl()};" for binding in attributes)
# Override `to_out_index` if this operation returns more than 1 value.
to_out_index_decl = ""
if self.is_multi_output:
to_out_index_decl = (
" std::shared_ptr<ViewMeta> to_out_index(int64_t out_idx) override;"
)
return [
f"""
struct TORCH_API {self.classname} : public ViewMeta {{
FUNCTIONALIZATION_VIEWMETA_NAME({self.classname})
FUNCTIONALIZATION_VIEWMETA_SERIALIZABLE_TUPLE(\n{serializable_tuple_args});
{self.classname}(const SerializableTuple& tpl)
: {self.classname}({destructure_tuple_args}) {{}}
{self.classname}({ctor_parameters})
: at::functionalization::ViewMeta({base_ctor_bindings}),
{ctor_assignments} {{}}
Tensor forward(const Tensor& base) override;
Tensor reverse(const Tensor& base, const Tensor& mutated_view) override;
{to_out_index_decl}
SerializableTuple to_serializable_tuple() {{
return std::make_tuple({tuple_arguments});
}}
{attr_declarations}
}};
"""
]
# Generate a call to the actual operation.
def opcall(self, is_reverse: bool, reapply_views: bool) -> str:
opname = functionalization.name(
self.g,
is_reverse=is_reverse,
include_namespace=True,
reapply_views=reapply_views,
)
# Expected arguments for the operation.
assert self.g.view_copy is not None
op_arguments = functionalization.op_arguments(self.g.view_copy.func, is_reverse)
# The context is composed by the constructor arguments (which are also
# the field variables stored in the instance), and the `base` tensor.
context = [functionalization.base_binding]
context += functionalization.base_ctor_arguments(self.f.func)
context += functionalization.attributes(self.f.func)
# If we are generating the call for the reverse function, we also have
# access to `mutated_view` argument.
if is_reverse:
context.append(functionalization.mutated_view_binding)
arguments = ", ".join(
[e.expr for e in translate(context, op_arguments, method=False)]
)
# Index the result if this operation returns multiple values.
maybe_index = ""
if not is_reverse and self.is_multi_output:
maybe_index = f"[{self.out_index}]"
return f"{opname}({arguments}){maybe_index}"
def impl(self) -> list[str]:
functions = [
f"""
at::Tensor {self.classname}::forward(const at::Tensor& base) {{
if (reapply_views) {{
return {self.opcall(is_reverse=False, reapply_views=True)};
}} else {{
return {self.opcall(is_reverse=False, reapply_views=False)};
}}
}}""",
f"""
at::Tensor {self.classname}::reverse(const at::Tensor& base, const Tensor& mutated_view) {{
return {self.opcall(is_reverse=True, reapply_views=True)};
}}""",
]
# If this operation returns multiple values, also generate a `to_out_index`
# implementation.
if self.is_multi_output:
functions.append(f"""
std::shared_ptr<at::functionalization::ViewMeta> {self.classname}::to_out_index(int64_t out_index) {{
return {self.new("out_index")};
}}
""")
return functions
# Create the Python binding for this specialized class.
def binding(self) -> list[str]:
name = functionalization.classname(self.f.func, with_namespace=True)
return [f" create_binding_with_pickle<{name}>(functionalization);"]
# Generate an instantiation of this specialized class.
def new(self, out_index: str = "0") -> str:
name = functionalization.classname(self.f.func, with_namespace=True)
ctor_arguments = functionalization.base_ctor_arguments(
self.f.func
) + functionalization.extra_ctor_arguments(self.f.func)
# Replace the `out_index` parameter with the given `out_index`.
arguments = ", ".join(
binding.name if binding.name != "out_index" else out_index
for binding in ctor_arguments
)
return f"std::make_shared<{name}>({arguments})"
# Run the function `run` for both: `view` and `view_inplace` functions.
@staticmethod
def map(
g: NativeFunctionsViewGroup, run: Callable[[ViewMetaSpecialization], list[str]]
) -> list[str]:
def maybe_run(f: NativeFunction | None) -> list[str]:
if f is None:
return []
with native_function_manager(f):
return run(ViewMetaSpecialization(g, f))
return list(concatMap(maybe_run, (g.view, g.view_inplace)))
def gen_functionalization_view_meta_classes_base(
selector: SelectiveBuilder,
g: NativeFunctionsViewGroup,
run: Callable[[ViewMetaSpecialization], list[str]],
) -> list[str]:
if not selector.include_all_operators:
return []
if g.composite:
return []
return ViewMetaSpecialization.map(g, run)
def gen_functionalization_view_meta_classes_decl(
selector: SelectiveBuilder, g: NativeFunctionsViewGroup
) -> list[str]:
return gen_functionalization_view_meta_classes_base(
selector, g, ViewMetaSpecialization.decl
)
def gen_functionalization_view_meta_classes_impl(
selector: SelectiveBuilder, g: NativeFunctionsViewGroup
) -> list[str]:
return gen_functionalization_view_meta_classes_base(
selector, g, ViewMetaSpecialization.impl
)
def gen_functionalization_view_meta_classes_binding(
selector: SelectiveBuilder, g: NativeFunctionsViewGroup
) -> list[str]:
return gen_functionalization_view_meta_classes_base(
selector, g, ViewMetaSpecialization.binding
)
# Generates the Python bindings for the `ViewMeta` specialized classes.
def gen_functionalization_view_meta_classes(
native_functions_path: str,
tags_path: str,
selector: SelectiveBuilder,
install_dir: str,
template_dir: str,
) -> None:
from torchgen.gen import get_grouped_by_view_native_functions, parse_native_yaml
# Parse the native_functions.yaml.
# Then, group them into `NativeFunctionsViewGroup`.
#
# This is the same steps we do in gen.py (ATen codegen).
native_functions = parse_native_yaml(
native_functions_path, tags_path
).native_functions
native_functions_with_view_groups = get_grouped_by_view_native_functions(
native_functions
)
view_groups = [
g
for g in native_functions_with_view_groups
if isinstance(g, NativeFunctionsViewGroup)
]
fm = FileManager(install_dir=install_dir, template_dir=template_dir, dry_run=False)
fm.write(
"ViewMetaClassesPythonBinding.cpp",
lambda: {
"view_meta_bindings": list(
concatMap(
lambda g: gen_functionalization_view_meta_classes_binding(
selector, g
),
view_groups,
)
),
},
)
def gen_functionalization_registration(
selector: SelectiveBuilder,
g: NativeFunction | NativeFunctionsGroup | NativeFunctionsViewGroup,
composite_implicit_autograd_index: BackendIndex,
) -> list[str]:
@with_native_function
def emit_registration_helper(f: NativeFunction) -> str:
if f.has_composite_implicit_autograd_kernel:
metadata = composite_implicit_autograd_index.get_kernel(f)
assert metadata is not None
native_api_name = metadata.kernel
sig = NativeSignature(f.func, symint=metadata.supports_symint())
# Note [Composite view ops in the functionalization pass]
# We don't need to worry about implemententing functionalization kernels for views with
# CompositeImplicitAutograd kernels, because we can just decompose them into their base operators.
# We can't just opt the entire Functionalization dispatch key into the composite keyset though,
# because we don't want to decompose non-view ops that are composite, like `at::ones`.
registration_str = (
f"static_cast<{sig.ptr_type()}>(at::native::{native_api_name})"
)
else:
# non-composite view ops (and inplace ops) get a normal registration.
registration_str = f"TORCH_FN(functionalization::{wrapper_name(f.func)})"
return f'm.impl("{f.func.name}", {registration_str});'
# Don't generate kernels in mobile build
if not selector.include_all_operators:
return []
if isinstance(g, NativeFunctionsViewGroup):
# functionalization needs to register kernels for view + view_inplace ops
# See Note [Functionalization <> torch.Tensor constructor]
if str(g.view.func.name) == "lift_fresh":
return []
view_str = []
view_str.append(emit_registration_helper(g.view))
if g.view_inplace is not None:
assert g.view_inplace.is_view_op
view_str.append(emit_registration_helper(g.view_inplace))
return view_str
elif isinstance(g, NativeFunctionsGroup):
# Gets a hand-written functionalization kernel
if g.inplace is not None and str(g.inplace.func.name) == "set_.source_Tensor":
fns = []
else:
fns = list(g.functions())
else:
if str(g.func.name) in MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION:
return []
fns = [g]
registrations = []
for f in fns:
if f.has_composite_implicit_autograd_kernel:
continue
if str(f.func.name) == "lift":
# See Note [Functionalization <> torch.Tensor constructor]
return []
if str(f.func.name) == "resize_":
# See Note [resize_ in Functionalization]
return []
if str(f.func.name.name) != "set_":
assert not f.is_view_op
# functionalization needs to generate and register kernels for inplace ops.
# We *also* need to directly register CompositeImplicitAUtograd kernels
# so that they decompose properly before functioanlization.
if modifies_arguments(f):
registrations.append(emit_registration_helper(f))
return registrations
def gen_functionalization_definition(
selector: SelectiveBuilder,
# Note: Ideally this code should never have to look at NativeFunction
# (and instead only need to operate on grouped NativeFunctions).
# The only reason currently is because we need to emit direct dispatch registrations
# For CompositeImplicitAutograd operators, which are potentially ungrouped.
g: NativeFunction | NativeFunctionsGroup | NativeFunctionsViewGroup,
) -> list[str]:
# Don't generate kernels in mobile build
if not selector.include_all_operators:
return []
if isinstance(g, NativeFunctionsViewGroup):
# Case 1: emit view -> view_copy kernels for the functionalization pass
view_defs = []
if not g.composite:
# invariant: NativeFunctionsViewGroup's always have a view_copy operator
# if the view is not composite (implicit autograd)
assert g.view_copy is not None, dataclass_repr(g, indent=1)
view_defs.append(emit_view_functionalization_body(g, view_inplace=False))
if g.view_inplace is not None:
view_defs.append(emit_view_functionalization_body(g, view_inplace=True))
return view_defs
elif isinstance(g, NativeFunction):
# Invariant: all mutable operators that we need to handle in functionalization
# should have been properly grouped up.
# TODO: The below ops all have "problematic" schemas that prevent them from
# getting functionalized. Instead of bending over backwards to get things to work,
# I think we should either:
# (1) fix their schemas (BC-breaking)
# (2) hand-write their functionalization kernels
if (
str(g.func.name) not in MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION
and str(g.func.name.name) not in MUTABLE_OPS_NOT_USING_FUNCTIONALIZATION
):
assert g.has_composite_implicit_autograd_kernel or not modifies_arguments(g)
return []
else:
# Case 2: emit inplace -> out-of-place kernels for the functionalization pass
mutation_defs = []
mutation_defs.append(emit_inplace_functionalization_body(g.out, g))
if g.inplace is not None:
mutation_defs.append(emit_inplace_functionalization_body(g.inplace, g))
if g.mutable is not None:
mutation_defs.append(emit_inplace_functionalization_body(g.mutable, g))
return mutation_defs
return []
| ViewMetaSpecialization |
python | dagster-io__dagster | python_modules/dagster-test/dagster_test/test_project/__init__.py | {
"start": 4467,
"end": 6937
} | class ____(RemoteJob):
def __init__(
self, remote_job: RemoteJob, container_image=None, container_context=None, filename=None
):
self._container_image = container_image
self._container_context = container_context
self._filename = filename or "repo.py"
super().__init__(
remote_job.job_data_snap,
remote_job.repository_handle,
)
def get_python_origin(self):
"""Hack! Inject origin that the k8s images will use. The BK image uses a different directory
structure (/workdir/python_modules/dagster-test/dagster_test/test_project) than the images
inside the kind cluster (/dagster_test/test_project). As a result the normal origin won't
work, we need to inject this one.
"""
return JobPythonOrigin(
self._job_index.name,
RepositoryPythonOrigin(
executable_path="python",
code_pointer=FileCodePointer(
f"/dagster_test/test_project/test_jobs/{self._filename}",
"define_demo_execution_repo",
),
container_image=self._container_image,
entry_point=DEFAULT_DAGSTER_ENTRY_POINT,
container_context=self._container_context,
),
)
def get_remote_origin(self) -> RemoteJobOrigin:
"""Hack! Inject origin that the k8s images will use. The BK image uses a different directory
structure (/workdir/python_modules/dagster-test/dagster_test/test_project) than the images
inside the kind cluster (/dagster_test/test_project). As a result the normal origin won't
work, we need to inject this one.
"""
return RemoteJobOrigin(
repository_origin=RemoteRepositoryOrigin(
code_location_origin=InProcessCodeLocationOrigin(
loadable_target_origin=LoadableTargetOrigin(
executable_path="python",
python_file=f"/dagster_test/test_project/test_jobs/{self._filename}",
attribute="define_demo_execution_repo",
),
container_image=self._container_image,
entry_point=DEFAULT_DAGSTER_ENTRY_POINT,
),
repository_name="demo_execution_repo",
),
job_name=self._job_index.name,
)
| ReOriginatedExternalJobForTest |
python | walkccc__LeetCode | solutions/2083. Substrings That Begin and End With the Same Letter/2083.py | {
"start": 0,
"end": 188
} | class ____:
def numberOfSubstrings(self, s: str) -> int:
ans = 0
count = collections.Counter()
for c in s:
ans += count[c] + 1
count[c] += 1
return ans
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-yandex-metrica/source_yandex_metrica/streams.py | {
"start": 9323,
"end": 9422
} | class ____(IncrementalYandexMetricaStream):
primary_key = "visitID"
_source = "visits"
| Sessions |
python | tensorflow__tensorflow | tensorflow/python/distribute/cluster_resolver/slurm_cluster_resolver_test.py | {
"start": 1151,
"end": 8352
} | class ____(test.TestCase):
def test_expand_hostlist(self):
self.assertEqual(expand_hostlist('n1'), ['n1'])
self.assertEqual(expand_hostlist('n[1,3]'), ['n1', 'n3'])
self.assertEqual(expand_hostlist('n[1-3]'), ['n1', 'n2', 'n3'])
self.assertEqual(
expand_hostlist('n[1-2],m5,o[3-4,6,7-9]'),
['n1', 'n2', 'm5', 'o3', 'o4', 'o6', 'o7', 'o8', 'o9'])
self.assertEqual(
expand_hostlist('n[0001-0003],m5,o[009-011]'),
['n0001', 'n0002', 'n0003', 'm5', 'o009', 'o010', 'o011'])
def test_expand_tasks_per_node(self):
self.assertEqual(expand_tasks_per_node('2'), [2])
self.assertEqual(expand_tasks_per_node('2,1,3'), [2, 1, 3])
self.assertEqual(expand_tasks_per_node('3(x2),2,1'), [3, 3, 2, 1])
self.assertEqual(
expand_tasks_per_node('3(x2),2,11(x4)'), [3, 3, 2, 11, 11, 11, 11])
self.assertEqual(
expand_tasks_per_node('13(x10)'),
[13, 13, 13, 13, 13, 13, 13, 13, 13, 13])
def _verifyClusterSpecEquality(self, cluster_spec, expected_proto):
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
@mock.patch.dict(
os.environ, {
'SLURM_PROCID': '0',
'SLURM_STEP_NUM_TASKS': '3',
'SLURM_STEP_TASKS_PER_NODE': '1(x3)',
'SLURM_STEP_NODELIST': 't02n13,t02n41,t02n43',
'CUDA_VISIBLE_DEVICES': '0',
})
def testSimpleRetrievalFromEnv(self):
slurm_cluster_resolver = SlurmClusterResolver()
actual_cluster_spec = slurm_cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'worker' tasks { key: 0 value: 't02n13:8888' }
tasks { key: 1 value: 't02n41:8888' }
tasks { key: 2 value: 't02n43:8888' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
self.assertEqual(
slurm_cluster_resolver.master('worker', 0, rpc_layer='grpc'),
'grpc://t02n13:8888')
self.assertEqual(slurm_cluster_resolver.num_accelerators(), {'GPU': 1})
self.assertEqual(os.environ['CUDA_VISIBLE_DEVICES'], '0')
@mock.patch.dict(
os.environ, {
'SLURM_PROCID': '0',
'SLURM_STEP_NUM_TASKS': '3',
'SLURM_STEP_NODELIST': 't02n13,t02n41,t02n43',
})
def testSimpleSuccessfulRetrieval(self):
slurm_cluster_resolver = SlurmClusterResolver(
jobs={
'ps': 1,
'worker': 2
},
port_base=8888,
tasks_per_node=1,
gpus_per_node=1,
gpus_per_task=1,
auto_set_gpu=False)
actual_cluster_spec = slurm_cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'ps' tasks { value: 't02n13:8888' } }
job { name: 'worker' tasks { key: 0 value: 't02n41:8888' }
tasks { key: 1 value: 't02n43:8888' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
@mock.patch.dict(
os.environ, {
'SLURM_PROCID': '0',
'SLURM_STEP_NUM_TASKS': '3',
'SLURM_STEP_NODELIST': 't02n13,t02n41,t02n43',
})
def testSimpleMasterRetrieval(self):
slurm_cluster_resolver = SlurmClusterResolver(
jobs={
'ps': 1,
'worker': 2
},
port_base=8888,
tasks_per_node=1,
gpus_per_node=1,
gpus_per_task=1,
auto_set_gpu=False)
slurm_cluster_resolver.task_type = 'worker'
slurm_cluster_resolver.task_id = 1
self.assertEqual(slurm_cluster_resolver.master(), 'grpc://t02n43:8888')
slurm_cluster_resolver.rpc_layer = 'ab'
self.assertEqual(slurm_cluster_resolver.master('ps', 0), 'ab://t02n13:8888')
self.assertEqual(
slurm_cluster_resolver.master('ps', 0, rpc_layer='test'),
'test://t02n13:8888')
@mock.patch.dict(
os.environ, {
'SLURM_PROCID': '0',
'SLURM_STEP_NUM_TASKS': '3',
'SLURM_STEP_TASKS_PER_NODE': '1(x3)',
'SLURM_STEP_NODELIST': 't02n13,t02n41,t02n43',
})
def testTaskPerNodeNotSetRetrieval(self):
slurm_cluster_resolver = SlurmClusterResolver(
jobs={
'ps': 1,
'worker': 2
},
port_base=8888,
gpus_per_node=1,
gpus_per_task=1,
auto_set_gpu=False)
actual_cluster_spec = slurm_cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'ps' tasks { value: 't02n13:8888' } }
job { name: 'worker' tasks { key: 0 value: 't02n41:8888' }
tasks { key: 1 value: 't02n43:8888' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
@mock.patch.dict(
os.environ, {
'SLURM_PROCID': '1',
'SLURM_STEP_NUM_TASKS': '5',
'SLURM_STEP_TASKS_PER_NODE': '2(x2),1',
'SLURM_STEP_NODELIST': 't02n13,t02n41,t02n43',
'CUDA_VISIBLE_DEVICES': '',
})
def testMultiTaskPerNodeRetrieval(self):
slurm_cluster_resolver = SlurmClusterResolver(
jobs={
'ps': 1,
'worker': 4
},
port_base=8888,
gpus_per_node=2,
gpus_per_task=1,
auto_set_gpu=True)
actual_cluster_spec = slurm_cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'ps' tasks { value: 't02n13:8888' } }
job { name: 'worker' tasks { key: 0 value: 't02n13:8889' }
tasks { key: 1 value: 't02n41:8888' }
tasks { key: 2 value: 't02n41:8889' }
tasks { key: 3 value: 't02n43:8888' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
assert os.environ['CUDA_VISIBLE_DEVICES'] == '1'
@mock.patch.dict(
os.environ, {
'SLURM_PROCID': '1',
'SLURM_STEP_NUM_TASKS': '5',
'SLURM_STEP_TASKS_PER_NODE': '2(x2),1',
'SLURM_STEP_NODELIST': 't02n13,t02n41,t02n43',
'CUDA_VISIBLE_DEVICES': '',
})
def testMultipleGpusPerTaskRetrieval(self):
slurm_cluster_resolver = SlurmClusterResolver(
jobs={
'ps': 1,
'worker': 4
},
port_base=8888,
gpus_per_node=4,
gpus_per_task=2,
auto_set_gpu=True)
actual_cluster_spec = slurm_cluster_resolver.cluster_spec()
expected_proto = """
job { name: 'ps' tasks { value: 't02n13:8888' } }
job { name: 'worker' tasks { key: 0 value: 't02n13:8889' }
tasks { key: 1 value: 't02n41:8888' }
tasks { key: 2 value: 't02n41:8889' }
tasks { key: 3 value: 't02n43:8888' } }
"""
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
assert os.environ['CUDA_VISIBLE_DEVICES'] == '2,3'
if __name__ == '__main__':
test.main()
| SlurmClusterResolverTest |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/ui/structure.py | {
"start": 1177,
"end": 1542
} | class ____(BaseNodeResponse):
"""Node serializer for responses."""
children: list[NodeResponse] | None = None
is_mapped: bool | None = None
tooltip: str | None = None
setup_teardown_type: Literal["setup", "teardown"] | None = None
operator: str | None = None
asset_condition_type: Literal["or-gate", "and-gate"] | None = None
| NodeResponse |
python | kamyu104__LeetCode-Solutions | Python/count-special-quadruplets.py | {
"start": 651,
"end": 1074
} | class ____(object):
def countQuadruplets(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
lookup = collections.defaultdict(list)
for d in xrange(3, len(nums)):
for c in xrange(2, d):
lookup[nums[d]-nums[c]].append(c)
return sum(sum(b < c for c in lookup[nums[a]+nums[b]]) for b in xrange(1, len(nums)-2) for a in xrange(b))
| Solution2 |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_set.py | {
"start": 64605,
"end": 64772
} | class ____:
def __eq__(self, other):
if be_bad:
dict2.clear()
return self is other
def __hash__(self):
return 0
| bad_dict_clear |
python | plotly__plotly.py | plotly/graph_objs/scattergl/hoverlabel/_font.py | {
"start": 233,
"end": 17153
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattergl.hoverlabel"
_path_str = "scattergl.hoverlabel.font"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattergl.hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattergl.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergl.hoverlabel.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | ray-project__ray | python/ray/data/_internal/execution/ranker.py | {
"start": 2007,
"end": 2853
} | class ____(Ranker[Tuple[int, int]]):
"""Ranker implementation."""
def rank_operator(
self,
op: PhysicalOperator,
topology: "Topology",
resource_manager: ResourceManager,
) -> Tuple[int, int]:
"""Computes rank for op. *Lower means better rank*
1. Whether operator's could be throttled (int)
2. Operators' object store utilization
Args:
op: Operator to rank
topology: Current execution topology
resource_manager: Resource manager for usage information
Returns:
Rank (tuple) for operator
"""
throttling_disabled = 0 if op.throttling_disabled() else 1
return (
throttling_disabled,
resource_manager.get_op_usage(op).object_store_memory,
)
| DefaultRanker |
python | huggingface__transformers | src/transformers/models/florence2/modular_florence2.py | {
"start": 65129,
"end": 70238
} | class ____(LlavaModel):
_checkpoint_conversion_mapping = {}
def __init__(self, config: Florence2Config):
super().__init__(config)
self.vision_tower = Florence2VisionBackbone(config=config.vision_config)
def get_encoder(self, modality=None):
if modality is None:
return self.language_model.get_encoder()
else:
return super().get_encoder(modality=modality)
def get_image_features(self, pixel_values: torch.Tensor, **kwargs):
"""
Obtains image last hidden states from the vision tower and apply multimodal projection.
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`):
The tensors corresponding to the input images.
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
"""
image_features = self.vision_tower(pixel_values, **kwargs)
image_embeds = self.multi_modal_projector(image_features)
return image_embeds
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_outputs: Optional[list[torch.FloatTensor]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple, Florence2Seq2SeqModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None:
image_features = self.get_image_features(pixel_values)
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
encoder_outputs = self.language_model.encoder(
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
if decoder_input_ids is None:
decoder_start_token_id = self.config.text_config.decoder_start_token_id
decoder_input_ids = torch.ones((inputs_embeds.size()[0], 1), dtype=torch.long, device=inputs_embeds.device)
decoder_input_ids *= decoder_start_token_id
decoder_outputs = self.language_model.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
return_dict=True,
)
return Florence2Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
@auto_docstring(
custom_intro="""
Florence-2 is a vision model for captioning, detection, and segmentation.
"""
)
| Florence2Model |
python | etianen__django-reversion | reversion/models.py | {
"start": 8831,
"end": 13448
} | class ____(models.Model):
"""A saved version of a database model."""
objects = VersionQuerySet.as_manager()
revision = models.ForeignKey(
Revision,
on_delete=models.CASCADE,
help_text="The revision that contains this version.",
)
object_id = models.CharField(
max_length=191,
help_text="Primary key of the model under version control.",
)
content_type = models.ForeignKey(
ContentType,
on_delete=models.CASCADE,
help_text="Content type of the model under version control.",
)
@property
def _content_type(self):
return ContentType.objects.db_manager(self._state.db).get_for_id(self.content_type_id)
@property
def _model(self):
return self._content_type.model_class()
# A link to the current instance, not the version stored in this Version!
object = GenericForeignKey(
ct_field="content_type",
fk_field="object_id",
)
db = models.CharField(
max_length=191,
help_text="The database the model under version control is stored in.",
)
format = models.CharField(
max_length=255,
help_text="The serialization format used by this model.",
)
serialized_data = models.TextField(
help_text="The serialized form of this version of the model.",
)
object_repr = models.TextField(
help_text="A string representation of the object.",
)
@cached_property
def _object_version(self):
version_options = _get_options(self._model)
data = self.serialized_data
data = force_str(data.encode("utf8"))
try:
return list(serializers.deserialize(self.format, data, ignorenonexistent=True,
use_natural_foreign_keys=version_options.use_natural_foreign_keys))[0]
except DeserializationError:
raise RevertError(gettext("Could not load %(object_repr)s version - incompatible version data.") % {
"object_repr": self.object_repr,
})
except serializers.SerializerDoesNotExist:
raise RevertError(gettext("Could not load %(object_repr)s version - unknown serializer %(format)s.") % {
"object_repr": self.object_repr,
"format": self.format,
})
@cached_property
def _local_field_dict(self):
"""
A dictionary mapping field names to field values in this version
of the model.
Parent links of inherited multi-table models will not be followed.
"""
version_options = _get_options(self._model)
object_version = self._object_version
obj = object_version.object
model = self._model
field_dict = {}
for field_name in version_options.fields:
field = model._meta.get_field(field_name)
if isinstance(field, models.ManyToManyField):
# M2M fields with a custom through are not stored in m2m_data, but as a separate model.
if object_version.m2m_data and field.attname in object_version.m2m_data:
field_dict[field.attname] = object_version.m2m_data[field.attname]
else:
field_dict[field.attname] = getattr(obj, field.attname)
return field_dict
@cached_property
def field_dict(self):
"""
A dictionary mapping field names to field values in this version
of the model.
This method will follow parent links, if present.
"""
field_dict = self._local_field_dict
# Add parent data.
for parent_model, field in self._model._meta.concrete_model._meta.parents.items():
content_type = _get_content_type(parent_model, self._state.db)
parent_id = field_dict[field.attname]
parent_version = self.revision.version_set.get(
content_type=content_type,
object_id=parent_id,
db=self.db,
)
field_dict.update(parent_version.field_dict)
return field_dict
def revert(self):
self._object_version.save(using=self.db)
def __str__(self):
return self.object_repr
class Meta:
verbose_name = _('version')
verbose_name_plural = _('versions')
app_label = 'reversion'
unique_together = (
("db", "content_type", "object_id", "revision"),
)
indexes = (
models.Index(
fields=["content_type", "db"]
),
)
ordering = ("-pk",)
| Version |
python | redis__redis-py | redis/multidb/event.py | {
"start": 233,
"end": 994
} | class ____:
"""
Event fired when an active database has been changed.
"""
def __init__(
self,
old_database: SyncDatabase,
new_database: SyncDatabase,
command_executor,
**kwargs,
):
self._old_database = old_database
self._new_database = new_database
self._command_executor = command_executor
self._kwargs = kwargs
@property
def old_database(self) -> SyncDatabase:
return self._old_database
@property
def new_database(self) -> SyncDatabase:
return self._new_database
@property
def command_executor(self):
return self._command_executor
@property
def kwargs(self):
return self._kwargs
| ActiveDatabaseChanged |
python | tornadoweb__tornado | tornado/test/httpclient_test.py | {
"start": 2549,
"end": 2691
} | class ____(RequestHandler):
def get(self):
self.write(self.request.headers.get("User-Agent", "User agent not set"))
| UserAgentHandler |
python | gevent__gevent | src/gevent/tests/test__fileobject.py | {
"start": 15018,
"end": 16313
} | class ____(ConcurrentFileObjectMixin, # pylint:disable=too-many-ancestors
TestFileObjectBlock):
if sysinfo.LIBUV and sysinfo.LINUX:
# On Linux, initializing the watcher for a regular
# file results in libuv raising EPERM. But that works
# fine on other platforms.
WORKS_WITH_REGULAR_FILES = False
def _getTargetClass(self):
return fileobject.FileObjectPosix
def test_seek_raises_ioerror(self):
# https://github.com/gevent/gevent/issues/1323
# Get a non-seekable file descriptor
r, _w = self._pipe()
with self.assertRaises(OSError) as ctx:
os.lseek(r, 0, os.SEEK_SET)
os_ex = ctx.exception
with self.assertRaises(IOError) as ctx:
f = self._makeOne(r, 'r', close=False)
# Seek directly using the underlying GreenFileDescriptorIO;
# the buffer may do different things, depending
# on the version of Python (especially 3.7+)
f.fileio.seek(0)
io_ex = ctx.exception
self.assertEqual(io_ex.errno, os_ex.errno)
self.assertEqual(io_ex.strerror, os_ex.strerror)
self.assertEqual(io_ex.args, os_ex.args)
self.assertEqual(str(io_ex), str(os_ex))
| TestFileObjectPosix |
python | allegroai__clearml | clearml/backend_api/services/v2_20/models.py | {
"start": 151614,
"end": 155511
} | class ____(Response):
"""
Response of models.update_for_task endpoint.
:param id: ID of the model
:type id: str
:param created: Was the model created
:type created: bool
:param updated: Number of models updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "models"
_action = "update_for_task"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"created": {
"description": "Was the model created",
"type": ["boolean", "null"],
},
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"id": {"description": "ID of the model", "type": ["string", "null"]},
"updated": {
"description": "Number of models updated (0 or 1)",
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(
self,
id: Optional[str] = None,
created: Optional[bool] = None,
updated: Optional[int] = None,
fields: Optional[dict] = None,
**kwargs: Any
) -> None:
super(UpdateForTaskResponse, self).__init__(**kwargs)
self.id = id
self.created = created
self.updated = updated
self.fields = fields
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("created")
def created(self) -> Optional[bool]:
return self._property_created
@created.setter
def created(self, value: Optional[bool]) -> None:
if value is None:
self._property_created = None
return
self.assert_isinstance(value, "created", (bool,))
self._property_created = value
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
response_mapping = {
GetByIdRequest: GetByIdResponse,
GetByTaskIdRequest: GetByTaskIdResponse,
GetAllRequest: GetAllResponse,
GetFrameworksRequest: GetFrameworksResponse,
UpdateForTaskRequest: UpdateForTaskResponse,
CreateRequest: CreateResponse,
EditRequest: EditResponse,
UpdateRequest: UpdateResponse,
PublishManyRequest: PublishManyResponse,
SetReadyRequest: SetReadyResponse,
ArchiveManyRequest: ArchiveManyResponse,
UnarchiveManyRequest: UnarchiveManyResponse,
DeleteManyRequest: DeleteManyResponse,
DeleteRequest: DeleteResponse,
MakePublicRequest: MakePublicResponse,
MakePrivateRequest: MakePrivateResponse,
MoveRequest: MoveResponse,
AddOrUpdateMetadataRequest: AddOrUpdateMetadataResponse,
DeleteMetadataRequest: DeleteMetadataResponse,
}
| UpdateForTaskResponse |
python | boto__boto3 | tests/integration/test_dynamodb.py | {
"start": 781,
"end": 2088
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.session = boto3.session.Session(region_name='us-west-2')
cls.dynamodb = cls.session.resource('dynamodb')
cls.table_name = unique_id('boto3db')
cls.item_data = {
'MyHashKey': 'mykey',
'MyNull': None,
'MyBool': True,
'MyString': 'mystring',
'MyNumber': Decimal('1.25'),
'MyBinary': Binary(b'\x01'),
'MyStringSet': {'foo'},
'MyNumberSet': {Decimal('1.25')},
'MyBinarySet': {Binary(b'\x01')},
'MyList': ['foo'],
'MyMap': {'foo': 'bar'},
}
cls.table = cls.dynamodb.create_table(
TableName=cls.table_name,
ProvisionedThroughput={
"ReadCapacityUnits": 5,
"WriteCapacityUnits": 5,
},
KeySchema=[{"AttributeName": "MyHashKey", "KeyType": "HASH"}],
AttributeDefinitions=[
{"AttributeName": "MyHashKey", "AttributeType": "S"}
],
)
waiter = cls.dynamodb.meta.client.get_waiter('table_exists')
waiter.wait(TableName=cls.table_name)
@classmethod
def tearDownClass(cls):
cls.table.delete()
| BaseDynamoDBTest |
python | milvus-io__pymilvus | pymilvus/milvus_client/base.py | {
"start": 342,
"end": 4062
} | class ____:
"""Base class for Milvus clients (synchronous and asynchronous)."""
@classmethod
def create_schema(cls, **kwargs):
"""Create a collection schema.
Args:
**kwargs: Additional keyword arguments for schema creation.
Returns:
CollectionSchema: The created collection schema.
"""
kwargs["check_fields"] = False # do not check fields for now
return CollectionSchema([], **kwargs)
@classmethod
def create_struct_field_schema(cls) -> StructFieldSchema:
"""Create a struct field schema.
Returns:
StructFieldSchema: The created struct field schema.
"""
return StructFieldSchema()
@classmethod
def create_field_schema(
cls, name: str, data_type: DataType, desc: str = "", **kwargs
) -> FieldSchema:
"""Create a field schema. Wrapping orm.FieldSchema.
Args:
name (str): The name of the field.
data_type (DataType): The data type of the field.
desc (str): The description of the field.
**kwargs: Additional keyword arguments.
Returns:
FieldSchema: the FieldSchema created.
"""
return FieldSchema(name, data_type, desc, **kwargs)
@classmethod
def prepare_index_params(cls, field_name: str = "", **kwargs) -> IndexParams:
"""Prepare index parameters.
Args:
field_name (str): The name of the field to create index for.
**kwargs: Additional keyword arguments for index creation.
Returns:
IndexParams: The created index parameters.
"""
index_params = IndexParams()
if field_name:
validate_param("field_name", field_name, str)
index_params.add_index(field_name, **kwargs)
return index_params
def get_server_type(self) -> str:
"""Get the server type.
Returns:
str: The server type (e.g., "milvus", "zilliz").
"""
return self._get_connection().get_server_type()
def _get_connection(self):
"""Get the connection handler.
Returns:
The connection handler instance.
"""
return connections._fetch_handler(self._using)
def _extract_primary_field(self, schema_dict: Dict) -> dict:
"""Extract the primary field from a schema dictionary.
Args:
schema_dict (Dict): The schema dictionary.
Returns:
dict: The primary field dictionary, or empty dict if not found.
"""
fields = schema_dict.get("fields", [])
if not fields:
return {}
for field_dict in fields:
if field_dict.get("is_primary", None) is not None:
return field_dict
return {}
def _pack_pks_expr(self, schema_dict: Dict, pks: List) -> str:
"""Pack primary keys into an expression string.
Args:
schema_dict (Dict): The schema dictionary.
pks (List): List of primary key values.
Returns:
str: The expression string for filtering by primary keys.
"""
primary_field = self._extract_primary_field(schema_dict)
pk_field_name = primary_field["name"]
data_type = primary_field["type"]
# Varchar pks need double quotes around the values
if data_type == DataType.VARCHAR:
ids = ["'" + str(entry) + "'" for entry in pks]
expr = f"""{pk_field_name} in [{",".join(ids)}]"""
else:
ids = [str(entry) for entry in pks]
expr = f"{pk_field_name} in [{','.join(ids)}]"
return expr
| BaseMilvusClient |
python | gevent__gevent | src/gevent/tests/test__queue.py | {
"start": 15790,
"end": 15869
} | class ____(TestGetInterrupt):
kind = queue.Queue
| TestGetInterruptJoinableQueue |
python | keras-team__keras | keras/src/losses/losses_test.py | {
"start": 74324,
"end": 77755
} | class ____(testing.TestCase):
def setup(self):
self.y_true = np.array([1, 1, 2, 2, 3])
self.y_pred = np.array(
[
[0.70014004, -0.42008403, 0.14002801, 0.56011203],
[0.17609018, 0.70436073, -0.52827054, 0.44022545],
[-0.34050261, 0.25537696, -0.68100522, 0.59587957],
[0.32163376, -0.75047877, 0.53605627, -0.21442251],
[0.51261459, -0.34174306, 0.17087153, 0.76892189],
]
)
self.ref_labels = np.array([1, 1, 2, 2, 3, 4])
self.ref_embeddings = np.array(
[
[0.40824829, -0.54433105, 0.27216553, 0.68041382],
[0.76376261, 0.10910895, -0.54554473, 0.32732684],
[-0.74420841, 0.24806947, 0.49613894, -0.3721042],
[0.52981294, -0.13245324, 0.79471941, -0.26490647],
[0.54554473, -0.32732684, 0.10910895, 0.76376261],
[-0.27216553, 0.68041382, 0.40824829, -0.54433105],
]
)
def test_config(self):
self.run_class_serialization_test(
losses.Circle(name="mycircle", gamma=80.0, margin=0.4)
)
def test_correctness(self):
self.setup()
circle_loss = losses.Circle(gamma=80.0, margin=0.4)
loss = circle_loss(self.y_true, self.y_pred)
self.assertAlmostEqual(loss, 188.3883)
circle_loss = losses.Circle(gamma=256, margin=0.25)
loss = circle_loss(self.y_true, self.y_pred)
self.assertAlmostEqual(loss, 652.7617)
loss = losses.circle(
self.y_true,
self.y_pred,
ref_labels=self.ref_labels,
ref_embeddings=self.ref_embeddings,
gamma=80.0,
margin=0.4,
remove_diagonal=False,
)
self.assertAllClose(
loss, (61.5844, 94.3465, 276.9344, 90.9873, 48.8963)
)
def test_correctness_weighted(self):
self.setup()
sample_weight = np.array([2.0, 2.0, 1.0, 1.0, 0.5])
circle_loss = losses.Circle(gamma=80.0, margin=0.4)
loss = circle_loss(
self.y_true, self.y_pred, sample_weight=sample_weight
)
self.assertAlmostEqual(loss, 244.91918)
def test_no_reduction(self):
self.setup()
circle_loss = losses.Circle(gamma=80.0, margin=0.4, reduction=None)
loss = circle_loss(self.ref_labels, self.ref_embeddings)
self.assertAllClose(
loss, [82.9116, 36.7942, 92.4590, 52.6798, 0.0, 0.0]
)
def test_sum_reduction(self):
self.setup()
circle_loss = losses.Circle(gamma=80.0, margin=0.4, reduction="sum")
loss = circle_loss(self.ref_labels, self.ref_embeddings)
self.assertAlmostEqual(loss, 264.845)
def test_mean_with_sample_weight_reduction(self):
self.setup()
sample_weight = np.array([2.0, 2.0, 1.0, 1.0, 0.5])
circle_loss = losses.Circle(
gamma=80.0, margin=0.4, reduction="mean_with_sample_weight"
)
loss = circle_loss(
self.y_true, self.y_pred, sample_weight=sample_weight
)
self.assertAlmostEqual(loss, 163.27948)
def test_dtype_arg(self):
self.setup()
circle_loss = losses.Circle(dtype="bfloat16")
loss = circle_loss(self.y_true, self.y_pred)
self.assertDType(loss, "bfloat16")
| CircleTest |
python | getsentry__sentry | tests/sentry/tasks/test_post_process.py | {
"start": 57211,
"end": 64548
} | class ____(BasePostProgressGroupMixin):
github_blame_return_value = {
"commitId": "asdfwreqr",
"committedDate": (timezone.now() - timedelta(days=2)),
"commitMessage": "placeholder commit message",
"commitAuthorName": "",
"commitAuthorEmail": "admin@localhost",
}
def setUp(self) -> None:
self.created_event = self.create_event(
data={
"message": "Kaboom!",
"platform": "python",
"timestamp": before_now(seconds=10).isoformat(),
"stacktrace": {
"frames": [
{
"function": "handle_set_commits",
"abs_path": "/usr/src/sentry/src/sentry/tasks.py",
"module": "sentry.tasks",
"in_app": False,
"lineno": 30,
"filename": "sentry/tasks.py",
},
{
"function": "set_commits",
"abs_path": "/usr/src/sentry/src/sentry/models/release.py",
"module": "sentry.models.release",
"in_app": True,
"lineno": 39,
"filename": "sentry/models/release.py",
},
]
},
"fingerprint": ["put-me-in-the-control-group"],
},
project_id=self.project.id,
)
self.cache_key = write_event_to_cache(self.created_event)
self.repo = self.create_repo(
name="org/example", integration_id=self.integration.id, provider="integrations:github"
)
self.code_mapping = self.create_code_mapping(
repo=self.repo, project=self.project, stack_root="sentry/", source_root="sentry/"
)
self.commit_author = self.create_commit_author(project=self.project, user=self.user)
self.commit = self.create_commit(
project=self.project,
repo=self.repo,
author=self.commit_author,
key="asdfwreqr",
message="placeholder commit message",
)
self.github_blame_all_files_return_value = [
FileBlameInfo(
code_mapping=self.code_mapping,
lineno=39,
path="sentry/models/release.py",
ref="master",
repo=self.repo,
commit=CommitInfo(
commitId="asdfwreqr",
committedDate=(timezone.now() - timedelta(days=2)),
commitMessage="placeholder commit message",
commitAuthorName="",
commitAuthorEmail="admin@localhost",
),
)
]
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_commit_context_all_frames",
return_value=github_blame_return_value,
)
def test_logic_fallback_no_scm(self, mock_get_commit_context: MagicMock) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
with unguarded_write(using=router.db_for_write(Integration)):
Integration.objects.all().delete()
integration = self.create_provider_integration(provider="bitbucket")
integration.add_organization(self.organization)
with self.tasks():
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=True,
event=self.created_event,
)
assert not mock_get_commit_context.called
@patch(
"sentry.integrations.github_enterprise.integration.GitHubEnterpriseIntegration.get_commit_context_all_frames",
)
def test_github_enterprise(self, mock_get_commit_context: MagicMock) -> None:
mock_get_commit_context.return_value = self.github_blame_all_files_return_value
with assume_test_silo_mode(SiloMode.CONTROL):
with unguarded_write(using=router.db_for_write(Integration)):
Integration.objects.all().delete()
integration = self.create_provider_integration(
external_id="35.232.149.196:12345",
provider="github_enterprise",
metadata={
"domain_name": "35.232.149.196/baxterthehacker",
"installation_id": "12345",
"installation": {"id": "2", "private_key": "private_key", "verify_ssl": True},
},
)
organization_integration = integration.add_organization(self.organization)
assert organization_integration is not None
self.repo.update(integration_id=integration.id, provider="integrations:github_enterprise")
self.code_mapping.update(organization_integration_id=organization_integration.id)
with self.tasks():
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=True,
event=self.created_event,
)
assert GroupOwner.objects.get(
group=self.created_event.group,
project=self.created_event.project,
organization=self.created_event.project.organization,
type=GroupOwnerType.SUSPECT_COMMIT.value,
)
@patch("sentry.integrations.github.integration.GitHubIntegration.get_commit_context_all_frames")
def test_skip_when_not_is_new(self, mock_get_commit_context: MagicMock) -> None:
"""
Tests that we do not process commit context if the group isn't new.
"""
with self.tasks():
self.call_post_process_group(
is_new=False,
is_regression=False,
is_new_group_environment=True,
event=self.created_event,
)
assert not mock_get_commit_context.called
assert not GroupOwner.objects.filter(
group=self.created_event.group,
project=self.created_event.project,
organization=self.created_event.project.organization,
type=GroupOwnerType.SUSPECT_COMMIT.value,
).exists()
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_commit_context_all_frames",
)
def test_does_not_skip_when_is_new(self, mock_get_commit_context: MagicMock) -> None:
"""
Tests that the commit context should be processed when the group is new.
"""
mock_get_commit_context.return_value = self.github_blame_all_files_return_value
with self.tasks():
self.call_post_process_group(
is_new=True,
is_regression=False,
is_new_group_environment=True,
event=self.created_event,
)
assert mock_get_commit_context.called
assert GroupOwner.objects.get(
group=self.created_event.group,
project=self.created_event.project,
organization=self.created_event.project.organization,
type=GroupOwnerType.SUSPECT_COMMIT.value,
)
| ProcessCommitsTestMixin |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_base_streams.py | {
"start": 1140,
"end": 4223
} | class ____:
def test_date_time_value(self):
record = {
"bla": "2023-01-19t20:38:59 0000",
"created_time": "2023-01-19t20:38:59 0000",
"creation_time": "2023-01-19t20:38:59 0000",
"updated_time": "2023-01-19t20:38:59 0000",
"event_time": "2023-01-19t20:38:59 0000",
"first_fired_time": "2023-01-19t20:38:59 0000",
"last_fired_time": "2023-01-19t20:38:59 0000",
"sub_list": [
{
"bla": "2023-01-19t20:38:59 0000",
"created_time": "2023-01-19t20:38:59 0000",
"creation_time": "2023-01-19t20:38:59 0000",
"updated_time": "2023-01-19t20:38:59 0000",
"event_time": "2023-01-19t20:38:59 0000",
"first_fired_time": "2023-01-19t20:38:59 0000",
"last_fired_time": "2023-01-19t20:38:59 0000",
}
],
"sub_entries1": {
"sub_entries2": {
"bla": "2023-01-19t20:38:59 0000",
"created_time": "2023-01-19t20:38:59 0000",
"creation_time": "2023-01-19t20:38:59 0000",
"updated_time": "2023-01-19t20:38:59 0000",
"event_time": "2023-01-19t20:38:59 0000",
"first_fired_time": "2023-01-19t20:38:59 0000",
"last_fired_time": "2023-01-19t20:38:59 0000",
}
},
}
FBMarketingStream.fix_date_time(record)
assert {
"bla": "2023-01-19t20:38:59 0000",
"created_time": "2023-01-19T20:38:59+0000",
"creation_time": "2023-01-19T20:38:59+0000",
"updated_time": "2023-01-19T20:38:59+0000",
"event_time": "2023-01-19T20:38:59+0000",
"first_fired_time": "2023-01-19T20:38:59+0000",
"last_fired_time": "2023-01-19T20:38:59+0000",
"sub_list": [
{
"bla": "2023-01-19t20:38:59 0000",
"created_time": "2023-01-19T20:38:59+0000",
"creation_time": "2023-01-19T20:38:59+0000",
"updated_time": "2023-01-19T20:38:59+0000",
"event_time": "2023-01-19T20:38:59+0000",
"first_fired_time": "2023-01-19T20:38:59+0000",
"last_fired_time": "2023-01-19T20:38:59+0000",
}
],
"sub_entries1": {
"sub_entries2": {
"bla": "2023-01-19t20:38:59 0000",
"created_time": "2023-01-19T20:38:59+0000",
"creation_time": "2023-01-19T20:38:59+0000",
"updated_time": "2023-01-19T20:38:59+0000",
"event_time": "2023-01-19T20:38:59+0000",
"first_fired_time": "2023-01-19T20:38:59+0000",
"last_fired_time": "2023-01-19T20:38:59+0000",
}
},
} == record
| TestDateTimeValue |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 40734,
"end": 40894
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("STARRED_AT",)
| StarOrderField |
python | sqlalchemy__sqlalchemy | examples/versioned_rows/versioned_rows.py | {
"start": 1553,
"end": 1980
} | class ____(Versioned, Base):
__tablename__ = "example"
id = Column(Integer, primary_key=True)
data = Column(String)
Base.metadata.create_all(engine)
session = Session()
e1 = Example(data="e1")
session.add(e1)
session.commit()
e1.data = "e2"
session.commit()
assert session.query(Example.id, Example.data).order_by(Example.id).all() == (
[(1, "e1"), (2, "e2")]
)
# example 2, versioning with a parent
| Example |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/ext/association_proxy/association_proxy_three.py | {
"start": 490,
"end": 787
} | class ____:
id: Mapped[int] = mapped_column(primary_key=True)
@declared_attr
def users(self) -> Mapped[List["User"]]:
return relationship("User")
@declared_attr
def user_ids(self) -> AssociationProxy[List[int]]:
return association_proxy("users", "id")
| Milestone |
python | great-expectations__great_expectations | great_expectations/metrics/column/distinct_values_count.py | {
"start": 192,
"end": 362
} | class ____(ColumnMetric[ColumnDistinctValuesCountResult]):
"""Count of distinct values in a column"""
name = "column.distinct_values.count"
| ColumnDistinctValuesCount |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/cx_oracle.py | {
"start": 19727,
"end": 22470
} | class ____(sqltypes.NumericCommon, sqltypes.TypeEngine):
is_number = False
def bind_processor(self, dialect):
if self.scale == 0:
return None
elif self.asdecimal:
processor = processors.to_decimal_processor_factory(
decimal.Decimal, self._effective_decimal_return_scale
)
def process(value):
if isinstance(value, (int, float)):
return processor(value)
elif value is not None and value.is_infinite():
return float(value)
else:
return value
return process
else:
return processors.to_float
def result_processor(self, dialect, coltype):
return None
def _cx_oracle_outputtypehandler(self, dialect):
cx_Oracle = dialect.dbapi
def handler(cursor, name, default_type, size, precision, scale):
outconverter = None
if precision:
if self.asdecimal:
if default_type == cx_Oracle.NATIVE_FLOAT:
# receiving float and doing Decimal after the fact
# allows for float("inf") to be handled
type_ = default_type
outconverter = decimal.Decimal
else:
type_ = decimal.Decimal
else:
if self.is_number and scale == 0:
# integer. cx_Oracle is observed to handle the widest
# variety of ints when no directives are passed,
# from 5.2 to 7.0. See [ticket:4457]
return None
else:
type_ = cx_Oracle.NATIVE_FLOAT
else:
if self.asdecimal:
if default_type == cx_Oracle.NATIVE_FLOAT:
type_ = default_type
outconverter = decimal.Decimal
else:
type_ = decimal.Decimal
else:
if self.is_number and scale == 0:
# integer. cx_Oracle is observed to handle the widest
# variety of ints when no directives are passed,
# from 5.2 to 7.0. See [ticket:4457]
return None
else:
type_ = cx_Oracle.NATIVE_FLOAT
return cursor.var(
type_,
255,
arraysize=cursor.arraysize,
outconverter=outconverter,
)
return handler
| _OracleNumericCommon |
python | readthedocs__readthedocs.org | readthedocs/projects/forms.py | {
"start": 43066,
"end": 43367
} | class ____(forms.ModelForm):
"""Project promotion opt-out form."""
class Meta:
model = Project
fields = ["allow_promos"]
def __init__(self, *args, **kwargs):
self.project = kwargs.pop("project", None)
super().__init__(*args, **kwargs)
| ProjectAdvertisingForm |
python | astropy__astropy | astropy/visualization/wcsaxes/formatter_locator.py | {
"start": 2143,
"end": 4742
} | class ____:
"""
A joint formatter/locator.
"""
def __init__(
self,
values=None,
number=None,
spacing=None,
format=None,
unit=None,
format_unit=None,
):
if len([x for x in (values, number, spacing) if x is None]) < 2:
raise ValueError("At most one of values/number/spacing can be specified")
self._unit = unit
self._format_unit = format_unit or unit
if values is not None:
self.values = values
elif number is not None:
self.number = number
elif spacing is not None:
self.spacing = spacing
else:
self.number = 5
self.format = format
@property
def values(self):
return self._values
@values.setter
def values(self, values):
if not isinstance(values, u.Quantity) or (not values.ndim == 1):
raise TypeError("values should be an astropy.units.Quantity array")
if not values.unit.is_equivalent(self._unit):
raise UnitsError(
"value should be in units compatible with "
f"coordinate units ({self._unit}) but found {values.unit}"
)
self._number = None
self._spacing = None
self._values = values
@property
def number(self):
return self._number
@number.setter
def number(self, number):
self._number = number
self._spacing = None
self._values = None
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
self._number = None
self._spacing = spacing
self._values = None
def minor_locator(self, spacing, frequency, value_min, value_max):
if self.values is not None:
return [] * self._unit
minor_spacing = spacing.value / frequency
values = self._locate_values(value_min, value_max, minor_spacing)
index = np.where((values % frequency) == 0)
index = index[0][0]
values = np.delete(values, np.s_[index::frequency])
return values * minor_spacing * self._unit
@property
def format_unit(self):
return self._format_unit
@format_unit.setter
def format_unit(self, unit):
self._format_unit = u.Unit(unit)
@staticmethod
def _locate_values(value_min, value_max, spacing):
imin = np.ceil(value_min / spacing)
imax = np.floor(value_max / spacing)
return np.arange(imin, imax + 1, dtype=int)
| BaseFormatterLocator |
python | kamyu104__LeetCode-Solutions | Python/increasing-triplet-subsequence.py | {
"start": 532,
"end": 1006
} | class ____(object):
def increasingTriplet(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
def increasingKUplet(nums, k):
inc = [float('inf')] * (k - 1)
for num in nums:
i = bisect.bisect_left(inc, num)
if i >= k - 1:
return True
inc[i] = num
return k == 0
return increasingKUplet(nums, 3)
| Solution_Generalization |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_cloud_storage_transfer_service.py | {
"start": 5242,
"end": 22133
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_no_default_project_id,
):
self.gct_hook = CloudDataTransferServiceHook(gcp_conn_id="test")
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook._authorize"
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_storage_transfer_service.build")
def test_gct_client_creation(self, mock_build, mock_authorize):
result = self.gct_hook.get_conn()
mock_build.assert_called_once_with(
"storagetransfer", "v1", http=mock_authorize.return_value, cache_discovery=False
)
assert mock_build.return_value == result
assert self.gct_hook._conn == result
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_create_transfer_job(self, get_conn, mock_project_id):
create_method = get_conn.return_value.transferJobs.return_value.create
execute_method = create_method.return_value.execute
execute_method.return_value = TEST_TRANSFER_JOB
res = self.gct_hook.create_transfer_job(body=TEST_BODY)
assert res == TEST_TRANSFER_JOB
create_method.assert_called_once_with(body=TEST_BODY)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_get_transfer_job(self, get_conn):
get_method = get_conn.return_value.transferJobs.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = TEST_TRANSFER_JOB
res = self.gct_hook.get_transfer_job(job_name=TEST_TRANSFER_JOB_NAME, project_id=TEST_PROJECT_ID)
assert res is not None
assert res[NAME] == TEST_TRANSFER_JOB_NAME
get_method.assert_called_once_with(jobName=TEST_TRANSFER_JOB_NAME, projectId=TEST_PROJECT_ID)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_list_transfer_job(self, get_conn, mock_project_id):
list_method = get_conn.return_value.transferJobs.return_value.list
list_execute_method = list_method.return_value.execute
list_execute_method.return_value = {TRANSFER_JOBS: [TEST_TRANSFER_JOB]}
list_next = get_conn.return_value.transferJobs.return_value.list_next
list_next.return_value = None
res = self.gct_hook.list_transfer_job(request_filter=TEST_TRANSFER_JOB_FILTER)
assert res is not None
assert res == [TEST_TRANSFER_JOB]
list_method.assert_called_once_with(filter=mock.ANY)
args, kwargs = list_method.call_args_list[0]
assert json.loads(kwargs["filter"]) == {
FILTER_PROJECT_ID: TEST_PROJECT_ID,
FILTER_JOB_NAMES: [TEST_TRANSFER_JOB_NAME],
}
list_execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_update_transfer_job(self, get_conn, mock_project_id):
update_method = get_conn.return_value.transferJobs.return_value.patch
execute_method = update_method.return_value.execute
execute_method.return_value = TEST_TRANSFER_JOB
res = self.gct_hook.update_transfer_job(
job_name=TEST_TRANSFER_JOB_NAME, body=TEST_UPDATE_TRANSFER_JOB_BODY
)
assert res is not None
update_method.assert_called_once_with(
jobName=TEST_TRANSFER_JOB_NAME, body=TEST_UPDATE_TRANSFER_JOB_BODY
)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service."
"CloudDataTransferServiceHook.get_conn"
)
def test_delete_transfer_job(self, get_conn):
update_method = get_conn.return_value.transferJobs.return_value.patch
execute_method = update_method.return_value.execute
self.gct_hook.delete_transfer_job(job_name=TEST_TRANSFER_JOB_NAME, project_id=TEST_PROJECT_ID)
update_method.assert_called_once_with(
jobName=TEST_TRANSFER_JOB_NAME,
body={
PROJECT_ID: TEST_PROJECT_ID,
TRANSFER_JOB: {STATUS: GcpTransferJobsStatus.DELETED},
TRANSFER_JOB_FIELD_MASK: STATUS,
},
)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service."
"CloudDataTransferServiceHook.get_conn"
)
def test_run_transfer_job(self, get_conn):
run_method = get_conn.return_value.transferJobs.return_value.run
execute_method = run_method.return_value.execute
execute_method.return_value = TEST_TRANSFER_OPERATION
res = self.gct_hook.run_transfer_job(job_name=TEST_TRANSFER_JOB_NAME, project_id=TEST_PROJECT_ID)
assert res == TEST_TRANSFER_OPERATION
run_method.assert_called_once_with(
jobName=TEST_TRANSFER_JOB_NAME,
body={
PROJECT_ID: TEST_PROJECT_ID,
},
)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_cancel_transfer_operation(self, get_conn):
cancel_method = get_conn.return_value.transferOperations.return_value.cancel
execute_method = cancel_method.return_value.execute
self.gct_hook.cancel_transfer_operation(operation_name=TEST_TRANSFER_OPERATION_NAME)
cancel_method.assert_called_once_with(name=TEST_TRANSFER_OPERATION_NAME)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_get_transfer_operation(self, get_conn):
get_method = get_conn.return_value.transferOperations.return_value.get
execute_method = get_method.return_value.execute
execute_method.return_value = TEST_TRANSFER_OPERATION
res = self.gct_hook.get_transfer_operation(operation_name=TEST_TRANSFER_OPERATION_NAME)
assert res == TEST_TRANSFER_OPERATION
get_method.assert_called_once_with(name=TEST_TRANSFER_OPERATION_NAME)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_list_transfer_operation(self, get_conn, mock_project_id):
list_method = get_conn.return_value.transferOperations.return_value.list
list_execute_method = list_method.return_value.execute
list_execute_method.return_value = {OPERATIONS: [TEST_TRANSFER_OPERATION]}
list_next = get_conn.return_value.transferOperations.return_value.list_next
list_next.return_value = None
res = self.gct_hook.list_transfer_operations(request_filter=TEST_TRANSFER_OPERATION_FILTER)
assert res is not None
assert res == [TEST_TRANSFER_OPERATION]
list_method.assert_called_once_with(filter=mock.ANY, name="transferOperations")
args, kwargs = list_method.call_args_list[0]
assert json.loads(kwargs["filter"]) == {
FILTER_PROJECT_ID: TEST_PROJECT_ID,
FILTER_JOB_NAMES: [TEST_TRANSFER_JOB_NAME],
}
list_execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_pause_transfer_operation(self, get_conn):
pause_method = get_conn.return_value.transferOperations.return_value.pause
execute_method = pause_method.return_value.execute
self.gct_hook.pause_transfer_operation(operation_name=TEST_TRANSFER_OPERATION_NAME)
pause_method.assert_called_once_with(name=TEST_TRANSFER_OPERATION_NAME)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_resume_transfer_operation(self, get_conn):
resume_method = get_conn.return_value.transferOperations.return_value.resume
execute_method = resume_method.return_value.execute
self.gct_hook.resume_transfer_operation(operation_name=TEST_TRANSFER_OPERATION_NAME)
resume_method.assert_called_once_with(name=TEST_TRANSFER_OPERATION_NAME)
execute_method.assert_called_once_with(num_retries=5)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_storage_transfer_service.time.sleep")
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service."
"CloudDataTransferServiceHook.list_transfer_operations"
)
def test_wait_for_transfer_job(self, mock_list, mock_sleep, mock_project_id):
mock_list.side_effect = [
[
{
NAME: TEST_NAME,
METADATA: {
STATUS: GcpTransferOperationStatus.IN_PROGRESS,
COUNTERS: TEST_COUNTERS,
},
},
],
[
{
NAME: TEST_NAME,
METADATA: {
STATUS: GcpTransferOperationStatus.SUCCESS,
COUNTERS: TEST_COUNTERS,
},
},
],
]
job_name = "transferJobs/test-job"
self.gct_hook.wait_for_transfer_job({PROJECT_ID: TEST_PROJECT_ID, "name": job_name})
calls = [
mock.call(request_filter={FILTER_PROJECT_ID: TEST_PROJECT_ID, FILTER_JOB_NAMES: [job_name]}),
mock.call(request_filter={FILTER_PROJECT_ID: TEST_PROJECT_ID, FILTER_JOB_NAMES: [job_name]}),
]
mock_list.assert_has_calls(calls, any_order=True)
mock_sleep.assert_called_once_with(TIME_TO_SLEEP_IN_SECONDS)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_storage_transfer_service.time.sleep")
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_wait_for_transfer_job_failed(self, mock_get_conn, mock_sleep, mock_project_id):
list_method = mock_get_conn.return_value.transferOperations.return_value.list
list_execute_method = list_method.return_value.execute
list_execute_method.return_value = {
OPERATIONS: [
{
NAME: TEST_TRANSFER_OPERATION_NAME,
METADATA: {
STATUS: GcpTransferOperationStatus.FAILED,
COUNTERS: TEST_COUNTERS,
},
}
]
}
mock_get_conn.return_value.transferOperations.return_value.list_next.return_value = None
with pytest.raises(AirflowException):
self.gct_hook.wait_for_transfer_job({PROJECT_ID: TEST_PROJECT_ID, NAME: "transferJobs/test-job"})
assert list_method.called
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_storage_transfer_service.time.sleep")
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_storage_transfer_service"
".CloudDataTransferServiceHook.get_conn"
)
def test_wait_for_transfer_job_expect_failed(self, get_conn, mock_sleep, mock_project_id):
list_method = get_conn.return_value.transferOperations.return_value.list
list_execute_method = list_method.return_value.execute
list_execute_method.return_value = {
OPERATIONS: [
{
NAME: TEST_TRANSFER_OPERATION_NAME,
METADATA: {
STATUS: GcpTransferOperationStatus.FAILED,
COUNTERS: TEST_COUNTERS,
},
}
]
}
get_conn.return_value.transferOperations.return_value.list_next.return_value = None
with pytest.raises(
AirflowException, match="An unexpected operation status was encountered. Expected: SUCCESS"
):
self.gct_hook.wait_for_transfer_job(
job={PROJECT_ID: "test-project", NAME: "transferJobs/test-job"},
expected_statuses=GcpTransferOperationStatus.SUCCESS,
)
@pytest.mark.parametrize(
("statuses", "expected_statuses"),
[
([GcpTransferOperationStatus.ABORTED], (GcpTransferOperationStatus.IN_PROGRESS,)),
(
[GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.ABORTED],
(GcpTransferOperationStatus.IN_PROGRESS,),
),
(
[GcpTransferOperationStatus.PAUSED, GcpTransferOperationStatus.ABORTED],
(GcpTransferOperationStatus.IN_PROGRESS,),
),
],
)
def test_operations_contain_expected_statuses_red_path(self, statuses, expected_statuses):
operations = [{NAME: TEST_TRANSFER_OPERATION_NAME, METADATA: {STATUS: status}} for status in statuses]
with pytest.raises(
AirflowException,
match=f"An unexpected operation status was encountered. Expected: {', '.join(expected_statuses)}",
):
CloudDataTransferServiceHook.operations_contain_expected_statuses(
operations, GcpTransferOperationStatus.IN_PROGRESS
)
@pytest.mark.parametrize(
("statuses", "expected_statuses"),
[
([GcpTransferOperationStatus.ABORTED], GcpTransferOperationStatus.ABORTED),
(
[GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.ABORTED],
GcpTransferOperationStatus.ABORTED,
),
(
[GcpTransferOperationStatus.PAUSED, GcpTransferOperationStatus.ABORTED],
GcpTransferOperationStatus.ABORTED,
),
([GcpTransferOperationStatus.ABORTED], (GcpTransferOperationStatus.ABORTED,)),
(
[GcpTransferOperationStatus.SUCCESS, GcpTransferOperationStatus.ABORTED],
(GcpTransferOperationStatus.ABORTED,),
),
(
[GcpTransferOperationStatus.PAUSED, GcpTransferOperationStatus.ABORTED],
(GcpTransferOperationStatus.ABORTED,),
),
],
)
def test_operations_contain_expected_statuses_green_path(self, statuses, expected_statuses):
operations = [
{NAME: TEST_TRANSFER_OPERATION_NAME, METADATA: {STATUS: status, COUNTERS: TEST_COUNTERS}}
for status in statuses
]
result = CloudDataTransferServiceHook.operations_contain_expected_statuses(
operations, expected_statuses
)
assert result
| TestGCPTransferServiceHookWithPassedProjectId |
python | EpistasisLab__tpot | tpot/builtin_modules/column_one_hot_encoder.py | {
"start": 2374,
"end": 7791
} | class ____(TransformerMixin, BaseEstimator ):
def __init__(self, columns='auto', drop=None, handle_unknown='infrequent_if_exist', sparse_output=False, min_frequency=None,max_categories=None):
'''
A wrapper for OneHotEncoder that allows for onehot encoding of specific columns in a DataFrame or np array.
Parameters
----------
columns : str, list, default='auto'
Determines which columns to onehot encode with sklearn.preprocessing.OneHotEncoder.
- 'auto' : Automatically select categorical features based on columns with less than 10 unique values
- 'categorical' : Automatically select categorical features
- 'numeric' : Automatically select numeric features
- 'all' : Select all features
- list : A list of columns to select
drop, handle_unknown, sparse_output, min_frequency, max_categories : see sklearn.preprocessing.OneHotEncoder
'''
self.columns = columns
self.drop = drop
self.handle_unknown = handle_unknown
self.sparse_output = sparse_output
self.min_frequency = min_frequency
self.max_categories = max_categories
def fit(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
y: array-like {n_samples,} (Optional, ignored)
Feature labels
"""
if (self.columns == "categorical" or self.columns == "numeric") and not isinstance(X, pd.DataFrame):
raise ValueError(f"Invalid value for columns: {self.columns}. "
"Only 'all' or <list> is supported for np arrays")
if self.columns == "categorical":
self.columns_ = list(X.select_dtypes(exclude='number').columns)
elif self.columns == "numeric":
self.columns_ = [col for col in X.columns if is_numeric_dtype(X[col])]
elif self.columns == "auto":
self.columns_ = auto_select_categorical_features(X)
elif self.columns == "all":
if isinstance(X, pd.DataFrame):
self.columns_ = X.columns
else:
self.columns_ = list(range(X.shape[1]))
elif isinstance(self.columns, list):
self.columns_ = self.columns
else:
raise ValueError(f"Invalid value for columns: {self.columns}")
if len(self.columns_) == 0:
return self
self.enc = sklearn.preprocessing.OneHotEncoder( categories='auto',
drop = self.drop,
handle_unknown = self.handle_unknown,
sparse_output = self.sparse_output,
min_frequency = self.min_frequency,
max_categories = self.max_categories)
#TODO make this more consistent with sklearn baseimputer/baseencoder
if isinstance(X, pd.DataFrame):
self.enc.set_output(transform="pandas")
for col in X.columns:
# check if the column name is not a string
if not isinstance(col, str):
# if it's not a string, rename the column with "X" prefix
X.rename(columns={col: f"X{col}"}, inplace=True)
if len(self.columns_) == X.shape[1]:
X_sel = self.enc.fit(X)
else:
X_sel, X_not_sel = _X_selected(X, self.columns_)
X_sel = self.enc.fit(X_sel)
return self
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
if len(self.columns_) == 0:
return X
#TODO make this more consistent with sklearn baseimputer/baseencoder
if isinstance(X, pd.DataFrame):
for col in X.columns:
# check if the column name is not a string
if not isinstance(col, str):
# if it's not a string, rename the column with "X" prefix
X.rename(columns={col: f"X{col}"}, inplace=True)
if len(self.columns_) == X.shape[1]:
return self.enc.transform(X)
else:
X_sel, X_not_sel= _X_selected(X, self.columns_)
X_sel = self.enc.transform(X_sel)
#If X is dataframe
if isinstance(X, pd.DataFrame):
X_sel = pd.DataFrame(X_sel, columns=self.enc.get_feature_names_out())
return pd.concat([X_not_sel.reset_index(drop=True), X_sel.reset_index(drop=True)], axis=1)
else:
return np.hstack((X_not_sel, X_sel))
| ColumnOneHotEncoder |
python | sphinx-doc__sphinx | sphinx/addnodes.py | {
"start": 6765,
"end": 6888
} | class ____(nodes.Part, nodes.Inline, nodes.FixedTextElement):
"""Node for return types or object type names."""
| desc_type |
python | streamlit__streamlit | lib/streamlit/runtime/memory_session_storage.py | {
"start": 862,
"end": 3001
} | class ____(SessionStorage):
"""A SessionStorage that stores sessions in memory.
At most maxsize sessions are stored with a TTL of ttl seconds. This class is really
just a thin wrapper around cachetools.TTLCache that complies with the SessionStorage
protocol.
"""
# NOTE: The defaults for maxsize and ttl are chosen arbitrarily for now. These
# numbers are reasonable as the main problems we're trying to solve at the moment are
# caused by transient disconnects that are usually just short network blips. In the
# future, we may want to increase both to support use cases such as saving state for
# much longer periods of time. For example, we may want session state to persist if
# a user closes their laptop lid and comes back to an app hours later.
def __init__(
self,
maxsize: int = 128,
ttl_seconds: int = 2 * 60, # 2 minutes
) -> None:
"""Instantiate a new MemorySessionStorage.
Parameters
----------
maxsize
The maximum number of sessions we allow to be stored in this
MemorySessionStorage. If an entry needs to be removed because we have
exceeded this number, either
- an expired entry is removed, or
- the least recently used entry is removed (if no entries have expired).
ttl_seconds
The time in seconds for an entry added to a MemorySessionStorage to live.
After this amount of time has passed for a given entry, it becomes
inaccessible and will be removed eventually.
"""
self._cache: MutableMapping[str, SessionInfo] = TTLCache(
maxsize=maxsize, ttl=ttl_seconds
)
def get(self, session_id: str) -> SessionInfo | None:
return self._cache.get(session_id, None)
def save(self, session_info: SessionInfo) -> None:
self._cache[session_info.session.id] = session_info
def delete(self, session_id: str) -> None:
del self._cache[session_id]
def list(self) -> list[SessionInfo]:
return list(self._cache.values())
| MemorySessionStorage |
python | ray-project__ray | python/ray/dashboard/modules/job/tests/test_job_manager.py | {
"start": 23252,
"end": 30444
} | class ____:
async def test_pass_env_var(self, job_manager):
"""Test we can pass env vars in the subprocess that executes job's
driver script.
"""
job_id = await job_manager.submit_job(
entrypoint="echo $TEST_SUBPROCESS_JOB_CONFIG_ENV_VAR",
runtime_env={"env_vars": {"TEST_SUBPROCESS_JOB_CONFIG_ENV_VAR": "233"}},
)
await async_wait_for_condition(
check_job_succeeded, job_manager=job_manager, job_id=job_id
)
assert "233\n" in job_manager.get_job_logs(job_id)
async def test_niceness(self, job_manager):
job_id = await job_manager.submit_job(
entrypoint=f"python {_driver_script_path('check_niceness.py')}",
)
await async_wait_for_condition(
check_job_succeeded, job_manager=job_manager, job_id=job_id
)
logs = job_manager.get_job_logs(job_id)
assert "driver 0" in logs
assert "worker 15" in logs
async def test_multiple_runtime_envs(self, job_manager):
# Test that you can run two jobs in different envs without conflict.
job_id_1 = await job_manager.submit_job(
entrypoint=f"python {_driver_script_path('print_runtime_env.py')}",
runtime_env={
"env_vars": {"TEST_SUBPROCESS_JOB_CONFIG_ENV_VAR": "JOB_1_VAR"}
},
)
await async_wait_for_condition(
check_job_succeeded, job_manager=job_manager, job_id=job_id_1
)
logs = job_manager.get_job_logs(job_id_1)
assert "'TEST_SUBPROCESS_JOB_CONFIG_ENV_VAR': 'JOB_1_VAR'" in logs
job_id_2 = await job_manager.submit_job(
entrypoint=f"python {_driver_script_path('print_runtime_env.py')}",
runtime_env={
"env_vars": {"TEST_SUBPROCESS_JOB_CONFIG_ENV_VAR": "JOB_2_VAR"}
},
)
await async_wait_for_condition(
check_job_succeeded, job_manager=job_manager, job_id=job_id_2
)
logs = job_manager.get_job_logs(job_id_2)
assert "'TEST_SUBPROCESS_JOB_CONFIG_ENV_VAR': 'JOB_2_VAR'" in logs
async def test_failed_runtime_env_validation(self, job_manager):
"""Ensure job status is correctly set as failed if job has an invalid
runtime_env.
"""
run_cmd = f"python {_driver_script_path('override_env_var.py')}"
job_id = await job_manager.submit_job(
entrypoint=run_cmd, runtime_env={"working_dir": "path_not_exist"}
)
data = await job_manager.get_job_info(job_id)
assert data.status == JobStatus.FAILED
assert "path_not_exist is not a valid path" in data.message
assert data.driver_exit_code is None
async def test_failed_runtime_env_setup(self, job_manager):
"""Ensure job status is correctly set as failed if job has a valid
runtime_env that fails to be set up.
"""
run_cmd = f"python {_driver_script_path('override_env_var.py')}"
job_id = await job_manager.submit_job(
entrypoint=run_cmd, runtime_env={"working_dir": "s3://does_not_exist.zip"}
)
await async_wait_for_condition(
check_job_failed,
job_manager=job_manager,
job_id=job_id,
expected_error_type=JobErrorType.RUNTIME_ENV_SETUP_FAILURE,
)
data = await job_manager.get_job_info(job_id)
assert "runtime_env setup failed" in data.message
assert data.driver_exit_code is None
log_path = JobLogStorageClient().get_log_file_path(job_id=job_id)
with open(log_path, "r") as f:
job_logs = f.read()
assert "Traceback (most recent call last):" in job_logs
async def test_pass_metadata(self, job_manager):
def dict_to_str(d):
return str(dict(sorted(d.items())))
print_metadata_cmd = (
'python -c"'
"import ray;"
"ray.init();"
"job_config=ray._private.worker.global_worker.core_worker.get_job_config();"
"print(dict(sorted(job_config.metadata.items())))"
'"'
)
# Check that we default to only the job ID and job name.
job_id = await job_manager.submit_job(entrypoint=print_metadata_cmd)
await async_wait_for_condition(
check_job_succeeded, job_manager=job_manager, job_id=job_id
)
assert dict_to_str(
{JOB_NAME_METADATA_KEY: job_id, JOB_ID_METADATA_KEY: job_id}
) in job_manager.get_job_logs(job_id)
# Check that we can pass custom metadata.
job_id = await job_manager.submit_job(
entrypoint=print_metadata_cmd, metadata={"key1": "val1", "key2": "val2"}
)
await async_wait_for_condition(
check_job_succeeded, job_manager=job_manager, job_id=job_id
)
assert dict_to_str(
{
JOB_NAME_METADATA_KEY: job_id,
JOB_ID_METADATA_KEY: job_id,
"key1": "val1",
"key2": "val2",
}
) in job_manager.get_job_logs(job_id)
# Check that we can override job name.
job_id = await job_manager.submit_job(
entrypoint=print_metadata_cmd,
metadata={JOB_NAME_METADATA_KEY: "custom_name"},
)
await async_wait_for_condition(
check_job_succeeded, job_manager=job_manager, job_id=job_id
)
assert dict_to_str(
{JOB_NAME_METADATA_KEY: "custom_name", JOB_ID_METADATA_KEY: job_id}
) in job_manager.get_job_logs(job_id)
@pytest.mark.parametrize(
"env_vars",
[None, {}, {"hello": "world"}],
)
@pytest.mark.parametrize(
"resource_kwarg",
[
{},
{"entrypoint_num_cpus": 1},
{"entrypoint_num_gpus": 1},
{"entrypoint_memory": 4},
{"entrypoint_resources": {"Custom": 1}},
],
)
async def test_cuda_visible_devices(self, job_manager, resource_kwarg, env_vars):
"""Check CUDA_VISIBLE_DEVICES behavior introduced in #24546.
Should not be set in the driver, but should be set in tasks.
We test a variety of `env_vars` parameters due to custom parsing logic
that caused https://github.com/ray-project/ray/issues/25086.
If the user specifies a resource, we should not use the CUDA_VISIBLE_DEVICES
logic. Instead, the behavior should match that of the user specifying
resources for any other actor. So CUDA_VISIBLE_DEVICES should be set in the
driver and tasks.
"""
run_cmd = f"python {_driver_script_path('check_cuda_devices.py')}"
runtime_env = {"env_vars": env_vars}
if resource_kwarg:
run_cmd = "RAY_TEST_RESOURCES_SPECIFIED=1 " + run_cmd
job_id = await job_manager.submit_job(
entrypoint=run_cmd,
runtime_env=runtime_env,
**resource_kwarg,
)
await async_wait_for_condition(
check_job_succeeded, job_manager=job_manager, job_id=job_id
)
@pytest.mark.asyncio
| TestRuntimeEnv |
python | scipy__scipy | scipy/stats/tests/test_discrete_distns.py | {
"start": 22766,
"end": 24720
} | class ____:
@pytest.mark.parametrize('x, n, a, b, ref',
[[5, 5e6, 5, 20, 1.1520944824139114e-107],
[100, 50, 5, 20, 0.002855762954310226],
[10000, 1000, 5, 20, 1.9648515726019154e-05]])
def test_betanbinom_pmf(self, x, n, a, b, ref):
# test that PMF stays accurate in the distribution tails
# reference values computed with mpmath
# from mpmath import mp
# mp.dps = 500
# def betanbinom_pmf(k, n, a, b):
# k = mp.mpf(k)
# a = mp.mpf(a)
# b = mp.mpf(b)
# n = mp.mpf(n)
# return float(mp.binomial(n + k - mp.one, k)
# * mp.beta(a + n, b + k) / mp.beta(a, b))
assert_allclose(betanbinom.pmf(x, n, a, b), ref, rtol=1e-10)
@pytest.mark.parametrize('n, a, b, ref',
[[10000, 5000, 50, 0.12841520515722202],
[10, 9, 9, 7.9224400871459695],
[100, 1000, 10, 1.5849602176622748]])
def test_betanbinom_kurtosis(self, n, a, b, ref):
# reference values were computed via mpmath
# from mpmath import mp
# def kurtosis_betanegbinom(n, a, b):
# n = mp.mpf(n)
# a = mp.mpf(a)
# b = mp.mpf(b)
# four = mp.mpf(4.)
# mean = n * b / (a - mp.one)
# var = (n * b * (n + a - 1.) * (a + b - 1.)
# / ((a - 2.) * (a - 1.)**2.))
# def f(k):
# return (mp.binomial(n + k - mp.one, k)
# * mp.beta(a + n, b + k) / mp.beta(a, b)
# * (k - mean)**four)
# fourth_moment = mp.nsum(f, [0, mp.inf])
# return float(fourth_moment/var**2 - 3.)
assert_allclose(betanbinom.stats(n, a, b, moments="k"),
ref, rtol=3e-15)
| TestBetaNBinom |
python | getsentry__sentry | tests/sentry/utils/test_http.py | {
"start": 247,
"end": 1022
} | class ____(unittest.TestCase):
def test_without_path(self) -> None:
assert absolute_uri() == options.get("system.url-prefix")
def test_override_url_prefix(self) -> None:
assert absolute_uri("/foo/bar", url_prefix="http://foobar/") == "http://foobar/foo/bar"
def test_with_path(self) -> None:
assert absolute_uri("/foo/bar") == "{}/foo/bar".format(options.get("system.url-prefix"))
def test_hostname_present(self) -> None:
assert (
absolute_uri("https://orgslug.sentry.io/foo/bar") == "https://orgslug.sentry.io/foo/bar"
)
assert (
absolute_uri("https://orgslug.sentry.io/foo/bar", url_prefix="http://foobar/")
== "https://orgslug.sentry.io/foo/bar"
)
| AbsoluteUriTest |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1146386,
"end": 1146664
} | class ____(VegaLiteSchema):
"""ScaleInvalidDataShowAsstrokeWidth schema wrapper."""
_schema = {"$ref": '#/definitions/ScaleInvalidDataShowAs<"strokeWidth">'}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| ScaleInvalidDataShowAsstrokeWidth |
python | django__django | tests/staticfiles_tests/test_management.py | {
"start": 16815,
"end": 17262
} | class ____(TestNoFilesCreated, CollectionTestCase):
"""
Test ``--dry-run`` option for ``collectstatic`` management command.
"""
def run_collectstatic(self):
super().run_collectstatic(dry_run=True)
@override_settings(
STORAGES={
**settings.STORAGES,
STATICFILES_STORAGE_ALIAS: {
"BACKEND": "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
},
}
)
| TestCollectionDryRun |
python | ZoranPandovski__al-go-rithms | dp/Lowest_Common_Ancestor/Python/lcs.py | {
"start": 39,
"end": 1864
} | class ____:
# Constructor to create a new binary node
def __init__(self, key):
self.key = key
self.left = None
self.right = None
# Finds the path from root node to given root of the tree.
# Stores the path in a list path[], returns true if path
# exists otherwise false
def findPath( root, path, k):
# Baes Case
if root is None:
return False
# Store this node is path vector. The node will be
# removed if not in path from root to k
path.append(root.key)
# See if the k is same as root's key
if root.key == k :
return True
# Check if k is found in left or right sub-tree
if ((root.left != None and findPath(root.left, path, k)) or
(root.right!= None and findPath(root.right, path, k))):
return True
# If not present in subtree rooted with root, remove
# root from path and return False
path.pop()
return False
# Returns LCA if node n1 , n2 are present in the given
# binary tre otherwise return -1
def findLCA(root, n1, n2):
# To store paths to n1 and n2 fromthe root
path1 = []
path2 = []
# Find paths from root to n1 and root to n2.
# If either n1 or n2 is not present , return -1
if (not findPath(root, path1, n1) or not findPath(root, path2, n2)):
return -1
# Compare the paths to get the first different value
i = 0
while(i < len(path1) and i < len(path2)):
if path1[i] != path2[i]:
break
i += 1
return path1[i-1]
# Driver program to test above function
# Let's create the Binary Tree shown in above diagram
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
root.right.left = Node(6)
root.right.right = Node(7)
print("LCA(4, 5) = %d" %(findLCA(root, 4, 5,)))
print("LCA(4, 6) = %d" %(findLCA(root, 4, 6)))
print("LCA(3, 4) = %d" %(findLCA(root,3,4)))
print("LCA(2, 4) = %d" %(findLCA(root,2, 4)))
| Node |
python | GoogleCloudPlatform__python-docs-samples | service_extensions/callouts/add_header/service_pb2_grpc.py | {
"start": 2045,
"end": 4570
} | class ____(object):
"""[#protodoc-title: External processing service]
A service that can access and modify HTTP requests and responses
as part of a filter chain.
The overall external processing protocol works like this:
1. Envoy sends to the service information about the HTTP request.
2. The service sends back a ProcessingResponse message that directs Envoy
to either stop processing, continue without it, or send it the
next chunk of the message body.
3. If so requested, Envoy sends the server chunks of the message body,
or the entire body at once. In either case, the server sends back
a ProcessingResponse after each message it receives.
4. If so requested, Envoy sends the server the HTTP trailers,
and the server sends back a ProcessingResponse.
5. At this point, request processing is done, and we pick up again
at step 1 when Envoy receives a response from the upstream server.
6. At any point above, if the server closes the gRPC stream cleanly,
then Envoy proceeds without consulting the server.
7. At any point above, if the server closes the gRPC stream with an error,
then Envoy returns a 500 error to the client, unless the filter
was configured to ignore errors.
In other words, the process is a request/response conversation, but
using a gRPC stream to make it easier for the server to
maintain state.
"""
def Process(self, request_iterator, context):
"""This begins the bidirectional stream that Envoy will use to
give the server control over what the filter does. The actual
protocol is described by the ProcessingRequest and ProcessingResponse
messages below.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_ExternalProcessorServicer_to_server(servicer, server):
rpc_method_handlers = {
"Process": grpc.stream_stream_rpc_method_handler(
servicer.Process,
request_deserializer=service__pb2.ProcessingRequest.FromString,
response_serializer=service__pb2.ProcessingResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"envoy.service.ext_proc.v3.ExternalProcessor", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
| ExternalProcessorServicer |
python | huggingface__transformers | src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py | {
"start": 1740,
"end": 8335
} | class ____(PreTrainedTokenizer):
"""
This tokenizer inherits from [`PreTrainedTokenizer`] and is based on Japanese special Sub-Word-Encoding that is
used in this repository (https://github.com/tanreinama/Japanese-BPEEncoder_V2). Check the repository for details.
Japanese has a relatively large vocabulary and there is no separation between words. Furthermore, the language is a
combination of hiragana, katakana, and kanji, and variants such as "1" and "①" are often used. In order to cope
with these, this tokenizer has the following features
- Subword-by-subword segmentation, which is intermediate between byte strings and morphological analysis.
- BPEs are created for each Kanji, Hiragana, and Katakana character, and there are no BPEs that cross character
types, such as Kanji + Hiragana or Hiragana + Katakana.
- All-byte encoding that does not require <unk>.
- Independent of UTF codes such as 2-byte and 3-byte characters
- Conversion of heterographs to the same token_id
- Emoji and Emoticon are grouped into 12 types as special tags.
Example:
```python
>>> from transformers import GPTNeoXJapaneseTokenizer
>>> tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
>>> # You can confirm both 慶応 and 慶應 are encoded to 17749
>>> tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"]
[30014, 26883, 26638, 27228, 25, 26650, 31732, 31679, 27809, 26638, 17749, 31592, 17749, 31593, 321, 1281]
>>> # Both 慶応 and 慶應 are decoded to 慶応
>>> tokenizer.decode(tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"])
'吾輩は猫である🐯。実は慶応(慶応)大学出身'
```
Args:
vocab_file (`str`):
File containing the vocabulary.
emoji_file (`str`):
File containing the emoji.
unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The token used for padding
bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The end of sequence token.
do_clean_text (`bool`, *optional*, defaults to `False`):
Whether or not to clean text for URL, EMAIL, TEL, Japanese DATE and Japanese PRICE.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
emoji_file,
unk_token="<|endoftext|>",
pad_token="<|endoftext|>",
bos_token="<|startoftext|>",
eos_token="<|endoftext|>",
do_clean_text=False,
**kwargs,
):
if not os.path.isfile(vocab_file):
raise ValueError(
f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
)
if not os.path.isfile(emoji_file):
raise ValueError(
f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
)
self.do_clean_text = do_clean_text
self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
self.subword_tokenizer = SubWordJapaneseTokenizer(
vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
)
super().__init__(
unk_token=unk_token,
pad_token=pad_token,
bos_token=bos_token,
eos_token=eos_token,
do_clean_text=do_clean_text,
special_tokens_pattern="none",
**kwargs,
)
@property
def vocab_size(self):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab)
def get_vocab(self):
return dict(self.raw_vocab, **self.added_tokens_encoder)
def _tokenize(self, text):
return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.subword_tokenizer.convert_id_to_token(index)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
out_string = "".join(tokens).strip()
return out_string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
index = 0
if os.path.isdir(save_directory):
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
emoji_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
)
else:
vocab_file = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
emoji_file = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(vocab_file, "w", encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!"
)
index = token_index
writer.write(",".join(token) + "\n")
index += 1
with open(emoji_file, "w", encoding="utf-8") as writer:
json.dump(self.emoji, writer)
return vocab_file, emoji_file
| GPTNeoXJapaneseTokenizer |
python | numba__llvmlite | llvmlite/ir/values.py | {
"start": 10079,
"end": 10257
} | class ____(object):
"""
The base class for all values.
"""
def __repr__(self):
return "<ir.%s type='%s' ...>" % (self.__class__.__name__, self.type,)
| Value |
python | scipy__scipy | scipy/special/tests/test_basic.py | {
"start": 76871,
"end": 80010
} | class ____:
def test_erf(self):
er = special.erf(.25)
assert_allclose(er, 0.2763263902, atol=1.5e-8, rtol=0)
def test_erf_zeros(self):
erz = special.erf_zeros(5)
erzr = array([1.45061616+1.88094300j,
2.24465928+2.61657514j,
2.83974105+3.17562810j,
3.33546074+3.64617438j,
3.76900557+4.06069723j])
assert_allclose(erz, erzr, atol=1.5e-4, rtol=0)
def _check_variant_func(self, func, other_func, rtol, atol=0):
rng = np.random.RandomState(1234)
n = 10000
x = rng.pareto(0.02, n) * (2*rng.randint(0, 2, n) - 1)
y = rng.pareto(0.02, n) * (2*rng.randint(0, 2, n) - 1)
z = x + 1j*y
with np.errstate(all='ignore'):
w = other_func(z)
w_real = other_func(x).real
mask = np.isfinite(w)
w = w[mask]
z = z[mask]
mask = np.isfinite(w_real)
w_real = w_real[mask]
x = x[mask]
# test both real and complex variants
assert_func_equal(func, w, z, rtol=rtol, atol=atol)
assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)
def test_erfc_consistent(self):
self._check_variant_func(
cephes.erfc,
lambda z: 1 - cephes.erf(z),
rtol=1e-12,
atol=1e-14 # <- the test function loses precision
)
def test_erfcx_consistent(self):
self._check_variant_func(
cephes.erfcx,
lambda z: np.exp(z*z) * cephes.erfc(z),
rtol=1e-12
)
def test_erfi_consistent(self):
self._check_variant_func(
cephes.erfi,
lambda z: -1j * cephes.erf(1j*z),
rtol=1e-12
)
def test_dawsn_consistent(self):
self._check_variant_func(
cephes.dawsn,
lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),
rtol=1e-12
)
def test_erf_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, -1, 1]
assert_allclose(special.erf(vals), expected, rtol=1e-15)
def test_erfc_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, 2, 0]
assert_allclose(special.erfc(vals), expected, rtol=1e-15)
def test_erfcx_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, np.inf, 0]
assert_allclose(special.erfcx(vals), expected, rtol=1e-15)
def test_erfi_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, -np.inf, np.inf]
assert_allclose(special.erfi(vals), expected, rtol=1e-15)
def test_dawsn_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan, -0.0, 0.0]
assert_allclose(special.dawsn(vals), expected, rtol=1e-15)
def test_wofz_nan_inf(self):
vals = [np.nan, -np.inf, np.inf]
expected = [np.nan + np.nan * 1.j, 0.-0.j, 0.+0.j]
assert_allclose(special.wofz(vals), expected, rtol=1e-15)
| TestErf |
python | etianen__django-reversion | tests/test_app/tests/test_api.py | {
"start": 2438,
"end": 3973
} | class ____(TestModelMixin, TestBase):
def testCreateRevision(self):
with reversion.create_revision():
obj = TestModel.objects.create()
self.assertSingleRevision((obj,))
def testCreateRevisionNested(self):
with reversion.create_revision():
with reversion.create_revision():
obj = TestModel.objects.create()
self.assertSingleRevision((obj,))
def testCreateRevisionEmpty(self):
with reversion.create_revision():
pass
self.assertNoRevision()
def testCreateRevisionException(self):
try:
with reversion.create_revision():
TestModel.objects.create()
raise Exception("Boom!")
except Exception:
pass
self.assertNoRevision()
def testCreateRevisionDecorator(self):
obj = reversion.create_revision()(TestModel.objects.create)()
self.assertSingleRevision((obj,))
def testPreRevisionCommitSignal(self):
_callback = MagicMock()
reversion.signals.pre_revision_commit.connect(_callback)
with reversion.create_revision():
TestModel.objects.create()
self.assertEqual(_callback.call_count, 1)
def testPostRevisionCommitSignal(self):
_callback = MagicMock()
reversion.signals.post_revision_commit.connect(_callback)
with reversion.create_revision():
TestModel.objects.create()
self.assertEqual(_callback.call_count, 1)
| CreateRevisionTest |
python | doocs__leetcode | lcci/02.02.Kth Node From End of List/Solution.py | {
"start": 136,
"end": 393
} | class ____:
def kthToLast(self, head: ListNode, k: int) -> int:
slow = fast = head
for _ in range(k):
fast = fast.next
while fast:
slow = slow.next
fast = fast.next
return slow.val
| Solution |
python | ray-project__ray | python/ray/serve/_private/replica_result.py | {
"start": 593,
"end": 1814
} | class ____(ABC):
@abstractmethod
async def get_rejection_response(self) -> Optional[ReplicaQueueLengthInfo]:
raise NotImplementedError
@abstractmethod
def get(self, timeout_s: Optional[float]):
raise NotImplementedError
@abstractmethod
async def get_async(self):
raise NotImplementedError
@abstractmethod
def __next__(self):
raise NotImplementedError
@abstractmethod
async def __anext__(self):
raise NotImplementedError
@abstractmethod
def add_done_callback(self, callback: Callable):
raise NotImplementedError
@abstractmethod
def cancel(self):
raise NotImplementedError
@abstractmethod
def to_object_ref(self, timeout_s: Optional[float]) -> ray.ObjectRef:
raise NotImplementedError
@abstractmethod
async def to_object_ref_async(self) -> ray.ObjectRef:
raise NotImplementedError
@abstractmethod
def to_object_ref_gen(self) -> ray.ObjectRefGenerator:
# NOTE(edoakes): there is only a sync version of this method because it
# does not block like `to_object_ref` (so there's also no timeout argument).
raise NotImplementedError
| ReplicaResult |
python | encode__django-rest-framework | rest_framework/throttling.py | {
"start": 6370,
"end": 8067
} | class ____(SimpleRateThrottle):
"""
Limits the rate of API calls by different amounts for various parts of
the API. Any view that has the `throttle_scope` property set will be
throttled. The unique cache key will be generated by concatenating the
user id of the request, and the scope of the view being accessed.
"""
scope_attr = 'throttle_scope'
def __init__(self):
# Override the usual SimpleRateThrottle, because we can't determine
# the rate until called by the view.
pass
def allow_request(self, request, view):
# We can only determine the scope once we're called by the view.
self.scope = getattr(view, self.scope_attr, None)
# If a view does not have a `throttle_scope` always allow the request
if not self.scope:
return True
# Determine the allowed request rate as we normally would during
# the `__init__` call.
self.rate = self.get_rate()
self.num_requests, self.duration = self.parse_rate(self.rate)
# We can now proceed as normal.
return super().allow_request(request, view)
def get_cache_key(self, request, view):
"""
If `view.throttle_scope` is not set, don't apply this throttle.
Otherwise generate the unique cache key by concatenating the user id
with the `.throttle_scope` property of the view.
"""
if request.user and request.user.is_authenticated:
ident = request.user.pk
else:
ident = self.get_ident(request)
return self.cache_format % {
'scope': self.scope,
'ident': ident
}
| ScopedRateThrottle |
python | kamyu104__LeetCode-Solutions | Python/minimum-degree-of-a-connected-trio-in-a-graph.py | {
"start": 33,
"end": 698
} | class ____(object):
def minTrioDegree(self, n, edges):
"""
:type n: int
:type edges: List[List[int]]
:rtype: int
"""
adj = [set() for _ in xrange(n+1)]
degree = [0]*(n+1)
for u, v in edges:
adj[min(u, v)].add(max(u, v))
degree[u] += 1
degree[v] += 1
result = float("inf")
for u in xrange(1, n+1):
for v in adj[u]:
for w in adj[u]:
if v < w and w in adj[v]:
result = min(result, degree[u]+degree[v]+degree[w] - 6)
return result if result != float("inf") else -1
| Solution |
python | huggingface__transformers | src/transformers/models/patchtst/configuration_patchtst.py | {
"start": 841,
"end": 12309
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of an [`PatchTSTModel`]. It is used to instantiate an
PatchTST model according to the specified arguments, defining the model architecture.
[ibm/patchtst](https://huggingface.co/ibm/patchtst) architecture.
Configuration objects inherit from [`PreTrainedConfig`] can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
num_input_channels (`int`, *optional*, defaults to 1):
The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
multivariate targets.
context_length (`int`, *optional*, defaults to 32):
The context length of the input sequence.
distribution_output (`str`, *optional*, defaults to `"student_t"`):
The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or
"negative_binomial".
loss (`str`, *optional*, defaults to `"mse"`):
The loss function for the model corresponding to the `distribution_output` head. For parametric
distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared
error "mse".
patch_length (`int`, *optional*, defaults to 1):
Define the patch length of the patchification process.
patch_stride (`int`, *optional*, defaults to 1):
Define the stride of the patchification process.
num_hidden_layers (`int`, *optional*, defaults to 3):
Number of hidden layers.
d_model (`int`, *optional*, defaults to 128):
Dimensionality of the transformer layers.
num_attention_heads (`int`, *optional*, defaults to 4):
Number of attention heads for each attention layer in the Transformer encoder.
share_embedding (`bool`, *optional*, defaults to `True`):
Sharing the input embedding across all channels.
channel_attention (`bool`, *optional*, defaults to `False`):
Activate channel attention block in the Transformer to allow channels to attend each other.
ffn_dim (`int`, *optional*, defaults to 512):
Dimension of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
norm_type (`str` , *optional*, defaults to `"batchnorm"`):
Normalization at each Transformer layer. Can be `"batchnorm"` or `"layernorm"`.
norm_eps (`float`, *optional*, defaults to 1e-05):
A value added to the denominator for numerical stability of normalization.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for the attention probabilities.
positional_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability in the positional embedding layer.
path_dropout (`float`, *optional*, defaults to 0.0):
The dropout path in the residual block.
ff_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability used between the two layers of the feed-forward networks.
bias (`bool`, *optional*, defaults to `True`):
Whether to add bias in the feed-forward networks.
activation_function (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function (string) in the Transformer.`"gelu"` and `"relu"` are supported.
pre_norm (`bool`, *optional*, defaults to `True`):
Normalization is applied before self-attention if pre_norm is set to `True`. Otherwise, normalization is
applied after residual block.
positional_encoding_type (`str`, *optional*, defaults to `"sincos"`):
Positional encodings. Options `"random"` and `"sincos"` are supported.
use_cls_token (`bool`, *optional*, defaults to `False`):
Whether cls token is used.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated normal weight initialization distribution.
share_projection (`bool`, *optional*, defaults to `True`):
Sharing the projection layer across different channels in the forecast head.
scaling (`Union`, *optional*, defaults to `"std"`):
Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
scaler is set to "mean".
do_mask_input (`bool`, *optional*):
Apply masking during the pretraining.
mask_type (`str`, *optional*, defaults to `"random"`):
Masking type. Only `"random"` and `"forecast"` are currently supported.
random_mask_ratio (`float`, *optional*, defaults to 0.5):
Masking ratio applied to mask the input data during random pretraining.
num_forecast_mask_patches (`int` or `list`, *optional*, defaults to `[2]`):
Number of patches to be masked at the end of each batch sample. If it is an integer,
all the samples in the batch will have the same number of masked patches. If it is a list,
samples in the batch will be randomly masked by numbers defined in the list. This argument is only used
for forecast pretraining.
channel_consistent_masking (`bool`, *optional*, defaults to `False`):
If channel consistent masking is True, all the channels will have the same masking pattern.
unmasked_channel_indices (`list`, *optional*):
Indices of channels that are not masked during pretraining. Values in the list are number between 1 and
`num_input_channels`
mask_value (`int`, *optional*, defaults to 0):
Values in the masked patches will be filled by `mask_value`.
pooling_type (`str`, *optional*, defaults to `"mean"`):
Pooling of the embedding. `"mean"`, `"max"` and `None` are supported.
head_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for head.
prediction_length (`int`, *optional*, defaults to 24):
The prediction horizon that the model will output.
num_targets (`int`, *optional*, defaults to 1):
Number of targets for regression and classification tasks. For classification, it is the number of
classes.
output_range (`list`, *optional*):
Output range for regression task. The range of output values can be set to enforce the model to produce
values within a range.
num_parallel_samples (`int`, *optional*, defaults to 100):
The number of samples is generated in parallel for probabilistic prediction.
```python
>>> from transformers import PatchTSTConfig, PatchTSTModel
>>> # Initializing an PatchTST configuration with 12 time steps for prediction
>>> configuration = PatchTSTConfig(prediction_length=12)
>>> # Randomly initializing a model (with random weights) from the configuration
>>> model = PatchTSTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "patchtst"
attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "num_attention_heads",
"num_hidden_layers": "num_hidden_layers",
}
def __init__(
self,
# time series specific configuration
num_input_channels: int = 1,
context_length: int = 32,
distribution_output: str = "student_t",
loss: str = "mse",
# PatchTST arguments
patch_length: int = 1,
patch_stride: int = 1,
# Transformer architecture configuration
num_hidden_layers: int = 3,
d_model: int = 128,
num_attention_heads: int = 4,
share_embedding: bool = True,
channel_attention: bool = False,
ffn_dim: int = 512,
norm_type: str = "batchnorm",
norm_eps: float = 1e-05,
attention_dropout: float = 0.0,
positional_dropout: float = 0.0,
path_dropout: float = 0.0,
ff_dropout: float = 0.0,
bias: bool = True,
activation_function: str = "gelu",
pre_norm: bool = True,
positional_encoding_type: str = "sincos",
use_cls_token: bool = False,
init_std: float = 0.02,
share_projection: bool = True,
scaling: Optional[Union[str, bool]] = "std",
# mask pretraining
do_mask_input: Optional[bool] = None,
mask_type: str = "random",
random_mask_ratio: float = 0.5,
num_forecast_mask_patches: Optional[Union[list[int], int]] = [2],
channel_consistent_masking: Optional[bool] = False,
unmasked_channel_indices: Optional[list[int]] = None,
mask_value: int = 0,
# head
pooling_type: str = "mean",
head_dropout: float = 0.0,
prediction_length: int = 24,
num_targets: int = 1,
output_range: Optional[list] = None,
# distribution head
num_parallel_samples: int = 100,
**kwargs,
):
# time series specific configuration
self.context_length = context_length
self.num_input_channels = num_input_channels # n_vars
self.loss = loss
self.distribution_output = distribution_output
self.num_parallel_samples = num_parallel_samples
# Transformer architecture configuration
self.d_model = d_model
self.num_attention_heads = num_attention_heads
self.ffn_dim = ffn_dim
self.num_hidden_layers = num_hidden_layers
self.attention_dropout = attention_dropout
self.share_embedding = share_embedding
self.channel_attention = channel_attention
self.norm_type = norm_type
self.norm_eps = norm_eps
self.positional_dropout = positional_dropout
self.path_dropout = path_dropout
self.ff_dropout = ff_dropout
self.bias = bias
self.activation_function = activation_function
self.pre_norm = pre_norm
self.positional_encoding_type = positional_encoding_type
self.use_cls_token = use_cls_token
self.init_std = init_std
self.scaling = scaling
# PatchTST parameters
self.patch_length = patch_length
self.patch_stride = patch_stride
# Mask pretraining
self.do_mask_input = do_mask_input
self.mask_type = mask_type
self.random_mask_ratio = random_mask_ratio # for random masking
self.num_forecast_mask_patches = num_forecast_mask_patches # for forecast masking
self.channel_consistent_masking = channel_consistent_masking
self.unmasked_channel_indices = unmasked_channel_indices
self.mask_value = mask_value
# general head params
self.pooling_type = pooling_type
self.head_dropout = head_dropout
# For prediction head
self.share_projection = share_projection
self.prediction_length = prediction_length
# For prediction and regression head
self.num_parallel_samples = num_parallel_samples
# Regression
self.num_targets = num_targets
self.output_range = output_range
super().__init__(**kwargs)
__all__ = ["PatchTSTConfig"]
| PatchTSTConfig |
python | numpy__numpy | doc/neps/nep-0016-benchmark.py | {
"start": 173,
"end": 211
} | class ____(ArrayBase):
pass
| ABCArray1 |
python | walkccc__LeetCode | solutions/821. Shortest Distance to a Character/821.py | {
"start": 0,
"end": 334
} | class ____:
def shortestToChar(self, s: str, c: str) -> list[int]:
n = len(s)
ans = [0] * n
prev = -n
for i in range(n):
if s[i] == c:
prev = i
ans[i] = i - prev
for i in range(prev - 1, -1, -1):
if s[i] == c:
prev = i
ans[i] = min(ans[i], prev - i)
return ans
| Solution |
python | ray-project__ray | python/ray/serve/_private/deployment_scheduler.py | {
"start": 6531,
"end": 8186
} | class ____:
deployment_id: DeploymentID
scheduling_policy: Any
actor_resources: Optional[Resources] = None
placement_group_bundles: Optional[List[Resources]] = None
placement_group_strategy: Optional[str] = None
max_replicas_per_node: Optional[int] = None
@property
def required_resources(self) -> Resources:
"""The resources required to schedule a replica of this deployment on a node.
If this replicas uses a strict pack placement group, the
required resources is the sum of the placement group bundles.
Otherwise, required resources is simply the actor resources.
"""
if (
self.placement_group_bundles is not None
and self.placement_group_strategy == "STRICT_PACK"
):
return sum(self.placement_group_bundles, Resources())
else:
required = self.actor_resources
# Using implicit resource (resources that every node
# implicitly has and total is 1)
# to limit the number of replicas on a single node.
if self.max_replicas_per_node:
implicit_resource = (
f"{ray._raylet.IMPLICIT_RESOURCE_PREFIX}"
f"{self.deployment_id.app_name}:{self.deployment_id.name}"
)
required[implicit_resource] = 1.0 / self.max_replicas_per_node
return required
def is_non_strict_pack_pg(self) -> bool:
return (
self.placement_group_bundles is not None
and self.placement_group_strategy != "STRICT_PACK"
)
@dataclass
| DeploymentSchedulingInfo |
python | langchain-ai__langchain | libs/langchain_v1/langchain/agents/middleware/human_in_the_loop.py | {
"start": 1067,
"end": 1474
} | class ____(TypedDict):
"""Policy for reviewing a HITL request."""
action_name: str
"""Name of the action associated with this review configuration."""
allowed_decisions: list[DecisionType]
"""The decisions that are allowed for this request."""
args_schema: NotRequired[dict[str, Any]]
"""JSON schema for the args associated with the action, if edits are allowed."""
| ReviewConfig |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_core.py | {
"start": 10653,
"end": 14699
} | class ____(FSDPTest):
@skip_if_lt_x_gpu(2)
@parametrize("cuda_first", [False, True])
def test_pre_backward_hook_registration(self, cuda_first: bool):
"""Tests that FSDP pre-backward hooks are registered on forward pass
outputs."""
fsdp_kwargs = {"device_id": device_type.type}
fsdp_model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_BEFORE if cuda_first else DEVICEInitMode.DEVICE_AFTER,
fsdp_kwargs,
)
self._test_pre_backward_hook_registration(fsdp_model)
@skip_if_lt_x_gpu(2)
def test_pre_backward_hook_registration_after_state_dict(self):
"""Tests that FSDP pre-backward hooks are registered on forward pass
outputs after saving and loading the model from a checkpoint."""
fsdp_kwargs = {"device_id": device_type.type}
fsdp_model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_AFTER,
fsdp_kwargs,
)
self._train_for_several_steps(fsdp_model, num_steps=2, autocast=False)
state_dict = fsdp_model.state_dict()
fsdp_model.load_state_dict(state_dict)
self._test_pre_backward_hook_registration(fsdp_model)
def _test_pre_backward_hook_registration(self, model):
optim = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
optim.zero_grad()
# Inputs always cuda, as computation happens on CUDA device only
input = model.module.get_input(device_type)
output = model(*input)
# this is pre-bwd hook
self.assertEqual(len(output._backward_hooks), 1)
loss = model.module.get_loss(input, output).to(device_type.type)
loss.backward()
# It doesn't get removed
self.assertEqual(len(output._backward_hooks), 1)
optim.step()
self.assertEqual(len(output._backward_hooks), 1)
@skip_if_lt_x_gpu(2)
@parametrize("cuda_first", [False, True])
@parametrize("mixed_precision", [True, False])
def test_register_functions_called(self, cuda_first: bool, mixed_precision: bool):
"""Tests that ``_register_{pre|post}_backward_hooks()`` are called
during the FSDP forward."""
fsdp_kwargs = {"device_id": device_type.type}
if mixed_precision:
fsdp_kwargs["mixed_precision"] = MixedPrecision()
fsdp_model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_BEFORE if cuda_first else DEVICEInitMode.DEVICE_AFTER,
fsdp_kwargs,
)
input = fsdp_model.module.get_input(device_type)
# Since `_register_pre_backward_hooks()` modifies the forward output,
# we cannot directly mock it. We implement our own counter instead.
orig_register_pre_backward_hooks = (
torch.distributed.fsdp._runtime_utils._register_pre_backward_hooks
)
register_pre_backward_hooks_call_count = 0
def _register_pre_backward_hooks_with_count(*args, **kwargs):
nonlocal register_pre_backward_hooks_call_count
register_pre_backward_hooks_call_count += 1
return orig_register_pre_backward_hooks(*args, **kwargs)
with (
mock.patch(
"torch.distributed.fsdp._runtime_utils._register_pre_backward_hooks",
_register_pre_backward_hooks_with_count,
),
mock.patch(
"torch.distributed.fsdp._runtime_utils._register_post_backward_hook"
) as register_post_bwd_mock,
):
self.assertEqual(register_pre_backward_hooks_call_count, 0)
self.assertFalse(register_post_bwd_mock.called)
fsdp_model(*input)
self.assertTrue(register_pre_backward_hooks_call_count > 0)
self.assertTrue(register_post_bwd_mock.called)
| TestHooks |
python | scipy__scipy | scipy/optimize/tests/test_nonlin.py | {
"start": 17484,
"end": 20250
} | class ____:
""" Test case for a simple constrained entropy maximization problem
(the machine translation example of Berger et al in
Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
"""
def test_broyden1(self):
x = nonlin.broyden1(F, F.xin, iter=12, alpha=1)
assert_(nonlin.norm(x) < 1e-9)
assert_(nonlin.norm(F(x)) < 1e-9)
def test_broyden2(self):
x = nonlin.broyden2(F, F.xin, iter=12, alpha=1)
assert_(nonlin.norm(x) < 1e-9)
assert_(nonlin.norm(F(x)) < 1e-9)
def test_anderson(self):
x = nonlin.anderson(F, F.xin, iter=12, alpha=0.03, M=5)
assert_(nonlin.norm(x) < 0.33)
def test_linearmixing(self):
x = nonlin.linearmixing(F, F.xin, iter=60, alpha=0.5)
assert_(nonlin.norm(x) < 1e-7)
assert_(nonlin.norm(F(x)) < 1e-7)
def test_exciting(self):
x = nonlin.excitingmixing(F, F.xin, iter=20, alpha=0.5)
assert_(nonlin.norm(x) < 1e-5)
assert_(nonlin.norm(F(x)) < 1e-5)
def test_diagbroyden(self):
x = nonlin.diagbroyden(F, F.xin, iter=11, alpha=1)
assert_(nonlin.norm(x) < 1e-8)
assert_(nonlin.norm(F(x)) < 1e-8)
def test_root_broyden1(self):
res = root(F, F.xin, method='broyden1',
options={'nit': 12, 'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-9)
assert_(nonlin.norm(res.fun) < 1e-9)
def test_root_broyden2(self):
res = root(F, F.xin, method='broyden2',
options={'nit': 12, 'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-9)
assert_(nonlin.norm(res.fun) < 1e-9)
def test_root_anderson(self):
res = root(F, F.xin, method='anderson',
options={'nit': 12,
'jac_options': {'alpha': 0.03, 'M': 5}})
assert_(nonlin.norm(res.x) < 0.33)
def test_root_linearmixing(self):
res = root(F, F.xin, method='linearmixing',
options={'nit': 60,
'jac_options': {'alpha': 0.5}})
assert_(nonlin.norm(res.x) < 1e-7)
assert_(nonlin.norm(res.fun) < 1e-7)
def test_root_excitingmixing(self):
res = root(F, F.xin, method='excitingmixing',
options={'nit': 20,
'jac_options': {'alpha': 0.5}})
assert_(nonlin.norm(res.x) < 1e-5)
assert_(nonlin.norm(res.fun) < 1e-5)
def test_root_diagbroyden(self):
res = root(F, F.xin, method='diagbroyden',
options={'nit': 11,
'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-8)
assert_(nonlin.norm(res.fun) < 1e-8)
| TestNonlinOldTests |
python | jina-ai__jina | tests/unit/orchestrate/flow/flow-orchestrate/test_flow_http_extend.py | {
"start": 94,
"end": 1029
} | class ____(Executor):
@requests(on='/foo')
def foo(self, docs, **kwargs):
pass
def test_flow_debug_endpoints():
f1 = Flow(protocol='http', no_debug_endpoints=True, no_crud_endpoints=True).add(
uses=MyExec
)
with pytest.raises(BadClient):
with f1:
f1.post('/foo')
f2 = Flow(protocol='http', no_crud_endpoints=True).add(uses=MyExec)
with f2:
f2.post('/foo')
def test_flow_expose_endpoints():
f1 = Flow(protocol='http', no_debug_endpoints=True, no_crud_endpoints=True).add(
uses=MyExec
)
import requests
with f1:
r = requests.get(f'http://localhost:{f1.port}/foo')
assert r.status_code == 404
f1.expose_endpoint('/foo')
with f1:
r = requests.post(
f'http://localhost:{f1.port}/foo',
json={'data': [{'text': 'hello'}, {'text': 'world'}]},
)
assert r.status_code == 200
| MyExec |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 4424,
"end": 7600
} | class ____(FilterContextMixin, PrivateViewMixin, ListView):
"""Project dashboard."""
model = Project
template_name = "projects/project_dashboard.html"
filterset_class = ProjectListFilterSet
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Set the default search to search files instead of projects
context["type"] = "file"
context["filter"] = self.get_filterset()
context["project_list"] = self.get_filtered_queryset()
# Alternatively, dynamically override super()-derived `project_list` context_data
# context[self.get_context_object_name(filter.qs)] = filter.qs
template_name = None
projects = AdminPermission.projects(user=self.request.user, admin=True)
n_projects = projects.count()
# We can't yet back down to another announcement as we don't have
# the ability to evaluate local storage. Until we add the ability to
# dynamically change the announcement, this is going to be the only
# announcement shown.
if n_projects == 0 or (
n_projects < 3 and (timezone.now() - projects.first().pub_date).days < 7
):
template_name = "example-projects.html"
elif n_projects:
template_name = "github-app.html"
elif n_projects and not projects.filter(external_builds_enabled=True).exists():
template_name = "pull-request-previews.html"
elif n_projects and not projects.filter(addons__analytics_enabled=True).exists():
template_name = "traffic-analytics.html"
elif AdminPermission.organizations(
user=self.request.user,
owner=True,
).exists():
template_name = "security-logs.html"
if template_name:
context["announcement"] = f"projects/partials/announcements/{template_name}"
return context
def validate_primary_email(self, user):
"""
Sends a dismissable site notification to this user.
Checks if the user has a primary email or if the primary email
is verified or not. Sends a dismissable notification if
either of the condition is False.
"""
email_qs = user.emailaddress_set.filter(primary=True)
email = email_qs.first()
if not email or not email.verified:
Notification.objects.add(
attached_to=user,
message_id=MESSAGE_EMAIL_VALIDATION_PENDING,
dismissable=True,
)
# NOTE: This method is called twice, on .org it doesn't matter,
# as the queryset is straightforward, but on .com it
# does some extra work that results in several queries.
@lru_cache(maxsize=1)
def get_queryset(self):
return Project.objects.dashboard(self.request.user)
def get(self, request, *args, **kwargs):
self.validate_primary_email(request.user)
return super().get(self, request, *args, **kwargs)
# SuccessMessageMixin is used when we are operating on the Project model itself,
# instead of a related model, where we use ProjectAdminMixin.
| ProjectDashboard |
python | pyca__cryptography | tests/x509/test_x509.py | {
"start": 97450,
"end": 173897
} | class ____:
def test_checks_for_unsupported_extensions(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
private_key = rsa_key_2048
builder = (
x509.CertificateBuilder()
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.public_key(private_key.public_key())
.serial_number(777)
.not_valid_before(datetime.datetime(1999, 1, 1))
.not_valid_after(datetime.datetime(2020, 1, 1))
.add_extension(DummyExtension(), False)
)
with pytest.raises(NotImplementedError):
builder.sign(private_key, hashes.SHA256(), backend)
def test_encode_nonstandard_aia(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
private_key = rsa_key_2048
aia = x509.AuthorityInformationAccess(
[
x509.AccessDescription(
x509.ObjectIdentifier("2.999.7"),
x509.UniformResourceIdentifier("http://example.com"),
),
]
)
builder = (
x509.CertificateBuilder()
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.public_key(private_key.public_key())
.serial_number(777)
.not_valid_before(datetime.datetime(1999, 1, 1))
.not_valid_after(datetime.datetime(2020, 1, 1))
.add_extension(aia, False)
)
builder.sign(private_key, hashes.SHA256(), backend)
def test_encode_nonstandard_sia(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
private_key = rsa_key_2048
sia = x509.SubjectInformationAccess(
[
x509.AccessDescription(
x509.ObjectIdentifier("2.999.7"),
x509.UniformResourceIdentifier("http://example.com"),
),
]
)
builder = (
x509.CertificateBuilder()
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.public_key(private_key.public_key())
.serial_number(777)
.not_valid_before(datetime.datetime(2015, 1, 1))
.not_valid_after(datetime.datetime(2040, 1, 1))
.add_extension(sia, False)
)
cert = builder.sign(private_key, hashes.SHA256(), backend)
ext = cert.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_INFORMATION_ACCESS
)
assert ext.value == sia
def test_subject_dn_asn1_types(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
private_key = rsa_key_2048
name = x509.Name(
[
x509.NameAttribute(NameOID.COMMON_NAME, "mysite.com"),
x509.NameAttribute(NameOID.COUNTRY_NAME, "US"),
x509.NameAttribute(NameOID.LOCALITY_NAME, "value"),
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, "value"),
x509.NameAttribute(NameOID.STREET_ADDRESS, "value"),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, "value"),
x509.NameAttribute(NameOID.ORGANIZATIONAL_UNIT_NAME, "value"),
x509.NameAttribute(NameOID.SERIAL_NUMBER, "value"),
x509.NameAttribute(NameOID.SURNAME, "value"),
x509.NameAttribute(NameOID.GIVEN_NAME, "value"),
x509.NameAttribute(NameOID.TITLE, "value"),
x509.NameAttribute(NameOID.GENERATION_QUALIFIER, "value"),
x509.NameAttribute(NameOID.X500_UNIQUE_IDENTIFIER, "value"),
x509.NameAttribute(NameOID.DN_QUALIFIER, "value"),
x509.NameAttribute(NameOID.PSEUDONYM, "value"),
x509.NameAttribute(NameOID.USER_ID, "value"),
x509.NameAttribute(NameOID.DOMAIN_COMPONENT, "value"),
x509.NameAttribute(NameOID.EMAIL_ADDRESS, "value"),
x509.NameAttribute(NameOID.JURISDICTION_COUNTRY_NAME, "US"),
x509.NameAttribute(
NameOID.JURISDICTION_LOCALITY_NAME, "value"
),
x509.NameAttribute(
NameOID.JURISDICTION_STATE_OR_PROVINCE_NAME, "value"
),
x509.NameAttribute(NameOID.BUSINESS_CATEGORY, "value"),
x509.NameAttribute(NameOID.POSTAL_ADDRESS, "value"),
x509.NameAttribute(NameOID.POSTAL_CODE, "value"),
]
)
cert = (
x509.CertificateBuilder()
.subject_name(name)
.issuer_name(name)
.public_key(private_key.public_key())
.serial_number(777)
.not_valid_before(datetime.datetime(1999, 1, 1))
.not_valid_after(datetime.datetime(2020, 1, 1))
.sign(private_key, hashes.SHA256(), backend)
)
for dn in (cert.subject, cert.issuer):
for oid, asn1_type in TestNameAttribute.EXPECTED_TYPES:
assert dn.get_attributes_for_oid(oid)[0]._type == asn1_type
@pytest.mark.parametrize(
("not_valid_before", "not_valid_after"),
[
[datetime.datetime(1970, 2, 1), datetime.datetime(9999, 1, 1)],
[datetime.datetime(1970, 2, 1), datetime.datetime(9999, 12, 31)],
],
)
def test_extreme_times(
self,
rsa_key_2048: rsa.RSAPrivateKey,
not_valid_before,
not_valid_after,
backend,
):
private_key = rsa_key_2048
builder = (
x509.CertificateBuilder()
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.public_key(private_key.public_key())
.serial_number(777)
.not_valid_before(not_valid_before)
.not_valid_after(not_valid_after)
)
cert = builder.sign(private_key, hashes.SHA256(), backend)
_check_cert_times(
cert,
not_valid_before=not_valid_before,
not_valid_after=not_valid_after,
)
parsed = test_support.test_parse_certificate(
cert.public_bytes(serialization.Encoding.DER)
)
# UTC TIME
assert parsed.not_before_tag == 0x17
# GENERALIZED TIME
assert parsed.not_after_tag == 0x18
def test_rdns_preserve_iteration_order(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
"""
This test checks that RDN ordering is consistent when loading
data from a certificate. Since the underlying RDN is an ASN.1
set these values get lexicographically ordered on encode and
the parsed value won't necessarily be in the same order as
the originally provided list. However, we want to make sure
that the order is always consistent since it confuses people
when it isn't.
"""
name = x509.Name(
[
x509.RelativeDistinguishedName(
[
x509.NameAttribute(NameOID.TITLE, "Test"),
x509.NameAttribute(NameOID.COMMON_NAME, "Multivalue"),
x509.NameAttribute(NameOID.SURNAME, "RDNs"),
]
),
]
)
cert = (
x509.CertificateBuilder()
.serial_number(1)
.issuer_name(name)
.subject_name(name)
.public_key(rsa_key_2048.public_key())
.not_valid_before(datetime.datetime(2020, 1, 1))
.not_valid_after(datetime.datetime(2038, 1, 1))
.sign(rsa_key_2048, hashes.SHA256(), backend)
)
loaded_cert = x509.load_pem_x509_certificate(
cert.public_bytes(encoding=serialization.Encoding.PEM)
)
assert next(iter(loaded_cert.subject.rdns[0])) == x509.NameAttribute(
NameOID.SURNAME, "RDNs"
)
@pytest.mark.parametrize(
("alg", "mgf_alg"),
[
(hashes.SHA512(), hashes.SHA256()),
(hashes.SHA3_512(), hashes.SHA3_256()),
],
)
def test_sign_pss(
self, rsa_key_2048: rsa.RSAPrivateKey, alg, mgf_alg, backend
):
if not backend.signature_hash_supported(alg):
pytest.skip(f"{alg} signature not supported")
builder = (
x509.CertificateBuilder()
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.public_key(rsa_key_2048.public_key())
.serial_number(777)
.not_valid_before(datetime.datetime(2020, 1, 1))
.not_valid_after(datetime.datetime(2038, 1, 1))
)
pss = padding.PSS(
mgf=padding.MGF1(mgf_alg), salt_length=alg.digest_size
)
cert = builder.sign(rsa_key_2048, alg, rsa_padding=pss)
pk = cert.public_key()
assert isinstance(pk, rsa.RSAPublicKey)
assert isinstance(cert.signature_hash_algorithm, type(alg))
cert_params = cert.signature_algorithm_parameters
assert isinstance(cert_params, padding.PSS)
assert cert_params._salt_length == pss._salt_length
assert isinstance(cert_params._mgf, padding.MGF1)
assert isinstance(cert_params._mgf._algorithm, type(mgf_alg))
pk.verify(
cert.signature,
cert.tbs_certificate_bytes,
cert_params,
alg,
)
@pytest.mark.parametrize(
("padding_len", "computed_len"),
[
(padding.PSS.MAX_LENGTH, 222),
(padding.PSS.DIGEST_LENGTH, 32),
],
)
def test_sign_pss_length_options(
self,
rsa_key_2048: rsa.RSAPrivateKey,
padding_len,
computed_len,
backend,
):
pss = padding.PSS(
mgf=padding.MGF1(hashes.SHA256()), salt_length=padding_len
)
if not backend.rsa_padding_supported(pss):
pytest.skip("PSS padding with these parameters not supported")
builder = (
x509.CertificateBuilder()
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.public_key(rsa_key_2048.public_key())
.serial_number(777)
.not_valid_before(datetime.datetime(2020, 1, 1))
.not_valid_after(datetime.datetime(2038, 1, 1))
)
cert = builder.sign(rsa_key_2048, hashes.SHA256(), rsa_padding=pss)
assert isinstance(cert.signature_algorithm_parameters, padding.PSS)
assert cert.signature_algorithm_parameters._salt_length == computed_len
def test_sign_pss_auto_unsupported(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
builder = (
x509.CertificateBuilder()
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.public_key(rsa_key_2048.public_key())
.serial_number(777)
.not_valid_before(datetime.datetime(2020, 1, 1))
.not_valid_after(datetime.datetime(2038, 1, 1))
)
pss = padding.PSS(
mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.AUTO
)
with pytest.raises(TypeError):
builder.sign(rsa_key_2048, hashes.SHA256(), rsa_padding=pss)
def test_sign_invalid_padding(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
builder = (
x509.CertificateBuilder()
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.public_key(rsa_key_2048.public_key())
.serial_number(777)
.not_valid_before(datetime.datetime(2020, 1, 1))
.not_valid_after(datetime.datetime(2038, 1, 1))
)
with pytest.raises(TypeError):
builder.sign(
rsa_key_2048,
hashes.SHA256(),
rsa_padding=b"notapadding", # type: ignore[arg-type]
)
eckey = ec.generate_private_key(ec.SECP256R1())
with pytest.raises(TypeError):
builder.sign(
eckey, hashes.SHA256(), rsa_padding=padding.PKCS1v15()
)
def test_sign_pss_hash_none(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
builder = (
x509.CertificateBuilder()
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.public_key(rsa_key_2048.public_key())
.serial_number(777)
.not_valid_before(datetime.datetime(2020, 1, 1))
.not_valid_after(datetime.datetime(2038, 1, 1))
)
pss = padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=32)
with pytest.raises(TypeError):
builder.sign(rsa_key_2048, None, rsa_padding=pss)
def test_no_subject_name(self, rsa_key_2048: rsa.RSAPrivateKey, backend):
subject_private_key = rsa_key_2048
builder = (
x509.CertificateBuilder()
.serial_number(777)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.public_key(subject_private_key.public_key())
.not_valid_before(datetime.datetime(2002, 1, 1, 12, 1))
.not_valid_after(datetime.datetime(2030, 12, 31, 8, 30))
)
with pytest.raises(ValueError):
builder.sign(subject_private_key, hashes.SHA256(), backend)
def test_no_issuer_name(self, rsa_key_2048: rsa.RSAPrivateKey, backend):
subject_private_key = rsa_key_2048
builder = (
x509.CertificateBuilder()
.serial_number(777)
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.public_key(subject_private_key.public_key())
.not_valid_before(datetime.datetime(2002, 1, 1, 12, 1))
.not_valid_after(datetime.datetime(2030, 12, 31, 8, 30))
)
with pytest.raises(ValueError):
builder.sign(subject_private_key, hashes.SHA256(), backend)
def test_no_public_key(self, rsa_key_2048: rsa.RSAPrivateKey, backend):
subject_private_key = rsa_key_2048
builder = (
x509.CertificateBuilder()
.serial_number(777)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.not_valid_before(datetime.datetime(2002, 1, 1, 12, 1))
.not_valid_after(datetime.datetime(2030, 12, 31, 8, 30))
)
with pytest.raises(ValueError):
builder.sign(subject_private_key, hashes.SHA256(), backend)
def test_no_not_valid_before(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
subject_private_key = rsa_key_2048
builder = (
x509.CertificateBuilder()
.serial_number(777)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.public_key(subject_private_key.public_key())
.not_valid_after(datetime.datetime(2030, 12, 31, 8, 30))
)
with pytest.raises(ValueError):
builder.sign(subject_private_key, hashes.SHA256(), backend)
def test_no_not_valid_after(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
subject_private_key = rsa_key_2048
builder = (
x509.CertificateBuilder()
.serial_number(777)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.public_key(subject_private_key.public_key())
.not_valid_before(datetime.datetime(2002, 1, 1, 12, 1))
)
with pytest.raises(ValueError):
builder.sign(subject_private_key, hashes.SHA256(), backend)
def test_no_serial_number(self, rsa_key_2048: rsa.RSAPrivateKey, backend):
subject_private_key = rsa_key_2048
builder = (
x509.CertificateBuilder()
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.public_key(subject_private_key.public_key())
.not_valid_before(datetime.datetime(2002, 1, 1, 12, 1))
.not_valid_after(datetime.datetime(2030, 12, 31, 8, 30))
)
with pytest.raises(ValueError):
builder.sign(subject_private_key, hashes.SHA256(), backend)
def test_issuer_name_must_be_a_name_type(self):
builder = x509.CertificateBuilder()
with pytest.raises(TypeError):
builder.issuer_name("subject") # type:ignore[arg-type]
with pytest.raises(TypeError):
builder.issuer_name(object) # type:ignore[arg-type]
def test_issuer_name_may_only_be_set_once(self):
name = x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
builder = x509.CertificateBuilder().issuer_name(name)
with pytest.raises(ValueError):
builder.issuer_name(name)
def test_subject_name_must_be_a_name_type(self):
builder = x509.CertificateBuilder()
with pytest.raises(TypeError):
builder.subject_name("subject") # type:ignore[arg-type]
with pytest.raises(TypeError):
builder.subject_name(object) # type:ignore[arg-type]
def test_subject_name_may_only_be_set_once(self):
name = x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
builder = x509.CertificateBuilder().subject_name(name)
with pytest.raises(ValueError):
builder.subject_name(name)
def test_not_valid_before_after_not_valid_after(self):
builder = x509.CertificateBuilder()
builder = builder.not_valid_after(datetime.datetime(2002, 1, 1, 12, 1))
with pytest.raises(ValueError):
builder.not_valid_before(datetime.datetime(2003, 1, 1, 12, 1))
def test_not_valid_after_before_not_valid_before(self):
builder = x509.CertificateBuilder()
builder = builder.not_valid_before(
datetime.datetime(2002, 1, 1, 12, 1)
)
with pytest.raises(ValueError):
builder.not_valid_after(datetime.datetime(2001, 1, 1, 12, 1))
def test_public_key_must_be_public_key(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
private_key = rsa_key_2048
builder = x509.CertificateBuilder()
with pytest.raises(TypeError):
builder.public_key(private_key) # type: ignore[arg-type]
def test_public_key_may_only_be_set_once(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
private_key = rsa_key_2048
public_key = private_key.public_key()
builder = x509.CertificateBuilder().public_key(public_key)
with pytest.raises(ValueError):
builder.public_key(public_key)
def test_serial_number_must_be_an_integer_type(self):
with pytest.raises(TypeError):
x509.CertificateBuilder().serial_number(
10.0 # type:ignore[arg-type]
)
def test_serial_number_must_be_non_negative(self):
with pytest.raises(ValueError):
x509.CertificateBuilder().serial_number(-1)
def test_serial_number_must_be_positive(self):
with pytest.raises(ValueError):
x509.CertificateBuilder().serial_number(0)
def test_minimal_serial_number(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
subject_private_key = rsa_key_2048
builder = (
x509.CertificateBuilder()
.serial_number(1)
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "RU")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "RU")])
)
.public_key(subject_private_key.public_key())
.not_valid_before(datetime.datetime(2002, 1, 1, 12, 1))
.not_valid_after(datetime.datetime(2030, 12, 31, 8, 30))
)
cert = builder.sign(subject_private_key, hashes.SHA256(), backend)
assert cert.serial_number == 1
def test_biggest_serial_number(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
subject_private_key = rsa_key_2048
builder = (
x509.CertificateBuilder()
.serial_number((1 << 159) - 1)
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "RU")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "RU")])
)
.public_key(subject_private_key.public_key())
.not_valid_before(datetime.datetime(2002, 1, 1, 12, 1))
.not_valid_after(datetime.datetime(2030, 12, 31, 8, 30))
)
cert = builder.sign(subject_private_key, hashes.SHA256(), backend)
assert cert.serial_number == (1 << 159) - 1
def test_serial_number_must_be_less_than_160_bits_long(self):
with pytest.raises(ValueError):
x509.CertificateBuilder().serial_number(1 << 159)
def test_serial_number_may_only_be_set_once(self):
builder = x509.CertificateBuilder().serial_number(10)
with pytest.raises(ValueError):
builder.serial_number(20)
def test_aware_not_valid_after(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
tz = datetime.timezone(datetime.timedelta(hours=-8))
time = datetime.datetime(2012, 1, 16, 22, 43, tzinfo=tz)
utc_time = datetime.datetime(2012, 1, 17, 6, 43)
private_key = rsa_key_2048
cert_builder = x509.CertificateBuilder().not_valid_after(time)
cert_builder = (
cert_builder.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.serial_number(1)
.public_key(private_key.public_key())
.not_valid_before(utc_time - datetime.timedelta(days=365))
)
cert = cert_builder.sign(private_key, hashes.SHA256(), backend)
_check_cert_times(
cert, not_valid_before=None, not_valid_after=utc_time
)
def test_earliest_time(self, rsa_key_2048: rsa.RSAPrivateKey, backend):
time = datetime.datetime(1950, 1, 1)
private_key = rsa_key_2048
cert_builder = (
x509.CertificateBuilder()
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.serial_number(1)
.public_key(private_key.public_key())
.not_valid_before(time)
.not_valid_after(time)
)
cert = cert_builder.sign(private_key, hashes.SHA256(), backend)
_check_cert_times(cert, not_valid_before=time, not_valid_after=time)
parsed = test_support.test_parse_certificate(
cert.public_bytes(serialization.Encoding.DER)
)
# UTC TIME
assert parsed.not_before_tag == 0x17
assert parsed.not_after_tag == 0x17
def test_invalid_not_valid_after(self):
with pytest.raises(TypeError):
x509.CertificateBuilder().not_valid_after(
104204304504 # type:ignore[arg-type]
)
with pytest.raises(TypeError):
x509.CertificateBuilder().not_valid_after(
datetime.time() # type:ignore[arg-type]
)
with pytest.raises(ValueError):
x509.CertificateBuilder().not_valid_after(
datetime.datetime(1940, 8, 10)
)
def test_not_valid_after_may_only_be_set_once(self):
builder = x509.CertificateBuilder().not_valid_after(
datetime.datetime.now()
)
with pytest.raises(ValueError):
builder.not_valid_after(datetime.datetime.now())
def test_aware_not_valid_before(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
tz = datetime.timezone(datetime.timedelta(hours=-8))
time = datetime.datetime(2012, 1, 16, 22, 43, tzinfo=tz)
utc_time = datetime.datetime(2012, 1, 17, 6, 43)
private_key = rsa_key_2048
cert_builder = x509.CertificateBuilder().not_valid_before(time)
cert_builder = (
cert_builder.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.serial_number(1)
.public_key(private_key.public_key())
.not_valid_after(utc_time + datetime.timedelta(days=366))
)
cert = cert_builder.sign(private_key, hashes.SHA256(), backend)
_check_cert_times(
cert, not_valid_before=utc_time, not_valid_after=None
)
def test_invalid_not_valid_before(self):
with pytest.raises(TypeError):
x509.CertificateBuilder().not_valid_before(
104204304504 # type:ignore[arg-type]
)
with pytest.raises(TypeError):
x509.CertificateBuilder().not_valid_before(
datetime.time() # type:ignore[arg-type]
)
with pytest.raises(ValueError):
x509.CertificateBuilder().not_valid_before(
datetime.datetime(1940, 8, 10)
)
def test_not_valid_before_may_only_be_set_once(self):
builder = x509.CertificateBuilder().not_valid_before(
datetime.datetime.now()
)
with pytest.raises(ValueError):
builder.not_valid_before(datetime.datetime.now())
def test_add_extension_checks_for_duplicates(self):
builder = x509.CertificateBuilder().add_extension(
x509.BasicConstraints(ca=False, path_length=None),
True,
)
with pytest.raises(ValueError):
builder.add_extension(
x509.BasicConstraints(ca=False, path_length=None),
True,
)
def test_add_invalid_extension_type(self):
builder = x509.CertificateBuilder()
with pytest.raises(TypeError):
builder.add_extension(
object(), # type:ignore[arg-type]
False,
)
@pytest.mark.parametrize("algorithm", [object(), None])
def test_sign_with_unsupported_hash(
self, rsa_key_2048: rsa.RSAPrivateKey, algorithm, backend
):
private_key = rsa_key_2048
builder = x509.CertificateBuilder()
builder = (
builder.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.serial_number(1)
.public_key(private_key.public_key())
.not_valid_before(datetime.datetime(2002, 1, 1, 12, 1))
.not_valid_after(datetime.datetime(2032, 1, 1, 12, 1))
)
with pytest.raises(TypeError):
builder.sign(private_key, algorithm, backend)
@pytest.mark.supported(
only_if=lambda backend: backend.ed25519_supported(),
skip_message="Requires OpenSSL with Ed25519 support",
)
def test_sign_with_unsupported_hash_ed25519(self, backend):
private_key = ed25519.Ed25519PrivateKey.generate()
builder = (
x509.CertificateBuilder()
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.serial_number(1)
.public_key(private_key.public_key())
.not_valid_before(datetime.datetime(2002, 1, 1, 12, 1))
.not_valid_after(datetime.datetime(2032, 1, 1, 12, 1))
)
with pytest.raises(ValueError):
builder.sign(private_key, hashes.SHA256(), backend)
@pytest.mark.supported(
only_if=lambda backend: backend.ed448_supported(),
skip_message="Requires OpenSSL with Ed448 support",
)
def test_sign_with_unsupported_hash_ed448(self, backend):
private_key = ed448.Ed448PrivateKey.generate()
builder = (
x509.CertificateBuilder()
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.serial_number(1)
.public_key(private_key.public_key())
.not_valid_before(datetime.datetime(2002, 1, 1, 12, 1))
.not_valid_after(datetime.datetime(2032, 1, 1, 12, 1))
)
with pytest.raises(ValueError):
builder.sign(private_key, hashes.SHA256(), backend)
@pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.MD5()),
skip_message="Requires OpenSSL with MD5 support",
)
@pytest.mark.supported(
only_if=lambda backend: backend.dsa_supported(),
skip_message="Does not support DSA.",
)
@pytest.mark.parametrize(
"hash_algorithm",
[
hashes.MD5(),
hashes.SHA3_224(),
hashes.SHA3_256(),
hashes.SHA3_384(),
hashes.SHA3_512(),
],
)
def test_sign_dsa_with_unsupported_hash(self, hash_algorithm, backend):
private_key = DSA_KEY_2048.private_key(backend)
builder = x509.CertificateBuilder()
builder = (
builder.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.serial_number(1)
.public_key(private_key.public_key())
.not_valid_before(datetime.datetime(2002, 1, 1, 12, 1))
.not_valid_after(datetime.datetime(2032, 1, 1, 12, 1))
)
with pytest.raises(UnsupportedAlgorithm):
builder.sign(private_key, hash_algorithm, backend)
@pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.MD5()),
skip_message="Requires OpenSSL with MD5 support",
)
def test_sign_ec_with_md5(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
private_key = EC_KEY_SECP256R1.private_key(backend)
builder = x509.CertificateBuilder()
builder = (
builder.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.serial_number(1)
.public_key(private_key.public_key())
.not_valid_before(datetime.datetime(2002, 1, 1, 12, 1))
.not_valid_after(datetime.datetime(2032, 1, 1, 12, 1))
)
with pytest.raises(UnsupportedAlgorithm):
builder.sign(
private_key,
hashes.MD5(), # type: ignore[arg-type]
backend,
)
    @pytest.mark.supported(
        only_if=lambda backend: backend.dsa_supported(),
        skip_message="Does not support DSA.",
    )
    @pytest.mark.parametrize(
        ("hashalg", "hashalg_oid"),
        [
            (hashes.SHA224, x509.SignatureAlgorithmOID.DSA_WITH_SHA224),
            (hashes.SHA256, x509.SignatureAlgorithmOID.DSA_WITH_SHA256),
            (hashes.SHA384, x509.SignatureAlgorithmOID.DSA_WITH_SHA384),
            (hashes.SHA512, x509.SignatureAlgorithmOID.DSA_WITH_SHA512),
        ],
    )
    def test_build_cert_with_dsa_private_key(
        self, hashalg, hashalg_oid, backend
    ):
        """Build and sign a certificate with a DSA key for each SHA-2 digest,
        then check the signature OID, public-key type, validity window, and
        that both added extensions round-trip."""
        issuer_private_key = DSA_KEY_2048.private_key(backend)
        subject_private_key = DSA_KEY_2048.private_key(backend)

        not_valid_before = datetime.datetime(2002, 1, 1, 12, 1)
        not_valid_after = datetime.datetime(2030, 12, 31, 8, 30)

        builder = (
            x509.CertificateBuilder()
            .serial_number(777)
            .issuer_name(
                x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
            )
            .subject_name(
                x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
            )
            .public_key(subject_private_key.public_key())
            .add_extension(
                x509.BasicConstraints(ca=False, path_length=None),
                True,
            )
            .add_extension(
                x509.SubjectAlternativeName([x509.DNSName("cryptography.io")]),
                critical=False,
            )
            .not_valid_before(not_valid_before)
            .not_valid_after(not_valid_after)
        )

        cert = builder.sign(issuer_private_key, hashalg(), backend)

        assert cert.version is x509.Version.v3
        # The signature OID must match the parametrized digest.
        assert cert.signature_algorithm_oid == hashalg_oid
        public_key = cert.public_key()
        assert isinstance(public_key, dsa.DSAPublicKey)
        assert cert.public_key_algorithm_oid == PublicKeyAlgorithmOID.DSA
        _check_cert_times(
            cert,
            not_valid_before=not_valid_before,
            not_valid_after=not_valid_after,
        )
        basic_constraints = cert.extensions.get_extension_for_oid(
            ExtensionOID.BASIC_CONSTRAINTS
        )
        assert isinstance(basic_constraints.value, x509.BasicConstraints)
        assert basic_constraints.value.ca is False
        assert basic_constraints.value.path_length is None
        subject_alternative_name = cert.extensions.get_extension_for_oid(
            ExtensionOID.SUBJECT_ALTERNATIVE_NAME
        )
        assert isinstance(
            subject_alternative_name.value, x509.SubjectAlternativeName
        )
        assert list(subject_alternative_name.value) == [
            x509.DNSName("cryptography.io"),
        ]
    @pytest.mark.parametrize(
        ("hashalg", "hashalg_oid"),
        [
            (hashes.SHA224, x509.SignatureAlgorithmOID.ECDSA_WITH_SHA224),
            (hashes.SHA256, x509.SignatureAlgorithmOID.ECDSA_WITH_SHA256),
            (hashes.SHA384, x509.SignatureAlgorithmOID.ECDSA_WITH_SHA384),
            (hashes.SHA512, x509.SignatureAlgorithmOID.ECDSA_WITH_SHA512),
            (hashes.SHA3_224, x509.SignatureAlgorithmOID.ECDSA_WITH_SHA3_224),
            (hashes.SHA3_256, x509.SignatureAlgorithmOID.ECDSA_WITH_SHA3_256),
            (hashes.SHA3_384, x509.SignatureAlgorithmOID.ECDSA_WITH_SHA3_384),
            (hashes.SHA3_512, x509.SignatureAlgorithmOID.ECDSA_WITH_SHA3_512),
        ],
    )
    def test_build_cert_with_ec_private_key(
        self, hashalg, hashalg_oid, backend
    ):
        """Build and sign a certificate with a P-256 key for each SHA-2/SHA-3
        digest, then check signature OID, public-key type, validity window,
        and that both added extensions round-trip."""
        _skip_curve_unsupported(backend, ec.SECP256R1())
        # Some backends (e.g. older OpenSSL) lack SHA-3 signing support.
        if not backend.signature_hash_supported(hashalg()):
            pytest.skip(f"{hashalg} signature not supported")
        issuer_private_key = ec.generate_private_key(ec.SECP256R1(), backend)
        subject_private_key = ec.generate_private_key(ec.SECP256R1(), backend)

        not_valid_before = datetime.datetime(2002, 1, 1, 12, 1)
        not_valid_after = datetime.datetime(2030, 12, 31, 8, 30)

        builder = (
            x509.CertificateBuilder()
            .serial_number(777)
            .issuer_name(
                x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
            )
            .subject_name(
                x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
            )
            .public_key(subject_private_key.public_key())
            .add_extension(
                x509.BasicConstraints(ca=False, path_length=None),
                True,
            )
            .add_extension(
                x509.SubjectAlternativeName([x509.DNSName("cryptography.io")]),
                critical=False,
            )
            .not_valid_before(not_valid_before)
            .not_valid_after(not_valid_after)
        )

        cert = builder.sign(issuer_private_key, hashalg(), backend)

        assert cert.version is x509.Version.v3
        public_key = cert.public_key()
        assert isinstance(public_key, ec.EllipticCurvePublicKey)
        assert (
            cert.public_key_algorithm_oid
            == PublicKeyAlgorithmOID.EC_PUBLIC_KEY
        )
        assert cert.signature_algorithm_oid == hashalg_oid
        assert type(cert.signature_hash_algorithm) is hashalg
        _check_cert_times(
            cert,
            not_valid_before=not_valid_before,
            not_valid_after=not_valid_after,
        )
        basic_constraints = cert.extensions.get_extension_for_oid(
            ExtensionOID.BASIC_CONSTRAINTS
        )
        assert isinstance(basic_constraints.value, x509.BasicConstraints)
        assert basic_constraints.value.ca is False
        assert basic_constraints.value.path_length is None
        subject_alternative_name = cert.extensions.get_extension_for_oid(
            ExtensionOID.SUBJECT_ALTERNATIVE_NAME
        )
        assert isinstance(
            subject_alternative_name.value, x509.SubjectAlternativeName
        )
        assert list(subject_alternative_name.value) == [
            x509.DNSName("cryptography.io"),
        ]
    def test_build_cert_with_deterministic_ecdsa_signature(self, backend):
        """Deterministic (RFC 6979) ECDSA yields identical signatures for
        identical inputs; randomized ECDSA does not."""
        _skip_curve_unsupported(backend, ec.SECP256R1())
        _skip_deterministic_ecdsa_unsupported(backend)
        private_key = ec.generate_private_key(ec.SECP256R1())

        not_valid_before = datetime.datetime(2002, 1, 1, 12, 1)
        not_valid_after = datetime.datetime(2030, 12, 31, 8, 30)

        builder = (
            x509.CertificateBuilder()
            .serial_number(777)
            .issuer_name(x509.Name([]))
            .subject_name(x509.Name([]))
            .public_key(private_key.public_key())
            .not_valid_before(not_valid_before)
            .not_valid_after(not_valid_after)
        )

        cert1 = builder.sign(
            private_key,
            hashes.SHA256(),
            backend,
            ecdsa_deterministic=True,
        )
        cert2 = builder.sign(
            private_key,
            hashes.SHA256(),
            backend,
            ecdsa_deterministic=True,
        )
        # Default (omitted) and explicit False both use randomized nonces.
        cert_nondet = builder.sign(private_key, hashes.SHA256(), backend)
        cert_nondet2 = builder.sign(
            private_key, hashes.SHA256(), backend, ecdsa_deterministic=False
        )
        assert cert1.signature == cert2.signature
        # Randomized signatures collide only with negligible probability,
        # so these inequality checks are effectively deterministic.
        assert cert1.signature != cert_nondet.signature
        assert cert_nondet.signature != cert_nondet2.signature
        private_key.public_key().verify(
            cert1.signature,
            cert1.tbs_certificate_bytes,
            ec.ECDSA(hashes.SHA256()),
        )
def test_sign_deterministic_wrong_key_type(self, rsa_key_2048, backend):
not_valid_before = datetime.datetime(2002, 1, 1, 12, 1)
not_valid_after = datetime.datetime(2030, 12, 31, 8, 30)
builder = (
x509.CertificateBuilder()
.serial_number(777)
.issuer_name(x509.Name([]))
.subject_name(x509.Name([]))
.public_key(rsa_key_2048.public_key())
.not_valid_before(not_valid_before)
.not_valid_after(not_valid_after)
)
with pytest.raises(TypeError):
builder.sign(
rsa_key_2048,
hashes.SHA256(),
backend,
ecdsa_deterministic=True,
)
    def test_build_cert_with_bmpstring_universalstring_name(
        self, rsa_key_2048: rsa.RSAPrivateKey, backend
    ):
        """Name attributes using the legacy BMPString and UniversalString
        ASN.1 encodings survive a build/sign/parse round trip."""
        private_key = rsa_key_2048
        issuer = x509.Name(
            [
                x509.NameAttribute(
                    NameOID.COMMON_NAME,
                    "cryptography.io",
                    _ASN1Type.BMPString,
                ),
                x509.NameAttribute(NameOID.ORGANIZATION_NAME, "PyCA"),
            ]
        )
        subject = x509.Name(
            [
                x509.NameAttribute(
                    NameOID.COMMON_NAME,
                    "cryptography.io",
                    _ASN1Type.UniversalString,
                ),
                x509.NameAttribute(NameOID.ORGANIZATION_NAME, "PyCA"),
            ]
        )
        builder = x509.CertificateBuilder()
        builder = (
            builder.subject_name(subject)
            .issuer_name(issuer)
            .serial_number(1)
            .public_key(private_key.public_key())
            .not_valid_before(datetime.datetime(2002, 1, 1, 12, 1))
            .not_valid_after(datetime.datetime(2032, 1, 1, 12, 1))
        )
        cert = builder.sign(private_key, hashes.SHA256(), backend)
        # Equality covers both the attribute values and their ASN.1 types.
        assert cert.issuer == issuer
        assert cert.subject == subject
    @pytest.mark.supported(
        only_if=lambda backend: backend.ed25519_supported(),
        skip_message="Requires OpenSSL with Ed25519 support",
    )
    def test_build_cert_with_ed25519(self, backend):
        """Build and sign a certificate entirely with Ed25519 keys and verify
        signature/public-key OIDs, validity window, and extensions."""
        issuer_private_key = ed25519.Ed25519PrivateKey.generate()
        subject_private_key = ed25519.Ed25519PrivateKey.generate()

        not_valid_before = datetime.datetime(2002, 1, 1, 12, 1)
        not_valid_after = datetime.datetime(2030, 12, 31, 8, 30)

        builder = (
            x509.CertificateBuilder()
            .serial_number(777)
            .issuer_name(
                x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
            )
            .subject_name(
                x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
            )
            .public_key(subject_private_key.public_key())
            .add_extension(
                x509.BasicConstraints(ca=False, path_length=None),
                True,
            )
            .add_extension(
                x509.SubjectAlternativeName([x509.DNSName("cryptography.io")]),
                critical=False,
            )
            .not_valid_before(not_valid_before)
            .not_valid_after(not_valid_after)
        )

        # Ed25519 requires algorithm=None: the hash is fixed by the scheme.
        cert = builder.sign(issuer_private_key, None, backend)
        issuer_private_key.public_key().verify(
            cert.signature, cert.tbs_certificate_bytes
        )
        assert cert.signature_algorithm_oid == SignatureAlgorithmOID.ED25519
        assert cert.signature_hash_algorithm is None
        assert isinstance(cert.public_key(), ed25519.Ed25519PublicKey)
        assert cert.public_key_algorithm_oid == PublicKeyAlgorithmOID.ED25519
        assert cert.version is x509.Version.v3
        _check_cert_times(
            cert,
            not_valid_before=not_valid_before,
            not_valid_after=not_valid_after,
        )
        basic_constraints = cert.extensions.get_extension_for_oid(
            ExtensionOID.BASIC_CONSTRAINTS
        )
        assert isinstance(basic_constraints.value, x509.BasicConstraints)
        assert basic_constraints.value.ca is False
        assert basic_constraints.value.path_length is None
        subject_alternative_name = cert.extensions.get_extension_for_oid(
            ExtensionOID.SUBJECT_ALTERNATIVE_NAME
        )
        assert isinstance(
            subject_alternative_name.value, x509.SubjectAlternativeName
        )
        assert list(subject_alternative_name.value) == [
            x509.DNSName("cryptography.io"),
        ]
    @pytest.mark.supported(
        only_if=lambda backend: backend.ed25519_supported(),
        skip_message="Requires OpenSSL with Ed25519 support",
    )
    def test_build_cert_with_public_ed25519_rsa_sig(
        self, rsa_key_2048: rsa.RSAPrivateKey, backend
    ):
        """An Ed25519 subject key can be certified by an RSA issuer: the
        signature is RSA/SHA-256 while the public key stays Ed25519."""
        issuer_private_key = rsa_key_2048
        subject_private_key = ed25519.Ed25519PrivateKey.generate()

        not_valid_before = datetime.datetime(2002, 1, 1, 12, 1)
        not_valid_after = datetime.datetime(2030, 12, 31, 8, 30)

        builder = (
            x509.CertificateBuilder()
            .serial_number(777)
            .issuer_name(
                x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
            )
            .subject_name(
                x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
            )
            .public_key(subject_private_key.public_key())
            .not_valid_before(not_valid_before)
            .not_valid_after(not_valid_after)
        )

        cert = builder.sign(issuer_private_key, hashes.SHA256(), backend)
        assert cert.signature_hash_algorithm is not None
        issuer_private_key.public_key().verify(
            cert.signature,
            cert.tbs_certificate_bytes,
            padding.PKCS1v15(),
            cert.signature_hash_algorithm,
        )
        assert cert.signature_algorithm_oid == (
            SignatureAlgorithmOID.RSA_WITH_SHA256
        )
        assert isinstance(cert.signature_hash_algorithm, hashes.SHA256)
        assert isinstance(cert.public_key(), ed25519.Ed25519PublicKey)
        assert cert.public_key_algorithm_oid == PublicKeyAlgorithmOID.ED25519
    @pytest.mark.supported(
        only_if=lambda backend: backend.ed448_supported(),
        skip_message="Requires OpenSSL with Ed448 support",
    )
    def test_build_cert_with_ed448(self, backend):
        """Build and sign a certificate entirely with Ed448 keys and verify
        signature/public-key OIDs, validity window, and extensions."""
        issuer_private_key = ed448.Ed448PrivateKey.generate()
        subject_private_key = ed448.Ed448PrivateKey.generate()

        not_valid_before = datetime.datetime(2002, 1, 1, 12, 1)
        not_valid_after = datetime.datetime(2030, 12, 31, 8, 30)

        builder = (
            x509.CertificateBuilder()
            .serial_number(777)
            .issuer_name(
                x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
            )
            .subject_name(
                x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
            )
            .public_key(subject_private_key.public_key())
            .add_extension(
                x509.BasicConstraints(ca=False, path_length=None),
                True,
            )
            .add_extension(
                x509.SubjectAlternativeName([x509.DNSName("cryptography.io")]),
                critical=False,
            )
            .not_valid_before(not_valid_before)
            .not_valid_after(not_valid_after)
        )

        # Ed448 requires algorithm=None: the hash is fixed by the scheme.
        cert = builder.sign(issuer_private_key, None, backend)
        issuer_private_key.public_key().verify(
            cert.signature, cert.tbs_certificate_bytes
        )
        assert cert.signature_algorithm_oid == SignatureAlgorithmOID.ED448
        assert cert.signature_hash_algorithm is None
        assert isinstance(cert.public_key(), ed448.Ed448PublicKey)
        assert cert.public_key_algorithm_oid == PublicKeyAlgorithmOID.ED448
        assert cert.version is x509.Version.v3
        _check_cert_times(
            cert,
            not_valid_before=not_valid_before,
            not_valid_after=not_valid_after,
        )
        basic_constraints = cert.extensions.get_extension_for_oid(
            ExtensionOID.BASIC_CONSTRAINTS
        )
        assert isinstance(basic_constraints.value, x509.BasicConstraints)
        assert basic_constraints.value.ca is False
        assert basic_constraints.value.path_length is None
        subject_alternative_name = cert.extensions.get_extension_for_oid(
            ExtensionOID.SUBJECT_ALTERNATIVE_NAME
        )
        assert isinstance(
            subject_alternative_name.value, x509.SubjectAlternativeName
        )
        assert list(subject_alternative_name.value) == [
            x509.DNSName("cryptography.io"),
        ]
    @pytest.mark.supported(
        only_if=lambda backend: backend.ed448_supported(),
        skip_message="Requires OpenSSL with Ed448 support",
    )
    def test_build_cert_with_public_ed448_rsa_sig(
        self, rsa_key_2048: rsa.RSAPrivateKey, backend
    ):
        """An Ed448 subject key can be certified by an RSA issuer: the
        signature is RSA/SHA-256 while the public key stays Ed448."""
        issuer_private_key = rsa_key_2048
        subject_private_key = ed448.Ed448PrivateKey.generate()

        not_valid_before = datetime.datetime(2002, 1, 1, 12, 1)
        not_valid_after = datetime.datetime(2030, 12, 31, 8, 30)

        builder = (
            x509.CertificateBuilder()
            .serial_number(777)
            .issuer_name(
                x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
            )
            .subject_name(
                x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
            )
            .public_key(subject_private_key.public_key())
            .not_valid_before(not_valid_before)
            .not_valid_after(not_valid_after)
        )

        cert = builder.sign(issuer_private_key, hashes.SHA256(), backend)
        assert cert.signature_hash_algorithm is not None
        issuer_private_key.public_key().verify(
            cert.signature,
            cert.tbs_certificate_bytes,
            padding.PKCS1v15(),
            cert.signature_hash_algorithm,
        )
        assert cert.signature_algorithm_oid == (
            SignatureAlgorithmOID.RSA_WITH_SHA256
        )
        assert isinstance(cert.signature_hash_algorithm, hashes.SHA256)
        assert isinstance(cert.public_key(), ed448.Ed448PublicKey)
        assert cert.public_key_algorithm_oid == PublicKeyAlgorithmOID.ED448
    @pytest.mark.supported(
        only_if=lambda backend: (
            backend.x25519_supported() and backend.x448_supported()
        ),
        skip_message="Requires OpenSSL with x25519 & x448 support",
    )
    @pytest.mark.parametrize(
        ("priv_key_cls", "pub_key_cls", "pub_key_oid"),
        [
            (
                x25519.X25519PrivateKey,
                x25519.X25519PublicKey,
                PublicKeyAlgorithmOID.X25519,
            ),
            (
                x448.X448PrivateKey,
                x448.X448PublicKey,
                PublicKeyAlgorithmOID.X448,
            ),
        ],
    )
    def test_build_cert_with_public_x25519_x448_rsa_sig(
        self,
        rsa_key_2048: rsa.RSAPrivateKey,
        priv_key_cls,
        pub_key_cls,
        pub_key_oid,
        backend,
    ):
        """X25519/X448 (key-agreement-only) subject keys can still be
        certified by an RSA issuer; the cert signature is RSA/SHA-256."""
        issuer_private_key = rsa_key_2048
        subject_private_key = priv_key_cls.generate()

        not_valid_before = datetime.datetime(2002, 1, 1, 12, 1)
        not_valid_after = datetime.datetime(2030, 12, 31, 8, 30)

        builder = (
            x509.CertificateBuilder()
            .serial_number(777)
            .issuer_name(
                x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
            )
            .subject_name(
                x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
            )
            .public_key(subject_private_key.public_key())
            .not_valid_before(not_valid_before)
            .not_valid_after(not_valid_after)
        )

        cert = builder.sign(issuer_private_key, hashes.SHA256(), backend)
        assert cert.signature_hash_algorithm is not None
        issuer_private_key.public_key().verify(
            cert.signature,
            cert.tbs_certificate_bytes,
            padding.PKCS1v15(),
            cert.signature_hash_algorithm,
        )
        assert cert.signature_algorithm_oid == (
            SignatureAlgorithmOID.RSA_WITH_SHA256
        )
        assert isinstance(cert.signature_hash_algorithm, hashes.SHA256)
        assert isinstance(cert.public_key(), pub_key_cls)
        assert cert.public_key_algorithm_oid == pub_key_oid
def test_build_cert_with_rsa_key_too_small(
self, rsa_key_512: rsa.RSAPrivateKey, backend
):
issuer_private_key = rsa_key_512
subject_private_key = rsa_key_512
not_valid_before = datetime.datetime(2002, 1, 1, 12, 1)
not_valid_after = datetime.datetime(2030, 12, 31, 8, 30)
builder = (
x509.CertificateBuilder()
.serial_number(777)
.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.subject_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
)
.public_key(subject_private_key.public_key())
.not_valid_before(not_valid_before)
.not_valid_after(not_valid_after)
)
with pytest.raises(ValueError):
builder.sign(issuer_private_key, hashes.SHA512(), backend)
    # Each parametrize entry is a fully-constructed extension value; the
    # test adds it to both a certificate and a CSR and asserts it
    # round-trips unchanged. The bare x509.SubjectKeyIdentifier class is a
    # sentinel resolved inside the test (it needs the subject public key).
    @pytest.mark.parametrize(
        "add_ext",
        [
            x509.SubjectAlternativeName(
                [
                    # These examples exist to verify compatibility with
                    # certificates that have utf8 encoded data in the ia5string
                    x509.DNSName._init_without_validation("a\xedt\xe1s.test"),
                    x509.RFC822Name._init_without_validation(
                        "test@a\xedt\xe1s.test"
                    ),
                    x509.UniformResourceIdentifier._init_without_validation(
                        "http://a\xedt\xe1s.test"
                    ),
                ]
            ),
            x509.CertificatePolicies(
                [
                    x509.PolicyInformation(
                        x509.ObjectIdentifier("2.16.840.1.12345.1.2.3.4.1"),
                        ["http://other.com/cps"],
                    )
                ]
            ),
            x509.CertificatePolicies(
                [
                    x509.PolicyInformation(
                        x509.ObjectIdentifier("2.16.840.1.12345.1.2.3.4.1"),
                        None,
                    )
                ]
            ),
            x509.CertificatePolicies(
                [
                    x509.PolicyInformation(
                        x509.ObjectIdentifier("2.16.840.1.12345.1.2.3.4.1"),
                        [
                            "http://example.com/cps",
                            "http://other.com/cps",
                            x509.UserNotice(
                                x509.NoticeReference("my org", [1, 2, 3, 4]),
                                "thing",
                            ),
                        ],
                    )
                ]
            ),
            x509.CertificatePolicies(
                [
                    x509.PolicyInformation(
                        x509.ObjectIdentifier("2.16.840.1.12345.1.2.3.4.1"),
                        [
                            "http://example.com/cps",
                            x509.UserNotice(
                                x509.NoticeReference(
                                    "UTF8\u2122'", [1, 2, 3, 4]
                                ),
                                "We heart UTF8!\u2122",
                            ),
                        ],
                    )
                ]
            ),
            x509.CertificatePolicies(
                [
                    x509.PolicyInformation(
                        x509.ObjectIdentifier("2.16.840.1.12345.1.2.3.4.1"),
                        [x509.UserNotice(None, "thing")],
                    )
                ]
            ),
            x509.CertificatePolicies(
                [
                    x509.PolicyInformation(
                        x509.ObjectIdentifier("2.16.840.1.12345.1.2.3.4.1"),
                        [
                            x509.UserNotice(
                                x509.NoticeReference("my org", [1, 2, 3, 4]),
                                None,
                            )
                        ],
                    )
                ]
            ),
            x509.IssuerAlternativeName(
                [
                    x509.DNSName("myissuer"),
                    x509.RFC822Name("email@domain.com"),
                ]
            ),
            x509.ExtendedKeyUsage(
                [
                    ExtendedKeyUsageOID.CLIENT_AUTH,
                    ExtendedKeyUsageOID.SERVER_AUTH,
                    ExtendedKeyUsageOID.CODE_SIGNING,
                ]
            ),
            x509.InhibitAnyPolicy(3),
            x509.TLSFeature([x509.TLSFeatureType.status_request]),
            x509.TLSFeature([x509.TLSFeatureType.status_request_v2]),
            x509.TLSFeature(
                [
                    x509.TLSFeatureType.status_request,
                    x509.TLSFeatureType.status_request_v2,
                ]
            ),
            x509.NameConstraints(
                permitted_subtrees=[
                    x509.IPAddress(ipaddress.IPv4Network("192.168.0.0/24")),
                    x509.IPAddress(ipaddress.IPv4Network("192.168.0.0/29")),
                    x509.IPAddress(ipaddress.IPv4Network("127.0.0.1/32")),
                    x509.IPAddress(ipaddress.IPv4Network("8.0.0.0/8")),
                    x509.IPAddress(ipaddress.IPv4Network("0.0.0.0/0")),
                    x509.IPAddress(
                        ipaddress.IPv6Network("FF:0:0:0:0:0:0:0/96")
                    ),
                    x509.IPAddress(
                        ipaddress.IPv6Network("FF:FF:0:0:0:0:0:0/128")
                    ),
                ],
                excluded_subtrees=[x509.DNSName("name.local")],
            ),
            x509.NameConstraints(
                permitted_subtrees=[
                    x509.IPAddress(ipaddress.IPv4Network("0.0.0.0/0")),
                ],
                excluded_subtrees=None,
            ),
            x509.NameConstraints(
                permitted_subtrees=None,
                excluded_subtrees=[x509.DNSName("name.local")],
            ),
            x509.PolicyConstraints(
                require_explicit_policy=None, inhibit_policy_mapping=1
            ),
            x509.PolicyConstraints(
                require_explicit_policy=3, inhibit_policy_mapping=1
            ),
            x509.PolicyConstraints(
                require_explicit_policy=0, inhibit_policy_mapping=None
            ),
            x509.CRLDistributionPoints(
                [
                    x509.DistributionPoint(
                        full_name=None,
                        relative_name=x509.RelativeDistinguishedName(
                            [
                                x509.NameAttribute(
                                    NameOID.COMMON_NAME,
                                    "indirect CRL for indirectCRL CA3",
                                ),
                            ]
                        ),
                        reasons=None,
                        crl_issuer=[
                            x509.DirectoryName(
                                x509.Name(
                                    [
                                        x509.NameAttribute(
                                            NameOID.COUNTRY_NAME, "US"
                                        ),
                                        x509.NameAttribute(
                                            NameOID.ORGANIZATION_NAME,
                                            "Test Certificates 2011",
                                        ),
                                        x509.NameAttribute(
                                            NameOID.ORGANIZATIONAL_UNIT_NAME,
                                            "indirectCRL CA3 cRLIssuer",
                                        ),
                                    ]
                                )
                            )
                        ],
                    )
                ]
            ),
            x509.CRLDistributionPoints(
                [
                    x509.DistributionPoint(
                        full_name=[
                            x509.DirectoryName(
                                x509.Name(
                                    [
                                        x509.NameAttribute(
                                            NameOID.COUNTRY_NAME, "US"
                                        ),
                                    ]
                                )
                            )
                        ],
                        relative_name=None,
                        reasons=None,
                        crl_issuer=[
                            x509.DirectoryName(
                                x509.Name(
                                    [
                                        x509.NameAttribute(
                                            NameOID.ORGANIZATION_NAME,
                                            "cryptography Testing",
                                        ),
                                    ]
                                )
                            )
                        ],
                    )
                ]
            ),
            x509.CRLDistributionPoints(
                [
                    x509.DistributionPoint(
                        full_name=[
                            x509.UniformResourceIdentifier(
                                "http://myhost.com/myca.crl"
                            ),
                            x509.UniformResourceIdentifier(
                                "http://backup.myhost.com/myca.crl"
                            ),
                        ],
                        relative_name=None,
                        reasons=frozenset(
                            [
                                x509.ReasonFlags.key_compromise,
                                x509.ReasonFlags.ca_compromise,
                            ]
                        ),
                        crl_issuer=[
                            x509.DirectoryName(
                                x509.Name(
                                    [
                                        x509.NameAttribute(
                                            NameOID.COUNTRY_NAME, "US"
                                        ),
                                        x509.NameAttribute(
                                            NameOID.COMMON_NAME,
                                            "cryptography CA",
                                        ),
                                    ]
                                )
                            )
                        ],
                    )
                ]
            ),
            x509.CRLDistributionPoints(
                [
                    x509.DistributionPoint(
                        full_name=[
                            x509.UniformResourceIdentifier(
                                "http://domain.com/some.crl"
                            )
                        ],
                        relative_name=None,
                        reasons=frozenset(
                            [
                                x509.ReasonFlags.key_compromise,
                                x509.ReasonFlags.ca_compromise,
                                x509.ReasonFlags.affiliation_changed,
                                x509.ReasonFlags.superseded,
                                x509.ReasonFlags.privilege_withdrawn,
                                x509.ReasonFlags.cessation_of_operation,
                                x509.ReasonFlags.aa_compromise,
                                x509.ReasonFlags.certificate_hold,
                            ]
                        ),
                        crl_issuer=None,
                    )
                ]
            ),
            x509.CRLDistributionPoints(
                [
                    x509.DistributionPoint(
                        full_name=None,
                        relative_name=None,
                        reasons=None,
                        crl_issuer=[
                            x509.DirectoryName(
                                x509.Name(
                                    [
                                        x509.NameAttribute(
                                            NameOID.COMMON_NAME,
                                            "cryptography CA",
                                        ),
                                    ]
                                )
                            )
                        ],
                    )
                ]
            ),
            x509.CRLDistributionPoints(
                [
                    x509.DistributionPoint(
                        full_name=[
                            x509.UniformResourceIdentifier(
                                "http://domain.com/some.crl"
                            )
                        ],
                        relative_name=None,
                        reasons=frozenset([x509.ReasonFlags.aa_compromise]),
                        crl_issuer=None,
                    )
                ]
            ),
            x509.FreshestCRL(
                [
                    x509.DistributionPoint(
                        full_name=[
                            x509.UniformResourceIdentifier(
                                "http://domain.com/some.crl"
                            )
                        ],
                        relative_name=None,
                        reasons=frozenset(
                            [
                                x509.ReasonFlags.key_compromise,
                                x509.ReasonFlags.ca_compromise,
                                x509.ReasonFlags.affiliation_changed,
                                x509.ReasonFlags.superseded,
                                x509.ReasonFlags.privilege_withdrawn,
                                x509.ReasonFlags.cessation_of_operation,
                                x509.ReasonFlags.aa_compromise,
                                x509.ReasonFlags.certificate_hold,
                            ]
                        ),
                        crl_issuer=None,
                    )
                ]
            ),
            x509.FreshestCRL(
                [
                    x509.DistributionPoint(
                        full_name=None,
                        relative_name=x509.RelativeDistinguishedName(
                            [
                                x509.NameAttribute(
                                    NameOID.COMMON_NAME,
                                    "indirect CRL for indirectCRL CA3",
                                ),
                            ]
                        ),
                        reasons=None,
                        crl_issuer=None,
                    )
                ]
            ),
            x509.FreshestCRL(
                [
                    x509.DistributionPoint(
                        full_name=None,
                        relative_name=x509.RelativeDistinguishedName(
                            [
                                x509.NameAttribute(
                                    NameOID.COMMON_NAME,
                                    "indirect CRL for indirectCRL CA3",
                                ),
                                x509.NameAttribute(NameOID.COUNTRY_NAME, "US"),
                            ]
                        ),
                        reasons=None,
                        crl_issuer=None,
                    )
                ]
            ),
            x509.AuthorityInformationAccess(
                [
                    x509.AccessDescription(
                        AuthorityInformationAccessOID.OCSP,
                        x509.UniformResourceIdentifier(
                            "http://ocsp.domain.com"
                        ),
                    ),
                    x509.AccessDescription(
                        AuthorityInformationAccessOID.CA_ISSUERS,
                        x509.UniformResourceIdentifier(
                            "http://domain.com/ca.crt"
                        ),
                    ),
                ]
            ),
            x509.SubjectInformationAccess(
                [
                    x509.AccessDescription(
                        SubjectInformationAccessOID.CA_REPOSITORY,
                        x509.UniformResourceIdentifier("http://ca.domain.com"),
                    ),
                ]
            ),
            x509.AuthorityKeyIdentifier(
                b"\xc3\x9c\xf3\xfc\xd3F\x084\xbb\xceF\x7f\xa0|[\xf3\xe2\x08"
                b"\xcbY",
                None,
                None,
            ),
            x509.AuthorityKeyIdentifier(
                b"\xc3\x9c\xf3\xfc\xd3F\x084\xbb\xceF\x7f\xa0|[\xf3\xe2\x08"
                b"\xcbY",
                [
                    x509.DirectoryName(
                        x509.Name(
                            [
                                x509.NameAttribute(
                                    NameOID.ORGANIZATION_NAME, "PyCA"
                                ),
                                x509.NameAttribute(
                                    NameOID.COMMON_NAME, "cryptography CA"
                                ),
                            ]
                        )
                    )
                ],
                333,
            ),
            x509.AuthorityKeyIdentifier(
                None,
                [
                    x509.DirectoryName(
                        x509.Name(
                            [
                                x509.NameAttribute(
                                    NameOID.ORGANIZATION_NAME, "PyCA"
                                ),
                                x509.NameAttribute(
                                    NameOID.COMMON_NAME, "cryptography CA"
                                ),
                            ]
                        )
                    )
                ],
                333,
            ),
            x509.KeyUsage(
                digital_signature=True,
                content_commitment=True,
                key_encipherment=False,
                data_encipherment=False,
                key_agreement=False,
                key_cert_sign=True,
                crl_sign=False,
                encipher_only=False,
                decipher_only=False,
            ),
            x509.PrivateKeyUsagePeriod(
                not_before=datetime.datetime(2002, 1, 1, 12, 1),
                not_after=datetime.datetime(2030, 12, 31, 8, 30),
            ),
            x509.OCSPNoCheck(),
            x509.SubjectKeyIdentifier,
        ],
    )
    def test_extensions(
        self, rsa_key_2048: rsa.RSAPrivateKey, add_ext, backend
    ):
        """Each parametrized extension round-trips unchanged through both a
        signed certificate and a signed CSR."""
        issuer_private_key = rsa_key_2048
        subject_private_key = rsa_key_2048

        not_valid_before = datetime.datetime(2002, 1, 1, 12, 1)
        not_valid_after = datetime.datetime(2030, 12, 31, 8, 30)

        # Resolve the SubjectKeyIdentifier sentinel: it must be derived
        # from the subject's public key, which is not available at
        # parametrize-collection time.
        if add_ext is x509.SubjectKeyIdentifier:
            add_ext = x509.SubjectKeyIdentifier.from_public_key(
                subject_private_key.public_key()
            )

        # Cert
        cert = (
            x509.CertificateBuilder()
            .subject_name(
                x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
            )
            .issuer_name(
                x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
            )
            .not_valid_before(not_valid_before)
            .not_valid_after(not_valid_after)
            .public_key(subject_private_key.public_key())
            .serial_number(123)
            .add_extension(add_ext, critical=False)
            .sign(issuer_private_key, hashes.SHA256(), backend)
        )
        ext = cert.extensions.get_extension_for_class(type(add_ext))
        assert ext.critical is False
        assert ext.value == add_ext

        # CSR
        csr = (
            x509.CertificateSigningRequestBuilder()
            .subject_name(
                x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, "US")])
            )
            .add_extension(add_ext, False)
            .sign(subject_private_key, hashes.SHA256())
        )
        ext = csr.extensions.get_extension_for_class(type(add_ext))
        assert ext.critical is False
        assert ext.value == add_ext
def test_build_ca_request_with_path_length_none(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
private_key = rsa_key_2048
request = (
x509.CertificateSigningRequestBuilder()
.subject_name(
x509.Name(
[x509.NameAttribute(NameOID.ORGANIZATION_NAME, "PyCA")]
)
)
.add_extension(
x509.BasicConstraints(ca=True, path_length=None), critical=True
)
.sign(private_key, hashes.SHA256(), backend)
)
loaded_request = x509.load_pem_x509_csr(
request.public_bytes(encoding=serialization.Encoding.PEM), backend
)
subject = loaded_request.subject
assert isinstance(subject, x509.Name)
basic_constraints = request.extensions.get_extension_for_oid(
ExtensionOID.BASIC_CONSTRAINTS
)
assert isinstance(basic_constraints.value, x509.BasicConstraints)
assert basic_constraints.value.path_length is None
@pytest.mark.parametrize(
"unrecognized",
[
x509.UnrecognizedExtension(
x509.ObjectIdentifier("1.2.3.4.5"),
b"abcdef",
)
],
)
def test_unrecognized_extension(
self, rsa_key_2048: rsa.RSAPrivateKey, backend, unrecognized
):
private_key = rsa_key_2048
cert = (
x509.CertificateBuilder()
.subject_name(
x509.Name([x509.NameAttribute(x509.OID_COUNTRY_NAME, "US")])
)
.issuer_name(
x509.Name([x509.NameAttribute(x509.OID_COUNTRY_NAME, "US")])
)
.not_valid_before(datetime.datetime(2002, 1, 1, 12, 1))
.not_valid_after(datetime.datetime(2030, 12, 31, 8, 30))
.public_key(private_key.public_key())
.serial_number(123)
.add_extension(unrecognized, critical=False)
.sign(private_key, hashes.SHA256(), backend)
)
ext = cert.extensions.get_extension_for_oid(unrecognized.oid)
assert ext.value == unrecognized
| TestCertificateBuilder |
python | doocs__leetcode | solution/2900-2999/2977.Minimum Cost to Convert String II/Solution.py | {
"start": 148,
"end": 2006
} | class ____:
def minimumCost(
self,
source: str,
target: str,
original: List[str],
changed: List[str],
cost: List[int],
) -> int:
m = len(cost)
g = [[inf] * (m << 1) for _ in range(m << 1)]
for i in range(m << 1):
g[i][i] = 0
root = Node()
idx = 0
def insert(w: str) -> int:
node = root
for c in w:
i = ord(c) - ord("a")
if node.children[i] is None:
node.children[i] = Node()
node = node.children[i]
if node.v < 0:
nonlocal idx
node.v = idx
idx += 1
return node.v
@cache
def dfs(i: int) -> int:
if i >= len(source):
return 0
res = dfs(i + 1) if source[i] == target[i] else inf
p = q = root
for j in range(i, len(source)):
p = p.children[ord(source[j]) - ord("a")]
q = q.children[ord(target[j]) - ord("a")]
if p is None or q is None:
break
if p.v < 0 or q.v < 0:
continue
res = min(res, dfs(j + 1) + g[p.v][q.v])
return res
for x, y, z in zip(original, changed, cost):
x = insert(x)
y = insert(y)
g[x][y] = min(g[x][y], z)
for k in range(idx):
for i in range(idx):
if g[i][k] >= inf:
continue
for j in range(idx):
# g[i][j] = min(g[i][j], g[i][k] + g[k][j])
if g[i][k] + g[k][j] < g[i][j]:
g[i][j] = g[i][k] + g[k][j]
ans = dfs(0)
return -1 if ans >= inf else ans
| Solution |
python | sympy__sympy | sympy/liealgebras/type_d.py | {
"start": 77,
"end": 4681
} | class ____(Standard_Cartan):
def __new__(cls, n):
if n < 3:
raise ValueError("n cannot be less than 3")
return Standard_Cartan.__new__(cls, "D", n)
def dimension(self):
"""Dmension of the vector space V underlying the Lie algebra
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("D4")
>>> c.dimension()
4
"""
return self.n
def basic_root(self, i, j):
"""
This is a method just to generate roots
with a 1 iin the ith position and a -1
in the jth position.
"""
n = self.n
root = [0]*n
root[i] = 1
root[j] = -1
return root
def simple_root(self, i):
"""
Every lie algebra has a unique root system.
Given a root system Q, there is a subset of the
roots such that an element of Q is called a
simple root if it cannot be written as the sum
of two elements in Q. If we let D denote the
set of simple roots, then it is clear that every
element of Q can be written as a linear combination
of elements of D with all coefficients non-negative.
In D_n, the first n-1 simple roots are the same as
the roots in A_(n-1) (a 1 in the ith position, a -1
in the (i+1)th position, and zeroes elsewhere).
The nth simple root is the root in which there 1s in
the nth and (n-1)th positions, and zeroes elsewhere.
This method returns the ith simple root for the D series.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("D4")
>>> c.simple_root(2)
[0, 1, -1, 0]
"""
n = self.n
if i < n:
return self.basic_root(i-1, i)
else:
root = [0]*n
root[n-2] = 1
root[n-1] = 1
return root
def positive_roots(self):
"""
This method generates all the positive roots of
A_n. This is half of all of the roots of D_n
by multiplying all the positive roots by -1 we
get the negative roots.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("A3")
>>> c.positive_roots()
{1: [1, -1, 0, 0], 2: [1, 0, -1, 0], 3: [1, 0, 0, -1], 4: [0, 1, -1, 0],
5: [0, 1, 0, -1], 6: [0, 0, 1, -1]}
"""
n = self.n
posroots = {}
k = 0
for i in range(0, n-1):
for j in range(i+1, n):
k += 1
posroots[k] = self.basic_root(i, j)
k += 1
root = self.basic_root(i, j)
root[j] = 1
posroots[k] = root
return posroots
def roots(self):
"""
Returns the total number of roots for D_n"
"""
n = self.n
return 2*n*(n-1)
def cartan_matrix(self):
"""
Returns the Cartan matrix for D_n.
The Cartan matrix matrix for a Lie algebra is
generated by assigning an ordering to the simple
roots, (alpha[1], ...., alpha[l]). Then the ijth
entry of the Cartan matrix is (<alpha[i],alpha[j]>).
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType('D4')
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0, 0],
[-1, 2, -1, -1],
[ 0, -1, 2, 0],
[ 0, -1, 0, 2]])
"""
n = self.n
m = 2*eye(n)
for i in range(1, n - 2):
m[i,i+1] = -1
m[i,i-1] = -1
m[n-2, n-3] = -1
m[n-3, n-1] = -1
m[n-1, n-3] = -1
m[0, 1] = -1
return m
def basis(self):
"""
Returns the number of independent generators of D_n
"""
n = self.n
return n*(n-1)/2
def lie_algebra(self):
"""
Returns the Lie algebra associated with D_n"
"""
n = self.n
return "so(" + str(2*n) + ")"
def dynkin_diagram(self):
n = self.n
diag = " "*4*(n-3) + str(n-1) + "\n"
diag += " "*4*(n-3) + "0\n"
diag += " "*4*(n-3) +"|\n"
diag += " "*4*(n-3) + "|\n"
diag += "---".join("0" for i in range(1,n)) + "\n"
diag += " ".join(str(i) for i in range(1, n-1)) + " "+str(n)
return diag
| TypeD |
python | FactoryBoy__factory_boy | factory/errors.py | {
"start": 37,
"end": 117
} | class ____(Exception):
"""Any exception raised by factory_boy."""
| FactoryError |
python | huggingface__transformers | tests/models/llava_next_video/test_modeling_llava_next_video.py | {
"start": 1479,
"end": 6618
} | class ____:
def __init__(
self,
parent,
ignore_index=-100,
image_token_index=0,
video_token_index=1,
projector_hidden_act="gelu",
seq_length=7,
vision_feature_select_strategy="default",
vision_feature_layer=-1,
text_config={
"model_type": "llama",
"seq_length": 7,
"is_training": True,
"use_input_mask": True,
"use_token_type_ids": False,
"use_labels": True,
"vocab_size": 99,
"hidden_size": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 37,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 580,
"type_vocab_size": 16,
"type_sequence_label_size": 2,
"initializer_range": 0.02,
"num_labels": 3,
"num_choices": 4,
"pad_token_id": 3,
},
is_training=True,
vision_config={
"image_size": 8,
"patch_size": 4,
"num_channels": 3,
"is_training": True,
"hidden_size": 32,
"projection_dim": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 37,
"dropout": 0.1,
"attention_dropout": 0.1,
"initializer_range": 0.02,
},
):
self.parent = parent
self.ignore_index = ignore_index
self.image_token_index = image_token_index
self.video_token_index = video_token_index
self.projector_hidden_act = projector_hidden_act
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
self.text_config = text_config
self.vision_config = vision_config
self.pad_token_id = text_config["pad_token_id"]
self.num_hidden_layers = text_config["num_hidden_layers"]
self.vocab_size = text_config["vocab_size"]
self.hidden_size = text_config["hidden_size"]
self.num_attention_heads = text_config["num_attention_heads"]
self.is_training = is_training
self.batch_size = 3
self.num_channels = 3
self.image_size = 30
self.image_grid_pinpoints = [[16, 16]]
self.num_image_tokens = 24
self.num_video_tokens = 8
self.seq_length = seq_length + self.num_image_tokens + self.num_video_tokens
def get_config(self):
return LlavaNextVideoConfig(
text_config=self.text_config,
vision_config=self.vision_config,
ignore_index=self.ignore_index,
image_token_index=self.image_token_index,
video_token_index=self.video_token_index,
projector_hidden_act=self.projector_hidden_act,
vision_feature_select_strategy=self.vision_feature_select_strategy,
vision_feature_layer=self.vision_feature_layer,
image_grid_pinpoints=self.image_grid_pinpoints,
video_seq_length=self.num_video_tokens,
image_seq_length=self.num_image_tokens,
)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor(
[
self.batch_size,
5,
self.vision_config["num_channels"],
self.vision_config["image_size"],
self.vision_config["image_size"],
]
)
pixel_values_videos = floats_tensor(
[
self.batch_size,
8,
self.vision_config["num_channels"],
self.vision_config["image_size"],
self.vision_config["image_size"],
]
)
config = self.get_config()
return config, pixel_values, pixel_values_videos
def prepare_config_and_inputs_for_common(self):
config, pixel_values, pixel_values_videos = self.prepare_config_and_inputs()
input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 2
attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)
input_ids[input_ids == config.image_token_index] = self.pad_token_id
input_ids[input_ids == config.video_token_index] = self.pad_token_id
input_ids[:, : self.num_image_tokens] = config.image_token_index
input_ids[:, self.num_image_tokens : self.num_video_tokens + self.num_image_tokens] = config.video_token_index
inputs_dict = {
"pixel_values": pixel_values,
"pixel_values_videos": pixel_values_videos,
"image_sizes": torch.tensor(
[[self.vision_config["image_size"], self.vision_config["image_size"]]] * self.batch_size
),
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
| LlavaNextVideoVisionText2TextModelTester |
python | python-openxml__python-docx | tests/opc/test_rel.py | {
"start": 1817,
"end": 9479
} | class ____:
def it_can_add_a_relationship(self, _Relationship_):
baseURI, rId, reltype, target, external = (
"baseURI",
"rId9",
"reltype",
"target",
False,
)
rels = Relationships(baseURI)
rel = rels.add_relationship(reltype, target, rId, external)
_Relationship_.assert_called_once_with(rId, reltype, target, baseURI, external)
assert rels[rId] == rel
assert rel == _Relationship_.return_value
def it_can_add_an_external_relationship(self, add_ext_rel_fixture_):
rels, reltype, url = add_ext_rel_fixture_
rId = rels.get_or_add_ext_rel(reltype, url)
rel = rels[rId]
assert rel.is_external
assert rel.target_ref == url
assert rel.reltype == reltype
def it_can_find_a_relationship_by_rId(self):
rel = Mock(name="rel", rId="foobar")
rels = Relationships(None)
rels["foobar"] = rel
assert rels["foobar"] == rel
def it_can_find_or_add_a_relationship(self, rels_with_matching_rel_, rels_with_missing_rel_):
rels, reltype, part, matching_rel = rels_with_matching_rel_
assert rels.get_or_add(reltype, part) == matching_rel
rels, reltype, part, new_rel = rels_with_missing_rel_
assert rels.get_or_add(reltype, part) == new_rel
def it_can_find_or_add_an_external_relationship(self, add_matching_ext_rel_fixture_):
rels, reltype, url, rId = add_matching_ext_rel_fixture_
_rId = rels.get_or_add_ext_rel(reltype, url)
assert _rId == rId
assert len(rels) == 1
def it_can_find_a_related_part_by_rId(self, rels_with_known_target_part):
rels, rId, known_target_part = rels_with_known_target_part
part = rels.related_parts[rId]
assert part is known_target_part
def it_raises_on_related_part_not_found(self, rels):
with pytest.raises(KeyError):
rels.related_parts["rId666"]
def it_can_find_a_related_part_by_reltype(self, rels_with_target_known_by_reltype):
rels, reltype, known_target_part = rels_with_target_known_by_reltype
part = rels.part_with_reltype(reltype)
assert part is known_target_part
def it_can_compose_rels_xml(self, rels, rels_elm):
# exercise ---------------------
rels.xml
# verify -----------------------
rels_elm.assert_has_calls(
[
call.add_rel("rId1", "http://rt-hyperlink", "http://some/link", True),
call.add_rel("rId2", "http://rt-image", "../media/image1.png", False),
call.xml(),
],
any_order=True,
)
def it_knows_the_next_available_rId_to_help(self, rels_with_rId_gap):
rels, expected_next_rId = rels_with_rId_gap
next_rId = rels._next_rId
assert next_rId == expected_next_rId
# fixtures ---------------------------------------------
@pytest.fixture
def add_ext_rel_fixture_(self, reltype, url):
rels = Relationships(None)
return rels, reltype, url
@pytest.fixture
def add_matching_ext_rel_fixture_(self, request, reltype, url):
rId = "rId369"
rels = Relationships(None)
rels.add_relationship(reltype, url, rId, is_external=True)
return rels, reltype, url, rId
# fixture components -----------------------------------
@pytest.fixture
def _baseURI(self):
return "/baseURI"
@pytest.fixture
def _Relationship_(self, request):
return class_mock(request, "docx.opc.rel._Relationship")
@pytest.fixture
def _rel_with_target_known_by_reltype(self, _rId, reltype, _target_part, _baseURI):
rel = _Relationship(_rId, reltype, _target_part, _baseURI)
return rel, reltype, _target_part
@pytest.fixture
def rels(self):
"""
Populated Relationships instance that will exercise the rels.xml
property.
"""
rels = Relationships("/baseURI")
rels.add_relationship(
reltype="http://rt-hyperlink",
target="http://some/link",
rId="rId1",
is_external=True,
)
part = Mock(name="part")
part.partname.relative_ref.return_value = "../media/image1.png"
rels.add_relationship(reltype="http://rt-image", target=part, rId="rId2")
return rels
@pytest.fixture
def rels_elm(self):
"""
Return a rels_elm mock that will be returned from
CT_Relationships.new()
"""
# create rels_elm mock with a .xml property
rels_elm = Mock(name="rels_elm")
xml = PropertyMock(name="xml")
type(rels_elm).xml = xml
rels_elm.attach_mock(xml, "xml")
rels_elm.reset_mock() # to clear attach_mock call
# patch CT_Relationships to return that rels_elm
patch_ = patch.object(CT_Relationships, "new", return_value=rels_elm)
patch_.start()
yield rels_elm
patch_.stop()
@pytest.fixture
def _rel_with_known_target_part(self, _rId, reltype, _target_part, _baseURI):
rel = _Relationship(_rId, reltype, _target_part, _baseURI)
return rel, _rId, _target_part
@pytest.fixture
def rels_with_known_target_part(self, rels, _rel_with_known_target_part):
rel, rId, target_part = _rel_with_known_target_part
rels.add_relationship(None, target_part, rId)
return rels, rId, target_part
@pytest.fixture
def rels_with_matching_rel_(self, request, rels):
matching_reltype_ = instance_mock(request, str, name="matching_reltype_")
matching_part_ = instance_mock(request, Part, name="matching_part_")
matching_rel_ = instance_mock(
request,
_Relationship,
name="matching_rel_",
reltype=matching_reltype_,
target_part=matching_part_,
is_external=False,
)
rels[1] = matching_rel_
return rels, matching_reltype_, matching_part_, matching_rel_
@pytest.fixture
def rels_with_missing_rel_(self, request, rels, _Relationship_):
missing_reltype_ = instance_mock(request, str, name="missing_reltype_")
missing_part_ = instance_mock(request, Part, name="missing_part_")
new_rel_ = instance_mock(
request,
_Relationship,
name="new_rel_",
reltype=missing_reltype_,
target_part=missing_part_,
is_external=False,
)
_Relationship_.return_value = new_rel_
return rels, missing_reltype_, missing_part_, new_rel_
@pytest.fixture
def rels_with_rId_gap(self, request):
rels = Relationships(None)
rel_with_rId1 = instance_mock(request, _Relationship, name="rel_with_rId1", rId="rId1")
rel_with_rId3 = instance_mock(request, _Relationship, name="rel_with_rId3", rId="rId3")
rels["rId1"] = rel_with_rId1
rels["rId3"] = rel_with_rId3
return rels, "rId2"
@pytest.fixture
def rels_with_target_known_by_reltype(self, rels, _rel_with_target_known_by_reltype):
rel, reltype, target_part = _rel_with_target_known_by_reltype
rels[1] = rel
return rels, reltype, target_part
@pytest.fixture
def reltype(self):
return "http://rel/type"
@pytest.fixture
def _rId(self):
return "rId6"
@pytest.fixture
def _target_part(self, request):
return instance_mock(request, Part)
@pytest.fixture
def url(self):
return "https://github.com/scanny/python-docx"
| DescribeRelationships |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_set.py | {
"start": 56920,
"end": 57294
} | class ____(_TestOnlySetsInBinaryOps, __TestCase):
def setUp(self):
def gen():
for i in range(0, 10, 2):
yield i
self.set = set((1, 2, 3))
self.other = gen()
self.otherIsIterable = True
super().setUp()
#==============================================================================
| TestOnlySetsGenerator |
python | pytorch__pytorch | torch/testing/_internal/static_module.py | {
"start": 75,
"end": 893
} | class ____:
def __init__(self, scripted):
# this is an nn.Module
if hasattr(scripted, "_c"):
self.static_module = torch._C._jit_to_static_module(scripted._c)
else:
self.static_module = torch._C._jit_to_static_module(scripted.graph)
def __call__(self, *args, **kwargs):
return self.static_module(*args, **kwargs)
def benchmark(self, args, kwargs, warmup_runs, main_runs):
self.static_module.benchmark(args, kwargs, warmup_runs, main_runs)
def runAsync(self, args, kwargs):
return self.static_module.runAsync(args, kwargs)
def benchmark_individual_ops(self, args, kwargs, warmup_runs, main_runs):
return self.static_module.benchmark_individual_ops(
args, kwargs, warmup_runs, main_runs
)
| StaticModule |
python | plotly__plotly.py | plotly/graph_objs/layout/map/_domain.py | {
"start": 235,
"end": 5011
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.map"
_path_str = "layout.map.domain"
_valid_props = {"column", "row", "x", "y"}
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
the grid for this map subplot .
The 'column' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
grid for this map subplot .
The 'row' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
@property
def x(self):
"""
Sets the horizontal domain of this map subplot (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def y(self):
"""
Sets the vertical domain of this map subplot (in plot
fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
column in the grid for this map subplot .
row
If there is a layout grid, use the domain for this row
in the grid for this map subplot .
x
Sets the horizontal domain of this map subplot (in plot
fraction).
y
Sets the vertical domain of this map subplot (in plot
fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.map.Domain`
column
If there is a layout grid, use the domain for this
column in the grid for this map subplot .
row
If there is a layout grid, use the domain for this row
in the grid for this map subplot .
x
Sets the horizontal domain of this map subplot (in plot
fraction).
y
Sets the vertical domain of this map subplot (in plot
fraction).
Returns
-------
Domain
"""
super().__init__("domain")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.map.Domain
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.map.Domain`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("column", arg, column)
self._set_property("row", arg, row)
self._set_property("x", arg, x)
self._set_property("y", arg, y)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Domain |
python | pallets__quart | src/quart/typing.py | {
"start": 9608,
"end": 10005
} | class ____(Protocol):
def __init__(self, app: Quart) -> None: ...
def test_client(self) -> TestClientProtocol: ...
async def startup(self) -> None: ...
async def shutdown(self) -> None: ...
async def __aenter__(self) -> TestAppProtocol: ...
async def __aexit__(
self, exc_type: type, exc_value: BaseException, tb: TracebackType
) -> None: ...
| TestAppProtocol |
python | pytorch__pytorch | benchmarks/sparse/utils.py | {
"start": 143,
"end": 1516
} | class ____:
def __init__(self, enable_timing):
pass
def record(self):
self.time = time.perf_counter()
def elapsed_time(self, end_event):
assert isinstance(end_event, Event)
return end_event.time - self.time
def gen_sparse_csr(shape, nnz):
fill_value = 0
total_values = functools.reduce(operator.mul, shape, 1)
dense = np.random.randn(total_values)
fills = random.sample(list(range(total_values)), total_values - nnz)
for f in fills:
dense[f] = fill_value
dense = torch.from_numpy(dense.reshape(shape))
return dense.to_sparse_csr()
def gen_sparse_coo(shape, nnz):
dense = np.random.randn(*shape)
values = []
indices = [[], []]
for n in range(nnz):
row = random.randint(0, shape[0] - 1)
col = random.randint(0, shape[1] - 1)
indices[0].append(row)
indices[1].append(col)
values.append(dense[row, col])
return torch.sparse_coo_tensor(indices, values, size=shape)
def gen_sparse_coo_and_csr(shape, nnz):
total_values = functools.reduce(operator.mul, shape, 1)
dense = np.random.randn(total_values)
fills = random.sample(list(range(total_values)), total_values - nnz)
for f in fills:
dense[f] = 0
dense = torch.from_numpy(dense.reshape(shape))
return dense.to_sparse(), dense.to_sparse_csr()
| Event |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.