language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django__django | tests/model_fields/test_generatedfield.py | {
"start": 5918,
"end": 14630
} | class ____:
def test_unsaved_error(self):
m = self.base_model(a=1, b=2)
msg = "Cannot retrieve deferred field 'field' from an unsaved model."
with self.assertRaisesMessage(AttributeError, msg):
m.field
def test_full_clean(self):
m = self.base_model(a=1, b=2)
# full_clean() ignores GeneratedFields.
m.full_clean()
m.save()
expected_num_queries = (
0 if connection.features.can_return_columns_from_insert else 1
)
with self.assertNumQueries(expected_num_queries):
self.assertEqual(m.field, 3)
@skipUnlessDBFeature("supports_table_check_constraints")
def test_full_clean_with_check_constraint(self):
model_name = self.check_constraint_model._meta.verbose_name.capitalize()
m = self.check_constraint_model(a=2)
m.full_clean()
m.save()
expected_num_queries = (
0 if connection.features.can_return_columns_from_insert else 1
)
with self.assertNumQueries(expected_num_queries):
self.assertEqual(m.a_squared, 4)
m = self.check_constraint_model(a=-1)
with self.assertRaises(ValidationError) as cm:
m.full_clean()
self.assertEqual(
cm.exception.message_dict,
{"__all__": [f"Constraint “{model_name} a > 0” is violated."]},
)
@skipUnlessDBFeature("supports_expression_indexes")
def test_full_clean_with_unique_constraint_expression(self):
model_name = self.unique_constraint_model._meta.verbose_name.capitalize()
m = self.unique_constraint_model(a=2)
m.full_clean()
m.save()
expected_num_queries = (
0 if connection.features.can_return_columns_from_insert else 1
)
with self.assertNumQueries(expected_num_queries):
self.assertEqual(m.a_squared, 4)
m = self.unique_constraint_model(a=2)
with self.assertRaises(ValidationError) as cm:
m.full_clean()
self.assertEqual(
cm.exception.message_dict,
{"__all__": [f"Constraint “{model_name} a” is violated."]},
)
def test_create(self):
m = self.base_model.objects.create(a=1, b=2)
expected_num_queries = (
0 if connection.features.can_return_columns_from_insert else 1
)
with self.assertNumQueries(expected_num_queries):
self.assertEqual(m.field, 3)
def test_non_nullable_create(self):
with self.assertRaises(IntegrityError):
self.base_model.objects.create()
def test_save(self):
# Insert.
m = self.base_model(a=2, b=4)
m.save()
expected_num_queries = (
0 if connection.features.can_return_columns_from_insert else 1
)
with self.assertNumQueries(expected_num_queries):
self.assertEqual(m.field, 6)
# Update.
m.a = 4
m.save()
expected_num_queries = (
0 if connection.features.can_return_rows_from_update else 1
)
with self.assertNumQueries(expected_num_queries):
self.assertEqual(m.field, 8)
# Update non-dependent field.
self.base_model.objects.filter(pk=m.pk).update(a=6)
m.save(update_fields=["fk"])
with self.assertNumQueries(0):
self.assertEqual(m.field, 8)
# Update dependent field without persisting local changes.
m.save(update_fields=["b"])
with self.assertNumQueries(expected_num_queries):
self.assertEqual(m.field, 10)
# Update dependent field while persisting local changes.
m.a = 8
m.save(update_fields=["a"])
with self.assertNumQueries(expected_num_queries):
self.assertEqual(m.field, 12)
def test_save_model_with_pk(self):
m = self.base_model(pk=1, a=1, b=2)
m.save()
expected_num_queries = (
0 if connection.features.can_return_columns_from_insert else 1
)
with self.assertNumQueries(expected_num_queries):
self.assertEqual(m.field, 3)
def test_save_model_with_foreign_key(self):
fk_object = Foo.objects.create(a="abc", d=Decimal("12.34"))
m = self.base_model(a=1, b=2, fk=fk_object)
m.save()
expected_num_queries = (
0 if connection.features.can_return_columns_from_insert else 1
)
with self.assertNumQueries(expected_num_queries):
self.assertEqual(m.field, 3)
def test_generated_fields_can_be_deferred(self):
fk_object = Foo.objects.create(a="abc", d=Decimal("12.34"))
m = self.base_model.objects.create(a=1, b=2, fk=fk_object)
m = self.base_model.objects.defer("field").get(id=m.id)
self.assertEqual(m.get_deferred_fields(), {"field"})
def test_update(self):
m = self.base_model.objects.create(a=1, b=2)
self.base_model.objects.update(b=3)
m = self.base_model.objects.get(pk=m.pk)
self.assertEqual(m.field, 4)
def test_bulk_create(self):
m = self.base_model(a=3, b=4)
(m,) = self.base_model.objects.bulk_create([m])
if not connection.features.can_return_rows_from_bulk_insert:
m = self.base_model.objects.get()
self.assertEqual(m.field, 7)
def test_bulk_update(self):
m = self.base_model.objects.create(a=1, b=2)
m.a = 3
self.base_model.objects.bulk_update([m], fields=["a"])
m = self.base_model.objects.get(pk=m.pk)
self.assertEqual(m.field, 5)
def test_output_field_lookups(self):
"""Lookups from the output_field are available on GeneratedFields."""
internal_type = IntegerField().get_internal_type()
min_value, max_value = connection.ops.integer_field_range(internal_type)
if min_value is None:
self.skipTest("Backend doesn't define an integer min value.")
if max_value is None:
self.skipTest("Backend doesn't define an integer max value.")
does_not_exist = self.base_model.DoesNotExist
underflow_value = min_value - 1
with self.assertNumQueries(0), self.assertRaises(does_not_exist):
self.base_model.objects.get(field=underflow_value)
with self.assertNumQueries(0), self.assertRaises(does_not_exist):
self.base_model.objects.get(field__lt=underflow_value)
with self.assertNumQueries(0), self.assertRaises(does_not_exist):
self.base_model.objects.get(field__lte=underflow_value)
overflow_value = max_value + 1
with self.assertNumQueries(0), self.assertRaises(does_not_exist):
self.base_model.objects.get(field=overflow_value)
with self.assertNumQueries(0), self.assertRaises(does_not_exist):
self.base_model.objects.get(field__gt=overflow_value)
with self.assertNumQueries(0), self.assertRaises(does_not_exist):
self.base_model.objects.get(field__gte=overflow_value)
def test_output_field_db_collation(self):
collation = connection.features.test_collations["virtual"]
m = self.output_field_db_collation_model.objects.create(name="NAME")
field = m._meta.get_field("lower_name")
db_parameters = field.db_parameters(connection)
self.assertEqual(db_parameters["collation"], collation)
self.assertEqual(db_parameters["type"], field.output_field.db_type(connection))
def test_db_type_parameters(self):
db_type_parameters = self.output_field_db_collation_model._meta.get_field(
"lower_name"
).db_type_parameters(connection)
self.assertEqual(db_type_parameters["max_length"], 11)
def test_model_with_params(self):
m = self.params_model.objects.create()
expected_num_queries = (
0 if connection.features.can_return_columns_from_insert else 1
)
with self.assertNumQueries(expected_num_queries):
self.assertEqual(m.field, "Constant")
def test_nullable(self):
m1 = self.nullable_model.objects.create()
none_val = "" if connection.features.interprets_empty_strings_as_nulls else None
expected_num_queries = (
0 if connection.features.can_return_columns_from_insert else 1
)
with self.assertNumQueries(expected_num_queries):
self.assertEqual(m1.lower_name, none_val)
m2 = self.nullable_model.objects.create(name="NaMe")
with self.assertNumQueries(expected_num_queries):
self.assertEqual(m2.lower_name, "name")
@skipUnlessDBFeature("supports_stored_generated_columns")
| GeneratedFieldTestMixin |
python | coleifer__peewee | tests/fields.py | {
"start": 4464,
"end": 4889
} | class ____(ModelTestCase):
requires = [BoolModel]
def test_boolean_field(self):
BoolModel.create(value=True, name='t')
BoolModel.create(value=False, name='f')
BoolModel.create(value=None, name='n')
vals = sorted((b.name, b.value) for b in BoolModel.select())
self.assertEqual(vals, [
('f', False),
('n', None),
('t', True)])
| TestBooleanField |
python | pytorch__pytorch | test/inductor/test_decompose_mem_bound_mm.py | {
"start": 1383,
"end": 1794
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(
self, z: torch.Tensor, x: torch.Tensor, y: torch.Tensor
) -> torch.Tensor:
return torch.ops.aten.addmm.default(z, x, y)
@requires_gpu
@torch._inductor.config.patch(
post_grad_fusion_options={
"decompose_mm_pass": {},
}
)
@instantiate_parametrized_tests
| TestDecomposeAddMM |
python | tensorflow__tensorflow | tensorflow/python/ops/control_flow_ops.py | {
"start": 35963,
"end": 82844
} | class ____(ControlFlowContext):
"""The context for the loop construct."""
def __init__(self,
maximum_iterations=None,
parallel_iterations=10,
back_prop=True,
swap_memory=False,
name="while_context",
grad_state=None,
context_def=None,
import_scope=None):
""""Creates a `WhileContext`.
Args:
maximum_iterations: Optional upper bound on number of loop iterations.
parallel_iterations: The number of iterations allowed to run in parallel.
back_prop: Whether backprop is enabled for this while loop.
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
name: Optional name prefix for the returned tensors.
grad_state: The gradient loop state.
context_def: Optional `WhileContextDef` protocol buffer to initialize the
`Whilecontext` python object from.
import_scope: Optional `string`. Name scope to add. Only used when
initialing from protocol buffer.
"""
if context_def:
self._init_from_proto(context_def, import_scope=import_scope)
else:
ControlFlowContext.__init__(self)
self._init_from_args(maximum_iterations, parallel_iterations, back_prop,
swap_memory, name)
# The gradient loop state.
self._grad_state = grad_state
def _init_from_args(self, maximum_iterations, parallel_iterations, back_prop,
swap_memory, name):
"""Creates a new `WhileContext` from arguments.
Args:
maximum_iterations: Optional upper bound on number of loop iterations.
parallel_iterations: The number of iterations allowed to run in parallel.
back_prop: Whether backprop is enabled for this while loop.
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
name: Optional name prefix for the returned tensors.
Raises:
ValueError: If `parallel_iterations` has invalid value.
"""
if not isinstance(parallel_iterations, int) or (parallel_iterations <= 0):
raise ValueError("'parallel_iterations' must be a positive integer: "
"%s" % parallel_iterations)
self._name = ops.get_default_graph().unique_name(name)
self._maximum_iterations = maximum_iterations
self._parallel_iterations = parallel_iterations
self._back_prop = back_prop
self._swap_memory = swap_memory
# We use this node to control constants created by the pred lambda.
self._pivot_for_pred = None
# We use this node to control constants created by the body lambda.
self._pivot_for_body = None
# The boolean tensor for loop termination condition. Used in code
# generation for gradient computation
self._pivot = None
# The list of exit tensors for loop variables.
self._loop_exits = []
# The list of enter tensors for loop variables.
self._loop_enters = []
self._graph = ops.get_default_graph()
def _init_from_proto(self, context_def, import_scope=None):
"""Creates a new `WhileContext` from protocol buffer.
Args:
context_def: `WhileContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(context_def, control_flow_pb2.WhileContextDef)
# Create from context_def.
g = ops.get_default_graph()
self._name = ops.prepend_name_scope(context_def.context_name, import_scope)
if context_def.maximum_iterations_name:
self._maximum_iterations = g.as_graph_element(
ops.prepend_name_scope(context_def.maximum_iterations_name,
import_scope))
else:
self._maximum_iterations = None
self._parallel_iterations = context_def.parallel_iterations
self._back_prop = context_def.back_prop
self._swap_memory = context_def.swap_memory
self._pivot_for_pred = g.as_graph_element(
ops.prepend_name_scope(context_def.pivot_for_pred_name, import_scope))
# We use this node to control constants created by the body lambda.
self._pivot_for_body = g.as_graph_element(
ops.prepend_name_scope(context_def.pivot_for_body_name, import_scope))
# The boolean tensor for loop termination condition. Used in code
# generation for gradient computation.
self._pivot = g.as_graph_element(
ops.prepend_name_scope(context_def.pivot_name, import_scope))
# The list of exit tensors for loop variables.
self._loop_exits = [
g.as_graph_element(ops.prepend_name_scope(exit_name, import_scope))
for exit_name in context_def.loop_exit_names
]
# The list of enter tensors for loop variables.
self._loop_enters = [
g.as_graph_element(ops.prepend_name_scope(enter_name, import_scope))
for enter_name in context_def.loop_enter_names
]
super(WhileContext, self).__init__(
values_def=context_def.values_def, import_scope=import_scope)
# import_scope causes self.name to be different from the original serialized
# context's name. Rewrite "frame_name" attrs with the new name.
if import_scope:
for tensor_name in self._values:
op = g.as_graph_element(tensor_name).op
if util.IsLoopEnter(op):
# pylint: disable=protected-access
op._set_attr("frame_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(self.name)))
# pylint: enable=protected-access
self._graph = ops.get_default_graph()
@property
def maximum_iterations(self):
"""The maximum number of iterations that will be executed."""
return self._maximum_iterations
@property
def parallel_iterations(self):
"""The number of iterations allowed to run in parallel."""
return self._parallel_iterations
@property
def back_prop(self):
"""True iff backprop is enabled for this while loop."""
return self._back_prop
@property
def swap_memory(self):
"""True iff GPU-CPU memory swap is enabled for this while loop."""
return self._swap_memory
@property
def pivot(self):
"""The boolean tensor representing the loop termination condition."""
return self._pivot
@property
def loop_enters(self):
"""The list of enter tensors for loop variables."""
return self._loop_enters
@property
def loop_exits(self):
"""The list of exit tensors for loop variables."""
return self._loop_exits
@property
def grad_state(self):
"""The gradient loop state."""
return self._grad_state
def to_proto(self, export_scope=None):
"""Converts a `WhileContext` to a `WhileContextDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `WhileContextDef` protocol buffer.
"""
if (export_scope is None or self.name.startswith(export_scope)):
context_def = control_flow_pb2.WhileContextDef()
context_def.context_name = ops.strip_name_scope(self.name, export_scope)
context_def.parallel_iterations = self._parallel_iterations
if self._maximum_iterations is not None:
context_def.maximum_iterations_name = ops.strip_name_scope(
self._maximum_iterations.name, export_scope)
context_def.back_prop = self._back_prop
context_def.swap_memory = self._swap_memory
context_def.pivot_for_pred_name = ops.strip_name_scope(
self._pivot_for_pred.name, export_scope)
context_def.pivot_for_body_name = ops.strip_name_scope(
self._pivot_for_body.name, export_scope)
context_def.pivot_name = ops.strip_name_scope(self._pivot.name,
export_scope)
context_def.loop_exit_names.extend([
ops.strip_name_scope(l.name, export_scope) for l in self._loop_exits
])
context_def.loop_enter_names.extend([
ops.strip_name_scope(l.name, export_scope) for l in self._loop_enters
])
context_def.values_def.MergeFrom(
super(WhileContext, self)._to_values_def(export_scope=export_scope))
for nested in self._nested_contexts:
nested_def = context_def.nested_contexts.add()
nested.to_control_flow_context_def(nested_def)
return context_def
else:
return None
def to_control_flow_context_def(self, context_def, export_scope=None):
context_def.while_ctxt.CopyFrom(self.to_proto(export_scope=export_scope))
@staticmethod
def from_proto(context_def, import_scope=None):
"""Returns a `WhileContext` object created from `context_def`.
Args:
context_def: A `WhileContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
Returns:
A `WhileContext` Python object.
"""
ret = WhileContext(context_def=context_def, import_scope=import_scope)
ret.Enter()
for nested_def in context_def.nested_contexts:
from_control_flow_context_def(nested_def, import_scope=import_scope)
ret.Exit()
return ret
def GetWhileContext(self):
return self
def GetControlPivot(self):
if self._pivot_for_body is not None:
return self._pivot_for_body
return self._pivot_for_pred
def AddValue(self, val):
"""Add `val` to the current context and its outer context recursively."""
result = val
new_value = val.name not in self._values
# Don't treat ops in this context as new values. Usually all known values
# are in self._values, except when we're importing a while loop inside this
# WhileContext. Since there's a cycle in this case, `val` may be part of the
# imported while loop but not yet processed by this context and added to
# self._values in _AddOpInternal. We only want to process external input
# tensors to the while loop here.
new_value &= val.op._control_flow_context is not self # pylint: disable=protected-access
if new_value:
self._values.add(val.name)
# If we are in a grad context and val is from its forward context,
# use GetRealValue(), which adds the logic to save the history of
# val in forward.
grad_ctxt = ops.get_default_graph()._get_control_flow_context()
if grad_ctxt:
grad_ctxt = grad_ctxt.GetWhileContext()
if grad_ctxt.grad_state:
forward_ctxt = util.GetWhileContext(val.op)
if util.IsLoopExit(val.op):
forward_ctxt = forward_ctxt.outer_context
if forward_ctxt:
forward_ctxt = forward_ctxt.GetWhileContext()
if forward_ctxt == grad_ctxt.grad_state.forward_context:
real_val = grad_ctxt.grad_state.GetRealValue(val)
self._external_values[val.name] = real_val
return real_val
if self._outer_context is not None:
result = self._outer_context.AddValue(val)
# Create an Enter to make `result` known to this loop context.
with ops.control_dependencies(None):
enter = _Enter(
result,
self._name,
is_constant=True,
parallel_iterations=self._parallel_iterations)
enter.graph.prevent_feeding(enter)
if self._outer_context:
self._outer_context.AddInnerOp(enter.op)
# Fix the control inputs and control flow context of these enter ops.
self._FixControlInputsAndContext([enter])
# Add `enter` in this context.
self._values.add(enter.name)
self._external_values[val.name] = enter
result = enter
else:
actual_val = self._external_values.get(val.name)
if actual_val is not None:
result = actual_val
return result
def AddOp(self, op: ops.Operation):
"""Add `op` to the current context."""
# For a reduction op, if op is in a grad context and its input is from
# its forward context, moving op to the forward context means we would
# store the tensor after the reduction as opposed to the tensor before
# reduction, and therefore could significantly reduce memory consumption.
# For now, we do this only for a few ops.
#
# If in XLA context, do not move constant ops to forward pass as pushing to
# and popping from a stack removes the constant property of an op and breaks
# XLA compilation, which requires certain inputs to be constant for certain
# ops.
if not util.IsInXLAContext(op) and op.type in {"Shape", "Size", "Rank"}:
grad_ctxt = ops.get_default_graph()._get_control_flow_context()
if grad_ctxt:
grad_ctxt = grad_ctxt.GetWhileContext()
if grad_ctxt.grad_state:
op_input_forward_ctxt = util.GetWhileContext(op.inputs[0].op)
if op_input_forward_ctxt == grad_ctxt.grad_state.forward_context:
op_input_ctxt = op.inputs[0].op._get_control_flow_context()
op._set_control_flow_context(op_input_ctxt)
op_input_ctxt._AddOpInternal(op)
return
self._AddOpInternal(op)
# pylint: disable=g-doc-args
def _AddOpInternal(self, op: ops.Operation):
"""Add `op` to the current context.
We move any external control dependencies of the op to the loop pivot, to
ensure they get executed.
"""
# This is needed to prevent frame mismatch errors where there are Const
# nodes inside tf.function in v1 while_loop and inlining is turned on.
if op.type in ["PartitionedCall", "StatefulPartitionedCall"]:
op._add_control_input(self.GetControlPivot().op) # pylint: disable=protected-access
if not op.inputs:
# Remove any external control dependency on this op
control_inputs, external_inputs = self._RemoveExternalControlEdges(op)
# Add a control edge from the control pivot to this op.
if not control_inputs:
# pylint: disable=protected-access
op._add_control_input(self.GetControlPivot().op)
# pylint: enable=protected-access
for x in op.outputs:
self._values.add(x.name)
else:
for index in range(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x != x:
op._update_input(index, real_x) # pylint: disable=protected-access
# Remove any external control dependency on this op.
_, external_inputs = self._RemoveExternalControlEdges(op)
# Add a control dependency to prevent loop invariants from
# enabling ops that should not be executed.
self._MaybeAddControlDependency(op)
for x in op.outputs:
self._values.add(x.name)
if external_inputs:
# Use an identity to pull control inputs as data inputs. Note that we
# ignore ops which don't have outputs. TODO(apassos): fix that
with ops.control_dependencies(None):
self.Enter()
external_inputs = [
array_ops.identity(x.outputs[0]).op
for x in external_inputs
if x.outputs
]
self.Exit()
op._add_control_inputs(external_inputs) # pylint: disable=protected-access
if self._outer_context or not util.IsLoopExit(op):
op.graph.prevent_fetching(op)
for x in op.outputs:
op.graph.prevent_feeding(x)
if self._outer_context:
self._outer_context.AddInnerOp(op)
def _MaybeAddControlDependency(self, op: ops.Operation):
"""Add a control input to the op if it only depends on loop invariants."""
def _IsOpFree(op):
"""Determines if `op` needs a control dependency."""
if op.control_inputs:
return False
# pylint: disable=protected-access
if op.graph._is_function(op.type) or op.type == "SymbolicGradient":
return True
# pylint: enable=protected-access
for x in op.inputs:
if not util.IsLoopConstantEnter(x.op):
return False
return True
if _IsOpFree(op):
# pylint: disable=protected-access
op._add_control_input(self.GetControlPivot().op)
# pylint: enable=protected-access
def AddForwardLoopCounter(self, outer_grad_state):
"""Adds a loop that counts the number of iterations.
This is added to the forward loop at the time when we start to
create the loop for backprop gradient computation. Called in
the outer context of this forward context.
The pseudocode is:
`n = 0; while (_pivot) { n++; }`
Note that a control dependency is added to `n` to ensure the correct
execution order of stack push ops.
Args:
outer_grad_state: The outer grad state. None if not nested.
Returns:
The number of iterations taken by the forward loop and the loop index.
"""
n = constant_op.constant(0, name="f_count")
if outer_grad_state is not None:
# Force the stack pushes of i-th execution of an inner loop to be ordered
# before the pushes of (i+1)-th execution of the same inner loop.
outer_add_op = outer_grad_state.forward_index.op.inputs[0].op
n.op._add_control_input(outer_add_op) # pylint: disable=protected-access
self.Enter()
self.AddName(n.name)
enter_n = _Enter(
n,
self._name,
is_constant=False,
parallel_iterations=self._parallel_iterations,
name="f_count")
self.loop_enters.append(enter_n)
merge_n = merge([enter_n, enter_n])[0]
switch_n = switch(merge_n, self._pivot)
index = math_ops.add(switch_n[1], 1)
next_n = _NextIteration(index)
merge_n.op._update_input(1, next_n)
total_iterations = exit(switch_n[0], name="f_count")
self.loop_exits.append(total_iterations)
self.ExitResult([total_iterations])
self.Exit()
return total_iterations, next_n
def AddBackpropLoopCounter(self, count, outer_grad_state):
"""Add the backprop loop that controls the iterations.
This is added to the backprop loop. It is used to control the loop
termination of the backprop loop. Called in the outer context of
this grad context.
The pseudocode is:
`n = count; while (n >= 1) { n--; }`
Note that a control dependency is added to `final_zero` to ensure the
correct execution order of stack pop ops.
Args:
count: The number of iterations for backprop.
outer_grad_state: The outer grad state. None if not nested.
Returns:
The loop index.
"""
in_separate_functions = count.graph is not ops.get_default_graph()
if in_separate_functions:
# Brings the count into this graph
count = array_ops.identity(count)
else:
# TODO(apassos) XLA expects this constant to be created outside the loop,
# so doing that for now.
one = constant_op.constant(1, name="b_count")
self.Enter()
self.AddName(count.name)
enter_count = _Enter(
count,
self._name,
is_constant=False,
parallel_iterations=self._parallel_iterations,
name="b_count")
self.loop_enters.append(enter_count)
merge_count = merge([enter_count, enter_count])[0]
self._pivot_for_pred = merge_count
if in_separate_functions:
one = constant_op.constant(1, name="b_count")
pred = math_ops.greater_equal(merge_count, one)
self._pivot = loop_cond(pred, name="b_count")
switch_count = switch(merge_count, self._pivot)
index = math_ops.subtract(switch_count[1], one)
self._pivot_for_body = index
next_count = _NextIteration(index)
merge_count.op._update_input(1, next_count)
final_zero = exit(switch_count[0], name="b_count")
self.loop_exits.append(final_zero)
if outer_grad_state is not None:
# Force the stack pops of i-th execution of an inner loop to be ordered
# before the pops of (i+1)-th execution of the same inner loop.
# pylint: disable=protected-access
outer_grad_state.grad_sync._add_control_input(final_zero.op)
# pylint: enable=protected-access
self.ExitResult([final_zero])
self.Exit()
return next_count
def AddBackpropAccumulator(self, op: ops.Operation, grad):
"""Add an accumulation loop for every loop invariant.
This is added to the backprop loop. It is used to accumulate partial
gradients within each loop iteration. Called when in the gradient while
context.
The pseudocode is:
```
acc = 0.0;
while (_pivot) {
acc += grad;
}
```
Args:
op: The Enter op for a loop invariant.
grad: The partial gradient of an iteration for a loop invariant.
Returns:
The gradient for a loop invariant.
"""
self.Exit()
# Create a zeros tensor with the right shape for acc. If we don't
# know the full shape statically, we will have to get the shape
# dynamically from the forward inference. Getting the shape right
# for the zeros is only needed for the base case when the loop exits
# without running any iterations.
shape = grad.get_shape()
if shape.is_fully_defined():
if self.outer_context:
self.outer_context.Enter()
acc = constant_op.constant(0, grad.dtype, shape=shape, name="b_acc")
if self.outer_context:
self.outer_context.Exit()
else:
value = op.inputs[0]
if (isinstance(self.outer_context, WhileContext) and
self.outer_context.grad_state is not None):
# We are in a nested while loop.
forward_ctxt = self.grad_state.forward_context
forward_ctxt.outer_context.Enter()
zeros_shape = array_ops.shape_internal(value, optimize=False)
forward_ctxt.outer_context.Exit()
outer_grad_state = self.grad_state.outer_grad_state
history_zeros_shape = outer_grad_state.AddForwardAccumulator(
zeros_shape)
self.outer_context.Enter()
real_shape = outer_grad_state.AddBackpropAccumulatedValue(
history_zeros_shape, zeros_shape)
acc = array_ops.zeros(real_shape, grad.dtype)
self.outer_context.Exit()
else:
if self.outer_context:
self.outer_context.Enter()
zeros_shape = array_ops.shape_internal(value, optimize=False)
acc = array_ops.zeros(zeros_shape, grad.dtype)
if self.outer_context:
self.outer_context.Exit()
self.Enter()
self.AddName(acc.name)
enter_acc = _Enter(
acc,
self._name,
is_constant=False,
parallel_iterations=self._parallel_iterations,
name="b_acc")
self.loop_enters.append(enter_acc)
merge_acc = merge([enter_acc, enter_acc], name="b_acc")[0]
switch_acc_false, switch_acc_true = switch(merge_acc, self._pivot)
add_acc = math_ops.add(switch_acc_true, grad)
next_acc = _NextIteration(add_acc)
merge_acc.op._update_input(1, next_acc) # pylint: disable=protected-access
result_acc = exit(switch_acc_false, name="b_acc")
self.loop_exits.append(result_acc)
self.ExitResult([result_acc])
return result_acc
def AddBackpropIndexedSlicesAccumulator(self, op: ops.Operation, grad):
"""This is used for accumulating gradients that are IndexedSlices.
This is essentially the equivalent of AddBackpropAccumulator but optimized
for things like updating embeddings from within a while loop.
Args:
op: The Enter op for a loop invariant.
grad: The partial gradients represented as an IndexedSlices.
Returns:
The accumulated IndexedSlices gradient of the loop invariant.
"""
values = grad.values
indices = grad.indices
dense_shape = grad.dense_shape
self.Exit()
if self.outer_context:
self.outer_context.Enter()
if values.get_shape().is_fully_defined():
values_shape = tensor_shape.TensorShape([tensor_shape.Dimension(1)] +
values.get_shape().dims[1:])
if self.outer_context:
self.outer_context.Enter()
values_acc = constant_op.constant(
0, values.dtype, shape=values_shape, name="b_acc")
if self.outer_context:
self.outer_context.Exit()
else:
values_shape = _resource_safe_shape(op.inputs[0])[1:]
values_shape = array_ops.concat([[1], values_shape], 0)
values_acc = array_ops.zeros(values_shape, dtype=values.dtype)
indices_acc = constant_op.constant([0], indices.dtype)
shape_acc = None
if dense_shape is not None:
if dense_shape.get_shape().is_fully_defined():
if self.outer_context:
self.outer_context.Enter()
shape_acc = constant_op.constant(
0, dense_shape.dtype, shape=dense_shape.get_shape())
if self.outer_context:
self.outer_context.Exit()
else:
shape_acc = array_ops.zeros_like(
array_ops.shape_internal(
op.inputs[0], optimize=False, out_type=dense_shape.dtype),
optimize=False)
if self.outer_context:
self.outer_context.Exit()
self.Enter()
self.AddName(values_acc.name)
self.AddName(indices_acc.name)
init_acc = [indices_acc, values_acc]
if shape_acc is not None:
self.AddName(shape_acc.name)
init_acc.append(shape_acc)
# Set use_input_shape=False since the accumulator tensors will grow in
# size. If use_input_shape=True, the _update_input call below will result in
# incompatible shapes.
enter_acc = [
_Enter(
x,
self._name,
is_constant=False,
parallel_iterations=self._parallel_iterations,
use_input_shape=False,
name="b_acc") for x in init_acc
]
# Manually set appropriate partial shapes.
enter_acc[0].set_shape([None])
if values_acc.shape.dims is not None:
enter_acc[1].set_shape([None] + values_acc.shape.as_list()[1:])
self.loop_enters.extend(enter_acc)
merge_acc = [merge([x, x], name="b_acc")[0] for x in enter_acc]
switch_acc = [switch(x, self._pivot) for x in merge_acc]
# The actual accumulation.
acc_indexed_slices = [
array_ops.concat([xa[1], xv], 0)
for xa, xv in zip(switch_acc[:2], [indices, values])
]
if shape_acc is not None:
# For the shape we just keep the maximum
acc_indexed_slices.append(math_ops.maximum(dense_shape, switch_acc[2][1]))
next_acc = [_NextIteration(x) for x in acc_indexed_slices]
for xm, xn in zip(merge_acc, next_acc):
xm.op._update_input(1, xn) # pylint: disable=protected-access
exit_acc = [exit(x[0], name="b_acc") for x in switch_acc]
self.loop_exits.extend(exit_acc)
self.ExitResult(exit_acc)
return indexed_slices.IndexedSlices(
indices=exit_acc[0],
values=exit_acc[1],
dense_shape=exit_acc[2] if shape_acc is not None else None)
def _InitializeValues(self, values):
"""Makes the values known to this context."""
self._values = set()
for x in values:
if isinstance(x, tensor_lib.Tensor):
self._values.add(x.name)
else:
raise TypeError("'values' must be a list of Tensors. "
f"Received: {type(x)}.")
def _BuildLoop(self, pred, body, flat_orig_loop_vars, flat_loop_vars,
               loop_vars_signature):
  """Core: Add the loop termination condition and body to the graph.

  Wires up the full dataflow skeleton of a while loop: Enter nodes for the
  loop variables, Merge/Switch nodes driven by the LoopCond pivot, the body
  subgraph, NextIteration back edges, and Exit nodes.

  Args:
    pred: Callable building the loop termination condition.
    body: Callable building the loop body.
    flat_orig_loop_vars: Flat list of the original loop variables (used to
      identify which entries are TensorArrays).
    flat_loop_vars: Flat list of loop variables with TensorArrays replaced
      by their flow tensors.
    loop_vars_signature: Nested TypeSpec structure describing the loop
      variables (carries the shape invariants).

  Returns:
    A pair `(original_body_result, exit_vars)`: the raw structure returned
    by `body` (may contain TensorArrays) and the flat list of Exit tensors.

  Raises:
    ValueError: If a loop variable's initial shape is not compatible with
      its shape invariant, or if `body` returns a different number of
      outputs than there are loop variables.
  """
  # Flat list of shape invariants, one per flattened loop variable.
  flat_shape_invariants = nest.map_structure(
      lambda spec: spec.shape,
      nest.flatten(loop_vars_signature, expand_composites=True))

  # Let the context know the loop variables so the loop variables
  # would be added in the outer contexts properly.
  self._InitializeValues(flat_loop_vars)
  if self._outer_context:
    real_vars = [self._outer_context.AddValue(x) for x in flat_loop_vars]
  else:
    real_vars = flat_loop_vars

  enter_vars = []
  # Enter nodes must not inherit ambient control dependencies; clear them.
  with ops.control_dependencies(None):
    for real_var, shape_invariant in zip(real_vars, flat_shape_invariants):
      enter_var = _Enter(
          real_var,
          self._name,
          is_constant=False,
          parallel_iterations=self._parallel_iterations,
          use_input_shape=False)

      # The initial value must already satisfy the declared invariant; the
      # invariant then becomes the static shape seen inside the loop.
      if _ShapeLessThanOrEqual(real_var.get_shape(), shape_invariant):
        enter_var.set_shape(shape_invariant)
      else:
        raise ValueError(
            f"The shape invariant specified for {real_var.name} is not "
            "compatible with the initial shape of the loop variable. It "
            f"enters the loop with shape {real_var.get_shape()}, but the "
            f"specified shape invariant is {shape_invariant}.")

      enter_var.graph.prevent_feeding(enter_var)
      if self._outer_context:
        self._outer_context.AddInnerOp(enter_var.op)
      enter_vars.append(enter_var)

  # Finds the closest enclosing non-None control pivot.
  outer_context = self._outer_context
  control_pivot = None
  while outer_context is not None and control_pivot is None:
    control_pivot = outer_context.GetControlPivot()
    # pylint: disable=protected-access
    outer_context = outer_context._outer_context
    # pylint: enable=protected-access

  # Loop-invariant inputs get a control edge from the enclosing pivot so
  # they execute in the right frame.
  if control_pivot is not None:
    for var in enter_vars:
      if util.IsLoopConstantEnter(var.op.inputs[0].op):
        # pylint: disable=protected-access
        var.op._add_control_input(control_pivot.op)
        # pylint: enable=protected-access

  # Fix the control inputs and control flow context of these enter ops.
  self._FixControlInputsAndContext(enter_vars)
  self._InitializeValues(enter_vars)
  self._loop_enters = enter_vars

  # Each Merge initially takes the Enter twice; the second input is
  # replaced below with the NextIteration back edge.
  merge_vars = [merge([x, x])[0] for x in enter_vars]
  self._pivot_for_pred = merge_vars[0]

  merge_vars_with_tensorarrays = nest.map_structure(
      _convert_flow_to_tensorarray, flat_orig_loop_vars, merge_vars)
  # Build the graph for pred.
  packed_vars = nest.pack_sequence_as(
      structure=loop_vars_signature,
      flat_sequence=merge_vars_with_tensorarrays,
      expand_composites=True)
  c = ops.convert_to_tensor(pred(*packed_vars))
  self._pivot = loop_cond(c, name="LoopCond")
  switch_vars = [_SwitchRefOrTensor(x, self._pivot) for x in merge_vars]

  # Build the graph for body.
  vars_for_body = [_Identity(x[1]) for x in switch_vars]
  self._pivot_for_body = vars_for_body[0]
  # Convert TensorArray flow variables inside the context back into
  # their associated TensorArrays for calling the body.
  vars_for_body_with_tensorarrays = nest.map_structure(
      _convert_flow_to_tensorarray, flat_orig_loop_vars, vars_for_body)
  packed_vars_for_body = nest.pack_sequence_as(
      structure=loop_vars_signature,
      flat_sequence=vars_for_body_with_tensorarrays,
      expand_composites=True)
  # Snapshot the summary collection so summaries created inside `body` can
  # be detected and turned into control dependencies below.
  pre_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION)  # pylint: disable=protected-access
  body_result = body(*packed_vars_for_body)
  post_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION)  # pylint: disable=protected-access
  if not nest.is_nested(body_result):
    body_result = [body_result]
  if len(post_summaries) > len(pre_summaries):
    new_summaries = post_summaries[len(pre_summaries):]
    summary_ref = ops.get_collection_ref(ops.GraphKeys._SUMMARY_COLLECTION)  # pylint: disable=protected-access
    summary_ref[:] = pre_summaries
    # Force the new summaries to run each iteration by threading them as
    # control dependencies of (identities of) the body outputs.
    with ops.control_dependencies(new_summaries):
      def map_fn(x):
        # TODO(apassos) figure out how to trigger with tensor arrays as well
        if isinstance(x, tensor_array_ops.TensorArray):
          return x
        return array_ops.identity(x)
      body_result = nest.map_structure(
          map_fn, body_result, expand_composites=True)

  body_result = variable_utils.convert_variables_to_tensors(body_result)
  # Compare the structure types of input and output of body.
  # For backwards compatibility, the first layer is forced to a list
  # during this comparison, because inputs are typically lists and
  # outputs of the body are typically tuples.
  nest.assert_same_structure(
      list(packed_vars_for_body), list(body_result), expand_composites=True)

  # Store body_result to keep track of TensorArrays returned by body
  original_body_result = body_result
  # Convert TensorArrays returned by body into their flow variables
  result = nest.map_structure(
      _convert_tensorarray_to_flow,
      nest.flatten(body_result, expand_composites=True),
      expand_composites=True)
  result = ops.convert_n_to_tensor_or_composite(result)

  # Add NextIteration and the back edges to complete the loop.
  if len(merge_vars) != len(result):
    raise ValueError("Number of inputs and outputs of 'body' must match "
                     f"'loop_vars'. Got {len(merge_vars)} for the number of "
                     f"inputs/outputs, and {len(result)} for 'loop_vars'.")
  next_vars = []
  for m, v in zip(merge_vars, result):
    next_vars.append(_AddNextAndBackEdge(m, v))

  # Add the exit ops.
  exit_vars = [exit(x[0]) for x in switch_vars]
  self._loop_exits = exit_vars

  # Exit the loop.
  self.ExitResult(exit_vars)
  return original_body_result, exit_vars
def BuildLoop(self, pred, body, loop_vars, shape_invariants,
              return_same_structure):
  """Adds the loop termination condition and body to the graph.

  Public entry point that normalizes `loop_vars`, derives the loop-variable
  signature, builds the loop inside this context, and repacks the Exit
  tensors into the caller's structure.

  Args:
    pred: Callable building the termination condition.
    body: Callable building the loop body.
    loop_vars: (Possibly nested) loop variables.
    shape_invariants: Optional shape invariants matching `loop_vars`.
    return_same_structure: If True, always return the packed structure; if
      False, a single loop variable is returned unwrapped.

  Returns:
    The loop outputs, packed like `loop_vars` (or unwrapped, see above).
  """
  # Keep the flat originals so TensorArray entries can be identified later.
  flat_orig_loop_vars = nest.flatten(loop_vars, expand_composites=True)

  loop_vars = nest.map_structure(
      _convert_to_tensor_or_composite_or_tensorarray, loop_vars)
  # TensorArrays travel through the loop as their flow tensors.
  flat_loop_vars = nest.map_structure(
      _convert_tensorarray_to_flow,
      nest.flatten(loop_vars, expand_composites=True))

  if shape_invariants is None:
    loop_vars_signature = nest.map_structure(
        _shape_invariant_to_type_spec, loop_vars)
  else:
    loop_vars_signature = nest.map_structure(
        _shape_invariant_to_type_spec, loop_vars, shape_invariants)

  try:
    self.Enter()
    # _BuildLoop calls _update_input in several places. _mutation_lock()
    # ensures a Session.run call cannot occur between creating and mutating
    # new ops.
    with ops.get_default_graph()._mutation_lock():  # pylint: disable=protected-access
      body_graph_result, exit_vars = self._BuildLoop(
          pred, body, flat_orig_loop_vars, flat_loop_vars,
          loop_vars_signature)
  finally:
    self.Exit()

  flat_body_result = nest.flatten(body_graph_result, expand_composites=True)
  # Outside the context, rewrap Exit tensors that correspond to
  # TensorArrays before handing them back to the caller.
  rewrapped_exits = nest.map_structure(
      _convert_flow_to_tensorarray, flat_body_result, exit_vars)
  packed_exit_vars = nest.pack_sequence_as(
      structure=body_graph_result,
      flat_sequence=rewrapped_exits,
      expand_composites=True)

  if not return_same_structure and len(exit_vars) == 1:
    return packed_exit_vars[0]
  return packed_exit_vars
def _FixControlInputsAndContext(self, enters):
  """Moves each Enter op into this context with the right control inputs.

  For every Enter tensor, recomputes which ambient control dependencies of
  its producer should be kept: only those whose op lives in an ancestor
  control-flow context that is still inside the enclosing while context.

  Args:
    enters: List of Enter `Tensor`s created for this loop.

  Raises:
    TypeError: If any entry of `enters` is not a `Tensor`.
  """
  graph = ops.get_default_graph()
  # pylint: disable=protected-access
  for enter_t in enters:
    if not isinstance(enter_t, tensor_lib.Tensor):
      raise TypeError("'enters' must be a list of Tensors. "
                      f"Received: {type(enter_t)}.")
    input_op = enter_t.op.inputs[0].op
    dep_ops = graph._control_dependencies_for_inputs([input_op])
    kept_deps = []
    for dep_op in dep_ops:
      # Keep control inputs that are in any ancestor ControlFlowContext,
      # and within the outer WhileContext.
      keep = True
      dep_ctxt = util.GetOutputContext(dep_op)
      ctxt = self.outer_context
      outer_while_context = (None if ctxt is None else
                             ctxt.GetWhileContext())
      while ctxt != dep_ctxt:
        if ctxt is None or ctxt == outer_while_context:
          keep = False
          break
        ctxt = ctxt.outer_context
      if keep:
        kept_deps.append(dep_op)
    enter_t.op._set_control_flow_context(self)
    enter_t.op._add_control_inputs(kept_deps)
    graph._record_op_seen_by_control_dependencies(enter_t.op)
  # pylint: enable=protected-access
def IsWhileContext(self):
  """Returns True: this control-flow context is a while-loop context."""
  return True
# pylint: enable=redefined-outer-name
def _AsTensorList(x, p):
  """Returns `x` as a list of Tensors or IndexedSlices.

  Entries of `x` that are Operations are replaced by an Identity of `p`
  that carries a control dependency on the operation.

  Args:
    x: A Tensor/IndexedSlices/Operation or a list or tuple of them.
    p: A Tensor to return for entries in `x` that are Operations.

  Returns:
    A list of Tensors or IndexedSlices.
  """
  def _normalize(value):
    # Operations have no value; stand in `p` gated on the operation.
    if isinstance(value, ops.Operation):
      value = with_dependencies([value], p)
    value = ops.convert_to_tensor_or_composite(value)
    if isinstance(value, tensor_lib.Tensor):
      return array_ops.identity(value)
    return indexed_slices.IndexedSlices(
        array_ops.identity(value.values), array_ops.identity(value.indices))

  entries = x if isinstance(x, (list, _basetuple)) else [x]
  return [_normalize(v) for v in entries]
def _CheckResults(a, b):
assert len(a) == len(b), (
"Values returned by a() and b() must have the same length.")
for x, y in zip(a, b):
assert x.dtype == y.dtype, (
"Values returned by a() [%s] and b() [%s] must have "
"the same type: %s, %s." % (x.name, y.name, x.dtype.name, y.dtype.name))
def with_dependencies(dependencies, output_tensor, name=None):
  """Produces the content of `output_tensor` only after `dependencies`.

  In some cases, a user may want the output of an operation to be consumed
  externally only after some other dependencies have run first. This
  function returns `output_tensor`, but only after all operations in
  `dependencies` have run. Note that this means that there is no guarantee
  that `output_tensor` will be evaluated after any `dependencies` have run.

  See also `tf.tuple` and `tf.group`.

  Args:
    dependencies: Iterable of operations to run before this op finishes.
    output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
    name: (Optional) A name for this operation.

  Returns:
    Same as `output_tensor`.

  Raises:
    TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
  """
  # Eager execution already runs ops in program order; nothing to gate.
  if context.executing_eagerly():
    return output_tensor
  with ops.name_scope(name, "control_dependency",
                      list(dependencies) + [output_tensor]) as name:
    with ops.colocate_with(output_tensor), ops.control_dependencies(
        dependencies):
      converted = ops.convert_to_tensor_or_composite(output_tensor)
      if isinstance(converted, indexed_slices.IndexedSlices):
        # Gate only the values; indices/dense_shape pass through unchanged.
        return indexed_slices.IndexedSlices(
            _Identity(converted.values, name=name), converted.indices,
            converted.dense_shape)
      return _Identity(converted, name=name)
def _GroupControlDeps(dev, deps, name=None):
  """Returns a NoOp gated on `deps`, placed on device `dev` if given."""
  with ops.control_dependencies(deps):
    if dev is None:
      return no_op(name=name)
    with ops.device(dev):
      return no_op(name=name)
# TODO(touts): Accept "inputs" as a list.
@tf_export("group")
def group(*inputs, **kwargs):
  """Create an op that groups multiple operations.

  When this op finishes, all ops in `inputs` have finished. This op has no
  output.

  Note: *In TensorFlow 2 with eager and/or Autograph, you should not require
  this method, as ops execute in the expected order thanks to automatic
  control dependencies.* Only use `tf.group` when working with v1
  `tf.Graph` code.

  When operating in a v1-style graph context, ops are not executed in the
  same order as specified in the code; TensorFlow will attempt to execute
  ops in parallel or in an order convenient to the result it is computing.
  `tf.group` allows you to request that one or more results finish before
  execution continues.

  `tf.group` creates a single op (of type `NoOp`), and then adds appropriate
  control dependencies. Thus, `c = tf.group(a, b)` will compute the same
  graph as this:

      with tf.control_dependencies([a, b]):
          c = tf.no_op()

  See also `tf.tuple` and
  `tf.control_dependencies`.

  Args:
    *inputs: Zero or more tensors to group.
    name: A name for this operation (optional).

  Returns:
    An Operation that executes all its inputs.

  Raises:
    ValueError: If an unknown keyword argument is provided.
  """
  if context.executing_eagerly():
    return None
  name = kwargs.pop("name", None)
  if kwargs:
    raise ValueError("Unknown keyword arguments: " + ", ".join(kwargs.keys()))
  with ops.name_scope(name, "group_deps", inputs) as name:
    # Grouping no inputs means do nothing.
    if not inputs:
      return no_op(name=name)

    # Bucket the inputs by the device they live on.
    device_to_ops = {}
    for flat_input in nest.flatten(inputs, expand_composites=True):
      if not hasattr(flat_input, "device"):
        raise TypeError("'inputs' should be zero or more (nested) Tensors. "
                        f"Received '{flat_input}' with type "
                        f"'{type(flat_input)}'.")
      device_to_ops.setdefault(flat_input.device, []).append(flat_input)

    if len(device_to_ops) == 1:
      # 1-level tree. The root node is the returned NoOp node.
      (sole_dev, sole_deps), = device_to_ops.items()
      return _GroupControlDeps(sole_dev, sole_deps, name=name)

    # 2-level tree. The root node is the returned NoOp node.
    # deps contains 1 NoOp node for each device.
    def _device_key(dev):
      """A sort key that allows None to be compared to strings."""
      return "" if dev is None else dev

    deps = [
        _GroupControlDeps(dev, device_to_ops[dev])
        for dev in sorted(device_to_ops, key=_device_key)
    ]
    with ops.control_dependencies(deps):
      return no_op(name=name)
@tf_export("tuple", v1=[])
@dispatch.add_dispatch_support
def tuple_v2(tensors, control_inputs=None, name=None):
  """Groups tensors together.

  The returned tensors have the same value as the input tensors, but they
  are computed only after all the input tensors have been computed.

  Note: *In TensorFlow 2 with eager and/or Autograph, you should not require
  this method, as ops execute in the expected order thanks to automatic control
  dependencies.* Only use `tf.tuple` when working with v1 `tf.Graph` code.

  See also `tf.group` and `tf.control_dependencies`.

  Example:
  >>> with tf.Graph().as_default():
  ...   with tf.compat.v1.Session() as sess:
  ...     v = tf.Variable(0.0)
  ...     a = tf.constant(1.0)
  ...     sess.run(tf.compat.v1.global_variables_initializer())
  ...     for i in range(5):
  ...       update_op = v.assign_add(1.0)
  ...       b = a + v
  ...       res_b = sess.run(b)
  ...       res_v = sess.run(v)
  ...       print(res_v)
  0.0
  0.0
  0.0
  0.0
  0.0

  >>> with tf.Graph().as_default():
  ...   with tf.compat.v1.Session() as sess:
  ...     v = tf.Variable(0.0)
  ...     a = tf.constant(1.0)
  ...     sess.run(tf.compat.v1.global_variables_initializer())
  ...     for i in range(5):
  ...       update_op = v.assign_add(1.0)
  ...       calc = [a + v]
  ...       # `tf.tuple` ensures `update_op` is run before `b`
  ...       b = tf.tuple(calc, [tf.group(update_op)])
  ...       res_b = sess.run(b)
  ...       res_v = sess.run(v)
  ...       print(res_v)
  1.0
  2.0
  3.0
  4.0
  5.0

  Args:
    tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`.
    control_inputs: List of additional ops to finish before returning.
    name: (optional) A name to use as a `name_scope` for the operation.

  Returns:
    Same as `tensors`.

  Raises:
    ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`.
    TypeError: If `control_inputs` is not a list of `Operation` or `Tensor`
      objects.
  """
  # TF2 endpoint is a thin wrapper: delegate to the v1 implementation, which
  # only differs in the order of the `name`/`control_inputs` parameters.
  return tuple(tensors=tensors, name=name, control_inputs=control_inputs)  # pylint: disable=redefined-builtin
@tf_export(v1=["tuple"])
@dispatch.add_dispatch_support
def tuple(tensors, name=None, control_inputs=None):  # pylint: disable=redefined-builtin
  """Group tensors together.

  This creates a tuple of tensors with the same values as the `tensors`
  argument, except that the value of each tensor is only returned after the
  values of all tensors have been computed.

  `control_inputs` contains additional ops that have to finish before this
  op finishes, but whose outputs are not returned.

  This can be used as a "join" mechanism for parallel computations: all the
  argument tensors can be computed in parallel, but the values of any tensor
  returned by `tuple` are only available after all the parallel computations
  are done.

  See also `tf.group` and
  `tf.control_dependencies`.

  Args:
    tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`.
    name: (optional) A name to use as a `name_scope` for the operation.
    control_inputs: List of additional ops to finish before returning.

  Returns:
    Same as `tensors`.

  Raises:
    ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`.
    TypeError: If `control_inputs` is not a list of `Operation` or `Tensor`
      objects.
  """
  # Eager execution already runs everything in order; return unchanged.
  if context.executing_eagerly():
    return tensors
  with ops.name_scope(name, "tuple", tensors) as name:

    def _as_graph_element(t):
      # Operations, existing TF tensors, and None pass through untouched;
      # everything else is converted to a Tensor.
      if isinstance(t, ops.Operation) or tensor_util.is_tf_type(t) or t is None:
        return t
      return ops.convert_to_tensor(t)

    tensors = [_as_graph_element(t) for t in tensors]

    # Collect the ops that must all finish before any output is produced.
    gating_ops = []
    for t in tensors:
      if t is None:
        continue
      gating_ops.append(t if isinstance(t, ops.Operation) else t.op)

    for extra in control_inputs or []:
      if isinstance(extra, tensor_lib.Tensor):
        extra = extra.op
      elif not isinstance(extra, ops.Operation):
        raise TypeError(
            "'control_inputs' must only contain Operation or Tensor. "
            f"Received: {type(extra)}")
      gating_ops.append(extra)

    # Note that in order to ensure ordering in the pbtxt, we must take care to
    # ensure the order here.
    gating_ops = sorted(set(gating_ops), key=lambda op: op._id)  # Uniquify ops.
    if not gating_ops:
      raise ValueError("'tensors' must have at least one Tensor. "
                       f"Received: {tensors}.")
    gate = group(*gating_ops)

    def _gated(t):
      if tensor_util.is_tf_type(t):
        return with_dependencies([gate], t)
      if isinstance(t, ops.Operation):
        with ops.control_dependencies([gate]):
          return group(t)
      return None

    return [_gated(t) for t in tensors]
| WhileContext |
python | Pylons__pyramid | tests/test_paster.py | {
"start": 1435,
"end": 2801
} | class ____(unittest.TestCase):
def _callFUT(self, config_file, section_name, options=None, _loader=None):
import pyramid.paster
old_loader = pyramid.paster.get_config_loader
try:
if _loader is not None:
pyramid.paster.get_config_loader = _loader
return pyramid.paster.get_appsettings(
config_file, section_name, options=options
)
finally:
pyramid.paster.get_config_loader = old_loader
def test_it(self):
values = {'a': 1}
loader = DummyLoader(app_settings=values)
result = self._callFUT(
'/foo/bar/myapp.ini', 'myapp', options={'a': 'b'}, _loader=loader
)
self.assertEqual(loader.uri.path, '/foo/bar/myapp.ini')
self.assertEqual(len(loader.calls), 1)
self.assertEqual(loader.calls[0]['op'], 'app_settings')
self.assertEqual(loader.calls[0]['name'], 'myapp')
self.assertEqual(loader.calls[0]['defaults'], {'a': 'b'})
self.assertEqual(result, values)
def test_it_with_dummyapp_requiring_options(self):
options = {'bar': 'baz'}
result = self._callFUT(
os.path.join(here, 'fixtures', 'dummy.ini'),
'myapp',
options=options,
)
self.assertEqual(result['foo'], 'baz')
| Test_get_appsettings |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-remote/llama_index/readers/remote/base.py | {
"start": 432,
"end": 3274
} | class ____(BaseReader):
"""General reader for any remote page or file."""
def __init__(
self,
*args: Any,
file_extractor: Optional[Dict[str, Union[str, BaseReader]]] = None,
**kwargs: Any,
) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
self.file_extractor = file_extractor
@staticmethod
def _is_youtube_video(url: str) -> bool:
# TODO create more global method for detecting all types
"""
Returns True if the given URL is a video on YouTube, False otherwise.
"""
# Regular expression pattern to match YouTube video URLs
youtube_pattern = r"(?:https?:\/\/)?(?:www\.)?(?:youtube\.com|youtu\.be)\/(?:watch\?v=)?([^\s&]+)"
# Match the pattern against the URL
match = re.match(youtube_pattern, url)
# If there's a match, it's a YouTube video URL
return match is not None
def load_data(self, url: str) -> List[Document]:
"""Parse whatever is at the URL."""
import tempfile
from urllib.parse import urlparse
from urllib.request import Request, urlopen
# check the URL
parsed_url = urlparse(url)
# Check if the scheme is http or https
if parsed_url.scheme not in (
"http",
"https",
"ftp",
"ws",
"wss",
"sftp",
"ftps",
"s3",
):
raise ValueError(
"Invalid URL scheme. Only http, https, ftp, ftps, sftp, ws, wss, and s3 are allowed."
)
extra_info = {"Source": url}
req = Request(url, headers={"User-Agent": "Magic Browser"})
result = urlopen(req)
url_type = result.info().get_content_type()
documents = []
if url_type == "text/html" or url_type == "text/plain":
text = "\n\n".join([str(el.decode("utf-8-sig")) for el in result])
documents = [Document(text=text, extra_info=extra_info)]
elif self._is_youtube_video(url):
youtube_reader = YoutubeTranscriptReader()
# TODO should we have another language, like english / french?
documents = youtube_reader.load_data([url])
else:
suffix = Path(urlparse(url).path).suffix
with tempfile.TemporaryDirectory() as temp_dir:
filepath = f"{temp_dir}/temp{suffix}"
with open(filepath, "wb") as output:
output.write(result.read())
loader = SimpleDirectoryReader(
temp_dir,
file_metadata=(lambda _: extra_info),
file_extractor=self.file_extractor,
)
documents = loader.load_data()
return documents
| RemoteReader |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 35534,
"end": 35618
} | class ____(BaseModel):
pass
class Config:
extra = Extra.allow
| Schemas |
python | google__jax | jax/_src/pallas/triton/lowering.py | {
"start": 2791,
"end": 3030
} | class ____:
context: ModuleContext
avals_in: Sequence[jax_core.ShapedArray]
avals_out: Sequence[jax_core.ShapedArray]
block_infos: Sequence[BlockInfo | None]
replace = dataclasses.replace
@dataclasses.dataclass
| LoweringRuleContext |
python | huggingface__transformers | src/transformers/models/nemotron/modeling_nemotron.py | {
"start": 20349,
"end": 24415
} | class ____(NemotronAttention):
"""
Nemotron attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
`NemotronAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
SDPA API.
"""
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
if output_attentions:
logger.warning_once(
f"{self.__class__.__name__} does not support `output_attentions=True`. The returned attention weights will "
"be `None`. If you want to get attention weights, please set `attn_implementation='eager'` when loading the model."
)
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
causal_mask = attention_mask
if attention_mask is not None:
causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
# SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
# Reference: https://github.com/pytorch/pytorch/issues/112577.
if query_states.device.type == "cuda" and causal_mask is not None:
query_states = query_states.contiguous()
key_states = key_states.contiguous()
value_states = value_states.contiguous()
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
# in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
is_causal = causal_mask is None and q_len > 1
attn_output = torch.nn.functional.scaled_dot_product_attention(
query_states,
key_states,
value_states,
attn_mask=causal_mask,
dropout_p=self.attention_dropout if self.training else 0.0,
is_causal=is_causal,
)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(bsz, q_len, -1)
attn_output = self.o_proj(attn_output)
return attn_output, None
NEMOTRON_ATTENTION_CLASSES = {
"eager": NemotronAttention,
"flash_attention_2": NemotronFlashAttention2,
"sdpa": NemotronSdpaAttention,
}
# copied from transformers.models.llama.modeling_llama.LlamaDecoderLayer with LLAMA->NEMOTRON,Llama->Nemotron,llama->nemotron
# no longer copied after attention refactors
| NemotronSdpaAttention |
python | walkccc__LeetCode | solutions/1999. Smallest Greater Multiple Made of Two Digits/1999-2.py | {
"start": 0,
"end": 474
} | class ____:
def findInteger(self, k: int, digit1: int, digit2: int) -> int:
def dfs(x: int) -> int:
if x > 2**31 - 1:
return -1
if x > k and x % k == 0:
return x
# Skip if digit1/digit2 and x are zero.
a = -1 if x + digit1 == 0 else dfs(x * 10 + digit1)
b = -1 if x + digit2 == 0 else dfs(x * 10 + digit2)
if a == -1:
return b
if b == -1:
return a
return min(a, b)
return dfs(0)
| Solution |
python | conda__conda | tests/plugins/test_auth_handlers.py | {
"start": 606,
"end": 771
} | class ____:
@plugins.hookimpl
def conda_auth_handlers(self):
yield plugins.CondaAuthHandler(handler=CustomCondaAuth, name=PLUGIN_NAME)
| CustomAuthPlugin |
python | sympy__sympy | sympy/printing/rust.py | {
"start": 2377,
"end": 2573
} | class ____(floor):
"""
Same as `sympy.floor`, but mimics the Rust behavior of returning a float rather than an integer
"""
def _eval_is_integer(self):
return False
| float_floor |
python | ray-project__ray | python/ray/experimental/gpu_object_manager/gpu_object_store.py | {
"start": 5776,
"end": 6038
} | class ____:
# A list of tensors representing the GPU object.
data: List["torch.Tensor"]
# Whether the GPU object is the primary copy.
is_primary: bool
# If a recv failed, we store the error here.
error: Optional[Exception] = None
| _GPUObject |
python | ray-project__ray | doc/source/serve/doc_code/local_dev.py | {
"start": 195,
"end": 287
} | class ____:
def double(self, s: str):
return s + " " + s
@serve.deployment
| Doubler |
python | PyCQA__pylint | tests/functional/n/not_async_context_manager.py | {
"start": 246,
"end": 351
} | class ____:
def __enter__(self):
pass
def __exit__(self, *args):
pass
| ContextManager |
python | pytorch__pytorch | functorch/docs/source/tutorials/_src/plot_ensembling.py | {
"start": 923,
"end": 4788
} | class ____(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
output = x
return output
# Let's generate some dummy data. Pretend that we're working with an MNIST dataset
# where the images are 28 by 28.
# Furthermore, let's say we wish to combine the predictions from 10 different
# models.
device = "cuda"
num_models = 10
data = torch.randn(100, 64, 1, 28, 28, device=device)
targets = torch.randint(10, (6400,), device=device)
models = [SimpleCNN().to(device) for _ in range(num_models)]
# We have a couple of options for generating predictions. Maybe we want
# to give each model a different randomized minibatch of data, or maybe we
# want to run the same minibatch of data through each model (e.g. if we were
# testing the effect of different model initializations).
# Option 1: different minibatch for each model
minibatches = data[:num_models]
predictions1 = [model(minibatch) for model, minibatch in zip(models, minibatches)]
# Option 2: Same minibatch
minibatch = data[0]
predictions2 = [model(minibatch) for model in models]
######################################################################
# Using vmap to vectorize the ensemble
# --------------------------------------------------------------------
# Let's use ``vmap`` to speed up the for-loop. We must first prepare the models
# for use with ``vmap``.
#
# First, let's combine the states of the model together by stacking each parameter.
# For example, model[i].fc1.weight has shape [9216, 128]; we are going to stack the
# .fc1.weight of each of the 10 models to produce a big weight of shape [10, 9216, 128].
#
# functorch offers the following convenience function to do that. It returns a
# stateless version of the model (fmodel) and stacked parameters and buffers.
from functorch import combine_state_for_ensemble
fmodel, params, buffers = combine_state_for_ensemble(models)
[p.requires_grad_() for p in params]
# Option 1: get predictions using a different minibatch for each model.
# By default, vmap maps a function across the first dimension of all inputs to the
# passed-in function. After `combine_state_for_ensemble`, each of of ``params``,
# ``buffers`` have an additional dimension of size ``num_models`` at the front;
# and ``minibatches`` has a dimension of size ``num_models``.
print([p.size(0) for p in params])
assert minibatches.shape == (num_models, 64, 1, 28, 28)
from functorch import vmap
predictions1_vmap = vmap(fmodel)(params, buffers, minibatches)
assert torch.allclose(
predictions1_vmap, torch.stack(predictions1), atol=1e-6, rtol=1e-6
)
# Option 2: get predictions using the same minibatch of data
# vmap has an in_dims arg that specify which dimensions to map over.
# Using ``None``, we tell vmap we want the same minibatch to apply for all of
# the 10 models.
predictions2_vmap = vmap(fmodel, in_dims=(0, 0, None))(params, buffers, minibatch)
assert torch.allclose(
predictions2_vmap, torch.stack(predictions2), atol=1e-6, rtol=1e-6
)
# A quick note: there are limitations around what types of functions can be
# transformed by vmap. The best functions to transform are ones that are
# pure functions: a function where the outputs are only determined by the inputs
# that have no side effects (e.g. mutation). vmap is unable to handle mutation of
# arbitrary Python data structures, but it is able to handle many in-place
# PyTorch operations.
| SimpleCNN |
python | pytorch__pytorch | test/distributed/_shard/sharded_tensor/test_sharded_tensor.py | {
"start": 122218,
"end": 123175
} | class ____(ShardedTensorTestBase):
@with_comms
@requires_nccl()
def test_shard_metadata_init(self):
pg = dist.distributed_c10d._get_default_group()
md = ShardMetadata([10], [0])
self.assertIsNone(md.placement)
with self.assertRaisesRegex(ValueError, "remote device is None"):
_parse_and_validate_remote_device(pg, md.placement)
# String placement gets converted by ctor
md = ShardMetadata([10], [0], "rank:0/cpu")
self.assertEqual(md.placement, _remote_device("rank:0/cpu"))
rank, device = _parse_and_validate_remote_device(pg, md.placement)
self.assertEqual(0, rank)
self.assertEqual(device, torch.device("cpu"))
@with_comms
@requires_nccl()
def test_create_shard_with_no_placement(self):
md = ShardMetadata([0], [10])
shard = Shard(torch.zeros(10), md)
self.assertIsNone(shard.metadata.placement)
| TestShardMetadata |
python | walkccc__LeetCode | solutions/1018. Binary Prefix Divisible By 5/1018.py | {
"start": 0,
"end": 205
} | class ____:
def prefixesDivBy5(self, nums: list[int]) -> list[bool]:
ans = []
curr = 0
for num in nums:
curr = (curr * 2 + num) % 5
ans.append(curr % 5 == 0)
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/trackable/trackable_utils.py | {
"start": 1012,
"end": 6802
} | class ____(Exception):
def __init__(self, leftover_dependency_map):
"""Creates a CyclicDependencyException."""
# Leftover edges that were not able to be topologically sorted.
self.leftover_dependency_map = leftover_dependency_map
super(CyclicDependencyError, self).__init__()
def order_by_dependency(dependency_map):
"""Topologically sorts the keys of a map so that dependencies appear first.
Uses Kahn's algorithm:
https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm
Args:
dependency_map: a dict mapping values to a list of dependencies (other keys
in the map). All keys and dependencies must be hashable types.
Returns:
A sorted array of keys from dependency_map.
Raises:
CyclicDependencyError: if there is a cycle in the graph.
ValueError: If there are values in the dependency map that are not keys in
the map.
"""
# Maps trackables -> trackables that depend on them. These are the edges used
# in Kahn's algorithm.
reverse_dependency_map = collections.defaultdict(set)
for x, deps in dependency_map.items():
for dep in deps:
reverse_dependency_map[dep].add(x)
# Validate that all values in the dependency map are also keys.
unknown_keys = reverse_dependency_map.keys() - dependency_map.keys()
if unknown_keys:
raise ValueError("Found values in the dependency map which are not keys: "
f"{unknown_keys}")
# Generate the list sorted by objects without dependencies -> dependencies.
# The returned list will reverse this.
reversed_dependency_arr = []
# Prefill `to_visit` with all nodes that do not have other objects depending
# on them.
to_visit = [x for x in dependency_map if x not in reverse_dependency_map]
while to_visit:
x = to_visit.pop(0)
reversed_dependency_arr.append(x)
for dep in set(dependency_map[x]):
edges = reverse_dependency_map[dep]
edges.remove(x)
if not edges:
to_visit.append(dep)
reverse_dependency_map.pop(dep)
if reverse_dependency_map:
leftover_dependency_map = collections.defaultdict(list)
for dep, xs in reverse_dependency_map.items():
for x in xs:
leftover_dependency_map[x].append(dep)
raise CyclicDependencyError(leftover_dependency_map)
return reversed(reversed_dependency_arr)
_ESCAPE_CHAR = "." # For avoiding conflicts with user-specified names.
# Keyword for identifying that the next bit of a checkpoint variable name is a
# slot name. Checkpoint names for slot variables look like:
#
# <path to variable>/<_OPTIMIZER_SLOTS_NAME>/<path to optimizer>/<slot name>
#
# Where <path to variable> is a full path from the checkpoint root to the
# variable being slotted for.
_OPTIMIZER_SLOTS_NAME = _ESCAPE_CHAR + "OPTIMIZER_SLOT"
# Keyword for separating the path to an object from the name of an
# attribute in checkpoint names. Used like:
# <path to variable>/<_OBJECT_ATTRIBUTES_NAME>/<name of attribute>
OBJECT_ATTRIBUTES_NAME = _ESCAPE_CHAR + "ATTRIBUTES"
# A constant string that is used to reference the save and restore functions of
# Trackable objects that define `_serialize_to_tensors` and
# `_restore_from_tensors`. This is written as the key in the
# `SavedObject.saveable_objects<string, SaveableObject>` map in the SavedModel.
SERIALIZE_TO_TENSORS_NAME = _ESCAPE_CHAR + "TENSORS"
def escape_local_name(name):
# We need to support slashes in local names for compatibility, since this
# naming scheme is being patched in to things like Layer.add_variable where
# slashes were previously accepted. We also want to use slashes to indicate
# edges traversed to reach the variable, so we escape forward slashes in
# names.
return (name.replace(_ESCAPE_CHAR, _ESCAPE_CHAR + _ESCAPE_CHAR).replace(
r"/", _ESCAPE_CHAR + "S"))
def object_path_to_string(node_path_arr):
"""Converts a list of nodes to a string."""
return "/".join(
(escape_local_name(trackable.name) for trackable in node_path_arr))
def checkpoint_key(object_path, local_name):
"""Returns the checkpoint key for a local attribute of an object."""
key_suffix = escape_local_name(local_name)
if local_name == SERIALIZE_TO_TENSORS_NAME:
# In the case that Trackable uses the _serialize_to_tensor API for defining
# tensors to save to the checkpoint, the suffix should be the key(s)
# returned by `_serialize_to_tensor`. The suffix used here is empty.
key_suffix = ""
return f"{object_path}/{OBJECT_ATTRIBUTES_NAME}/{key_suffix}"
def extract_object_name(key):
"""Substrings the checkpoint key to the start of "/.ATTRIBUTES"."""
search_key = "/" + OBJECT_ATTRIBUTES_NAME
return key[:key.index(search_key)]
def extract_local_name(key, prefix=None):
"""Returns the substring after the "/.ATTIBUTES/" in the checkpoint key."""
# "local name" refers to the the keys of `Trackable._serialize_to_tensors.`
prefix = prefix or ""
search_key = OBJECT_ATTRIBUTES_NAME + "/" + prefix
# If checkpoint is saved from TF1, return key as is.
try:
return key[key.index(search_key) + len(search_key):]
except ValueError:
return key
def slot_variable_key(variable_path, optimizer_path, slot_name):
"""Returns checkpoint key for a slot variable."""
# Name slot variables:
#
# <variable name>/<_OPTIMIZER_SLOTS_NAME>/<optimizer path>/<slot name>
#
# where <variable name> is exactly the checkpoint name used for the original
# variable, including the path from the checkpoint root and the local name in
# the object which owns it. Note that we only save slot variables if the
# variable it's slotting for is also being saved.
return (f"{variable_path}/{_OPTIMIZER_SLOTS_NAME}/{optimizer_path}/"
f"{escape_local_name(slot_name)}")
| CyclicDependencyError |
python | gevent__gevent | src/greentest/3.14/test_thread.py | {
"start": 11758,
"end": 13223
} | class ____(BasicThreadTest):
def test_barrier(self):
with threading_helper.wait_threads_exit():
self.bar = Barrier(NUMTASKS)
self.running = NUMTASKS
for i in range(NUMTASKS):
thread.start_new_thread(self.task2, (i,))
verbose_print("waiting for tasks to end")
self.done_mutex.acquire()
verbose_print("tasks done")
def task2(self, ident):
for i in range(NUMTRIPS):
if ident == 0:
# give it a good chance to enter the next
# barrier before the others are all out
# of the current one
delay = 0
else:
with self.random_mutex:
delay = random.random() / 10000.0
verbose_print("task %s will run for %sus" %
(ident, round(delay * 1e6)))
time.sleep(delay)
verbose_print("task %s entering %s" % (ident, i))
self.bar.enter()
verbose_print("task %s leaving barrier" % ident)
with self.running_mutex:
self.running -= 1
# Must release mutex before releasing done, else the main thread can
# exit and set mutex to None as part of global teardown; then
# mutex.release() raises AttributeError.
finished = self.running == 0
if finished:
self.done_mutex.release()
| BarrierTest |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/history.py | {
"start": 2814,
"end": 6924
} | class ____(History):
"""
Wrapper around `History` implementations that run the `load()` generator in
a thread.
Use this to increase the start-up time of prompt_toolkit applications.
History entries are available as soon as they are loaded. We don't have to
wait for everything to be loaded.
"""
def __init__(self, history: History) -> None:
super().__init__()
self.history = history
self._load_thread: threading.Thread | None = None
# Lock for accessing/manipulating `_loaded_strings` and `_loaded`
# together in a consistent state.
self._lock = threading.Lock()
# Events created by each `load()` call. Used to wait for new history
# entries from the loader thread.
self._string_load_events: list[threading.Event] = []
async def load(self) -> AsyncGenerator[str, None]:
"""
Like `History.load(), but call `self.load_history_strings()` in a
background thread.
"""
# Start the load thread, if this is called for the first time.
if not self._load_thread:
self._load_thread = threading.Thread(
target=self._in_load_thread,
daemon=True,
)
self._load_thread.start()
# Consume the `_loaded_strings` list, using asyncio.
loop = get_running_loop()
# Create threading Event so that we can wait for new items.
event = threading.Event()
event.set()
self._string_load_events.append(event)
items_yielded = 0
try:
while True:
# Wait for new items to be available.
# (Use a timeout, because the executor thread is not a daemon
# thread. The "slow-history.py" example would otherwise hang if
# Control-C is pressed before the history is fully loaded,
# because there's still this non-daemon executor thread waiting
# for this event.)
got_timeout = await loop.run_in_executor(
None, lambda: event.wait(timeout=0.5)
)
if not got_timeout:
continue
# Read new items (in lock).
def in_executor() -> tuple[list[str], bool]:
with self._lock:
new_items = self._loaded_strings[items_yielded:]
done = self._loaded
event.clear()
return new_items, done
new_items, done = await loop.run_in_executor(None, in_executor)
items_yielded += len(new_items)
for item in new_items:
yield item
if done:
break
finally:
self._string_load_events.remove(event)
def _in_load_thread(self) -> None:
try:
# Start with an empty list. In case `append_string()` was called
# before `load()` happened. Then `.store_string()` will have
# written these entries back to disk and we will reload it.
self._loaded_strings = []
for item in self.history.load_history_strings():
with self._lock:
self._loaded_strings.append(item)
for event in self._string_load_events:
event.set()
finally:
with self._lock:
self._loaded = True
for event in self._string_load_events:
event.set()
def append_string(self, string: str) -> None:
with self._lock:
self._loaded_strings.insert(0, string)
self.store_string(string)
# All of the following are proxied to `self.history`.
def load_history_strings(self) -> Iterable[str]:
return self.history.load_history_strings()
def store_string(self, string: str) -> None:
self.history.store_string(string)
def __repr__(self) -> str:
return f"ThreadedHistory({self.history!r})"
| ThreadedHistory |
python | huggingface__transformers | tests/models/flava/test_modeling_flava.py | {
"start": 5358,
"end": 12279
} | class ____(ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as FLAVA does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (FlavaImageModel,) if is_torch_available() else ()
test_resize_embeddings = False
def setUp(self):
self.model_tester = FlavaImageModelTester(self)
self.config_tester = ConfigTester(self, config_class=FlavaImageConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip("Flava does not use input_ids")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
# in FLAVA, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
image_size = (self.model_tester.image_size, self.model_tester.image_size)
patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_len = num_patches + 1
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
# FLAVA has a different seq_length
image_size = (self.model_tester.image_size, self.model_tester.image_size)
patch_size = (self.model_tester.patch_size, self.model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
seq_length = num_patches + 1
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip
def test_training(self):
pass
@unittest.skip
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "facebook/flava-full"
model = FlavaImageModel.from_pretrained(model_name)
self.assertIsNotNone(model)
| FlavaImageModelTest |
python | aio-libs__aiohttp | aiohttp/web_urldispatcher.py | {
"start": 24435,
"end": 24912
} | class ____(abc.ABC):
@abc.abstractmethod # pragma: no branch
async def match(self, request: Request) -> bool:
"""Return bool if the request satisfies the criteria"""
@abc.abstractmethod # pragma: no branch
def get_info(self) -> _InfoDict:
"""Return a dict with additional info useful for introspection"""
@property
@abc.abstractmethod # pragma: no branch
def canonical(self) -> str:
"""Return a str"""
| AbstractRuleMatching |
python | astral-sh__uv | crates/uv-python/fetch-download-metadata.py | {
"start": 4276,
"end": 4844
} | class ____(StrEnum):
FREETHREADED = "freethreaded"
DEBUG = "debug"
FREETHREADED_DEBUG = "freethreaded+debug"
@classmethod
def from_build_options(
cls: type["Variant"], build_options: list[str]
) -> "Variant" | None:
if "debug" in build_options and "freethreaded" in build_options:
return cls.FREETHREADED_DEBUG
elif "debug" in build_options:
return cls.DEBUG
elif "freethreaded" in build_options:
return cls.FREETHREADED
else:
return None
@dataclass
| Variant |
python | dask__dask | dask/tests/test_task_spec.py | {
"start": 12321,
"end": 17504
} | class ____(namedtuple("NewArgsExNamedTuple", "ab, c, k, v")):
"""Namedtuple with a custom constructor including keywords-only arguments."""
def __new__(cls, a, b, c, **kw):
return super().__new__(cls, f"{a}-{b}", c, tuple(kw.keys()), tuple(kw.values()))
def __getnewargs_ex__(self):
return (*self.ab.split("-"), self.c), dict(zip(self.k, self.v))
@pytest.mark.parametrize(
"typ, args, kwargs",
[
(PlainNamedTuple, ["some-data"], {}),
(NewArgsNamedTuple, ["some", "data", "more"], {}),
(NewArgsExNamedTuple, ["some", "data", "more"], {"another": "data"}),
],
)
def test_parse_graph_namedtuple_legacy(typ, args, kwargs):
def func(x):
return x
dsk = {"foo": (func, typ(*args, **kwargs))}
new_dsk = convert_and_verify_keys(dsk)
assert new_dsk["foo"]() == typ(*args, **kwargs)
@pytest.mark.parametrize(
"typ, args, kwargs",
[
(PlainNamedTuple, ["some-data"], {}),
(NewArgsNamedTuple, ["some", "data", "more"], {}),
(NewArgsExNamedTuple, ["some", "data", "more"], {"another": "data"}),
],
)
def test_parse_namedtuple(typ, args, kwargs):
def func(x):
return x
obj = typ(*args, **kwargs)
t = Task("foo", func, parse_input(obj))
assert t() == obj
# The other test tuple do weird things to their input
if typ is PlainNamedTuple:
args = tuple([TaskRef("b")] + list(args)[1:])
obj = typ(*args, **kwargs)
t = Task("foo", func, parse_input(obj))
assert t.dependencies == {"b"}
assert t({"b": "foo"}) == typ(*tuple(["foo"] + list(args)[1:]), **kwargs)
def test_pickle_literals():
np = pytest.importorskip("numpy")
obj = DataNode("foo", np.transpose)
roundtripped = pickle.loads(pickle.dumps(obj))
assert roundtripped == obj
@pytest.mark.parametrize("obj", [set, {0}, [], [1], {}, {2: 3}, (), (4,)])
def test_parse_non_task_inputs(obj):
assert parse_input(obj) == obj
def test_resolve_aliases():
tasks = [
Alias("bar", "foo"),
Task("foo", func, "a", "b"),
Alias("baz", "bar"),
]
dsk = {t.key: t for t in tasks}
assert len(dsk) == 3
optimized = resolve_aliases(dsk, {"baz"}, reverse_dict(DependenciesMapping(dsk)))
assert len(optimized) == 1
expected = dsk["foo"].copy()
expected.key = "baz"
assert optimized["baz"] == expected
optimized = resolve_aliases(
dsk, {"baz", "bar"}, reverse_dict(DependenciesMapping(dsk))
)
assert len(optimized) == 2
expected = dsk["foo"].copy()
expected.key = "bar"
assert optimized["bar"] == expected
tasks = [
bar := Alias("bar", "foo"),
Task("foo", func, "a", "b"),
Alias("baz", bar.ref()),
Task("foo2", func, bar.ref(), "c"),
]
dsk = {t.key: t for t in tasks}
optimized = resolve_aliases(
dsk, {"baz", "foo2"}, reverse_dict(DependenciesMapping(dsk))
)
assert len(optimized) == 3
# FIXME: Ideally, the above example would optimize to this but this isn't
# implemented. Instead, we'll block to not mess up anything
# assert sorted(optimized.values(), key=lambda t: t.key) == sorted(
# [
# Task("baz", func, "a", "b"),
# Task("foo", func, TaskRef("baz"), "c"),
# ],
# key=lambda t: t.key,
# )
# `bar` won't be inlined because it's used in `foo2` AND `baz`
assert "bar" in optimized
assert optimized["bar"].key == "bar"
assert "foo" not in optimized
# Handle cases with external dependencies
foo = Task("foo", func, "a", TaskRef("b"))
dsk = {t.key: t for t in [foo]}
optimized = resolve_aliases(dsk, {"foo"}, reverse_dict(DependenciesMapping(dsk)))
assert optimized == dsk
def test_resolve_multiple_aliases():
tasks = [
Task("first", func, 10),
Alias("second", "first"),
Task("third", func, TaskRef("second")),
Alias("fourth", "third"),
Task("fifth", func, TaskRef("fourth")),
]
dsk = {t.key: t for t in tasks}
assert len(dsk) == 5
optimized = resolve_aliases(dsk, {"fifth"}, reverse_dict(DependenciesMapping(dsk)))
assert len(optimized) == 3
expected = dsk["third"].copy()
expected.key = "fourth"
assert optimized["fourth"] == expected
expected = dsk["first"].copy()
expected.key = "second"
assert optimized["second"] == expected
def test_convert_resolve():
dsk = {
"first": (func, 10),
"second": "first",
"third": (func, "second"),
"fourth": "third",
"fifth": (func, "fourth"),
}
dsk = convert_and_verify_keys(dsk)
assert len(dsk) == 5
optimized = resolve_aliases(dsk, {"fifth"}, reverse_dict(DependenciesMapping(dsk)))
assert len(optimized) == 3
expected = dsk["third"].copy()
expected.key = "fourth"
assert optimized["fourth"] == expected
expected = dsk["first"].copy()
expected.key = "second"
assert optimized["second"] == expected
def test_parse_nested():
t = Task(
"key",
func3,
x=TaskRef("y"),
)
assert t({"y": "y"}) == "//x=y"
| NewArgsExNamedTuple |
python | wandb__wandb | wandb/apis/importers/wandb.py | {
"start": 10533,
"end": 50141
} | class ____:
"""Transfers runs, reports, and artifact sequences between W&B instances."""
def __init__(
self,
src_base_url: str,
src_api_key: str,
dst_base_url: str,
dst_api_key: str,
*,
custom_api_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
self.src_base_url = src_base_url
self.src_api_key = src_api_key
self.dst_base_url = dst_base_url
self.dst_api_key = dst_api_key
if custom_api_kwargs is None:
custom_api_kwargs = {"timeout": 600}
self.src_api = wandb.Api(
api_key=src_api_key,
overrides={"base_url": src_base_url},
**custom_api_kwargs,
)
self.dst_api = wandb.Api(
api_key=dst_api_key,
overrides={"base_url": dst_base_url},
**custom_api_kwargs,
)
self.run_api_kwargs = {
"src_base_url": src_base_url,
"src_api_key": src_api_key,
"dst_base_url": dst_base_url,
"dst_api_key": dst_api_key,
}
def __repr__(self):
return f"<WandbImporter src={self.src_base_url}, dst={self.dst_base_url}>" # pragma: no cover
def _import_run(
self,
run: WandbRun,
*,
namespace: Optional[Namespace] = None,
config: Optional[internal.SendManagerConfig] = None,
) -> None:
"""Import one WandbRun.
Use `namespace` to specify alternate settings like where the run should be uploaded
"""
if namespace is None:
namespace = Namespace(run.entity(), run.project())
if config is None:
config = internal.SendManagerConfig(
metadata=True,
files=True,
media=True,
code=True,
history=True,
summary=True,
terminal_output=True,
)
settings_override = {
"api_key": self.dst_api_key,
"base_url": self.dst_base_url,
"resume": "true",
"resumed": True,
}
# Send run with base config
logger.debug(f"Importing run, {run=}")
internal.send_run(
run,
overrides=namespace.send_manager_overrides,
settings_override=settings_override,
config=config,
)
if config.history:
# Send run again with history artifacts in case config history=True, artifacts=False
# The history artifact must come with the actual history data
logger.debug(f"Collecting history artifacts, {run=}")
history_arts = []
for art in run.run.logged_artifacts():
if art.type != "wandb-history":
continue
logger.debug(f"Collecting history artifact {art.name=}")
new_art = _clone_art(art)
history_arts.append(new_art)
logger.debug(f"Importing history artifacts, {run=}")
internal.send_run(
run,
extra_arts=history_arts,
overrides=namespace.send_manager_overrides,
settings_override=settings_override,
config=config,
)
def _delete_collection_in_dst(
self,
seq: ArtifactSequence,
namespace: Optional[Namespace] = None,
):
"""Deletes the equivalent artifact collection in destination.
Intended to clear the destination when an uploaded artifact does not pass validation.
"""
entity = coalesce(namespace.entity, seq.entity)
project = coalesce(namespace.project, seq.project)
art_type = f"{entity}/{project}/{seq.type_}"
art_name = seq.name
logger.info(
f"Deleting collection {entity=}, {project=}, {art_type=}, {art_name=}"
)
try:
dst_collection = self.dst_api.artifact_collection(art_type, art_name)
except (wandb.CommError, ValueError):
logger.warning(f"Collection doesn't exist {art_type=}, {art_name=}")
return
try:
dst_collection.delete()
except (wandb.CommError, ValueError) as e:
logger.warning(
f"Collection can't be deleted, {art_type=}, {art_name=}, {e=}"
)
return
def _import_artifact_sequence(
self,
seq: ArtifactSequence,
*,
namespace: Optional[Namespace] = None,
) -> None:
"""Import one artifact sequence.
Use `namespace` to specify alternate settings like where the artifact sequence should be uploaded
"""
if not seq.artifacts:
# The artifact sequence has no versions. This usually means all artifacts versions were deleted intentionally,
# but it can also happen if the sequence represents run history and that run was deleted.
logger.warning(f"Artifact {seq=} has no artifacts, skipping.")
return
if namespace is None:
namespace = Namespace(seq.entity, seq.project)
settings_override = {
"api_key": self.dst_api_key,
"base_url": self.dst_base_url,
"resume": "true",
"resumed": True,
}
send_manager_config = internal.SendManagerConfig(log_artifacts=True)
# Delete any existing artifact sequence, otherwise versions will be out of order
# Unfortunately, you can't delete only part of the sequence because versions are "remembered" even after deletion
self._delete_collection_in_dst(seq, namespace)
# Get a placeholder run for dummy artifacts we'll upload later
art = seq.artifacts[0]
run_or_dummy: Optional[Run] = _get_run_or_dummy_from_art(art, self.src_api)
# Each `group_of_artifacts` is either:
# 1. A single "real" artifact in a list; or
# 2. A list of dummy artifacts that are uploaded together.
# This guarantees the real artifacts have the correct version numbers while allowing for parallel upload of dummies.
groups_of_artifacts = list(_make_groups_of_artifacts(seq))
for i, group in enumerate(groups_of_artifacts, 1):
art = group[0]
if art.description == ART_SEQUENCE_DUMMY_PLACEHOLDER:
run = WandbRun(run_or_dummy, **self.run_api_kwargs)
else:
try:
wandb_run = art.logged_by()
except ValueError:
# The run used to exist but has since been deleted
# wandb_run = None
pass
# Could be logged by None (rare) or ValueError
if wandb_run is None:
logger.warning(
f"Run for {art.name=} does not exist (deleted?), using {run_or_dummy=}"
)
wandb_run = run_or_dummy
new_art = _clone_art(art)
group = [new_art]
run = WandbRun(wandb_run, **self.run_api_kwargs)
logger.info(
f"Uploading partial artifact {seq=}, {i}/{len(groups_of_artifacts)}"
)
internal.send_run(
run,
extra_arts=group,
overrides=namespace.send_manager_overrides,
settings_override=settings_override,
config=send_manager_config,
)
logger.info(f"Finished uploading {seq=}")
# query it back and remove placeholders
self._remove_placeholders(seq)
def _remove_placeholders(self, seq: ArtifactSequence) -> None:
try:
retry_arts_func = internal.exp_retry(self._dst_api.artifacts)
dst_arts = list(retry_arts_func(seq.type_, seq.name))
except wandb.CommError:
logger.warning(
f"{seq=} does not exist in dst. Has it already been deleted?"
)
return
except TypeError:
logger.exception("Problem getting dst versions (try again later).")
return
for art in dst_arts:
if art.description != ART_SEQUENCE_DUMMY_PLACEHOLDER:
continue
if art.type in ("wandb-history", "job"):
continue
try:
art.delete(delete_aliases=True)
except wandb.CommError as e:
if "cannot delete system managed artifact" in str(e):
logger.warning("Cannot delete system managed artifact")
else:
raise
def _get_dst_art(
self, src_art: Run, entity: Optional[str] = None, project: Optional[str] = None
) -> Artifact:
entity = coalesce(entity, src_art.entity)
project = coalesce(project, src_art.project)
name = src_art.name
return self.dst_api._artifact(f"{entity}/{project}/{name}")
def _get_run_problems(
self, src_run: Run, dst_run: Run, force_retry: bool = False
) -> List[dict]:
problems = []
if force_retry:
problems.append("__force_retry__")
if non_matching_metadata := self._compare_run_metadata(src_run, dst_run):
problems.append("metadata:" + str(non_matching_metadata))
if non_matching_summary := self._compare_run_summary(src_run, dst_run):
problems.append("summary:" + str(non_matching_summary))
# TODO: Compare files?
return problems
def _compare_run_metadata(self, src_run: Run, dst_run: Run) -> dict:
fname = "wandb-metadata.json"
# problems = {}
src_f = src_run.file(fname)
if src_f.size == 0:
# the src was corrupted so no comparisons here will ever work
return {}
dst_f = dst_run.file(fname)
try:
contents = wandb.util.download_file_into_memory(
dst_f.url, self.dst_api.api_key
)
except urllib3.exceptions.ReadTimeoutError:
return {"Error checking": "Timeout"}
except requests.HTTPError as e:
if e.response.status_code == 404:
return {"Bad upload": f"File not found: {fname}"}
return {"http problem": f"{fname}: ({e})"}
dst_meta = json_util.loads(contents)
non_matching = {}
if src_run.metadata:
for k, src_v in src_run.metadata.items():
if k not in dst_meta:
non_matching[k] = {"src": src_v, "dst": "KEY NOT FOUND"}
continue
dst_v = dst_meta[k]
if src_v != dst_v:
non_matching[k] = {"src": src_v, "dst": dst_v}
return non_matching
def _compare_run_summary(self, src_run: Run, dst_run: Run) -> dict:
non_matching = {}
for k, src_v in src_run.summary.items():
# These won't match between systems and that's ok
if isinstance(src_v, str) and src_v.startswith("wandb-client-artifact://"):
continue
if k in ("_wandb", "_runtime"):
continue
src_v = _recursive_cast_to_dict(src_v)
dst_v = dst_run.summary.get(k)
dst_v = _recursive_cast_to_dict(dst_v)
if isinstance(src_v, dict) and isinstance(dst_v, dict):
for kk, sv in src_v.items():
# These won't match between systems and that's ok
if isinstance(sv, str) and sv.startswith(
"wandb-client-artifact://"
):
continue
dv = dst_v.get(kk)
if not _almost_equal(sv, dv):
non_matching[f"{k}-{kk}"] = {"src": sv, "dst": dv}
else:
if not _almost_equal(src_v, dst_v):
non_matching[k] = {"src": src_v, "dst": dst_v}
return non_matching
def _collect_failed_artifact_sequences(self) -> Iterable[ArtifactSequence]:
if (df := _read_ndjson(ARTIFACT_ERRORS_FNAME)) is None:
logger.debug(f"{ARTIFACT_ERRORS_FNAME=} is empty, returning nothing")
return
unique_failed_sequences = df[
["src_entity", "src_project", "name", "type"]
].unique()
for row in unique_failed_sequences.iter_rows(named=True):
entity = row["src_entity"]
project = row["src_project"]
name = row["name"]
_type = row["type"]
art_name = f"{entity}/{project}/{name}"
arts = self.src_api.artifacts(_type, art_name)
arts = sorted(arts, key=lambda a: int(a.version.lstrip("v")))
arts = sorted(arts, key=lambda a: a.type)
yield ArtifactSequence(arts, entity, project, _type, name)
def _cleanup_dummy_runs(
self,
*,
namespaces: Optional[Iterable[Namespace]] = None,
api: Optional[Api] = None,
remapping: Optional[Dict[Namespace, Namespace]] = None,
) -> None:
api = coalesce(api, self.dst_api)
namespaces = coalesce(namespaces, self._all_namespaces())
for ns in namespaces:
if remapping and ns in remapping:
ns = remapping[ns]
logger.debug(f"Cleaning up, {ns=}")
try:
runs = list(
api.runs(ns.path, filters={"displayName": RUN_DUMMY_PLACEHOLDER})
)
except ValueError as e:
if "Could not find project" in str(e):
logger.exception("Could not find project, does it exist?")
continue
for run in runs:
logger.debug(f"Deleting dummy {run=}")
run.delete(delete_artifacts=False)
def _import_report(
self, report: Report, *, namespace: Optional[Namespace] = None
) -> None:
"""Import one wandb.Report.
Use `namespace` to specify alternate settings like where the report should be uploaded
"""
if namespace is None:
namespace = Namespace(report.entity, report.project)
entity = coalesce(namespace.entity, report.entity)
project = coalesce(namespace.project, report.project)
name = report.name
title = report.title
description = report.description
api = self.dst_api
# We shouldn't need to upsert the project for every report
logger.debug(f"Upserting {entity=}/{project=}")
try:
api.create_project(project, entity)
except requests.exceptions.HTTPError as e:
if e.response.status_code != 409:
logger.warning(f"Issue upserting {entity=}/{project=}, {e=}")
logger.debug(f"Upserting report {entity=}, {project=}, {name=}, {title=}")
api.client.execute(
wr.report.UPSERT_VIEW,
variable_values={
"id": None, # Is there any benefit for this to be the same as default report?
"name": name,
"entityName": entity,
"projectName": project,
"description": description,
"displayName": title,
"type": "runs",
"spec": json.dumps(report.spec),
},
)
def _use_artifact_sequence(
self,
sequence: ArtifactSequence,
*,
namespace: Optional[Namespace] = None,
):
if namespace is None:
namespace = Namespace(sequence.entity, sequence.project)
settings_override = {
"api_key": self.dst_api_key,
"base_url": self.dst_base_url,
"resume": "true",
"resumed": True,
}
logger.debug(f"Using artifact sequence with {settings_override=}, {namespace=}")
send_manager_config = internal.SendManagerConfig(use_artifacts=True)
for art in sequence:
if (used_by := art.used_by()) is None:
continue
for wandb_run in used_by:
run = WandbRun(wandb_run, **self.run_api_kwargs)
internal.send_run(
run,
overrides=namespace.send_manager_overrides,
settings_override=settings_override,
config=send_manager_config,
)
def import_runs(
self,
*,
namespaces: Optional[Iterable[Namespace]] = None,
remapping: Optional[Dict[Namespace, Namespace]] = None,
parallel: bool = True,
incremental: bool = True,
max_workers: Optional[int] = None,
limit: Optional[int] = None,
metadata: bool = True,
files: bool = True,
media: bool = True,
code: bool = True,
history: bool = True,
summary: bool = True,
terminal_output: bool = True,
):
logger.info("START: Import runs")
logger.info("Setting up for import")
_create_files_if_not_exists()
_clear_fname(RUN_ERRORS_FNAME)
logger.info("Collecting runs")
runs = list(self._collect_runs(namespaces=namespaces, limit=limit))
logger.info(f"Validating runs, {len(runs)=}")
self._validate_runs(
runs,
skip_previously_validated=incremental,
remapping=remapping,
)
logger.info("Collecting failed runs")
runs = list(self._collect_failed_runs())
logger.info(f"Importing runs, {len(runs)=}")
def _import_run_wrapped(run):
namespace = Namespace(run.entity(), run.project())
if remapping is not None and namespace in remapping:
namespace = remapping[namespace]
config = internal.SendManagerConfig(
metadata=metadata,
files=files,
media=media,
code=code,
history=history,
summary=summary,
terminal_output=terminal_output,
)
logger.debug(f"Importing {run=}, {namespace=}, {config=}")
self._import_run(run, namespace=namespace, config=config)
logger.debug(f"Finished importing {run=}, {namespace=}, {config=}")
for_each(_import_run_wrapped, runs, max_workers=max_workers, parallel=parallel)
logger.info("END: Importing runs")
def import_reports(
self,
*,
namespaces: Optional[Iterable[Namespace]] = None,
limit: Optional[int] = None,
remapping: Optional[Dict[Namespace, Namespace]] = None,
):
logger.info("START: Importing reports")
logger.info("Collecting reports")
reports = self._collect_reports(namespaces=namespaces, limit=limit)
logger.info("Importing reports")
def _import_report_wrapped(report):
namespace = Namespace(report.entity, report.project)
if remapping is not None and namespace in remapping:
namespace = remapping[namespace]
logger.debug(f"Importing {report=}, {namespace=}")
self._import_report(report, namespace=namespace)
logger.debug(f"Finished importing {report=}, {namespace=}")
for_each(_import_report_wrapped, reports)
logger.info("END: Importing reports")
def import_artifact_sequences(
self,
*,
namespaces: Optional[Iterable[Namespace]] = None,
incremental: bool = True,
max_workers: Optional[int] = None,
remapping: Optional[Dict[Namespace, Namespace]] = None,
):
"""Import all artifact sequences from `namespaces`.
Note: There is a known bug with the AWS backend where artifacts > 2048MB will fail to upload. This seems to be related to multipart uploads, but we don't have a fix yet.
"""
logger.info("START: Importing artifact sequences")
_clear_fname(ARTIFACT_ERRORS_FNAME)
logger.info("Collecting artifact sequences")
seqs = list(self._collect_artifact_sequences(namespaces=namespaces))
logger.info("Validating artifact sequences")
self._validate_artifact_sequences(
seqs,
incremental=incremental,
remapping=remapping,
)
logger.info("Collecting failed artifact sequences")
seqs = list(self._collect_failed_artifact_sequences())
logger.info(f"Importing artifact sequences, {len(seqs)=}")
def _import_artifact_sequence_wrapped(seq):
namespace = Namespace(seq.entity, seq.project)
if remapping is not None and namespace in remapping:
namespace = remapping[namespace]
logger.debug(f"Importing artifact sequence {seq=}, {namespace=}")
self._import_artifact_sequence(seq, namespace=namespace)
logger.debug(f"Finished importing artifact sequence {seq=}, {namespace=}")
for_each(_import_artifact_sequence_wrapped, seqs, max_workers=max_workers)
# it's safer to just use artifact on all seqs to make sure we don't miss anything
# For seqs that have already been used, this is a no-op.
logger.debug(f"Using artifact sequences, {len(seqs)=}")
def _use_artifact_sequence_wrapped(seq):
namespace = Namespace(seq.entity, seq.project)
if remapping is not None and namespace in remapping:
namespace = remapping[namespace]
logger.debug(f"Using artifact sequence {seq=}, {namespace=}")
self._use_artifact_sequence(seq, namespace=namespace)
logger.debug(f"Finished using artifact sequence {seq=}, {namespace=}")
for_each(_use_artifact_sequence_wrapped, seqs, max_workers=max_workers)
# Artifacts whose parent runs have been deleted should have that run deleted in the
# destination as well
logger.info("Cleaning up dummy runs")
self._cleanup_dummy_runs(
namespaces=namespaces,
remapping=remapping,
)
logger.info("END: Importing artifact sequences")
def import_all(
self,
*,
runs: bool = True,
artifacts: bool = True,
reports: bool = True,
namespaces: Optional[Iterable[Namespace]] = None,
incremental: bool = True,
remapping: Optional[Dict[Namespace, Namespace]] = None,
):
logger.info(f"START: Importing all, {runs=}, {artifacts=}, {reports=}")
if runs:
self.import_runs(
namespaces=namespaces,
incremental=incremental,
remapping=remapping,
)
if reports:
self.import_reports(
namespaces=namespaces,
remapping=remapping,
)
if artifacts:
self.import_artifact_sequences(
namespaces=namespaces,
incremental=incremental,
remapping=remapping,
)
logger.info("END: Importing all")
def _validate_run(
self,
src_run: Run,
*,
remapping: Optional[Dict[Namespace, Namespace]] = None,
) -> None:
namespace = Namespace(src_run.entity, src_run.project)
if remapping is not None and namespace in remapping:
namespace = remapping[namespace]
dst_entity = namespace.entity
dst_project = namespace.project
run_id = src_run.id
try:
dst_run = self.dst_api.run(f"{dst_entity}/{dst_project}/{run_id}")
except wandb.CommError:
problems = [f"run does not exist in dst at {dst_entity=}/{dst_project=}"]
else:
problems = self._get_run_problems(src_run, dst_run)
d = {
"src_entity": src_run.entity,
"src_project": src_run.project,
"dst_entity": dst_entity,
"dst_project": dst_project,
"run_id": run_id,
}
if problems:
d["problems"] = problems
fname = RUN_ERRORS_FNAME
else:
fname = RUN_SUCCESSES_FNAME
with filelock.FileLock("runs.lock"):
with open(fname, "a") as f:
f.write(json.dumps(d) + "\n")
def _filter_previously_checked_runs(
self,
runs: Iterable[Run],
*,
remapping: Optional[Dict[Namespace, Namespace]] = None,
) -> Iterable[Run]:
if (df := _read_ndjson(RUN_SUCCESSES_FNAME)) is None:
logger.debug(f"{RUN_SUCCESSES_FNAME=} is empty, yielding all runs")
yield from runs
return
data = []
for r in runs:
namespace = Namespace(r.entity, r.project)
if remapping is not None and namespace in remapping:
namespace = remapping[namespace]
data.append(
{
"src_entity": r.entity,
"src_project": r.project,
"dst_entity": namespace.entity,
"dst_project": namespace.project,
"run_id": r.id,
"data": r,
}
)
df2 = pl.DataFrame(data)
logger.debug(f"Starting with {len(runs)=} in namespaces")
results = df2.join(
df,
how="anti",
on=["src_entity", "src_project", "dst_entity", "dst_project", "run_id"],
)
logger.debug(f"After filtering out already successful runs, {len(results)=}")
if not results.is_empty():
results = results.filter(~results["run_id"].is_null())
results = results.unique(
["src_entity", "src_project", "dst_entity", "dst_project", "run_id"]
)
for r in results.iter_rows(named=True):
yield r["data"]
def _validate_artifact(
self,
src_art: Artifact,
dst_entity: str,
dst_project: str,
download_files_and_compare: bool = False,
check_entries_are_downloadable: bool = True,
):
problems = []
# These patterns of artifacts are special and should not be validated
ignore_patterns = [
r"^job-(.*?)\.py(:v\d+)?$",
# r"^run-.*-history(?:\:v\d+)?$$",
]
for pattern in ignore_patterns:
if re.search(pattern, src_art.name):
return (src_art, dst_entity, dst_project, problems)
try:
dst_art = self._get_dst_art(src_art, dst_entity, dst_project)
except Exception:
problems.append("destination artifact not found")
return (src_art, dst_entity, dst_project, problems)
try:
logger.debug("Comparing artifact manifests")
except Exception as e:
problems.append(
f"Problem getting problems! problem with {src_art.entity=}, {src_art.project=}, {src_art.name=} {e=}"
)
else:
problems += validation._compare_artifact_manifests(src_art, dst_art)
if check_entries_are_downloadable:
# validation._check_entries_are_downloadable(src_art)
validation._check_entries_are_downloadable(dst_art)
if download_files_and_compare:
logger.debug(f"Downloading {src_art=}")
try:
src_dir = _download_art(src_art, root=f"{SRC_ART_PATH}/{src_art.name}")
except requests.HTTPError as e:
problems.append(
f"Invalid download link for src {src_art.entity=}, {src_art.project=}, {src_art.name=}, {e}"
)
logger.debug(f"Downloading {dst_art=}")
try:
dst_dir = _download_art(dst_art, root=f"{DST_ART_PATH}/{dst_art.name}")
except requests.HTTPError as e:
problems.append(
f"Invalid download link for dst {dst_art.entity=}, {dst_art.project=}, {dst_art.name=}, {e}"
)
else:
logger.debug(f"Comparing artifact dirs {src_dir=}, {dst_dir=}")
if problem := validation._compare_artifact_dirs(src_dir, dst_dir):
problems.append(problem)
return (src_art, dst_entity, dst_project, problems)
def _validate_runs(
self,
runs: Iterable[WandbRun],
*,
skip_previously_validated: bool = True,
remapping: Optional[Dict[Namespace, Namespace]] = None,
):
base_runs = [r.run for r in runs]
if skip_previously_validated:
base_runs = list(
self._filter_previously_checked_runs(
base_runs,
remapping=remapping,
)
)
def _validate_run(run):
logger.debug(f"Validating {run=}")
self._validate_run(run, remapping=remapping)
logger.debug(f"Finished validating {run=}")
for_each(_validate_run, base_runs)
def _collect_failed_runs(self):
if (df := _read_ndjson(RUN_ERRORS_FNAME)) is None:
logger.debug(f"{RUN_ERRORS_FNAME=} is empty, returning nothing")
return
unique_failed_runs = df[
["src_entity", "src_project", "dst_entity", "dst_project", "run_id"]
].unique()
for row in unique_failed_runs.iter_rows(named=True):
src_entity = row["src_entity"]
src_project = row["src_project"]
# dst_entity = row["dst_entity"]
# dst_project = row["dst_project"]
run_id = row["run_id"]
run = self.src_api.run(f"{src_entity}/{src_project}/{run_id}")
yield WandbRun(run, **self.run_api_kwargs)
def _filter_previously_checked_artifacts(self, seqs: Iterable[ArtifactSequence]):
if (df := _read_ndjson(ARTIFACT_SUCCESSES_FNAME)) is None:
logger.info(
f"{ARTIFACT_SUCCESSES_FNAME=} is empty, yielding all artifact sequences"
)
for seq in seqs:
yield from seq.artifacts
return
for seq in seqs:
for art in seq:
try:
logged_by = _get_run_or_dummy_from_art(art, self.src_api)
except requests.HTTPError:
logger.exception(f"Failed to get run, skipping: {art=}")
continue
if art.type == "wandb-history" and isinstance(logged_by, _DummyRun):
logger.debug(f"Skipping history artifact {art=}")
# We can never upload valid history for a deleted run, so skip it
continue
entity = art.entity
project = art.project
_type = art.type
name, ver = _get_art_name_ver(art)
filtered_df = df.filter(
(df["src_entity"] == entity)
& (df["src_project"] == project)
& (df["name"] == name)
& (df["version"] == ver)
& (df["type"] == _type)
)
# not in file, so not verified yet, don't filter out
if len(filtered_df) == 0:
yield art
def _validate_artifact_sequences(
self,
seqs: Iterable[ArtifactSequence],
*,
incremental: bool = True,
download_files_and_compare: bool = False,
check_entries_are_downloadable: bool = True,
remapping: Optional[Dict[Namespace, Namespace]] = None,
):
if incremental:
logger.info("Validating in incremental mode")
def filtered_sequences():
for seq in seqs:
if not seq.artifacts:
continue
art = seq.artifacts[0]
try:
logged_by = _get_run_or_dummy_from_art(art, self.src_api)
except requests.HTTPError:
logger.exception(
f"Validate Artifact http error: {art.entity=},"
f" {art.project=}, {art.name=}"
)
continue
if art.type == "wandb-history" and isinstance(logged_by, _DummyRun):
# We can never upload valid history for a deleted run, so skip it
continue
yield seq
artifacts = self._filter_previously_checked_artifacts(filtered_sequences())
else:
logger.info("Validating in non-incremental mode")
artifacts = [art for seq in seqs for art in seq.artifacts]
def _validate_artifact_wrapped(args):
art, entity, project = args
if (
remapping is not None
and (namespace := Namespace(entity, project)) in remapping
):
remapped_ns = remapping[namespace]
entity = remapped_ns.entity
project = remapped_ns.project
logger.debug(f"Validating {art=}, {entity=}, {project=}")
result = self._validate_artifact(
art,
entity,
project,
download_files_and_compare=download_files_and_compare,
check_entries_are_downloadable=check_entries_are_downloadable,
)
logger.debug(f"Finished validating {art=}, {entity=}, {project=}")
return result
args = ((art, art.entity, art.project) for art in artifacts)
art_problems = for_each(_validate_artifact_wrapped, args)
for art, dst_entity, dst_project, problems in art_problems:
name, ver = _get_art_name_ver(art)
d = {
"src_entity": art.entity,
"src_project": art.project,
"dst_entity": dst_entity,
"dst_project": dst_project,
"name": name,
"version": ver,
"type": art.type,
}
if problems:
d["problems"] = problems
fname = ARTIFACT_ERRORS_FNAME
else:
fname = ARTIFACT_SUCCESSES_FNAME
with open(fname, "a") as f:
f.write(json.dumps(d) + "\n")
def _collect_runs(
self,
*,
namespaces: Optional[Iterable[Namespace]] = None,
limit: Optional[int] = None,
skip_ids: Optional[List[str]] = None,
start_date: Optional[str] = None,
api: Optional[Api] = None,
) -> Iterable[WandbRun]:
api = coalesce(api, self.src_api)
namespaces = coalesce(namespaces, self._all_namespaces())
filters: Dict[str, Any] = {}
if skip_ids is not None:
filters["name"] = {"$nin": skip_ids}
if start_date is not None:
filters["createdAt"] = {"$gte": start_date}
def _runs():
for ns in namespaces:
logger.debug(f"Collecting runs from {ns=}")
for run in api.runs(ns.path, filters=filters):
yield WandbRun(run, **self.run_api_kwargs)
runs = itertools.islice(_runs(), limit)
yield from runs
def _all_namespaces(
self, *, entity: Optional[str] = None, api: Optional[Api] = None
):
api = coalesce(api, self.src_api)
entity = coalesce(entity, api.default_entity)
projects = api.projects(entity)
for p in projects:
yield Namespace(p.entity, p.name)
def _collect_reports(
self,
*,
namespaces: Optional[Iterable[Namespace]] = None,
limit: Optional[int] = None,
api: Optional[Api] = None,
):
api = coalesce(api, self.src_api)
namespaces = coalesce(namespaces, self._all_namespaces())
wandb.login(key=self.src_api_key, host=self.src_base_url)
def reports():
for ns in namespaces:
for r in api.reports(ns.path):
yield wr.Report.from_url(r.url, api=api)
yield from itertools.islice(reports(), limit)
def _collect_artifact_sequences(
self,
*,
namespaces: Optional[Iterable[Namespace]] = None,
limit: Optional[int] = None,
api: Optional[Api] = None,
):
api = coalesce(api, self.src_api)
namespaces = coalesce(namespaces, self._all_namespaces())
def artifact_sequences():
for ns in namespaces:
logger.debug(f"Collecting artifact sequences from {ns=}")
types = []
try:
types = [t for t in api.artifact_types(ns.path)]
except Exception:
logger.exception("Failed to get artifact types.")
for t in types:
collections = []
# Skip history because it's really for run history
if t.name == "wandb-history":
continue
try:
collections = t.collections()
except Exception:
logger.exception("Failed to get artifact collections.")
for c in collections:
if c.is_sequence():
yield ArtifactSequence.from_collection(c)
seqs = itertools.islice(artifact_sequences(), limit)
unique_sequences = {seq.identifier: seq for seq in seqs}
yield from unique_sequences.values()
def _get_art_name_ver(art: Artifact) -> Tuple[str, int]:
name, ver = art.name.split(":v")
return name, int(ver)
def _make_dummy_art(name: str, _type: str, ver: int):
art = Artifact(name, ART_DUMMY_PLACEHOLDER_TYPE)
art._type = _type
art._description = ART_SEQUENCE_DUMMY_PLACEHOLDER
p = Path(ART_DUMMY_PLACEHOLDER_PATH)
p.mkdir(parents=True, exist_ok=True)
# dummy file with different name to prevent dedupe
fname = p / str(ver)
with open(fname, "w"):
pass
art.add_file(fname)
return art
def _make_groups_of_artifacts(seq: ArtifactSequence, start: int = 0):
prev_ver = start - 1
for art in seq:
name, ver = _get_art_name_ver(art)
# If there's a gap between versions, fill with dummy artifacts
if ver - prev_ver > 1:
yield [_make_dummy_art(name, art.type, v) for v in range(prev_ver + 1, ver)]
# Then yield the actual artifact
# Must always be a list of one artifact to guarantee ordering
yield [art]
prev_ver = ver
def _recursive_cast_to_dict(obj):
if isinstance(obj, list):
return [_recursive_cast_to_dict(item) for item in obj]
elif isinstance(obj, dict) or hasattr(obj, "items"):
new_dict = {}
for key, value in obj.items():
new_dict[key] = _recursive_cast_to_dict(value)
return new_dict
else:
return obj
def _almost_equal(x, y, eps=1e-6):
if isinstance(x, dict) and isinstance(y, dict):
if x.keys() != y.keys():
return False
return all(_almost_equal(x[k], y[k], eps) for k in x)
if isinstance(x, numbers.Number) and isinstance(y, numbers.Number):
return abs(x - y) < eps
if type(x) is not type(y):
return False
return x == y
@dataclass
| WandbImporter |
python | django__django | django/template/defaulttags.py | {
"start": 14773,
"end": 15299
} | class ____(Node):
mapping = {
"openblock": BLOCK_TAG_START,
"closeblock": BLOCK_TAG_END,
"openvariable": VARIABLE_TAG_START,
"closevariable": VARIABLE_TAG_END,
"openbrace": SINGLE_BRACE_START,
"closebrace": SINGLE_BRACE_END,
"opencomment": COMMENT_TAG_START,
"closecomment": COMMENT_TAG_END,
}
def __init__(self, tagtype):
self.tagtype = tagtype
def render(self, context):
return self.mapping.get(self.tagtype, "")
| TemplateTagNode |
python | Textualize__textual | docs/examples/widgets/horizontal_rules.py | {
"start": 125,
"end": 787
} | class ____(App):
CSS_PATH = "horizontal_rules.tcss"
def compose(self) -> ComposeResult:
with Vertical():
yield Label("solid (default)")
yield Rule()
yield Label("heavy")
yield Rule(line_style="heavy")
yield Label("thick")
yield Rule(line_style="thick")
yield Label("dashed")
yield Rule(line_style="dashed")
yield Label("double")
yield Rule(line_style="double")
yield Label("ascii")
yield Rule(line_style="ascii")
if __name__ == "__main__":
app = HorizontalRulesApp()
app.run()
| HorizontalRulesApp |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 83333,
"end": 84978
} | class ____(NonStrictDataModel):
"""
:param section: Section that the parameter belongs to
:type section: str
:param name: Name of the parameter. If the name is ommitted then the
corresponding operation is performed on the whole section
:type name: str
"""
_schema = {
"properties": {
"name": {
"description": (
"Name of the parameter. If the name is ommitted then the corresponding operation is performed on"
" the whole section"
),
"type": ["string", "null"],
},
"section": {
"description": "Section that the parameter belongs to",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(self, section=None, name=None, **kwargs):
super(ParamKey, self).__init__(**kwargs)
self.section = section
self.name = name
@schema_property("section")
def section(self):
return self._property_section
@section.setter
def section(self, value):
if value is None:
self._property_section = None
return
self.assert_isinstance(value, "section", six.string_types)
self._property_section = value
@schema_property("name")
def name(self):
return self._property_name
@name.setter
def name(self, value):
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
| ParamKey |
python | ray-project__ray | python/ray/_private/worker.py | {
"start": 130585,
"end": 145560
} | class ____(Protocol):
@overload
def __call__(self, __t: Type[T]) -> ActorClass[T]:
...
@overload
def __call__(self, __function: Callable[[], R]) -> RemoteFunctionNoArgs[R]:
...
@overload
def __call__(self, __function: Callable[[T0], R]) -> RemoteFunction0[R, T0]:
...
@overload
def __call__(self, __function: Callable[[T0, T1], R]) -> RemoteFunction1[R, T0, T1]:
...
@overload
def __call__(
self, __function: Callable[[T0, T1, T2], R]
) -> RemoteFunction2[R, T0, T1, T2]:
...
@overload
def __call__(
self, __function: Callable[[T0, T1, T2, T3], R]
) -> RemoteFunction3[R, T0, T1, T2, T3]:
...
@overload
def __call__(
self, __function: Callable[[T0, T1, T2, T3, T4], R]
) -> RemoteFunction4[R, T0, T1, T2, T3, T4]:
...
@overload
def __call__(
self, __function: Callable[[T0, T1, T2, T3, T4, T5], R]
) -> RemoteFunction5[R, T0, T1, T2, T3, T4, T5]:
...
@overload
def __call__(
self, __function: Callable[[T0, T1, T2, T3, T4, T5, T6], R]
) -> RemoteFunction6[R, T0, T1, T2, T3, T4, T5, T6]:
...
@overload
def __call__(
self, __function: Callable[[T0, T1, T2, T3, T4, T5, T6, T7], R]
) -> RemoteFunction7[R, T0, T1, T2, T3, T4, T5, T6, T7]:
...
@overload
def __call__(
self, __function: Callable[[T0, T1, T2, T3, T4, T5, T6, T7, T8], R]
) -> RemoteFunction8[R, T0, T1, T2, T3, T4, T5, T6, T7, T8]:
...
@overload
def __call__(
self, __function: Callable[[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9], R]
) -> RemoteFunction9[R, T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]:
...
@overload
def remote(__t: Type[T]) -> ActorClass[T]:
...
@overload
def remote(__function: Callable[[], R]) -> RemoteFunctionNoArgs[R]:
...
@overload
def remote(__function: Callable[[T0], R]) -> RemoteFunction0[R, T0]:
...
@overload
def remote(__function: Callable[[T0, T1], R]) -> RemoteFunction1[R, T0, T1]:
...
@overload
def remote(__function: Callable[[T0, T1, T2], R]) -> RemoteFunction2[R, T0, T1, T2]:
...
@overload
def remote(
__function: Callable[[T0, T1, T2, T3], R]
) -> RemoteFunction3[R, T0, T1, T2, T3]:
...
@overload
def remote(
__function: Callable[[T0, T1, T2, T3, T4], R]
) -> RemoteFunction4[R, T0, T1, T2, T3, T4]:
...
@overload
def remote(
__function: Callable[[T0, T1, T2, T3, T4, T5], R]
) -> RemoteFunction5[R, T0, T1, T2, T3, T4, T5]:
...
@overload
def remote(
__function: Callable[[T0, T1, T2, T3, T4, T5, T6], R]
) -> RemoteFunction6[R, T0, T1, T2, T3, T4, T5, T6]:
...
@overload
def remote(
__function: Callable[[T0, T1, T2, T3, T4, T5, T6, T7], R]
) -> RemoteFunction7[R, T0, T1, T2, T3, T4, T5, T6, T7]:
...
@overload
def remote(
__function: Callable[[T0, T1, T2, T3, T4, T5, T6, T7, T8], R]
) -> RemoteFunction8[R, T0, T1, T2, T3, T4, T5, T6, T7, T8]:
...
@overload
def remote(
__function: Callable[[T0, T1, T2, T3, T4, T5, T6, T7, T8, T9], R]
) -> RemoteFunction9[R, T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]:
...
# Passing options
@overload
def remote(
*,
num_returns: Union[int, Literal["streaming"]] = Undefined,
num_cpus: Union[int, float] = Undefined,
num_gpus: Union[int, float] = Undefined,
resources: Dict[str, float] = Undefined,
accelerator_type: str = Undefined,
memory: Union[int, float] = Undefined,
max_calls: int = Undefined,
max_restarts: int = Undefined,
max_task_retries: int = Undefined,
max_retries: int = Undefined,
runtime_env: Dict[str, Any] = Undefined,
retry_exceptions: bool = Undefined,
scheduling_strategy: Union[
None, Literal["DEFAULT"], Literal["SPREAD"], PlacementGroupSchedulingStrategy
] = Undefined,
label_selector: Dict[str, str] = Undefined,
fallback_strategy: List[Dict[str, Any]] = Undefined,
) -> RemoteDecorator:
...
@PublicAPI
def remote(
*args, **kwargs
) -> Union[ray.remote_function.RemoteFunction, ray.actor.ActorClass]:
"""Defines a remote function or an actor class.
This function can be used as a decorator with no arguments
to define a remote function or actor as follows:
.. testcode::
import ray
@ray.remote
def f(a, b, c):
return a + b + c
object_ref = f.remote(1, 2, 3)
result = ray.get(object_ref)
assert result == (1 + 2 + 3)
@ray.remote
class Foo:
def __init__(self, arg):
self.x = arg
def method(self, a):
return self.x + a
actor_handle = Foo.remote(123)
object_ref = actor_handle.method.remote(321)
result = ray.get(object_ref)
assert result == (123 + 321)
Equivalently, use a function call to create a remote function or actor.
.. testcode::
def g(a, b, c):
return a + b + c
remote_g = ray.remote(g)
object_ref = remote_g.remote(1, 2, 3)
assert ray.get(object_ref) == (1 + 2 + 3)
class Bar:
def __init__(self, arg):
self.x = arg
def method(self, a):
return self.x + a
RemoteBar = ray.remote(Bar)
actor_handle = RemoteBar.remote(123)
object_ref = actor_handle.method.remote(321)
result = ray.get(object_ref)
assert result == (123 + 321)
It can also be used with specific keyword arguments as follows:
.. testcode::
@ray.remote(num_gpus=1, max_calls=1, num_returns=2)
def f():
return 1, 2
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Remote task and actor objects returned by @ray.remote can also be
dynamically modified with the same arguments as above using
``.options()`` as follows:
.. testcode::
:hide:
ray.shutdown()
ray.init(num_cpus=5, num_gpus=5)
.. testcode::
@ray.remote(num_gpus=1, max_calls=1, num_returns=2)
def f():
return 1, 2
f_with_2_gpus = f.options(num_gpus=2)
object_refs = f_with_2_gpus.remote()
assert ray.get(object_refs) == [1, 2]
@ray.remote(num_cpus=2, resources={"CustomResource": 1})
class Foo:
def method(self):
return 1
Foo_with_no_resources = Foo.options(num_cpus=1, resources=None)
foo_actor = Foo_with_no_resources.remote()
assert ray.get(foo_actor.method.remote()) == 1
A remote actor will be terminated when all actor handle to it
in Python is deleted, which will cause them to complete any outstanding
work and then shut down. If you only have 1 reference to an actor handle,
calling ``del actor`` *could* trigger actor deletion. Note that your program
may have multiple references to the same ActorHandle, and actor termination
will not occur until the reference count goes to 0. See the Python
documentation for more context about object deletion.
https://docs.python.org/3.9/reference/datamodel.html#object.__del__
If you want to kill actors immediately, you can also call ``ray.kill(actor)``.
.. tip::
Avoid repeatedly passing in large arguments to remote task or method calls.
Instead, use ray.put to create a copy of the object in the object store.
See :ref:`more info here <ray-pass-large-arg-by-value>`.
Args:
num_returns: This is only for *remote functions*. It specifies
the number of object refs returned by the remote function
invocation. The default value is 1.
Pass "dynamic" to allow the task to decide how many
return values to return during execution, and the caller will
receive an ObjectRef[DynamicObjectRefGenerator].
See :ref:`dynamic generators <dynamic-generators>` for more details.
num_cpus: The quantity of CPU resources to reserve
for this task or for the lifetime of the actor.
By default, tasks use 1 CPU resource and actors use 1 CPU
for scheduling and 0 CPU for running
(This means, by default, actors cannot get scheduled on a zero-cpu node,
but an infinite number of them can run on any non-zero cpu node.
The default value for actors was chosen for historical reasons.
It's recommended to always explicitly set num_cpus for actors
to avoid any surprises.
If resources are specified explicitly,
they are required for both scheduling and running.)
See :ref:`specifying resource requirements <resource-requirements>`
for more details.
num_gpus: The quantity of GPU resources to reserve
for this task or for the lifetime of the actor.
The default value is 0.
See :ref:`Ray GPU support <gpu-support>` for more details.
resources (Dict[str, float]): The quantity of various
:ref:`custom resources <custom-resources>`
to reserve for this task or for the lifetime of the actor.
This is a dictionary mapping strings (resource names) to floats.
By default it is empty.
label_selector: [Experimental] If specified, the labels required for the node on
which this actor can be scheduled on. The label selector consist of key-value pairs,
where the keys are label names and the value are expressions consisting of an operator
with label values or just a value to indicate equality.
fallback_strategy: [Experimental] If specified, expresses soft constraints for scheduling
through a list of dicts of decorator options to fall back on when scheduling on a node.
Decorator options are evaluated together during scheduling. The first satisfied
dict of options is used. Currently only `label_selector` is a supported option.
accelerator_type: If specified, requires that the task or actor run
on a node with the specified type of accelerator.
See :ref:`accelerator types <accelerator_types>`.
memory: The heap memory request in bytes for this task/actor,
rounded down to the nearest integer.
max_calls: Only for *remote functions*. This specifies the
maximum number of times that a given worker can execute
the given remote function before it must exit
(this can be used to address :ref:`memory leaks <gpu-leak>` in third-party
libraries or to reclaim resources that cannot easily be
released, e.g., GPU memory that was acquired by TensorFlow).
By default this is infinite for CPU tasks and 1 for GPU tasks
(to force GPU tasks to release resources after finishing).
max_restarts: Only for *actors*. This specifies the maximum
number of times that the actor should be restarted when it dies
unexpectedly. The minimum valid value is 0 (default),
which indicates that the actor doesn't need to be restarted.
A value of -1 indicates that an actor should be restarted
indefinitely.
See :ref:`actor fault tolerance <fault-tolerance-actors>` for more details.
max_task_retries: Only for *actors*. How many times to
retry an actor task if the task fails due to a system error,
e.g., the actor has died. If set to -1, the system will
retry the failed task until the task succeeds, or the actor
has reached its max_restarts limit. If set to `n > 0`, the
system will retry the failed task up to n times, after which the
task will throw a `RayActorError` exception upon :obj:`ray.get`.
Note that Python exceptions are not considered system errors
and will not trigger retries.
The default value is 0.
See :ref:`actor fault tolerance <fault-tolerance-actors>` for more details.
max_retries: Only for *remote functions*. This specifies
the maximum number of times that the remote function
should be rerun when the worker process executing it
crashes unexpectedly. The minimum valid value is 0,
the default value is 3, and a value of -1 indicates
infinite retries.
See :ref:`task fault tolerance <fault-tolerance-tasks>` for more details.
allow_out_of_order_execution: Only for *actors*. Whether Ray executes actor
tasks out of order. If you're using multi-threaded (``max_concurrency > 1``)
or async actors, you can't set this to False. Defaults to True if you're
using multi-threaded or async actors, and False otherwise. Actor task
retries are always executed out of order.
runtime_env (Dict[str, Any]): Specifies the runtime environment for
this actor or task and its children. See
:ref:`runtime-environments` for detailed documentation.
retry_exceptions: Only for *remote functions*. This specifies whether
application-level errors should be retried up to max_retries times.
This can be a boolean or a list of exceptions that should be retried.
See :ref:`task fault tolerance <fault-tolerance-tasks>` for more details.
scheduling_strategy: Strategy about how to
schedule a remote function or actor. Possible values are
None: ray will figure out the scheduling strategy to use, it
will either be the PlacementGroupSchedulingStrategy using parent's
placement group if parent has one and has
placement_group_capture_child_tasks set to true,
or "DEFAULT";
"DEFAULT": default hybrid scheduling;
"SPREAD": best effort spread scheduling;
`PlacementGroupSchedulingStrategy`:
placement group based scheduling;
`NodeAffinitySchedulingStrategy`:
node id based affinity scheduling.
See :ref:`Ray scheduling strategies <ray-scheduling-strategies>`
for more details.
_labels: The key-value labels of a task or actor.
"""
# "callable" returns true for both function and class.
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
# This is the case where the decorator is just @ray.remote.
# "args[0]" is the class or function under the decorator.
return _make_remote(args[0], {})
assert len(args) == 0 and len(kwargs) > 0, ray_option_utils.remote_args_error_string
return functools.partial(_make_remote, options=kwargs)
| RemoteDecorator |
python | getsentry__sentry | src/sentry/replays/lib/new_query/fields.py | {
"start": 1714,
"end": 3455
} | class ____(Generic[T]):
def __init__(self, parse: Callable[[str], T], query: type[GenericBase]) -> None:
self.parse = parse
self.query = query
def apply(self, search_filter: SearchFilter) -> Condition:
raise NotImplementedError
def _apply_wildcard(self, expression: Expression, operator: str, value: T) -> Condition:
if operator == "=":
visitor = self.query.visit_match
elif operator == "!=":
visitor = self.query.visit_not_match
else:
raise OperatorNotSupported(f"Unsupported wildcard search operator: '{operator}'")
return visitor(expression, value)
def _apply_composite(self, expression: Expression, operator: str, value: list[T]) -> Condition:
if operator == "IN":
visitor = self.query.visit_in
elif operator == "NOT IN":
visitor = self.query.visit_not_in
else:
raise OperatorNotSupported(f"Unsupported composite search operator: '{operator}'")
return visitor(expression, value)
def _apply_scalar(self, expression: Expression, operator: str, value: T) -> Condition:
if operator == "=":
visitor = self.query.visit_eq
elif operator == "!=":
visitor = self.query.visit_neq
elif operator == ">":
visitor = self.query.visit_gt
elif operator == ">=":
visitor = self.query.visit_gte
elif operator == "<":
visitor = self.query.visit_lt
elif operator == "<=":
visitor = self.query.visit_lte
else:
raise OperatorNotSupported(f"Unsupported search operator: '{operator}'")
return visitor(expression, value)
| BaseField |
python | walkccc__LeetCode | solutions/2781. Length of the Longest Valid Substring/2781-2.py | {
"start": 108,
"end": 569
} | class ____:
def __init__(self):
self.root = TrieNode()
def insert(self, word: str) -> None:
node: TrieNode = self.root
for c in word:
node = node.children.setdefault(c, TrieNode())
node.isWord = True
def search(self, word: str, l: int, r: int) -> bool:
node: TrieNode = self.root
for i in range(l, r):
if word[i] not in node.children:
return False
node = node.children[word[i]]
return node.isWord
| Trie |
python | python-openxml__python-docx | src/docx/oxml/text/font.py | {
"start": 1644,
"end": 10519
} | class ____(BaseOxmlElement):
"""`<w:rPr>` element, containing the properties for a run."""
get_or_add_color: Callable[[], CT_Color]
get_or_add_highlight: Callable[[], CT_Highlight]
get_or_add_rFonts: Callable[[], CT_Fonts]
get_or_add_sz: Callable[[], CT_HpsMeasure]
get_or_add_vertAlign: Callable[[], CT_VerticalAlignRun]
_add_rStyle: Callable[..., CT_String]
_add_u: Callable[[], CT_Underline]
_remove_color: Callable[[], None]
_remove_highlight: Callable[[], None]
_remove_rFonts: Callable[[], None]
_remove_rStyle: Callable[[], None]
_remove_sz: Callable[[], None]
_remove_u: Callable[[], None]
_remove_vertAlign: Callable[[], None]
_tag_seq = (
"w:rStyle",
"w:rFonts",
"w:b",
"w:bCs",
"w:i",
"w:iCs",
"w:caps",
"w:smallCaps",
"w:strike",
"w:dstrike",
"w:outline",
"w:shadow",
"w:emboss",
"w:imprint",
"w:noProof",
"w:snapToGrid",
"w:vanish",
"w:webHidden",
"w:color",
"w:spacing",
"w:w",
"w:kern",
"w:position",
"w:sz",
"w:szCs",
"w:highlight",
"w:u",
"w:effect",
"w:bdr",
"w:shd",
"w:fitText",
"w:vertAlign",
"w:rtl",
"w:cs",
"w:em",
"w:lang",
"w:eastAsianLayout",
"w:specVanish",
"w:oMath",
)
rStyle: CT_String | None = ZeroOrOne("w:rStyle", successors=_tag_seq[1:])
rFonts: CT_Fonts | None = ZeroOrOne("w:rFonts", successors=_tag_seq[2:])
b: CT_OnOff | None = ZeroOrOne("w:b", successors=_tag_seq[3:])
bCs = ZeroOrOne("w:bCs", successors=_tag_seq[4:])
i = ZeroOrOne("w:i", successors=_tag_seq[5:])
iCs = ZeroOrOne("w:iCs", successors=_tag_seq[6:])
caps = ZeroOrOne("w:caps", successors=_tag_seq[7:])
smallCaps = ZeroOrOne("w:smallCaps", successors=_tag_seq[8:])
strike = ZeroOrOne("w:strike", successors=_tag_seq[9:])
dstrike = ZeroOrOne("w:dstrike", successors=_tag_seq[10:])
outline = ZeroOrOne("w:outline", successors=_tag_seq[11:])
shadow = ZeroOrOne("w:shadow", successors=_tag_seq[12:])
emboss = ZeroOrOne("w:emboss", successors=_tag_seq[13:])
imprint = ZeroOrOne("w:imprint", successors=_tag_seq[14:])
noProof = ZeroOrOne("w:noProof", successors=_tag_seq[15:])
snapToGrid = ZeroOrOne("w:snapToGrid", successors=_tag_seq[16:])
vanish = ZeroOrOne("w:vanish", successors=_tag_seq[17:])
webHidden = ZeroOrOne("w:webHidden", successors=_tag_seq[18:])
color: CT_Color | None = ZeroOrOne("w:color", successors=_tag_seq[19:])
sz: CT_HpsMeasure | None = ZeroOrOne("w:sz", successors=_tag_seq[24:])
highlight: CT_Highlight | None = ZeroOrOne("w:highlight", successors=_tag_seq[26:])
u: CT_Underline | None = ZeroOrOne("w:u", successors=_tag_seq[27:])
vertAlign: CT_VerticalAlignRun | None = ZeroOrOne("w:vertAlign", successors=_tag_seq[32:])
rtl = ZeroOrOne("w:rtl", successors=_tag_seq[33:])
cs = ZeroOrOne("w:cs", successors=_tag_seq[34:])
specVanish = ZeroOrOne("w:specVanish", successors=_tag_seq[38:])
oMath = ZeroOrOne("w:oMath", successors=_tag_seq[39:])
del _tag_seq
def _new_color(self):
"""Override metaclass method to set `w:color/@val` to RGB black on create."""
return parse_xml('<w:color %s w:val="000000"/>' % nsdecls("w"))
@property
def highlight_val(self) -> WD_COLOR_INDEX | None:
"""Value of `./w:highlight/@val`.
Specifies font's highlight color, or `None` if the text is not highlighted.
"""
highlight = self.highlight
if highlight is None:
return None
return highlight.val
@highlight_val.setter
def highlight_val(self, value: WD_COLOR_INDEX | None) -> None:
if value is None:
self._remove_highlight()
return
highlight = self.get_or_add_highlight()
highlight.val = value
@property
def rFonts_ascii(self) -> str | None:
"""The value of `w:rFonts/@w:ascii` or |None| if not present.
Represents the assigned typeface name. The rFonts element also specifies other
special-case typeface names; this method handles the case where just the common
name is required.
"""
rFonts = self.rFonts
if rFonts is None:
return None
return rFonts.ascii
@rFonts_ascii.setter
def rFonts_ascii(self, value: str | None) -> None:
if value is None:
self._remove_rFonts()
return
rFonts = self.get_or_add_rFonts()
rFonts.ascii = value
@property
def rFonts_hAnsi(self) -> str | None:
"""The value of `w:rFonts/@w:hAnsi` or |None| if not present."""
rFonts = self.rFonts
if rFonts is None:
return None
return rFonts.hAnsi
@rFonts_hAnsi.setter
def rFonts_hAnsi(self, value: str | None):
if value is None and self.rFonts is None:
return
rFonts = self.get_or_add_rFonts()
rFonts.hAnsi = value
@property
def style(self) -> str | None:
"""String in `./w:rStyle/@val`, or None if `w:rStyle` is not present."""
rStyle = self.rStyle
if rStyle is None:
return None
return rStyle.val
@style.setter
def style(self, style: str | None) -> None:
"""Set `./w:rStyle/@val` to `style`, adding the `w:rStyle` element if necessary.
If `style` is |None|, remove `w:rStyle` element if present.
"""
if style is None:
self._remove_rStyle()
elif self.rStyle is None:
self._add_rStyle(val=style)
else:
self.rStyle.val = style
@property
def subscript(self) -> bool | None:
"""|True| if `./w:vertAlign/@w:val` is "subscript".
|False| if `w:vertAlign/@w:val` contains any other value. |None| if
`w:vertAlign` is not present.
"""
vertAlign = self.vertAlign
if vertAlign is None:
return None
return vertAlign.val == ST_VerticalAlignRun.SUBSCRIPT
@subscript.setter
def subscript(self, value: bool | None) -> None:
if value is None:
self._remove_vertAlign()
elif bool(value) is True:
self.get_or_add_vertAlign().val = ST_VerticalAlignRun.SUBSCRIPT
# -- assert bool(value) is False --
elif self.vertAlign is not None and self.vertAlign.val == ST_VerticalAlignRun.SUBSCRIPT:
self._remove_vertAlign()
@property
def superscript(self) -> bool | None:
"""|True| if `w:vertAlign/@w:val` is 'superscript'.
|False| if `w:vertAlign/@w:val` contains any other value. |None| if
`w:vertAlign` is not present.
"""
vertAlign = self.vertAlign
if vertAlign is None:
return None
return vertAlign.val == ST_VerticalAlignRun.SUPERSCRIPT
@superscript.setter
def superscript(self, value: bool | None):
if value is None:
self._remove_vertAlign()
elif bool(value) is True:
self.get_or_add_vertAlign().val = ST_VerticalAlignRun.SUPERSCRIPT
# -- assert bool(value) is False --
elif self.vertAlign is not None and self.vertAlign.val == ST_VerticalAlignRun.SUPERSCRIPT:
self._remove_vertAlign()
@property
def sz_val(self) -> Length | None:
"""The value of `w:sz/@w:val` or |None| if not present."""
sz = self.sz
if sz is None:
return None
return sz.val
@sz_val.setter
def sz_val(self, value: Length | None):
if value is None:
self._remove_sz()
return
sz = self.get_or_add_sz()
sz.val = value
@property
def u_val(self) -> WD_UNDERLINE | None:
"""Value of `w:u/@val`, or None if not present.
Values `WD_UNDERLINE.SINGLE` and `WD_UNDERLINE.NONE` are mapped to `True` and
`False` respectively.
"""
u = self.u
if u is None:
return None
return u.val
@u_val.setter
def u_val(self, value: WD_UNDERLINE | None):
self._remove_u()
if value is not None:
self._add_u().val = value
def _get_bool_val(self, name: str) -> bool | None:
"""Value of boolean child with `name`, e.g. "w:b", "w:i", and "w:smallCaps"."""
element = getattr(self, name)
if element is None:
return None
return element.val
def _set_bool_val(self, name: str, value: bool | None):
if value is None:
getattr(self, "_remove_%s" % name)()
return
element = getattr(self, "get_or_add_%s" % name)()
element.val = value
| CT_RPr |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 14529,
"end": 14646
} | class ____(PydanticValueError):
code = 'regex_pattern'
msg_template = 'Invalid regular expression'
| PatternError |
python | scikit-learn__scikit-learn | sklearn/tests/test_pipeline.py | {
"start": 2370,
"end": 2532
} | class ____(BaseEstimator):
"""Small class to test parameter dispatching."""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
| NoFit |
python | openai__openai-python | src/openai/types/beta/realtime/response_audio_transcript_done_event.py | {
"start": 210,
"end": 787
} | class ____(BaseModel):
content_index: int
"""The index of the content part in the item's content array."""
event_id: str
"""The unique ID of the server event."""
item_id: str
"""The ID of the item."""
output_index: int
"""The index of the output item in the response."""
response_id: str
"""The ID of the response."""
transcript: str
"""The final transcript of the audio."""
type: Literal["response.audio_transcript.done"]
"""The event type, must be `response.audio_transcript.done`."""
| ResponseAudioTranscriptDoneEvent |
python | allegroai__clearml | clearml/backend_api/services/v2_23/queues.py | {
"start": 82290,
"end": 83591
} | class ____(Response):
"""
Response of queues.move_task_to_back endpoint.
:param position: The new position of the task entry in the queue (index, -1
represents bottom of queue)
:type position: int
"""
_service = "queues"
_action = "move_task_to_back"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"position": {
"description": "The new position of the task entry in the queue (index, -1 represents bottom of queue)",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, position: Optional[int] = None, **kwargs: Any) -> None:
super(MoveTaskToBackResponse, self).__init__(**kwargs)
self.position = position
@schema_property("position")
def position(self) -> Optional[int]:
return self._property_position
@position.setter
def position(self, value: Optional[int]) -> None:
if value is None:
self._property_position = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "position", six.integer_types)
self._property_position = value
| MoveTaskToBackResponse |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 43038,
"end": 45262
} | class ____:
xlClipboardFormatBIFF = 8 # from enum XlClipboardFormat
xlClipboardFormatBIFF12 = 63 # from enum XlClipboardFormat
xlClipboardFormatBIFF2 = 18 # from enum XlClipboardFormat
xlClipboardFormatBIFF3 = 20 # from enum XlClipboardFormat
xlClipboardFormatBIFF4 = 30 # from enum XlClipboardFormat
xlClipboardFormatBinary = 15 # from enum XlClipboardFormat
xlClipboardFormatBitmap = 9 # from enum XlClipboardFormat
xlClipboardFormatCGM = 13 # from enum XlClipboardFormat
xlClipboardFormatCSV = 5 # from enum XlClipboardFormat
xlClipboardFormatDIF = 4 # from enum XlClipboardFormat
xlClipboardFormatDspText = 12 # from enum XlClipboardFormat
xlClipboardFormatEmbedSource = 22 # from enum XlClipboardFormat
xlClipboardFormatEmbeddedObject = 21 # from enum XlClipboardFormat
xlClipboardFormatLink = 11 # from enum XlClipboardFormat
xlClipboardFormatLinkSource = 23 # from enum XlClipboardFormat
xlClipboardFormatLinkSourceDesc = 32 # from enum XlClipboardFormat
xlClipboardFormatMovie = 24 # from enum XlClipboardFormat
xlClipboardFormatNative = 14 # from enum XlClipboardFormat
xlClipboardFormatObjectDesc = 31 # from enum XlClipboardFormat
xlClipboardFormatObjectLink = 19 # from enum XlClipboardFormat
xlClipboardFormatOwnerLink = 17 # from enum XlClipboardFormat
xlClipboardFormatPICT = 2 # from enum XlClipboardFormat
xlClipboardFormatPrintPICT = 3 # from enum XlClipboardFormat
xlClipboardFormatRTF = 7 # from enum XlClipboardFormat
xlClipboardFormatSYLK = 6 # from enum XlClipboardFormat
xlClipboardFormatScreenPICT = 29 # from enum XlClipboardFormat
xlClipboardFormatStandardFont = 28 # from enum XlClipboardFormat
xlClipboardFormatStandardScale = 27 # from enum XlClipboardFormat
xlClipboardFormatTable = 16 # from enum XlClipboardFormat
xlClipboardFormatText = 0 # from enum XlClipboardFormat
xlClipboardFormatToolFace = 25 # from enum XlClipboardFormat
xlClipboardFormatToolFacePICT = 26 # from enum XlClipboardFormat
xlClipboardFormatVALU = 1 # from enum XlClipboardFormat
xlClipboardFormatWK1 = 10 # from enum XlClipboardFormat
| ClipboardFormat |
python | plotly__plotly.py | plotly/graph_objs/scattercarpet/marker/_line.py | {
"start": 233,
"end": 20958
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattercarpet.marker"
_path_str = "scattercarpet.marker.line"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorscale",
"colorsrc",
"reversescale",
"width",
"widthsrc",
}
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is true, the
default palette will be chosen according to whether numbers in
the `color` array are all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.line.color`) or the
bounds set in `marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color` is set to a numerical
array. Defaults to `false` when `marker.line.cmin` and
`marker.line.cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.line.color` is set to a numerical array. Value
should have the same units as in `marker.line.color` and if
set, `marker.line.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be equidistant
to this point. Has an effect only if in `marker.line.color` is
set to a numerical array. Value should have the same units as
in `marker.line.color`. Has no effect when `marker.line.cauto`
is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.line.color` is set to a numerical array. Value
should have the same units as in `marker.line.color` and if
set, `marker.line.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
@property
def color(self):
"""
Sets the marker.line color. It accepts either a specific color
or an array of numbers that are mapped to the colorscale
relative to the max and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A number that will be interpreted as a color
according to scattercarpet.marker.line.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in
`marker.line.color` is set to a numerical array. The colorscale
must be an array containing arrays mapping a normalized value
to an rgb, rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and highest (1) values
are required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the colorscale in
color space, use `marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,Reds,Viridis,
YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`marker.line.color` is set to a numerical array. If true,
`marker.line.cmin` will correspond to the last color in the
array and `marker.line.cmax` will correspond to the first
color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
@property
def width(self):
"""
Sets the width (in px) of the lines bounding the marker points.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def widthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `width`.
The 'widthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["widthsrc"]
@widthsrc.setter
def widthsrc(self, val):
self["widthsrc"] = val
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color` is set to a numerical array. In
case `colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has an effect
only if in `marker.line.color` is set to a numerical
array. Defaults to `false` when `marker.line.cmin` and
`marker.line.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.line.color` is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be
equidistant to this point. Has an effect only if in
`marker.line.color` is set to a numerical array. Value
should have the same units as in `marker.line.color`.
Has no effect when `marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.line.color` is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are mapped
to the colorscale relative to the max and min values of
the array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.line.color` is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorscale=None,
colorsrc=None,
reversescale=None,
width=None,
widthsrc=None,
**kwargs,
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattercarpet.marker.Line`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color` is set to a numerical array. In
case `colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in
`marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax` Has an effect
only if in `marker.line.color` is set to a numerical
array. Defaults to `false` when `marker.line.cmin` and
`marker.line.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.line.color` is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmin` must
be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be
equidistant to this point. Has an effect only if in
`marker.line.color` is set to a numerical array. Value
should have the same units as in `marker.line.color`.
Has no effect when `marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.line.color` is set to a numerical
array. Value should have the same units as in
`marker.line.color` and if set, `marker.line.cmax` must
be set as well.
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are mapped
to the colorscale relative to the max and min values of
the array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorscale
Sets the colorscale. Has an effect only if in
`marker.line.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.line.cmin` and `marker.line.cmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Blackbody,Bluered,Blues,C
ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.line.color` is set to a numerical array.
If true, `marker.line.cmin` will correspond to the last
color in the array and `marker.line.cmax` will
correspond to the first color.
width
Sets the width (in px) of the lines bounding the marker
points.
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattercarpet.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.marker.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("autocolorscale", arg, autocolorscale)
self._set_property("cauto", arg, cauto)
self._set_property("cmax", arg, cmax)
self._set_property("cmid", arg, cmid)
self._set_property("cmin", arg, cmin)
self._set_property("color", arg, color)
self._set_property("coloraxis", arg, coloraxis)
self._set_property("colorscale", arg, colorscale)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("reversescale", arg, reversescale)
self._set_property("width", arg, width)
self._set_property("widthsrc", arg, widthsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 6074,
"end": 6326
} | class ____(BaseModel):
"""
Config Section Schema.
"""
model_config = ConfigDict(
extra="forbid",
)
name: Annotated[str, Field(title="Name")]
options: Annotated[list[ConfigOption], Field(title="Options")]
| ConfigSection |
python | getsentry__sentry | src/sentry/grouping/component.py | {
"start": 10133,
"end": 10221
} | class ____(BaseGroupingComponent[str]):
id: str = "hostname"
| HostnameGroupingComponent |
python | pennersr__django-allauth | allauth/headless/usersessions/response.py | {
"start": 103,
"end": 796
class SessionsResponse(APIResponse):
    """Headless API response listing the user's sessions as plain dicts."""

    def __init__(self, request, sessions):
        payload = [self._session_data(session) for session in sessions]
        super().__init__(request, data=payload)

    def _session_data(self, session):
        """Serialize one session object into its wire representation."""
        serialized = {
            "user_agent": session.user_agent,
            "ip": session.ip,
            "created_at": session.created_at.timestamp(),
            "is_current": session.is_current(),
            "id": session.pk,
        }
        # last_seen_at is only meaningful (and only exposed) when activity
        # tracking is enabled in the app settings.
        if app_settings.TRACK_ACTIVITY:
            serialized["last_seen_at"] = session.last_seen_at.timestamp()
        return serialized
def get_config_data(request):
    """Return the headless config payload for the usersessions app."""
    # Clients use this flag to decide whether to render last-seen info.
    return {"usersessions": {"track_activity": app_settings.TRACK_ACTIVITY}}
| SessionsResponse |
python | kubernetes-client__python | kubernetes/client/models/v2_resource_metric_status.py | {
"start": 383,
"end": 4520
class V2ResourceMetricStatus(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types: attribute name -> attribute type.
    # attribute_map: attribute name -> JSON key in the API definition.
    openapi_types = {
        'current': 'V2MetricValueStatus',
        'name': 'str'
    }

    attribute_map = {
        'current': 'current',
        'name': 'name'
    }

    def __init__(self, current=None, name=None, local_vars_configuration=None):  # noqa: E501
        """V2ResourceMetricStatus - a model defined in OpenAPI

        :param current: current status of the metric (validated non-None
            when client-side validation is enabled).
        :param name: name of the resource in question (validated non-None
            when client-side validation is enabled).
        :param local_vars_configuration: client Configuration controlling
            validation; a default Configuration is created when omitted.
        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._current = None
        self._name = None
        self.discriminator = None

        # Assign through the property setters so validation applies.
        self.current = current
        self.name = name

    @property
    def current(self):
        """Gets the current of this V2ResourceMetricStatus.  # noqa: E501

        :return: The current of this V2ResourceMetricStatus.  # noqa: E501
        :rtype: V2MetricValueStatus
        """
        return self._current

    @current.setter
    def current(self, current):
        """Sets the current of this V2ResourceMetricStatus.

        :param current: The current of this V2ResourceMetricStatus.  # noqa: E501
        :type: V2MetricValueStatus
        """
        if self.local_vars_configuration.client_side_validation and current is None:  # noqa: E501
            raise ValueError("Invalid value for `current`, must not be `None`")  # noqa: E501

        self._current = current

    @property
    def name(self):
        """Gets the name of this V2ResourceMetricStatus.  # noqa: E501

        name is the name of the resource in question.  # noqa: E501

        :return: The name of this V2ResourceMetricStatus.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this V2ResourceMetricStatus.

        name is the name of the resource in question.  # noqa: E501

        :param name: The name of this V2ResourceMetricStatus.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and name is None:  # noqa: E501
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501

        self._name = name

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Modernized: use native dict.items() instead of Python-2-era
        # six.iteritems(); behavior is identical on Python 3.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V2ResourceMetricStatus):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V2ResourceMetricStatus):
            return True

        return self.to_dict() != other.to_dict()
| V2ResourceMetricStatus |
python | tensorflow__tensorflow | tensorflow/python/ops/image_ops_test.py | {
"start": 243093,
"end": 244877
class ImageGradientsTest(test_util.TensorFlowTestCase):
  """Tests for image_ops.image_gradients."""

  def testImageGradients(self):
    # 4-D input: reshape the 2x4 image to rank 4 before taking gradients.
    shape = [1, 2, 4, 1]
    img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
    img = array_ops.reshape(img, shape)

    # Per the expected values, the last row of dy and the last column of dx
    # are zero (forward differences with a zero pad at the boundary).
    expected_dy = np.reshape([[7, 4, 1, 4], [0, 0, 0, 0]], shape)
    expected_dx = np.reshape([[2, 1, -2, 0], [-1, -2, 1, 0]], shape)

    dy, dx = image_ops.image_gradients(img)
    with self.cached_session():
      actual_dy = self.evaluate(dy)
      actual_dx = self.evaluate(dx)
      self.assertAllClose(expected_dy, actual_dy)
      self.assertAllClose(expected_dx, actual_dx)

  def testImageGradientsMultiChannelBatch(self):
    # Batch of two 2x3 images with two channels each (shape [2, 2, 3, 2]).
    batch = [[[[1, 2], [2, 5], [3, 3]],
              [[8, 4], [5, 1], [9, 8]]],
             [[[5, 3], [7, 9], [1, 6]],
              [[1, 2], [6, 3], [6, 3]]]]

    expected_dy = [[[[7, 2], [3, -4], [6, 5]],
                    [[0, 0], [0, 0], [0, 0]]],
                   [[[-4, -1], [-1, -6], [5, -3]],
                    [[0, 0], [0, 0], [0, 0]]]]

    expected_dx = [[[[1, 3], [1, -2], [0, 0]],
                    [[-3, -3], [4, 7], [0, 0]]],
                   [[[2, 6], [-6, -3], [0, 0]],
                    [[5, 1], [0, 0], [0, 0]]]]

    batch = constant_op.constant(batch)
    assert batch.get_shape().as_list() == [2, 2, 3, 2]
    dy, dx = image_ops.image_gradients(batch)
    with self.cached_session():
      actual_dy = self.evaluate(dy)
      actual_dx = self.evaluate(dx)
      self.assertAllClose(expected_dy, actual_dy)
      self.assertAllClose(expected_dx, actual_dx)

  def testImageGradientsBadShape(self):
    # [2 x 4] image but missing batch and depth dimensions.
    img = constant_op.constant([[1, 3, 4, 2], [8, 7, 5, 6]])
    with self.assertRaises(ValueError):
      image_ops.image_gradients(img)
| ImageGradientsTest |
python | pytorch__pytorch | torch/utils/_content_store.py | {
"start": 7447,
"end": 9327
class ContentStoreReader:
    """Reads storages and tensors back from a content-store directory.

    On-disk layout under ``loc`` (as used by the read methods below):
      * ``storages/<hash>`` -- serialized untyped storages, keyed by hash
      * ``tensors/<name>``  -- per-tensor metadata referencing a storage hash
    """

    def __init__(self, loc: str, *, cache=True) -> None:
        self.loc = loc
        # Optional per-device map of weak references to storages already
        # loaded, so repeated reads of the same hash can share memory.
        self.storage_cache: (
            dict[torch.device | None, dict[str, StorageWeakRef]] | None
        ) = None
        if cache:
            self.storage_cache = defaultdict(dict)

    def read_storage(self, h: str, *, device=None) -> torch.UntypedStorage:
        """Load the untyped storage with content hash ``h``.

        Serves from the weak-ref cache when the storage is still alive;
        otherwise loads from disk and (when caching) records a weak ref.
        """
        if device is not None:
            device = torch.device(device)
        weak_ref = None
        if self.storage_cache is not None:
            weak_ref = self.storage_cache[device].get(h)
        if weak_ref is not None:
            live = torch.UntypedStorage._new_with_weak_ptr(weak_ref.cdata)
            if live is not None:
                return live
        loaded = torch.load(
            os.path.join(self.loc, "storages", h),
            weights_only=True,
            map_location=device,
        )._untyped_storage
        if loaded is None:
            raise AssertionError(
                f"expected storage for hash {h} in {os.path.join(self.loc, 'storages')}, got None"
            )
        if self.storage_cache is not None:
            self.storage_cache[device][h] = StorageWeakRef(loaded)
        return loaded

    def read_tensor_metadata(self, name: str):
        """Return the metadata record saved for tensor ``name``.

        Raises FileNotFoundError when no such tensor was recorded.
        """
        path = os.path.join(self.loc, "tensors", name)
        if not os.path.exists(path):
            raise FileNotFoundError(path)
        return torch.load(path, weights_only=True)

    def read_tensor(self, name: str, *, device=None) -> torch.Tensor:
        """Reconstruct tensor ``name`` as a view into its stored storage."""
        dtype, h, storage_offset, size, stride, metadata = self.read_tensor_metadata(
            name
        )
        storage = self.read_storage(h, device=device)
        out = torch.tensor([], dtype=dtype, device=storage.device)
        out.set_(storage, storage_offset, size, stride)
        torch._utils.set_tensor_metadata(out, metadata)
        return out
| ContentStoreReader |
python | ray-project__ray | python/ray/serve/tests/test_http_cancellation.py | {
"start": 395,
"end": 7018
class Collector:
    """In-order event recorder; this module's tests run it as a Ray actor
    (see the ``Collector.remote()`` call sites below)."""

    def __init__(self):
        # Insertion-ordered record of everything passed to add().
        self.items = []

    def add(self, item):
        """Record a single item."""
        self.items.append(item)

    def get(self):
        """Return all recorded items, oldest first."""
        return self.items
def test_collector_class(serve_instance):
    # Round-trip heterogeneous items through the Collector actor and check
    # both length and per-index contents are preserved.
    collector = Collector.remote()
    random_items = ["this", "is", 1, "demo", "string"]
    for item in random_items:
        collector.add.remote(item)

    result = ray.get(collector.get.remote())
    assert len(result) == len(random_items)
    for i in range(0, len(result)):
        assert result[i] == random_items[i]
@pytest.mark.parametrize("use_fastapi", [False, True])
def test_cancel_on_http_client_disconnect_during_execution(
    serve_instance, use_fastapi: bool
):
    """Test the client disconnecting while the handler is executing.

    Covered for both the FastAPI ingress and the raw ``__call__`` ingress;
    cancellation must propagate to the nested deployment handle call too.
    """
    inner_signal_actor = SignalActor.remote()
    outer_signal_actor = SignalActor.remote()

    @serve.deployment
    async def inner():
        # Blocks forever; only the cancellation path signals the actor.
        async with send_signal_on_cancellation(inner_signal_actor):
            pass

    if use_fastapi:
        app = FastAPI()

        @serve.deployment
        @serve.ingress(app)
        class Ingress:
            def __init__(self, handle):
                self._handle = handle

            @app.get("/")
            async def wait_for_cancellation(self):
                _ = self._handle.remote()
                async with send_signal_on_cancellation(outer_signal_actor):
                    pass

    else:

        @serve.deployment
        class Ingress:
            def __init__(self, handle):
                self._handle = handle

            async def __call__(self, request: Request):
                _ = self._handle.remote()
                async with send_signal_on_cancellation(outer_signal_actor):
                    pass

    serve.run(Ingress.bind(inner.bind()))

    # Intentionally time out on the client, causing it to disconnect.
    with pytest.raises(httpx.ReadTimeout):
        httpx.get(get_application_url("HTTP"), timeout=0.5)

    # Both the HTTP handler and the inner deployment handle call should be cancelled.
    ray.get(inner_signal_actor.wait.remote(), timeout=10)
    ray.get(outer_signal_actor.wait.remote(), timeout=10)
def test_cancel_on_http_client_disconnect_during_assignment(serve_instance):
    """Test the client disconnecting while the proxy is assigning the request."""
    signal_actor = SignalActor.remote()

    # max_ongoing_requests=1 forces subsequent requests to queue behind the
    # first one during replica assignment.
    @serve.deployment(max_ongoing_requests=1)
    class Ingress:
        def __init__(self):
            self._num_requests = 0

        async def __call__(self, *args):
            self._num_requests += 1
            await signal_actor.wait.remote()

            return self._num_requests

    h = serve.run(Ingress.bind())

    # Send a request and wait for it to be ongoing so we know that further requests
    # will block trying to assign a replica.
    initial_response = h.remote()
    wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 1)

    # Intentionally time out on the client, causing it to disconnect.
    with pytest.raises(httpx.ReadTimeout):
        httpx.get(get_application_url("HTTP"), timeout=0.5)

    # Now signal the initial request to finish and check that the request sent via HTTP
    # never reaches the replica.
    ray.get(signal_actor.send.remote())
    assert initial_response.result() == 1
    for i in range(2, 12):
        assert h.remote().result() == i
@pytest.mark.asyncio
async def test_request_cancelled_error_on_http_client_disconnect_during_execution(
    serve_instance,
):
    """Test the exception thrown for executing request on http client disconnect.

    Both Child and Parent record which cancellation exception they observe,
    and the final assertion pins the expected pair.
    """
    collector = Collector.remote()
    child_signal = SignalActor.remote()

    @serve.deployment(max_ongoing_requests=1)
    class Child:
        async def __call__(self):
            try:
                await child_signal.wait.remote()
            except asyncio.CancelledError:
                await collector.add.remote("Child_CancelledError")
                raise

    @serve.deployment
    class Parent:
        def __init__(self, child):
            self.child = child

        async def __call__(self):
            try:
                await self.child.remote()
            except asyncio.CancelledError:
                await collector.add.remote("Parent_AsyncioCancelledError")
                raise
            except RequestCancelledError:
                await collector.add.remote("Parent_RequestCancelledError")
                raise

    serve.run(Parent.bind(Child.bind()))

    # Make a request with short timeout that will cause disconnection
    try:
        await httpx.AsyncClient(timeout=0.5).get(get_application_url("HTTP"))
    except httpx.ReadTimeout:
        pass

    wait_for_condition(
        lambda: set(ray.get(collector.get.remote()))
        == {"Child_CancelledError", "Parent_AsyncioCancelledError"}
    )
@pytest.mark.asyncio
async def test_request_cancelled_error_on_http_client_disconnect_during_assignment(
    serve_instance,
):
    """Test the exception thrown for queued request on http client disconnect.

    Unlike the execution-phase variant above, the HTTP request here never
    reaches Child (it is still queued), so only Parent records an error.
    """
    collector = Collector.remote()
    child_signal = SignalActor.remote()

    @serve.deployment(max_ongoing_requests=1)
    class Child:
        async def __call__(self):
            try:
                await child_signal.wait.remote()
            except asyncio.CancelledError:
                await collector.add.remote("Child_CancelledError")
                raise

    @serve.deployment
    class Parent:
        def __init__(self, child):
            self.child = child

        async def __call__(self):
            try:
                await self.child.remote()
            except asyncio.CancelledError:
                await collector.add.remote("Parent_AsyncioCancelledError")
                raise
            except RequestCancelledError:
                await collector.add.remote("Parent_RequestCancelledError")
                raise

    h = serve.run(Parent.bind(Child.bind()))

    # Block Child with first request
    r = h.remote()
    wait_for_condition(lambda: ray.get(child_signal.cur_num_waiters.remote()) == 1)

    # Make a second request with short timeout that will cause disconnection
    try:
        await httpx.AsyncClient(timeout=0.5).get(get_application_url("HTTP"))
    except httpx.ReadTimeout:
        pass

    wait_for_condition(
        lambda: ray.get(collector.get.remote()) == ["Parent_AsyncioCancelledError"]
    )

    # Clean up first request
    r.cancel()
    try:
        await r
    except RequestCancelledError:
        pass
if __name__ == "__main__":
    # Allow running this test module directly, outside the pytest CLI.
    sys.exit(pytest.main(["-v", "-s", __file__]))
| Collector |
python | redis__redis-py | redis/cluster.py | {
"start": 105353,
"end": 121660
class PipelineStrategy(AbstractStrategy):
    """Non-transactional pipeline strategy for RedisCluster.

    Buffers commands, routes each to a single target node based on the
    resolved command policy, flushes all node buffers, and retries/redirects
    failed commands individually through the parent client.
    """

    def __init__(self, pipe: ClusterPipeline):
        super().__init__(pipe)
        self.command_flags = pipe.command_flags

    def execute_command(self, *args, **kwargs):
        """Queue the command in the pipeline instead of executing it now."""
        return self.pipeline_execute_command(*args, **kwargs)

    def _raise_first_error(self, stack):
        """
        Raise the first exception on the stack
        """
        for c in stack:
            r = c.result
            if isinstance(r, Exception):
                self.annotate_exception(r, c.position + 1, c.args)
                raise r

    def execute(self, raise_on_error: bool = True) -> List[Any]:
        """Send all queued commands; always reset the queue afterwards."""
        stack = self._command_queue
        if not stack:
            return []

        try:
            return self.send_cluster_commands(stack, raise_on_error)
        finally:
            self.reset()

    def reset(self):
        """
        Reset back to empty pipeline.
        """
        self._command_queue = []

    def send_cluster_commands(
        self, stack, raise_on_error=True, allow_redirections=True
    ):
        """
        Wrapper for RedisCluster.ERRORS_ALLOW_RETRY errors handling.

        If one of the retryable exceptions has been thrown we assume that:
         - connection_pool was disconnected
         - connection_pool was reset
         - refresh_table_asap set to True

        It will try the number of times specified by
        the retries in config option "self.retry"
        which defaults to 3 unless manually configured.

        If it reaches the number of times, the command will
        raises ClusterDownException.
        """
        if not stack:
            return []
        retry_attempts = self._pipe.retry.get_retries()
        while True:
            try:
                return self._send_cluster_commands(
                    stack,
                    raise_on_error=raise_on_error,
                    allow_redirections=allow_redirections,
                )
            except RedisCluster.ERRORS_ALLOW_RETRY as e:
                if retry_attempts > 0:
                    # Try again with the new cluster setup. All other errors
                    # should be raised.
                    retry_attempts -= 1
                    pass
                else:
                    raise e

    def _send_cluster_commands(
        self, stack, raise_on_error=True, allow_redirections=True
    ):
        """
        Send a bunch of cluster commands to the redis cluster.

        `allow_redirections` If the pipeline should follow
        `ASK` & `MOVED` responses automatically. If set
        to false it will raise RedisClusterException.
        """
        # the first time sending the commands we send all of
        # the commands that were queued up.
        # if we have to run through it again, we only retry
        # the commands that failed.
        attempt = sorted(stack, key=lambda x: x.position)
        is_default_node = False
        # build a list of node objects based on node names we need to
        nodes = {}

        # as we move through each command that still needs to be processed,
        # we figure out the slot number that command maps to, then from
        # the slot determine the node.
        for c in attempt:
            command_policies = self._pipe._policy_resolver.resolve(c.args[0].lower())
            while True:
                # refer to our internal node -> slot table that
                # tells us where a given command should route to.
                # (it might be possible we have a cached node that no longer
                # exists in the cluster, which is why we do this in a loop)
                passed_targets = c.options.pop("target_nodes", None)
                if passed_targets and not self._is_nodes_flag(passed_targets):
                    # Explicit target node(s) supplied by the caller win over
                    # any policy-based routing.
                    target_nodes = self._parse_target_nodes(passed_targets)

                    if not command_policies:
                        command_policies = CommandPolicies()
                else:
                    if not command_policies:
                        command = c.args[0].upper()
                        if (
                            len(c.args) >= 2
                            and f"{c.args[0]} {c.args[1]}".upper()
                            in self._pipe.command_flags
                        ):
                            command = f"{c.args[0]} {c.args[1]}".upper()

                        # We only could resolve key properties if command is not
                        # in a list of pre-defined request policies
                        command_flag = self.command_flags.get(command)
                        if not command_flag:
                            # Fallback to default policy
                            if not self._pipe.get_default_node():
                                keys = None
                            else:
                                keys = self._pipe._get_command_keys(*c.args)
                            if not keys or len(keys) == 0:
                                command_policies = CommandPolicies()
                            else:
                                command_policies = CommandPolicies(
                                    request_policy=RequestPolicy.DEFAULT_KEYED,
                                    response_policy=ResponsePolicy.DEFAULT_KEYED,
                                )
                        else:
                            if command_flag in self._pipe._command_flags_mapping:
                                command_policies = CommandPolicies(
                                    request_policy=self._pipe._command_flags_mapping[
                                        command_flag
                                    ]
                                )
                            else:
                                command_policies = CommandPolicies()

                    target_nodes = self._determine_nodes(
                        *c.args,
                        request_policy=command_policies.request_policy,
                        node_flag=passed_targets,
                    )
                    if not target_nodes:
                        raise RedisClusterException(
                            f"No targets were found to execute {c.args} command on"
                        )

                c.command_policies = command_policies

                # Pipelined commands must route to exactly one node each.
                if len(target_nodes) > 1:
                    raise RedisClusterException(
                        f"Too many targets for command {c.args}"
                    )

                node = target_nodes[0]
                if node == self._pipe.get_default_node():
                    is_default_node = True

                # now that we know the name of the node
                # ( it's just a string in the form of host:port )
                # we can build a list of commands for each node.
                node_name = node.name
                if node_name not in nodes:
                    redis_node = self._pipe.get_redis_connection(node)
                    try:
                        connection = get_connection(redis_node)
                    except (ConnectionError, TimeoutError):
                        for n in nodes.values():
                            n.connection_pool.release(n.connection)
                        # Connection retries are being handled in the node's
                        # Retry object. Reinitialize the node -> slot table.
                        self._nodes_manager.initialize()
                        if is_default_node:
                            self._pipe.replace_default_node()
                        raise
                    nodes[node_name] = NodeCommands(
                        redis_node.parse_response,
                        redis_node.connection_pool,
                        connection,
                    )

                nodes[node_name].append(c)
                break

        # send the commands in sequence.
        # we write to all the open sockets for each node first,
        # before reading anything
        # this allows us to flush all the requests out across the
        # network
        # so that we can read them from different sockets as they come back.
        # we dont' multiplex on the sockets as they come available,
        # but that shouldn't make too much difference.
        try:
            node_commands = nodes.values()
            for n in node_commands:
                n.write()
            for n in node_commands:
                n.read()
        finally:
            # release all of the redis connections we allocated earlier
            # back into the connection pool.
            # we used to do this step as part of a try/finally block,
            # but it is really dangerous to
            # release connections back into the pool if for some
            # reason the socket has data still left in it
            # from a previous operation. The write and
            # read operations already have try/catch around them for
            # all known types of errors including connection
            # and socket level errors.
            # So if we hit an exception, something really bad
            # happened and putting any oF
            # these connections back into the pool is a very bad idea.
            # the socket might have unread buffer still sitting in it,
            # and then the next time we read from it we pass the
            # buffered result back from a previous command and
            # every single request after to that connection will always get
            # a mismatched result.
            for n in nodes.values():
                n.connection_pool.release(n.connection)

        # if the response isn't an exception it is a
        # valid response from the node
        # we're all done with that command, YAY!
        # if we have more commands to attempt, we've run into problems.
        # collect all the commands we are allowed to retry.
        # (MOVED, ASK, or connection errors or timeout errors)
        attempt = sorted(
            (
                c
                for c in attempt
                if isinstance(c.result, ClusterPipeline.ERRORS_ALLOW_RETRY)
            ),
            key=lambda x: x.position,
        )
        if attempt and allow_redirections:
            # RETRY MAGIC HAPPENS HERE!
            # send these remaining commands one at a time using `execute_command`
            # in the main client. This keeps our retry logic
            # in one place mostly,
            # and allows us to be more confident in correctness of behavior.
            # at this point any speed gains from pipelining have been lost
            # anyway, so we might as well make the best
            # attempt to get the correct behavior.
            #
            # The client command will handle retries for each
            # individual command sequentially as we pass each
            # one into `execute_command`. Any exceptions
            # that bubble out should only appear once all
            # retries have been exhausted.
            #
            # If a lot of commands have failed, we'll be setting the
            # flag to rebuild the slots table from scratch.
            # So MOVED errors should correct themselves fairly quickly.
            self._pipe.reinitialize_counter += 1
            if self._pipe._should_reinitialized():
                self._nodes_manager.initialize()
                if is_default_node:
                    self._pipe.replace_default_node()
            for c in attempt:
                try:
                    # send each command individually like we
                    # do in the main client.
                    c.result = self._pipe.parent_execute_command(*c.args, **c.options)
                except RedisError as e:
                    c.result = e

        # turn the response back into a simple flat array that corresponds
        # to the sequence of commands issued in the stack in pipeline.execute()
        response = []
        for c in sorted(stack, key=lambda x: x.position):
            if c.args[0] in self._pipe.cluster_response_callbacks:
                # Remove keys entry, it needs only for cache.
                c.options.pop("keys", None)
                c.result = self._pipe._policies_callback_mapping[
                    c.command_policies.response_policy
                ](
                    self._pipe.cluster_response_callbacks[c.args[0]](
                        c.result, **c.options
                    )
                )
            response.append(c.result)

        if raise_on_error:
            self._raise_first_error(stack)

        return response

    def _is_nodes_flag(self, target_nodes):
        """True when target_nodes is one of the string node flags."""
        return isinstance(target_nodes, str) and target_nodes in self._pipe.node_flags

    def _parse_target_nodes(self, target_nodes):
        """Normalize a caller-supplied target_nodes value into node objects."""
        if isinstance(target_nodes, list):
            nodes = target_nodes
        elif isinstance(target_nodes, ClusterNode):
            # Supports passing a single ClusterNode as a variable
            nodes = [target_nodes]
        elif isinstance(target_nodes, dict):
            # Supports dictionaries of the format {node_name: node}.
            # It enables to execute commands with multi nodes as follows:
            # rc.cluster_save_config(rc.get_primaries())
            nodes = target_nodes.values()
        else:
            raise TypeError(
                "target_nodes type can be one of the following: "
                "node_flag (PRIMARIES, REPLICAS, RANDOM, ALL_NODES),"
                "ClusterNode, list<ClusterNode>, or dict<any, ClusterNode>. "
                f"The passed type is {type(target_nodes)}"
            )
        return nodes

    def _determine_nodes(
        self, *args, request_policy: RequestPolicy, **kwargs
    ) -> List["ClusterNode"]:
        # Determine which nodes should be executed the command on.
        # Returns a list of target nodes.
        command = args[0].upper()
        if (
            len(args) >= 2
            and f"{args[0]} {args[1]}".upper() in self._pipe.command_flags
        ):
            command = f"{args[0]} {args[1]}".upper()

        nodes_flag = kwargs.pop("nodes_flag", None)
        if nodes_flag is not None:
            # nodes flag passed by the user
            command_flag = nodes_flag
        else:
            # get the nodes group for this command if it was predefined
            command_flag = self._pipe.command_flags.get(command)

        # A predefined flag overrides the request policy resolved earlier.
        if command_flag in self._pipe._command_flags_mapping:
            request_policy = self._pipe._command_flags_mapping[command_flag]

        policy_callback = self._pipe._policies_callback_mapping[request_policy]

        if request_policy == RequestPolicy.DEFAULT_KEYED:
            nodes = policy_callback(command, *args)
        elif request_policy == RequestPolicy.MULTI_SHARD:
            nodes = policy_callback(*args, **kwargs)
        elif request_policy == RequestPolicy.DEFAULT_KEYLESS:
            nodes = policy_callback(args[0])
        else:
            nodes = policy_callback()

        if args[0].lower() == "ft.aggregate":
            self._aggregate_nodes = nodes

        return nodes

    def multi(self):
        """Not supported outside of a transactional pipeline."""
        raise RedisClusterException(
            "method multi() is not supported outside of transactional context"
        )

    def discard(self):
        """Not supported outside of a transactional pipeline."""
        raise RedisClusterException(
            "method discard() is not supported outside of transactional context"
        )

    def watch(self, *names):
        """Not supported outside of a transactional pipeline."""
        raise RedisClusterException(
            "method watch() is not supported outside of transactional context"
        )

    def unwatch(self, *names):
        """Not supported outside of a transactional pipeline."""
        raise RedisClusterException(
            "method unwatch() is not supported outside of transactional context"
        )

    def delete(self, *names):
        """Queue a DEL for exactly one key (multi-key DEL is unsupported)."""
        if len(names) != 1:
            raise RedisClusterException(
                "deleting multiple keys is not implemented in pipeline command"
            )

        return self.execute_command("DEL", names[0])

    def unlink(self, *names):
        """Queue an UNLINK for exactly one key (multi-key is unsupported)."""
        if len(names) != 1:
            raise RedisClusterException(
                "unlinking multiple keys is not implemented in pipeline command"
            )

        return self.execute_command("UNLINK", names[0])
| PipelineStrategy |
python | getsentry__sentry | src/sentry/api/serializers/models/organization_member/expand/teams.py | {
"start": 471,
"end": 1735
class OrganizationMemberWithTeamsSerializer(OrganizationMemberSerializer):
    """Member serializer that also attaches team membership data."""

    def get_attrs(
        self,
        item_list: Sequence[OrganizationMember],
        user: User | RpcUser | AnonymousUser,
        **kwargs: Any,
    ) -> MutableMapping[OrganizationMember, MutableMapping[str, Any]]:
        attrs = super().get_attrs(item_list, user)

        # Bulk-fetch team data once for the whole list.
        teams, teams_with_role = get_teams_by_organization_member_id(item_list)
        for item in item_list:
            try:
                attrs[item]["teams"] = teams.get(item.id, [])  # Deprecated
                attrs[item]["teamRoles"] = teams_with_role.get(item.id, [])
            except KeyError:
                # NOTE(review): this fallback stores the *entire* mappings
                # rather than this member's entries (the try branch uses
                # .get(item.id, [])). Looks suspicious — confirm intent.
                attrs[item] = {
                    "teams": teams,  # Deprecated
                    "teamRoles": teams_with_role,
                }

        return attrs

    def serialize(
        self,
        obj: OrganizationMember,
        attrs: Mapping[str, Any],
        user: User | RpcUser | AnonymousUser,
        **kwargs: Any,
    ) -> OrganizationMemberWithTeamsResponse:
        d = cast(OrganizationMemberWithTeamsResponse, super().serialize(obj, attrs, user))

        d["teams"] = attrs.get("teams", [])  # Deprecated
        d["teamRoles"] = attrs.get("teamRoles", [])

        return d
| OrganizationMemberWithTeamsSerializer |
python | google__jax | jax/_src/core.py | {
"start": 99357,
"end": 99788
class Token:
  """Opaque wrapper around an array, used to express data dependencies."""

  # The underlying data wrapped by the token, could be used to threaded in and
  # out of computations to build up data dependency.
  _buf: Array

  def __init__(self, buf):
    self._buf = buf

  def block_until_ready(self):
    # Delegate to the wrapped array's blocking wait.
    self._buf.block_until_ready()
# Register Token: every Token maps to the abstract token aval, and value
# canonicalization is the identity.
pytype_aval_mappings[Token] = lambda _: abstract_token
dtypes.canonicalize_value_handlers[Token] = lambda x: x

### Operations on shapes and dimension sizes.
| Token |
python | ray-project__ray | python/ray/data/tests/test_consumption.py | {
"start": 24845,
"end": 30169
class FlakyCSVDatasink(CSVDatasink):
    """CSV datasink that fails exactly once: the first block write raises."""

    def __init__(self, path, **csv_datasink_kwargs):
        super().__init__(path, **csv_datasink_kwargs)
        # Shared counter actor so the fail-once behavior holds across
        # distributed write tasks.
        self.counter = Counter.remote()

    def write_block_to_file(self, block: BlockAccessor, file):
        # The very first write observes count == 1 and fails; every later
        # write proceeds normally.
        if ray.get(self.counter.increment.remote()) == 1:
            raise ValueError("oops")
        super().write_block_to_file(block, file)
def test_datasource(ray_start_regular):
    # Custom datasources are readable via read_datasource and yield the
    # expected row count / values.
    source = ray.data.datasource.RandomIntRowDatasource(n=10, num_columns=2)
    assert len(ray.data.read_datasource(source).take()) == 10
    source = RangeDatasource(n=10)
    assert extract_values(
        "value",
        ray.data.read_datasource(source).take(),
    ) == list(range(10))
@pytest.mark.skip(reason="")
def test_polars_lazy_import(shutdown_only):
    """polars should only be imported by workers when it is actually used."""
    import sys

    ctx = ray.data.context.DataContext.get_current()

    try:
        original_use_polars = ctx.use_polars
        ctx.use_polars = True

        num_items = 100
        parallelism = 4
        ray.init(num_cpus=4)

        @ray.remote
        def f(should_import_polars):
            # Sleep to spread the tasks.
            time.sleep(1)
            polars_imported = "polars" in sys.modules.keys()
            return polars_imported == should_import_polars

        # We should not use polars for non-Arrow sort.
        _ = ray.data.range(num_items, override_num_blocks=parallelism).sort()
        assert all(ray.get([f.remote(False) for _ in range(parallelism)]))

        a = range(100)
        dfs = []
        partition_size = num_items // parallelism
        for i in range(parallelism):
            dfs.append(
                pd.DataFrame({"a": a[i * partition_size : (i + 1) * partition_size]})
            )
        # At least one worker should have imported polars.
        _ = (
            ray.data.from_pandas(dfs)
            .map_batches(lambda t: t, batch_format="pyarrow", batch_size=None)
            .sort(key="a")
            .materialize()
        )
        assert any(ray.get([f.remote(True) for _ in range(parallelism)]))
    finally:
        # Restore the global context flag for subsequent tests.
        ctx.use_polars = original_use_polars
def test_batch_formats(shutdown_only):
    # range() datasets: native (None) batch format is an Arrow table.
    ds = ray.data.range(100)
    assert isinstance(next(iter(ds.iter_batches(batch_format=None))), pa.Table)
    assert isinstance(next(iter(ds.iter_batches(batch_format="default"))), dict)
    assert isinstance(next(iter(ds.iter_batches(batch_format="pandas"))), pd.DataFrame)
    assert isinstance(next(iter(ds.iter_batches(batch_format="pyarrow"))), pa.Table)
    assert isinstance(next(iter(ds.iter_batches(batch_format="numpy"))), dict)

    # range_tensor() datasets behave the same as range().
    ds = ray.data.range_tensor(100)
    assert isinstance(next(iter(ds.iter_batches(batch_format=None))), pa.Table)
    assert isinstance(next(iter(ds.iter_batches(batch_format="default"))), dict)
    assert isinstance(next(iter(ds.iter_batches(batch_format="pandas"))), pd.DataFrame)
    assert isinstance(next(iter(ds.iter_batches(batch_format="pyarrow"))), pa.Table)
    assert isinstance(next(iter(ds.iter_batches(batch_format="numpy"))), dict)

    # Pandas-sourced datasets keep pandas as their native (None) format.
    df = pd.DataFrame({"foo": ["a", "b"], "bar": [0, 1]})
    ds = ray.data.from_pandas(df)
    assert isinstance(next(iter(ds.iter_batches(batch_format=None))), pd.DataFrame)
    assert isinstance(next(iter(ds.iter_batches(batch_format="default"))), dict)
    assert isinstance(next(iter(ds.iter_batches(batch_format="pandas"))), pd.DataFrame)
    assert isinstance(next(iter(ds.iter_batches(batch_format="pyarrow"))), pa.Table)
    assert isinstance(next(iter(ds.iter_batches(batch_format="numpy"))), dict)
def test_dataset_schema_after_read_stats(ray_start_cluster):
    # Calling stats() must not change the reported schema.
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=1)
    ray.init(cluster.address)
    # Second node carries the custom "foo" resource the read task requires.
    cluster.add_node(num_cpus=1, resources={"foo": 1})
    ds = ray.data.read_csv(
        "example://iris.csv", ray_remote_args={"resources": {"foo": 1}}
    )
    schema = ds.schema()
    ds.stats()
    assert schema == ds.schema()
def test_dataset_plan_as_string(ray_start_cluster):
    # The rendered plan shows the source dataset with its schema, and each
    # chained map_batches as an indented operator above it.
    ds = ray.data.read_parquet("example://iris.parquet", override_num_blocks=8)
    assert ds._plan.get_plan_as_string(type(ds)) == (
        "Dataset(\n"
        "   num_rows=?,\n"
        "   schema={\n"
        "      sepal.length: double,\n"
        "      sepal.width: double,\n"
        "      petal.length: double,\n"
        "      petal.width: double,\n"
        "      variety: string\n"
        "   }\n"
        ")"
    )
    for _ in range(5):
        ds = ds.map_batches(lambda x: x)
    assert ds._plan.get_plan_as_string(type(ds)) == (
        "MapBatches(<lambda>)\n"
        "+- MapBatches(<lambda>)\n"
        "   +- MapBatches(<lambda>)\n"
        "      +- MapBatches(<lambda>)\n"
        "         +- MapBatches(<lambda>)\n"
        "            +- Dataset(\n"
        "                  num_rows=?,\n"
        "                  schema={\n"
        "                     sepal.length: double,\n"
        "                     sepal.width: double,\n"
        "                     petal.length: double,\n"
        "                     petal.width: double,\n"
        "                     variety: string\n"
        "                  }\n"
        "               )"
    )
if __name__ == "__main__":
    # Allow running this test module directly, outside the pytest CLI.
    sys.exit(pytest.main(["-v", __file__]))
| FlakyCSVDatasink |
python | pytorch__pytorch | torch/fx/traceback.py | {
"start": 1957,
"end": 2080
class NodeSourceAction(Enum):
    """How a node came to be associated with its source record:
    newly created, or substituted for another node."""

    CREATE = "create"
    REPLACE = "replace"
@compatibility(is_backward_compatible=False)
| NodeSourceAction |
python | django__django | django/utils/html.py | {
"start": 8702,
"end": 18531
} | class ____:
"""
Convert any URLs in text into clickable links.
Work on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
"""
trailing_punctuation_chars = ".,:;!"
wrapping_punctuation = [("(", ")"), ("[", "]")]
simple_url_re = _lazy_re_compile(r"^https?://\[?\w", re.IGNORECASE)
simple_url_2_re = _lazy_re_compile(
rf"^www\.|^(?!http)(?:{DomainNameValidator.hostname_re})"
rf"(?:{DomainNameValidator.domain_re})"
r"\.(com|edu|gov|int|mil|net|org)($|/.*)$",
re.IGNORECASE,
)
word_split_re = _lazy_re_compile(r"""([\s<>"']+)""")
mailto_template = "mailto:{local}@{domain}"
url_template = '<a href="{href}"{attrs}>{url}</a>'
def __call__(self, text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
If trim_url_limit is not None, truncate the URLs in the link text
longer than this limit to trim_url_limit - 1 characters and append an
ellipsis.
If nofollow is True, give the links a rel="nofollow" attribute.
If autoescape is True, autoescape the link text and URLs.
"""
safe_input = isinstance(text, SafeData)
words = self.word_split_re.split(str(text))
local_cache = {}
urlized_words = []
for word in words:
if (urlized_word := local_cache.get(word)) is None:
urlized_word = self.handle_word(
word,
safe_input=safe_input,
trim_url_limit=trim_url_limit,
nofollow=nofollow,
autoescape=autoescape,
)
local_cache[word] = urlized_word
urlized_words.append(urlized_word)
return "".join(urlized_words)
def handle_word(
self,
word,
*,
safe_input,
trim_url_limit=None,
nofollow=False,
autoescape=False,
):
if "." in word or "@" in word or ":" in word:
# lead: Punctuation trimmed from the beginning of the word.
# middle: State of the word.
# trail: Punctuation trimmed from the end of the word.
lead, middle, trail = self.trim_punctuation(word)
# Make URL we want to point to.
url = None
nofollow_attr = ' rel="nofollow"' if nofollow else ""
if len(middle) <= MAX_URL_LENGTH and self.simple_url_re.match(middle):
url = smart_urlquote(html.unescape(middle))
elif len(middle) <= MAX_URL_LENGTH and self.simple_url_2_re.match(middle):
unescaped_middle = html.unescape(middle)
# RemovedInDjango70Warning: When the deprecation ends, replace
# with:
# url = smart_urlquote(f"https://{unescaped_middle}")
protocol = (
"https"
if getattr(settings, "URLIZE_ASSUME_HTTPS", False)
else "http"
)
if not settings.URLIZE_ASSUME_HTTPS:
warnings.warn(
"The default protocol will be changed from HTTP to "
"HTTPS in Django 7.0. Set the URLIZE_ASSUME_HTTPS "
"transitional setting to True to opt into using HTTPS as the "
"new default protocol.",
RemovedInDjango70Warning,
stacklevel=2,
)
url = smart_urlquote(f"{protocol}://{unescaped_middle}")
elif ":" not in middle and self.is_email_simple(middle):
local, domain = middle.rsplit("@", 1)
# Encode per RFC 6068 Section 2 (items 1, 4, 5). Defer any IDNA
# to the user agent. See #36013.
local = quote(local, safe="")
domain = quote(domain, safe="")
url = self.mailto_template.format(local=local, domain=domain)
nofollow_attr = ""
# Make link.
if url:
trimmed = self.trim_url(middle, limit=trim_url_limit)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
trimmed = escape(trimmed)
middle = self.url_template.format(
href=escape(url),
attrs=nofollow_attr,
url=trimmed,
)
return mark_safe(f"{lead}{middle}{trail}")
else:
if safe_input:
return mark_safe(word)
elif autoescape:
return escape(word)
elif safe_input:
return mark_safe(word)
elif autoescape:
return escape(word)
return word
def trim_url(self, x, *, limit):
if limit is None or len(x) <= limit:
return x
return "%s…" % x[: max(0, limit - 1)]
@cached_property
def wrapping_punctuation_openings(self):
return "".join(dict(self.wrapping_punctuation).keys())
@cached_property
def trailing_punctuation_chars_no_semicolon(self):
return self.trailing_punctuation_chars.replace(";", "")
@cached_property
def trailing_punctuation_chars_has_semicolon(self):
return ";" in self.trailing_punctuation_chars
def trim_punctuation(self, word):
"""
Trim trailing and wrapping punctuation from `word`. Return the items of
the new state.
"""
# Strip all opening wrapping punctuation.
middle = word.lstrip(self.wrapping_punctuation_openings)
lead = word[: len(word) - len(middle)]
trail = deque()
# Continue trimming until middle remains unchanged.
trimmed_something = True
counts = CountsDict(word=middle)
while trimmed_something and middle:
trimmed_something = False
# Trim wrapping punctuation.
for opening, closing in self.wrapping_punctuation:
if counts[opening] < counts[closing]:
rstripped = middle.rstrip(closing)
if rstripped != middle:
strip = counts[closing] - counts[opening]
trail.appendleft(middle[-strip:])
middle = middle[:-strip]
trimmed_something = True
counts[closing] -= strip
amp = middle.rfind("&")
if amp == -1:
rstripped = middle.rstrip(self.trailing_punctuation_chars)
else:
rstripped = middle.rstrip(self.trailing_punctuation_chars_no_semicolon)
if rstripped != middle:
trail.appendleft(middle[len(rstripped) :])
middle = rstripped
trimmed_something = True
if self.trailing_punctuation_chars_has_semicolon and middle.endswith(";"):
# Only strip if not part of an HTML entity.
potential_entity = middle[amp:]
escaped = html.unescape(potential_entity)
if escaped == potential_entity or escaped.endswith(";"):
rstripped = middle.rstrip(self.trailing_punctuation_chars)
trail_start = len(rstripped)
amount_trailing_semicolons = len(middle) - len(middle.rstrip(";"))
if amp > -1 and amount_trailing_semicolons > 1:
# Leave up to most recent semicolon as might be an
# entity.
recent_semicolon = middle[trail_start:].index(";")
middle_semicolon_index = recent_semicolon + trail_start + 1
trail.appendleft(middle[middle_semicolon_index:])
middle = rstripped + middle[trail_start:middle_semicolon_index]
else:
trail.appendleft(middle[trail_start:])
middle = rstripped
trimmed_something = True
trail = "".join(trail)
return lead, middle, trail
@staticmethod
def is_email_simple(value):
"""Return True if value looks like an email address."""
try:
EmailValidator(allowlist=[])(value)
except ValidationError:
return False
return True
urlizer = Urlizer()
@keep_lazy_text
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
return urlizer(
text, trim_url_limit=trim_url_limit, nofollow=nofollow, autoescape=autoescape
)
def avoid_wrapping(value):
"""
Avoid text wrapping in the middle of a phrase by adding non-breaking
spaces where there previously were normal spaces.
"""
return value.replace(" ", "\xa0")
def html_safe(klass):
"""
A decorator that defines the __html__ method. This helps non-Django
templates to detect classes whose __str__ methods return SafeString.
"""
if "__html__" in klass.__dict__:
raise ValueError(
"can't apply @html_safe to %s because it defines "
"__html__()." % klass.__name__
)
if "__str__" not in klass.__dict__:
raise ValueError(
"can't apply @html_safe to %s because it doesn't "
"define __str__()." % klass.__name__
)
klass_str = klass.__str__
klass.__str__ = lambda self: mark_safe(klass_str(self))
klass.__html__ = lambda self: str(self)
return klass
| Urlizer |
python | getsentry__sentry | src/sentry/integrations/cursor/models.py | {
"start": 256,
"end": 341
} | class ____(BaseModel):
repository: str
ref: str | None = None
| CursorAgentSource |
python | wandb__wandb | wandb/sdk/lib/redirect.py | {
"start": 15516,
"end": 19142
} | class ____(RedirectBase):
"""Patches the write method of current sys.stdout/sys.stderr."""
def __init__(
self,
src: Literal["stdout", "stderr"],
cbs: Iterable[Callable[[str], None]] = (),
*,
flush_periodically: bool,
) -> None:
super().__init__(src=src, cbs=cbs)
self._uninstall: Callable[[], None] | None = None
self._emulator = TerminalEmulator()
self._queue: queue.Queue[str] = queue.Queue()
self._stopped = threading.Event()
self._flush_periodically = flush_periodically
def _emulator_write(self) -> None:
while True:
if self._queue.empty():
if self._stopped.is_set():
return
time.sleep(0.5)
continue
data: list[str] = []
while not self._queue.empty():
data.append(self._queue.get())
if self._stopped.is_set() and sum(map(len, data)) > 100000:
wandb.termlog("Terminal output too large. Logging without processing.")
self.flush()
for line in data:
self.flush(line)
return
try:
self._emulator.write("".join(data))
except Exception:
pass
def _callback(self) -> None:
while not (self._stopped.is_set() and self._queue.empty()):
self.flush()
time.sleep(_MIN_CALLBACK_INTERVAL)
def _on_write(self, data: str | bytes, written: int, /) -> None:
if isinstance(data, bytes):
written_data = data[:written].decode("utf-8")
else:
written_data = data[:written]
self._queue.put(written_data)
def install(self) -> None:
if self._uninstall:
return
try:
if self.src == "stdout":
self._uninstall = console_capture.capture_stdout(self._on_write)
else:
self._uninstall = console_capture.capture_stderr(self._on_write)
except console_capture.CannotCaptureConsoleError:
logger.exception("failed to install %s hooks", self.src)
wandb.termwarn(
f"Failed to wrap {self.src}. Console logs will not be captured.",
)
return
self._emulator_write_thread = threading.Thread(target=self._emulator_write)
self._emulator_write_thread.daemon = True
self._emulator_write_thread.start()
if self._flush_periodically:
self._callback_thread = threading.Thread(target=self._callback)
self._callback_thread.daemon = True
self._callback_thread.start()
def flush(self, data: str | None = None) -> None:
if data is None:
try:
data = self._emulator.read().encode("utf-8")
except Exception:
logger.exception("exception reading TerminalEmulator")
if data:
for cb in self.cbs:
try:
cb(data)
except Exception:
logger.exception("exception in StreamWrapper callback")
def uninstall(self) -> None:
if not self._uninstall:
return
self._uninstall()
self._stopped.set()
self._emulator_write_thread.join(timeout=5)
if self._emulator_write_thread.is_alive():
wandb.termlog(f"Processing terminal output ({self.src})...")
self._emulator_write_thread.join()
wandb.termlog("Done.")
self.flush()
| StreamWrapper |
python | readthedocs__readthedocs.org | readthedocs/builds/models.py | {
"start": 20700,
"end": 22528
} | class ____(Version):
"""
Version proxy model for API data deserialization.
This replaces the pattern where API data was deserialized into a mocked
:py:class:`Version` object.
This pattern was confusing, as it was not explicit
as to what form of object you were working with -- API backed or database
backed.
This model preserves the Version model methods, allowing for overrides on
model field differences. This model pattern will generally only be used on
builder instances, where we are interacting solely with API data.
"""
project = None
# This is a property in the original model, in order to
# be able to assign it a value in the constructor, we need to re-declare it
# as an attribute here.
git_identifier = None
class Meta:
proxy = True
def __init__(self, *args, **kwargs):
self.project = APIProject(**kwargs.pop("project", {}))
self.canonical_url = kwargs.pop("canonical_url", None)
self.git_identifier = kwargs.pop("git_identifier", None)
# These fields only exist on the API return, not on the model, so we'll
# remove them to avoid throwing exceptions due to unexpected fields
for key in ["resource_uri", "absolute_url", "downloads"]:
try:
del kwargs[key]
except KeyError:
pass
valid_attributes, invalid_attributes = extract_valid_attributes_for_model(
model=Version,
attributes=kwargs,
)
if invalid_attributes:
log.warning(
"APIVersion got unexpected attributes.",
invalid_attributes=invalid_attributes,
)
super().__init__(*args, **valid_attributes)
def save(self, *args, **kwargs):
return 0
| APIVersion |
python | walkccc__LeetCode | solutions/868. Binary Gap/868.py | {
"start": 0,
"end": 231
} | class ____:
def binaryGap(self, n: int) -> int:
ans = 0
d = -32 # the distance between any two 1s
while n:
if n % 2 == 1:
ans = max(ans, d)
d = 0
n //= 2
d += 1
return ans
| Solution |
python | run-llama__llama_index | llama-index-finetuning/llama_index/finetuning/types.py | {
"start": 328,
"end": 614
} | class ____(ABC):
"""Base LLM finetuning engine."""
@abstractmethod
def finetune(self) -> None:
"""Goes off and does stuff."""
@abstractmethod
def get_finetuned_model(self, **model_kwargs: Any) -> LLM:
"""Gets finetuned model."""
| BaseLLMFinetuneEngine |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/decl_api.py | {
"start": 64630,
"end": 75512
} | class ____:
"""Primary argument to the :meth:`.RegistryEvents.resolve_type_annotation`
event.
This object contains all the information needed to resolve a Python
type to a SQLAlchemy type. The :attr:`.TypeResolve.resolved_type` is
typically the main type that's resolved. To resolve an arbitrary
Python type against the current type map, the :meth:`.TypeResolve.resolve`
method may be used.
.. versionadded:: 2.1
"""
__slots__ = (
"registry",
"cls",
"key",
"raw_type",
"resolved_type",
"raw_pep_593_type",
"raw_pep_695_type",
"pep_593_resolved_argument",
"pep_695_resolved_value",
)
cls: Any
"The class being processed during declarative mapping"
registry: "registry"
"The :class:`registry` being used"
key: str
"String name of the ORM mapped attribute being processed"
raw_type: _MatchedOnType
"""The type annotation object directly from the attribute's annotations.
It's recommended to look at :attr:`.TypeResolve.resolved_type` or
one of :attr:`.TypeResolve.pep_593_resolved_argument` or
:attr:`.TypeResolve.pep_695_resolved_value` rather than the raw type, as
the raw type will not be de-optionalized.
"""
resolved_type: _MatchedOnType
"""The de-optionalized, "resolved" type after accounting for :pep:`695`
and :pep:`593` indirection:
* If the annotation were a plain Python type or simple alias e.g.
``Mapped[int]``, the resolved_type will be ``int``
* If the annotation refers to a :pep:`695` type that references a
plain Python type or simple alias, e.g. ``type MyType = int``
then ``Mapped[MyType]``, the type will refer to the ``__value__``
of the :pep:`695` type, e.g. ``int``, the same as
:attr:`.TypeResolve.pep_695_resolved_value`.
* If the annotation refers to a :pep:`593` ``Annotated`` object, or
a :pep:`695` type alias that in turn refers to a :pep:`593` type,
then the type will be the inner type inside of the ``Annotated``,
e.g. ``MyType = Annotated[float, mapped_column(...)]`` with
``Mapped[MyType]`` becomes ``float``, the same as
:attr:`.TypeResolve.pep_593_resolved_argument`.
"""
raw_pep_593_type: Optional[GenericProtocol[Any]]
"""The de-optionalized :pep:`593` type, if the raw type referred to one.
This would refer to an ``Annotated`` object.
"""
pep_593_resolved_argument: Optional[_MatchedOnType]
"""The type extracted from a :pep:`593` ``Annotated`` construct, if the
type referred to one.
When present, this type would be the same as the
:attr:`.TypeResolve.resolved_type`.
"""
raw_pep_695_type: Optional[TypeAliasType]
"The de-optionalized :pep:`695` type, if the raw type referred to one."
pep_695_resolved_value: Optional[_MatchedOnType]
"""The de-optionalized type referenced by the raw :pep:`695` type, if the
raw type referred to one.
When present, and a :pep:`593` type is not present, this type would be the
same as the :attr:`.TypeResolve.resolved_type`.
"""
def __init__(
self,
registry: RegistryType,
cls: Any,
key: str,
raw_type: _MatchedOnType,
resolved_type: _MatchedOnType,
raw_pep_593_type: Optional[GenericProtocol[Any]],
pep_593_resolved_argument: Optional[_MatchedOnType],
raw_pep_695_type: Optional[TypeAliasType],
pep_695_resolved_value: Optional[_MatchedOnType],
):
self.registry = registry
self.cls = cls
self.key = key
self.raw_type = raw_type
self.resolved_type = resolved_type
self.raw_pep_593_type = raw_pep_593_type
self.pep_593_resolved_argument = pep_593_resolved_argument
self.raw_pep_695_type = raw_pep_695_type
self.pep_695_resolved_value = pep_695_resolved_value
def resolve(
self, python_type: _MatchedOnType
) -> Optional[sqltypes.TypeEngine[Any]]:
"""Resolve the given python type using the type_annotation_map of
the :class:`registry`.
:param python_type: a Python type (e.g. ``int``, ``str``, etc.) Any
type object that's present in
:paramref:`_orm.registry_type_annotation_map` should produce a
non-``None`` result.
:return: a SQLAlchemy :class:`.TypeEngine` instance
(e.g. :class:`.Integer`,
:class:`.String`, etc.), or ``None`` to indicate no type could be
matched.
"""
return self.registry._resolve_type(python_type)
def as_declarative(**kw: Any) -> Callable[[Type[_T]], Type[_T]]:
"""
Class decorator which will adapt a given class into a
:func:`_orm.declarative_base`.
This function makes use of the :meth:`_orm.registry.as_declarative_base`
method, by first creating a :class:`_orm.registry` automatically
and then invoking the decorator.
E.g.::
from sqlalchemy.orm import as_declarative
@as_declarative()
class Base:
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
id = Column(Integer, primary_key=True)
class MyMappedClass(Base): ...
.. seealso::
:meth:`_orm.registry.as_declarative_base`
"""
metadata, class_registry = (
kw.pop("metadata", None),
kw.pop("class_registry", None),
)
return registry(
metadata=metadata, class_registry=class_registry
).as_declarative_base(**kw)
@compat_typing.dataclass_transform(
field_specifiers=(
MappedColumn,
RelationshipProperty,
Composite,
Synonym,
mapped_column,
relationship,
composite,
synonym,
deferred,
),
)
def mapped_as_dataclass(
registry: RegistryType,
/,
*,
init: Union[_NoArg, bool] = _NoArg.NO_ARG,
repr: Union[_NoArg, bool] = _NoArg.NO_ARG, # noqa: A002
eq: Union[_NoArg, bool] = _NoArg.NO_ARG,
order: Union[_NoArg, bool] = _NoArg.NO_ARG,
unsafe_hash: Union[_NoArg, bool] = _NoArg.NO_ARG,
match_args: Union[_NoArg, bool] = _NoArg.NO_ARG,
kw_only: Union[_NoArg, bool] = _NoArg.NO_ARG,
dataclass_callable: Union[
_NoArg, Callable[..., Type[Any]]
] = _NoArg.NO_ARG,
) -> Callable[[Type[_O]], Type[_O]]:
"""Standalone function form of :meth:`_orm.registry.mapped_as_dataclass`
which may have better compatibility with mypy.
The :class:`_orm.registry` is passed as the first argument to the
decorator.
e.g.::
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_as_dataclass
from sqlalchemy.orm import mapped_column
from sqlalchemy.orm import registry
some_registry = registry()
@mapped_as_dataclass(some_registry)
class Relationships:
__tablename__ = "relationships"
entity_id1: Mapped[int] = mapped_column(primary_key=True)
entity_id2: Mapped[int] = mapped_column(primary_key=True)
level: Mapped[int] = mapped_column(Integer)
.. versionadded:: 2.0.44
"""
def decorate(cls: Type[_O]) -> Type[_O]:
_generate_dc_transforms(
init=init,
repr=repr,
eq=eq,
order=order,
unsafe_hash=unsafe_hash,
match_args=match_args,
kw_only=kw_only,
dataclass_callable=dataclass_callable,
cls_=cls,
)
_ORMClassConfigurator._as_declarative(registry, cls, cls.__dict__)
return cls
return decorate
@inspection._inspects(
DeclarativeMeta, DeclarativeBase, DeclarativeAttributeIntercept
)
def _inspect_decl_meta(cls: Type[Any]) -> Optional[Mapper[Any]]:
mp: Optional[Mapper[Any]] = _inspect_mapped_class(cls)
if mp is None:
if _DeferredDeclarativeConfig.has_cls(cls):
_DeferredDeclarativeConfig.raise_unmapped_for_cls(cls)
return mp
@compat_typing.dataclass_transform(
field_specifiers=(
MappedColumn,
RelationshipProperty,
Composite,
Synonym,
mapped_column,
relationship,
composite,
synonym,
deferred,
),
)
@overload
def unmapped_dataclass(__cls: Type[_O], /) -> Type[_O]: ...
@overload
def unmapped_dataclass(
__cls: Literal[None] = ...,
/,
*,
init: Union[_NoArg, bool] = ...,
repr: Union[_NoArg, bool] = ..., # noqa: A002
eq: Union[_NoArg, bool] = ...,
order: Union[_NoArg, bool] = ...,
unsafe_hash: Union[_NoArg, bool] = ...,
match_args: Union[_NoArg, bool] = ...,
kw_only: Union[_NoArg, bool] = ...,
dataclass_callable: Union[_NoArg, Callable[..., Type[Any]]] = ...,
) -> Callable[[Type[_O]], Type[_O]]: ...
def unmapped_dataclass(
__cls: Optional[Type[_O]] = None,
/,
*,
init: Union[_NoArg, bool] = _NoArg.NO_ARG,
repr: Union[_NoArg, bool] = _NoArg.NO_ARG, # noqa: A002
eq: Union[_NoArg, bool] = _NoArg.NO_ARG,
order: Union[_NoArg, bool] = _NoArg.NO_ARG,
unsafe_hash: Union[_NoArg, bool] = _NoArg.NO_ARG,
match_args: Union[_NoArg, bool] = _NoArg.NO_ARG,
kw_only: Union[_NoArg, bool] = _NoArg.NO_ARG,
dataclass_callable: Union[
_NoArg, Callable[..., Type[Any]]
] = _NoArg.NO_ARG,
) -> Union[Type[_O], Callable[[Type[_O]], Type[_O]]]:
"""Decorator which allows the creation of dataclass-compatible mixins
within mapped class hierarchies based on the
:func:`_orm.mapped_as_dataclass` decorator.
Parameters are the same as those of :func:`_orm.mapped_as_dataclass`.
The decorator turns the given class into a SQLAlchemy-compatible dataclass
in the same way that :func:`_orm.mapped_as_dataclass` does, taking
into account :func:`_orm.mapped_column` and other attributes for dataclass-
specific directives, but not actually mapping the class.
To create unmapped dataclass mixins when using a class hierarchy defined
by :class:`.DeclarativeBase` and :class:`.MappedAsDataclass`, the
:class:`.MappedAsDataclass` class may be subclassed alone for a similar
effect.
.. versionadded:: 2.1
.. seealso::
:ref:`orm_declarative_dc_mixins` - background and example use.
"""
def decorate(cls: Type[_O]) -> Type[_O]:
_generate_dc_transforms(
init=init,
repr=repr,
eq=eq,
order=order,
unsafe_hash=unsafe_hash,
match_args=match_args,
kw_only=kw_only,
dataclass_callable=dataclass_callable,
cls_=cls,
)
_ORMClassConfigurator._as_unmapped_dataclass(cls, cls.__dict__)
return cls
if __cls:
return decorate(__cls)
else:
return decorate
| TypeResolve |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_redshift_cluster.py | {
"start": 5692,
"end": 9184
} | class ____:
@mock.patch.object(RedshiftHook, "cluster_status")
@mock.patch.object(RedshiftHook, "conn")
def test_create_cluster_snapshot_is_called_when_cluster_is_available(
self, mock_conn, mock_cluster_status
):
mock_cluster_status.return_value = "available"
create_snapshot = RedshiftCreateClusterSnapshotOperator(
task_id="test_snapshot",
cluster_identifier="test_cluster",
snapshot_identifier="test_snapshot",
retention_period=1,
tags=[
{
"Key": "user",
"Value": "airflow",
}
],
)
create_snapshot.execute(None)
mock_conn.create_cluster_snapshot.assert_called_once_with(
ClusterIdentifier="test_cluster",
SnapshotIdentifier="test_snapshot",
ManualSnapshotRetentionPeriod=1,
Tags=[
{
"Key": "user",
"Value": "airflow",
}
],
)
mock_conn.get_waiter.assert_not_called()
@mock.patch.object(RedshiftHook, "cluster_status")
def test_raise_exception_when_cluster_is_not_available(self, mock_cluster_status):
mock_cluster_status.return_value = "paused"
create_snapshot = RedshiftCreateClusterSnapshotOperator(
task_id="test_snapshot", cluster_identifier="test_cluster", snapshot_identifier="test_snapshot"
)
with pytest.raises(AirflowException):
create_snapshot.execute(None)
@mock.patch.object(RedshiftHook, "cluster_status")
@mock.patch.object(RedshiftHook, "conn")
def test_create_cluster_snapshot_with_wait(self, mock_conn, mock_cluster_status):
mock_cluster_status.return_value = "available"
create_snapshot = RedshiftCreateClusterSnapshotOperator(
task_id="test_snapshot",
cluster_identifier="test_cluster",
snapshot_identifier="test_snapshot",
wait_for_completion=True,
)
create_snapshot.execute(None)
mock_conn.get_waiter.return_value.wait.assert_called_once_with(
ClusterIdentifier="test_cluster",
WaiterConfig={"Delay": 15, "MaxAttempts": 20},
)
@mock.patch.object(RedshiftHook, "cluster_status")
@mock.patch.object(RedshiftHook, "create_cluster_snapshot")
def test_create_cluster_snapshot_deferred(self, mock_create_cluster_snapshot, mock_cluster_status):
mock_cluster_status.return_value = "available"
mock_create_cluster_snapshot.return_value = True
create_snapshot = RedshiftCreateClusterSnapshotOperator(
task_id="test_snapshot",
cluster_identifier="test_cluster",
snapshot_identifier="test_snapshot",
deferrable=True,
)
with pytest.raises(TaskDeferred) as exc:
create_snapshot.execute(None)
assert isinstance(exc.value.trigger, RedshiftCreateClusterSnapshotTrigger), (
"Trigger is not a RedshiftCreateClusterSnapshotTrigger"
)
def test_template_fields(self):
operator = RedshiftCreateClusterSnapshotOperator(
task_id="test_snapshot",
cluster_identifier="test_cluster",
snapshot_identifier="test_snapshot",
wait_for_completion=True,
)
validate_template_fields(operator)
| TestRedshiftCreateClusterSnapshotOperator |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/waiters/test_comprehend.py | {
"start": 3170,
"end": 4841
} | class ____(TestComprehendCustomWaitersBase):
WAITER_NAME = "create_document_classifier_complete"
@pytest.fixture
def mock_describe_document_classifier(self):
with mock.patch.object(self.client, "describe_document_classifier") as mock_getter:
yield mock_getter
@pytest.mark.parametrize("state", ComprehendCreateDocumentClassifierCompletedSensor.SUCCESS_STATES)
def test_create_document_classifier_complete(self, state, mock_describe_document_classifier):
mock_describe_document_classifier.return_value = {"DocumentClassifierProperties": {"Status": state}}
ComprehendHook().get_waiter(self.WAITER_NAME).wait(DocumentClassifierArn="arn")
@pytest.mark.parametrize("state", ComprehendCreateDocumentClassifierCompletedSensor.FAILURE_STATES)
def test_create_document_classifier_failed(self, state, mock_describe_document_classifier):
mock_describe_document_classifier.return_value = {"DocumentClassifierProperties": {"Status": state}}
with pytest.raises(botocore.exceptions.WaiterError):
ComprehendHook().get_waiter(self.WAITER_NAME).wait(DocumentClassifierArn="arn")
def test_create_document_classifier_wait(self, mock_describe_document_classifier):
wait = {"DocumentClassifierProperties": {"Status": "TRAINING"}}
success = {"DocumentClassifierProperties": {"Status": "TRAINED"}}
mock_describe_document_classifier.side_effect = [wait, wait, success]
ComprehendHook().get_waiter(self.WAITER_NAME).wait(
DocumentClassifierArn="arn", WaiterConfig={"Delay": 0.01, "MaxAttempts": 3}
)
| TestComprehendDocumentClassifierCompleteWaiter |
python | spulec__freezegun | freezegun/api.py | {
"start": 11056,
"end": 15967
} | class ____(real_datetime, FakeDate, metaclass=FakeDatetimeMeta):
def __add__(self, other: Any) -> "FakeDatetime": # type: ignore
result = real_datetime.__add__(self, other)
if result is NotImplemented:
return result
return datetime_to_fakedatetime(result)
def __sub__(self, other: Any) -> "FakeDatetime": # type: ignore
result = real_datetime.__sub__(self, other)
if result is NotImplemented:
return result # type: ignore
if isinstance(result, real_datetime):
return datetime_to_fakedatetime(result)
else:
return result # type: ignore
def astimezone(self, tz: Optional[datetime.tzinfo]=None) -> "FakeDatetime":
if tz is None:
tz = tzlocal()
return datetime_to_fakedatetime(real_datetime.astimezone(self, tz))
@classmethod
def fromtimestamp(cls, t: float, tz: Optional[datetime.tzinfo]=None) -> "FakeDatetime":
if tz is None:
tz = dateutil.tz.tzoffset("freezegun", cls._tz_offset())
result = real_datetime.fromtimestamp(t, tz=tz).replace(tzinfo=None)
else:
result = real_datetime.fromtimestamp(t, tz)
return datetime_to_fakedatetime(result)
def timestamp(self) -> float:
if self.tzinfo is None:
return (self - _EPOCH - self._tz_offset()).total_seconds() # type: ignore
return (self - _EPOCHTZ).total_seconds() # type: ignore
@classmethod
def now(cls, tz: Optional[datetime.tzinfo] = None) -> "FakeDatetime":
now = cls._time_to_freeze() or real_datetime.now()
if tz:
result = tz.fromutc(now.replace(tzinfo=tz)) + cls._tz_offset()
else:
result = now + cls._tz_offset()
return datetime_to_fakedatetime(result)
def date(self) -> "FakeDate":
return date_to_fakedate(self)
@property
def nanosecond(self) -> int:
try:
# noinspection PyUnresolvedReferences
return real_datetime.nanosecond # type: ignore
except AttributeError:
return 0
@classmethod
def today(cls) -> "FakeDatetime":
return cls.now(tz=None)
@classmethod
def utcnow(cls) -> "FakeDatetime":
result = cls._time_to_freeze() or real_datetime.now(datetime.timezone.utc)
return datetime_to_fakedatetime(result)
@staticmethod
def _time_to_freeze() -> Optional[datetime.datetime]:
if freeze_factories:
return get_current_time()
return None
@classmethod
def _tz_offset(cls) -> datetime.timedelta:
return tz_offsets[-1]
FakeDatetime.min = datetime_to_fakedatetime(real_datetime.min)
FakeDatetime.max = datetime_to_fakedatetime(real_datetime.max)
def convert_to_timezone_naive(time_to_freeze: datetime.datetime) -> datetime.datetime:
"""
Converts a potentially timezone-aware datetime to be a naive UTC datetime
"""
if time_to_freeze.tzinfo:
time_to_freeze -= time_to_freeze.utcoffset() # type: ignore
time_to_freeze = time_to_freeze.replace(tzinfo=None)
return time_to_freeze
def pickle_fake_date(datetime_: datetime.date) -> Tuple[Type[FakeDate], Tuple[int, int, int]]:
# A pickle function for FakeDate
return FakeDate, (
datetime_.year,
datetime_.month,
datetime_.day,
)
def pickle_fake_datetime(datetime_: datetime.datetime) -> Tuple[Type[FakeDatetime], Tuple[int, int, int, int, int, int, int, Optional[datetime.tzinfo]]]:
# A pickle function for FakeDatetime
return FakeDatetime, (
datetime_.year,
datetime_.month,
datetime_.day,
datetime_.hour,
datetime_.minute,
datetime_.second,
datetime_.microsecond,
datetime_.tzinfo,
)
def _parse_time_to_freeze(time_to_freeze_str: Optional[_Freezable]) -> datetime.datetime:
"""Parses all the possible inputs for freeze_time
:returns: a naive ``datetime.datetime`` object
"""
if time_to_freeze_str is None:
time_to_freeze_str = datetime.datetime.now(datetime.timezone.utc)
if isinstance(time_to_freeze_str, datetime.datetime):
time_to_freeze = time_to_freeze_str
elif isinstance(time_to_freeze_str, datetime.date):
time_to_freeze = datetime.datetime.combine(time_to_freeze_str, datetime.time())
elif isinstance(time_to_freeze_str, datetime.timedelta):
time_to_freeze = datetime.datetime.now(datetime.timezone.utc) + time_to_freeze_str
else:
time_to_freeze = parser.parse(time_to_freeze_str) # type: ignore
return convert_to_timezone_naive(time_to_freeze)
def _parse_tz_offset(tz_offset: Union[datetime.timedelta, float]) -> datetime.timedelta:
if isinstance(tz_offset, datetime.timedelta):
return tz_offset
else:
return datetime.timedelta(hours=tz_offset)
| FakeDatetime |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_C.py | {
"start": 4314,
"end": 7477
} | class ____(Benchmark):
r"""
Cola objective function.
This class defines the Cola global optimization problem. The 17-dimensional
function computes indirectly the formula :math:`f(n, u)` by setting
:math:`x_0 = y_0, x_1 = u_0, x_i = u_{2(i2)}, y_i = u_{2(i2)+1}` :
.. math::
f_{\text{Cola}}(x) = \sum_{i<j}^{n} \left (r_{i,j} - d_{i,j} \right )^2
Where :math:`r_{i, j}` is given by:
.. math::
r_{i, j} = \sqrt{(x_i - x_j)^2 + (y_i - y_j)^2}
And :math:`d` is a symmetric matrix given by:
.. math::
\{d} = \left [ d_{ij} \right ] = \begin{pmatrix}
1.27 & & & & & & & & \\
1.69 & 1.43 & & & & & & & \\
2.04 & 2.35 & 2.43 & & & & & & \\
3.09 & 3.18 & 3.26 & 2.85 & & & & & \\
3.20 & 3.22 & 3.27 & 2.88 & 1.55 & & & & \\
2.86 & 2.56 & 2.58 & 2.59 & 3.12 & 3.06 & & & \\
3.17 & 3.18 & 3.18 & 3.12 & 1.31 & 1.64 & 3.00 & \\
3.21 & 3.18 & 3.18 & 3.17 & 1.70 & 1.36 & 2.95 & 1.32 & \\
2.38 & 2.31 & 2.42 & 1.94 & 2.85 & 2.81 & 2.56 & 2.91 & 2.97
\end{pmatrix}
This function has bounds :math:`x_0 \in [0, 4]` and :math:`x_i \in [-4, 4]`
for :math:`i = 1, ..., n-1`.
*Global optimum* 11.7464.
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=17):
Benchmark.__init__(self, dimensions)
self._bounds = [[0.0, 4.0]] + list(zip([-4.0] * (self.N - 1),
[4.0] * (self.N - 1)))
self.global_optimum = [[0.651906, 1.30194, 0.099242, -0.883791,
-0.8796, 0.204651, -3.28414, 0.851188,
-3.46245, 2.53245, -0.895246, 1.40992,
-3.07367, 1.96257, -2.97872, -0.807849,
-1.68978]]
self.fglob = 11.7464
self.d = asarray([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1.27, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1.69, 1.43, 0, 0, 0, 0, 0, 0, 0, 0],
[2.04, 2.35, 2.43, 0, 0, 0, 0, 0, 0, 0],
[3.09, 3.18, 3.26, 2.85, 0, 0, 0, 0, 0, 0],
[3.20, 3.22, 3.27, 2.88, 1.55, 0, 0, 0, 0, 0],
[2.86, 2.56, 2.58, 2.59, 3.12, 3.06, 0, 0, 0, 0],
[3.17, 3.18, 3.18, 3.12, 1.31, 1.64, 3.00, 0, 0, 0],
[3.21, 3.18, 3.18, 3.17, 1.70, 1.36, 2.95, 1.32, 0, 0],
[2.38, 2.31, 2.42, 1.94, 2.85, 2.81, 2.56, 2.91, 2.97, 0.]])
def fun(self, x, *args):
self.nfev += 1
xi = atleast_2d(asarray([0.0, x[0]] + list(x[1::2])))
xj = repeat(xi, size(xi, 1), axis=0)
xi = xi.T
yi = atleast_2d(asarray([0.0, 0.0] + list(x[2::2])))
yj = repeat(yi, size(yi, 1), axis=0)
yi = yi.T
inner = (sqrt((xi - xj) ** 2 + (yi - yj) ** 2) - self.d) ** 2
inner = tril(inner, -1)
return sum(sum(inner, axis=1))
| Cola |
python | pytorch__pytorch | test/higher_order_ops/test_invoke_subgraph.py | {
"start": 100040,
"end": 104152
} | class ____(torch.nn.Module):
def forward(self, x: "f32[8]", y: "f32[8]"):
repeated_subgraph0 = self.repeated_subgraph0
invoke_subgraph = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0, 'subgraph_0', x, y); repeated_subgraph0 = x = None
getitem: "f32[8]" = invoke_subgraph[0]; invoke_subgraph = None
repeated_subgraph0_1 = self.repeated_subgraph0
invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0_1, 'subgraph_0', getitem, y); repeated_subgraph0_1 = getitem = y = None
getitem_1: "f32[8]" = invoke_subgraph_1[0]; invoke_subgraph_1 = None
return (getitem_1,)
class repeated_subgraph0(torch.nn.Module):
def forward(self, arg0_1: "f32[8]", arg1_1: "f32[8]"):
mul: "f32[8]" = torch.ops.aten.mul.Tensor(arg0_1, arg1_1); arg0_1 = arg1_1 = None
return (mul,)
""",
)
def test_unbacked(self):
@nested_compile_region
def gn(x, y):
b = x.item()
torch._check(b < y.shape[0])
return y[:b].clone()
class M(torch.nn.Module):
def forward(self, x, y):
res = []
for _ in range(10):
res.append(gn(x, y))
return torch.cat(res)
x = torch.tensor(4)
y = torch.randn(8)
ep = torch.export.export(M(), (x, y), strict=self.strict)
ep = ep.run_decompositions()
self.assertTrue(torch.allclose(ep.module()(x, y), M()(x, y)))
self.assertEqual(len(list(ep.graph_module.named_modules())), 2)
def test_pending_unbacked(self):
class M(torch.nn.Module):
@nested_compile_region
def gn(self, x):
u = x[0].item()
return x * u
def forward(self, x):
for _ in range(4):
x = self.gn(x)
return x
ep = torch.export.export(
M(),
(torch.randn(8),),
strict=self.strict,
dynamic_shapes={"x": {0: torch.export.Dim.DYNAMIC}},
)
ep = ep.run_decompositions()
self.assertEqual(len(list(ep.graph_module.named_modules())), 2)
ep = torch.export.export(
M(),
(torch.randn(8, requires_grad=True),),
strict=self.strict,
dynamic_shapes={"x": {0: torch.export.Dim.DYNAMIC}},
)
ep = ep.run_decompositions()
self.assertEqual(len(list(ep.graph_module.named_modules())), 2)
def test_simple_method(self):
class M(torch.nn.Module):
@nested_compile_region
def gn(self, x, y):
return torch.mul(x, y)
def forward(self, x, y):
x = self.gn(x, y)
x = self.gn(x, y)
return x
x = torch.randn(8, requires_grad=True)
y = torch.randn(8, requires_grad=True)
ep = torch.export.export(M(), (x, y), strict=self.strict)
self.assertTrue(torch.allclose(ep.module()(x, y), M()(x, y)))
self.assertEqual(len(list(ep.graph_module.named_modules())), 2)
def test_multiple_module(self):
b = torch.randn(8)
class N(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("buf", b)
@nested_compile_region
def forward(self, x, y):
return x * y + self.buf
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.mod_list = torch.nn.ModuleList(N() for _ in range(10))
def forward(self, x, y):
for m in self.mod_list:
x = m(x, y)
return x
x = torch.randn(8, requires_grad=True)
y = torch.randn(8, requires_grad=True)
ep = torch.export.export(M(), (x, y), strict=self.strict)
self.assertTrue(torch.allclose(ep.module()(x, y), M()(x, y)))
self.assertEqual(len(list(ep.graph_module.named_modules())), 2)
| GraphModule |
python | django__django | tests/admin_inlines/admin.py | {
"start": 3886,
"end": 3955
} | class ____(admin.TabularInline):
model = Inner2
| InnerInline2Tabular |
python | keras-team__keras | keras/src/backend/common/symbolic_scope_test.py | {
"start": 214,
"end": 825
} | class ____(testing.TestCase):
def test_basic_flow(self):
# Define a function that behaves differently according to
# `in_symbolic_scope`.
def compute_loss(y, y_pred):
if in_symbolic_scope():
return ops.zeros_like(y)
return ops.add(y, y_pred)
y = ops.ones(shape=(2,))
y_pred = ops.ones(shape=(2,))
with SymbolicScope():
loss = compute_loss(y, y_pred)
self.assertAllClose(loss, np.zeros((2,)))
loss = compute_loss(y, y_pred)
self.assertAllClose(loss, 2 * np.ones((2,)))
| TestSymbolicScope |
python | doocs__leetcode | solution/1800-1899/1803.Count Pairs With XOR in a Range/Solution.py | {
"start": 0,
"end": 773
} | class ____:
def __init__(self):
self.children = [None] * 2
self.cnt = 0
def insert(self, x):
node = self
for i in range(15, -1, -1):
v = x >> i & 1
if node.children[v] is None:
node.children[v] = Trie()
node = node.children[v]
node.cnt += 1
def search(self, x, limit):
node = self
ans = 0
for i in range(15, -1, -1):
if node is None:
return ans
v = x >> i & 1
if limit >> i & 1:
if node.children[v]:
ans += node.children[v].cnt
node = node.children[v ^ 1]
else:
node = node.children[v]
return ans
| Trie |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_format15.py | {
"start": 315,
"end": 1614
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_format15.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with chart formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [42401792, 42403712]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
"trendline": {"type": "linear"},
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
chart.set_legend({"delete_series": [2, 0]})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | ray-project__ray | ci/ray_ci/test_ray_docker_container.py | {
"start": 490,
"end": 23361
} | class ____(RayCITestBase):
cmds = []
def test_run(self) -> None:
def _mock_run_script(input: List[str]) -> None:
self.cmds.append(input[0])
with mock.patch(
"ci.ray_ci.ray_docker_container.docker_pull", return_value=None
), mock.patch(
"ci.ray_ci.docker_container.LinuxContainer.run_script",
side_effect=_mock_run_script,
):
sha = "123456"
ray_ci_build_id = "a1b2c3d4"
cuda = "cu12.4.1-cudnn"
# Run with default python version and ray image
self.cmds = []
v = DEFAULT_PYTHON_TAG_VERSION
cv = self.get_cpp_version(v)
pv = self.get_python_version(v)
container = RayDockerContainer(v, cuda, "ray")
container.run()
cmd = self.cmds[-1]
assert cmd == (
"./ci/build/build-ray-docker.sh "
f"ray-{RAY_VERSION}-{cv}-{cv}-manylinux2014_x86_64.whl "
f"{_DOCKER_ECR_REPO}:{ray_ci_build_id}-ray-py{v}-{cuda}-base "
"requirements_compiled.txt "
f"rayproject/ray:{sha}-{pv}-cu124 "
f"ray:{sha}-{pv}-cu124_pip-freeze.txt"
)
# Run with specific python version and ray-llm image
v = "3.11"
cv = self.get_cpp_version(v)
pv = self.get_python_version(v)
cuda = "cu12.8.1-cudnn"
container = RayDockerContainer(v, cuda, "ray-llm")
container.run()
cmd = self.cmds[-1]
assert cmd == (
"./ci/build/build-ray-docker.sh "
f"ray-{RAY_VERSION}-{cv}-{cv}-manylinux2014_x86_64.whl "
f"{_DOCKER_ECR_REPO}:{ray_ci_build_id}-ray-llm-py{v}-{cuda}-base "
"requirements_compiled.txt "
f"rayproject/ray-llm:{sha}-{pv}-cu128 "
f"ray-llm:{sha}-{pv}-cu128_pip-freeze.txt"
)
# Run with non-default python version and ray-ml image
v = self.get_non_default_python()
cv = self.get_cpp_version(v)
pv = self.get_python_version(v)
cuda = "cu12.4.1-cudnn"
container = RayDockerContainer(v, "cpu", "ray-ml")
container.run()
cmd = self.cmds[-1]
assert cmd == (
"./ci/build/build-ray-docker.sh "
f"ray-{RAY_VERSION}-{cv}-{cv}-manylinux2014_x86_64.whl "
f"{_DOCKER_ECR_REPO}:{ray_ci_build_id}-ray-ml-py{v}-cpu-base "
"requirements_compiled.txt "
f"rayproject/ray-ml:{sha}-{pv}-cpu "
f"ray-ml:{sha}-{pv}-cpu_pip-freeze.txt"
)
def test_run_nightly(self) -> None:
def _mock_run_script(input: List[str]) -> None:
for i in input:
self.cmds.append(i)
with mock.patch(
"ci.ray_ci.ray_docker_container.docker_pull", return_value=None
), mock.patch(
"ci.ray_ci.docker_container.LinuxContainer.run_script",
side_effect=_mock_run_script,
), mock.patch(
"ci.ray_ci.ray_docker_container.RayDockerContainer._should_upload",
return_value=True,
), mock.patch.dict(
os.environ, {"RAYCI_SCHEDULE": "nightly"}
):
formatted_date = datetime.now().strftime("%y%m%d")
sha = "123456"
ray_ci_build_id = "a1b2c3d4"
# Run with default python version and ray image
self.cmds = []
v = DEFAULT_PYTHON_TAG_VERSION
cv = self.get_cpp_version(v)
pv = self.get_python_version(v)
cuda = "cu12.1.1-cudnn8"
container = RayDockerContainer(v, cuda, "ray")
container.run()
assert len(self.cmds) == 18
assert self.cmds[0] == (
"./ci/build/build-ray-docker.sh "
f"ray-{RAY_VERSION}-{cv}-{cv}-manylinux2014_x86_64.whl "
f"{_DOCKER_ECR_REPO}:{ray_ci_build_id}-ray-py{v}-{cuda}-base "
"requirements_compiled.txt "
f"rayproject/ray:{sha}-{pv}-cu121 "
f"ray:{sha}-{pv}-cu121_pip-freeze.txt"
)
assert (
self.cmds[1]
== "bazel run .buildkite:copy_files -- --destination docker_login"
)
for i in range(2, 10): # check nightly.date.sha alias
assert f"/ray:nightly.{formatted_date}.{sha}" in self.cmds[i]
for i in range(10, len(self.cmds)): # check nightly alias
assert "/ray:nightly-" in self.cmds[i]
# Run with specific python version and ray-llm image
self.cmds = []
v = "3.11"
cv = self.get_cpp_version(v)
pv = self.get_python_version(v)
cuda = "cu12.8.1-cudnn"
container = RayDockerContainer(v, cuda, "ray-llm")
container.run()
assert len(self.cmds) == 6
assert self.cmds[0] == (
"./ci/build/build-ray-docker.sh "
f"ray-{RAY_VERSION}-{cv}-{cv}-manylinux2014_x86_64.whl "
f"{_DOCKER_ECR_REPO}:{ray_ci_build_id}-ray-llm-py{v}-{cuda}-base "
"requirements_compiled.txt "
f"rayproject/ray-llm:{sha}-{pv}-cu128 "
f"ray-llm:{sha}-{pv}-cu128_pip-freeze.txt"
)
assert (
self.cmds[1]
== "bazel run .buildkite:copy_files -- --destination docker_login"
)
for i in range(2, 4): # check nightly.date.sha alias
assert f"/ray-llm:nightly.{formatted_date}.{sha}" in self.cmds[i]
for i in range(4, len(self.cmds)): # check nightly alias
assert "/ray-llm:nightly-" in self.cmds[i]
# Run with non-default python version and ray-ml image
self.cmds = []
v = self.get_non_default_python()
cv = self.get_cpp_version(v)
pv = self.get_python_version(v)
container = RayDockerContainer(v, "cpu", "ray-ml")
container.run()
assert len(self.cmds) == 6
assert self.cmds[0] == (
"./ci/build/build-ray-docker.sh "
f"ray-{RAY_VERSION}-{cv}-{cv}-manylinux2014_x86_64.whl "
f"{_DOCKER_ECR_REPO}:{ray_ci_build_id}-ray-ml-py{v}-cpu-base "
"requirements_compiled.txt "
f"rayproject/ray-ml:{sha}-{pv}-cpu "
f"ray-ml:{sha}-{pv}-cpu_pip-freeze.txt"
)
assert (
self.cmds[1]
== "bazel run .buildkite:copy_files -- --destination docker_login"
)
for i in range(2, 4): # check nightly.date.sha alias
assert f"/ray-ml:nightly.{formatted_date}.{sha}" in self.cmds[i]
for i in range(4, len(self.cmds)): # check nightly alias
assert "/ray-ml:nightly-" in self.cmds[i]
def test_run_daytime(self) -> None:
def _mock_run_script(input: List[str]) -> None:
for i in input:
self.cmds.append(i)
with mock.patch(
"ci.ray_ci.ray_docker_container.docker_pull", return_value=None
), mock.patch(
"ci.ray_ci.docker_container.LinuxContainer.run_script",
side_effect=_mock_run_script,
), mock.patch(
"ci.ray_ci.ray_docker_container.RayDockerContainer._should_upload",
return_value=False,
), mock.patch.dict(
os.environ, {"RAYCI_SCHEDULE": "daytime"}
):
sha = "123456"
ray_ci_build_id = "a1b2c3d4"
cuda = "cu11.8.0-cudnn8"
# Run with default python version and ray image
self.cmds = []
v = DEFAULT_PYTHON_TAG_VERSION
cv = self.get_cpp_version(v)
pv = self.get_python_version(v)
container = RayDockerContainer(v, cuda, "ray")
container.run()
assert len(self.cmds) == 1
assert self.cmds[0] == (
"./ci/build/build-ray-docker.sh "
f"ray-{RAY_VERSION}-{cv}-{cv}-manylinux2014_x86_64.whl "
f"{_DOCKER_ECR_REPO}:{ray_ci_build_id}-ray-py{v}-{cuda}-base "
"requirements_compiled.txt "
f"rayproject/ray:{sha}-{pv}-cu118 "
f"ray:{sha}-{pv}-cu118_pip-freeze.txt"
)
# Run with specific python version and ray-llm image
self.cmds = []
v = "3.11"
cuda = "cu12.8.1-cudnn"
cv = self.get_cpp_version(v)
pv = self.get_python_version(v)
container = RayDockerContainer(v, cuda, "ray-llm")
container.run()
assert len(self.cmds) == 1
assert self.cmds[0] == (
"./ci/build/build-ray-docker.sh "
f"ray-{RAY_VERSION}-{cv}-{cv}-manylinux2014_x86_64.whl "
f"{_DOCKER_ECR_REPO}:{ray_ci_build_id}-ray-llm-py{v}-{cuda}-base "
"requirements_compiled.txt "
f"rayproject/ray-llm:{sha}-{pv}-cu128 "
f"ray-llm:{sha}-{pv}-cu128_pip-freeze.txt"
)
# Run with non-default python version and ray-ml image
self.cmds = []
v = self.get_non_default_python()
cv = self.get_cpp_version(v)
pv = self.get_python_version(v)
container = RayDockerContainer(v, "cpu", "ray-ml")
container.run()
assert len(self.cmds) == 1
assert self.cmds[0] == (
"./ci/build/build-ray-docker.sh "
f"ray-{RAY_VERSION}-{cv}-{cv}-manylinux2014_x86_64.whl "
f"{_DOCKER_ECR_REPO}:{ray_ci_build_id}-ray-ml-py{v}-cpu-base "
"requirements_compiled.txt "
f"rayproject/ray-ml:{sha}-{pv}-cpu "
f"ray-ml:{sha}-{pv}-cpu_pip-freeze.txt"
)
def test_canonical_tag(self) -> None:
sha = "123456"
v = DEFAULT_PYTHON_TAG_VERSION
pv = self.get_python_version(v)
container = RayDockerContainer(v, "cpu", "ray", canonical_tag="abc")
assert container._get_canonical_tag() == "abc"
container = RayDockerContainer(v, "cpu", "ray")
assert container._get_canonical_tag() == f"{sha}-{pv}-cpu"
container = RayDockerContainer(v, "cpu", "ray", "aarch64")
assert container._get_canonical_tag() == f"{sha}-{pv}-cpu-aarch64"
container = RayDockerContainer(v, GPU_PLATFORM, "ray-ml")
assert container._get_canonical_tag() == f"{sha}-{pv}-cu121"
with mock.patch.dict(os.environ, {"BUILDKITE_BRANCH": "releases/1.0.0"}):
container = RayDockerContainer(v, "cpu", "ray")
assert container._get_canonical_tag() == f"1.0.0.{sha}-{pv}-cpu"
with mock.patch.dict(
os.environ, {"BUILDKITE_BRANCH": "abc", "BUILDKITE_PULL_REQUEST": "123"}
):
container = RayDockerContainer(v, "cpu", "ray")
assert container._get_canonical_tag() == f"pr-123.{sha}-{pv}-cpu"
def test_get_image_tags(self) -> None:
# bulk logic of _get_image_tags is tested in its callers (get_image_name and
# get_canonical_tag), so we only test the basic cases here
sha = "123456"
rayci_build_id = "a1b2c3d4"
v = DEFAULT_PYTHON_TAG_VERSION
pv = self.get_python_version(v)
container = RayDockerContainer(v, "cpu", "ray")
formatted_date = datetime.now().strftime("%y%m%d")
with mock.patch.dict(os.environ, {"RAYCI_SCHEDULE": "daytime"}):
assert container._get_image_tags(external=True) == [
f"{sha}-{pv}-cpu",
f"{sha}-cpu",
f"{sha}-{pv}",
f"{sha}",
f"{rayci_build_id}-{pv}-cpu",
f"{rayci_build_id}-cpu",
f"{rayci_build_id}-{pv}",
f"{rayci_build_id}",
]
with mock.patch.dict(os.environ, {"RAYCI_SCHEDULE": "nightly"}):
assert container._get_image_tags(external=True) == [
f"nightly.{formatted_date}.{sha}-{pv}-cpu",
f"nightly.{formatted_date}.{sha}-cpu",
f"nightly.{formatted_date}.{sha}-{pv}",
f"nightly.{formatted_date}.{sha}",
f"nightly-{pv}-cpu",
"nightly-cpu",
f"nightly-{pv}",
"nightly",
]
def test_get_image_name(self) -> None:
sha = "123456"
rayci_build_id = "a1b2c3d4"
v = DEFAULT_PYTHON_TAG_VERSION
pv = self.get_python_version(v)
formatted_date = datetime.now().strftime("%y%m%d")
container = RayDockerContainer(v, "cpu", "ray")
with mock.patch.dict(os.environ, {"RAYCI_SCHEDULE": "daytime"}):
assert container._get_image_names() == [
f"rayproject/ray:{sha}-{pv}-cpu",
f"rayproject/ray:{sha}-cpu",
f"rayproject/ray:{sha}-{pv}",
f"rayproject/ray:{sha}",
f"rayproject/ray:{rayci_build_id}-{pv}-cpu",
f"rayproject/ray:{rayci_build_id}-cpu",
f"rayproject/ray:{rayci_build_id}-{pv}",
f"rayproject/ray:{rayci_build_id}",
]
with mock.patch.dict(os.environ, {"RAYCI_SCHEDULE": "nightly"}):
assert container._get_image_names() == [
f"rayproject/ray:nightly.{formatted_date}.{sha}-{pv}-cpu",
f"rayproject/ray:nightly.{formatted_date}.{sha}-cpu",
f"rayproject/ray:nightly.{formatted_date}.{sha}-{pv}",
f"rayproject/ray:nightly.{formatted_date}.{sha}",
f"rayproject/ray:nightly-{pv}-cpu",
"rayproject/ray:nightly-cpu",
f"rayproject/ray:nightly-{pv}",
"rayproject/ray:nightly",
]
container = RayDockerContainer(v, "cpu", "ray-extra")
with mock.patch.dict(os.environ, {"RAYCI_SCHEDULE": "daytime"}):
assert container._get_image_names() == [
f"rayproject/ray:{sha}-extra-{pv}-cpu",
f"rayproject/ray:{sha}-extra-cpu",
f"rayproject/ray:{sha}-extra-{pv}",
f"rayproject/ray:{sha}-extra",
f"rayproject/ray:{rayci_build_id}-extra-{pv}-cpu",
f"rayproject/ray:{rayci_build_id}-extra-cpu",
f"rayproject/ray:{rayci_build_id}-extra-{pv}",
f"rayproject/ray:{rayci_build_id}-extra",
]
with mock.patch.dict(os.environ, {"RAYCI_SCHEDULE": "nightly"}):
assert container._get_image_names() == [
f"rayproject/ray:nightly.{formatted_date}.{sha}-extra-{pv}-cpu",
f"rayproject/ray:nightly.{formatted_date}.{sha}-extra-cpu",
f"rayproject/ray:nightly.{formatted_date}.{sha}-extra-{pv}",
f"rayproject/ray:nightly.{formatted_date}.{sha}-extra",
f"rayproject/ray:nightly-extra-{pv}-cpu",
"rayproject/ray:nightly-extra-cpu",
f"rayproject/ray:nightly-extra-{pv}",
"rayproject/ray:nightly-extra",
]
v = "3.11"
pv = self.get_python_version(v)
container = RayDockerContainer(v, "cu12.8.1-cudnn", "ray-llm")
with mock.patch.dict(os.environ, {"RAYCI_SCHEDULE": "daytime"}):
assert container._get_image_names() == [
f"rayproject/ray-llm:{sha}-{pv}-cu128",
f"rayproject/ray-llm:{rayci_build_id}-{pv}-cu128",
]
with mock.patch.dict(os.environ, {"RAYCI_SCHEDULE": "nightly"}):
assert container._get_image_names() == [
f"rayproject/ray-llm:nightly.{formatted_date}.{sha}-{pv}-cu128",
f"rayproject/ray-llm:nightly-{pv}-cu128",
]
container = RayDockerContainer(v, "cu12.8.1-cudnn", "ray-llm-extra")
with mock.patch.dict(os.environ, {"RAYCI_SCHEDULE": "daytime"}):
assert container._get_image_names() == [
f"rayproject/ray-llm:{sha}-extra-{pv}-cu128",
f"rayproject/ray-llm:{rayci_build_id}-extra-{pv}-cu128",
]
with mock.patch.dict(os.environ, {"RAYCI_SCHEDULE": "nightly"}):
assert container._get_image_names() == [
f"rayproject/ray-llm:nightly.{formatted_date}.{sha}-extra-{pv}-cu128",
f"rayproject/ray-llm:nightly-extra-{pv}-cu128",
]
v = self.get_non_default_python()
pv = self.get_python_version(v)
container = RayDockerContainer(v, "cu12.1.1-cudnn8", "ray-ml")
with mock.patch.dict(os.environ, {"RAYCI_SCHEDULE": "daytime"}):
assert container._get_image_names() == [
f"rayproject/ray-ml:{sha}-{pv}-cu121",
f"rayproject/ray-ml:{sha}-{pv}-gpu",
f"rayproject/ray-ml:{sha}-{pv}",
f"rayproject/ray-ml:{rayci_build_id}-{pv}-cu121",
f"rayproject/ray-ml:{rayci_build_id}-{pv}-gpu",
f"rayproject/ray-ml:{rayci_build_id}-{pv}",
]
with mock.patch.dict(os.environ, {"RAYCI_SCHEDULE": "nightly"}):
assert container._get_image_names() == [
f"rayproject/ray-ml:nightly.{formatted_date}.{sha}-{pv}-cu121",
f"rayproject/ray-ml:nightly.{formatted_date}.{sha}-{pv}-gpu",
f"rayproject/ray-ml:nightly.{formatted_date}.{sha}-{pv}",
f"rayproject/ray-ml:nightly-{pv}-cu121",
f"rayproject/ray-ml:nightly-{pv}-gpu",
f"rayproject/ray-ml:nightly-{pv}",
]
container = RayDockerContainer(v, "cu12.1.1-cudnn8", "ray-ml-extra")
with mock.patch.dict(os.environ, {"RAYCI_SCHEDULE": "daytime"}):
assert container._get_image_names() == [
f"rayproject/ray-ml:{sha}-extra-{pv}-cu121",
f"rayproject/ray-ml:{sha}-extra-{pv}-gpu",
f"rayproject/ray-ml:{sha}-extra-{pv}",
f"rayproject/ray-ml:{rayci_build_id}-extra-{pv}-cu121",
f"rayproject/ray-ml:{rayci_build_id}-extra-{pv}-gpu",
f"rayproject/ray-ml:{rayci_build_id}-extra-{pv}",
]
release_version = "1.0.0"
with mock.patch.dict(
os.environ, {"BUILDKITE_BRANCH": f"releases/{release_version}"}
):
v = DEFAULT_PYTHON_TAG_VERSION
pv = self.get_python_version(v)
container = RayDockerContainer(v, "cpu", "ray")
assert container._get_image_names() == [
f"rayproject/ray:{release_version}.{sha}-{pv}-cpu",
f"rayproject/ray:{release_version}.{sha}-cpu",
f"rayproject/ray:{release_version}.{sha}-{pv}",
f"rayproject/ray:{release_version}.{sha}",
]
def test_get_python_version_tag(self) -> None:
v = DEFAULT_PYTHON_TAG_VERSION
pv = self.get_python_version(v)
container = RayDockerContainer(v, "cpu", "ray")
assert container._get_python_version_tag() == f"-{pv}"
def test_get_platform_tag(self) -> None:
v = DEFAULT_PYTHON_TAG_VERSION
container = RayDockerContainer(v, "cpu", "ray")
assert container._get_platform_tag() == "-cpu"
container = RayDockerContainer(v, "cu11.8.0-cudnn8", "ray")
assert container._get_platform_tag() == "-cu118"
container = RayDockerContainer(v, "cu12.3.2-cudnn9", "ray")
assert container._get_platform_tag() == "-cu123"
container = RayDockerContainer(v, "cu12.4.1-cudnn", "ray")
assert container._get_platform_tag() == "-cu124"
container = RayDockerContainer(v, "cu12.5.1-cudnn", "ray")
assert container._get_platform_tag() == "-cu125"
container = RayDockerContainer(v, "cu12.6.3-cudnn", "ray")
assert container._get_platform_tag() == "-cu126"
container = RayDockerContainer(v, "cu12.8.1-cudnn", "ray")
assert container._get_platform_tag() == "-cu128"
def test_should_upload(self) -> None:
v = DEFAULT_PYTHON_TAG_VERSION
test_cases = [
# environment_variables, expected_result (with upload flag on)
(
{
"BUILDKITE_PIPELINE_ID": get_global_config()[
"ci_pipeline_postmerge"
][0],
"BUILDKITE_BRANCH": "releases/1.0.0",
},
True, # satisfy upload requirements
),
(
{
"BUILDKITE_PIPELINE_ID": get_global_config()[
"ci_pipeline_postmerge"
][0],
"BUILDKITE_BRANCH": "master",
"RAYCI_SCHEDULE": "nightly",
},
True, # satisfy upload requirements
),
(
{
"BUILDKITE_PIPELINE_ID": "123456",
"RAYCI_SCHEDULE": "nightly",
"BUILDKITE_BRANCH": "master",
},
False, # not satisfied: pipeline is not postmerge
),
(
{
"BUILDKITE_PIPELINE_ID": get_global_config()[
"ci_pipeline_postmerge"
][-1],
"BUILDKITE_BRANCH": "non-release/1.2.3",
},
False, # not satisfied: branch is not release/master
),
(
{
"BUILDKITE_PIPELINE_ID": get_global_config()[
"ci_pipeline_postmerge"
][-1],
"BUILDKITE_BRANCH": "123",
"RAYCI_SCHEDULE": "nightly",
},
False, # not satisfied: branch is not master with nightly schedule
),
]
# Run with upload flag on
container = RayDockerContainer(v, "cpu", "ray", upload=True)
for env_var, expected_result in test_cases:
with mock.patch.dict(os.environ, env_var):
assert container._should_upload() is expected_result
# Run with upload flag off
container = RayDockerContainer(v, "cpu", "ray", upload=False)
for env_var, _ in test_cases:
with mock.patch.dict(os.environ, env_var):
assert container._should_upload() is False
if __name__ == "__main__":
sys.exit(pytest.main(["-vv", __file__]))
| TestRayDockerContainer |
python | joke2k__faker | tests/providers/test_address.py | {
"start": 70745,
"end": 75069
} | class ____:
"""Test ru_RU address provider methods"""
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city = faker.city_name()
assert isinstance(city, str)
assert city in RuRuAddressProvider.city_names
def test_country(self, faker, num_samples):
for _ in range(num_samples):
country = faker.country()
assert isinstance(country, str)
assert country in RuRuAddressProvider.countries
def test_region(self, faker, num_samples):
region_pattern: Pattern = re.compile(
r"(?:респ\. (?P<region_republic>.*))|"
r"(?:(?P<region_krai>.*?) край)|"
r"(?:(?P<region_oblast>.*?) обл.)|"
r"(?:(?P<region_ao>.*?) АО)",
)
for _ in range(num_samples):
region = faker.region()
assert isinstance(region, str)
match = region_pattern.fullmatch(region)
assert match
groupdict = match.groupdict()
assert any(
[
groupdict.get("region_republic") in RuRuAddressProvider.region_republics,
groupdict.get("region_krai") in RuRuAddressProvider.region_krai,
groupdict.get("region_oblast") in RuRuAddressProvider.region_oblast,
groupdict.get("region_ao") in RuRuAddressProvider.region_ao,
]
)
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"\d{6}", postcode)
def test_city_prefix(self, faker, num_samples):
for _ in range(num_samples):
city_prefix = faker.city_prefix()
assert isinstance(city_prefix, str)
assert city_prefix in RuRuAddressProvider.city_prefixes
def test_street_suffix(self, faker, num_samples):
for _ in range(num_samples):
street_suffix = faker.street_suffix()
assert isinstance(street_suffix, str)
assert street_suffix in RuRuAddressProvider.street_suffixes
def test_street_title(self, faker, num_samples):
for _ in range(num_samples):
street_title = faker.street_title()
assert isinstance(street_title, str)
def test_street_name(self, faker, num_samples):
for _ in range(num_samples):
street_name = faker.street_name()
assert isinstance(street_name, str)
@pytest.mark.parametrize(
"street_title,street_suffix,expected",
[
("Фрунзе", "ул.", "ул. Фрунзе"),
("Ставропольская", "ул.", "ул. Ставропольская"),
("Фрунзе", "пр.", "пр. Фрунзе"),
("Осенняя", "пр.", "пр. Осенний"),
("Гвардейская", "пр.", "пр. Гвардейский"),
("Рыбацкая", "пр.", "пр. Рыбацкий"),
("Безымянная", "пр.", "пр. Безымянный"),
("Проезжая", "ш.", "ш. Проезжее"),
("Магистральная", "ш.", "ш. Магистральное"),
],
ids=[
"feminine_suffix_and_noflex_title",
"feminine_suffix_and_flex_title",
"non_feminine_suffix_and_noflex_title",
"masc_suffix_and_irregular_masc_title",
"masc_suffix_and_ck_street_stem",
"masc_suffix_and_uk_street_stem",
"masc_suffix_and_other_stem",
"neu_suffx_and_iregular_neu_street_title",
"neu_suffix_and_regular_street_title",
],
)
def test_street_name_lexical(self, faker, street_title, street_suffix, expected):
"""Test that random street names are formed correctly, given
the case of suffixes and streets that have been randomly selected.
"""
title_patch = mock.patch(
"faker.providers.address.ru_RU.Provider.street_title",
autospec=True,
return_value=street_title,
)
suffix_patch = mock.patch(
"faker.providers.address.ru_RU.Provider.street_suffix",
autospec=True,
return_value=street_suffix,
)
with title_patch, suffix_patch:
result = faker.street_name()
assert result == expected
| TestRuRu |
python | bottlepy__bottle | bottle.py | {
"start": 131509,
"end": 132196
} | class ____:
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
@property
def _listen_url(self):
if self.host.startswith("unix:"):
return self.host
elif ':' in self.host:
return "http://[%s]:%d/" % (self.host, self.port)
else:
return "http://%s:%d/" % (self.host, self.port)
def __repr__(self):
args = ', '.join('%s=%r' % kv for kv in self.options.items())
return "%s(%s)" % (self.__class__.__name__, args)
| ServerAdapter |
python | getsentry__sentry | src/sentry/analytics/events/sentry_app_token_exchanged.py | {
"start": 83,
"end": 246
} | class ____(analytics.Event):
sentry_app_installation_id: int
exchange_type: str
analytics.register(SentryAppTokenExchangedEvent)
| SentryAppTokenExchangedEvent |
python | google__flatbuffers | tests/MyGame/Example/ArrayTable.py | {
"start": 278,
"end": 1989
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset: int = 0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ArrayTable()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsArrayTable(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def ArrayTableBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x41\x52\x52\x54", size_prefixed=size_prefixed)
# ArrayTable
def Init(self, buf: bytes, pos: int):
self._tab = flatbuffers.table.Table(buf, pos)
# ArrayTable
def A(self) -> Optional[ArrayStruct]:
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = o + self._tab.Pos
obj = ArrayStruct()
obj.Init(self._tab.Bytes, x)
return obj
return None
def ArrayTableStart(builder: flatbuffers.Builder):
builder.StartObject(1)
def Start(builder: flatbuffers.Builder):
ArrayTableStart(builder)
def ArrayTableAddA(builder: flatbuffers.Builder, a: Any):
builder.PrependStructSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(a), 0)
def AddA(builder: flatbuffers.Builder, a: Any):
ArrayTableAddA(builder, a)
def ArrayTableEnd(builder: flatbuffers.Builder) -> int:
return builder.EndObject()
def End(builder: flatbuffers.Builder) -> int:
return ArrayTableEnd(builder)
import MyGame.Example.ArrayStruct
try:
from typing import Optional
except:
pass
| ArrayTable |
python | django__django | django/core/management/commands/createcachetable.py | {
"start": 323,
"end": 4656
} | class ____(BaseCommand):
help = "Creates the tables needed to use the SQL cache backend."
requires_system_checks = []
def add_arguments(self, parser):
parser.add_argument(
"args",
metavar="table_name",
nargs="*",
help=(
"Optional table names. Otherwise, settings.CACHES is used to find "
"cache tables."
),
)
parser.add_argument(
"--database",
default=DEFAULT_DB_ALIAS,
choices=tuple(connections),
help="Nominates a database onto which the cache tables will be "
'installed. Defaults to the "default" database.',
)
parser.add_argument(
"--dry-run",
action="store_true",
help="Does not create the table, just prints the SQL that would be run.",
)
def handle(self, *tablenames, **options):
db = options["database"]
self.verbosity = options["verbosity"]
dry_run = options["dry_run"]
if tablenames:
# Legacy behavior, tablename specified as argument
for tablename in tablenames:
self.create_table(db, tablename, dry_run)
else:
for cache_alias in settings.CACHES:
cache = caches[cache_alias]
if isinstance(cache, BaseDatabaseCache):
self.create_table(db, cache._table, dry_run)
def create_table(self, database, tablename, dry_run):
cache = BaseDatabaseCache(tablename, {})
if not router.allow_migrate_model(database, cache.cache_model_class):
return
connection = connections[database]
if tablename in connection.introspection.table_names():
if self.verbosity > 0:
self.stdout.write("Cache table '%s' already exists." % tablename)
return
fields = (
# "key" is a reserved word in MySQL, so use "cache_key" instead.
models.CharField(
name="cache_key", max_length=255, unique=True, primary_key=True
),
models.TextField(name="value"),
models.DateTimeField(name="expires", db_index=True),
)
table_output = []
index_output = []
qn = connection.ops.quote_name
for f in fields:
field_output = [
qn(f.name),
f.db_type(connection=connection),
"%sNULL" % ("NOT " if not f.null else ""),
]
if f.primary_key:
field_output.append("PRIMARY KEY")
elif f.unique:
field_output.append("UNIQUE")
if f.db_index:
unique = "UNIQUE " if f.unique else ""
index_output.append(
"CREATE %sINDEX %s ON %s (%s);"
% (
unique,
qn("%s_%s" % (tablename, f.name)),
qn(tablename),
qn(f.name),
)
)
table_output.append(" ".join(field_output))
full_statement = ["CREATE TABLE %s (" % qn(tablename)]
for i, line in enumerate(table_output):
full_statement.append(
" %s%s" % (line, "," if i < len(table_output) - 1 else "")
)
full_statement.append(");")
full_statement = "\n".join(full_statement)
if dry_run:
self.stdout.write(full_statement)
for statement in index_output:
self.stdout.write(statement)
return
with transaction.atomic(
using=database, savepoint=connection.features.can_rollback_ddl
):
with connection.cursor() as curs:
try:
curs.execute(full_statement)
except DatabaseError as e:
raise CommandError(
"Cache table '%s' could not be created.\nThe error was: %s."
% (tablename, e)
)
for statement in index_output:
curs.execute(statement)
if self.verbosity > 1:
self.stdout.write("Cache table '%s' created." % tablename)
| Command |
python | pytorch__pytorch | test/test_dataloader.py | {
"start": 120555,
"end": 121637
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.dataset = NamedTupleDataset()
def test_dataloader_with_namedtuple(self):
# auto-collation
loader = DataLoader(self.dataset, batch_size=2, pin_memory=TEST_CUDA)
for batch in loader:
self.assertIsInstance(batch, NamedTupleDataset.Batch)
self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
self.assertIsInstance(batch.data, NamedTupleDataset.Data)
self.assertIsInstance(batch.data.positive, torch.Tensor)
self.assertEqual(batch.data.positive.is_pinned(), TEST_CUDA)
# no auto-collation
loader = DataLoader(self.dataset, batch_size=None, pin_memory=TEST_CUDA)
for batch in loader:
self.assertIsInstance(batch, NamedTupleDataset.Batch)
self.assertEqual(batch.random_tensor.is_pinned(), TEST_CUDA)
self.assertIsInstance(batch.data, NamedTupleDataset.Data)
self.assertNotIsInstance(batch.data.positive, torch.Tensor)
| TestNamedTupleDataLoader |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/neptune.py | {
"start": 3505,
"end": 9014
} | class ____(AwsBaseOperator[NeptuneHook]):
"""
Starts an Amazon Neptune DB cluster.
Amazon Neptune Database is a serverless graph database designed for superior scalability
and availability. Neptune Database provides built-in security, continuous backups, and
integrations with other AWS services
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:NeptuneStartDbClusterOperator`
:param db_cluster_id: The DB cluster identifier of the Neptune DB cluster to be started.
:param wait_for_completion: Whether to wait for the cluster to start. (default: True)
:param deferrable: If True, the operator will wait asynchronously for the cluster to start.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
:param waiter_delay: Time in seconds to wait between status checks.
:param waiter_max_attempts: Maximum number of attempts to check for job completion.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
:return: dictionary with Neptune cluster id
"""
aws_hook_class = NeptuneHook
template_fields: Sequence[str] = aws_template_fields("cluster_id")
def __init__(
self,
db_cluster_id: str,
wait_for_completion: bool = True,
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.cluster_id = db_cluster_id
self.wait_for_completion = wait_for_completion
self.deferrable = deferrable
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
def execute(self, context: Context, event: dict[str, Any] | None = None, **kwargs) -> dict[str, str]:
self.log.info("Starting Neptune cluster: %s", self.cluster_id)
# Check to make sure the cluster is not already available.
status = self.hook.get_cluster_status(self.cluster_id)
if status.lower() in NeptuneHook.AVAILABLE_STATES:
self.log.info("Neptune cluster %s is already available.", self.cluster_id)
return {"db_cluster_id": self.cluster_id}
if status.lower() in NeptuneHook.ERROR_STATES:
# some states will not allow you to start the cluster
self.log.error(
"Neptune cluster %s is in error state %s and cannot be started", self.cluster_id, status
)
raise AirflowException(f"Neptune cluster {self.cluster_id} is in error state {status}")
"""
A cluster and its instances must be in a valid state to send the start request.
This loop covers the case where the cluster is not available and also the case where
the cluster is available, but one or more of the instances are in an invalid state.
If either are in an invalid state, wait for the availability and retry.
Let the waiters handle retries and detecting the error states.
"""
try:
self.hook.conn.start_db_cluster(DBClusterIdentifier=self.cluster_id)
except ClientError as ex:
code = ex.response["Error"]["Code"]
self.log.warning("Received client error when attempting to start the cluster: %s", code)
if code in ["InvalidDBInstanceState", "InvalidClusterState", "InvalidDBClusterStateFault"]:
handle_waitable_exception(operator=self, err=code)
else:
# re raise for any other type of client error
raise
if self.deferrable:
self.log.info("Deferring for cluster start: %s", self.cluster_id)
self.defer(
trigger=NeptuneClusterAvailableTrigger(
aws_conn_id=self.aws_conn_id,
db_cluster_id=self.cluster_id,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
),
method_name="execute_complete",
)
elif self.wait_for_completion:
self.log.info("Waiting for Neptune cluster %s to start.", self.cluster_id)
self.hook.wait_for_cluster_availability(
self.cluster_id, self.waiter_delay, self.waiter_max_attempts
)
return {"db_cluster_id": self.cluster_id}
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> dict[str, str]:
status = ""
cluster_id = ""
if event:
status = event.get("status", "")
cluster_id = event.get("cluster_id", "")
self.log.info("Neptune cluster %s available with status: %s", cluster_id, status)
return {"db_cluster_id": cluster_id}
| NeptuneStartDbClusterOperator |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 18601,
"end": 18971
} | class ____(BaseModel):
number_of_collections: int = Field(..., description="")
max_collections: Optional[int] = Field(default=None, description="")
collections: Optional[List["CollectionTelemetryEnum"]] = Field(default=None, description="")
snapshots: Optional[List["CollectionSnapshotTelemetry"]] = Field(default=None, description="")
| CollectionsTelemetry |
python | python-pillow__Pillow | src/PIL/XpmImagePlugin.py | {
"start": 637,
"end": 3170
} | class ____(ImageFile.ImageFile):
format = "XPM"
format_description = "X11 Pixel Map"
def _open(self) -> None:
assert self.fp is not None
if not _accept(self.fp.read(9)):
msg = "not an XPM file"
raise SyntaxError(msg)
# skip forward to next string
while True:
line = self.fp.readline()
if not line:
msg = "broken XPM file"
raise SyntaxError(msg)
m = xpm_head.match(line)
if m:
break
self._size = int(m.group(1)), int(m.group(2))
palette_length = int(m.group(3))
bpp = int(m.group(4))
#
# load palette description
palette = {}
for _ in range(palette_length):
line = self.fp.readline().rstrip()
c = line[1 : bpp + 1]
s = line[bpp + 1 : -2].split()
for i in range(0, len(s), 2):
if s[i] == b"c":
# process colour key
rgb = s[i + 1]
if rgb == b"None":
self.info["transparency"] = c
elif rgb.startswith(b"#"):
rgb_int = int(rgb[1:], 16)
palette[c] = (
o8((rgb_int >> 16) & 255)
+ o8((rgb_int >> 8) & 255)
+ o8(rgb_int & 255)
)
else:
# unknown colour
msg = "cannot read this XPM file"
raise ValueError(msg)
break
else:
# missing colour key
msg = "cannot read this XPM file"
raise ValueError(msg)
args: tuple[int, dict[bytes, bytes] | tuple[bytes, ...]]
if palette_length > 256:
self._mode = "RGB"
args = (bpp, palette)
else:
self._mode = "P"
self.palette = ImagePalette.raw("RGB", b"".join(palette.values()))
args = (bpp, tuple(palette.keys()))
self.tile = [ImageFile._Tile("xpm", (0, 0) + self.size, self.fp.tell(), args)]
def load_read(self, read_bytes: int) -> bytes:
#
# load all image data in one chunk
xsize, ysize = self.size
assert self.fp is not None
s = [self.fp.readline()[1 : xsize + 1].ljust(xsize) for i in range(ysize)]
return b"".join(s)
| XpmImageFile |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_set_axis.py | {
"start": 3945,
"end": 4118
} | class ____(SharedSetAxisTests):
@pytest.fixture
def obj(self):
ser = Series(np.arange(4), index=[1, 3, 5, 7], dtype="int64")
return ser
| TestSeriesSetAxis |
python | PrefectHQ__prefect | tests/server/models/test_block_types.py | {
"start": 10245,
"end": 10780
} | class ____:
async def test_delete_block_type(self, session, block_type_x):
await models.block_types.delete_block_type(
session=session, block_type_id=block_type_x.id
)
assert not await models.block_types.read_block_type(
session=session, block_type_id=block_type_x.id
)
async def test_delete_nonexistant_block_type(self, session):
assert not await models.block_types.delete_block_type(
session=session, block_type_id=uuid4()
)
| TestDeleteBlockType |
python | ray-project__ray | python/ray/dashboard/modules/data/data_head.py | {
"start": 1518,
"end": 6633
} | class ____(SubprocessModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.prometheus_host = os.environ.get(
PROMETHEUS_HOST_ENV_VAR, DEFAULT_PROMETHEUS_HOST
)
self.prometheus_headers = parse_prom_headers(
os.environ.get(
PROMETHEUS_HEADERS_ENV_VAR,
DEFAULT_PROMETHEUS_HEADERS,
)
)
@routes.get("/api/data/datasets/{job_id}")
@optional_utils.init_ray_and_catch_exceptions()
async def get_datasets(self, req: Request) -> Response:
job_id = req.match_info["job_id"]
try:
from ray.data._internal.stats import get_or_create_stats_actor
_stats_actor = get_or_create_stats_actor()
datasets = await _stats_actor.get_datasets.remote(job_id)
# Initializes dataset metric values
for dataset in datasets:
for metric, queries in DATASET_METRICS.items():
datasets[dataset][metric] = {query.value[0]: 0 for query in queries}
for operator in datasets[dataset]["operators"]:
datasets[dataset]["operators"][operator][metric] = {
query.value[0]: 0 for query in queries
}
# Query dataset metric values from prometheus
try:
# TODO (Zandew): store results of completed datasets in stats actor.
for metric, queries in DATASET_METRICS.items():
for query in queries:
query_name, prom_query = query.value
# Dataset level
dataset_result = await self._query_prometheus(
prom_query.format(metric, self.session_name, "dataset")
)
for res in dataset_result["data"]["result"]:
dataset, value = res["metric"]["dataset"], res["value"][1]
if dataset in datasets:
datasets[dataset][metric][query_name] = value
# Operator level
operator_result = await self._query_prometheus(
prom_query.format(
metric, self.session_name, "dataset, operator"
)
)
for res in operator_result["data"]["result"]:
dataset, operator, value = (
res["metric"]["dataset"],
res["metric"]["operator"],
res["value"][1],
)
# Check if dataset/operator is in current _StatsActor scope.
# Prometheus server may contain metrics from previous
# cluster if not reset.
if (
dataset in datasets
and operator in datasets[dataset]["operators"]
):
datasets[dataset]["operators"][operator][metric][
query_name
] = value
except aiohttp.client_exceptions.ClientConnectorError:
# Prometheus server may not be running,
# leave these values blank and return other data
logging.exception(
"Exception occurred while querying Prometheus. "
"The Prometheus server may not be running."
)
# Flatten response
for dataset in datasets:
datasets[dataset]["operators"] = list(
map(
lambda item: {"operator": item[0], **item[1]},
datasets[dataset]["operators"].items(),
)
)
datasets = list(
map(lambda item: {"dataset": item[0], **item[1]}, datasets.items())
)
# Sort by descending start time
datasets = sorted(datasets, key=lambda x: x["start_time"], reverse=True)
return Response(
text=json.dumps({"datasets": datasets}),
content_type="application/json",
)
except Exception as e:
logging.exception("Exception occurred while getting datasets.")
return Response(
status=503,
text=str(e),
)
async def _query_prometheus(self, query):
async with self.http_session.get(
f"{self.prometheus_host}/api/v1/query?query={quote(query)}",
headers=self.prometheus_headers,
) as resp:
if resp.status == 200:
prom_data = await resp.json()
return prom_data
message = await resp.text()
raise PrometheusQueryError(resp.status, message)
| DataHead |
python | apache__airflow | providers/openlineage/tests/system/openlineage/example_openlineage_base_complex_dag.py | {
"start": 2786,
"end": 6032
} | class ____(BaseOperator):
def __init__(self, value, **kwargs):
super().__init__(**kwargs)
self.value = value
def execute(self, context):
return self.value + 1
DAG_ID = "openlineage_base_complex_dag"
with DAG(
dag_id=DAG_ID,
start_date=datetime(2021, 1, 1),
schedule=None,
catchup=False,
description="OpenLineage complex DAG description",
owner_links={"airflow": "https://airflow.apache.org/"},
tags=["first", "second@", "with'quote", 'z"e'],
default_args={"retries": 0},
) as dag:
# task_0 will not emit any events, but the owner will be picked up and added to DAG
task_0 = EmptyOperator(task_id="task_0", owner='owner"1')
task_1 = BashOperator(
task_id="task_1.id.with.dots",
bash_command="exit 0;",
owner="owner'2",
execution_timeout=timedelta(seconds=456),
doc_rst="RST doc",
)
task_2 = PythonOperator(
task_id="task_2",
python_callable=do_nothing,
inlets=[Asset(uri="s3://bucket2/dir2/file2.txt"), Asset(uri="s3://bucket2/dir2/file3.txt")],
max_retry_delay=42,
doc="text doc",
doc_md="should be skipped",
doc_json="should be skipped",
doc_yaml="should be skipped",
doc_rst="should be skipped",
)
task_3 = EmptyOperator(
task_id="task_3",
outlets=[Asset(uri="s3://bucket/dir/file.txt")],
doc_md="MD doc",
doc_json="should be skipped",
doc_yaml="should be skipped",
doc_rst="should be skipped",
)
task_4 = SomeCustomOperator(
task_id="task_4",
bash_command="exit 0;",
owner="owner3",
max_active_tis_per_dag=7,
max_active_tis_per_dagrun=2,
doc_json="JSON doc",
doc_yaml="should be skipped",
doc_rst="should be skipped",
)
with TaskGroup("section_1", prefix_group_id=True) as tg:
task_5 = CustomMappedOperator.partial(task_id="task_5", doc_md="md doc").expand(value=[1])
with TaskGroup("section_2", parent_group=tg, tooltip="group_tooltip") as tg2:
add_args: dict[str, Any] = {"sla": timedelta(seconds=123)} if AIRFLOW_VERSION.major == 2 else {}
task_6 = EmptyOperator(
task_id="task_6",
on_success_callback=lambda x: print(1),
doc_yaml="YAML doc",
doc_rst="should be skipped",
**add_args,
)
with TaskGroup("section_3", parent_group=tg2):
task_7 = PythonOperator(task_id="task_7", python_callable=lambda: 1)
check_events_number = PythonOperator(
task_id="check_events_number", python_callable=check_events_number_func
)
check_events = OpenLineageTestOperator(
task_id="check_events", file_path=get_expected_event_file_path(DAG_ID)
)
task_1 >> [task_2, task_7] >> check_events_number
task_2 >> task_3 >> [task_4, task_5] >> task_6 >> check_events_number
check_events_number >> check_events
from tests_common.test_utils.system_tests import get_test_run # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
| CustomMappedOperator |
python | pytorch__pytorch | test/dynamo/test_fake_distributed.py | {
"start": 682,
"end": 1776
} | class ____(DynamoTestCase):
def setUp(self):
# Use FakeProcessGroup to run tests on a single process
dist.init_process_group(backend="fake", rank=0, world_size=2)
self.local_rank = 0
self.world_size = 2
def tearDown(self):
dist.destroy_process_group()
def test_all_to_all_single_autograd(self):
backend = AotEagerAndRecordGraphs()
@torch.compile(fullgraph=True, backend=backend)
def fn(x):
return all_to_all_single_autograd(
x,
None, # Will use equal splits
None, # Will use equal splits
group=dist.group.WORLD,
)
# Test backed shapes
x = torch.randn(8, 8, requires_grad=True)
torch._dynamo.mark_dynamic(x, 0)
torch._dynamo.mark_dynamic(x, 1)
wait_tensor(fn(x))
self.assertEqual(len(backend.fw_graphs), 1)
self.assertEqual(len(backend.bw_graphs), 1)
self.assertExpectedInline(
normalize_graph(backend.fw_graphs[0]),
"""\
| TestFakeDistributed |
python | PrefectHQ__prefect | src/prefect/locking/filesystem.py | {
"start": 716,
"end": 8134
} | class ____(LockManager):
"""
A lock manager that implements locking using local files.
Attributes:
lock_files_directory: the directory where lock files are stored
"""
def __init__(self, lock_files_directory: Path) -> None:
self.lock_files_directory: Path = lock_files_directory.expanduser().resolve()
self._locks: dict[str, _LockInfo] = {}
def _ensure_lock_files_directory_exists(self) -> None:
self.lock_files_directory.mkdir(parents=True, exist_ok=True)
def _lock_path_for_key(self, key: str) -> Path:
if (lock_info := self._locks.get(key)) is not None:
return lock_info["path"]
return self.lock_files_directory.joinpath(key).with_suffix(".lock")
def _get_lock_info(self, key: str, use_cache: bool = True) -> Optional[_LockInfo]:
if use_cache:
if (lock_info := self._locks.get(key)) is not None:
return lock_info
lock_path = self._lock_path_for_key(key)
try:
with open(lock_path, "rb") as lock_file:
lock_info = pydantic_core.from_json(lock_file.read())
lock_info["path"] = lock_path
expiration = lock_info.get("expiration")
lock_info["expiration"] = (
parse_datetime(expiration) if expiration is not None else None
)
self._locks[key] = lock_info
return lock_info
except FileNotFoundError:
return None
async def _aget_lock_info(
self, key: str, use_cache: bool = True
) -> Optional[_LockInfo]:
if use_cache:
if (lock_info := self._locks.get(key)) is not None:
return lock_info
lock_path = self._lock_path_for_key(key)
try:
lock_info_bytes = await anyio.Path(lock_path).read_bytes()
lock_info = pydantic_core.from_json(lock_info_bytes)
lock_info["path"] = lock_path
expiration = lock_info.get("expiration")
lock_info["expiration"] = (
parse_datetime(expiration) if expiration is not None else None
)
self._locks[key] = lock_info
return lock_info
except FileNotFoundError:
return None
def acquire_lock(
self,
key: str,
holder: str,
acquire_timeout: Optional[float] = None,
hold_timeout: Optional[float] = None,
) -> bool:
self._ensure_lock_files_directory_exists()
lock_path = self._lock_path_for_key(key)
if self.is_locked(key) and not self.is_lock_holder(key, holder):
lock_free = self.wait_for_lock(key, acquire_timeout)
if not lock_free:
return False
try:
Path(lock_path).touch(exist_ok=False)
except FileExistsError:
if not self.is_lock_holder(key, holder):
logger.debug(
f"Another actor acquired the lock for record with key {key}. Trying again."
)
return self.acquire_lock(key, holder, acquire_timeout, hold_timeout)
expiration = (
now("UTC") + timedelta(seconds=hold_timeout)
if hold_timeout is not None
else None
)
with open(Path(lock_path), "wb") as lock_file:
lock_file.write(
pydantic_core.to_json(
{
"holder": holder,
"expiration": str(expiration)
if expiration is not None
else None,
},
)
)
self._locks[key] = {
"holder": holder,
"expiration": expiration,
"path": lock_path,
}
return True
async def aacquire_lock(
self,
key: str,
holder: str,
acquire_timeout: Optional[float] = None,
hold_timeout: Optional[float] = None,
) -> bool:
await anyio.Path(self.lock_files_directory).mkdir(parents=True, exist_ok=True)
lock_path = self._lock_path_for_key(key)
if self.is_locked(key) and not self.is_lock_holder(key, holder):
lock_free = await self.await_for_lock(key, acquire_timeout)
if not lock_free:
return False
try:
await anyio.Path(lock_path).touch(exist_ok=False)
except FileExistsError:
if not self.is_lock_holder(key, holder):
logger.debug(
f"Another actor acquired the lock for record with key {key}. Trying again."
)
return self.acquire_lock(key, holder, acquire_timeout, hold_timeout)
expiration = (
now("UTC") + timedelta(seconds=hold_timeout)
if hold_timeout is not None
else None
)
async with await anyio.Path(lock_path).open("wb") as lock_file:
await lock_file.write(
pydantic_core.to_json(
{
"holder": holder,
"expiration": str(expiration)
if expiration is not None
else None,
},
)
)
self._locks[key] = {
"holder": holder,
"expiration": expiration,
"path": lock_path,
}
return True
def release_lock(self, key: str, holder: str) -> None:
lock_path = self._lock_path_for_key(key)
if not self.is_locked(key):
raise ValueError(f"No lock for transaction with key {key}")
if self.is_lock_holder(key, holder):
Path(lock_path).unlink(missing_ok=True)
self._locks.pop(key, None)
else:
raise ValueError(f"No lock held by {holder} for transaction with key {key}")
def is_locked(self, key: str, use_cache: bool = False) -> bool:
if (lock_info := self._get_lock_info(key, use_cache=use_cache)) is None:
return False
if (expiration := lock_info.get("expiration")) is None:
return True
expired = expiration < now("UTC")
if expired:
Path(lock_info["path"]).unlink()
self._locks.pop(key, None)
return False
else:
return True
def is_lock_holder(self, key: str, holder: str) -> bool:
if not self.is_locked(key):
return False
if not self.is_locked(key):
return False
if (lock_info := self._get_lock_info(key)) is None:
return False
return lock_info["holder"] == holder
def wait_for_lock(self, key: str, timeout: Optional[float] = None) -> bool:
seconds_waited = 0
while self.is_locked(key, use_cache=False):
if timeout and seconds_waited >= timeout:
return False
seconds_waited += 0.1
time.sleep(0.1)
return True
async def await_for_lock(self, key: str, timeout: Optional[float] = None) -> bool:
seconds_waited = 0
while self.is_locked(key, use_cache=False):
if timeout and seconds_waited >= timeout:
return False
seconds_waited += 0.1
await anyio.sleep(0.1)
return True
| FileSystemLockManager |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_project_stacktrace_link.py | {
"start": 14809,
"end": 16357
} | class ____(BaseProjectStacktraceLink):
def setUp(self) -> None:
BaseProjectStacktraceLink.setUp(self)
def test_multiple_code_mapping_matches(self) -> None:
"""
Test that the code mapping that is user generated and has the most defined stack
root of the user generated code mappings is chosen
"""
prefix = "/usr/src/getsentry"
src_path = "src/sentry/utils/safe.py"
filepath = f"{prefix}/src/sentry/{src_path}"
# All of these code mappings would match filepath
self._create_code_mapping("", "", False)
self._create_code_mapping(f"{prefix}/src/", "", True)
cm = self._create_code_mapping(prefix, "", False)
self._create_code_mapping("/usr/src/", "", False)
self._create_code_mapping(f"{prefix}/src/sentry/", "", True)
with patch.object(
ExampleIntegration,
"get_stacktrace_link",
return_value=f"https://github.com/getsentry/sentry/{src_path}",
):
response = self.get_success_response(
self.organization.slug, self.project.slug, qs_params={"file": filepath}
)
# Assert that the code mapping that is user generated and has the most defined stack
# trace of the user generated code mappings is chosen
assert response.data["config"] == self.expected_configurations(cm)
assert response.data["sourceUrl"] == f"https://github.com/getsentry/sentry/{src_path}"
| ProjectStacktraceLinkTestMultipleMatches |
python | python-pillow__Pillow | Tests/test_image_resize.py | {
"start": 5994,
"end": 9619
} | class ____:
def test_reducing_gap_values(self, gradients_image: ImageFile.ImageFile) -> None:
ref = gradients_image.resize(
(52, 34), Image.Resampling.BICUBIC, reducing_gap=None
)
im = gradients_image.resize((52, 34), Image.Resampling.BICUBIC)
assert_image_equal(ref, im)
with pytest.raises(ValueError):
gradients_image.resize((52, 34), Image.Resampling.BICUBIC, reducing_gap=0)
with pytest.raises(ValueError):
gradients_image.resize(
(52, 34), Image.Resampling.BICUBIC, reducing_gap=0.99
)
@pytest.mark.parametrize(
"box, epsilon",
((None, 4), ((1.1, 2.2, 510.8, 510.9), 4), ((3, 10, 410, 256), 10)),
)
def test_reducing_gap_1(
self,
gradients_image: ImageFile.ImageFile,
box: tuple[float, float, float, float],
epsilon: float,
) -> None:
ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
im = gradients_image.resize(
(52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=1.0
)
with pytest.raises(pytest.fail.Exception):
assert_image_equal(ref, im)
assert_image_similar(ref, im, epsilon)
@pytest.mark.parametrize(
"box, epsilon",
((None, 1.5), ((1.1, 2.2, 510.8, 510.9), 1.5), ((3, 10, 410, 256), 1)),
)
def test_reducing_gap_2(
self,
gradients_image: ImageFile.ImageFile,
box: tuple[float, float, float, float],
epsilon: float,
) -> None:
ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
im = gradients_image.resize(
(52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=2.0
)
with pytest.raises(pytest.fail.Exception):
assert_image_equal(ref, im)
assert_image_similar(ref, im, epsilon)
@pytest.mark.parametrize(
"box, epsilon",
((None, 1), ((1.1, 2.2, 510.8, 510.9), 1), ((3, 10, 410, 256), 0.5)),
)
def test_reducing_gap_3(
self,
gradients_image: ImageFile.ImageFile,
box: tuple[float, float, float, float],
epsilon: float,
) -> None:
ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
im = gradients_image.resize(
(52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=3.0
)
with pytest.raises(pytest.fail.Exception):
assert_image_equal(ref, im)
assert_image_similar(ref, im, epsilon)
@pytest.mark.parametrize("box", (None, (1.1, 2.2, 510.8, 510.9), (3, 10, 410, 256)))
def test_reducing_gap_8(
self,
gradients_image: ImageFile.ImageFile,
box: tuple[float, float, float, float],
) -> None:
ref = gradients_image.resize((52, 34), Image.Resampling.BICUBIC, box=box)
im = gradients_image.resize(
(52, 34), Image.Resampling.BICUBIC, box=box, reducing_gap=8.0
)
assert_image_equal(ref, im)
@pytest.mark.parametrize(
"box, epsilon",
(((0, 0, 512, 512), 5.5), ((0.9, 1.7, 128, 128), 9.5)),
)
def test_box_filter(
self,
gradients_image: ImageFile.ImageFile,
box: tuple[float, float, float, float],
epsilon: float,
) -> None:
ref = gradients_image.resize((52, 34), Image.Resampling.BOX, box=box)
im = gradients_image.resize(
(52, 34), Image.Resampling.BOX, box=box, reducing_gap=1.0
)
assert_image_similar(ref, im, epsilon)
| TestReducingGapResize |
python | spack__spack | lib/spack/spack/compilers/libraries.py | {
"start": 4375,
"end": 9218
} | class ____:
"""Detects compiler properties of a given compiler spec. Useful for compiler wrappers."""
def __init__(self, compiler_spec: spack.spec.Spec):
assert compiler_spec.concrete, "only concrete compiler specs are allowed"
self.spec = compiler_spec
self.cache = COMPILER_CACHE
@contextlib.contextmanager
def compiler_environment(self):
"""Sets the environment to run this compiler"""
# No modifications for Spack managed compilers
if not self.spec.external:
yield
return
# Avoid modifying os.environ if possible.
environment = self.spec.extra_attributes.get("environment", {})
modules = self.spec.external_modules or []
if not self.spec.external_modules and not environment:
yield
return
# store environment to replace later
backup_env = os.environ.copy()
try:
# load modules and set env variables
for module in modules:
spack.util.module_cmd.load_module(module)
# apply other compiler environment changes
spack.schema.environment.parse(environment).apply_modifications()
yield
finally:
# Restore environment regardless of whether inner code succeeded
os.environ.clear()
os.environ.update(backup_env)
def _compile_dummy_c_source(self) -> Optional[str]:
compiler_pkg = self.spec.package
if getattr(compiler_pkg, "cc"):
cc = compiler_pkg.cc
ext = "c"
else:
cc = compiler_pkg.cxx
ext = "cc"
if not cc or not self.spec.package.verbose_flags:
return None
try:
tmpdir = tempfile.mkdtemp(prefix="spack-implicit-link-info")
fout = os.path.join(tmpdir, "output")
fin = os.path.join(tmpdir, f"main.{ext}")
with open(fin, "w", encoding="utf-8") as csource:
csource.write(
"int main(int argc, char* argv[]) { (void)argc; (void)argv; return 0; }\n"
)
cc_exe = spack.util.executable.Executable(cc)
if self.spec.external:
compiler_flags = self.spec.extra_attributes.get("flags", {})
for flag_type in [
"cflags" if cc == compiler_pkg.cc else "cxxflags",
"cppflags",
"ldflags",
]:
current_flags = compiler_flags.get(flag_type, "").strip()
if current_flags:
cc_exe.add_default_arg(*current_flags.split(" "))
with self.compiler_environment():
return cc_exe("-v", fin, "-o", fout, output=str, error=str)
except spack.util.executable.ProcessError as pe:
tty.debug(f"ProcessError: Command exited with non-zero status: {pe.long_message}")
return None
finally:
shutil.rmtree(tmpdir, ignore_errors=True)
def compiler_verbose_output(self) -> Optional[str]:
"""Get the compiler verbose output from the cache or by compiling a dummy C source."""
return self.cache.get(self.spec).c_compiler_output
def default_dynamic_linker(self) -> Optional[str]:
"""Determine the default dynamic linker path from the compiler verbose output."""
output = self.compiler_verbose_output()
if not output:
return None
return spack.util.libc.parse_dynamic_linker(output)
def default_libc(self) -> Optional[spack.spec.Spec]:
"""Determine libc targeted by the compiler from link line"""
# technically this should be testing the target platform of the compiler, but we don't have
# that, so stick to host platform for now.
if sys.platform in ("darwin", "win32"):
return None
dynamic_linker = self.default_dynamic_linker()
if dynamic_linker is None:
return None
return spack.util.libc.libc_from_dynamic_linker(dynamic_linker)
def implicit_rpaths(self) -> List[str]:
"""Obtain the implicit rpaths to be added from the default ``-L`` link directories,
excluding system directories."""
output = self.compiler_verbose_output()
if output is None:
return []
link_dirs = parse_non_system_link_dirs(output)
all_required_libs = list(self.spec.package.implicit_rpath_libs) + [
"libc",
"libc++",
"libstdc++",
]
dynamic_linker = self.default_dynamic_linker()
result = DefaultDynamicLinkerFilter(dynamic_linker)(
paths_containing_libs(link_dirs, all_required_libs)
)
return list(result)
| CompilerPropertyDetector |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_key.py | {
"start": 7089,
"end": 9822
} | class ____(NamedTuple):
"""Check names are expected to be unique per-asset. Thus, this combination of asset key and
check name uniquely identifies an asset check within a deployment.
"""
asset_key: PublicAttr[AssetKey]
name: PublicAttr[str]
@staticmethod
def from_graphql_input(graphql_input: Mapping[str, Any]) -> "AssetCheckKey":
return AssetCheckKey(
asset_key=AssetKey.from_graphql_input(graphql_input["assetKey"]),
name=graphql_input["name"],
)
def to_user_string(self) -> str:
return f"{self.asset_key.to_user_string()}:{self.name}"
@staticmethod
def from_user_string(user_string: str) -> "AssetCheckKey":
asset_key_str, name = user_string.split(":")
return AssetCheckKey(AssetKey.from_user_string(asset_key_str), name)
@staticmethod
def from_db_string(db_string: str) -> Optional["AssetCheckKey"]:
try:
values = seven.json.loads(db_string)
if isinstance(values, dict) and values.keys() == {"asset_key", "check_name"}:
return AssetCheckKey(
asset_key=check.not_none(AssetKey.from_db_string(values["asset_key"])),
name=check.inst(values["check_name"], str),
)
else:
return None
except seven.JSONDecodeError:
return None
def to_db_string(self) -> str:
return seven.json.dumps({"asset_key": self.asset_key.to_string(), "check_name": self.name})
def with_asset_key_prefix(self, prefix: CoercibleToAssetKeyPrefix) -> "AssetCheckKey":
return AssetCheckKey(self.asset_key.with_prefix(prefix), self.name)
def replace_asset_key(self, asset_key: AssetKey) -> "AssetCheckKey":
return AssetCheckKey(asset_key, self.name)
EntityKey = Union[AssetKey, AssetCheckKey]
T_EntityKey = TypeVar("T_EntityKey", AssetKey, AssetCheckKey, EntityKey)
def entity_key_from_db_string(db_string: str) -> EntityKey:
check_key = AssetCheckKey.from_db_string(db_string)
return check_key if check_key else check.not_none(AssetKey.from_db_string(db_string))
def asset_keys_from_defs_and_coercibles(
assets: Sequence[Union["AssetsDefinition", CoercibleToAssetKey]],
) -> Sequence[AssetKey]:
from dagster._core.definitions.assets.definition.assets_definition import AssetsDefinition
result: list[AssetKey] = []
for el in assets:
if isinstance(el, AssetsDefinition):
result.extend(el.keys)
else:
result.append(
AssetKey.from_user_string(el)
if isinstance(el, str)
else AssetKey.from_coercible(el)
)
return result
| AssetCheckKey |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/dags.py | {
"start": 1960,
"end": 4176
} | class ____(BaseModel):
"""DAG serializer for responses."""
model_config = ConfigDict(
alias_generator=AliasGenerator(
validation_alias=lambda field_name: DAG_ALIAS_MAPPING.get(field_name, field_name),
),
)
dag_id: str
dag_display_name: str
is_paused: bool
is_stale: bool
last_parsed_time: datetime | None
last_parse_duration: float | None
last_expired: datetime | None
bundle_name: str | None
bundle_version: str | None
relative_fileloc: str | None
fileloc: str
description: str | None
timetable_summary: str | None
timetable_description: str | None
tags: list[DagTagResponse]
max_active_tasks: int
max_active_runs: int | None
max_consecutive_failed_dag_runs: int
has_task_concurrency_limits: bool
has_import_errors: bool
next_dagrun_logical_date: datetime | None
next_dagrun_data_interval_start: datetime | None
next_dagrun_data_interval_end: datetime | None
next_dagrun_run_after: datetime | None
owners: list[str]
@field_validator("owners", mode="before")
@classmethod
def get_owners(cls, v: Any) -> list[str] | None:
"""Convert owners attribute to DAG representation."""
if not (v is None or isinstance(v, str)):
return v
if v is None:
return []
if isinstance(v, str):
return [x.strip() for x in v.split(",")]
return v
@field_validator("timetable_summary", mode="before")
@classmethod
def get_timetable_summary(cls, tts: str | None) -> str | None:
"""Validate the string representation of timetable_summary."""
if tts is None or tts == "None":
return None
return str(tts)
# Mypy issue https://github.com/python/mypy/issues/1362
@computed_field # type: ignore[prop-decorator]
@property
def file_token(self) -> str:
"""Return file token."""
serializer = URLSafeSerializer(conf.get_mandatory_value("api", "secret_key"))
payload = {
"bundle_name": self.bundle_name,
"relative_fileloc": self.relative_fileloc,
}
return serializer.dumps(payload)
| DAGResponse |
python | ray-project__ray | python/ray/data/random_access_dataset.py | {
"start": 7440,
"end": 9766
} | class ____:
def __init__(self, key_field):
self.blocks = None
self.key_field = key_field
self.num_accesses = 0
self.total_time = 0
def assign_blocks(self, block_ref_dict):
self.blocks = {k: ray.get(ref) for k, ref in block_ref_dict.items()}
def get(self, block_index, key):
start = time.perf_counter()
result = self._get(block_index, key)
self.total_time += time.perf_counter() - start
self.num_accesses += 1
return result
def multiget(self, block_indices, keys):
start = time.perf_counter()
block = self.blocks[block_indices[0]]
if len(set(block_indices)) == 1 and isinstance(
self.blocks[block_indices[0]], pa.Table
):
# Fast path: use np.searchsorted for vectorized search on a single block.
# This is ~3x faster than the naive case.
block = self.blocks[block_indices[0]]
col = block[self.key_field]
indices = np.searchsorted(col, keys)
acc = BlockAccessor.for_block(block)
result = [
acc._get_row(i) if k1.as_py() == k2 else None
for i, k1, k2 in zip(indices, col.take(indices), keys)
]
else:
result = [self._get(i, k) for i, k in zip(block_indices, keys)]
self.total_time += time.perf_counter() - start
self.num_accesses += 1
return result
def ping(self):
return ray.get_runtime_context().get_node_id()
def stats(self) -> dict:
return {
"num_blocks": len(self.blocks),
"num_accesses": self.num_accesses,
"total_time": self.total_time,
}
def _get(self, block_index, key):
if block_index is None:
return None
block = self.blocks[block_index]
column = block[self.key_field]
if isinstance(block, pa.Table):
column = _ArrowListWrapper(column)
i = _binary_search_find(column, key)
if i is None:
return None
acc = BlockAccessor.for_block(block)
return acc._get_row(i)
def _binary_search_find(column, x):
i = bisect.bisect_left(column, x)
if i != len(column) and column[i] == x:
return i
return None
| _RandomAccessWorker |
python | walkccc__LeetCode | solutions/2671. Frequency Tracker/2671.py | {
"start": 0,
"end": 614
} | class ____:
def __init__(self):
self.count = collections.Counter()
self.freqCount = collections.Counter()
def add(self, number: int) -> None:
if self.count[number] > 0:
self.freqCount[self.count[number]] -= 1
self.count[number] += 1
self.freqCount[self.count[number]] += 1
def deleteOne(self, number: int) -> None:
if self.count[number] == 0:
return
self.freqCount[self.count[number]] -= 1
self.count[number] -= 1
self.freqCount[self.count[number]] += 1
def hasFrequency(self, frequency: int) -> bool:
return self.freqCount[frequency] > 0
| FrequencyTracker |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/__init__.py | {
"start": 2553,
"end": 2617
} | class ____(Outer.Inner):
"""InnerChild docstring"""
| InnerChild |
python | faif__python-patterns | patterns/structural/decorator.py | {
"start": 1352,
"end": 1951
} | class ____(TextTag):
"""Wraps a tag in <i>"""
def __init__(self, wrapped: TextTag) -> None:
self._wrapped = wrapped
def render(self) -> str:
return f"<i>{self._wrapped.render()}</i>"
def main():
"""
>>> simple_hello = TextTag("hello, world!")
>>> special_hello = ItalicWrapper(BoldWrapper(simple_hello))
>>> print("before:", simple_hello.render())
before: hello, world!
>>> print("after:", special_hello.render())
after: <i><b>hello, world!</b></i>
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| ItalicWrapper |
python | kamyu104__LeetCode-Solutions | Python/node-with-highest-edge-score.py | {
"start": 42,
"end": 335
} | class ____(object):
def edgeScore(self, edges):
"""
:type edges: List[int]
:rtype: int
"""
score = [0]*len(edges)
for u, v in enumerate(edges):
score[v] += u
return max(xrange(len(edges)), key=lambda x:score[x])
| Solution |
python | huggingface__transformers | src/transformers/models/pix2struct/modeling_pix2struct.py | {
"start": 37474,
"end": 38842
} | class ____(nn.Module):
def __init__(self, config, layer_idx: Optional[int] = None):
super().__init__()
self.attention = Pix2StructTextAttention(config, has_relative_attention_bias=False, layer_idx=layer_idx)
self.layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
key_value_states,
attention_mask=None,
position_bias=None,
past_key_values=None,
use_cache=False,
query_length=None,
output_attentions=False,
cache_position=None,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.attention(
normed_hidden_states,
mask=attention_mask,
key_value_states=key_value_states,
position_bias=position_bias,
past_key_values=past_key_values,
use_cache=use_cache,
query_length=query_length,
output_attentions=output_attentions,
cache_position=cache_position,
)
layer_output = hidden_states + self.dropout(attention_output[0])
outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
return outputs
| Pix2StructTextLayerCrossAttention |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/cx_oracle.py | {
"start": 28969,
"end": 36924
} | class ____(OracleExecutionContext):
out_parameters = None
def _generate_out_parameter_vars(self):
# check for has_out_parameters or RETURNING, create cx_Oracle.var
# objects if so
if self.compiled.has_out_parameters or self.compiled._oracle_returning:
out_parameters = self.out_parameters
assert out_parameters is not None
len_params = len(self.parameters)
quoted_bind_names = self.compiled.escaped_bind_names
for bindparam in self.compiled.binds.values():
if bindparam.isoutparam:
name = self.compiled.bind_names[bindparam]
type_impl = bindparam.type.dialect_impl(self.dialect)
if hasattr(type_impl, "_cx_oracle_var"):
out_parameters[name] = type_impl._cx_oracle_var(
self.dialect, self.cursor, arraysize=len_params
)
else:
dbtype = type_impl.get_dbapi_type(self.dialect.dbapi)
cx_Oracle = self.dialect.dbapi
assert cx_Oracle is not None
if dbtype is None:
raise exc.InvalidRequestError(
"Cannot create out parameter for "
"parameter "
"%r - its type %r is not supported by"
" cx_oracle" % (bindparam.key, bindparam.type)
)
# note this is an OUT parameter. Using
# non-LOB datavalues with large unicode-holding
# values causes the failure (both cx_Oracle and
# oracledb):
# ORA-22835: Buffer too small for CLOB to CHAR or
# BLOB to RAW conversion (actual: 16507,
# maximum: 4000)
# [SQL: INSERT INTO long_text (x, y, z) VALUES
# (:x, :y, :z) RETURNING long_text.x, long_text.y,
# long_text.z INTO :ret_0, :ret_1, :ret_2]
# so even for DB_TYPE_NVARCHAR we convert to a LOB
if isinstance(type_impl, _LOBDataType):
if dbtype == cx_Oracle.DB_TYPE_NVARCHAR:
dbtype = cx_Oracle.NCLOB
elif dbtype == cx_Oracle.DB_TYPE_RAW:
dbtype = cx_Oracle.BLOB
# other LOB types go in directly
out_parameters[name] = self.cursor.var(
dbtype,
# this is fine also in oracledb_async since
# the driver will await the read coroutine
outconverter=lambda value: value.read(),
arraysize=len_params,
)
elif (
isinstance(type_impl, _OracleNumericCommon)
and type_impl.asdecimal
):
out_parameters[name] = self.cursor.var(
decimal.Decimal,
arraysize=len_params,
)
else:
out_parameters[name] = self.cursor.var(
dbtype, arraysize=len_params
)
for param in self.parameters:
param[quoted_bind_names.get(name, name)] = (
out_parameters[name]
)
def _generate_cursor_outputtype_handler(self):
output_handlers = {}
for keyname, name, objects, type_ in self.compiled._result_columns:
handler = type_._cached_custom_processor(
self.dialect,
"cx_oracle_outputtypehandler",
self._get_cx_oracle_type_handler,
)
if handler:
denormalized_name = self.dialect.denormalize_name(keyname)
output_handlers[denormalized_name] = handler
if output_handlers:
default_handler = self._dbapi_connection.outputtypehandler
def output_type_handler(
cursor, name, default_type, size, precision, scale
):
if name in output_handlers:
return output_handlers[name](
cursor, name, default_type, size, precision, scale
)
else:
return default_handler(
cursor, name, default_type, size, precision, scale
)
self.cursor.outputtypehandler = output_type_handler
def _get_cx_oracle_type_handler(self, impl):
if hasattr(impl, "_cx_oracle_outputtypehandler"):
return impl._cx_oracle_outputtypehandler(self.dialect)
else:
return None
def pre_exec(self):
super().pre_exec()
if not getattr(self.compiled, "_oracle_cx_sql_compiler", False):
return
self.out_parameters = {}
self._generate_out_parameter_vars()
self._generate_cursor_outputtype_handler()
def post_exec(self):
if (
self.compiled
and is_sql_compiler(self.compiled)
and self.compiled._oracle_returning
):
initial_buffer = self.fetchall_for_returning(
self.cursor, _internal=True
)
fetch_strategy = _cursor.FullyBufferedCursorFetchStrategy(
self.cursor,
[
(entry.keyname, None)
for entry in self.compiled._result_columns
],
initial_buffer=initial_buffer,
)
self.cursor_fetch_strategy = fetch_strategy
def create_cursor(self):
c = self._dbapi_connection.cursor()
if self.dialect.arraysize:
c.arraysize = self.dialect.arraysize
return c
def fetchall_for_returning(self, cursor, *, _internal=False):
compiled = self.compiled
if (
not _internal
and compiled is None
or not is_sql_compiler(compiled)
or not compiled._oracle_returning
):
raise NotImplementedError(
"execution context was not prepared for Oracle RETURNING"
)
# create a fake cursor result from the out parameters. unlike
# get_out_parameter_values(), the result-row handlers here will be
# applied at the Result level
numcols = len(self.out_parameters)
# [stmt_result for stmt_result in outparam.values] == each
# statement in executemany
# [val for val in stmt_result] == each row for a particular
# statement
return list(
zip(
*[
[
val
for stmt_result in self.out_parameters[
f"ret_{j}"
].values
for val in (stmt_result or ())
]
for j in range(numcols)
]
)
)
def get_out_parameter_values(self, out_param_names):
# this method should not be called when the compiler has
# RETURNING as we've turned the has_out_parameters flag set to
# False.
assert not self.compiled.returning
return [
self.dialect._paramval(self.out_parameters[name])
for name in out_param_names
]
| OracleExecutionContext_cx_oracle |
python | pytorch__pytorch | test/quantization/fx/test_quantize_fx.py | {
"start": 5535,
"end": 6157
} | class ____(torch.nn.Module):
def __init__(self, binary_op, ibinary_op, is_inplace, is_scalar):
""" ibinary_op means inplace binary op
"""
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1).float()
self.conv2 = torch.nn.Conv2d(1, 1, 1).float()
self.is_scalar = is_scalar
self.op = ibinary_op if ibinary_op and is_inplace else binary_op
def forward(self, x, y):
x = self.conv1(x)
y = 3 if self.is_scalar else self.conv2(y)
# x = x + y
x = self.op(x, y)
# x = y + x
x = self.op(y, x)
return x
| BinaryOp |
python | neetcode-gh__leetcode | python/0929-unique-email-addresses.py | {
"start": 0,
"end": 422
} | class ____:
def numUniqueEmails(self, emails: list[str]) -> int:
unique_emails: set[str] = set()
for email in emails:
local_name, domain_name = email.split('@')
local_name = local_name.split('+')[0]
local_name = local_name.replace('.', '')
email = local_name + '@' + domain_name
unique_emails.add(email)
return len(unique_emails)
| Solution |
python | pytorch__pytorch | torch/testing/_internal/opinfo/refs.py | {
"start": 5579,
"end": 6861
} | class ____(UnaryUfuncInfo):
"""
An OpInfo for a Python reference of an elementwise unary operation.
"""
def __init__(
self,
name, # the stringname of the callable Python reference
*,
op=None, # the function variant of the operation, populated as torch.<name> if None
op_db=None, # The database of opinfos to search for the parent opinfo
torch_opinfo_name, # the string name of the corresponding torch opinfo
torch_opinfo_variant_name="", # the variant name for corresponding torch opinfo
validate_view_consistency=True,
**kwargs,
): # additional kwargs override kwargs inherited from the torch opinfo
self.torch_opinfo_name = torch_opinfo_name
self.torch_opinfo_variant_name = torch_opinfo_variant_name
self.torch_opinfo = _find_referenced_opinfo(
torch_opinfo_name, torch_opinfo_variant_name, op_db=op_db
)
self.validate_view_consistency = validate_view_consistency
assert isinstance(self.torch_opinfo, UnaryUfuncInfo)
inherited = self.torch_opinfo._original_unary_ufunc_args
ukwargs = _inherit_constructor_args(name, op, inherited, kwargs)
super().__init__(**ukwargs)
| ElementwiseUnaryPythonRefInfo |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 21609,
"end": 21795
} | class ____(models.Model):
name = models.CharField(max_length=15, unique=True)
history = HistoricalRecords(custom_model_name="MyHistoricalCustomNameModel")
| OverrideModelNameAsString |
python | plotly__plotly.py | plotly/graph_objs/scattermapbox/marker/colorbar/_title.py | {
"start": 233,
"end": 4063
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattermapbox.marker.colorbar"
_path_str = "scattermapbox.marker.colorbar.title"
_valid_props = {"font", "side", "text"}
@property
def font(self):
"""
Sets this color bar's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattermapbox.marker.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.scattermapbox.marker.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Defaults to "top" when `orientation` if "v" and
defaults to "right" when `orientation` if "h".
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
@property
def text(self):
"""
Sets the title of the color bar.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattermapbox.
marker.colorbar.Title`
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattermapbox.marker.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermapbox.marker.colorbar.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("side", arg, side)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Title |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.