language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | tensorflow__tensorflow | tensorflow/python/ops/parallel_for/pfor.py | {
"start": 51585,
"end": 186697
class ____:
  """Implementation of rewrite of parallel-for loops.

  This class takes a DAG or a set of DAGs representing the body of a
  parallel-for loop, and adds new operations to the graph that implements
  functionality equivalent to running that loop body for a specified number of
  iterations. This new set of nodes may or may not use a tensorflow loop
  construct.

  The process of conversion does not delete or change any existing operations.
  It only adds operations that efficiently implement the equivalent
  functionality. We refer to the added ops as "converted ops".

  The conversion process uses a simple greedy heuristic. It walks the loop body
  and tries to express the functionality of running each node in a loop with a
  new set of nodes. When converting an op several cases are possible:
  - The op is not inside the loop body. Hence it can be used as is.
  - The op does not depend on the iteration number and is stateless. In this
    case, it can be used as is.
  - The op is not stateful, and depends on iteration number only through control
    dependencies. In this case, we can create a single op with same inputs and
    attributes, but with "converted" control dependencies.
  - The op is not stateful, and all its inputs are loop invariant. In this
    case, similar to above, we can create a single op with same inputs and
    attributes, but with "converted" control dependencies.
  - The op is stateful or at least one of the inputs is not loop invariant. In
    this case, we run the registered converter for that op to create a set of
    converted ops. All nodes in the set will have converted control dependencies
    corresponding to control dependencies of the original op. If the op returned
    multiple outputs, "converted outputs" could be produced by different ops in
    this set.
  """

  def __init__(self,
               loop_var,
               loop_len,
               pfor_ops,
               fallback_to_while_loop,
               all_indices=None,
               all_indices_partitioned=False,
               pfor_config=None,
               warn=False):
    """Creates an object to rewrite a parallel-for loop.

    Args:
      loop_var: Tensor output of a Placeholder operation. The value should
        be an int32 scalar representing the loop iteration number.
      loop_len: A scalar or scalar Tensor representing the number of iterations
        the loop is run for.
      pfor_ops: List of all ops inside the loop body.
      fallback_to_while_loop: If True, on failure to vectorize an op, a while
        loop is used to sequentially execute that op.
      all_indices: If not None, an int32 vector with size `loop_len`
        representing the iteration ids that are still active. These values
        should be unique and sorted. However they may not be contiguous. This is
        typically the case when inside a control flow construct which has
        partitioned the indices of the iterations that are being converted.
      all_indices_partitioned: If True, this object is being constructed from a
        control flow construct where not all the pfor iterations are guaranteed
        to be active.
      pfor_config: PForConfig object used while constructing the loop body.
      warn: Whether or not to warn on while loop conversions.
    """
    assert isinstance(loop_var, tensor_lib.Tensor)
    assert loop_var.op.type == "PlaceholderWithDefault"
    self._loop_var = loop_var
    # Prefer a statically-known loop length when one can be inferred, so that
    # downstream shape inference sees a concrete batch dimension.
    loop_len_value = tensor_util.constant_value(loop_len)
    if loop_len_value is not None:
      loop_len = loop_len_value
      self._loop_len_vector = ops.convert_to_tensor([loop_len])
    else:
      self._loop_len_vector = array_ops.reshape(loop_len, [1])
    self._all_indices_partitioned = all_indices_partitioned
    if all_indices_partitioned:
      assert all_indices is not None
    if all_indices is None:
      self.all_indices = math_ops.range(
          loop_len, dtype=dtypes.int32, name="all_indices"
      )
    else:
      self.all_indices = all_indices
    self._conversion_map = object_identity.ObjectIdentityDictionary()
    # The loop variable itself converts to the (stacked) vector of iteration
    # indices.
    self._conversion_map[loop_var] = wrap(self.all_indices, True)
    self._pfor_ops = set(pfor_ops)
    self._pfor_op_ids = set(x._id for x in pfor_ops)
    self._fallback_to_while_loop = fallback_to_while_loop
    self._warn = warn
    self._pfor_config = pfor_config

  def op_is_inside_loop(self, op):
    """True if op was created inside the pfor loop body."""
    assert isinstance(op, ops.Operation)
    # Note that we use self._pfor_op_ids for the check and not self._pfor_ops
    # since it appears there tensorflow API could return different python
    # objects representing the same Operation node.
    return op._id in self._pfor_op_ids

  def _convert_sparse(self, y):
    """Returns the converted value corresponding to SparseTensor y.

    For SparseTensors, instead of stacking the component tensors separately,
    resulting in component tensors with shapes (N, m, rank), (N, m), and (N,
    rank) respectively for indices, values, and dense_shape (where N is the loop
    length and m is the number of sparse tensor values per loop iter), we want
    to logically stack the SparseTensors, to create a SparseTensor whose
    components are size (N * m, rank + 1), (N * m, ), and (rank + 1,)
    respectively.

    Here, we try to get the conversion of each component tensor.
    If the tensors are stacked via a sparse conversion, return the resulting
    SparseTensor composed of the converted components. Otherwise, the component
    tensors are either unstacked or stacked naively. In the latter case, we
    unstack the component tensors to reform loop_len SparseTensor elements,
    then correctly batch them.

    The unstacked tensors must have the same rank. Each dimension of each
    SparseTensor will expand to be the largest among all SparseTensor elements
    for that dimension. For example, if there are N SparseTensors of rank 3
    being stacked, with N dense shapes, where the i_th shape is (x_i, y_i, z_i),
    the new dense shape will be (N, max_i(x_i), max_i(y_i), max_i(z_i)).

    Args:
      y: A tf.sparse.SparseTensor.

    Returns:
      A tf.sparse.SparseTensor that is the converted value corresponding to y.
    """
    outputs = [
        self._convert_helper(t) for t in (y.indices, y.values, y.dense_shape)
    ]
    assert all(isinstance(o, WrappedTensor) for o in outputs)
    if all(w.is_sparse_stacked for w in outputs):
      return sparse_tensor.SparseTensor(*[w.t for w in outputs])
    assert not any(w.is_sparse_stacked for w in outputs), (
        "Error converting SparseTensor. All components should be logically "
        "stacked, or none.")
    # If component tensors were not sparsely stacked, they are either unstacked
    # or stacked without knowledge that they are components of sparse tensors.
    # In this case, we have to restack them.
    return self._restack_sparse_tensor_logically(
        *[self._unwrap_or_tile(w) for w in outputs])

  def _restack_sparse_tensor_logically(self, indices, values, shape):
    """Rebuilds a batched SparseTensor from per-iteration components."""
    sparse_tensor_rank = indices.get_shape().dims[-1].value
    if sparse_tensor_rank is not None:
      # The restacked tensor gains a leading batch dimension.
      sparse_tensor_rank += 1

    def fn(args):
      res = gen_sparse_ops.serialize_sparse(
          args[0], args[1], args[2], out_type=dtypes.variant)
      return res

    # Applies a map function to the component tensors to serialize each
    # sparse tensor element and batch them all, then deserializes the batch.
    # TODO(rachelim): Try to do this without map_fn -- add the right offsets
    # to shape and indices tensors instead.
    result = map_fn.map_fn(fn, [indices, values, shape], dtype=dtypes.variant)
    return sparse_ops.deserialize_sparse(
        result, dtype=values.dtype, rank=sparse_tensor_rank)

  def _unwrap_or_tile(self, wrapped_tensor):
    """Given a wrapped tensor, unwrap if stacked. Otherwise, tiles it."""
    output, is_stacked = wrapped_tensor.t, wrapped_tensor.is_stacked
    if is_stacked:
      return output
    else:
      # Loop-invariant value: replicate it `loop_len` times along a new axis.
      return _stack(output, self._loop_len_vector).t

  def convert(self, y):
    """Returns the converted value corresponding to y.

    Args:
      y: A Tensor or a ops.Operation object. If latter, y should not have
        any outputs.

    Returns:
      If y does not need to be converted, it returns y as is. Else it returns
      the "converted value" corresponding to y.
    """
    if y is None:
      return None
    if isinstance(y, sparse_tensor.SparseTensor):
      return self._convert_sparse(y)
    assert isinstance(y, (tensor_lib.Tensor, ops.Operation)), y
    output = self._convert_helper(y)
    if isinstance(output, WrappedTensor):
      assert isinstance(y, tensor_lib.Tensor)
      return self._unwrap_or_tile(output)
    else:
      assert isinstance(y, ops.Operation)
      assert not y.outputs
      assert isinstance(output, ops.Operation)
      return output

  def _was_converted(self, t):
    """True if t is not a conversion of itself."""
    converted_t = self._conversion_map[t]
    return converted_t.t is not t

  def _add_conversion(self, old_output, new_output):
    """Records that `old_output` converts to `new_output`."""
    assert isinstance(
        old_output, (tensor_lib.Tensor, ops.Operation)), old_output
    assert isinstance(new_output, (WrappedTensor, ops.Operation)), new_output
    self._conversion_map[old_output] = new_output

  def _convert_reduction(self, y):
    """Converts y when it is a placeholder for a pfor reduction, else None."""
    # Handle reductions.
    if self._pfor_config is None or isinstance(y, ops.Operation):
      return None
    reduction = self._pfor_config._lookup_reduction(y)
    if reduction is None:
      return None
    (reduction_fn, reduction_args) = reduction
    batched_args = []
    for reduction_arg in reduction_args:
      assert isinstance(reduction_arg, tensor_lib.Tensor), reduction_arg
      # Tensor being reduced should already be converted due to a control
      # dependency on the created placeholder.
      # Note that in cases where reduction_arg is in an outer context, one
      # needs to locate the corresponding Enter node and use that to lookup
      # the conversion.
      # TODO(agarwal): handle reductions inside control flow constructs.
      assert reduction_arg in self._conversion_map, (
          "Unable to handle reduction of %s, possibly as it was used "
          "inside a control flow construct. Note that reductions across "
          "pfor iterations are currently not supported inside control flow "
          "constructs." % reduction_arg)
      batched_arg = self._conversion_map[reduction_arg]
      batched_args.append(self._unwrap_or_tile(batched_arg))
    outputs = reduction_fn(*batched_args)
    # Reduction outputs are loop invariant by construction.
    return [wrap(output, False) for output in nest.flatten(outputs)]

  def _convert_helper(self, op_or_tensor):
    """Converts `op_or_tensor` and, iteratively, its transitive inputs.

    Uses an explicit work deque instead of recursion: a node is converted only
    after all its (control) inputs have conversions in self._conversion_map.
    """
    stack = collections.deque([op_or_tensor])
    while stack:
      y = stack[0]
      if y in self._conversion_map:
        assert isinstance(self._conversion_map[y],
                          (WrappedTensor, ops.Operation))
        stack.popleft()
        continue
      if isinstance(y, ops.Operation):
        assert not y.outputs, (
            "We only support converting Operation objects with no outputs. "
            "Got %s", y)
        y_op = y
      else:
        assert isinstance(y, tensor_lib.Tensor), y
        y_op = y.op

      is_while_loop = y_op.type == "Exit"
      if is_while_loop:
        while_op = WhileOp(
            y, pfor_ops=self._pfor_ops,
            fallback_to_while_loop=self.fallback_to_while_loop,
            pfor_config=self._pfor_config)
        is_inside_loop = while_op.is_inside_loop
        # If all nodes in the while_loop graph were created inside the pfor, we
        # treat the whole loop subgraph as a single op (y_op) and try to convert
        # it. For while_loops that are created completely or partially outside,
        # we treat them as external and should be able to simply return the Exit
        # node output as is without needing any conversion. Note that for
        # while_loops that are partially constructed inside, we assume they will
        # be loop invariant. If that is not the case, it will create runtime
        # errors since the converted graph would depend on the self._loop_var
        # placeholder.
        if is_inside_loop:
          y_op = while_op
      else:
        is_inside_loop = self.op_is_inside_loop(y_op)

      # If this op was not created inside the loop body, we will return as is.
      # 1. Convert inputs and control inputs.

      def _add_to_stack(x):
        if x not in self._conversion_map:
          stack.appendleft(x)
          return True
        else:
          return False

      if is_inside_loop:
        added_to_stack = False
        for inp in y_op.inputs:
          added_to_stack |= _add_to_stack(inp)
        for cinp in y_op.control_inputs:
          if cinp.outputs:
            for t in cinp.outputs:
              added_to_stack |= _add_to_stack(t)
          else:
            added_to_stack |= _add_to_stack(cinp)
        if added_to_stack:
          # Some dependency still unconverted: revisit y after it is done.
          continue

        converted_inputs = [self._conversion_map[inp] for inp in y_op.inputs]
        some_input_converted = any(self._was_converted(x) for x in y_op.inputs)
        some_input_stacked = any(x.is_stacked for x in converted_inputs)

        converted_control_ops = set()
        some_control_input_converted = False
        for cinp in y_op.control_inputs:
          if cinp.outputs:
            for t in cinp.outputs:
              converted_t = self._conversion_map[t]
              if self._was_converted(t):
                some_control_input_converted = True
              converted_control_ops.add(converted_t.t.op)
          else:
            converted_cinp = self._conversion_map[cinp]
            assert isinstance(converted_cinp, ops.Operation)
            if converted_cinp != cinp:
              some_control_input_converted = True
            converted_control_ops.add(converted_cinp)
        converted_control_ops = list(converted_control_ops)
        is_stateful = _is_stateful_pfor_op(y_op)
      else:
        converted_inputs = []
        converted_control_ops = []
      logging.vlog(3, "converting op:%s\ninputs:%s\ncontrol_inputs:%s", y_op,
                   converted_inputs, converted_control_ops)

      # 2. Convert y_op
      # If converting a while_loop, we let the while_loop convertor deal with
      # putting the control dependencies appropriately.
      control_dependencies = [] if is_while_loop else converted_control_ops
      with ops.control_dependencies(control_dependencies), ops.name_scope(
          y_op.name + "/pfor/"), ops.get_default_graph()._original_op(y_op):
        # Op is a placeholder for a reduction.
        reduce_output = self._convert_reduction(y)
        if reduce_output is not None:
          new_outputs = reduce_output
        # None of the inputs and control inputs were converted.
        elif ((not is_inside_loop or
               (not is_stateful and not some_input_converted and
                not some_control_input_converted)) and
              y.graph == ops.get_default_graph()):
          if y is y_op:
            assert not isinstance(y_op, WhileOp)
            new_outputs = y_op
          else:
            new_outputs = [wrap(x, False) for x in y_op.outputs]
        elif not (is_stateful or is_while_loop or some_input_stacked):
          # All inputs are unstacked or unconverted but some control inputs are
          # converted.
          # TODO(rachelim): Handle the case where some inputs are sparsely
          # stacked (i.e. any(x.is_sparse_stacked for x in converted_inputs))
          new_op = _create_op(y_op.type, [x.t for x in converted_inputs],
                              [x.dtype for x in y_op.outputs],
                              y_op.node_def.attr)
          if y is y_op:
            new_outputs = new_op
          else:
            new_outputs = []
            for old_output, new_output in zip(y_op.outputs, new_op.outputs):
              handle_data_util.copy_handle_data(old_output, new_output)
              new_outputs.append(wrap(new_output, False))
        else:
          # Either some inputs are not loop invariant or op is stateful.
          if hasattr(y_op, "pfor_converter"):
            converter = y_op.pfor_converter
          else:
            converter = _pfor_converter_registry.get(y_op.type, None)
          if converter is None:
            root_cause = "there is no registered converter for this op."
            has_variant_outputs = any(x.dtype == dtypes.variant for x in
                                      y_op.outputs)
            has_vectorized_variant_inputs = any(
                _is_variant_with_internal_stacking(x) for x in
                y_op.inputs)
            if (self._fallback_to_while_loop and not has_variant_outputs
                and not has_vectorized_variant_inputs):
              converter = functools.partial(
                  _fallback_converter, root_cause=root_cause, warn=self._warn)
            else:
              message = (f"No pfor vectorization defined for {y_op.type}\n"
                         f"{y_op}\n inputs: {converted_inputs}.")
              if not self._fallback_to_while_loop:
                message += ("Consider enabling the fallback_to_while_loop "
                            "option to pfor, which may run slower.")
              raise ValueError(message)
          # TODO(rachelim): Handle the case where some inputs are sparsely
          # stacked. We should only call the converter if it supports handling
          # those inputs.
          pfor_inputs = _PforInput(self, y_op, converted_inputs)
          try:
            try:
              new_outputs = converter(pfor_inputs)
            except ConversionNotImplementedError as e:
              has_vectorized_variant_inputs = any(
                  _is_variant_with_internal_stacking(x) for x in
                  y_op.inputs)
              if (self._fallback_to_while_loop
                  and not has_vectorized_variant_inputs):
                new_outputs = _fallback_converter(
                    pfor_inputs, root_cause=str(e))
              else:
                raise ValueError(str(e)).with_traceback(sys.exc_info()[2])
          except Exception as e:  # pylint: disable=broad-except
            logging.error(
                f"Got error while pfor was converting op {y_op} with inputs "
                f"{y_op.inputs[:]}\n, converted inputs {pfor_inputs.inputs}\n"
                f"Here are the pfor conversion stack traces: {e}")
            original_op = y_op
            while isinstance(original_op, ops.Operation):
              logging.error(
                  "%s\ncreated at:\n %s", original_op,
                  " ".join(traceback.format_list(original_op.traceback)))
              original_op = original_op._original_op
            raise

          if isinstance(new_outputs, WrappedTensor):
            new_outputs = [new_outputs]
          assert isinstance(new_outputs,
                            (list, tuple, ops.Operation)), new_outputs
        logging.vlog(2, f"converted {y_op} {new_outputs}")

        # Insert into self._conversion_map
        if y is y_op:
          assert isinstance(new_outputs, ops.Operation)
          self._add_conversion(y_op, new_outputs)
        else:
          assert len(y_op.outputs) == len(new_outputs), (y_op, y_op.outputs,
                                                         new_outputs)
          for old_output, new_output in zip(y_op.outputs, new_outputs):
            assert isinstance(new_output, WrappedTensor), (new_output, y, y_op)
            assert old_output.dtype == new_output.t.dtype, (new_output, y, y_op)
            # Set shape for converted output.
            output_shape = old_output.shape
            if not new_output.is_sparse_stacked:
              if new_output.is_stacked:
                loop_len = tensor_util.constant_value(self.loop_len_vector)
                if loop_len is None:
                  batch_dim = tensor_shape.TensorShape([None])
                else:
                  batch_dim = tensor_shape.TensorShape(loop_len)
                output_shape = batch_dim.concatenate(output_shape)
              if _is_variant_with_internal_stacking(new_output.t):
                # Variants with internal vectorization carry their own shape
                # information; leave the outer shape unknown/scalar.
                new_output.t.set_shape([])
              else:
                new_output.t.set_shape(output_shape)
            self._add_conversion(old_output, new_output)
      stack.popleft()
    return self._conversion_map[op_or_tensor]

  @property
  def loop_len_vector(self):
    """Returns a single element vector whose value is number of iterations."""
    return self._loop_len_vector

  @property
  def loop_var(self):
    """Returns placeholder loop variable."""
    return self._loop_var

  @property
  def pfor_ops(self):
    return self._pfor_ops

  @property
  def pfor_config(self):
    return self._pfor_config

  @property
  def all_indices_partitioned(self):
    """all_indices_partitioned property.

    Returns:
      True if we are inside a control flow construct and not all pfor iterations
      may be active.
    """
    return self._all_indices_partitioned

  @property
  def fallback_to_while_loop(self):
    return self._fallback_to_while_loop
# The code below defines converters for different operations. Please see the
# comment on RegisterPFor for how converters should be defined.
# image_ops
@RegisterPFor("AdjustContrastv2")
def _convert_adjust_contrastv2(pfor_input: _PforInput):
  """Vectorizes AdjustContrastv2 over the pfor loop dimension."""
  # The op already maps over a leading batch axis, so the stacked images can
  # be passed straight through; the contrast factor is loop invariant.
  stacked_images = pfor_input.stacked_input(0)
  factor = pfor_input.unstacked_input(1)
  result = gen_image_ops.adjust_contrastv2(stacked_images, factor)
  return wrap(result, True)
@RegisterPFor("AdjustHue")
def _convert_adjust_hue(pfor_input: _PforInput):
  """Vectorizes AdjustHue over the pfor loop dimension."""
  # AdjustHue broadcasts over leading axes, so the stacked images tensor is
  # handled by a single op call; the hue delta is loop invariant.
  stacked_images = pfor_input.stacked_input(0)
  hue_delta = pfor_input.unstacked_input(1)
  return wrap(gen_image_ops.adjust_hue(stacked_images, hue_delta), True)
@RegisterPFor("AdjustSaturation")
def _convert_adjust_saturation(pfor_input: _PforInput):
  """Vectorizes AdjustSaturation over the pfor loop dimension."""
  # Like the other image adjustments, the op maps over leading axes, so the
  # stacked input needs no reshaping; the scale is loop invariant.
  stacked_images = pfor_input.stacked_input(0)
  saturation_scale = pfor_input.unstacked_input(1)
  result = gen_image_ops.adjust_saturation(stacked_images, saturation_scale)
  return wrap(result, True)
# nn_ops
def _flatten_first_two_dims(x):
  """Merges first two dimensions."""
  # Build the target shape [-1, d2, d3, ...] dynamically so unknown static
  # shapes are handled.
  shape = array_ops.shape(x)
  merged_dim = constant_op.constant([-1], dtype=shape.dtype)
  target_shape = array_ops.concat([merged_dim, shape[2:]], axis=0)
  return array_ops.reshape(x, target_shape)
def _unflatten_first_dim(x, first_dim):
  """Splits first dimension into [first_dim, -1]."""
  shape = array_ops.shape(x)
  # Cast so `first_dim` can be concatenated with the dynamic shape vector.
  leading = math_ops.cast(first_dim, shape.dtype)
  remainder = constant_op.constant([-1], dtype=shape.dtype)
  target_shape = array_ops.concat([leading, remainder, shape[1:]], axis=0)
  return array_ops.reshape(x, target_shape)
def _inputs_with_flattening(pfor_input: _PforInput, input_indices):
  """Stacks and flattens first dim of inputs at indices `input_indices`."""
  if input_indices is None:
    input_indices = []
  # Force the selected inputs into stacked form before flattening them.
  pfor_input.stack_inputs(stack_indices=input_indices)
  flattened = []
  for idx in range(pfor_input.num_inputs):
    if idx in input_indices:
      # Merge the loop dimension into the op's batch dimension.
      flattened.append(_flatten_first_two_dims(pfor_input.stacked_input(idx)))
    else:
      flattened.append(pfor_input.unstacked_input(idx))
  return flattened
@RegisterPForWithArgs("Conv2D", dims=[0])
@RegisterPForWithArgs("DepthToSpace", dims=[0])
@RegisterPForWithArgs("AvgPool", dims=[0])
@RegisterPForWithArgs("AvgPool3D", dims=[0])
@RegisterPForWithArgs("MaxPool", dims=[0])
@RegisterPForWithArgs("MaxPoolV2", dims=[0])
@RegisterPForWithArgs("MaxPool3D", dims=[0])
@RegisterPForWithArgs("MaxPool3DGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("MaxPoolGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("MaxPoolGradV2", dims=[0, 1, 2])
@RegisterPForWithArgs("MaxPool3DGradGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("MaxPoolGradGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("MaxPoolGradGradV2", dims=[0, 1, 2])
@RegisterPForWithArgs("SoftmaxCrossEntropyWithLogits", dims=[0, 1])
@RegisterPForWithArgs("SparseSoftmaxCrossEntropyWithLogits", dims=[0, 1])
@RegisterPForWithArgs("SpaceToDepth", dims=[0])
def _convert_flatten_batch(pfor_input: _PforInput, op_type, dims):
  """Vectorizes batch-parallel ops by folding the loop dim into the batch dim.

  The inputs listed in `dims` are stacked and their first two dims merged, the
  original op is re-created on the flattened inputs, and every output is split
  back into [loop_len, batch, ...].
  """
  del op_type
  flat_inputs = _inputs_with_flattening(pfor_input, dims)
  op_outputs = _create_op(
      pfor_input.op_type,
      flat_inputs, [out.dtype for out in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs
  loop_len = pfor_input.pfor.loop_len_vector
  return [
      wrap(_unflatten_first_dim(out, loop_len), True) for out in op_outputs
  ]
# Cache for _channel_flatten_input, keyed by (graph, tensor ref, data_format),
# so repeated conversions of the same tensor reuse the transpose/reshape ops.
_channel_flatten_input_cache = {}
@RegisterPFor("BatchToSpaceND")
def _convert_batch_to_space_nd(pfor_input: _PforInput):
  """Vectorizes BatchToSpaceND.

  The loop dimension cannot simply be merged with the batch dimension, because
  BatchToSpaceND interleaves the leading block factor with the batch; the input
  is therefore transposed so the loop axis sits between the block factor and
  the per-iteration batch before calling the op.
  """
  inp = pfor_input.stacked_input(0)
  block_shape = pfor_input.unstacked_input(1)
  crops = pfor_input.unstacked_input(2)

  inp_shape = array_ops.shape(inp)
  n = math_ops.cast(pfor_input.pfor.loop_len_vector, inp_shape.dtype)
  block_shape = math_ops.cast(block_shape, inp_shape.dtype)
  # Reshape and transpose to move the vectorization axis inside the axes that
  # will move to space.
  # Reshape to 4D and transpose
  block_size = math_ops.reduce_prod(block_shape)
  neg_one = constant_op.constant(-1, dtype=inp_shape.dtype)
  new_shape = [n[0], block_size, inp_shape[1] // block_size, neg_one]
  inp = array_ops.reshape(inp, new_shape)
  inp = array_ops.transpose(inp, [1, 0, 2, 3])
  # Reshape back to merge the block, vectorization and batch dimension, and
  # restore the other dimensions.
  new_shape = array_ops.concat([n * inp_shape[1], inp_shape[2:]], axis=0)
  inp = array_ops.reshape(inp, new_shape)
  # Call batch_to_space and then split the new batch axis.
  output = gen_array_ops.batch_to_space_nd(inp, block_shape, crops)
  output = _unflatten_first_dim(output, n)
  return wrap(output, True)
@RegisterPFor("SpaceToBatchND")
def _convert_space_to_batch_nd(pfor_input: _PforInput):
  """Vectorizes SpaceToBatchND.

  The op is run on the input with loop and batch dims merged; since
  SpaceToBatchND places the block factor ahead of the batch, the result is
  reshaped/transposed so the loop dimension is moved back out in front.
  """
  inp = pfor_input.stacked_input(0)
  block_shape = pfor_input.unstacked_input(1)
  paddings = pfor_input.unstacked_input(2)

  inp_shape = array_ops.shape(inp)
  n = math_ops.cast(pfor_input.pfor.loop_len_vector, inp_shape.dtype)
  block_shape = math_ops.cast(block_shape, inp_shape.dtype)
  # Merge loop dim with the batch dim and run the op on the bigger batch.
  inp = _flatten_first_two_dims(inp)
  output = gen_array_ops.space_to_batch_nd(inp, block_shape, paddings)
  output_shape = array_ops.shape(output)
  block_size = math_ops.reduce_prod(block_shape)
  neg_one = constant_op.constant(-1, dtype=inp_shape.dtype)
  # Output batch is ordered [block_size, n, per-iter batch]; swap block and
  # loop axes so the loop dimension can be split back out in front.
  new_shape = [block_size, n[0], neg_one]
  output = array_ops.reshape(output, new_shape)
  output = array_ops.transpose(output, [1, 0, 2])
  new_shape = array_ops.concat(
      [n, block_size * inp_shape[1:2], output_shape[1:]], axis=0)
  output = array_ops.reshape(output, new_shape)
  return wrap(output, True)
def _channel_flatten_input(x, data_format):
  """Merge the stack dimension with the channel dimension.

  If S is pfor's stacking dimension, then,
    - for SNCHW, we transpose to NSCHW. If N dimension has size 1, the transpose
      should be cheap.
    - for SNHWC, we transpose to NHWSC.
  We then merge the S and C dimension.

  Args:
    x: tensor_lib.Tensor to transform.
    data_format: "NCHW" or "NHWC".

  Returns:
    A 3-element tuple with the transformed value, along with the shape for
    reshape and order for transpose required to transform back.
  """
  graph = ops.get_default_graph()
  # Results are cached per (graph, tensor, format) so repeated conversions of
  # the same tensor reuse the already-built transpose/reshape ops.
  cache_key = (graph, x.ref(), data_format)
  if cache_key not in _channel_flatten_input_cache:
    x_shape = array_ops.shape(x)
    neg_ones = constant_op.constant([-1], dtype=x_shape.dtype)
    if data_format == b"NCHW":
      order = [1, 0, 2, 3, 4]
      shape = array_ops.concat([x_shape[1:2], neg_ones, x_shape[3:]], axis=0)
      reverse_order = order
    else:
      order = [1, 2, 3, 0, 4]
      shape = array_ops.concat([x_shape[1:4], neg_ones], axis=0)
      reverse_order = [3, 0, 1, 2, 4]
    # Move S dimension next to C dimension.
    x = array_ops.transpose(x, order)
    reverse_shape = array_ops.shape(x)
    # Reshape to merge the S and C dimension.
    x = array_ops.reshape(x, shape)
    outputs = x, reverse_order, reverse_shape
    _channel_flatten_input_cache[cache_key] = outputs
  else:
    outputs = _channel_flatten_input_cache[cache_key]
  return outputs
# Note that with training=True, running FusedBatchNormV3 on individual examples
# is very different from running FusedBatchNormV3 on a batch of those examples.
# This is because, for the latter case, the operation can be considered as first
# computing the mean and variance over all the examples and then using these
# to scale all those examples. This creates a data dependency between these
# different "iterations" since the inputs to the scaling step depends on the
# statistics coming from all these inputs.
# As with other kernels, the conversion here effectively runs the kernel
# independently for each iteration, and returns outputs by stacking outputs from
# each of those iterations.
@RegisterPFor("FusedBatchNormV3")
def _convert_fused_batch_norm(pfor_input: _PforInput):
  """Vectorizes FusedBatchNormV3 for both training and inference modes."""
  is_training = pfor_input.get_attr("is_training")
  # When BatchNorm is used with training=False, mean and variance are provided
  # externally and used as is by the op. Thus, we can merge the S and N
  # dimensions as we do for regular operations.
  # When BatchNorm is used with training=True, mean and variance are computed
  # for each channel across the batch dimension (first one). If we merge S and N
  # dimensions, mean and variances will be computed over a larger set. So, we
  # merge the S and C dimensions instead.
  if not is_training:
    # We return zeros for batch_mean and batch_variance output. Note that CPU
    # and GPU seem to have different behavior for those two outputs. CPU outputs
    # zero because these values are not used during inference. GPU outputs
    # something, probably real means and variances.
    inputs = _inputs_with_flattening(pfor_input, [0])
    outputs = _create_op(
        pfor_input.op_type,
        inputs, [x.dtype for x in pfor_input.outputs],
        attrs=pfor_input.op.node_def.attr).outputs
    y = outputs[0]
    n = pfor_input.pfor.loop_len_vector
    y = _unflatten_first_dim(y, n)
    mean = pfor_input.unstacked_input(3)
    zeros = array_ops.zeros_like(mean)
    # Only the normalized output is stacked; the five statistics outputs are
    # zero placeholders, unstacked.
    return [wrap(y, True)] + [wrap(zeros, False)] * 5

  pfor_input.stack_inputs()
  data_format = pfor_input.get_attr("data_format")
  # We merge the first dimension with the "C" dimension, run FusedBatchNormV3,
  # and then transpose back.
  x = pfor_input.stacked_input(0)
  x, reverse_order, reverse_shape = _channel_flatten_input(x, data_format)
  # Note that we stack all the other inputs as well so that they are the same
  # size as the new size of the channel dimension.
  inputs = [x] + [
      array_ops.reshape(pfor_input.stacked_input(i), [-1])
      for i in range(1, pfor_input.num_inputs)
  ]
  outputs = _create_op(
      pfor_input.op_type,
      inputs, [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs
  y = outputs[0]
  # Undo the channel flattening on the main output.
  y = array_ops.reshape(y, reverse_shape)
  y = array_ops.transpose(y, reverse_order)
  n = pfor_input.pfor.loop_len_vector
  outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]]
  outputs = [y] + outputs
  return [wrap(x, True) for x in outputs]
@RegisterPFor("FusedBatchNormGradV3")
def _convert_fused_batch_norm_grad(pfor_input: _PforInput):
  """Vectorizes FusedBatchNormGradV3 by folding the loop dim into channels.

  Mirrors _convert_fused_batch_norm's training path: S and C dims of both
  y_backprop and x are merged, the grad op runs once, and x_backprop is
  transposed/reshaped back.
  """
  pfor_input.stack_inputs()
  data_format = pfor_input.get_attr("data_format")
  y_backprop = pfor_input.stacked_input(0)
  y_backprop, _, _ = _channel_flatten_input(y_backprop, data_format)
  x = pfor_input.stacked_input(1)
  x, x_reverse_order, x_reverse_shape = _channel_flatten_input(x, data_format)
  # Remaining inputs are per-channel vectors; flatten them to match the
  # enlarged channel dimension.
  inputs = [y_backprop, x] + [
      array_ops.reshape(pfor_input.stacked_input(i), [-1])
      for i in range(2, pfor_input.num_inputs)
  ]
  outputs = _create_op(
      pfor_input.op_type,
      inputs, [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs
  x_backprop = outputs[0]
  x_backprop = array_ops.reshape(x_backprop, x_reverse_shape)
  x_backprop = array_ops.transpose(x_backprop, x_reverse_order)
  n = pfor_input.pfor.loop_len_vector
  outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]]
  outputs = [x_backprop] + outputs
  return [wrap(output, True) for output in outputs]
@RegisterPForWithArgs("Conv2DBackpropInput", flatten_dims=[2], shape_dim=0)
@RegisterPForWithArgs("AvgPoolGrad", flatten_dims=[1], shape_dim=0)
@RegisterPForWithArgs("AvgPool3DGrad", flatten_dims=[1], shape_dim=0)
def _convert_flatten_batch_shape_input(
    pfor_input: _PforInput, op_type, flatten_dims, shape_dim):
  """Like _convert_flatten_batch, for ops that also take an output-shape input.

  The input at `shape_dim` is a shape vector whose batch entry (its first
  element) must be scaled by loop_len to match the flattened inputs.
  """
  del op_type
  inputs = _inputs_with_flattening(pfor_input, flatten_dims)
  n = pfor_input.pfor.loop_len_vector
  # Adjust the `input_sizes` input.
  ones = array_ops.ones([array_ops.shape(inputs[shape_dim])[0] - 1],
                        dtype=n.dtype)
  # Multiply only the leading (batch) entry of the shape vector by n.
  inputs[shape_dim] *= array_ops.concat([n, ones], axis=0)
  outputs = _create_op(
      pfor_input.op_type,
      inputs, [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs
  outputs = [_unflatten_first_dim(x, n) for x in outputs]
  return [wrap(x, True) for x in outputs]
@RegisterPFor("Conv2DBackpropFilter")
def _convert_conv2d_backprop_filter(pfor_input: _PforInput):
  """Vectorizes Conv2DBackpropFilter.

  When the forward inputs are loop invariant, the gradients' loop dim is folded
  into the channel dim and a single (larger) filter backprop is computed; when
  the inputs are stacked too, falls back to a sequential while_loop.
  """
  pfor_input.stack_inputs(stack_indices=[2])
  inputs, inputs_stacked, _ = pfor_input.input(0)
  filter_sizes = pfor_input.unstacked_input(1)
  grads = pfor_input.stacked_input(2)
  strides = pfor_input.get_attr("strides")
  padding = pfor_input.get_attr("padding")
  use_cudnn_on_gpu = pfor_input.get_attr("use_cudnn_on_gpu")
  data_format = pfor_input.get_attr("data_format")
  dilations = pfor_input.get_attr("dilations")
  if inputs_stacked:
    # TODO(agarwal): Implement this efficiently.
    logging.warning("Conv2DBackpropFilter uses a while_loop. Fix that!")

    def while_body(i, ta):
      # Compute the filter gradient for iteration i alone and accumulate.
      inp_i = inputs[i, ...]
      grad_i = grads[i, ...]
      output = nn_ops.conv2d_backprop_filter(
          inp_i,
          filter_sizes,
          grad_i,
          strides=strides,
          padding=padding,
          use_cudnn_on_gpu=use_cudnn_on_gpu,
          data_format=data_format,
          dilations=dilations)
      return i + 1, ta.write(i, output)

    n = array_ops.reshape(pfor_input.pfor.loop_len_vector, [])
    _, ta = while_loop.while_loop(
        lambda i, ta: i < n, while_body,
        (0, tensor_array_ops.TensorArray(inputs.dtype, n)))
    output = ta.stack()
    return wrap(output, True)
  else:
    # We merge the stack dimension with the channel dimension of the gradients
    # and pretend we had a larger filter (see change to filter_sizes below).
    # Once the filter backprop is computed, we reshape and transpose back
    # appropriately.
    grads, _, _ = _channel_flatten_input(grads, data_format)
    n = pfor_input.pfor.loop_len_vector
    old_filter_sizes = filter_sizes
    # Scale the output-channel entry of filter_sizes by the loop length.
    filter_sizes *= array_ops.concat([[1, 1, 1], n], axis=0)
    output = nn_ops.conv2d_backprop_filter(
        inputs,
        filter_sizes,
        grads,
        strides=strides,
        padding=padding,
        use_cudnn_on_gpu=use_cudnn_on_gpu,
        data_format=data_format,
        dilations=dilations)
    new_filter_shape = array_ops.concat([old_filter_sizes[:3], n, [-1]], axis=0)
    output = array_ops.reshape(output, new_filter_shape)
    # Move the loop dimension back to the front.
    output = array_ops.transpose(output, [3, 0, 1, 2, 4])
    return wrap(output, True)
def _flatten_with_inner_dim(x, dim, x_rank):
  """Merges the first dim with the specified dim."""
  # Capture the shape of the untransposed input; the pieces below index it.
  orig_shape = array_ops.shape(x)
  perm = list(range(1, dim)) + [0] + list(range(dim, x_rank))
  transposed = array_ops.transpose(x, perm)
  if dim < x_rank - 1:
    pieces = [orig_shape[1:dim], [-1], orig_shape[dim + 1:]]
  else:
    pieces = [orig_shape[1:dim], [-1]]
  merged_shape = array_ops.concat(pieces, axis=0)
  return array_ops.reshape(transposed, merged_shape)
def _unflatten_with_inner_dim(x, dim, x_rank, stack_size):
  """Undoes _flatten_with_inner_dim."""
  cur_shape = array_ops.shape(x)
  # Split axis `dim` into [stack_size, -1].
  if dim < x_rank - 1:
    pieces = [cur_shape[:dim], [stack_size], [-1], cur_shape[dim + 1:]]
  else:
    pieces = [cur_shape[:dim], [stack_size], [-1]]
  expanded = array_ops.reshape(x, array_ops.concat(pieces, axis=0))
  # Move the recovered stack axis back to the front.
  perm = [dim] + list(range(dim)) + list(range(dim + 1, x_rank + 1))
  return array_ops.transpose(expanded, perm)
@RegisterPFor("DepthwiseConv2dNative")
def _convert_depthwise_conv2d_native(pfor_input: _PforInput):
  """Vectorizes DepthwiseConv2dNative by folding the loop dim into channels."""
  # Kernel can be vectorized, so folding to batch dimension does not work. We
  # instead fold into the channel dimension because it is parallel.
  stack_size = pfor_input.pfor.loop_len_vector[0]
  data_format = pfor_input.get_attr("data_format")
  c_dim = 1 if data_format == b"NCHW" else 3
  # Stacked inputs are rank 5 (loop dim + NHWC/NCHW); merge the loop dim into
  # the channel dim of both the input and the kernel.
  t = _flatten_with_inner_dim(pfor_input.stacked_input(0), c_dim + 1, 5)
  kernel = _flatten_with_inner_dim(pfor_input.stacked_input(1), 3, 5)
  conv = _create_op(
      "DepthwiseConv2dNative", [t, kernel],
      [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs[0]
  return wrap(_unflatten_with_inner_dim(conv, c_dim, 4, stack_size), True)
@RegisterPFor("DepthwiseConv2dNativeBackpropInput")
def _convert_depthwise_conv2d_native_backprop_input(pfor_input: _PforInput):
  """Vectorizes DepthwiseConv2dNativeBackpropInput.

  Like the forward op, the loop dimension is folded into the channel
  dimension, which is parallel for depthwise convolutions. `input_sizes` must
  therefore be scaled by the loop length at the channel position before
  calling the underlying kernel.
  """
  stack_size = pfor_input.pfor.loop_len_vector[0]
  input_sizes = pfor_input.unstacked_input(0)
  data_format = pfor_input.get_attr("data_format")
  c_dim = 1 if data_format == b"NCHW" else 3
  # Build a per-dimension multiplier vector that is 1 everywhere except at the
  # channel dimension, which gets multiplied by the loop length.
  # (Fixed typo: was `input_sizes_mutipliers`.)
  input_sizes_multipliers = [
      constant_op.constant([1] * c_dim, dtype=dtypes.int32), [stack_size]
  ]
  if c_dim < 3:
    input_sizes_multipliers += [
        constant_op.constant([1] * (3 - c_dim), dtype=dtypes.int32)
    ]
  input_sizes *= array_ops.concat(input_sizes_multipliers, axis=0)
  kernel = _flatten_with_inner_dim(pfor_input.stacked_input(1), 3, 5)
  out_backprop = _flatten_with_inner_dim(
      pfor_input.stacked_input(2), c_dim + 1, 5)
  result = _create_op(
      "DepthwiseConv2dNativeBackpropInput", [input_sizes, kernel, out_backprop],
      [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs[0]
  return wrap(_unflatten_with_inner_dim(result, c_dim, 4, stack_size), True)
@RegisterPFor("DepthwiseConv2dNativeBackpropFilter")
def _convert_depthwise_conv2d_native_backprop_filter(pfor_input: _PforInput):
  """Vectorizes DepthwiseConv2dNativeBackpropFilter.

  The loop dimension is folded into the (parallel) channel dimension of the
  inputs and gradients, and `filter_sizes` is scaled to match at the
  input-channel position (index 2 of [h, w, in_channels, multiplier]).
  """
  stack_size = pfor_input.pfor.loop_len_vector[0]
  data_format = pfor_input.get_attr("data_format")
  c_dim = 1 if data_format == b"NCHW" else 3
  inputs = _flatten_with_inner_dim(pfor_input.stacked_input(0), c_dim + 1, 5)
  filter_sizes = pfor_input.unstacked_input(1)
  # Multiply only the in_channels entry of filter_sizes by the loop length.
  filter_sizes_multipliers = [
      constant_op.constant([1, 1], dtype=dtypes.int32), [stack_size],
      constant_op.constant([1], dtype=dtypes.int32)
  ]
  filter_sizes *= array_ops.concat(filter_sizes_multipliers, axis=0)
  out_backprop = _flatten_with_inner_dim(
      pfor_input.stacked_input(2), c_dim + 1, 5)
  result = _create_op(
      "DepthwiseConv2dNativeBackpropFilter",
      [inputs, filter_sizes, out_backprop],
      [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs[0]
  # The filter gradient's loop dimension sits at the in_channels position (2).
  return wrap(_unflatten_with_inner_dim(result, 2, 4, stack_size), True)
@RegisterPForWithArgs("LogSoftmax", gen_nn_ops.log_softmax)
@RegisterPForWithArgs("Softmax", gen_nn_ops.softmax)
def _convert_softmax(pfor_input: _PforInput, op_type, op_func):
  """Vectorizes Softmax/LogSoftmax by applying `op_func` to the stacked input."""
  del op_type  # Unused: op_func already selects the kernel.
  stacked = pfor_input.stacked_input(0)
  return wrap(op_func(stacked), True)
# array_ops
@RegisterPForWithArgs("Identity", array_ops.identity)
@RegisterPForWithArgs("StopGradient", array_ops.stop_gradient)
@RegisterPForWithArgs("MatrixDiag", array_ops.matrix_diag)
@RegisterPForWithArgs("MatrixDiagPart", array_ops.matrix_diag_part)
@RegisterPForWithArgs("_EagerConst", array_ops.identity)
def _convert_identity(pfor_input: _PforInput, op_type, op_func):
  """Vectorizes ops that handle a leading loop dim by direct application."""
  del op_type  # Unused: op_func already selects the kernel.
  tensors = [inp.t for inp in pfor_input.inputs]
  return wrap(op_func(*tensors), True)
@RegisterPFor("IdentityN")
def _convert_identity_n(pfor_input: _PforInput):
  """Vectorizes IdentityN, preserving each input's stacked-ness."""
  results = array_ops.identity_n([inp.t for inp in pfor_input.inputs])
  return [
      wrap(res, inp.is_stacked)
      for res, inp in zip(results, pfor_input.inputs)
  ]
@RegisterPFor("Reshape")
def _convert_reshape(pfor_input: _PforInput):
  """Vectorizes Reshape by prepending the loop length to the target shape."""
  stacked_t = pfor_input.stacked_input(0)
  target_shape = pfor_input.unstacked_input(1)
  loop_len = math_ops.cast(pfor_input.pfor.loop_len_vector, target_shape.dtype)
  full_shape = array_ops.concat([loop_len, target_shape], axis=0)
  return wrap(array_ops.reshape(stacked_t, full_shape), True)
@RegisterPFor("TopK")
@RegisterPFor("TopKV2")
def _convert_top_k(pfor_input: _PforInput):
  """Vectorizes TopK/TopKV2 by forwarding inputs to the same op unchanged."""
  # TopK operates along the innermost dimension, so the leading loop dimension
  # does not affect the computation; the original attrs are reused as-is.
  op = _create_op(
      op_type=pfor_input.op_type,
      inputs=[inp.t for inp in pfor_input.inputs],
      op_dtypes=[out.dtype for out in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr)
  return [wrap(out, True) for out in op.outputs]
@RegisterPFor("Fill")
def _convert_fill(pfor_input: _PforInput):
  """Vectorizes Fill with a per-iteration (stacked) fill value."""
  dims = pfor_input.unstacked_input(0)
  value = pfor_input.stacked_input(1)
  # Expand the rank of `value` to [loop_len, 1, 1, ...] so it broadcasts
  # against the requested output dims.
  new_shape = array_ops.concat(
      [[-1], array_ops.ones([array_ops.size(dims)], dtype=dtypes.int32)],
      axis=0)
  value = array_ops.reshape(value, new_shape)
  # Compute the new output shape
  new_dims = array_ops.concat([pfor_input.pfor.loop_len_vector, dims], axis=0)
  # Broadcast
  return wrap(array_ops.broadcast_to(value, new_dims), True)
@RegisterPFor("BroadcastTo")
def _convert_broadcast_to(pfor_input: _PforInput):
  """Vectorizes BroadcastTo by broadcasting to [loop_len] + shape."""
  shape = pfor_input.unstacked_input(1)
  n = math_ops.cast(pfor_input.pfor.loop_len_vector, shape.dtype)
  new_shape = array_ops.concat([n, shape], axis=0)
  new_rank = _size(new_shape, dtypes.int32)
  t = pfor_input.stacked_input(0)
  # Insert size-1 dims after the loop dim so ranks line up for broadcasting.
  t = _expand_dims(t, 1, new_rank - _rank(t))
  return wrap(array_ops.broadcast_to(t, new_shape), True)
@RegisterPFor("ExpandDims")
def _convert_expanddims(pfor_input: _PforInput):
  """Vectorizes ExpandDims; only non-negative axes shift for the loop dim."""
  stacked_t = pfor_input.stacked_input(0)
  axis = pfor_input.unstacked_input(1)
  axis += math_ops.cast(axis >= 0, axis.dtype)
  return wrap(array_ops.expand_dims(stacked_t, axis=axis), True)
@RegisterPForWithArgs("LowerBound", gen_array_ops.lower_bound)
@RegisterPForWithArgs("UpperBound", gen_array_ops.upper_bound)
def _convert_searchsorted(pfor_input: _PforInput, _, op_func):
  """Vectorizes Lower/UpperBound by merging the loop dim into the batch dim."""
  pfor_input.stack_inputs()
  sorted_inputs = _flatten_first_two_dims(pfor_input.stacked_input(0))
  values = _flatten_first_two_dims(pfor_input.stacked_input(1))
  out_type = pfor_input.get_attr("out_type")
  output = op_func(sorted_inputs, values, out_type)
  # Recover the loop dimension of the result.
  return wrap(
      _unflatten_first_dim(output, pfor_input.pfor.loop_len_vector), True)
@RegisterPFor("MatrixBandPart")
def _convert_matrix_band_part(pfor_input: _PforInput):
  """Vectorizes MatrixBandPart; it acts on the innermost two dimensions."""
  stacked_t = pfor_input.stacked_input(0)
  lower = pfor_input.unstacked_input(1)
  upper = pfor_input.unstacked_input(2)
  banded = array_ops.matrix_band_part(stacked_t, num_lower=lower,
                                      num_upper=upper)
  return wrap(banded, True)
@RegisterPFor("MatrixSetDiag")
def _convert_matrix_set_diag(pfor_input: _PforInput):
  """Vectorizes MatrixSetDiag by stacking both inputs."""
  pfor_input.stack_inputs()
  matrix = pfor_input.stacked_input(0)
  diagonal = pfor_input.stacked_input(1)
  return wrap(array_ops.matrix_set_diag(matrix, diagonal), True)
# Registrations for Matrix{Diag,DiagPart,SetDiag}V2-3.
# The input orders defined in the OpKernel and the actual python API are
# different (for compatibility with V1), so we cannot use _convert_identity.
# v2 is not compatible with v3 and is never exposed on the public API.
@RegisterPFor("MatrixDiagV2")
@RegisterPFor("MatrixDiagV3")
def _convert_matrix_diag_v2(pfor_input: _PforInput):
  """Vectorizes MatrixDiagV2/V3; only the diagonal input is stacked."""
  params = {
      "diagonal": pfor_input.stacked_input(0),
      "k": pfor_input.unstacked_input(1),
      "num_rows": pfor_input.unstacked_input(2),
      "num_cols": pfor_input.unstacked_input(3),
      "padding_value": pfor_input.unstacked_input(4)
  }
  if pfor_input.op_type == "MatrixDiagV2":
    return wrap(array_ops.matrix_diag_v2(**params), True)
  # V3 additionally carries an `align` attribute.
  params["align"] = pfor_input.get_attr("align")
  return wrap(array_ops.matrix_diag(**params), True)
@RegisterPFor("Diag")
def _convert_diag(pfor_input: _PforInput):
  """Vectorizes Diag for vector inputs; otherwise falls back to a loop."""
  diag = pfor_input.stacked_input(0)
  if diag.shape.ndims == 2:
    # We can use matrix_diag.
    return wrap(array_ops.matrix_diag(diag), True)
  else:
    # It is not clear if we can do better than a while loop here with existing
    # kernels.
    return _fallback_converter(pfor_input, warn=False)
# See notes for MatrixDiagV2
@RegisterPFor("MatrixDiagPartV2")
@RegisterPFor("MatrixDiagPartV3")
def _convert_matrix_diag_part_v2(pfor_input: _PforInput):
  """Vectorizes MatrixDiagPartV2/V3; only the matrix input is stacked."""
  params = {
      "input": pfor_input.stacked_input(0),
      "k": pfor_input.unstacked_input(1),
      "padding_value": pfor_input.unstacked_input(2)
  }
  if pfor_input.op_type == "MatrixDiagPartV2":
    return wrap(array_ops.matrix_diag_part_v2(**params), True)
  # V3 additionally carries an `align` attribute.
  params["align"] = pfor_input.get_attr("align")
  return wrap(array_ops.matrix_diag_part(**params), True)
# See notes for MatrixDiagV2
@RegisterPFor("MatrixSetDiagV2")
@RegisterPFor("MatrixSetDiagV3")
def _convert_matrix_set_diag_v2(pfor_input: _PforInput):
  """Vectorizes MatrixSetDiagV2/V3; matrix and diagonal are stacked together."""
  pfor_input.stack_inputs([0, 1])
  params = {
      "input": pfor_input.stacked_input(0),
      "diagonal": pfor_input.stacked_input(1),
      "k": pfor_input.unstacked_input(2)
  }
  if pfor_input.op_type == "MatrixSetDiagV2":
    return wrap(array_ops.matrix_set_diag_v2(**params), True)
  # V3 additionally carries an `align` attribute.
  params["align"] = pfor_input.get_attr("align")
  return wrap(array_ops.matrix_set_diag(**params), True)
@RegisterPFor("DiagPart")
def _convert_diag_part(pfor_input: _PforInput):
  """Vectorizes DiagPart for matrix inputs; otherwise falls back to a loop."""
  inp = pfor_input.stacked_input(0)
  if inp.shape.ndims == 3:
    # We can use matrix_diag_part.
    return wrap(array_ops.matrix_diag_part(inp), True)
  else:
    # It is not clear if we can do better than a while loop here with existing
    # kernels.
    return _fallback_converter(pfor_input, warn=False)
@RegisterPFor("OneHot")
def _convert_one_hot(pfor_input: _PforInput):
  """Vectorizes OneHot, shifting a non-negative axis past the loop dim."""
  indices = pfor_input.stacked_input(0)
  depth = pfor_input.unstacked_input(1)
  on_value = pfor_input.unstacked_input(2)
  off_value = pfor_input.unstacked_input(3)
  axis = pfor_input.get_attr("axis")
  shifted_axis = axis + 1 if axis >= 0 else axis
  result = array_ops.one_hot(indices, depth, on_value, off_value, shifted_axis)
  return wrap(result, True)
@RegisterPFor("Slice")
def _convert_slice(pfor_input: _PforInput):
  """Vectorizes Slice for both loop-invariant and loop-variant `begin`."""
  t = pfor_input.stacked_input(0)
  begin, begin_stacked, _ = pfor_input.input(1)
  size = pfor_input.unstacked_input(2)
  if not begin_stacked:
    # Loop-invariant begin: keep the whole loop dimension (begin 0, size -1)
    # and slice the rest directly.
    begin = array_ops.concat([[0], begin], axis=0)
    size = array_ops.concat([[-1], size], axis=0)
    return wrap(array_ops.slice(t, begin, size), True)
  else:
    # Handle negative sizes.
    #
    # If the `begin` entry corresponding to a negative `size` is loop-variant,
    # the output would be ragged. This case is not supported. But `size` having
    # some negative values and some loop-variant `begin`s is OK (and it's hard
    # to tell the difference statically).
    t_shape = array_ops.shape(t)
    size = math_ops.cast(size, t_shape.dtype)
    begin = math_ops.cast(begin, t_shape.dtype)
    n = math_ops.cast(pfor_input.pfor.loop_len_vector, t_shape.dtype)
    original_unstacked_shape = _stack(t_shape[1:], n).t
    broadcast_size = _stack(size, n).t
    # Resolve -1 entries in size to "rest of the dimension from begin".
    result_shape = array_ops.where(
        math_ops.less(broadcast_size, 0),
        original_unstacked_shape - begin + broadcast_size + 1, broadcast_size)
    result_shape = math_ops.cast(math_ops.reduce_max(result_shape, axis=0),
                                 dtypes.int64)
    # Now we enumerate points in the sliced region for each pfor iteration and
    # gather them.
    cumsize = math_ops.cumprod(result_shape, exclusive=True, reverse=True)
    result_num_elements = math_ops.reduce_prod(result_shape)
    # Offsets are loop-variant. We first compute loop-invariant gather
    # coordinates, then broadcast-add the loop-variant `begin` offsets.
    result_base_coordinates = (
        math_ops.range(result_num_elements, dtype=dtypes.int64)[:, None]
        // cumsize[None, :]) % result_shape[None, :]
    result_coordinates = (
        begin[:, None, :]
        + math_ops.cast(result_base_coordinates, begin.dtype)[None, :, :])
    result_flat = array_ops.gather_nd(params=t, indices=result_coordinates,
                                      batch_dims=1)
    result_stacked_shape = array_ops.concat(
        [math_ops.cast(pfor_input.pfor.loop_len_vector, result_shape.dtype),
         result_shape],
        axis=0)
    return wrap(array_ops.reshape(result_flat, result_stacked_shape), True)
@RegisterPFor("Tile")
def _convert_tile(pfor_input: _PforInput):
  """Vectorizes Tile; the loop dimension is never tiled (multiple of 1)."""
  stacked_t = pfor_input.stacked_input(0)
  new_multiples = array_ops.concat([[1], pfor_input.unstacked_input(1)], 0)
  return wrap(array_ops.tile(stacked_t, new_multiples), True)
@RegisterPFor("Pack")
def _convert_pack(pfor_input: _PforInput):
  """Vectorizes Pack, shifting a non-negative axis past the loop dim."""
  pfor_input.stack_inputs()
  axis = pfor_input.get_attr("axis")
  shifted_axis = axis + 1 if axis >= 0 else axis
  tensors = [inp.t for inp in pfor_input.inputs]
  return wrap(array_ops_stack.stack(tensors, axis=shifted_axis), True)
@RegisterPFor("Unpack")
def _convert_unpack(pfor_input: _PforInput):
  """Vectorizes Unpack, shifting a non-negative axis past the loop dim."""
  value = pfor_input.stacked_input(0)
  axis = pfor_input.get_attr("axis")
  shifted_axis = axis + 1 if axis >= 0 else axis
  num = pfor_input.get_attr("num")
  pieces = array_ops_stack.unstack(value, axis=shifted_axis, num=num)
  return [wrap(piece, True) for piece in pieces]
@RegisterPFor("Pad")
def _convert_pad(pfor_input: _PforInput):
  """Vectorizes Pad; the loop dimension gets zero padding."""
  stacked_t = pfor_input.stacked_input(0)
  new_paddings = array_ops.concat([[[0, 0]], pfor_input.unstacked_input(1)], 0)
  return wrap(array_ops.pad(stacked_t, new_paddings, mode="CONSTANT"), True)
@RegisterPFor("PadV2")
def _convert_pad_v2(pfor_input: _PforInput):
  """Vectorizes PadV2; the loop dimension gets zero padding."""
  stacked_t = pfor_input.stacked_input(0)
  new_paddings = array_ops.concat([[[0, 0]], pfor_input.unstacked_input(1)], 0)
  return wrap(array_ops.pad_v2(stacked_t, new_paddings, mode="CONSTANT"), True)
@RegisterPFor("Split")
def _convert_split(pfor_input: _PforInput):
  """Vectorizes Split, shifting a non-negative split_dim past the loop dim."""
  split_dim = pfor_input.unstacked_input(0)
  value = pfor_input.stacked_input(1)
  num_split = pfor_input.get_attr("num_split")
  split_dim += math_ops.cast(split_dim >= 0, dtypes.int32)
  pieces = array_ops.split(value, num_split, axis=split_dim)
  return [wrap(piece, True) for piece in pieces]
@RegisterPFor("SplitV")
def _convert_split_v(pfor_input: _PforInput):
  """Vectorizes SplitV, shifting a non-negative split_dim past the loop dim."""
  value = pfor_input.stacked_input(0)
  size_splits = pfor_input.unstacked_input(1)
  split_dim = pfor_input.unstacked_input(2)
  split_dim += math_ops.cast(split_dim >= 0, dtypes.int32)
  pieces = array_ops.split(value, size_splits, axis=split_dim)
  return [wrap(piece, True) for piece in pieces]
@RegisterPFor("Squeeze")
def _convert_squeeze(pfor_input: _PforInput):
  """Vectorizes Squeeze, shifting non-negative squeeze dims past the loop dim."""
  stacked_t = pfor_input.stacked_input(0)
  shifted_dims = [
      d + 1 if d >= 0 else d for d in pfor_input.get_attr("squeeze_dims")
  ]
  return wrap(array_ops.squeeze(stacked_t, axis=shifted_dims), True)
@RegisterPFor("ReverseV2")
def _convert_reverse(pfor_input: _PforInput):
  """Vectorizes ReverseV2, shifting non-negative axes past the loop dim."""
  value = pfor_input.stacked_input(0)
  axis = pfor_input.unstacked_input(1)
  shifted_axis = array_ops.where_v2(axis >= 0, axis + 1, axis)
  return wrap(gen_array_ops.reverse_v2(value, axis=shifted_axis), True)
@RegisterPForWithArgs("Transpose", gen_array_ops.transpose)
@RegisterPForWithArgs("ConjugateTranspose", gen_array_ops.conjugate_transpose)
def _convert_transpose(pfor_input: _PforInput, _, op_func):
  """Vectorizes (Conjugate)Transpose, keeping the loop dim in place."""
  value = pfor_input.stacked_input(0)
  perm = pfor_input.unstacked_input(1)
  # Loop dim stays first; every other permutation entry shifts by one.
  shifted_perm = array_ops.concat([[0], perm + 1], axis=0)
  return wrap(op_func(value, shifted_perm), True)
@RegisterPFor("ZerosLike")
def _convert_zeros_like(pfor_input: _PforInput):
  """Vectorizes ZerosLike; the result is loop invariant so it is unstacked."""
  stacked_t = pfor_input.stacked_input(0)
  per_iter_shape = array_ops.shape(stacked_t)[1:]
  return wrap(array_ops.zeros(per_iter_shape, dtype=stacked_t.dtype), False)
@RegisterPFor("OnesLike")
def _convert_ones_like(pfor_input: _PforInput):
  """Vectorizes OnesLike; the result is loop invariant so it is unstacked."""
  stacked_t = pfor_input.stacked_input(0)
  per_iter_shape = array_ops.shape(stacked_t)[1:]
  return wrap(array_ops.ones(per_iter_shape, dtype=stacked_t.dtype), False)
@RegisterPFor("Gather")
@RegisterPFor("GatherV2")
def _convert_gather(pfor_input: _PforInput):
  """Vectorizes Gather/GatherV2 across stacked/unstacked input combinations."""
  param, param_stacked, _ = pfor_input.input(0)
  indices, indices_stacked, _ = pfor_input.input(1)
  batch_dims = pfor_input.get_attr("batch_dims")
  op_type = pfor_input.op_type
  if op_type == "Gather":
    validate_indices = pfor_input.get_attr("validate_indices")
    axis = 0
  else:
    validate_indices = None
    # Assume we will never have a Tensor with rank > 2**32.
    axis = math_ops.cast(pfor_input.unstacked_input(2), dtypes.int32)
    axis_value = tensor_util.constant_value(axis)
    if axis_value is not None:
      axis = axis_value
  if indices_stacked and not param_stacked:
    if indices is pfor_input.pfor.all_indices and axis == 0:
      param_shape0 = tensor_shape.dimension_value(param.shape[0])
      indices_shape0 = tensor_shape.dimension_value(indices.shape[0])
      if param_shape0 is not None and indices_shape0 == param_shape0:
        # Note that with loops and conditionals, indices may not be contiguous.
        # However they will be sorted and unique. So if the shape matches, then
        # it must be picking up all the rows of param.
        return wrap(param, True)
    if batch_dims != 0:
      # Convert `batch_dims` to its positive equivalent if necessary.
      batch_dims_pos = batch_dims
      if batch_dims < 0:
        batch_dims_pos += array_ops.rank(indices)
      # In order to maintain
      #   indices.shape[:batch_dims] == params.shape[:batch_dims]
      # with stacked indices, we move the first dimension of `indices` to the
      # `batch_dims + 1`th position. The (non-batch) index dimensions will be
      # inserted into the shape of `output` at the `axis` dimension, which is
      # then transposed to the front (below).
      order = array_ops.concat([
          math_ops.range(1, batch_dims_pos + 1),
          [0],
          math_ops.range(batch_dims_pos + 1, array_ops.rank(indices))], axis=0)
      indices = array_ops.transpose(indices, order)
    output = array_ops.gather(
        param, indices, validate_indices=validate_indices, axis=axis,
        batch_dims=batch_dims)
    if axis != 0:
      # Move the gathered (loop) dimension to the front of the output.
      axis = smart_cond.smart_cond(axis < 0,
                                   lambda: axis + array_ops.rank(param),
                                   lambda: ops.convert_to_tensor(axis))
      order = array_ops.concat(
          [[axis],
           math_ops.range(axis),
           math_ops.range(axis + 1, array_ops.rank(output))],
          axis=0)
      output = smart_cond.smart_cond(
          math_ops.equal(axis, 0), lambda: output,
          lambda: array_ops.transpose(output, order))
    return wrap(output, True)
  if param_stacked:
    # Stack the indices too and gather with an extra leading batch dim; shift
    # non-negative axis/batch_dims past the loop dimension.
    pfor_input.stack_inputs(stack_indices=[1])
    indices = pfor_input.stacked_input(1)
    if isinstance(axis, tensor_lib.Tensor):
      axis = array_ops.where(axis >= 0, axis + 1, axis)
    else:
      axis = axis + 1 if axis >= 0 else axis
    batch_dims = batch_dims + 1 if batch_dims >= 0 else batch_dims
    output = array_ops.gather(param, indices, axis=axis, batch_dims=batch_dims)
    return wrap(output, True)
@RegisterPFor("GatherNd")
def _convert_gather_nd(pfor_input: _PforInput):
  """Vectorizes GatherNd with batch_dims=1 over the loop dimension."""
  # TODO(jmenick): Add support for unstacked params.
  pfor_input.stack_inputs(stack_indices=[1])
  params = pfor_input.stacked_input(0)
  indices = pfor_input.stacked_input(1)
  result = array_ops.gather_nd(params, indices, batch_dims=1)
  return wrap(result, True)
@RegisterPFor("ConcatV2")
def _convert_concatv2(pfor_input: _PforInput):
  """Vectorizes ConcatV2, shifting a non-negative axis past the loop dim."""
  n = pfor_input.num_inputs
  # The last input is the axis; all value inputs get stacked.
  pfor_input.stack_inputs(stack_indices=range(n - 1))
  axis = pfor_input.unstacked_input(n - 1)
  axis += math_ops.cast(axis >= 0, axis.dtype)
  values = [inp.t for inp in pfor_input.inputs[:n - 1]]
  return wrap(array_ops.concat(values, axis=axis), True)
@RegisterPFor("StridedSlice")
def _convert_strided_slice(pfor_input: _PforInput):
  """Vectorizes StridedSlice by prepending a full-range loop dimension."""
  inp = pfor_input.stacked_input(0)
  begin = pfor_input.unstacked_input(1)
  end = pfor_input.unstacked_input(2)
  strides = pfor_input.unstacked_input(3)
  begin_mask = pfor_input.get_attr("begin_mask")
  end_mask = pfor_input.get_attr("end_mask")
  ellipsis_mask = pfor_input.get_attr("ellipsis_mask")
  new_axis_mask = pfor_input.get_attr("new_axis_mask")
  shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask")
  # Prepend entries for the loop dimension (begin/end values are ignored for
  # it since its begin/end mask bits are set below), stride 1.
  begin = array_ops.concat([[0], begin], axis=0)
  end = array_ops.concat([[0], end], axis=0)
  strides = array_ops.concat([[1], strides], axis=0)
  # Shift every mask bit left by one and set bit 0 of begin/end masks so the
  # loop dimension is taken in full.
  begin_mask = begin_mask << 1 | 1
  end_mask = end_mask << 1 | 1
  ellipsis_mask <<= 1
  new_axis_mask <<= 1
  shrink_axis_mask <<= 1
  return wrap(
      array_ops.strided_slice(
          inp,
          begin,
          end,
          strides,
          begin_mask=begin_mask,
          end_mask=end_mask,
          ellipsis_mask=ellipsis_mask,
          new_axis_mask=new_axis_mask,
          shrink_axis_mask=shrink_axis_mask), True)
@RegisterPFor("StridedSliceGrad")
def _convert_strided_slice_grad(pfor_input: _PforInput):
  """Vectorizes StridedSliceGrad by prepending a full-range loop dimension."""
  shape = pfor_input.unstacked_input(0)
  begin = pfor_input.unstacked_input(1)
  end = pfor_input.unstacked_input(2)
  strides = pfor_input.unstacked_input(3)
  dy = pfor_input.stacked_input(4)
  begin_mask = pfor_input.get_attr("begin_mask")
  end_mask = pfor_input.get_attr("end_mask")
  ellipsis_mask = pfor_input.get_attr("ellipsis_mask")
  new_axis_mask = pfor_input.get_attr("new_axis_mask")
  shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask")
  # The original input shape gains the loop dimension at the front.
  shape = array_ops.concat(
      [math_ops.cast(pfor_input.pfor.loop_len_vector, shape.dtype), shape],
      axis=0)
  # Prepend entries for the loop dimension; its begin/end mask bits are set
  # below so the values here are ignored.
  begin = array_ops.concat([[0], begin], axis=0)
  end = array_ops.concat([[0], end], axis=0)
  strides = array_ops.concat([[1], strides], axis=0)
  # Shift every mask bit left by one and take the loop dimension in full.
  begin_mask = begin_mask << 1 | 1
  end_mask = end_mask << 1 | 1
  ellipsis_mask <<= 1
  new_axis_mask <<= 1
  shrink_axis_mask <<= 1
  return wrap(
      array_ops.strided_slice_grad(
          shape,
          begin,
          end,
          strides,
          dy,
          begin_mask=begin_mask,
          end_mask=end_mask,
          ellipsis_mask=ellipsis_mask,
          new_axis_mask=new_axis_mask,
          shrink_axis_mask=shrink_axis_mask), True)
@RegisterPFor("CheckNumerics")
def _convert_check_numerics(pfor_input: _PforInput):
  """Vectorizes CheckNumerics; the check is elementwise."""
  stacked_t = pfor_input.stacked_input(0)
  msg = pfor_input.get_attr("message")
  return wrap(gen_array_ops.check_numerics(stacked_t, msg), True)
@RegisterPFor("EnsureShape")
def _convert_ensure_shape(pfor_input: _PforInput):
  """Vectorizes EnsureShape by prepending an unknown loop dimension."""
  stacked_t = pfor_input.stacked_input(0)
  expected = tensor_shape.TensorShape(pfor_input.get_attr("shape"))
  return wrap(gen_array_ops.ensure_shape(stacked_t, [None] + expected), True)
# manip_ops
@RegisterPFor("Roll")
def _convert_roll(pfor_input: _PforInput):
  """Vectorizes Roll for both loop-invariant and loop-variant shifts."""
  t = pfor_input.stacked_input(0)
  shift, shift_stacked, _ = pfor_input.input(1)
  axis = pfor_input.unstacked_input(2)
  if not shift_stacked:
    # Loop-invariant shift: roll the stacked tensor, skipping the loop dim.
    return wrap(manip_ops.roll(t, shift, axis + 1), True)
  else:
    # `axis` and `shift` may both be vectors, with repeated axes summing the
    # corresponding `shift`s. We scatter shifts into a dense array of shape
    # [loop_len, num_unstacked_axes] indicating the offset for each axis.
    num_unstacked_axes = math_ops.cast(array_ops.rank(t), dtypes.int64) - 1
    axis = math_ops.cast(array_ops.reshape(axis, [-1]), dtypes.int64)
    loop_len = math_ops.cast(pfor_input.pfor.loop_len_vector[0], dtypes.int64)
    shift = math_ops.cast(array_ops.reshape(shift, [loop_len, -1]),
                          dtypes.int64)
    # Flattened (iteration, axis) ids used to sum repeated axes per iteration.
    axis_segment_ids = (
        math_ops.range(loop_len, dtype=dtypes.int64)[:, None]
        * num_unstacked_axes + axis[None, :])
    axis_offsets = array_ops.reshape(
        math_ops.unsorted_segment_sum(
            data=shift, segment_ids=axis_segment_ids,
            num_segments=loop_len * num_unstacked_axes),
        [loop_len, num_unstacked_axes])
    # Determine the coordinates in the input array of each result and gather
    # them.
    unstacked_shape = array_ops.shape(t, out_type=dtypes.int64)[1:]
    cumsize = math_ops.cumprod(unstacked_shape, exclusive=True, reverse=True)
    num_unstacked_elements = math_ops.reduce_prod(unstacked_shape)
    result_coordinates = (
        (math_ops.range(num_unstacked_elements,
                        dtype=dtypes.int64)[None, :, None]
         // cumsize[None, None, :] - axis_offsets[:, None, :])
        % unstacked_shape[None, None, :])
    result_flat = array_ops.gather_nd(params=t, indices=result_coordinates,
                                      batch_dims=1)
    return wrap(array_ops.reshape(result_flat, array_ops.shape(t)),
                True)
# math_ops
@RegisterPFor("MatMul")
def _convert_matmul(pfor_input: _PforInput):
  """Vectorizes MatMul, avoiding a batched matmul when only one side is stacked."""
  # TODO(agarwal): Check if tiling is faster than two transposes.
  a, a_stacked, _ = pfor_input.input(0)
  b, b_stacked, _ = pfor_input.input(1)
  tr_a = pfor_input.get_attr("transpose_a")
  tr_b = pfor_input.get_attr("transpose_b")
  if a_stacked and b_stacked:
    # Both stacked: a straightforward batch matmul over the loop dim.
    output = wrap(
        math_ops.matmul(a, b, transpose_a=tr_a, transpose_b=tr_b), True
    )
    return output
  elif a_stacked:
    # Only `a` stacked: collapse [n, x, y] -> [n*x, y] and do a single matmul
    # against the unstacked `b`, then restore the loop dim.
    if tr_a:
      a = array_ops.transpose(a, [0, 2, 1])
    if a.shape.is_fully_defined():
      x, y, z = a.shape
    else:
      x, y, z = [
          array_ops.reshape(i, [])
          for i in array_ops.split(array_ops.shape(a), 3)
      ]
    a = array_ops.reshape(a, [x * y, z])
    prod = math_ops.matmul(a, b, transpose_b=tr_b)
    return wrap(array_ops.reshape(prod, [x, y, -1]), True)
  else:
    assert b_stacked
    # Only `b` stacked: rearrange it to [y, n*z] so a single matmul against
    # the unstacked `a` computes all iterations at once.
    if tr_b:
      perm = [2, 0, 1]
      b = array_ops.transpose(b, perm)
    else:
      # As an optimization, if one of the first two dimensions is 1, then we can
      # reshape instead of transpose.
      # TODO(agarwal): This check can be done inside Transpose kernel.
      b_shape = array_ops.shape(b)
      min_dim = math_ops.minimum(b_shape[0], b_shape[1])
      perm = array_ops.where(
          math_ops.equal(min_dim, 1), [0, 1, 2], [1, 0, 2])
      new_shape = array_ops_stack.stack([b_shape[1], b_shape[0], b_shape[2]])
      b = array_ops.transpose(b, perm)
      b = array_ops.reshape(b, new_shape)
    if b.shape.is_fully_defined():
      x, y, z = b.shape
    else:
      x, y, z = [
          array_ops.reshape(i, [])
          for i in array_ops.split(array_ops.shape(b), 3)
      ]
    b = array_ops.reshape(b, [x, y * z])
    prod = math_ops.matmul(a, b, transpose_a=tr_a)
    prod = array_ops.reshape(prod, [-1, y, z])
    # Move the loop dimension back to the front.
    prod = array_ops.transpose(prod, [1, 0, 2])
    return wrap(prod, True)
# TODO(rmlarsen): Use the converter of BatchMatMulV2 once compatibility window
# is met.
@RegisterPFor("BatchMatMul")
def _convert_batch_mat_mul(pfor_input: _PforInput):
  """Vectorizes BatchMatMul by merging the loop dim into the batch dim."""
  # TODO(agarwal): There may be a more efficient way to do this instead of
  # stacking the inputs.
  pfor_input.stack_inputs()
  x = pfor_input.stacked_input(0)
  y = pfor_input.stacked_input(1)
  adj_x = pfor_input.get_attr("adj_x")
  adj_y = pfor_input.get_attr("adj_y")
  x = _flatten_first_two_dims(x)
  y = _flatten_first_two_dims(y)
  output = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
  # Recover the loop dimension of the result.
  output = _unflatten_first_dim(output, pfor_input.pfor.loop_len_vector)
  return wrap(output, True)
@RegisterPFor("BatchMatMulV2")
def _convert_batch_mat_mul_v2(pfor_input: _PforInput):
  """Vectorizes BatchMatMulV2 via broadcasting over the loop dimension."""
  pfor_input.expanddim_inputs_for_broadcast()
  lhs = pfor_input.input(0)[0]
  rhs = pfor_input.input(1)[0]
  adj_x = pfor_input.get_attr("adj_x")
  adj_y = pfor_input.get_attr("adj_y")
  product = math_ops.matmul(lhs, rhs, adjoint_a=adj_x, adjoint_b=adj_y)
  return wrap(product, True)
@RegisterPForWithArgs("Sum", math_ops.reduce_sum)
@RegisterPForWithArgs("Prod", math_ops.reduce_prod)
@RegisterPForWithArgs("Max", math_ops.reduce_max)
@RegisterPForWithArgs("Min", math_ops.reduce_min)
@RegisterPForWithArgs("Mean", math_ops.reduce_mean)
@RegisterPForWithArgs("All", math_ops.reduce_all)
@RegisterPForWithArgs("Any", math_ops.reduce_any)
def _convert_reduction(pfor_input: _PforInput, _, op_func):
  """Vectorizes reductions, shifting non-negative indices past the loop dim."""
  value = pfor_input.stacked_input(0)
  reduction_indices = pfor_input.unstacked_input(1)
  # Shift positive indices by one to account for the extra dimension.
  reduction_indices += math_ops.cast(reduction_indices >= 0,
                                     reduction_indices.dtype)
  keep_dims = pfor_input.get_attr("keep_dims")
  return wrap(op_func(value, reduction_indices, keepdims=keep_dims), True)
@RegisterPForWithArgs("ArgMax", math_ops.argmax)
@RegisterPForWithArgs("ArgMin", math_ops.argmin)
def _convert_argmax_argmin(pfor_input: _PforInput, _, op_func):
  """Vectorizes ArgMax/ArgMin, shifting a non-negative dim past the loop dim."""
  value = pfor_input.stacked_input(0)
  dim = pfor_input.unstacked_input(1)
  dim += math_ops.cast(dim >= 0, dim.dtype)
  out_type = pfor_input.get_attr("output_type")
  return wrap(op_func(value, axis=dim, output_type=out_type), True)
@RegisterPFor("Bucketize")
def _convert_bucketize(pfor_input: _PforInput):
  """Vectorizes Bucketize; bucketing is elementwise with attr boundaries."""
  stacked_t = pfor_input.stacked_input(0)
  boundaries = pfor_input.get_attr("boundaries")
  return wrap(math_ops.bucketize(stacked_t, boundaries), True)
@RegisterPFor("ClipByValue")
def _convert_clip_by_value(pfor_input: _PforInput):
  """Vectorizes ClipByValue; clipping is elementwise."""
  value = pfor_input.stacked_input(0)
  lower = pfor_input.unstacked_input(1)
  upper = pfor_input.unstacked_input(2)
  return wrap(gen_math_ops._clip_by_value(value, lower, upper), True)
@RegisterPForWithArgs("Cumsum", math_ops.cumsum)
@RegisterPForWithArgs("Cumprod", math_ops.cumprod)
def _convert_cumfoo(pfor_input: _PforInput, _, op_func):
  """Vectorizes Cumsum/Cumprod, shifting a non-negative axis past the loop dim."""
  value = pfor_input.stacked_input(0)
  axis = pfor_input.unstacked_input(1)
  # Shift positive indices by one to account for the extra dimension.
  axis += math_ops.cast(axis >= 0, axis.dtype)
  exclusive = pfor_input.get_attr("exclusive")
  reverse = pfor_input.get_attr("reverse")
  return wrap(op_func(value, axis, exclusive=exclusive, reverse=reverse), True)
@RegisterPFor("BiasAdd")
def _convert_biasadd(pfor_input: _PforInput):
  """Vectorizes BiasAdd for stacked and/or unstacked value and bias."""
  t, t_stacked, _ = pfor_input.input(0)
  bias, bias_stacked, _ = pfor_input.input(1)
  data_format = pfor_input.get_attr("data_format").decode()
  if bias_stacked:
    # BiasAdd only supports 1-D biases, so cast bias to match value and use Add.
    pfor_input.expanddim_inputs_for_broadcast()
    t, _, _ = pfor_input.input(0)
    bias = math_ops.cast(pfor_input.stacked_input(1), t.dtype)
    if compat.as_bytes(data_format) == b"NCHW":
      # Move the bias' channel dim to the NCHW channel position before adding.
      b_shape = array_ops.shape(bias)
      new_b_shape = array_ops.concat(
          [b_shape[:-3], b_shape[-1:], b_shape[-3:-1]], axis=0)
      bias = array_ops.reshape(bias, new_b_shape)
    return wrap(math_ops.add(t, bias), True)
  else:
    assert t_stacked, "At least one input to BiasAdd should be loop variant."
    if compat.as_bytes(data_format) == b"NCHW":
      # Merge the loop dim into the batch dim so the NCHW bias still lines up
      # with the channel dim, then restore the original shape.
      shape = array_ops.shape(t)
      flattened_shape = array_ops.concat([[-1], shape[2:]], axis=0)
      t = array_ops.reshape(t, flattened_shape)
      t = nn_ops.bias_add(t, bias, data_format="NCHW")
      t = array_ops.reshape(t, shape)
      return wrap(t, True)
    return wrap(nn_ops.bias_add(t, bias, data_format=data_format), True)
@RegisterPForWithArgs("UnsortedSegmentSum", math_ops.unsorted_segment_sum)
@RegisterPForWithArgs("UnsortedSegmentMax", math_ops.unsorted_segment_max)
@RegisterPForWithArgs("UnsortedSegmentMin", math_ops.unsorted_segment_min)
@RegisterPForWithArgs("UnsortedSegmentProd", math_ops.unsorted_segment_prod)
def _convert_unsortedsegmentsum(pfor_input: _PforInput, _, op_func):
  """Vectorizes UnsortedSegment* by offsetting ids into per-iteration ranges."""
  pfor_input.stack_inputs([0, 1])
  data = pfor_input.stacked_input(0)
  segment_ids = pfor_input.stacked_input(1)
  # TODO(agarwal): handle stacked?
  num_segments = pfor_input.unstacked_input(2)
  if segment_ids.dtype != num_segments.dtype:
    segment_ids = math_ops.cast(segment_ids, dtypes.int64)
    num_segments = math_ops.cast(num_segments, dtypes.int64)
  dtype = segment_ids.dtype
  segment_shape = array_ops.shape(segment_ids, out_type=dtype)
  n = segment_shape[0]
  ones = array_ops.ones_like(segment_shape, dtype=dtype)[1:]
  # Offset iteration i's ids by i * num_segments so iterations use disjoint
  # segment ranges; negative ids are left unchanged.
  segment_offset = num_segments * math_ops.range(n, dtype=dtype)
  segment_offset = array_ops.reshape(segment_offset,
                                     array_ops.concat([[n], ones], axis=0))
  segment_ids = array_ops.where(
      segment_ids >= 0, segment_ids + segment_offset, segment_ids
  )
  num_segments = math_ops.cast(num_segments, dtypes.int64) * math_ops.cast(
      n, dtypes.int64)
  output = op_func(data, segment_ids, num_segments)
  # Split the flattened per-iteration segments back out into a loop dim.
  new_output_shape = array_ops.concat(
      [[n, -1], array_ops.shape(output)[1:]], axis=0)
  output = array_ops.reshape(output, new_output_shape)
  return wrap(output, True)
def _flatten_array_with_offset(ids, offset_delta, num_rows):
  """Flattens a rank 2 tensor, adding an offset to each row.

  Row i gets offset `i * offset_delta`, which maps per-row index spaces into
  disjoint ranges of a single flat index space.
  """
  # Note that if `ids` is rank 1, it is broadcast to rank 2.
  offset_delta = math_ops.cast(offset_delta, ids.dtype)
  n = math_ops.cast(num_rows, dtype=ids.dtype)
  offsets = math_ops.range(
      start=0, limit=n * offset_delta, delta=offset_delta, dtype=ids.dtype)
  offsets = array_ops.expand_dims(offsets, -1)
  ids += offsets
  return array_ops.reshape(ids, [-1])
@RegisterPForWithArgs("SparseSegmentSum", math_ops.sparse_segment_sum_v2)
@RegisterPForWithArgs("SparseSegmentMean", math_ops.sparse_segment_mean_v2)
@RegisterPForWithArgs("SparseSegmentSqrtN", math_ops.sparse_segment_sqrt_n_v2)
@RegisterPForWithArgs("SparseSegmentSumWithNumSegments",
                      math_ops.sparse_segment_sum_v2)
@RegisterPForWithArgs("SparseSegmentMeanWithNumSegments",
                      math_ops.sparse_segment_mean_v2)
@RegisterPForWithArgs("SparseSegmentSqrtNWithNumSegments",
                      math_ops.sparse_segment_sqrt_n_v2)
def _convert_sparse_segment(pfor_input: _PforInput, _, op_func):
  """Vectorizes SparseSegment* by flattening iterations into one flat problem."""
  _, segment_ids_stacked, _ = pfor_input.input(2)
  if segment_ids_stacked:
    pfor_input.stack_inputs([1])
  data, data_stacked, _ = pfor_input.input(0)
  indices, _, _ = pfor_input.input(1)
  num_inputs = len(pfor_input.inputs)
  assert num_inputs in (3, 4)
  if num_inputs == 3:
    # `segment_ids` needs to be unstacked since otherwise output sizes could
    # differ across pfor iterations.
    segment_ids = pfor_input.unstacked_input(2)
    num_segments = nn_ops.relu(math_ops.reduce_max(segment_ids) + 1)
  else:
    segment_ids, _, _ = pfor_input.input(2)
    num_segments = pfor_input.unstacked_input(3)
  n = pfor_input.pfor.loop_len_vector[0]
  if data_stacked:
    # Offset indices per iteration to address the flattened data rows.
    indices = _flatten_array_with_offset(indices, array_ops.shape(data)[1], n)
    data = _flatten_first_two_dims(data)
  else:
    indices = array_ops.reshape(indices, [-1])
  # Offset segment ids per iteration so outputs land in disjoint ranges.
  segment_ids = _flatten_array_with_offset(segment_ids, num_segments, n)
  if num_inputs == 3:
    num_segments = None
  else:
    num_segments *= n
  output = op_func(data, indices, segment_ids, num_segments=num_segments)
  output = _unflatten_first_dim(output, [n])
  return wrap(output, True)
@RegisterPForWithArgs("SparseSegmentSumGrad", math_ops.sparse_segment_sum_grad)
@RegisterPForWithArgs("SparseSegmentMeanGrad",
                      math_ops.sparse_segment_mean_grad)
@RegisterPForWithArgs("SparseSegmentSqrtNGrad",
                      math_ops.sparse_segment_sqrt_n_grad)
def _convert_sparse_segment_grad(pfor_input: _PforInput, _, op_func):
  """Vectorizes SparseSegment*Grad by flattening iterations into one problem."""
  grad = pfor_input.stacked_input(0)
  indices = pfor_input.unstacked_input(1)
  segment_ids = pfor_input.unstacked_input(2)
  dim0 = pfor_input.unstacked_input(3)
  n = pfor_input.pfor.loop_len_vector[0]
  # Offset indices and segment ids per iteration into disjoint ranges.
  indices = _flatten_array_with_offset(indices, dim0, n)
  num_segments = nn_ops.relu(math_ops.reduce_max(segment_ids) + 1)
  segment_ids = _flatten_array_with_offset(segment_ids, num_segments, n)
  grad = _flatten_first_two_dims(grad)
  dim0 *= n
  output = op_func(grad, indices, segment_ids, dim0)
  output = _unflatten_first_dim(output, [n])
  return wrap(output, True)
@RegisterPFor("Cast")
def _convert_cast(pfor_input: _PforInput):
  """Vectorizes Cast: casting is elementwise, so cast the stacked tensor."""
  target_dtype = pfor_input.get_attr("DstT")
  stacked = pfor_input.stacked_input(0)
  return wrap(math_ops.cast(stacked, target_dtype), True)
@RegisterPFor("Abs")
@RegisterPFor("Acos")
@RegisterPFor("Acosh")
@RegisterPFor("Add")
@RegisterPFor("AddV2")
@RegisterPFor("Angle")
@RegisterPFor("Asin")
@RegisterPFor("Asinh")
@RegisterPFor("Atan")
@RegisterPFor("Atan2")
@RegisterPFor("Atanh")
@RegisterPFor("BesselI0")
@RegisterPFor("BesselI1")
@RegisterPFor("BesselI0e")
@RegisterPFor("BesselI1e")
@RegisterPFor("BesselK0")
@RegisterPFor("BesselK1")
@RegisterPFor("BesselK0e")
@RegisterPFor("BesselK1e")
@RegisterPFor("BesselJ0")
@RegisterPFor("BesselJ1")
@RegisterPFor("BesselY0")
@RegisterPFor("BesselY1")
@RegisterPFor("BitwiseAnd")
@RegisterPFor("BitwiseOr")
@RegisterPFor("BitwiseXor")
@RegisterPFor("Ceil")
@RegisterPFor("Complex")
@RegisterPFor("ComplexAbs")
@RegisterPFor("Conj")
@RegisterPFor("Cos")
@RegisterPFor("Cosh")
@RegisterPFor("Dawsn")
@RegisterPFor("Digamma")
@RegisterPFor("Div")
@RegisterPFor("DivNoNan")
@RegisterPFor("Elu")
@RegisterPFor("Erf")
@RegisterPFor("Erfc")
@RegisterPFor("Erfinv")
@RegisterPFor("Exp")
@RegisterPFor("Expint")
@RegisterPFor("Expm1")
@RegisterPFor("Floor")
@RegisterPFor("FloorDiv")
@RegisterPFor("FloorMod")
@RegisterPFor("FresnelCos")
@RegisterPFor("FresnelSin")
@RegisterPFor("Greater")
@RegisterPFor("GreaterEqual")
@RegisterPFor("Igamma")
@RegisterPFor("IgammaGradA")
@RegisterPFor("Igammac")
@RegisterPFor("Imag")
@RegisterPFor("Inv")
@RegisterPFor("Invert")
@RegisterPFor("IsFinite")
@RegisterPFor("IsInf")
@RegisterPFor("IsNan")
@RegisterPFor("LeftShift")
@RegisterPFor("Less")
@RegisterPFor("LessEqual")
@RegisterPFor("Lgamma")
@RegisterPFor("Log")
@RegisterPFor("Log1p")
@RegisterPFor("LogicalAnd")
@RegisterPFor("LogicalNot")
@RegisterPFor("LogicalOr")
@RegisterPFor("LogicalXor")
@RegisterPFor("Maximum")
@RegisterPFor("Minimum")
@RegisterPFor("Mod")
@RegisterPFor("Mul")
@RegisterPFor("MulNoNan")
@RegisterPFor("Ndtri")
@RegisterPFor("Neg")
@RegisterPFor("Polygamma")
@RegisterPFor("Pow")
@RegisterPFor("Real")
@RegisterPFor("RealDiv")
@RegisterPFor("Reciprocal")
@RegisterPFor("Relu")
@RegisterPFor("Relu6")
@RegisterPFor("RightShift")
@RegisterPFor("Rint")
@RegisterPFor("Round")
@RegisterPFor("Rsqrt")
@RegisterPFor("Selu")
@RegisterPFor("Sigmoid")
@RegisterPFor("Sign")
@RegisterPFor("Sin")
@RegisterPFor("Sinh")
@RegisterPFor("Softplus")
@RegisterPFor("Softsign")
@RegisterPFor("Spence")
@RegisterPFor("Sqrt")
@RegisterPFor("Square")
@RegisterPFor("SquaredDifference")
@RegisterPFor("Sub")
@RegisterPFor("Tan")
@RegisterPFor("Tanh")
@RegisterPFor("TruncateDiv")
@RegisterPFor("TruncateMod")
@RegisterPFor("Xdivy")
@RegisterPFor("Xlogy")
@RegisterPFor("Xlog1py")
@RegisterPFor("Zeta")
def _convert_cwise(pfor_input: _PforInput):
  """Vectorizes componentwise (elementwise) ops.

  Since these ops apply per element, running the same op on stacked inputs is
  equivalent to the loop. For multi-input ops, inputs are reshaped (not
  tiled) so broadcasting aligns with the prepended loop dimension.
  """
  if pfor_input.num_inputs > 1:
    pfor_input.expanddim_inputs_for_broadcast()
  out = _create_op(
      pfor_input.op_type, [x.t for x in pfor_input.inputs],
      [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs
  # All ops registered here have exactly one output.
  assert len(out) == 1
  out = out[0]
  op_output = wrap(out, True)
  return op_output
@RegisterPFor("XlaSharding")
def _convert_xla_sharding(pfor_input: _PforInput):
  """Vectorizes XlaSharding by applying the same sharding attr to the stack."""
  stacked = pfor_input.stacked_input(0)
  sharding_attr = pfor_input.get_attr("sharding")
  return wrap(xla.sharding(stacked, sharding=sharding_attr), True)
@RegisterPFor("LeakyRelu")
def _convert_leaky_relu(pfor_input: _PforInput):
  """Vectorizes LeakyRelu; the op is elementwise so stacking suffices."""
  alpha = pfor_input.get_attr("alpha")
  stacked = pfor_input.stacked_input(0)
  return wrap(gen_nn_ops.leaky_relu(stacked, alpha=alpha), True)
@RegisterPFor("Equal")
def _convert_equal(pfor_input: _PforInput):
  """Vectorizes Equal by broadcasting across the loop dimension."""
  pfor_input.expanddim_inputs_for_broadcast()
  lhs, _, _ = pfor_input.input(0)
  rhs, _, _ = pfor_input.input(1)
  shape_error = pfor_input.get_attr("incompatible_shape_error")
  result = gen_math_ops.equal(lhs, rhs, incompatible_shape_error=shape_error)
  return wrap(result, True)
@RegisterPFor("NotEqual")
def _convert_not_equal(pfor_input: _PforInput):
  """Vectorizes NotEqual by broadcasting across the loop dimension."""
  pfor_input.expanddim_inputs_for_broadcast()
  lhs, _, _ = pfor_input.input(0)
  rhs, _, _ = pfor_input.input(1)
  shape_error = pfor_input.get_attr("incompatible_shape_error")
  result = gen_math_ops.not_equal(
      lhs, rhs, incompatible_shape_error=shape_error)
  return wrap(result, True)
@RegisterPFor("ApproximateEqual")
def _convert_approximate_equal(pfor_input: _PforInput):
  """Vectorizes ApproximateEqual by broadcasting across the loop dimension."""
  pfor_input.expanddim_inputs_for_broadcast()
  lhs, _, _ = pfor_input.input(0)
  rhs, _, _ = pfor_input.input(1)
  tol = pfor_input.get_attr("tolerance")
  return wrap(math_ops.approximate_equal(lhs, rhs, tolerance=tol), True)
@RegisterPFor("Shape")
def _convert_shape(pfor_input: _PforInput):
  """Returns the per-iteration shape: the stacked shape minus the loop dim."""
  out_type = pfor_input.get_attr("out_type")
  full_shape = array_ops.shape(pfor_input.stacked_input(0), out_type=out_type)
  # Drop the leading (loop) dimension; the result is loop invariant.
  return wrap(full_shape[1:], False)
@RegisterPFor("ShapeN")
def _convert_shape_n(pfor_input: _PforInput):
  """Returns per-iteration shapes, stripping the loop dim off stacked inputs."""
  out_type = pfor_input.get_attr("out_type")
  results = []
  for tensor, is_stacked, _ in pfor_input.inputs:
    shape = array_ops.shape(tensor, out_type=out_type)
    if is_stacked:
      # Drop the leading (loop) dimension.
      shape = shape[1:]
    results.append(wrap(shape, False))
  return results
@RegisterPFor("Size")
def _convert_size(pfor_input: _PforInput):
  """Per-iteration size is the stacked size divided by the loop length."""
  out_type = pfor_input.get_attr("out_type")
  loop_len = math_ops.cast(pfor_input.pfor.loop_len_vector[0], out_type)
  total = array_ops.size(pfor_input.stacked_input(0), out_type=out_type)
  return wrap(total // loop_len, False)
@RegisterPFor("Rank")
def _convert_rank(pfor_input: _PforInput):
  """Per-iteration rank is the stacked rank minus the loop dimension."""
  stacked_rank = array_ops.rank(pfor_input.stacked_input(0))
  return wrap(stacked_rank - 1, False)
@RegisterPFor("AddN")
def _convert_addn(pfor_input: _PforInput):
  """Vectorizes AddN; all inputs are stacked since AddN cannot broadcast."""
  pfor_input.stack_inputs(tile_variants=False)
  summed = math_ops.add_n([inp.t for inp in pfor_input.inputs])
  return _wrap_and_tile_variants(summed, pfor_input.pfor.loop_len_vector)
@RegisterPFor("Cross")
def _convert_cross(pfor_input: _PforInput):
  """Vectorizes Cross by stacking both operands."""
  pfor_input.stack_inputs()
  lhs = pfor_input.stacked_input(0)
  rhs = pfor_input.stacked_input(1)
  return wrap(math_ops.cross(lhs, rhs), True)
@RegisterPFor("BiasAddGrad")
def _convert_biasaddgrad(pfor_input: _PforInput):
  """Vectorizes BiasAddGrad by summing over all non-channel dimensions."""
  grad = pfor_input.stacked_input(0)
  data_format = pfor_input.get_attr("data_format")
  if data_format == b"NCHW":
    # Channels sit at axis 2 of the stacked gradient; sum out batch/spatial.
    reduced = math_ops.reduce_sum(grad, axis=[1, 3, 4], keepdims=False)
  else:
    # NHWC (default): collapse everything between the loop dim and the last
    # (channel) dim, then reduce over the collapsed middle axis.
    shape = array_ops.shape(grad)
    reduced = array_ops.reshape(grad, [shape[0], -1, shape[-1]])
    reduced = math_ops.reduce_sum(reduced, axis=[1], keepdims=False)
  return wrap(reduced, True)
# Some required ops are not exposed under the tf namespace. Hence relying on
# _create_op to create them.
@RegisterPForWithArgs("EluGrad")
@RegisterPForWithArgs("LeakyReluGrad")
@RegisterPForWithArgs("ReciprocalGrad")
@RegisterPForWithArgs("Relu6Grad")
@RegisterPForWithArgs("ReluGrad")
@RegisterPForWithArgs("RsqrtGrad")
@RegisterPForWithArgs("SeluGrad")
@RegisterPForWithArgs("SigmoidGrad")
@RegisterPForWithArgs("SoftplusGrad")
@RegisterPForWithArgs("SoftsignGrad")
@RegisterPForWithArgs("SqrtGrad")
@RegisterPForWithArgs("TanhGrad")
def _convert_grads(pfor_input: _PforInput, op_type, *args, **kw_args):
  """Vectorizes elementwise activation-gradient ops by stacking all inputs."""
  del args
  del kw_args
  # TODO(agarwal): Looks like these ops don't support broadcasting. Hence we
  # have to use tiling here.
  pfor_input.stack_inputs()
  outputs = _create_op(
      op_type, [x.t for x in pfor_input.inputs],
      [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs
  return [wrap(x, True) for x in outputs]
@RegisterPFor("Select")
def _convert_select(pfor_input: _PforInput):
  """Vectorizes Select.

  Select's legacy broadcasting rules treat a rank > 1 condition specially, so
  when the stacked condition has rank > 1 the inputs are flattened across the
  loop and first element dimensions, the op is run once, and the result is
  unflattened back into the loop dimension.
  """
  pfor_input.stack_inputs()
  cond = pfor_input.stacked_input(0)
  t = pfor_input.stacked_input(1)
  e = pfor_input.stacked_input(2)
  cond_rank = array_ops.rank(cond)
  cond, t, e = smart_cond.smart_cond(
      cond_rank > 1, lambda: _inputs_with_flattening(pfor_input, [0, 1, 2]),
      lambda: [cond, t, e])
  outputs = _create_op(
      pfor_input.op_type, [cond, t, e], [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs
  n = pfor_input.pfor.loop_len_vector
  out = smart_cond.smart_cond(cond_rank > 1,
                              lambda: _unflatten_first_dim(outputs[0], n),
                              lambda: outputs[0])
  # Select has exactly one output; `_` replaces the previously unused loop
  # variable `x` to make that explicit.
  return [wrap(out, True) for _ in outputs]
@RegisterPFor("SelectV2")
def _convert_selectv2(pfor_input: _PforInput):
  """Vectorizes SelectV2, which supports full broadcasting natively."""
  pfor_input.expanddim_inputs_for_broadcast()
  condition, _, _ = pfor_input.input(0)
  true_vals, _, _ = pfor_input.input(1)
  false_vals, _, _ = pfor_input.input(2)
  selected = array_ops.where_v2(condition, true_vals, false_vals)
  return wrap(selected, True)
# random_ops
def _transpose_dim_to_front(x, dim):
  """Transposes `x` so that (dynamic) dimension `dim` becomes dimension 0.

  The relative order of all other dimensions is preserved.
  """
  rank = array_ops.rank(x)
  return array_ops.transpose(
      x,
      perm=array_ops.concat(
          [[dim], math_ops.range(0, dim),
           math_ops.range(dim + 1, rank)],
          axis=0))
@RegisterPForWithArgs("RandomUniform")
@RegisterPForWithArgs("RandomUniformInt")
@RegisterPForWithArgs("RandomStandardNormal")
@RegisterPForWithArgs("TruncatedNormal")
def _convert_random(pfor_input: _PforInput, op_type, *args, **kw_args):
  """Vectorizes stateful random ops by prepending the loop dim to `shape`.

  Note: the generated values may differ from what a sequential loop would
  produce, since a single larger draw is made instead of one draw per
  iteration.
  """
  del args
  del kw_args
  inputs = [pfor_input.unstacked_input(i) for i in range(pfor_input.num_inputs)]
  # inputs[0] is "shape"
  n = math_ops.cast(pfor_input.pfor.loop_len_vector, inputs[0].dtype)
  inputs[0] = array_ops.concat([n, inputs[0]], axis=0)
  # TODO(b/222761732): Turn this warning back on when legacy RNGs are
  # deprecated.
  # logging.warning(
  #     "Note that %s inside pfor op may not give same output as "
  #     "inside a sequential loop.", op_type)
  outputs = _create_op(
      op_type,
      inputs, [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs
  return [wrap(x, True) for x in outputs]
@RegisterPFor("RandomGamma")
@RegisterPFor("RandomPoissonV2")
def _convert_random_with_param(pfor_input: _PforInput):
  """Vectorizes RandomGamma/RandomPoissonV2, which take a parameter tensor.

  If the parameter is stacked, the op itself broadcasts over it and the loop
  dimension ends up in the middle of the sample shape, so it is transposed to
  the front. Otherwise the loop dimension is prepended to `shape` directly.
  """
  shape = pfor_input.unstacked_input(0)
  # param is lam (Poisson rate) or alpha (Gamma shape).
  param, param_stacked, _ = pfor_input.input(1)
  # TODO(b/222761732): Turn this warning back on when legacy RNGs are
  # deprecated.
  # logging.warning(
  #     "Note that %s inside pfor op may not give same output as "
  #     "inside a sequential loop.", pfor_input.op_type)
  if param_stacked:
    samples = _create_op(
        pfor_input.op_type,
        inputs=[shape, param],
        op_dtypes=[x.dtype for x in pfor_input.outputs],
        attrs=pfor_input.op.node_def.attr).outputs[0]
    # The loop dim follows the requested sample dims; move it to the front.
    loop_dim = array_ops.shape(shape)[0]
    stacked_samples = _transpose_dim_to_front(samples, loop_dim)
  else:
    n = math_ops.cast(pfor_input.pfor.loop_len_vector, shape.dtype)
    shape = array_ops.concat([n, shape], axis=0)
    stacked_samples = _create_op(
        pfor_input.op_type,
        inputs=[shape, param],
        op_dtypes=[x.dtype for x in pfor_input.outputs],
        attrs=pfor_input.op.node_def.attr).outputs[0]
  return wrap(stacked_samples, True)
@RegisterPFor("Multinomial")
def _convert_multinomial(pfor_input: _PforInput):
  """Vectorizes Multinomial.

  If logits are stacked, the loop and batch dims are flattened into a single
  batch for one multinomial call and then unflattened. Otherwise, n times as
  many samples are drawn from the shared logits and reshaped/transposed into
  per-iteration samples.
  """
  logits, logits_stacked, _ = pfor_input.input(0)
  num_samples = pfor_input.unstacked_input(1)
  seed = pfor_input.get_attr("seed")
  seed2 = pfor_input.get_attr("seed2")
  output_dtype = pfor_input.get_attr("output_dtype")
  # TODO(b/222761732): Turn this warning back on when legacy RNGs are
  # deprecated.
  # logging.warning(
  #     "Note that Multinomial inside pfor op may not give same output as "
  #     "inside a sequential loop.")
  n = pfor_input.pfor.loop_len_vector[0]
  if logits_stacked:
    flattened_logits = _flatten_first_two_dims(logits)
    samples = gen_random_ops.multinomial(
        flattened_logits,
        num_samples,
        seed=seed,
        seed2=seed2,
        output_dtype=output_dtype)
    stacked_samples = _unflatten_first_dim(samples, [n])
  else:
    samples = gen_random_ops.multinomial(
        logits,
        num_samples * n,
        seed=seed,
        seed2=seed2,
        output_dtype=output_dtype)
    # Split the n*num_samples draws back into per-iteration groups.
    stacked_samples = array_ops.transpose(
        array_ops.reshape(samples, [-1, n, num_samples]), [1, 0, 2])
  return wrap(stacked_samples, True)
@RegisterPFor("StatelessMultinomial")
@RegisterPFor("StatelessParameterizedTruncatedNormal")
@RegisterPFor("StatelessRandomBinomial")
@RegisterPFor("StatelessRandomGammaV2")
@RegisterPFor("StatelessRandomNormal")
@RegisterPFor("StatelessRandomPoisson")
@RegisterPFor("StatelessRandomUniform")
@RegisterPFor("StatelessRandomUniformInt")
@RegisterPFor("StatelessRandomUniformFullInt")
@RegisterPFor("StatelessTruncatedNormal")
def _convert_stateless_multinomial(pfor_input: _PforInput):
  """Converts stateless random ops with a sequential fallback loop."""
  # Unlike stateful random ops, for stateless ones we want better
  # reproducibility based on seed. Hence we don't want to use a similar strategy
  # as used for stateful ones where we generate a possibly different set of
  # random numbers under vectorization.
  # Unfortunately, the kernels currently are not necessarily setup to do this
  # efficiently and hence we fallback to a sequential loop for vectorization.
  return _fallback_converter(pfor_input, warn=False)
# linalg_ops
@RegisterPForWithArgs("XlaEinsum")
@RegisterPForWithArgs("Einsum")
def _convert_einsum(pfor_input: _PforInput, op_type):
  """Vectorizes einsum by threading an unused axis symbol through the equation."""
  # Einsum may have either 1 or 2 inputs.
  inputs, input_stacked, _ = zip(*[
      pfor_input.input(i) for i in range(pfor_input.num_inputs)])
  # Parse the einsum equation.
  equation = pfor_input.get_attr("equation").decode("utf-8")
  input_expr, output_expr = equation.split("->")
  input_exprs = input_expr.split(",")
  # Pick a letter that does not already appear in the equation to stand for
  # the new (loop) axis.
  chosen_symbol = next(
      (s for s in string.ascii_letters if s not in equation), None)
  if chosen_symbol is None:
    raise ValueError("Could not figure out what symbol to use for new axis.")
  assert any(input_stacked)
  # Prefix the loop axis onto every stacked operand and onto the output.
  input_exprs = [
      "{}{}".format(chosen_symbol, expr) if stacked else expr
      for expr, stacked in zip(input_exprs, input_stacked)
  ]
  output_expr = "{}{}".format(chosen_symbol, output_expr)
  new_equation = "{}->{}".format(",".join(input_exprs), output_expr)
  if op_type == "XlaEinsum":
    if len(inputs) == 1:
      result = xla.einsum(equation=new_equation, a=inputs[0])
    else:
      result = xla.einsum(equation=new_equation, a=inputs[0], b=inputs[1])
  else:
    assert op_type == "Einsum"
    result = special_math_ops.einsum(new_equation, *inputs)
  return wrap(result, True)
@RegisterPFor("Cholesky")
def _convert_cholesky(pfor_input: _PforInput):
  """Cholesky already batches over leading dims, so stacking suffices."""
  stacked = pfor_input.stacked_input(0)
  return wrap(linalg_ops.cholesky(stacked), True)
@RegisterPFor("LogMatrixDeterminant")
def _convert_log_matrix_determinant(pfor_input: _PforInput):
  """LogMatrixDeterminant batches over leading dims; both outputs are stacked."""
  stacked = pfor_input.stacked_input(0)
  results = linalg_ops.log_matrix_determinant(stacked)
  return [wrap(result, True) for result in results]
@RegisterPFor("MatrixInverse")
def _convert_matrix_inverse(pfor_input: _PforInput):
  """MatrixInverse batches over leading dims, so stacking suffices."""
  adjoint = pfor_input.get_attr("adjoint")
  stacked = pfor_input.stacked_input(0)
  return wrap(gen_linalg_ops.matrix_inverse(stacked, adjoint=adjoint), True)
@RegisterPFor("MatrixSolve")
def _convert_matrix_solve(pfor_input: _PforInput):
  """Vectorizes MatrixSolve by stacking both operands (no broadcasting)."""
  pfor_input.stack_inputs()
  lhs_matrix = pfor_input.stacked_input(0)
  rhs_value = pfor_input.stacked_input(1)
  adjoint = pfor_input.get_attr("adjoint")
  solution = gen_linalg_ops.matrix_solve(lhs_matrix, rhs_value, adjoint=adjoint)
  return wrap(solution, True)
@RegisterPFor("MatrixTriangularSolve")
def _convert_matrix_triangular_solve(pfor_input: _PforInput):
  """Vectorizes MatrixTriangularSolve via batch-dim broadcasting."""
  pfor_input.expanddim_inputs_for_broadcast()
  matrix, _, _ = pfor_input.input(0)
  rhs, _, _ = pfor_input.input(1)
  lower = pfor_input.get_attr("lower")
  adjoint = pfor_input.get_attr("adjoint")
  solution = linalg_ops.matrix_triangular_solve(
      matrix, rhs, lower=lower, adjoint=adjoint)
  return wrap(solution, True)
@RegisterPFor("SelfAdjointEigV2")
def _convert_self_adjoint_eig(pfor_input: _PforInput):
  """SelfAdjointEigV2 batches over leading dims, so stacking suffices."""
  stacked = pfor_input.stacked_input(0)
  compute_v = pfor_input.get_attr("compute_v")
  eigenvalues, eigenvectors = gen_linalg_ops.self_adjoint_eig_v2(
      stacked, compute_v=compute_v)
  # If compute_v is False, v will have shape [0].
  return wrap(eigenvalues, True), wrap(eigenvectors, compute_v)
# logging_ops
@RegisterPFor("Assert")
def _convert_assert(pfor_input: _PforInput):
  """Converts Assert; a stacked condition asserts across all iterations."""
  cond, cond_stacked, _ = pfor_input.input(0)
  if cond_stacked:
    # Collapse per-iteration conditions into one boolean.
    cond = math_ops.reduce_all(cond)
  data_inputs = [inp.t for inp in pfor_input.inputs][1:]
  return _create_op(
      "Assert", [cond] + data_inputs, [], attrs=pfor_input.op.node_def.attr)
@RegisterPFor("Print")
def _convert_print(pfor_input: _PforInput):
  """Converts Print, stacking only the printed tensor.

  Unstacked values are deliberately left as-is so they print once here
  rather than once per iteration as in a while_loop.
  """
  pfor_input.stack_inputs([0])
  op_outputs = _create_op(
      "Print", [inp.t for inp in pfor_input.inputs],
      [out.dtype for out in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs
  return [wrap(out, True) for out in op_outputs]
@RegisterPFor("PrintV2")
def _convert_print_v2(pfor_input: _PforInput):
  """Converts PrintV2, printing the full (batch-included) input tensor(s)."""
  all_inputs = [inp.t for inp in pfor_input.inputs]
  out_dtypes = [out.dtype for out in pfor_input.outputs]
  return _create_op(
      "PrintV2", all_inputs, out_dtypes, attrs=pfor_input.op.node_def.attr)
@RegisterPFor("StringFormat")
def _convert_string_format(pfor_input: _PforInput):
  """Converts StringFormat, formatting the full (batch-included) inputs."""
  all_inputs = [inp.t for inp in pfor_input.inputs]
  out_dtypes = [out.dtype for out in pfor_input.outputs]
  op = _create_op(
      "StringFormat", all_inputs, out_dtypes,
      attrs=pfor_input.op.node_def.attr)
  return [wrap(out, False) for out in op.outputs]
# data_flow_ops
# TensorArray conversion is tricky since we don't support arrays of
# TensorArrays. For converting them, we consider two distinct cases:
#
# 1. The array is constructed outside the pfor call, and read/written inside the
# loop.
# This is an easier case since we don't need to make an array of TensorArrays.
# A correctness requirement is that these parallel iterations shouldn't attempt
# to write to the same location. Hence at conversion time we disallow indices to
# be loop-invariant as that would guarantee a collision. Even if the indices are
# not loop-invariant, they could conflict and that shall trigger runtime errors.
#
# 2. The array is constructed and used entirely inside each pfor iteration.
# For simplicity, here we require that the indices used for write/scatter are
# "unstacked". Otherwise it becomes hard to merge the TensorArrays created in
# different pfor iterations. We consider two sub_cases:
#
# 2a Elements written to the array are "stacked"
# To simulate multiple TensorArrays, we may increase the dimension of each
# element of the array. i.e. the i_th row of the j_th entry of the converted
# TensorArray corresponds to the j_th entry of the TensorArray in the i_th
# pfor iteration.
#
# 2b Elements written to the array are "unstacked"
# In this case we don't increase the dimensions to avoid redundant tiling. Each
# iteration is trying to write the same value. So we convert that to a single
# write.
#
# Here are some tricks used to implement the above:
# - TensorArrayV3 constructor encodes the element shape as an attr. Instead of
# trying to trace whether future writes are stacked or unstacked in order to set
# this attr, we set it to correspond to unknown shape.
# - We use the "flow" output of the different ops to track whether the array
# elements are stacked or unstacked. If a stacked write/scatter is done, we make
# the flow stacked as well.
# - We use some heuristic traversal of the graph to track whether the
# TensorArray handle was created inside or outside the pfor loop.
@RegisterPFor("TensorArrayV3")
def _convert_tensor_array_v3(pfor_input: _PforInput):
  """Creates one TensorArray shared across all pfor iterations.

  The element shape is deliberately left unknown since it is not yet known
  whether future writes will be stacked or unstacked.
  """
  size = pfor_input.unstacked_input(0)
  dtype = pfor_input.get_attr("dtype")
  dynamic_size = pfor_input.get_attr("dynamic_size")
  clear_after_read = pfor_input.get_attr("clear_after_read")
  identical_element_shapes = pfor_input.get_attr("identical_element_shapes")
  tensor_array_name = pfor_input.get_attr("tensor_array_name")
  handle, flow = data_flow_ops.tensor_array_v3(
      size,
      dtype=dtype,
      # We don't set element shape since we don't know if writes are stacked or
      # not yet.
      element_shape=None,
      dynamic_size=dynamic_size,
      clear_after_read=clear_after_read,
      identical_element_shapes=identical_element_shapes,
      tensor_array_name=tensor_array_name)
  # Note we keep flow unstacked for now since we don't know if writes will be
  # stacked or not.
  return wrap(handle, False), wrap(flow, False)
@RegisterPFor("TensorArraySizeV3")
def _convert_tensor_array_size_v3(pfor_input: _PforInput):
  """Converts TensorArraySizeV3; the size is shared across iterations."""
  handle = pfor_input.unstacked_input(0)
  flow, flow_stacked, _ = pfor_input.input(1)
  if flow_stacked:
    flow = _unstack_flow(flow)
  return wrap(data_flow_ops.tensor_array_size_v3(handle, flow), False)
def _handle_inside_pfor(pfor_input: _PforInput, handle):
  """Returns True if handle was created inside the pfor loop."""
  # We use some heuristic to find the original TensorArray creation op.
  # The logic should handle the common cases (except cond based subgraphs).
  # In theory the user could perform different operations on the handle (like
  # Reshape, stack multiple handles, etc) which could break this logic.
  # TODO(agarwal): handle Switch/Merge.
  while handle.op.type in ("Enter", "Identity"):
    handle = handle.op.inputs[0]
  creation_op_types = (
      "TensorArrayV3", "TensorArrayGradV3", "TensorArrayGradWithShape")
  if handle.op.type not in creation_op_types:
    raise ValueError(f"Unable to find source for handle {handle}.")
  return pfor_input.pfor.op_is_inside_loop(handle.op)
def _unstack_flow(value):
  """Recovers a scalar flow value from a stacked (tiled) flow tensor."""
  # TODO(agarwal): consider looking if this is a Tile op then get its input.
  # This may avoid running the Tile operations.
  return array_ops.gather(value, 0)
@RegisterPFor("TensorArrayReadV3")
def _convert_tensor_array_read_v3(pfor_input: _PforInput):
  """Converts TensorArrayReadV3.

  Handles both arrays created inside the pfor loop (whose entries may hold
  stacked values, signalled by a stacked flow) and arrays created outside
  (whose entries are always unstacked, so a stacked index turns the read
  into a gather).
  """
  handle = pfor_input.unstacked_input(0)
  index, index_stacked, _ = pfor_input.input(1)
  dtype = pfor_input.get_attr("dtype")
  flow, flow_stacked, _ = pfor_input.input(2)
  if flow_stacked:
    flow = _unstack_flow(flow)
  is_inside_pfor = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
  if is_inside_pfor:
    # Note that if we are inside a control flow construct inside the pfor, and
    # only some of the iterations are doing the read (i.e.
    # `all_indices_partitioned` is True), then the read operation should only
    # return values for the currently active pfor iterations (`all_indices`
    # below). Hence, whenever the returned value is stacked (i.e. `flow` is
    # stacked), we may need to do an extra gather after reading the values. Also
    # note that if `is_inside` is false, then values in the tensor array are
    # unstacked. So the check is only needed in this branch.
    all_indices = pfor_input.pfor.all_indices
    all_indices_partitioned = pfor_input.pfor.all_indices_partitioned
    # Note: flow_stacked indicates if values in the TensorArray are stacked or
    # not.
    if index_stacked:
      if flow_stacked:
        raise ValueError(
            "It looks like TensorArrayReadV3 was called on a TensorArray whose"
            " values are not loop-invariant, and the read indices were also"
            " not loop invariant. This is currently unsupported.")
      value = data_flow_ops.tensor_array_gather_v3(
          handle, index, flow, dtype=dtype)
      return wrap(value, True)
    value = data_flow_ops.tensor_array_read_v3(handle, index, flow, dtype=dtype)
    if flow_stacked and all_indices_partitioned:
      value = array_ops.gather(value, all_indices)
    return wrap(value, flow_stacked)
  # Values in the TensorArray should be unstacked (since different iterations
  # couldn't write to the same location). So whether output is stacked or not
  # depends on index_stacked.
  if index_stacked:
    value = data_flow_ops.tensor_array_gather_v3(
        handle, index, flow, dtype=dtype)
  else:
    value = data_flow_ops.tensor_array_read_v3(handle, index, flow, dtype=dtype)
  return wrap(value, index_stacked)
@RegisterPFor("TensorArrayWriteV3")
def _convert_tensor_array_write_v3(pfor_input: _PforInput):
  """Converts TensorArrayWriteV3.

  Inside-loop arrays require a loop-invariant index (all iterations write to
  the same slot); outside-loop arrays require a stacked index (each iteration
  writes to its own slot, via scatter).
  """
  handle = pfor_input.unstacked_input(0)
  index, index_stacked, _ = pfor_input.input(1)
  value, value_stacked, _ = pfor_input.input(2)
  flow, flow_stacked, _ = pfor_input.input(3)
  if value_stacked and pfor_input.pfor.all_indices_partitioned:
    # Looks like we are in a control flow in a pfor where not all iterations are
    # active now. We don't allow that since that could lead to different indices
    # having different shapes which will be hard to merge later.
    raise ValueError("Writing non loop invariant values to TensorArray from "
                     "inside a while_loop/cond not supported.")
  if flow_stacked:
    flow = _unstack_flow(flow)
  is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
  if is_inside:
    if index_stacked:
      raise ValueError(f"Need indices for {handle} to be loop invariant.")
    if not flow_stacked and not value_stacked:
      flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow)
      return wrap(flow_out, False)
    else:
      if not value_stacked:
        value = _stack(value, pfor_input.pfor.loop_len_vector).t
      # TODO(agarwal): Note that if flow is unstacked and value is stacked, then
      # this may or may not be a safe situation. flow is unstacked both for a
      # freshly created TensorArray, as well as after unstacked values are
      # written to it. If it is the latter, then we cannot write a stacked value
      # now since that may cause runtime errors due to different shapes in the
      # array. At the moment we are not able to handle this gracefully and
      # distinguish between the two cases. That would require some heuristic
      # traversal of the graph to figure out whether all the writes are
      # unstacked or not.
      flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow)
      return _stack(flow_out, pfor_input.pfor.loop_len_vector)
  else:
    if not index_stacked:
      raise ValueError(f"Need indices for {handle} to be not loop invariant.")
    # Note that even when index_stacked is true, actual values in index may
    # still not be unique. However that will cause runtime error when executing
    # the scatter operation below.
    if not value_stacked:
      value = _stack(value, pfor_input.pfor.loop_len_vector).t
    flow_out = data_flow_ops.tensor_array_scatter_v3(handle, index, value, flow)
    return _stack(flow_out, pfor_input.pfor.loop_len_vector)
def _transpose_first_two_dims(value):
  """Swaps the first two dimensions of `value`, preserving the rest."""
  # TODO(agarwal): optimize if one of the dims == 1.
  value_shape = array_ops.shape(value)
  v0 = value_shape[0]
  v1 = value_shape[1]
  # Collapse trailing dims so the transpose is rank-3, then restore them.
  value = array_ops.reshape(value, [v0, v1, -1])
  value = array_ops.transpose(value, [1, 0, 2])
  new_shape = array_ops.concat([[v1, v0], value_shape[2:]], axis=0)
  return array_ops.reshape(value, new_shape)
@RegisterPFor("TensorArrayGatherV3")
def _convert_tensor_array_gather_v3(pfor_input: _PforInput):
  """Converts TensorArrayGatherV3.

  Gathers once over the (possibly flattened) per-iteration indices and then
  reshapes/transposes the result depending on whether the array's values and
  the indices are stacked.
  """
  handle = pfor_input.unstacked_input(0)
  indices, indices_stacked, _ = pfor_input.input(1)
  indices = array_ops.reshape(indices, [-1])
  flow, flow_stacked, _ = pfor_input.input(2)
  if flow_stacked:
    flow = _unstack_flow(flow)
  dtype = pfor_input.get_attr("dtype")
  # TODO(agarwal): support element_shape attr?
  n = pfor_input.pfor.loop_len_vector
  value = data_flow_ops.tensor_array_gather_v3(
      handle, indices, flow, dtype=dtype)
  is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
  if is_inside:
    # flow_stacked indicates if values in the TensorArray are stacked or not.
    if indices_stacked:
      if flow_stacked:
        raise ValueError(
            "It looks like TensorArrayGatherV3 was called on a TensorArray "
            "whose values are not loop-invariant, and the indices were also "
            "not loop invariant. This is currently unsupported.")
      else:
        value = _unflatten_first_dim(value, n)
        return wrap(value, True)
    else:
      if flow_stacked:
        # Since elements in this array are stacked and `value` was produced by
        # gather, its first two dims are "gathered elements" and "stack
        # dimension". Our semantics require these two to be flipped.
        value = _transpose_first_two_dims(value)
      return wrap(value, flow_stacked)
  else:
    # Values in the TensorArray should be unstacked (since different iterations
    # couldn't write to the same location). So whether output is stacked or not
    # depends on indices_stacked.
    if indices_stacked:
      value = _unflatten_first_dim(value, n)
    return wrap(value, indices_stacked)
@RegisterPFor("TensorArrayScatterV3")
def _convert_tensor_array_scatter_v3(pfor_input: _PforInput):
  """Converts TensorArrayScatterV3.

  Inside-loop arrays require loop-invariant indices; outside-loop arrays
  require stacked indices, with the loop and scatter dims flattened so each
  iteration writes to its own slots.
  """
  handle = pfor_input.unstacked_input(0)
  indices, indices_stacked, _ = pfor_input.input(1)
  indices = array_ops.reshape(indices, [-1])
  value, value_stacked, _ = pfor_input.input(2)
  flow, flow_stacked, _ = pfor_input.input(3)
  if flow_stacked:
    flow = _unstack_flow(flow)
  is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0])
  if is_inside:
    if indices_stacked:
      raise ValueError(f"Need indices for {handle} to be loop invariant.")
    # Note that flow_stacked indicates if existing values in the array are
    # stacked or not.
    if not flow_stacked and not value_stacked:
      flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
                                                       flow)
      return wrap(flow_out, False)
    if not value_stacked:
      # TODO(agarwal): tile in the second dimension directly instead of
      # transposing below.
      value = _stack(value, pfor_input.pfor.loop_len_vector).t
    value = _transpose_first_two_dims(value)
    # TODO(agarwal): Note that if a previous write was unstacked, flow will be
    # unstacked, and a stacked value may be written here which may cause
    # runtime error due to different elements having different shape. We do
    # not try to prevent that.
    flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value,
                                                     flow)
    return _stack(flow_out, pfor_input.pfor.loop_len_vector)
  if not indices_stacked:
    raise ValueError(f"Need indices for {handle} to be not loop invariant.")
  if not value_stacked:
    value = _stack(value, pfor_input.pfor.loop_len_vector).t
  value = _flatten_first_two_dims(value)
  flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value, flow)
  return _stack(flow_out, pfor_input.pfor.loop_len_vector)
@RegisterPFor("TensorArrayGradV3")
def _convert_tensor_array_grad_v3(pfor_input: _PforInput):
  """Converts TensorArrayGradV3, prepending the loop dim to the accumulator."""
  handle = pfor_input.unstacked_input(0)
  flow, flow_stacked, _ = pfor_input.input(1)
  if flow_stacked:
    flow = _unstack_flow(flow)
  source = pfor_input.get_attr("source")
  # TODO(agarwal): For now, we assume that gradients are stacked if the
  # TensorArrayGradV3 call is being done inside the pfor. Getting that wrong
  # will give runtime error due to incorrect shape being written to the
  # accumulator. It is difficult to know in advance if gradients written will be
  # stacked or not. Note that flow being stacked is not indicative of the
  # gradient being stacked or not. Revisit this later.
  shape_to_prepend = pfor_input.pfor.loop_len_vector
  grad_handle, flow_out = data_flow_ops.tensor_array_grad_with_shape(
      handle=handle,
      flow_in=flow,
      shape_to_prepend=shape_to_prepend,
      source=source)
  flow_out = _stack(flow_out, pfor_input.pfor.loop_len_vector).t
  return [wrap(grad_handle, False), wrap(flow_out, True)]
def _stack_tensor_list_shape(shape, first_dim):
  """Prepends `first_dim` to a TensorList element `shape`.

  Follows TensorList's shape encoding: a scalar -1 denotes a fully unknown
  shape, so any (statically or dynamically) negative entry in `shape`
  propagates as scalar -1.
  """
  shape_value = tensor_util.constant_value(shape)
  # Note that negative values in the shape are used to signify unknown shapes
  # and are handled in a special way.
  if shape_value is not None:
    shape_value = numpy_compat.np_asarray(shape_value)
    if -1 in shape_value:
      return constant_op.constant(-1)
    elif not shape_value.size:
      return first_dim
    else:
      shape = array_ops.reshape(shape, [-1])
      return tf_cond.cond(
          math_ops.reduce_any(shape < 0),
          lambda: constant_op.constant(-1),
          lambda: array_ops.concat([first_dim, shape], axis=0))
def _tile_variant_with_length(t, length):
  """Stacks the scalar variant `t` into a `length`-sized vector of variants."""
  if _is_variant_with_internal_stacking(t):
    # The content of TensorLists is vectorized, not the variant itself.
    return t
  original_tensor = t
  t.set_shape([])
  t = array_ops.reshape(t, [-1])
  # Variants are tiled on CPU.
  with ops.device("CPU:0"):
    result = array_ops.tile(t, length)
    # TODO(b/169968286): Should regular shape functions do handle data
    # propagation here?
    handle_data_util.copy_handle_data(original_tensor, result)
  return result
def _tile_variant(t, pfor_input: _PforInput):
  """Stacks `t` according to its loop context (loop_len copies)."""
  return _tile_variant_with_length(t, pfor_input.pfor.loop_len_vector)
def _untile_variant(t):
  """Recovers the per-iteration variant from a stacked variant tensor."""
  if _is_variant_with_internal_stacking(t):
    # The content of TensorLists is vectorized, not the variant itself.
    if not t.shape.is_compatible_with([]):
      raise AssertionError(
          ("Unexpectedly saw a vectorized variant (e.g. TensorList) with "
           f"non-scalar shape: {t!r}"))
    return t
  return array_ops.gather(t, 0)
@RegisterPFor("OptionalFromValue")
def _convert_optional_from_value(pfor_input: _PforInput):
  """Converts OptionalFromValue by wrapping the stacked components."""
  pfor_input.stack_inputs()
  stacked_components = [inp.t for inp in pfor_input.inputs]
  optional = gen_optional_ops.optional_from_value(stacked_components)
  return wrap(optional, True)
@RegisterPFor("OptionalGetValue")
def _convert_optional_get_value(pfor_input: _PforInput):
  """Converts OptionalGetValue, prepending the loop dimension to each shape."""
  handle = pfor_input.stacked_input(0)
  output_types = pfor_input.get_attr("output_types")
  original_output_shapes = pfor_input.get_attr("output_shapes")
  # The loop length is the same for every output shape; compute it once
  # instead of recomputing it on every loop iteration (it was previously
  # re-derived inside the loop below).
  loop_len_value = tensor_util.constant_value(pfor_input.pfor.loop_len_vector)
  loop_len_shape = tensor_shape.TensorShape(
      [loop_len_value[0] if loop_len_value is not None else None]
  )
  output_shapes = []
  for shape in original_output_shapes:
    shape = tensor_shape.TensorShape(shape)
    # Each vectorized output gains a leading loop dimension.
    shape = loop_len_shape.concatenate(shape)
    output_shapes.append(shape.as_proto())
  results = gen_optional_ops.optional_get_value(
      handle, output_types, output_shapes
  )
  return [wrap(t, True) for t in results]
@RegisterPFor("TensorListReserve")
def _convert_tensor_list_reserve(pfor_input: _PforInput):
  """Converts TensorListReserve; each element gains a leading loop dim."""
  element_shape = pfor_input.unstacked_input(0)
  num_elements = pfor_input.unstacked_input(1)
  element_dtype = pfor_input.get_attr("element_dtype")
  # Prepend the loop dimension to the element shape.
  stacked_shape = _stack_tensor_list_shape(element_shape,
                                           pfor_input.pfor.loop_len_vector)
  handle = list_ops.tensor_list_reserve(
      stacked_shape, num_elements, element_dtype=element_dtype)
  return wrap(_tile_variant(handle, pfor_input), True)
@RegisterPFor("TensorListElementShape")
def _convert_tensor_list_element_shape(pfor_input: _PforInput):
  """Converts TensorListElementShape, dropping the added loop dimension."""
  handle = _untile_variant(pfor_input.stacked_input(0))
  shape_type = pfor_input.get_attr("shape_type")
  shape = list_ops.tensor_list_element_shape(handle, shape_type)
  # Strip the leading loop dimension that the conversion added.
  shape = array_ops.reshape(shape, [-1])[1:]
  return wrap(shape, False)
@RegisterPFor("TensorListLength")
def _convert_tensor_list_length(pfor_input: _PforInput):
  """Converts TensorListLength; the length is shared by all iterations."""
  handle = _untile_variant(pfor_input.stacked_input(0))
  length = list_ops.tensor_list_length(handle)
  return wrap(length, False)
def _stack_tensor_list(handle, dtype, loop_len_vector, element_shape=None):
  """Converts a loop-invariant TensorList into one with stacked elements.

  Each element of the returned list is the corresponding element of `handle`
  stacked across a new leading loop dimension (via `_stack`).

  Args:
    handle: variant tensor for the source TensorList.
    dtype: dtype of the list elements.
    loop_len_vector: 1-D tensor holding the pfor loop length.
    element_shape: optional element shape; queried from `handle` when None.

  Returns:
    Variant handle for the new TensorList.
  """
  if element_shape is None:
    element_shape = list_ops.tensor_list_element_shape(handle, dtypes.int32)
  length = list_ops.tensor_list_length(handle)
  # Reserve a list whose element shape has the loop dimension prepended.
  new_handle = list_ops.tensor_list_reserve(
      _stack_tensor_list_shape(element_shape, loop_len_vector), length, dtype)
  def _body_fn(i, h):
    # Read element i, stack it across the loop dimension, and write it back.
    elem = list_ops.tensor_list_get_item(handle, i, dtype, element_shape)
    elem = _stack(elem, loop_len_vector).t
    return i + 1, list_ops.tensor_list_set_item(h, i, elem)
  return while_loop.while_loop(lambda i, _: i < length, _body_fn,
                               [0, new_handle])[1]
@RegisterPFor("TensorListGetItem")
def _convert_tensor_list_get_item(pfor_input: _PforInput):
  """Converts TensorListGetItem for stacked/unstacked handles and indices."""
  handle, handle_stacked, _ = pfor_input.input(0)
  index, index_stacked, _ = pfor_input.input(1)
  element_shape = pfor_input.unstacked_input(2)
  element_dtype = pfor_input.get_attr("element_dtype")
  if handle_stacked:
    handle = _untile_variant(handle)
    # Elements in the converted list carry a leading loop dimension.
    element_shape = _stack_tensor_list_shape(element_shape,
                                             pfor_input.pfor.loop_len_vector)
    if index_stacked:
      # We use a sequential loop since that may be more efficient than first
      # gathering and concatenating all the element corresponding to `index`,
      # and then doing a gather on it.
      def _map_fn(i):
        item_i = list_ops.tensor_list_get_item(
            handle,
            index[i],
            element_dtype=element_dtype)
        # Each stored item is stacked across the loop dim; keep only row i.
        return array_ops.gather(item_i, i)
      output = map_fn.map_fn(_map_fn, pfor_input.pfor.all_indices)
      return wrap(output, True)
    else:
      # Same index for every iteration: one read yields the stacked item.
      output = list_ops.tensor_list_get_item(
          handle,
          index,
          element_shape=element_shape,
          element_dtype=element_dtype)
      return wrap(output, True)
  else:
    # Loop-invariant list with per-iteration indices: a gather suffices.
    assert index_stacked
    return wrap(
        list_ops.tensor_list_gather(
            handle,
            index,
            element_shape=element_shape,
            element_dtype=element_dtype), True)
@RegisterPFor("TensorListSetItem")
def _convert_tensor_array_set_item(pfor_input: _PforInput):
  """Converts TensorListSetItem, vectorizing the written item."""
  handle, handle_stacked, _ = pfor_input.input(0)
  index, index_stacked, _ = pfor_input.input(1)
  item, item_stacked, _ = pfor_input.input(2)
  if not handle_stacked:
    # Special case where we can statically guarantee that the indices are
    # disjoint.
    if index is pfor_input.pfor.all_indices:
      if not item_stacked:
        item = _stack(item, pfor_input.pfor.loop_len_vector).t
      return wrap(
          list_ops.tensor_list_scatter(item, index, input_handle=handle), False)
    else:
      # Convert the loop-invariant list into one with stacked elements.
      handle = _stack_tensor_list(handle, item.dtype,
                                  pfor_input.pfor.loop_len_vector)
  else:
    handle = _untile_variant(handle)
  if index_stacked:
    # TODO(agarwal): handle this.
    raise ValueError("Vectorizing writes to a TensorList with loop "
                     "variant indices is currently unsupported.")
  else:
    if not item_stacked:
      item = _stack(item, pfor_input.pfor.loop_len_vector).t
    handle = list_ops.tensor_list_set_item(handle, index, item)
    return wrap(_tile_variant(handle, pfor_input), True)
@RegisterPFor("TensorListPushBack")
def _convert_tensor_list_push_back(pfor_input: _PforInput):
  """Converts TensorListPushBack by pushing one loop-stacked element."""
  handle, handle_stacked, _ = pfor_input.input(0)
  tensor, tensor_stacked, _ = pfor_input.input(1)
  loop_len = pfor_input.pfor.loop_len_vector
  if not handle_stacked:
    # Loop-invariant list: rebuild it with per-iteration stacked elements.
    handle = _stack_tensor_list(handle, tensor.dtype, loop_len)
  else:
    handle = _untile_variant(handle)
  if not tensor_stacked:
    tensor = _stack(tensor, loop_len).t
  new_handle = list_ops.tensor_list_push_back(handle, tensor)
  return wrap(_tile_variant(new_handle, pfor_input), True)
@RegisterPFor("TensorListPopBack")
def _convert_tensor_array_push_back(pfor_input: _PforInput):
  """Converts TensorListPopBack, popping one stacked element."""
  # NOTE(review): despite its name, this function converts TensorListPopBack
  # (see the registration above); consider renaming in a follow-up change.
  handle = pfor_input.stacked_input(0)
  element_shape = pfor_input.unstacked_input(1)
  handle = _untile_variant(handle)
  if element_shape.shape.ndims == 0:
    # Default / unspecified
    vectorized_shape = -1
  else:
    # PopBack has an element shape set when it's the gradient of PushBack, only
    # used when the list is uninitialized.
    n = math_ops.cast(pfor_input.pfor.loop_len_vector, element_shape.dtype)
    vectorized_shape = array_ops.concat([n, element_shape], axis=0)
  output_handle, tensor = gen_list_ops.tensor_list_pop_back(
      input_handle=handle, element_dtype=pfor_input.get_attr("element_dtype"),
      element_shape=vectorized_shape)
  return wrap(output_handle, True), wrap(tensor, True)
@RegisterPFor("TensorListConcatV2")
def _convert_tensor_list_concat_v2(pfor_input: _PforInput):
  """Converts TensorListConcatV2 by concatenating per-iteration elements.

  Stored elements have shape [loop_len, ...]; to concatenate along the
  per-element first dimension (not the loop dimension) the list is first
  copied with each element's first two dimensions transposed.
  """
  input_handle = pfor_input.stacked_input(0)
  element_shape = pfor_input.unstacked_input(1)
  leading_dims = pfor_input.unstacked_input(2)
  element_dtype = pfor_input.get_attr("element_dtype")
  handle = _untile_variant(input_handle)
  length = list_ops.tensor_list_length(handle)
  # Note that element_shape attribute can have incomplete shapes. This doesn't
  # seem to work well when creating another list and then doing a concat on it.
  # Hence we try to find the dynamic shape here.
  element_shape = tf_cond.cond(
      length > 0, lambda: array_ops.shape(
          list_ops.tensor_list_get_item(handle, 0, element_dtype, None)),
      lambda: constant_op.constant([0, 0], dtype=dtypes.int32))
  # The code below creates a copy of the list with each elements' first two
  # dimensions transposed.
  new_element_shape = array_ops.concat(
      [element_shape[1:2], element_shape[0:1], element_shape[2:]], axis=0)
  # Create a new TensorList with elements transposed.
  def _transpose_elem(i, h):
    elem = list_ops.tensor_list_get_item(handle, i, element_dtype, None)
    elem = _transpose_first_two_dims(elem)
    return i + 1, list_ops.tensor_list_set_item(h, i, elem)
  new_handle = list_ops.tensor_list_reserve(new_element_shape, length,
                                            element_dtype)
  new_handle = while_loop.while_loop(lambda i, _: i < length, _transpose_elem,
                                     [0, new_handle])[1]
  output, lengths = gen_list_ops.tensor_list_concat_v2(
      input_handle=new_handle,
      element_dtype=element_dtype,
      element_shape=new_element_shape,
      leading_dims=leading_dims)
  # Move the loop dimension back to the front of the concatenated result.
  output = _transpose_first_two_dims(output)
  return wrap(output, True), wrap(lengths, False)
@RegisterPFor("TensorListStack")
def _convert_tensor_list_stack(pfor_input: _PforInput):
  """Converts TensorListStack, moving the loop dimension to the front."""
  handle = _untile_variant(pfor_input.stacked_input(0))
  element_shape = pfor_input.unstacked_input(1)
  element_dtype = pfor_input.get_attr("element_dtype")
  num_elements = pfor_input.get_attr("num_elements")
  # Elements inside the converted list carry a leading loop dimension.
  element_shape = _stack_tensor_list_shape(element_shape,
                                           pfor_input.pfor.loop_len_vector)
  stacked = list_ops.tensor_list_stack(
      handle,
      element_dtype,
      element_shape=element_shape,
      num_elements=num_elements)
  # Result is [list_len, loop_len, ...]; pfor expects loop_len first.
  return wrap(_transpose_first_two_dims(stacked), True)
@RegisterPFor("TensorListGather")
def _convert_tensor_list_gather(pfor_input: _PforInput):
  """Converts TensorListGather for stacked/unstacked handles and indices."""
  handle, handle_stacked, _ = pfor_input.input(0)
  index, index_stacked, _ = pfor_input.input(1)
  element_shape = pfor_input.unstacked_input(2)
  element_dtype = pfor_input.get_attr("element_dtype")
  if handle_stacked:
    handle = _untile_variant(handle)
    # Elements in the converted list carry a leading loop dimension.
    element_shape = _stack_tensor_list_shape(element_shape,
                                             pfor_input.pfor.loop_len_vector)
    if index_stacked:
      # We use a sequential loop since that may be more efficient than first
      # gathering and concatenating all the element corresponding to `index`,
      # and then doing a gather on it.
      def _map_fn(i):
        item_i = list_ops.tensor_list_gather(
            handle,
            index[i],
            element_dtype=element_dtype)
        # Select iteration i's slice from each gathered (stacked) item.
        axis = array_ops.rank(index) - 1
        return array_ops.gather(item_i, i, axis=axis)
      output = map_fn.map_fn(_map_fn, pfor_input.pfor.all_indices)
      return wrap(output, True)
    else:
      # Same indices for every iteration: one gather yields stacked items.
      output = list_ops.tensor_list_gather(
          handle,
          index,
          element_shape=element_shape,
          element_dtype=element_dtype)
      return wrap(output, True)
  else:
    # Loop-invariant list with per-iteration indices: flatten the indices,
    # gather once, and restore the per-iteration leading shape.
    assert index_stacked
    index_shape = array_ops.shape(index)
    index = array_ops.reshape(index, [-1])
    values = list_ops.tensor_list_gather(
        handle, index, element_shape=element_shape, element_dtype=element_dtype)
    final_shape = array_ops.concat(
        [index_shape, array_ops.shape(values)[1:]], axis=0)
    return wrap(array_ops.reshape(values, final_shape), True)
@RegisterPFor("TensorListScatterIntoExistingList")
def _convert_tensor_list_scatter(pfor_input: _PforInput):
  """Converts TensorListScatterIntoExistingList across pfor iterations."""
  pfor_input.stack_inputs([1])
  handle, handle_stacked, _ = pfor_input.input(0)
  item = pfor_input.stacked_input(1)
  indices, indices_stacked, _ = pfor_input.input(2)
  if handle_stacked:
    handle = _untile_variant(handle)
  else:
    # Convert the loop-invariant list into one with stacked elements.
    handle = _stack_tensor_list(handle, item.dtype,
                                pfor_input.pfor.loop_len_vector)
  # item: [loop_len, num_scatters, ...] -> [num_scatters, loop_len, ...]
  item = _transpose_first_two_dims(item)
  if indices_stacked:
    # Pretend the list is a dense tensor:
    #   list_as_dense: Tensor[list_len, loop_len, ...]
    # And indices are a tensor with shape (before transpose):
    #   indices: Tensor[loop_len, num_scatters]
    # The item to scatter has shape (before transpose):
    #   item: Tensor[loop_len, num_scatters, ...]
    #
    # We want list_as_dense[indices[i, j], i] = item[i, j]
    #
    # Since we're not just indexing along the first axis of `list_as_dense`, we
    # need to first extract the relevant list entries based on `indices`,
    # scatter into them according to the loop index, and re-scatter the chunks
    # we updated back into the list.
    indices = _transpose_first_two_dims(indices)
    indices_flat = array_ops.reshape(indices, [-1])
    # In many cases `indices` will be unique across pfor iterations, but this is
    # not guaranteed. If there are duplicates, we need to map multiple updates
    # to a single chunk extracted from the list. The last update should win.
    unique_indices = array_ops.unique(indices_flat)
    gathered_items = list_ops.tensor_list_gather(
        handle, unique_indices.y, element_dtype=item.dtype,
        element_shape=array_ops.shape(item)[1:])
    loop_idx = math_ops.range(pfor_input.pfor.loop_len_vector[0])
    scatters_per_op = array_ops.shape(indices)[0]
    # Pair each flattened scatter target with its originating loop index.
    unique_indices_loop_idx = array_ops.reshape(array_ops.tile(
        loop_idx[None, :], [scatters_per_op, 1]), [-1])
    scatter_indices = array_ops_stack.stack(
        [unique_indices.idx, unique_indices_loop_idx],
        axis=1)
    # This op does *not* guarantee last-update-wins on GPU, so semantics may not
    # be exactly preserved for duplicate updates there.
    scattered = array_ops.tensor_scatter_nd_update(
        tensor=gathered_items,
        indices=scatter_indices,
        updates=_flatten_first_two_dims(item))
    handle = list_ops.tensor_list_scatter(
        scattered, unique_indices.y, input_handle=handle)
  else:
    handle = list_ops.tensor_list_scatter(item, indices, input_handle=handle)
  return wrap(_tile_variant(handle, pfor_input), True)
@RegisterPFor("TensorListFromTensor")
def _convert_tensor_list_from_tensor(pfor_input: _PforInput):
  """Converts TensorListFromTensor; list elements keep the loop dimension."""
  tensor = pfor_input.stacked_input(0)
  element_shape = pfor_input.unstacked_input(1)
  # Move the loop dimension from the tensor's front into each list element.
  transposed = _transpose_first_two_dims(tensor)
  stacked_shape = _stack_tensor_list_shape(element_shape,
                                           pfor_input.pfor.loop_len_vector)
  handle = list_ops.tensor_list_from_tensor(transposed, stacked_shape)
  return wrap(_tile_variant(handle, pfor_input), True)
@RegisterPFor("TensorScatterUpdate")
def _convert_tensor_scatter_update(pfor_input: _PforInput):
  """Converts TensorScatterUpdate by prefixing indices with the loop index."""
  pfor_input.stack_inputs([0, 1, 2])
  tensor = pfor_input.stacked_input(0)
  indices = pfor_input.stacked_input(1)
  updates = pfor_input.stacked_input(2)
  indices_shape = array_ops.shape(indices)
  indices_rank = array_ops.rank(indices)
  loop_length = indices_shape[0]
  # Create a loop count range and extend its dimensions to match `indices`.
  loop_count_shape = array_ops.tensor_scatter_nd_update(
      array_ops.ones([indices_rank], dtype=dtypes.int32), [[0]], [loop_length])
  loop_count = array_ops.reshape(math_ops.range(loop_length), loop_count_shape)
  # Tile the loop count range for the batch dimensions (all except the first and
  # last dimensions of indices).
  # Rank(indices) >= 3 always for this function so we always have at least 1.
  tile_multiplier = array_ops.tensor_scatter_nd_update(
      indices_shape, [[0], [indices_rank - 1]], [1, 1])
  meta_index = array_ops.tile(loop_count, tile_multiplier)
  # Insert the loop-identifying index.
  indices = array_ops.concat([meta_index, indices], axis=-1)
  # With the loop index prepended, per-iteration scatters are disjoint.
  result = array_ops.tensor_scatter_nd_update(tensor, indices, updates)
  return wrap(result, True)
# StackV2 conversion is tricky since we don't have arrays of StackV2. So similar
# to TensorArrays, we convert them by changing the dimension of the elements
# inside the stack.
#
# We consider two cases:
#
# 1. StackV2 is constructed and used entirely inside the pfor loop.
# We keep a single Stack and perform the push/pop operations of all the
# iterations in lock-step. We also assume that all the iterations perform these
# operations. In case of dynamic control flow, if only some of the iterations
# try to perform a push/pop, then the conversion may not work correctly and may
# cause undefined behavior.
# TODO(agarwal): test StackV2 with dynamic control flow.
#
# 2. StackV2 is constructed outside the pfor loop.
# Performing stack push/pop in a parallel fashion is ill-defined. However given
# that reading stacks created externally is a common operation when computing
# jacobians, we provide some special semantics here as follows.
# - disallow push operations to the stack
# - pop operations are performed in lock step by all iterations, similar to the
# case when the stack is created inside. A single value is popped during the
# lock-step operation and broadcast to all the iterations. Values in the stack
# are assumed to be loop-invariant.
#
# Some other implementation details:
# We use fragile logic to determine whether values in the Stack data structure
# are
# loop invariant or not. When converting push/pop operations, we keep track of
# whether the last conversion used a stacked value or not (see _stack_cache
# below). As a result if an unstacked value is written first, subsequent stacked
# writes are disallowed when they could have been allowed in theory.
# Map from cache key based on StackV2 handle to a bool indicating whether values
# are stacked or not.
# TODO(agarwal): move _stack_cache inside pfor?
_stack_cache = {}  # keyed by (graph, pfor, StackV2 handle); see _stack_cache_key.
def _stack_cache_key(pfor_input: _PforInput):
  """Create cache key corresponding to a stack handle."""
  assert pfor_input.op_type in ["StackPushV2", "StackPopV2"], pfor_input.op_type
  # Walk past pass-through ops to reach the op that created the stack.
  handle = pfor_input.op.inputs[0]
  while handle.op.type in ["Identity", "Enter"]:
    handle = handle.op.inputs[0]
  assert handle.op.type == "StackV2", handle.op
  return ops.get_default_graph(), pfor_input.pfor, handle
def _stack_handle_inside_pfor(handle, pfor_input: _PforInput):
  """Returns True if the StackV2 producing `handle` is inside the loop body."""
  stack = handle
  # Walk past pass-through ops to reach the op that created the stack.
  while stack.op.type in ["Identity", "Enter"]:
    stack = stack.op.inputs[0]
  assert stack.op.type == "StackV2", ("Unable to find StackV2 op. Got %s" %
                                      stack.op)
  return pfor_input.pfor.op_is_inside_loop(stack.op)
@RegisterPFor("StackPushV2")
def _convert_stack_push_v2(pfor_input: _PforInput):
  """Converts StackPushV2, pushing in lock-step across iterations.

  Records in `_stack_cache` whether this stack holds stacked (loop variant)
  values; subsequent pushes and pops must agree with that decision.
  """
  handle = pfor_input.unstacked_input(0)
  elem, elem_stacked, _ = pfor_input.input(1)
  swap_memory = pfor_input.get_attr("swap_memory")
  if not _stack_handle_inside_pfor(pfor_input.op.inputs[0], pfor_input):
    raise ValueError("StackPushV2 not allowed on stacks created outside pfor.")
  stack_cache_key = _stack_cache_key(pfor_input)
  stacked = _stack_cache.get(stack_cache_key, None)
  if stacked is None:
    # First push decides the stackedness of the whole stack.
    stacked = elem_stacked
    _stack_cache[stack_cache_key] = stacked
  else:
    # If we previously made it unstacked then we can't revert to being stacked.
    if not stacked and elem_stacked:
      raise ValueError(
          "It looks like the stack was previously determined to be loop "
          "invariant, but we are now trying to push a loop dependent value "
          "to it. This is currently unsupported.")
    if stacked and not elem_stacked:
      elem = _stack(elem, pfor_input.pfor.loop_len_vector).t
  out = data_flow_ops.stack_push_v2(handle, elem, swap_memory=swap_memory)
  return wrap(out, stacked)
# Note that inputs to this convertor will be unstacked. However it should get
# called since it is a stateful op.
@RegisterPFor("StackPopV2")
def _convert_stack_pop_v2(pfor_input: _PforInput):
  """Converts StackPopV2, popping in lock-step across iterations."""
  handle = pfor_input.unstacked_input(0)
  key = _stack_cache_key(pfor_input)
  stacked = _stack_cache.get(key, None)
  if stacked is None:
    # No StackPushV2 has been converted yet: the push may be outside pfor, or
    # its convertor may never run if its inputs are unconverted. Default to
    # treating the stack's values as loop invariant.
    stacked = False
    _stack_cache[key] = False
  elem_type = pfor_input.get_attr("elem_type")
  return wrap(data_flow_ops.stack_pop_v2(handle, elem_type), stacked)
# parsing_ops
@RegisterPFor("DecodeCSV")
def _convert_decode_csv(pfor_input: _PforInput):
  """Converts DecodeCSV by decoding the whole batch of lines at once."""
  lines = pfor_input.stacked_input(0)
  record_defaults = [
      pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs)
  ]
  field_delim = pfor_input.get_attr("field_delim")
  use_quote_delim = pfor_input.get_attr("use_quote_delim")
  # An empty select_cols attr means "use all columns".
  select_cols = pfor_input.get_attr("select_cols") or None
  decoded = gen_parsing_ops.decode_csv(
      lines,
      record_defaults,
      field_delim=field_delim,
      use_quote_delim=use_quote_delim,
      select_cols=select_cols)
  return [wrap(t, True) for t in decoded]
@RegisterPFor("ParseSingleExample")
def _convert_parse_single_example(pfor_input: _PforInput):
  """Converts ParseSingleExample into one batched ParseExample call."""
  serialized = pfor_input.stacked_input(0)
  dense_defaults = [
      pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs)
  ]
  # The batched op accepts the same keys/types/shapes attributes.
  output = gen_parsing_ops.parse_example(
      serialized=serialized,
      names=[],
      dense_defaults=dense_defaults,
      sparse_keys=pfor_input.get_attr("sparse_keys"),
      dense_keys=pfor_input.get_attr("dense_keys"),
      sparse_types=pfor_input.get_attr("sparse_types"),
      dense_shapes=pfor_input.get_attr("dense_shapes"))
  return [wrap(t, True, True) for t in nest.flatten(output)]
@RegisterPFor("ParseExampleV2")
def _convert_parse_example_v2(pfor_input: _PforInput):
  """Converts ParseExampleV2 of scalar inputs into one batched call."""
  serialized = pfor_input.stacked_input(0)
  sparse_keys = pfor_input.unstacked_input(2)
  dense_keys = pfor_input.unstacked_input(3)
  ragged_keys = pfor_input.unstacked_input(4)
  dense_defaults = [
      pfor_input.unstacked_input(i) for i in range(5, pfor_input.num_inputs)
  ]
  num_sparse = pfor_input.get_attr("num_sparse")
  sparse_types = pfor_input.get_attr("sparse_types")
  ragged_value_types = pfor_input.get_attr("ragged_value_types")
  ragged_split_types = pfor_input.get_attr("ragged_split_types")
  dense_shapes = pfor_input.get_attr("dense_shapes")
  # Only the scalar-per-iteration case vectorizes to a single batched call.
  if serialized.shape.ndims not in (None, 1):
    raise ValueError("ParseExampleV2 can only be converted if `serialized` "
                     f"is scalar. Received shape: {serialized.shape}.")
  output = gen_parsing_ops.parse_example_v2(
      serialized=serialized,
      names=[],
      sparse_keys=sparse_keys,
      dense_keys=dense_keys,
      ragged_keys=ragged_keys,
      dense_defaults=dense_defaults,
      num_sparse=num_sparse,
      sparse_types=sparse_types,
      ragged_value_types=ragged_value_types,
      ragged_split_types=ragged_split_types,
      dense_shapes=dense_shapes)
  return [wrap(t, True, True) for t in nest.flatten(output)]
# functional_ops
def _convert_function_call(func, converter, inputs):
  """Converts a call to `func` by running `converter` over its body.

  Args:
    func: the function definition whose FuncGraph is converted.
    converter: a PFor instance set up for `func`'s graph.
    inputs: list of WrappedTensor inputs for the call.

  Returns:
    List of WrappedTensor outputs matching `func`'s outputs.
  """
  assert isinstance(func.graph, func_graph.FuncGraph), func
  assert isinstance(converter, PFor)
  graph_outputs = func.graph.outputs[:len(func.function_type.flat_outputs)]
  # TODO(agarwal): consider caching this function definition.
  @def_function.function
  def f(*args):
    assert all(isinstance(arg, WrappedTensor) for arg in args), args
    assert len(args) == len(func.graph.inputs), (args, func.graph.inputs)
    # Map inputs to function arguments.
    for inp, arg in zip(func.graph.inputs, args):
      converter._add_conversion(inp, arg)
    # Convert output tensors.
    return tuple([converter._convert_helper(x).t for x in graph_outputs])
  call_outputs = f(*inputs)
  assert len(call_outputs) == len(graph_outputs)
  outputs = []
  for call_output, output_tensor in zip(call_outputs, graph_outputs):
    # Re-derive stackedness metadata from the converter for each output.
    func_output = converter._convert_helper(output_tensor)
    outputs.append(
        wrap(call_output, func_output.is_stacked, func_output.is_sparse_stacked)
    )
  return outputs
@RegisterPFor("StatefulPartitionedCall")
@RegisterPFor("PartitionedCall")
def _convert_partitioned_call(pfor_input: _PforInput):
  """Converts (Stateful)PartitionedCall by converting the called function."""
  func_name = pfor_input.get_attr("f").name
  func = pfor_input.op.graph._get_function(compat.as_bytes(func_name))
  assert isinstance(func.graph, func_graph.FuncGraph), (
      "Could not find FuncGraph object for %s. Got func %s" % (func_name, func))
  pfor = pfor_input.pfor
  # Build a child converter scoped to the called function's graph, inheriting
  # the enclosing loop's configuration.
  converter = PFor(
      loop_var=pfor.loop_var,
      loop_len=pfor.loop_len_vector[0],
      pfor_ops=func.graph.get_operations(),
      fallback_to_while_loop=pfor.fallback_to_while_loop,
      all_indices=pfor.all_indices,
      all_indices_partitioned=pfor.all_indices_partitioned,
      pfor_config=pfor.pfor_config)
  return _convert_function_call(func, converter, pfor_input.inputs)
def _partition_inputs_for_indices(inputs, indices):
  """Gathers `indices` rows of each stacked input; passes others through."""
  partitioned = []
  for inp in inputs:
    if not inp.is_stacked:
      partitioned.append(inp)
    else:
      partitioned.append(wrap(array_ops.gather(inp.t, indices), True))
  return partitioned
def _outputs_for_branch(func_name, indices, pfor_input: _PforInput, inputs):
  """Converts one cond/case branch for the iterations given by `indices`.

  Args:
    func_name: name of the branch function in the graph.
    indices: loop indices taking this branch, or None for all of them.
    pfor_input: the _PforInput of the If/Case op being converted.
    inputs: WrappedTensor inputs already partitioned for this branch.

  Returns:
    List of stacked output tensors for the branch.
  """
  if indices is None:
    # All iterations take this branch; inherit the parent's partitioning.
    indices = pfor_input.pfor.all_indices
    partitioned = pfor_input.pfor.all_indices_partitioned
  else:
    partitioned = True
  func = pfor_input.op.graph._get_function(func_name)
  converter = PFor(
      loop_var=pfor_input.pfor.loop_var,
      loop_len=array_ops.size(indices),
      pfor_ops=func.graph.get_operations(),
      fallback_to_while_loop=pfor_input.pfor.fallback_to_while_loop,
      all_indices=indices,
      all_indices_partitioned=partitioned,
      pfor_config=pfor_input.pfor.pfor_config)
  outputs = _convert_function_call(func, converter, inputs)
  stacked_outputs = []
  for out in outputs:
    # Callers stitch branch outputs together, so every output must be stacked.
    if not out.is_stacked:
      stacked_outputs.append(_stack(out.t, [array_ops.size(indices)]).t)
    else:
      stacked_outputs.append(out.t)
  return stacked_outputs
# TODO(agarwal): Currently the converted code aggressively tiles loop variant
# outputs from the then/else branches. Instead, it could do so only if at least
# one of the branch outputs is loop variant.
@RegisterPFor("StatelessIf")
@RegisterPFor("If")
def _convert_if(pfor_input: _PforInput):
  """Converts If/StatelessIf, partitioning iterations when cond is stacked."""
  cond, cond_stacked, _ = pfor_input.input(0)
  inputs = pfor_input.inputs[1:]
  then_branch = pfor_input.get_attr("then_branch")
  else_branch = pfor_input.get_attr("else_branch")
  if cond_stacked:
    cond_int = math_ops.cast(cond, dtypes.int32)
    # Compute loop indices for the different branches
    false_indices, true_indices = data_flow_ops.dynamic_partition(
        pfor_input.pfor.all_indices, cond_int, 2)
    # Compute indices for cond being True or False.
    if pfor_input.pfor.all_indices_partitioned:
      else_indices, then_indices = data_flow_ops.dynamic_partition(
          math_ops.range(pfor_input.pfor.loop_len_vector[0]),
          cond_int, 2)
    else:
      else_indices, then_indices = false_indices, true_indices
    # Partition inputs
    then_inputs = _partition_inputs_for_indices(inputs, then_indices)
    else_inputs = _partition_inputs_for_indices(inputs, else_indices)
    # Convert "then" branch.
    then_outputs = _outputs_for_branch(then_branch.name, true_indices,
                                       pfor_input, then_inputs)
    # Convert "else" branch.
    else_outputs = _outputs_for_branch(else_branch.name, false_indices,
                                       pfor_input, else_inputs)
    assert len(then_outputs) == len(else_outputs)
    # Note that if the "then" and "else" branches are updating the same state,
    # and possibly reading them as well, it could lead to undefined behavior
    # since the ordering of those operations is not well defined.
    # One possibility is to order all the "then" branches to execute before all
    # the "else" branches so that the side-effects in the former are visible to
    # the latter. For now, we leave that as undefined behavior.
    outputs = []
    # Merge outputs
    for then_output, else_output in zip(then_outputs, else_outputs):
      out = data_flow_ops.dynamic_stitch([then_indices, else_indices],
                                         [then_output, else_output])
      outputs.append(wrap(out, True))
    return outputs
  else:
    # Loop-invariant condition: a single cond over the converted branches.
    outputs = tf_cond.cond(
        cond,
        lambda: _outputs_for_branch(then_branch.name, None, pfor_input, inputs),
        lambda: _outputs_for_branch(else_branch.name, None, pfor_input, inputs))
    return [wrap(t, True) for t in outputs]
@RegisterPFor("Case")
@RegisterPFor("StatelessCase")
def _convert_stateless_case(pfor_input: _PforInput):
  """Converts Case/StatelessCase, partitioning iterations across branches."""
  branch_idx, is_stacked, _ = pfor_input.input(0)
  branches = pfor_input.get_attr("branches")
  inputs = pfor_input.inputs[1:]
  if is_stacked:
    logging.info("Running stacked flow")
    # Compute loop indices for the different branches
    switch_indices = data_flow_ops.dynamic_partition(
        pfor_input.pfor.all_indices, branch_idx, len(branches))
    if pfor_input.pfor.all_indices_partitioned:
      partitioned_indices = data_flow_ops.dynamic_partition(
          math_ops.range(pfor_input.pfor.loop_len_vector[0]), branch_idx,
          len(branches))
    else:
      partitioned_indices = switch_indices
    # Partition inputs
    input_list = []
    for indices in partitioned_indices:
      input_list.append(_partition_inputs_for_indices(inputs, indices))
    outputs = []
    # `branch_inputs` (previously shadowed the outer `inputs`) holds the
    # partitioned inputs for each branch.
    for (b, indices, branch_inputs) in zip(branches, switch_indices,
                                           input_list):
      out = _outputs_for_branch(b.name, indices, pfor_input, branch_inputs)
      outputs.extend(out)
    # Stitch per-branch outputs back into loop order.
    out = data_flow_ops.dynamic_stitch(partitioned_indices, outputs)
    return [wrap(out, True)]
  else:
    new_branches = []
    for b in branches:
      # Bind the branch name as a default argument to avoid Python's
      # late-binding closure pitfall.
      def new_function(func=b.name):
        return _outputs_for_branch(func, None, pfor_input,
                                   pfor_input.inputs[1:])
      new_branches.append(new_function)
    # Removed a dead `outputs = []` assignment that was immediately overwritten.
    outputs = control_flow_switch_case.switch_case(branch_idx, new_branches)
    return [wrap(t, True) for t in outputs]
| PFor |
python | great-expectations__great_expectations | contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_profile_numeric_columns_percent_diff_between_inclusive_threshold_range.py | {
"start": 7913,
"end": 15746
} | class ____(
ProfileNumericColumnsDiffExpectation
):
"""Expect a statistic's percent delta for a given column of a DataProfiler percent difference report to be within the specified threshold, inclusive.
This expectation takes the percent difference report between the data it is called on and a DataProfiler profile of the same schema loaded from a provided path.
Each numerical column percent delta will be checked against a user provided dictionary of columns paired with dictionaries of statistics containing lower and upper bounds.
This function builds upon the custom ProfileNumericColumnsDiff Expectation of Capital One's DataProfiler Expectations.
It is expected that a statistic's percent delta for a given column is within the specified threshold, inclusive.
Args:
profile_path (str): A path to a saved DataProfiler profile object on the local filesystem.
limit_check_report_keys (dict): A dict, containing column names as keys and dicts as values that contain statistics as keys and dicts as values containing two keys:
"lower" denoting the lower bound for the threshold range, and "upper" denoting the upper bound for the threshold range.
mostly (float - optional): a value indicating the lower bound percentage of successful values that must be present to evaluate to success=True.
validator.expect_profile_numeric_columns_percent_diff_between_inclusive_threshold_range(
profile_path = "C:/path_to/my_profile.pkl",
limit_check_report_keys = {
"column_one": {
"min": {"lower": 0.5, "upper": 1.5}, #Indicating 50% lower bound and 150% upper bound
},
"*": {
"*": {"lower": 0.0, "upper": 2.0}, #Indicating 0% lower bound and 200% upper bound
},
}
)
Note: In limit_check_report_keys, "*" in place of a column denotes a general operator in which the value it stores will be applied to every column in the data that has no explicit key.
"*" in place of a statistic denotes a general operator in which the bounds it stores will be applied to every statistic for the given column that has no explicit key.
"""
example_profile_data = [
[2, 5, "10", "ten", 25],
[4, 10, "20", "twenty", 50],
[6, 15, "30", "thirty", 75],
[8, 20, "40", "forty", 100],
[10, 25, "50", "fifty", 125],
]
example_profile_columns = [
"by_2",
"by_5",
"str_by_10",
"words_by_10",
"by_25",
]
df = pd.DataFrame(example_profile_data, columns=example_profile_columns)
profiler_opts = dp.ProfilerOptions()
profiler_opts.structured_options.multiprocess.is_enabled = False
example_profile = dp.Profiler(df, options=profiler_opts)
profile_path = "/example_profiles/expect_profile_diff_less_than_threshold_profile.pkl"
dir_path = os.path.dirname(os.path.abspath(__file__)) # noqa: PTH120, PTH100
profile_path = dir_path + profile_path
example_profile.save(filepath=profile_path)
examples = [
{
"data": {
"by_2": [4, 6, 8, 10, 12],
"by_5": [10, 15, 20, 25, 30],
"str_by_10": ["20", "30", "40", "50", "60"],
"words_by_10": ["twenty", "thirty", "forty", "fifty", "sixty"],
"by_25": [50, 75, 100, 125, 150],
},
"tests": [
{
"title": "profile_min_delta_witin_threshold",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"profile_path": profile_path,
"limit_check_report_keys": {
"*": {
"min": {"lower": 0.5, "upper": 2.0},
},
},
},
"out": {"success": True},
},
{
"title": "single_column_min_delta_equals_threshold",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"profile_path": profile_path,
"limit_check_report_keys": {
"by_2": {
"min": {"lower": 1.0, "upper": 1.0},
},
},
},
"out": {"success": True},
},
{
"title": "single_column_min_delta_above_threshold",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"profile_path": profile_path,
"limit_check_report_keys": {
"by_2": {
"min": {"lower": 0.0, "upper": 0.99},
},
},
},
"out": {"success": False},
},
{
"title": "profile_all_stats_beyond_delta_threshold",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"profile_path": profile_path,
"limit_check_report_keys": {
"*": {"*": {"lower": 0, "upper": 0}},
},
},
"out": {"success": False},
},
],
},
]
profile_metric = (
"data_profiler.profile_numeric_columns_percent_diff_between_inclusive_threshold_range"
)
success_keys = (
"profile_path",
"limit_check_report_keys",
"numerical_diff_statistics",
"mostly",
)
default_limit_check_report_keys = {
"*": {
"min": {"lower": 0, "upper": 0},
"max": {"lower": 0, "upper": 0},
"sum": {"lower": 0, "upper": 0},
"mean": {"lower": 0, "upper": 0},
"median": {"lower": 0, "upper": 0},
"median_absolute_deviation": {"lower": 0, "upper": 0},
"variance": {"lower": 0, "upper": 0},
"stddev": {"lower": 0, "upper": 0},
"unique_count": {"lower": 0, "upper": 0},
"unique_ratio": {"lower": 0, "upper": 0},
"gini_impurity": {"lower": 0, "upper": 0},
"unalikeability": {"lower": 0, "upper": 0},
"sample_size": {"lower": 0, "upper": 0},
"null_count": {"lower": 0, "upper": 0},
}
}
numerical_diff_statistics = list(default_limit_check_report_keys["*"].keys())
default_kwarg_values = {
"limit_check_report_keys": default_limit_check_report_keys,
"numerical_diff_statistics": numerical_diff_statistics,
"mostly": 1.0,
}
library_metadata = {
"requirements": ["dataprofiler", "tensorflow", "scikit-learn", "numpy"],
"maturity": "experimental", # "concept_only", "experimental", "beta", or "production"
"tags": [
"dataprofiler",
"dataassistance",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@stevensecreti", # Don't forget to add your github handle here!
],
}
if __name__ == "__main__":
diagnostics_report = (
ExpectProfileNumericColumnsPercentDiffBetweenInclusiveThresholdRange().run_diagnostics()
)
print(diagnostics_report.generate_checklist())
| ExpectProfileNumericColumnsPercentDiffBetweenInclusiveThresholdRange |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/kubernetes_engine.py | {
"start": 45112,
"end": 48555
} | class ____(GKEOperatorMixin, KubernetesDeleteResourceOperator):
"""
Delete a resource in the specified Google Kubernetes Engine cluster.
This Operator assumes that the system has gcloud installed and has configured a
connection id with a service account.
.. seealso::
For more detail about Kubernetes Engine authentication have a look at the reference:
https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#internal_ip
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GKEDeleteCustomResourceOperator`
:param location: The name of the Google Kubernetes Engine zone or region in which the
cluster resides, e.g. 'us-central1-a'
:param cluster_name: The name of the Google Kubernetes Engine cluster.
:param use_internal_ip: Use the internal IP address as the endpoint.
:param use_dns_endpoint: Use the DNS address as the endpoint.
:param project_id: The Google Developers Console project id
:param gcp_conn_id: The Google cloud connection id to use. This allows for
users to specify a service account.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(
set(GKEOperatorMixin.template_fields) | set(KubernetesDeleteResourceOperator.template_fields)
)
def __init__(
self,
location: str,
cluster_name: str,
use_internal_ip: bool = False,
use_dns_endpoint: bool = False,
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.location = location
self.cluster_name = cluster_name
self.gcp_conn_id = gcp_conn_id
self.use_internal_ip = use_internal_ip
self.use_dns_endpoint = use_dns_endpoint
self.impersonation_chain = impersonation_chain
if self.gcp_conn_id is None:
raise AirflowException(
"The gcp_conn_id parameter has become required. If you want to use Application Default "
"Credentials (ADC) strategy for authorization, create an empty connection "
"called `google_cloud_default`.",
)
# There is no need to manage the kube_config file, as it will be generated automatically.
# All Kubernetes parameters (except config_file) are also valid for the GKEDeleteCustomResourceOperator.
if self.config_file:
raise AirflowException(
"config_file is not an allowed parameter for the GKEDeleteCustomResourceOperator."
)
| GKEDeleteCustomResourceOperator |
python | wandb__wandb | wandb/sdk/lib/redirect.py | {
"start": 19142,
"end": 20611
} | class ____(RedirectBase):
"""Patches the write method of current sys.stdout/sys.stderr.
Captures data in a raw form rather than using the emulator
"""
def __init__(
self,
src: Literal["stdout", "stderr"],
cbs: Iterable[Callable[[str], None]] = (),
) -> None:
super().__init__(src=src, cbs=cbs)
self._uninstall: Callable[[], None] | None = None
def _on_write(self, data: str | bytes, written: int, /) -> None:
if isinstance(data, bytes):
written_data = data[:written].decode("utf-8")
else:
written_data = data[:written]
for cb in self.cbs:
try:
cb(written_data)
except Exception:
logger.exception("error in %s callback", self.src)
def install(self) -> None:
if self._uninstall:
return
try:
if self.src == "stdout":
self._uninstall = console_capture.capture_stdout(self._on_write)
else:
self._uninstall = console_capture.capture_stderr(self._on_write)
except console_capture.CannotCaptureConsoleError:
logger.exception("failed to install %s hooks", self.src)
wandb.termwarn(
f"Failed to wrap {self.src}. Console logs will not be captured.",
)
def uninstall(self) -> None:
if self._uninstall:
self._uninstall()
| StreamRawWrapper |
python | django__django | tests/syndication_tests/models.py | {
"start": 350,
"end": 601
} | class ____(models.Model):
title = models.CharField(max_length=200)
entry = models.ForeignKey(Entry, models.CASCADE)
updated = models.DateTimeField()
published = models.DateTimeField()
class Meta:
ordering = ["updated"]
| Article |
python | FactoryBoy__factory_boy | tests/test_using.py | {
"start": 88071,
"end": 88979
} | class ____(unittest.TestCase):
def test_same_seed_is_used_between_fuzzy_and_faker_generators(self):
class StudentFactory(factory.Factory):
one = factory.fuzzy.FuzzyDecimal(4.0)
two = factory.Faker('name')
three = factory.Faker('name', locale='it')
four = factory.Faker('name')
class Meta:
model = TestObject
seed = 1000
factory.random.reseed_random(seed)
students_1 = (StudentFactory(), StudentFactory())
factory.random.reseed_random(seed)
students_2 = (StudentFactory(), StudentFactory())
self.assertEqual(students_1[0].one, students_2[0].one)
self.assertEqual(students_1[0].two, students_2[0].two)
self.assertEqual(students_1[0].three, students_2[0].three)
self.assertEqual(students_1[0].four, students_2[0].four)
| RepeatableRandomSeedFakerTests |
python | kamyu104__LeetCode-Solutions | Python/counting-bits.py | {
"start": 29,
"end": 572
} | class ____(object):
def countBits(self, num):
"""
:type num: int
:rtype: List[int]
"""
res = [0]
for i in xrange(1, num + 1):
# Number of 1's in i = (i & 1) + number of 1's in (i / 2).
res.append((i & 1) + res[i >> 1])
return res
def countBits2(self, num):
"""
:type num: int
:rtype: List[int]
"""
s = [0]
while len(s) <= num:
s.extend(map(lambda x: x + 1, s))
return s[:num + 1]
| Solution |
python | RaRe-Technologies__gensim | gensim/models/poincare.py | {
"start": 53366,
"end": 55354
} | class ____:
"""Stream relations for `PoincareModel` from a tsv-like file."""
def __init__(self, file_path, encoding='utf8', delimiter='\t'):
"""Initialize instance from file containing a pair of nodes (a relation) per line.
Parameters
----------
file_path : str
Path to file containing a pair of nodes (a relation) per line, separated by `delimiter`.
Since the relations are asymmetric, the order of `u` and `v` nodes in each pair matters.
To express a "u is v" relation, the lines should take the form `u delimeter v`.
e.g: `kangaroo mammal` is a tab-delimited line expressing a "`kangaroo is a mammal`" relation.
For a full input file example, see `gensim/test/test_data/poincare_hypernyms.tsv
<https://github.com/RaRe-Technologies/gensim/blob/master/gensim/test/test_data/poincare_hypernyms.tsv>`_.
encoding : str, optional
Character encoding of the input file.
delimiter : str, optional
Delimiter character for each relation.
"""
self.file_path = file_path
self.encoding = encoding
self.delimiter = delimiter
def __iter__(self):
"""Stream relations from self.file_path decoded into unicode strings.
Yields
-------
(unicode, unicode)
Relation from input file.
"""
with utils.open(self.file_path, 'rb') as file_obj:
if sys.version_info[0] < 3:
lines = file_obj
else:
lines = (line.decode(self.encoding) for line in file_obj)
# csv.reader requires bytestring input in python2, unicode input in python3
reader = csv.reader(lines, delimiter=self.delimiter)
for row in reader:
if sys.version_info[0] < 3:
row = [value.decode(self.encoding) for value in row]
yield tuple(row)
| PoincareRelations |
python | paramiko__paramiko | paramiko/channel.py | {
"start": 47659,
"end": 48665
} | class ____(BufferedFile):
"""
A file-like wrapper around `.Channel`. A ChannelFile is created by calling
`Channel.makefile`.
.. warning::
To correctly emulate the file object created from a socket's `makefile
<python:socket.socket.makefile>` method, a `.Channel` and its
`.ChannelFile` should be able to be closed or garbage-collected
independently. Currently, closing the `ChannelFile` does nothing but
flush the buffer.
"""
def __init__(self, channel, mode="r", bufsize=-1):
self.channel = channel
BufferedFile.__init__(self)
self._set_mode(mode, bufsize)
def __repr__(self):
"""
Returns a string representation of this object, for debugging.
"""
return "<paramiko.ChannelFile from " + repr(self.channel) + ">"
def _read(self, size):
return self.channel.recv(size)
def _write(self, data):
self.channel.sendall(data)
return len(data)
| ChannelFile |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-ads/unit_tests/common.py | {
"start": 1494,
"end": 4259
} | class ____:
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = super(MockGoogleAdsFieldService, cls).__new__(cls)
cls._instance.request_query = None
return cls._instance
def search_google_ads_fields(self, request):
self.request_query = request.query
class MockResponse:
def __init__(self, name):
self.name = name
fields = [name.strip("'") for name in request.query.split("WHERE name in (")[1].split(")")[0].split(",")]
return [MockResponse(name) for name in fields]
ERROR_MAP = {
"CUSTOMER_NOT_FOUND": {
"failure_code": AuthenticationErrorEnum.AuthenticationError.CUSTOMER_NOT_FOUND,
"failure_msg": "msg2",
"error_type": "authenticationError",
},
"USER_PERMISSION_DENIED": {
"failure_code": AuthorizationErrorEnum.AuthorizationError.USER_PERMISSION_DENIED,
"failure_msg": "msg1",
"error_type": "authorizationError",
},
"CUSTOMER_NOT_ENABLED": {
"failure_code": AuthorizationErrorEnum.AuthorizationError.CUSTOMER_NOT_ENABLED,
"failure_msg": "msg2",
"error_type": "authorizationError",
},
"QUERY_ERROR": {
"failure_code": QueryErrorEnum.QueryError.UNEXPECTED_END_OF_QUERY,
"failure_msg": "Error in query: unexpected end of query.",
"error_type": "queryError",
},
"UNRECOGNIZED_FIELD": {
"failure_code": QueryErrorEnum.QueryError.UNRECOGNIZED_FIELD,
"failure_msg": "unrecognized field in the query.",
"error_type": "queryError",
},
"RESOURCE_EXHAUSTED": {"failure_code": QuotaErrorEnum.QuotaError.RESOURCE_EXHAUSTED, "failure_msg": "msg4", "error_type": "quotaError"},
"UNEXPECTED_ERROR": {
"failure_code": AuthorizationErrorEnum.AuthorizationError.UNKNOWN,
"failure_msg": "Unexpected error message",
"error_type": "authorizationError",
},
}
def mock_google_ads_request_failure(mocker, error_names):
errors = []
for error_name in error_names:
param = ERROR_MAP[error_name]
# Extract the parameter values from the request object
failure_code = param.get("failure_code", 1)
failure_msg = param.get("failure_msg", "it failed")
error_type = param.get("error_type", "requestError")
errors.append({"error_code": {error_type: failure_code}, "message": failure_msg})
protobuf_as_json = json.dumps({"errors": errors, "request_id": "1"})
failure = GoogleAdsFailure.from_json(protobuf_as_json)
exception = GoogleAdsException(None, None, failure, 1)
mocker.patch("source_google_ads.google_ads.GoogleAds.send_request", side_effect=exception)
| MockGoogleAdsFieldService |
python | pypa__pip | src/pip/_internal/metadata/pkg_resources.py | {
"start": 1036,
"end": 1112
} | class ____(NamedTuple):
name: str
value: str
group: str
| EntryPoint |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_embedding_v1.py | {
"start": 1733,
"end": 18433
} | class ____(tpu_embedding_base.TPUEmbeddingBase):
"""The TPUEmbedding mid level API running on TPU without Embedding accelerator.
NOTE: This mid level API is not intended for large embedding table lookup.
Embedding tables will be replicated across devices rather than sharding
across them. To do large embedding table lookup, please use the
`tpu.experimental.embedding.TPUEmbedding` class. This class is an alternative
way to do embedding lookups when the TPU doesn't support any version of
embedding feature. See
`tpu.experimental.tpu_hardware_feature.embedding_feature` for a detailed
explanation.
This class has to be created under the `TPUStrategy`, Otherwise a RuntimeError
will be raised.
```python
strategy = tf.distribute.TPUStrategy(...)
with strategy.scope():
embedding = tf.tpu.experimental.embedding.TPUEmbeddingV0(
feature_config=feature_config,
optimizer=tf.tpu.experimental.embedding.SGD(0.1))
```
When creating a distributed dataset that is to be passed to the lookup
operation a special input option must be specified:
```python
distributed_dataset = (
strategy.distribute_datasets_from_function(
dataset_fn=...,
options=tf.distribute.InputOptions(
experimental_fetch_to_device=False))
dataset_iterator = iter(distributed_dataset)
```
Below is an example of a training and evaluation step:
```python
optimizer = tf.keras.optimizers.SGD(0.1)
@tf.function
def training_step(dataset_iterator, num_steps):
def tpu_step(embedding_features):
with tf.GradientTape() as tape:
tape.watch(embedding.embedding_table.values())
activation = embedding(embedding_features)
model_output = model(activations)
loss = ... # some function of labels and model_output
embedding_gradients = tape.gradient(loss,
embedding.embedding_table.values())
optimizer.apply_gradients(list(zip(gradients,
mid_level_api.embedding_tables.values())))
# Insert your model gradient and optimizer application here
for _ in tf.range(num_steps):
strategy.run(tpu_step, args=(next(dataset_iterator), ))
@tf.function
def evalution_step(dataset_iterator, num_steps):
def tpu_step(embedding_features):
activations = embedding(embedding_features)
model_output = model(activations)
# Insert your evaluation code here.
for _ in tf.range(num_steps):
strategy.run(tpu_step, args=(next(dataset_iterator), ))
```
NOTE: The optimizer used here is a Keras optimizer. In order to make the slot
variable creation stay consistent between Keras optimizers and
embedding optimizers, the `slot_variable_creation_fn` argument of the
embedding optimizers has to be passed with the Keras `add_slot` function. Also
note that the slot names might be slightly different between them.
```python
optimizer = tf.keras.optimizers.Adagrad(learning_rate=0.1)
def slot_variable_creation_fn(table, slot_names, slot_initializers):
slots = {}
for slot, initializer in zip(slot_names, slot_initializers):
slots[slot] = optimizer.add_slot(table, slot, initializer)
return slots
embedding_optimizer = tf.experimental.embedding.Adagrad(
learning_rate=0.1,
slot_variable_creation_fn=slot_variable_creation_fn)
# Use the embedding optimizer to create mid level api and keras optimizer to
# apply gradients.
```
"""
def __init__(
self,
feature_config: Union[tpu_embedding_v2_utils.FeatureConfig, Iterable], # pylint:disable=g-bare-generic
optimizer: Optional[tpu_embedding_v2_utils._Optimizer]): # pylint:disable=protected-access
super(TPUEmbeddingV0, self).__init__(feature_config, optimizer)
self._strategy = distribute_lib.get_strategy()
if not isinstance(self._strategy,
(tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV2)):
raise RuntimeError(
"TPUEmbeddingV0 should be created under TPUStrategy but found {}."
.format(self._strategy))
self._built = False
@property
def embedding_tables(
self) -> Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable]:
"""Returns a dict of embedding tables, keyed by `TableConfig`."""
self._maybe_build()
# Only return the tables and not the slot variables.
return {
table: self._variables[table.name]["parameters"]
for table in self._table_config
}
def _create_variables_and_slots(
self) -> Dict[Text, Dict[Text, tf_variables.Variable]]:
"""Create variables for TPU embeddings.
Note that this will always ensure that the variable is created under the
TPUStrategy.
Returns:
A dict of dicts. The outer dict is keyed by the table names and the inner
dicts are keyed by 'parameters' and the slot variable names.
"""
variables = {}
for table in self._table_config:
# created TPUDistributedVariable.
variables[table.name] = self._create_variables(table, trainable=True)
return variables
def _maybe_build(self):
if not self._built:
# This can be called while tracing a function, so we wrap the
# initialization code with init_scope so it runs eagerly, this means that
# it will not be included in the function graph generated by tracing so
# that we can be sure that we only initialize the TPU for embeddings
# exactly once.
with ops.init_scope():
self.build()
def _apply_combiner_to_embeddings(
self,
embeddings: tensor.Tensor,
weight: tensor.Tensor,
combiner: Optional[Text] = None) -> tensor.Tensor:
"""Apply the combiner to the embedding look up result on second to last axis.
Args:
embeddings: A Tensor of the embedding lookup result.
weight: A Tensor of weight which has the same shape of the embeddings.
combiner: One of "mean", "sum", "sqrtn". Defaults to "mean".
Raises:
ValueError: If the combiner is not one of 'mean', 'sqrtn' or 'sum'.
Returns:
A Tensor.
"""
if combiner is None:
combiner = "mean"
if combiner == "sum":
embeddings = math_ops.reduce_sum(embeddings, axis=-2)
elif combiner == "mean":
embeddings = math_ops.reduce_sum(embeddings, axis=-2)
weight_sum = math_ops.reduce_sum(weight, axis=-2)
embeddings = math_ops.div_no_nan(embeddings, weight_sum)
elif combiner == "sqrtn":
embeddings = math_ops.reduce_sum(embeddings, axis=-2)
weight_squared = math_ops.pow(weight, 2)
weight_sum = math_ops.reduce_sum(weight_squared, axis=-2)
weight_sum_sqrt = math_ops.sqrt(weight_sum)
embeddings = math_ops.div_no_nan(embeddings, weight_sum_sqrt)
else:
raise ValueError(
f"combiner must be one of 'mean', 'sqrtn' or 'sum', got {combiner}")
return embeddings
def _pad_or_truncate_with_sequence_length(
self, embeddings: tensor.Tensor, sequence_length: int
) -> tensor.Tensor:
"""Pad or truncate the embedding lookup result based on the sequence length.
Args:
embeddings: A rank 3 Tensor of the embedding lookup result.
sequence_length: number of the max sequence length set in the feature
config.
Returns:
A Tensor with second last axis padded or truncated.
"""
original_sequence_length = embeddings.shape[1]
if original_sequence_length > sequence_length:
embeddings = array_ops.slice(
embeddings, begin=[0, 0, 0], size=[-1, sequence_length, -1])
else:
embeddings = array_ops.pad(
embeddings,
paddings=[[0, 0], [0, sequence_length - original_sequence_length],
[0, 0]])
return embeddings
def embedding_lookup(self,
features: Any,
weights: Optional[Any] = None) -> Any:
"""Apply embedding lookup on TPUs using Tensorcore.
Note that all the sparse and ragged tensors will be converted to dense
tensors on CPU and then passed to the TPU to do embedding look up. Large
embedding lookup is not supported by this API, use the TPUEmbedding mid
level api instead.
Args:
features: a nested structure of Tensors, SparseTensors or RaggedTensors.
weights: a nested structure of Tensors, SparseTensors or RaggedTensors or
None for no weights. If not None, structure must match that of inputs,
but entries are allowed to be None.
Returns:
A nested structure of Tensors with the same structure as inputs.
"""
if not self._built:
self.build()
nest.assert_same_structure(features, self._feature_config)
flat_inputs = nest.flatten(features)
flat_weights = [None] * len(flat_inputs)
if weights is not None:
nest.assert_same_structure(features, weights)
flat_weights = nest.flatten(weights)
flat_features = nest.flatten_with_joined_string_paths(self._feature_config)
outputs = []
for inp, weight, (path, feature) in zip(flat_inputs, flat_weights,
flat_features):
table = self.embedding_tables[feature.table]
if weight is not None:
if isinstance(inp, tensor.Tensor):
raise ValueError(
"Weight specified for {}, but input is dense.".format(path))
elif type(weight) is not type(inp):
raise ValueError(
"Weight for {} is of type {} but it does not match type of the "
"input which is {}.".format(path, type(weight), type(inp)))
elif feature.max_sequence_length > 0:
raise ValueError("Weight specified for {}, but this is a sequence "
"feature.".format(path))
if isinstance(inp, tensor.Tensor):
if feature.max_sequence_length > 0:
raise ValueError(
"Feature {} is a sequence feature but a dense tensor "
"was passed.".format(path))
outputs.append(embedding_ops.embedding_lookup_v2(table, inp))
elif isinstance(inp, sparse_tensor.SparseTensor):
outputs.append(
self._embedding_lookup_for_sparse_tensor(inp, weight, table,
feature))
elif isinstance(inp, ragged_tensor.RaggedTensor):
outputs.append(
self._embedding_lookup_for_ragged_tensor(inp, weight, table,
feature))
else:
raise ValueError("Input {} is type {}. Tensor, SparseTensor or "
"RaggedTensor expected.".format(path, type(inp)))
return nest.pack_sequence_as(self._feature_config, outputs)
def _embedding_lookup_for_sparse_tensor(
self, inp: sparse_tensor.SparseTensor,
weight: Optional[sparse_tensor.SparseTensor],
table: tf_variables.Variable,
feature: tpu_embedding_v2_utils.FeatureConfig) -> tensor.Tensor:
"""Embedding lookup for sparse tensor based on its feature config.
Args:
inp: a single SparseTensor input.
weight: None or SparseTensor which has the same shape of the input.
table: a table variable.
feature: a feature config.
Returns:
Embedding lookup result.
"""
# This computation needs to placed outside of tpu as the size of the
# indices and values can change for different batch which can cause
# the program to re-compile.
def sparse_to_dense_computation(inp, weight):
if weight is None:
weight = sparse_tensor.SparseTensor(
inp.indices,
array_ops.ones_like(inp.values, dtype=dtypes.float32),
dense_shape=inp.dense_shape)
# Pad the sparse tensor to be dense tensor.
inp = sparse_ops.sparse_tensor_to_dense(inp)
weight = sparse_ops.sparse_tensor_to_dense(weight)
return inp, weight
inp, weight = tpu_replication.outside_compilation(
sparse_to_dense_computation, inp=inp, weight=weight)
embeddings = embedding_ops.embedding_lookup_v2(table, inp)
weight = array_ops.expand_dims(weight, -1)
embeddings *= weight
if not feature.output_shape and feature.max_sequence_length > 0:
embeddings = self._pad_or_truncate_with_sequence_length(
embeddings, feature.max_sequence_length)
else:
embeddings = self._apply_combiner_to_embeddings(embeddings, weight,
feature.table.combiner)
return embeddings
def _embedding_lookup_for_ragged_tensor(
self, inp: ragged_tensor.RaggedTensor,
weight: Optional[ragged_tensor.RaggedTensor],
table: tf_variables.Variable,
feature: tpu_embedding_v2_utils.FeatureConfig) -> tensor.Tensor:
"""Embedding lookup for ragged tensor based on its feature config.
Args:
inp: a single rank 2 RaggedTensor input.
weight: None or RaggedTensor which has the same shape of the input.
table: a table variable.
feature: a feature config.
Returns:
Embedding lookup result.
Raises:
ValueError: if input ragged tensor is not rank 2 or output shape set in
the feature config doesn't match with the first dim size of the input.
"""
if inp.shape.rank != 2:
raise ValueError(
"Only rank 2 ragged tensor is supported, but got rank {}".format(
inp.shape.rank))
batch_size = inp.shape[0]
# This computation needs to placed outside of tpu as the size of the row
# splits and values can change for different batch which can cause
# the program to re-compile.
def ragged_to_dense_outside_compilation(inp, weight, batch_size, feature):
if weight is None:
weight = ragged_tensor.RaggedTensor.from_row_splits(
array_ops.ones_like(inp.values, dtype=dtypes.float32),
inp.row_splits)
if not feature.output_shape and feature.max_sequence_length > 0:
inp = inp.to_tensor(shape=(batch_size, feature.max_sequence_length))
# Ignore weight if it is a sequence feature.
weight = array_ops.ones_like(inp, dtype=dtypes.float32)
elif feature.output_shape:
# Eagerly run the following op as the result as to be a number in
# order to use it as part of the output shape.
with ops.init_scope():
output_batch_size = math_ops.reduce_prod(feature.output_shape).numpy()
# If the output batch size matches the data batch size, treat it as
# normal ragged input.
if output_batch_size == batch_size:
inp, weight = inp.to_tensor(), weight.to_tensor()
# If the data batch size is a factor of the output batch size, the
# divide result will be the sequence length. Ignore the weights and
# combiner.
elif (
output_batch_size > batch_size
and output_batch_size % batch_size == 0
):
# Pad or truncate in the sequence dimension
seq_length = output_batch_size // batch_size
inp = inp.to_tensor(shape=(batch_size, seq_length))
# Ignore weight if it is a sequence feature.
weight = array_ops.ones_like(inp, dtype=dtypes.float32)
else:
raise ValueError(
"Output shape set in the FeatureConfig should be the factor of "
"the input data batch size. But instead got output shape {}, "
"input data batch size {}".format(feature.output_shape,
batch_size))
else:
inp, weight = inp.to_tensor(), weight.to_tensor()
return inp, weight
inp, weight = tpu_replication.outside_compilation(
ragged_to_dense_outside_compilation,
inp=inp,
weight=weight,
batch_size=batch_size,
feature=feature)
embeddings = embedding_ops.embedding_lookup_v2(table, inp)
weight = array_ops.expand_dims(weight, -1)
embeddings *= weight
if feature.output_shape:
with ops.init_scope():
output_batch_size = math_ops.reduce_prod(feature.output_shape).numpy()
if output_batch_size == batch_size:
embeddings = self._apply_combiner_to_embeddings(embeddings, weight,
feature.table.combiner)
embeddings = array_ops.reshape(
embeddings, shape=feature.output_shape + [feature.table.dim])
else:
if feature.max_sequence_length == 0:
embeddings = self._apply_combiner_to_embeddings(embeddings, weight,
feature.table.combiner)
return embeddings
| TPUEmbeddingV0 |
python | pytorch__pytorch | torch/_higher_order_ops/wrap.py | {
"start": 753,
"end": 1254
} | class ____(HigherOrderOperator):
def __init__(self) -> None:
super().__init__("wrap")
def __call__(self, func, *args, **kwargs):
# Dynamo already traces the body of HigherOrderOp beforehand when it
# so no need to trace into it.
import torch._dynamo # noqa: F401
from torch._dynamo import disable
@disable
def wrapper():
result = func(*args, **kwargs)
return result
return wrapper()
wrap = Wrap()
| Wrap |
python | pytorch__pytorch | test/dynamo/test_exc.py | {
"start": 507,
"end": 10619
} | class ____(LoggingTestCase):
maxDiff = None
def test_unsupported_real_stack(self):
# exercise Unsupported constructor and augment_exc_message
def fn002(x):
torch._dynamo.graph_break()
def fn001(x):
x = x + 1
fn002(x)
self.assertExpectedInlineMunged(
Unsupported,
lambda: torch.compile(fn001, backend="eager", fullgraph=True)(
torch.randn(1)
),
"""\
Call to `torch._dynamo.graph_break()`
Explanation: User-inserted graph break. Message: None
Hint: Remove the `torch._dynamo.graph_break()` call.
Developer debug context: Called `torch._dynamo.graph_break()` with args `[]`, kwargs `{}`
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0025.html
from user code:
File "test_exc.py", line N, in fn001
fn002(x)
File "test_exc.py", line N, in fn002
torch._dynamo.graph_break()""",
)
@torch._dynamo.config.patch(verbose=True, suppress_errors=True)
@make_logging_test()
@unittest.skipIf(IS_FBCODE, "stack trace slightly different in fbcode")
def test_internal_error_suppress_errors(self, records):
def fn001(x):
def f(ctx):
raise AssertionError
comptime(f)
torch.compile(fn001, backend="eager")(torch.randn(1))
record = self.getRecord(records, "WON'T CONVERT")
self.assertExpectedInline(
munge_exc(record.getMessage()),
"""\
WON'T CONVERT fn001 test_exc.py line N
========== TorchDynamo Stack Trace ==========
Traceback (most recent call last):
File "test_exc.py", line N, in f
raise AssertionError
AssertionError:
from user code:
File "test_exc.py", line N, in fn001
comptime(f)
========== The above exception occurred while processing the following code ==========
File "test_exc.py", line N, in test_internal_error_suppress_errors
torch.compile(fn001, backend="eager")(torch.randn(1))
File "test_exc.py", line N, in fn001
comptime(f)
==========""",
)
@make_logging_test()
def test_not_implemented_error(self, records):
def fn001(x):
def f(ctx):
raise NotImplementedError
# Ensure graph break is not possible
for _ in range(3):
comptime(f)
torch.compile(fn001, backend="eager")(torch.randn(1))
record = self.getRecord(records, "WON'T CONVERT")
self.assertExpectedInline(
munge_exc(record.getMessage()),
"""\
WON'T CONVERT fn001 test_exc.py line N
due to:
Traceback (most recent call last):
File "test_exc.py", line N, in f
raise NotImplementedError
torch._dynamo.exc.InternalTorchDynamoError: NotImplementedError:
from user code:
File "test_exc.py", line N, in fn001
comptime(f)""",
)
@torch._dynamo.config.patch(inject_BUILD_SET_unimplemented_TESTING_ONLY=True)
@make_logging_test(dynamo=logging.DEBUG)
def test_unsupported_error(self, records):
def fn001(x):
return {1, 2}
torch.compile(fn001, backend="eager")(torch.randn(1))
# TODO: There is no graph break log! This is because the graph break
# logging is not in a centralized location; unsupported
# instruction bypasses it
self.getRecord(records, "Graph break:")
@torch._dynamo.config.patch(suppress_errors=False)
def test_internal_error_no_suppress(self):
def fn001(x):
# NB: avoid decorator, as 3.11 changed the line number attributed
# in this situation
def f(ctx):
raise AssertionError
comptime(f)
# NB: OK for user code to be truncated here, because the regular
# exception backtrace has the rest of the crumbs
self.assertExpectedInlineMunged(
AssertionError,
lambda: torch.compile(fn001, backend="eager")(torch.randn(1)),
"""\
from user code:
File "test_exc.py", line N, in fn001
comptime(f)""",
)
@make_logging_test(graph_breaks=True)
def test_graph_break_log(self, records):
def fn002(x):
x = x + 1
torch._dynamo.graph_break()
x = x + 1
return x
def fn001(x):
return fn002(x)
torch.compile(fn001, backend="eager")(torch.randn(1))
record = self.getRecord(records, "Graph break in user code")
# TODO: This should also report the enclosing frames; need to plumb
# frame object to it
self.assertExpectedInline(
munge_exc(record.getMessage()),
"""\
Graph break in user code at test_exc.py:N
Graph Break Reason: Call to `torch._dynamo.graph_break()`
Explanation: User-inserted graph break. Message: None
Hint: Remove the `torch._dynamo.graph_break()` call.
Developer debug context: Called `torch._dynamo.graph_break()` with args `[]`, kwargs `{}`
For more details about this graph break, please visit: https://meta-pytorch.github.io/compile-graph-break-site/gb/gb0025.html
User code traceback:
File "test_exc.py", line N, in test_graph_break_log
torch.compile(fn001, backend="eager")(torch.randn(1))
File "test_exc.py", line N, in fn001
return fn002(x)
File "test_exc.py", line N, in fn002
torch._dynamo.graph_break()
""", # noqa: B950
)
@make_logging_test(graph_breaks=True)
def test_graph_break_log_generic_jump(self, records):
def fn(x):
if x.sum() > 0:
return x + 1
else:
return x - 1
torch.compile(fn, backend="eager")(torch.ones(3, 3))
# check for record existence
self.getRecord(records, "Graph break in user code")
@torch._dynamo.config.patch(suppress_errors=False)
def test_backend_suppress_line(self):
def fn001(x):
x = torch.relu(x)
return x + 1
# Do NOT let this get attributed to x + 1
self.assertExpectedInlineMunged(
torch._dynamo.exc.BackendCompilerFailed,
lambda: torch.compile(fn001, backend="relu_compile_error_TESTING_ONLY")(
torch.randn(1)
),
"""\
backend='relu_compile_error_TESTING_ONLY' raised:
ReluCompileError:""",
)
@skipIf(not TEST_Z3, "z3 not installed")
@torch._dynamo.config.patch(
assume_static_by_default=False,
suppress_errors=False,
)
@torch.fx.experimental._config.patch(
inject_EVALUATE_EXPR_flip_equality_TESTING_ONLY=True,
translation_validation=True,
translation_validation_no_bisect=True,
)
@skipIfWindows(
msg='AssertionError: "tran[551 chars]s1 s2 s3) s0)\n ==> (<= (+ s1 s2) (+ s0 (* -1[511 chars][0])' # noqa: PLR0133
!= 'tran[551 chars]s1 s2) (+ s0 (* -1 s3)))\n ==> (<= (+ s1 s2) [483 chars][0])"'
)
def test_trigger_on_error(self):
from torch.fx.experimental.validator import ValidationException
@torch.compile
def fn(x, shape):
return x.split(shape)
self.assertExpectedInlineMunged(
ValidationException,
lambda: fn(torch.randn(20), (5, 10, 5)),
"""\
translation validation failed.
Model:
==> L['shape'][0]: 0
==> L['shape'][1]: 0
==> L['shape'][2]: 0
==> L['x'].size()[0]: 3
==> L['x'].storage_offset(): 0
==> L['x'].stride()[0]: 1
==> s3: 0
==> s52: 0
==> s77: 3
==> s86: 0
Assertions:
==> (== 0 L['x'].storage_offset())
==> (== 1 L['x'].stride()[0])
==> (== L['shape'][0] s86)
==> (== L['shape'][1] s52)
==> (== L['shape'][2] s3)
==> (== L['x'].size()[0] s77)
==> (> s77 1)
Target Expressions:
==> (!= (+ s3 s52 s86) s77)
==> (<= 0 s3)
==> (<= 0 s52)
==> (<= 0 s86)
==> (<= 2 s77)
==> (== 0 L['x'].storage_offset())
==> (== 1 L['x'].stride()[0])
==> (== L['shape'][0] s86)
==> (== L['shape'][1] s52)
==> (== L['shape'][2] s3)
==> (== L['x'].size()[0] s77)
==> (> s77 0)
==> (>= 0 s86)
Failed Source Expressions:
==> (== (+ L['shape'][0] L['shape'][1] L['shape'][2]) L['x'].size()[0])""",
)
    @skipIf(not TEST_Z3, "z3 not installed")
    @torch._dynamo.config.patch(
        assume_static_by_default=False,
        suppress_errors=False,
    )
    @torch.fx.experimental._config.patch(
        inject_EVALUATE_EXPR_flip_equality_TESTING_ONLY=True,
        translation_validation=True,
    )
    def test_trigger_bisect_on_error(self):
        """Same injected failure as above, but with bisection enabled.

        Without translation_validation_no_bisect, the failure is bisected to
        the specific FX node/expression that broke validation, so the richer
        BisectValidationException (naming the node) is expected instead.
        """
        from torch.fx.experimental.validator import BisectValidationException
        @torch.compile
        def fn(x, shape):
            return x.split(shape)
        self.assertExpectedInlineMunged(
            BisectValidationException,
            lambda: fn(torch.randn(20), (5, 10, 5)),
            """\
translation validation failed when evaluating: Eq(s3 + s52 + s86, s77)
Failure occurred while running node:
    %split : [num_users=3] = call_method[target=split](args = (%l_x_, (%l_shape_0_, %l_shape_1_, %l_shape_2_)), kwargs = {})
Model:
  ==> L['shape'][0]: 0
  ==> L['shape'][1]: 0
  ==> L['shape'][2]: 0
  ==> L['x'].size()[0]: 3
  ==> L['x'].storage_offset(): 0
  ==> L['x'].stride()[0]: 1
  ==> s3: 0
  ==> s52: 0
  ==> s77: 3
  ==> s86: 0
Assertions:
  ==> (== 0 L['x'].storage_offset())
  ==> (== 1 L['x'].stride()[0])
  ==> (== L['shape'][0] s86)
  ==> (== L['shape'][1] s52)
  ==> (== L['shape'][2] s3)
  ==> (== L['x'].size()[0] s77)
  ==> (> s77 1)
Target Expressions:
  ==> (!= (+ s3 s52 s86) s77)
  ==> (<= 0 s3)
  ==> (<= 0 s52)
  ==> (<= 0 s86)
  ==> (<= 2 s77)
  ==> (== 0 L['x'].storage_offset())
  ==> (== 1 L['x'].stride()[0])
  ==> (== L['shape'][0] s86)
  ==> (== L['shape'][1] s52)
  ==> (== L['shape'][2] s3)
  ==> (== L['x'].size()[0] s77)
  ==> (> s77 0)
Failed Source Expressions:
  ==> (== (+ L['shape'][0] L['shape'][1] L['shape'][2]) L['x'].size()[0])""",
        )
# Standard dynamo test entry point (handles device/backend setup).
if __name__ == "__main__":
    from torch._dynamo.test_case import run_tests
    run_tests()
| ExcTests |
python | walkccc__LeetCode | solutions/2341. Maximum Number of Pairs in Array/2341.py | {
"start": 0,
"end": 230
} | class ____:
def numberOfPairs(self, nums: list[int]) -> list[int]:
ans = [0] * 2
count = collections.Counter(nums)
for i in range(101):
ans[0] += count[i] // 2
ans[1] += count[i] & 1
return ans
| Solution |
python | cython__cython | Cython/Build/Tests/TestInline.py | {
"start": 3489,
"end": 5695
} | class ____(unittest.TestCase):
    def _run(self, code, setup_code=None, **kwargs):
        """Run ``cymeit`` on *code* and sanity-check timings and loop count.

        Returns the raw timings so callers can make further type checks.
        """
        timings, number = cymeit(code, setup_code=setup_code, **kwargs)
        self.assertGreater(min(timings), 0)
        # Guard that autoscaling leads to reasonable timings.
        # Note: we cannot compare against the expected 0.2 due to large timing variations on CI.
        max_time = max(timing * number for timing in timings)
        if isinstance(max_time, int):
            # int timings presumably come from an ns-resolution timer;
            # 100_000 ns corresponds to the 0.0001 s float bound below.
            self.assertGreaterEqual(max_time, 100_000)
        else:
            self.assertGreaterEqual(max_time, 0.0001)
        self.assertGreater(number, 10) # arbitrary lower bound for our very quick benchmarks
        return timings
def test_benchmark_simple(self):
setup_code = "numbers = list(range(0, 1000, 3))"
self._run("sum([num for num in numbers])", setup_code, repeat=3)
    def test_benchmark_timer(self):
        """A float-returning timer (perf_counter) must yield float timings."""
        import time
        setup_code = "numbers = list(range(0, 1000, 3))"
        timings = self._run("sum([num for num in numbers])", setup_code, timer=time.perf_counter, repeat=3)
        for timing in timings:
            self.assertIsInstance(timing, float)
    def test_benchmark_timer_ns(self):
        """An int-returning timer (perf_counter_ns) must yield int timings."""
        import time
        setup_code = "numbers = list(range(0, 1000, 3))"
        timings = self._run("sum([num for num in numbers])", setup_code, timer=time.perf_counter_ns, repeat=3)
        for timing in timings:
            self.assertIsInstance(timing, int)
    def test_benchmark_multiline_setup(self):
        """Setup code may be a multi-line (indented) snippet."""
        setup_code = """
        numbers = list(range(0, 100, 3))
        def csum(numbers):
            result = 0
            for number in numbers:
                result += number
            return result
        """
        self._run("csum(numbers)", setup_code)
    def test_benchmark_multiline_code(self):
        """The benchmarked snippet itself may be a multi-line expression."""
        setup_code = "numbers = list(range(0, 100, 3))"
        self._run("""
            sum([
                num
                for num in numbers
            ])
            """,
            setup_code,
            repeat=3
        )
    def test_benchmark_in_module(self):
        # ``import_module`` exposes that module's names (here math.fsum)
        # to the benchmarked code without an explicit setup snippet.
        self._run("fsum(range(100))", import_module='math', repeat=2)
| TestCymeit |
python | tensorflow__tensorflow | tensorflow/compiler/tests/rmsprop_test.py | {
"start": 1037,
"end": 5165
} | class ____(xla_test.XLATestCase):
def _rmsprop_update_numpy(self,
var,
g,
mg,
rms,
mom,
lr,
decay=0.9,
momentum=0.0,
epsilon=1e-10,
centered=False):
rms_t = rms * decay + (1 - decay) * g * g
denom_t = rms_t + epsilon
if centered:
mg_t = mg * decay + (1 - decay) * g
denom_t -= mg_t * mg_t
else:
mg_t = mg
mom_t = momentum * mom + lr * g / np.sqrt(denom_t, dtype=denom_t.dtype)
var_t = var - mom_t
return var_t, mg_t, rms_t, mom_t
  def testBasic(self):
    """Run 3 RMSProp steps on two variables and compare every slot and the
    variables themselves against the NumPy reference implementation, for
    both the plain and the centered variant and all supported dtypes."""
    for dtype in self.float_types | self.complex_types:
      for centered in [False, True]:
        with self.session(), self.test_scope():
          # Initialize variables for numpy implementation.
          var0_np = np.array([1.0, 2.0], dtype=dtype)
          grads0_np = np.array([0.1, 0.1], dtype=dtype)
          var1_np = np.array([3.0, 4.0], dtype=dtype)
          grads1_np = np.array([0.01, 0.01], dtype=dtype)
          mg0_np = np.array([0.0, 0.0], dtype=dtype)
          mg1_np = np.array([0.0, 0.0], dtype=dtype)
          rms0_np = np.array([1.0, 1.0], dtype=dtype)
          rms1_np = np.array([1.0, 1.0], dtype=dtype)
          mom0_np = np.array([0.0, 0.0], dtype=dtype)
          mom1_np = np.array([0.0, 0.0], dtype=dtype)
          var0 = resource_variable_ops.ResourceVariable(var0_np)
          var1 = resource_variable_ops.ResourceVariable(var1_np)
          grads0 = constant_op.constant(grads0_np)
          grads1 = constant_op.constant(grads1_np)
          learning_rate = 3.0
          rms_opt = rmsprop.RMSPropOptimizer(learning_rate, centered=centered)
          rms_update = rms_opt.apply_gradients(
              zip([grads0, grads1], [var0, var1]))
          self.evaluate(variables.global_variables_initializer())
          # The "mg" slot only exists for the centered variant.
          mg0 = rms_opt.get_slot(var0, "mg")
          self.assertEqual(mg0 is not None, centered)
          mg1 = rms_opt.get_slot(var1, "mg")
          self.assertEqual(mg1 is not None, centered)
          rms0 = rms_opt.get_slot(var0, "rms")
          self.assertIsNotNone(rms0)
          rms1 = rms_opt.get_slot(var1, "rms")
          self.assertIsNotNone(rms1)
          mom0 = rms_opt.get_slot(var0, "momentum")
          self.assertIsNotNone(mom0)
          mom1 = rms_opt.get_slot(var1, "momentum")
          self.assertIsNotNone(mom1)
          # Fetch params to validate initial values
          self.assertAllClose([1.0, 2.0], self.evaluate(var0))
          self.assertAllClose([3.0, 4.0], self.evaluate(var1))
          # Run 3 steps of RMSProp
          for _ in range(3):
            self.evaluate(rms_update)
            var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
                var0_np,
                grads0_np,
                mg0_np,
                rms0_np,
                mom0_np,
                learning_rate,
                centered=centered)
            var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
                var1_np,
                grads1_np,
                mg1_np,
                rms1_np,
                mom1_np,
                learning_rate,
                centered=centered)
            # Validate updated params
            if centered:
              self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
              self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
            self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
            self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
            self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
            self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
            self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
            self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
# Run under the standard TF test runner.
if __name__ == "__main__":
  test.main()
| RmspropTest |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/emr/pyspark_step_launcher.py | {
"start": 9501,
"end": 20413
} | class ____(StepLauncher):
    def __init__(
        self,
        region_name,
        staging_bucket,
        staging_prefix,
        wait_for_logs,
        action_on_failure,
        cluster_id,
        spark_config,
        local_job_package_path,
        deploy_local_job_package,
        s3_job_package_path=None,
    ):
        """Validate and store configuration for launching steps on EMR.

        ``deploy_local_job_package`` and ``s3_job_package_path`` are mutually
        exclusive (enforced by the invariant below).
        """
        self.region_name = check.str_param(region_name, "region_name")
        self.staging_bucket = check.str_param(staging_bucket, "staging_bucket")
        self.staging_prefix = check.str_param(staging_prefix, "staging_prefix")
        self.wait_for_logs = check.bool_param(wait_for_logs, "wait_for_logs")
        self.action_on_failure = check.str_param(action_on_failure, "action_on_failure")
        self.cluster_id = check.str_param(cluster_id, "cluster_id")
        # Arbitrary nested dict of spark settings; flattened later in
        # _get_emr_step_def, so it is stored unvalidated here.
        self.spark_config = spark_config
        check.invariant(
            not deploy_local_job_package or not s3_job_package_path,
            "If deploy_local_job_package is set to True, s3_job_package_path should not "
            "also be set.",
        )
        self.local_job_package_path = check.str_param(
            local_job_package_path, "local_job_package_path"
        )
        self.deploy_local_job_package = check.bool_param(
            deploy_local_job_package, "deploy_local_job_package"
        )
        self.s3_job_package_path = check.opt_str_param(s3_job_package_path, "s3_job_package_path")
        self.emr_job_runner = EmrJobRunner(region=self.region_name)
    def _post_artifacts(self, log, step_run_ref, run_id, step_key):
        """Synchronize the step run ref and pyspark code to an S3 staging bucket for use on EMR.
        For the zip file, consider the following toy example:
            # Folder: my_pyspark_project/
            # a.py
            def foo():
                print(1)
            # b.py
            def bar():
                print(2)
            # main.py
            from a import foo
            from b import bar
            foo()
            bar()
        This will zip up `my_pyspark_project/` as `my_pyspark_project.zip`. Then, when running
        `spark-submit --py-files my_pyspark_project.zip emr_step_main.py` on EMR this will
        print 1, 2.
        """
        from dagster_pyspark.utils import build_pyspark_zip
        with tempfile.TemporaryDirectory() as temp_dir:
            s3 = boto3.client("s3", region_name=self.region_name)
            # Upload step run ref
            def _upload_file_to_s3(local_path, s3_filename):
                # Helper: stage a local file under the run/step-scoped S3 key.
                key = self._artifact_s3_key(run_id, step_key, s3_filename)
                s3_uri = self._artifact_s3_uri(run_id, step_key, s3_filename)
                log.debug(f"Uploading file {local_path} to {s3_uri}")
                s3.upload_file(Filename=local_path, Bucket=self.staging_bucket, Key=key)
            # Upload main file.
            # The remote Dagster installation should also have the file, but locating it there
            # could be a pain.
            main_local_path = self._main_file_local_path()
            _upload_file_to_s3(main_local_path, self._main_file_name())
            if self.deploy_local_job_package:
                # Zip and upload package containing job
                zip_local_path = os.path.join(temp_dir, CODE_ZIP_NAME)
                build_pyspark_zip(zip_local_path, self.local_job_package_path)
                _upload_file_to_s3(zip_local_path, CODE_ZIP_NAME)
            # Create step run ref pickle file
            step_run_ref_local_path = os.path.join(temp_dir, PICKLED_STEP_RUN_REF_FILE_NAME)
            with open(step_run_ref_local_path, "wb") as step_pickle_file:
                pickle.dump(step_run_ref, step_pickle_file)
            _upload_file_to_s3(step_run_ref_local_path, PICKLED_STEP_RUN_REF_FILE_NAME)
    def launch_step(self, step_context):
        """Stage artifacts on S3, submit the EMR step and stream its events.

        Generator: yields Dagster events produced by the remote process.
        """
        step_run_ref = step_context_to_step_run_ref(step_context, self.local_job_package_path)
        run_id = step_context.dagster_run.run_id
        log = step_context.log
        step_key = step_run_ref.step_key
        self._post_artifacts(log, step_run_ref, run_id, step_key)
        emr_step_def = self._get_emr_step_def(run_id, step_key, step_context.op.name)
        # add_job_flow_steps returns a list of step ids; only one step was
        # submitted, so take the first.
        emr_step_id = self.emr_job_runner.add_job_flow_steps(log, self.cluster_id, [emr_step_def])[
            0
        ]
        yield from self.wait_for_completion_and_log(run_id, step_key, emr_step_id, step_context)
    def wait_for_completion_and_log(self, run_id, step_key, emr_step_id, step_context):
        """Wait for the EMR step, forwarding its S3 logs even on failure."""
        s3 = boto3.resource("s3", region_name=self.region_name)
        try:
            yield from self.wait_for_completion(step_context, s3, run_id, step_key, emr_step_id)
        except EmrError as emr_error:
            # Surface remote logs before propagating the failure.
            if self.wait_for_logs:
                self._log_logs_from_s3(step_context.log, emr_step_id)
            raise emr_error
        if self.wait_for_logs:
            self._log_logs_from_s3(step_context.log, emr_step_id)
    def wait_for_completion(
        self, step_context, s3, run_id, step_key, emr_step_id, check_interval=15
    ):
        """We want to wait for the EMR steps to complete, and while that's happening, we want to
        yield any events that have been written to S3 for us by the remote process.
        After the EMR steps complete, we want a final chance to fetch events before finishing
        the step.
        """
        done = False
        all_events = []
        # If this is being called within a `capture_interrupts` context, allow interrupts
        # while waiting for the pyspark execution to complete, so that we can terminate slow or
        # hanging steps
        while not done:
            with raise_execution_interrupts():
                time.sleep(check_interval) # AWS rate-limits us if we poll it too often
            done = self.emr_job_runner.is_emr_step_complete(
                step_context.log, self.cluster_id, emr_step_id
            )
            # Events are cumulative on S3: only forward the newly appended tail.
            all_events_new = self.read_events(s3, run_id, step_key)
            if len(all_events_new) > len(all_events): # pyright: ignore[reportArgumentType]
                for i in range(len(all_events), len(all_events_new)): # pyright: ignore[reportArgumentType]
                    event = all_events_new[i] # pyright: ignore[reportOptionalSubscript,reportArgumentType,reportIndexIssue]
                    # write each event from the EMR instance to the local instance
                    step_context.instance.handle_new_event(event)
                    if event.is_dagster_event: # pyright: ignore[reportOptionalMemberAccess,reportAttributeAccessIssue]
                        yield event.dagster_event # pyright: ignore[reportOptionalMemberAccess,reportAttributeAccessIssue]
            all_events = all_events_new
    def read_events(self, s3, run_id, step_key):
        """Fetch and deserialize the pickled event list the remote process
        wrote to S3; returns [] while the events file does not exist yet."""
        events_s3_obj = s3.Object(
            self.staging_bucket, self._artifact_s3_key(run_id, step_key, PICKLED_EVENTS_FILE_NAME)
        )
        try:
            events_data = events_s3_obj.get()["Body"].read()
            return deserialize_value(pickle.loads(events_data))
        except ClientError as ex:
            # The file might not be there yet, which is fine
            if ex.response["Error"]["Code"] == "NoSuchKey": # pyright: ignore[reportTypedDictNotRequiredAccess]
                return []
            else:
                raise ex
    def _log_logs_from_s3(self, log, emr_step_id):
        """Retrieves the logs from the remote PySpark process that EMR posted to S3 and logs
        them to the given log.
        """
        stdout_log, stderr_log = self.emr_job_runner.retrieve_logs_for_step_id(
            log, self.cluster_id, emr_step_id
        )
        # Since stderr is YARN / Hadoop Log4J output, parse and reformat those log lines for
        # Dagster's logging system.
        records = parse_hadoop_log4j_records(stderr_log)
        for record in records:
            if record.level:
                log.log(
                    level=record.level,
                    msg="".join(["Spark Driver stderr: ", record.logger, ": ", record.message]),
                )
            else:
                # Records without a parsed level are forwarded at debug level.
                log.debug(f"Spark Driver stderr: {record.message}")
        sys.stdout.write(
            "---------- Spark Driver stdout: ----------\n"
            + stdout_log
            + "\n"
            + "---------- End of Spark Driver stdout ----------\n"
        )
    def _get_emr_step_def(self, run_id, step_key, solid_name):
        """From the local Dagster instance, construct EMR steps that will kick off execution on a
        remote EMR cluster.
        """
        from dagster_spark.utils import flatten_dict, format_for_cli
        action_on_failure = self.action_on_failure
        # Execute Solid via spark-submit
        conf = dict(flatten_dict(self.spark_config))
        conf["spark.app.name"] = conf.get("spark.app.name", solid_name)
        # EMR steps are always submitted through YARN; anything else in the
        # user-provided config is a misconfiguration.
        check.invariant(
            conf.get("spark.master", "yarn") == "yarn",
            desc=(
                "spark.master is configured as {}; cannot set Spark master on EMR to anything "
                'other than "yarn"'
            ).format(conf.get("spark.master")),
        )
        # Trailing positional args (bucket + pickle key) are consumed by the
        # emr_step_main entry-point script, not by spark-submit itself.
        command = (
            [
                EMR_SPARK_HOME + "bin/spark-submit",
                "--master",
                "yarn",
                "--deploy-mode",
                conf.get("spark.submit.deployMode", "client"),
            ]
            + format_for_cli(list(flatten_dict(conf)))
            + [
                "--py-files",
                self._artifact_s3_uri(run_id, step_key, CODE_ZIP_NAME),
                self._artifact_s3_uri(run_id, step_key, self._main_file_name()),
                self.staging_bucket,
                self._artifact_s3_key(run_id, step_key, PICKLED_STEP_RUN_REF_FILE_NAME),
            ]
        )
        return EmrJobRunner.construct_step_dict_for_command(
            f"Execute Solid/Op {solid_name}", command, action_on_failure=action_on_failure
        )
def _main_file_name(self):
return os.path.basename(self._main_file_local_path())
    def _main_file_local_path(self):
        # Path of the locally-installed `emr_step_main` module, shipped to S3
        # and used as the spark-submit entry point.
        return emr_step_main.__file__
def _sanitize_step_key(self, step_key: str) -> str:
# step_keys of dynamic steps contain brackets, which are invalid characters
return step_key.replace("[", "__").replace("]", "__")
def _artifact_s3_uri(self, run_id, step_key, filename):
key = self._artifact_s3_key(run_id, self._sanitize_step_key(step_key), filename)
return f"s3://{self.staging_bucket}/{key}"
def _artifact_s3_key(self, run_id, step_key, filename):
return "/".join(
[
self.staging_prefix,
run_id,
self._sanitize_step_key(step_key),
os.path.basename(filename),
]
)
| EmrPySparkStepLauncher |
python | yaml__pyyaml | setup.py | {
"start": 6655,
"end": 9990
} | class ____(_build_ext):
    def finalize_options(self):
        """Merge PEP 517 config settings (`pyyaml_build_config`, a JSON blob)
        into this command's existing attributes."""
        super().finalize_options()
        pep517_config = ActiveConfigSettings.current()
        build_config = pep517_config.get('pyyaml_build_config')
        if build_config:
            import json
            build_config = json.loads(build_config)
            print(f"`pyyaml_build_config`: {build_config}")
        else:
            build_config = {}
            print("No `pyyaml_build_config` setting found.")
        for key, value in build_config.items():
            # Ellipsis is used as a missing-attribute sentinel so that a
            # legitimately falsy existing value is still distinguishable.
            existing_value = getattr(self, key, ...)
            if existing_value is ...:
                print(f"ignoring unknown config key {key!r}")
                continue
            if existing_value:
                print(f"combining {key!r} {existing_value!r} and {value!r}")
                value = existing_value + value # FIXME: handle type diff
            setattr(self, key, value)
    def run(self):
        """Build extensions, honoring each extension's enable/disable status.

        - all extensions explicitly disabled -> do nothing;
        - any extension explicitly enabled  -> build failures are fatal;
        - otherwise (status undecided)      -> failures only emit a warning.
        """
        optional = True
        disabled = True
        for ext in self.extensions:
            with_ext = self.distribution.ext_status(ext)
            if with_ext is None:
                # Undecided: attempt the build but keep it optional.
                disabled = False
            elif with_ext:
                optional = False
                disabled = False
                break
        if disabled:
            return
        try:
            _build_ext.run(self)
        except DistutilsPlatformError:
            exc = sys.exc_info()[1]
            if optional:
                log.warn(str(exc))
                log.warn("skipping build_ext")
            else:
                raise
def get_source_files(self):
self.check_extensions_list(self.extensions)
filenames = []
for ext in self.extensions:
if with_cython:
self.cython_sources(ext.sources, ext)
for filename in ext.sources:
filenames.append(filename)
base = os.path.splitext(filename)[0]
for ext in ['c', 'h', 'pyx', 'pxd']:
filename = '%s.%s' % (base, ext)
if filename not in filenames and os.path.isfile(filename):
filenames.append(filename)
return filenames
def get_outputs(self):
self.check_extensions_list(self.extensions)
outputs = []
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = os.path.join(self.build_lib,
self.get_ext_filename(fullname))
if os.path.isfile(filename):
outputs.append(filename)
return outputs
    def build_extensions(self):
        """Build each non-disabled extension, tolerating failures for
        extensions whose status is undecided (falls back to pure Python)."""
        self.check_extensions_list(self.extensions)
        for ext in self.extensions:
            with_ext = self.distribution.ext_status(ext)
            if with_ext is not None and not with_ext:
                # Explicitly disabled extension: skip it entirely.
                continue
            if with_cython:
                print(f"BUILDING CYTHON EXT; {self.include_dirs=} {self.library_dirs=} {self.define=}")
                ext.sources = self.cython_sources(ext.sources, ext)
            try:
                self.build_extension(ext)
            except (CompileError, LinkError):
                # Failure is fatal only when the extension was explicitly
                # requested (with_ext is not None).
                if with_ext is not None:
                    raise
                log.warn("Error compiling module, falling back to pure Python")
| build_ext |
python | TheAlgorithms__Python | maths/pi_monte_carlo_estimation.py | {
"start": 16,
"end": 2042
} | class ____:
    def __init__(self, x: float, y: float) -> None:
        # Cartesian coordinates of the point.
        self.x = x
        self.y = y
def is_in_unit_circle(self) -> bool:
"""
True, if the point lies in the unit circle
False, otherwise
"""
return (self.x**2 + self.y**2) <= 1
@classmethod
def random_unit_square(cls):
"""
Generates a point randomly drawn from the unit square [0, 1) x [0, 1).
"""
return cls(x=random.random(), y=random.random())
def estimate_pi(number_of_simulations: int) -> float:
    """Monte Carlo estimate of the mathematical constant PI.

    Draw ``number_of_simulations`` points uniformly from the unit square
    [0, 1) x [0, 1). A point lands in the unit circle with probability
    PI / 4, so the empirical hit rate times 4 estimates PI.
    See https://en.wikipedia.org/wiki/Monte_Carlo_method#Overview and
    https://en.wikipedia.org/wiki/Empirical_probability

    Raises ValueError for a non-positive simulation count.
    """
    if number_of_simulations < 1:
        raise ValueError("At least one simulation is necessary to estimate PI.")
    hits = sum(
        1
        for _ in range(number_of_simulations)
        if Point.random_unit_square().is_in_unit_circle()
    )
    return 4 * hits / number_of_simulations
if __name__ == "__main__":
    # import doctest
    # doctest.testmod()
    from math import pi
    # Interactive driver: estimate PI and report the absolute error.
    prompt = "Please enter the desired number of Monte Carlo simulations: "
    my_pi = estimate_pi(int(input(prompt).strip()))
    print(f"An estimate of PI is {my_pi} with an error of {abs(my_pi - pi)}")
| Point |
python | spyder-ide__spyder | spyder/plugins/editor/plugin.py | {
"start": 1141,
"end": 44051
} | class ____(SpyderDockablePlugin):
"""
Editor plugin.
"""
NAME = 'editor'
REQUIRES = [Plugins.Application, Plugins.Console, Plugins.Preferences]
OPTIONAL = [
Plugins.Completions,
Plugins.Debugger,
Plugins.IPythonConsole,
Plugins.MainMenu,
Plugins.Projects,
Plugins.OutlineExplorer,
Plugins.Run,
Plugins.StatusBar,
Plugins.Switcher,
Plugins.Toolbar
]
WIDGET_CLASS = EditorMainWidget
CONF_SECTION = NAME
CONF_WIDGET_CLASS = EditorConfigPage
CONF_FILE = False
# ---- Signals
# ------------------------------------------------------------------------
sig_dir_opened = Signal(str)
"""
This signal is emitted when the editor changes the current directory.
Parameters
----------
new_working_directory: str
The new working directory path.
Notes
-----
This option is available on the options menu of the editor plugin
"""
sig_file_opened_closed_or_updated = Signal(str, str)
"""
This signal is emitted when a file is opened, closed or updated,
including switching among files.
Parameters
----------
filename: str
Name of the file that was opened, closed or updated.
language: str
Name of the programming language of the file that was opened,
closed or updated.
"""
# This signal is fired for any focus change among all editor stacks
sig_editor_focus_changed = Signal()
sig_help_requested = Signal(dict)
"""
This signal is emitted to request help on a given object `name`.
Parameters
----------
help_data: dict
Dictionary required by the Help pane to render a docstring.
Examples
--------
>>> help_data = {
'obj_text': str,
'name': str,
'argspec': str,
'note': str,
'docstring': str,
'force_refresh': bool,
'path': str,
}
See Also
--------
:py:meth:spyder.plugins.editor.widgets.editorstack.EditorStack.send_to_help
"""
sig_open_files_finished = Signal()
"""
This signal is emitted when the editor finished to open files.
"""
sig_codeeditor_created = Signal(object)
"""
This signal is emitted when a codeeditor is created.
Parameters
----------
codeeditor: spyder.plugins.editor.widgets.codeeditor.CodeEditor
The codeeditor.
"""
sig_codeeditor_deleted = Signal(object)
"""
This signal is emitted when a codeeditor is closed.
Parameters
----------
codeeditor: spyder.plugins.editor.widgets.codeeditor.CodeEditor
The codeeditor.
"""
sig_codeeditor_changed = Signal(object)
"""
This signal is emitted when the current codeeditor changes.
Parameters
----------
codeeditor: spyder.plugins.editor.widgets.codeeditor.CodeEditor
The codeeditor.
"""
# ---- SpyderDockablePlugin API
# ------------------------------------------------------------------------
    @staticmethod
    def get_name():
        """Translated name shown for this plugin in Spyder's UI."""
        return _('Editor')
    @staticmethod
    def get_description():
        """One-line, translated description of the plugin."""
        return _(
            "Edit Python, Markdown, Cython and many other types of text files."
        )
    @classmethod
    def get_icon(cls):
        """Icon used to represent the plugin in the UI."""
        return cls.create_icon('edit')
    def on_initialize(self):
        """Forward the main widget's signals through this plugin's signals."""
        widget = self.get_widget()
        # ---- Help related signals
        widget.sig_help_requested.connect(self.sig_help_requested)
        # ---- General signals
        widget.starting_long_process.connect(self.before_long_process)
        widget.ending_long_process.connect(self.after_long_process)
        widget.sig_dir_opened.connect(self.sig_dir_opened)
        widget.sig_file_opened_closed_or_updated.connect(
            self.sig_file_opened_closed_or_updated
        )
        widget.sig_open_files_finished.connect(self.sig_open_files_finished)
        # ---- CodeEditor related signals
        widget.sig_codeeditor_created.connect(self.sig_codeeditor_created)
        widget.sig_codeeditor_deleted.connect(self.sig_codeeditor_deleted)
        widget.sig_codeeditor_changed.connect(self.sig_codeeditor_changed)
        widget.sig_editor_focus_changed.connect(self.sig_editor_focus_changed)
        # ---- Plugin related signals
        widget.sig_switch_to_plugin_requested.connect(
            lambda: self.switch_to_plugin(force_focus=True)
        )
    @on_plugin_available(plugin=Plugins.Preferences)
    def on_preferences_available(self):
        """Register this plugin's config page with Preferences."""
        preferences = self.get_plugin(Plugins.Preferences)
        preferences.register_plugin_preferences(self)
    @on_plugin_teardown(plugin=Plugins.Preferences)
    def on_preferences_teardown(self):
        """Remove this plugin's config page from Preferences."""
        preferences = self.get_plugin(Plugins.Preferences)
        preferences.deregister_plugin_preferences(self)
    @on_plugin_available(plugin=Plugins.StatusBar)
    def on_statusbar_available(self):
        """Add the editor's status widgets to the main status bar."""
        # Add status widgets
        statusbar = self.get_plugin(Plugins.StatusBar)
        widget = self.get_widget()
        statusbar.add_status_widget(widget.readwrite_status)
        statusbar.add_status_widget(widget.eol_status)
        statusbar.add_status_widget(widget.encoding_status)
        statusbar.add_status_widget(widget.cursorpos_status)
        statusbar.add_status_widget(widget.vcs_status)
    @on_plugin_teardown(plugin=Plugins.StatusBar)
    def on_statusbar_teardown(self):
        """Remove the editor's status widgets (mirror of the setup above)."""
        # Remove status widgets
        statusbar = self.get_plugin(Plugins.StatusBar)
        widget = self.get_widget()
        statusbar.remove_status_widget(widget.readwrite_status.ID)
        statusbar.remove_status_widget(widget.eol_status.ID)
        statusbar.remove_status_widget(widget.encoding_status.ID)
        statusbar.remove_status_widget(widget.cursorpos_status.ID)
        statusbar.remove_status_widget(widget.vcs_status.ID)
@on_plugin_available(plugin=Plugins.Run)
def on_run_available(self):
widget = self.get_widget()
run = self.get_plugin(Plugins.Run)
widget.sig_editor_focus_changed_uuid.connect(
run.switch_focused_run_configuration
)
widget.sig_register_run_configuration_provider_requested.connect(
lambda supported_extensions:
run.register_run_configuration_provider(
self.NAME, supported_extensions
)
)
widget.sig_deregister_run_configuration_provider_requested.connect(
lambda unsupported_extensions:
run.deregister_run_configuration_provider(
self.NAME, unsupported_extensions
)
)
# This is necessary to register run configs that were added before Run
# is available
for extension in widget.supported_run_extensions:
run.register_run_configuration_provider(self.NAME, [extension])
# Buttons creation
run.create_run_button(
RunContext.Cell,
_("Run cell"),
icon=self.create_icon('run_cell'),
tip=_("Run cell"),
shortcut_context=self.NAME,
register_shortcut=True,
add_to_toolbar=True,
add_to_menu=True
)
run.create_run_button(
RunContext.Cell,
_("Run cell and advance"),
icon=self.create_icon('run_cell_advance'),
tip=_("Run cell and advance"),
shortcut_context=self.NAME,
register_shortcut=True,
add_to_toolbar=True,
add_to_menu=True,
extra_action_name=ExtraAction.Advance
)
run.create_run_button(
RunContext.Cell,
_("Re-run last cell"),
tip=_("Re run last cell "),
shortcut_context=self.NAME,
register_shortcut=True,
add_to_menu=True,
re_run=True
)
run.create_run_button(
RunContext.Selection,
_("Run ¤t line/selection"),
icon=self.create_icon('run_selection'),
tip=_("Run current line or selection"),
shortcut_context=self.NAME,
register_shortcut=True,
add_to_toolbar=True,
add_to_menu=True,
extra_action_name=ExtraAction.Advance,
)
run.create_run_button(
RunContext.Selection,
_("Run &to line"),
tip=_("Run selection up to the current line"),
shortcut_context=self.NAME,
register_shortcut=True,
add_to_toolbar=False,
add_to_menu=True,
context_modificator=SelectionContextModificator.ToLine
)
run.create_run_button(
RunContext.Selection,
_("Run &from line"),
tip=_("Run selection from the current line"),
shortcut_context=self.NAME,
register_shortcut=True,
add_to_toolbar=False,
add_to_menu=True,
context_modificator=SelectionContextModificator.FromLine
)
    @on_plugin_teardown(plugin=Plugins.Run)
    def on_run_teardown(self):
        """Deregister run providers and destroy every button created in
        on_run_available (same contexts/modificators, mirrored)."""
        widget = self.get_widget()
        run = self.get_plugin(Plugins.Run)
        run.deregister_run_configuration_provider(
            self.NAME, widget.supported_run_extensions
        )
        run.destroy_run_button(RunContext.Cell)
        run.destroy_run_button(
            RunContext.Cell,
            extra_action_name=ExtraAction.Advance
        )
        run.destroy_run_button(RunContext.Cell, re_run=True)
        run.destroy_run_button(
            RunContext.Selection,
            extra_action_name=ExtraAction.Advance
        )
        run.destroy_run_button(
            RunContext.Selection,
            context_modificator=SelectionContextModificator.ToLine
        )
        run.destroy_run_button(
            RunContext.Selection,
            context_modificator=SelectionContextModificator.FromLine
        )
    @on_plugin_available(plugin=Plugins.MainMenu)
    def on_mainmenu_available(self):
        """Populate the File/Edit/Search/Source application menus with the
        editor's actions.

        NOTE(review): for the Search menu only the Cursor section is added
        here; on_mainmenu_teardown also removes search_menu_actions and an
        update_search_menu connection — confirm those are set up elsewhere.
        """
        widget = self.get_widget()
        mainmenu = self.get_plugin(Plugins.MainMenu)
        # ---- File menu ----
        # Print
        print_actions = [
            widget.print_preview_action,
            widget.print_action,
        ]
        for print_action in print_actions:
            mainmenu.add_item_to_application_menu(
                print_action,
                menu_id=ApplicationMenus.File,
                section=FileMenuSections.Print,
                before_section=FileMenuSections.Close
            )
        # Navigation
        if sys.platform == 'darwin':
            # File-switching shortcuts live in the File menu on macOS only.
            tab_navigation_actions = [
                widget.go_to_previous_file_action,
                widget.go_to_next_file_action
            ]
            for tab_navigation_action in tab_navigation_actions:
                mainmenu.add_item_to_application_menu(
                    tab_navigation_action,
                    menu_id=ApplicationMenus.File,
                    section=FileMenuSections.Navigation,
                    before_section=FileMenuSections.Restart
                )
        # ---- Edit menu ----
        edit_menu = mainmenu.get_application_menu(ApplicationMenus.Edit)
        edit_menu.aboutToShow.connect(widget.update_edit_menu)
        # Editor section
        for edit_item in widget.edit_menu_actions:
            mainmenu.add_item_to_application_menu(
                edit_item,
                menu_id=ApplicationMenus.Edit,
                section=EditMenuSections.Editor,
                before_section=EditMenuSections.Formatting,
            )
        # Formatting section
        formatting_actions = [
            widget.eol_menu,
            widget.trailingspaces_action,
            widget.fixindentation_action,
        ]
        for formatting_item in formatting_actions:
            mainmenu.add_item_to_application_menu(
                formatting_item,
                menu_id=ApplicationMenus.Edit,
                section=EditMenuSections.Formatting,
            )
        # ---- Search menu ----
        # Cursor section
        cursor_actions = [
            widget.previous_edit_cursor_action,
            widget.previous_cursor_action,
            widget.next_cursor_action,
        ]
        for cursor_item in cursor_actions:
            mainmenu.add_item_to_application_menu(
                cursor_item,
                menu_id=ApplicationMenus.Search,
                section=SearchMenuSections.Cursor,
                before_section=SearchMenuSections.FindInFiles,
            )
        # ---- Source menu ----
        source_menu = mainmenu.get_application_menu(
            ApplicationMenus.Source
        )
        source_menu.aboutToShow.connect(widget.refresh_formatter_name)
        # Options section
        option_actions = widget.checkable_actions.values()
        for option_item in option_actions:
            mainmenu.add_item_to_application_menu(
                option_item,
                menu_id=ApplicationMenus.Source,
                section=SourceMenuSections.Options,
                before_section=SourceMenuSections.Linting,
            )
        # Linting section
        linting_actions = [
            widget.todo_list_action,
            widget.warning_list_action,
            widget.previous_warning_action,
            widget.next_warning_action,
        ]
        for linting_item in linting_actions:
            mainmenu.add_item_to_application_menu(
                linting_item,
                menu_id=ApplicationMenus.Source,
                section=SourceMenuSections.Linting,
                before_section=SourceMenuSections.Autofix,
            )
        # Autofix section
        autofix_actions = [
            widget.formatting_action,
        ]
        for autofix_item in autofix_actions:
            mainmenu.add_item_to_application_menu(
                autofix_item,
                menu_id=ApplicationMenus.Source,
                section=SourceMenuSections.Autofix,
            )
    @on_plugin_teardown(plugin=Plugins.MainMenu)
    def on_mainmenu_teardown(self):
        """Remove the editor's actions from the application menus (mirror of
        on_mainmenu_available)."""
        widget = self.get_widget()
        mainmenu = self.get_plugin(Plugins.MainMenu)
        # ---- File menu ----
        # Print
        print_actions = [
            widget.print_preview_action,
            widget.print_action,
        ]
        for print_action in print_actions:
            mainmenu.remove_item_from_application_menu(
                print_action,
                menu_id=ApplicationMenus.File
            )
        # Navigation
        if sys.platform == 'darwin':
            tab_navigation_actions = [
                widget.go_to_previous_file_action,
                widget.go_to_next_file_action
            ]
            for tab_navigation_action in tab_navigation_actions:
                mainmenu.remove_item_from_application_menu(
                    tab_navigation_action,
                    menu_id=ApplicationMenus.File
                )
        # ---- Edit menu ----
        edit_menu = mainmenu.get_application_menu(ApplicationMenus.Edit)
        edit_menu.aboutToShow.disconnect(widget.update_edit_menu)
        # Editor section
        for edit_item in widget.edit_menu_actions:
            mainmenu.remove_item_from_application_menu(
                edit_item,
                menu_id=ApplicationMenus.Edit
            )
        # Formatting section
        formatting_actions = [
            widget.eol_menu,
            widget.trailingspaces_action,
            widget.fixindentation_action,
        ]
        for formatting_item in formatting_actions:
            mainmenu.remove_item_from_application_menu(
                formatting_item,
                menu_id=ApplicationMenus.Edit
            )
        # ---- Search menu ----
        # NOTE(review): this disconnect and the search_menu_actions removal
        # below have no matching setup in on_mainmenu_available (which only
        # adds the Cursor section) — confirm they are registered elsewhere,
        # otherwise the disconnect may raise.
        search_menu = mainmenu.get_application_menu(ApplicationMenus.Search)
        search_menu.aboutToShow.disconnect(widget.update_search_menu)
        # Find section
        for search_item in widget.search_menu_actions:
            mainmenu.remove_item_from_application_menu(
                search_item,
                menu_id=ApplicationMenus.Search
            )
        # Cursor section
        cursor_actions = [
            widget.previous_edit_cursor_action,
            widget.previous_cursor_action,
            widget.next_cursor_action,
        ]
        for cursor_item in cursor_actions:
            mainmenu.remove_item_from_application_menu(
                cursor_item,
                menu_id=ApplicationMenus.Search
            )
        # ---- Source menu ----
        source_menu = mainmenu.get_application_menu(
            ApplicationMenus.Source
        )
        source_menu.aboutToShow.disconnect(widget.refresh_formatter_name)
        # Options section
        option_actions = widget.checkable_actions.values()
        for option_item in option_actions:
            mainmenu.remove_item_from_application_menu(
                option_item,
                menu_id=ApplicationMenus.Source
            )
        # Linting section
        linting_actions = [
            widget.todo_list_action,
            widget.warning_list_action,
            widget.previous_warning_action,
            widget.next_warning_action,
        ]
        for linting_item in linting_actions:
            mainmenu.remove_item_from_application_menu(
                linting_item,
                menu_id=ApplicationMenus.Source
            )
        # Autofix section
        autofix_actions = [
            widget.formatting_action,
        ]
        for autofix_item in autofix_actions:
            mainmenu.remove_item_from_application_menu(
                autofix_item,
                menu_id=ApplicationMenus.Source
            )
@on_plugin_available(plugin=Plugins.Toolbar)
def on_toolbar_available(self):
widget = self.get_widget()
toolbar = self.get_plugin(Plugins.Toolbar)
toolbar.add_item_to_application_toolbar(
widget.create_new_cell,
toolbar_id=ApplicationToolbars.File,
)
@on_plugin_teardown(plugin=Plugins.Toolbar)
def on_toolbar_teardown(self):
toolbar = self.get_plugin(Plugins.Toolbar)
toolbar.remove_item_from_application_toolbar(
EditorWidgetActions.NewCell,
toolbar_id=ApplicationToolbars.File,
)
@on_plugin_available(plugin=Plugins.Completions)
def on_completions_available(self):
widget = self.get_widget()
completions = self.get_plugin(Plugins.Completions)
widget.sig_after_configuration_update_requested.connect(
completions.after_configuration_update
)
self.sig_file_opened_closed_or_updated.connect(
completions.file_opened_closed_or_updated
)
completions.sig_language_completions_available.connect(
widget.register_completion_capabilities)
completions.sig_open_file.connect(widget.load)
completions.sig_stop_completions.connect(
widget.stop_completion_services)
@on_plugin_teardown(plugin=Plugins.Completions)
def on_completions_teardown(self):
widget = self.get_widget()
completions = self.get_plugin(Plugins.Completions)
widget.sig_after_configuration_update_requested.disconnect(
completions.after_configuration_update
)
self.sig_file_opened_closed_or_updated.disconnect(
completions.file_opened_closed_or_updated
)
completions.sig_language_completions_available.disconnect(
widget.register_completion_capabilities)
completions.sig_open_file.disconnect(widget.load)
completions.sig_stop_completions.disconnect(
widget.stop_completion_services)
@on_plugin_available(plugin=Plugins.OutlineExplorer)
def on_outlineexplorer_available(self):
widget = self.get_widget()
outline = self.get_plugin(Plugins.OutlineExplorer)
outline_widget = outline.get_widget()
widget.set_outlineexplorer(outline_widget)
@on_plugin_teardown(plugin=Plugins.OutlineExplorer)
def on_outlinexplorer_teardown(self):
self.get_widget().set_outlineexplorer(None)
@on_plugin_available(plugin=Plugins.IPythonConsole)
def on_ipyconsole_available(self):
widget = self.get_widget()
ipyconsole = self.get_plugin(Plugins.IPythonConsole)
ipyconsole.register_spyder_kernel_call_handler(
'cell_count', widget.handle_cell_count
)
ipyconsole.register_spyder_kernel_call_handler(
'current_filename', widget.handle_current_filename
)
ipyconsole.register_spyder_kernel_call_handler(
'get_file_code', widget.handle_get_file_code
)
ipyconsole.register_spyder_kernel_call_handler(
'run_cell', widget.handle_run_cell
)
@on_plugin_teardown(plugin=Plugins.IPythonConsole)
def on_ipyconsole_teardown(self):
ipyconsole = self.get_plugin(Plugins.IPythonConsole)
ipyconsole.unregister_spyder_kernel_call_handler('cell_count')
ipyconsole.unregister_spyder_kernel_call_handler('current_filename')
ipyconsole.unregister_spyder_kernel_call_handler('get_file_code')
ipyconsole.unregister_spyder_kernel_call_handler('run_cell')
@on_plugin_available(plugin=Plugins.Switcher)
def on_switcher_available(self):
switcher = self.get_plugin(Plugins.Switcher)
self.get_widget().set_switcher(switcher)
@on_plugin_teardown(plugin=Plugins.Switcher)
def on_switcher_teardown(self):
self.get_widget().set_switcher(None)
@on_plugin_available(plugin=Plugins.Projects)
def on_projects_available(self):
projects = self.get_plugin(Plugins.Projects)
projects.sig_project_loaded.connect(self._on_project_loaded)
projects.sig_project_closed.connect(self._on_project_closed)
@on_plugin_teardown(plugin=Plugins.Projects)
def on_projects_teardown(self):
projects = self.get_plugin(Plugins.Projects)
projects.sig_project_loaded.disconnect(self._on_project_loaded)
projects.sig_project_closed.disconnect(self._on_project_closed)
@on_plugin_available(plugin=Plugins.Application)
def on_application_available(self):
application = self.get_plugin(Plugins.Application)
widget = self.get_widget()
widget.sig_new_recent_file.connect(application.add_recent_file)
widget.sig_file_action_enabled.connect(self._enable_file_action)
widget.sig_edit_action_enabled.connect(self._enable_edit_action)
# Enable Select All edit action
self._enable_edit_action(ApplicationActions.SelectAll, True)
# Enable Search actions
self._enable_search_action(ApplicationActions.FindText, True)
self._enable_search_action(ApplicationActions.FindNext, True)
self._enable_search_action(ApplicationActions.FindPrevious, True)
self._enable_search_action(ApplicationActions.ReplaceText, True)
@on_plugin_teardown(plugin=Plugins.Application)
def on_application_teardown(self):
application = self.get_plugin(Plugins.Application)
widget = self.get_widget()
widget.sig_new_recent_file.disconnect(application.add_recent_file)
widget.sig_file_action_enabled.disconnect(self._enable_file_action)
widget.sig_edit_action_enabled.disconnect(self._enable_edit_action)
def update_font(self):
"""Update font from Preferences"""
font = self.get_font(SpyderFontType.Monospace)
self.get_widget().update_font(font)
def before_mainwindow_visible(self):
# Don't move this to on_mainwindow_visible because the window appears
# empty while the recovery dialog is shown.
self.get_widget().autosave.try_recover_from_autosave()
def on_mainwindow_visible(self):
widget = self.get_widget()
widget.restore_scrollbar_position()
def can_close(self):
editorstack = self.get_widget().editorstacks[0]
return editorstack.save_if_changed(cancelable=True)
def on_close(self, cancelable=False):
widget = self.get_widget()
if not self.get_widget().get_active_project_path():
filenames = widget.get_filenames()
self.set_conf('filenames', filenames)
# ---- Public API
# ------------------------------------------------------------------------
def get_codeeditor_for_filename(self, filename):
"""
Get `CodeEditor` instance associated with the given filename.
Parameters
----------
filename : str
File path associated with a CodeEditor instance.
Returns
-------
spyder.plugins.editor.codeeditor.CodeEditor
`CodeEditor` associated with the given filename.
"""
return self.get_widget().get_editor(filename)
def refresh(self):
"""
Refresh main widget.
"""
self.get_widget().refresh()
def load(self, *args, **kwargs):
"""
Load a file or a group of files.
Parameters
----------
filenames: Optional[list]
Filenames to load.
goto: Optional[int]
If goto is not None, it represents a line to go to. Used alongside
`start_column` and `end_column`. Alternatively, the first match of
`word` is used as a position.
word: Optional[str]
The `word` to use to set the cursor position when using `goto`.
editorwindow: Optional[spyder.plugins.editor.widgets.window.EditorMainWindow] # noqa
Load in the given editorwindow (useful when clicking in the Outline
explorer with multiple editor windows).
processevents: Optional[bool]
Determines if `processEvents()` should be called at the end of this
method (set to `False` to prevent keyboard events from creeping
through to the editor while debugging).
start_column: Optional[int]
The start position in the line (goto)
end_column: Optional[int]
The length (so that the end position is `start_column` +
`end_column`), when providing a `goto` line.
set_focus: Optional[bool]
If the opened file should gain focus. `True` by default.
add_where: Optional[str]
Position where to add the new file finfo (affects the files tab
order). Possible values are: `start` to make the file the first and
`end` (or any other value) to append.
"""
return self.get_widget().load(*args, **kwargs)
def load_edit(self, filename):
    """
    Load a `filename` passing to the base `load` method the `main_widget`
    as the `editorwindow` to force focus.
    Used by `spyder.plugins.outlineexplorer.plugin.[on_editor_available|on_editor_teardown]` # noqa
    Parameters
    ----------
    filename: str
        Filename to load.
    """
    # Fetch the widget once and reuse it (the original fetched it twice).
    widget = self.get_widget()
    return widget.load(filenames=filename, editorwindow=widget)
def load_edit_goto(self, filename, goto, word):
    """
    Open `filename` and place the cursor at line `goto` (or at the first
    occurrence of `word`), forcing focus by passing the `main_widget` as
    the `editorwindow`.
    Used by `spyder.plugins.outlineexplorer.plugin.[on_editor_available|on_editor_teardown]` # noqa
    Parameters
    ----------
    filename: str
        Filename to load.
    goto: int
        Represents a line to go to.
    word: str
        The `word` to use to set the cursor position when using `goto`.
    """
    main_widget = self.get_widget()
    return main_widget.load(
        filenames=filename,
        goto=goto,
        word=word,
        editorwindow=main_widget,
    )
def open_last_closed(self) -> None:
"""
Open the last closed tab again.
"""
return self.get_widget().open_last_closed()
def new(self, *args, **kwargs):
"""
Create a new file.
Parameters
----------
fname: Optional[str]
Name of the file to be created. The default is `None`.
If `None`, `fname` will be named `untitledXX.py`. No actual file
will be created until it is saved manually by the user.
editorstack: Optional[spyder.plugins.editor.widgets.editorstack.EditorStack] # noqa
Reference to the `EditorStack` instance that will be used to:
* Get `untitledXX.py` numbering for the file name.
* Check if a file with the same name already exists and it is
closeable.
* Set file as the current focused file.
The default is `None`. If that's the case, the current
`EditorStack` is used. See the `get_current_editorstack` method for
more details.
text: Optional[str]
Base text content that will be added to the file. The default is
`None`. If that's the case, the default content created will be
created via a template file. See
`Preferences > Editor > Advanced settings > Edit template for new files` # noqa
"""
return self.get_widget().new(*args, **kwargs)
def removed(self, filename):
"""
Close file given his filename since it was removed.
It's used, for instance, when a file was removed in the File or Project
explorer plugins.
Parameters
----------
filename: str
File path to be closed/removed.
"""
return self.get_widget().removed(filename)
def removed_tree(self, dirname):
"""
Close files given a directory since it was removed.
It's used, for instance, when a directory was removed in the File or
Project explorer plugins.
Parameters
----------
dirname: str
Base directory path of the files to be closed/removed.
"""
return self.get_widget().removed_tree(dirname)
def renamed(self, *args, **kwargs):
"""
Propagate file rename to editor stacks and autosave component.
This method is called when a file is renamed in the File or Project
explorer plugins. The file may not be opened in the editor.
Parameters
----------
source: str
Initial filename path.
dest: str
New filename path.
editorstack_id_str: Optional[str]
The default is `None`. If not, the `EditorStack` instance whose
identity corresponds to `editorstack_id_str` **doesn't** perform
the file rename operation.
"""
return self.get_widget().renamed(*args, **kwargs)
def renamed_tree(self, *args, **kwargs):
"""
Propagate directory rename to editor stacks and autosave component.
This is used when the directory was renamed in File or Project explorer
plugins.
Parameters
----------
source: str
Initial directory path.
dest: str
New directory path.
"""
return self.get_widget().renamed_tree(*args, **kwargs)
def add_supported_run_configuration(self, *args, **kwargs):
"""
Add a run configuration schema supported by the Editor.
Parameters
----------
config : spyder.plugins.editor.api.run.EditorRunConfiguration
New run configuration schema to be added.
"""
return self.get_widget().add_supported_run_configuration(
*args, **kwargs
)
def remove_supported_run_configuration(self, *args, **kwargs):
"""
Remove a run configuration schema supported by the Editor.
Parameters
----------
config : spyder.plugins.editor.api.run.EditorRunConfiguration
Run configuration schema to be removed.
"""
return self.get_widget().remove_supported_run_configuration(
*args, **kwargs
)
def get_current_editor(self):
"""
Get current `CodeEditor` instance if available.
Returns
-------
spyder.plugins.editor.codeeditor.CodeEditor
`CodeEditor` instance focused or available.
"""
return self.get_widget().get_current_editor()
def get_current_editorstack(self):
"""
Get current `EditorStack` instance if available.
Returns
-------
spyder.plugins.editor.editorstack.EditorStack
`EditorStack` instance focused or available.
"""
return self.get_widget().get_current_editorstack()
def get_focus_widget(self):
"""
Return the widget to give focus to.
This happens when plugin's main widget is raised to the top-level.
Returns
-------
spyder.plugins.editor.codeeditor.CodeEditor
`CodeEditor` instance focused or available.
"""
return self.get_widget().get_focus_widget()
def setup_open_files(self, close_previous_files=True):
"""
Open the list of saved files per project.
Also, open any files that the user selected in the recovery dialog and
setup toolbars and menus for 'New window' instances (i.e. it calls the
`setup_other_windows` method).
Parameters
----------
close_previous_files : Optional[bool]
If any previously open file should be closed. Default `True`.
"""
widget = self.get_widget()
outline = self.get_plugin(Plugins.OutlineExplorer, error=False)
if outline:
widget.setup_other_windows(self._main, outline)
return self.get_widget().setup_open_files(
close_previous_files=close_previous_files
)
def save_open_files(self):
    # The original signature had a stray trailing comma: `(self,)`.
    """Save the list of open files."""
    return self.get_widget().save_open_files()
def save(self, index=None, force=False):
    """
    Save file.
    Parameters
    ----------
    index : Optional[int]
        Index related to the file position in the current editorstack.
        The default is `None`, which uses the current file index.
    force : Optional[bool]
        Force save regardless of file state. The default is `False`.
    Returns
    -------
    bool
        `True` if the save operation was successful. `False` otherwise.
    """
    # BUG FIX: the original forwarded the literals `index=None, force=False`,
    # silently ignoring whatever the caller passed for both parameters.
    return self.get_widget().save(index=index, force=force)
def save_all(self) -> None:
"""
Save all files.
"""
return self.get_widget().save_all()
def save_as(self) -> None:
    """
    Save the current file under a different name.
    """
    # NOTE(review): the previous docstring said "Save all files." — a
    # copy-paste error from `save_all`; the call below is `save_as`.
    self.get_widget().save_as()
def save_copy_as(self) -> None:
"""
Save copy of file under a different name.
"""
self.get_widget().save_copy_as()
def revert_file(self) -> None:
"""
Revert the currently edited file from disk.
"""
self.get_widget().revert()
def save_bookmark(self, slot_num):
"""
Save current line and position as bookmark.
Parameters
----------
slot_num : int
"""
return self.get_widget().save_bookmark(slot_num)
def load_bookmark(self, slot_num):
"""
Set cursor to bookmarked file and position.
Parameters
----------
slot_num : int
"""
return self.get_widget().load_bookmark(slot_num)
def edit_template(self):
"""Edit `New file` template."""
return self.get_widget().edit_template()
def get_current_filename(self):
"""Get current editor 'filename'."""
return self.get_widget().get_current_filename()
def current_file_is_temporary(self) -> bool:
    """Return whether file in current editor is a temporary file.

    BUG FIX: the original compared the `CodeEditor` instance returned by
    `get_current_editor()` against `TEMPFILE_PATH` (a string), which can
    never be equal; the current *filename* is what must be compared.
    """
    return self.get_current_filename() == self.get_widget().TEMPFILE_PATH
def get_filenames(self):
"""
Get list with all open files.
Returns
-------
list
A list with the names of all files currently opened in
the editorstack.
"""
return self.get_widget().get_filenames()
def close_file(self):
"""Close current file."""
return self.get_widget().close_file()
def close_file_from_name(self, filename):
"""
Close file from its name.
Parameters
----------
filename : str
Filename to be closed.
"""
return self.get_widget().close_file_from_name(filename)
def close_all_files(self):
"""Close all opened files."""
return self.get_widget().close_all_files()
def go_to_line(self, line=None):
"""
Open 'go to line' dialog.
Parameters
----------
line : Optional[int]
Line to use for programatic calls without showing the dialog. The
default is `None`.
"""
return self.get_widget().go_to_line(line=line)
def set_current_filename(self, *args, **kwargs):
"""
Set current filename.
Returns
-------
spyder.plugins.editor.codeeditor.CodeEditor
The associated `CodeEditor` instance.
"""
return self.get_widget().set_current_filename(*args, **kwargs)
def set_current_project_path(self, root_path=None):
"""
Set the current active project root path.
Parameters
----------
root_path: Optional[str]
Path to current project root path. Default is `None`.
"""
return self.get_widget().set_current_project_path(root_path=root_path)
# ---- Edit/Search delegates: each forwards directly to the main widget,
# which dispatches the action to the focused editor.
def undo(self) -> None:
    """Undo the last edit in the current editor."""
    return self.get_widget().undo()
def redo(self) -> None:
    """Redo the last undone edit in the current editor."""
    return self.get_widget().redo()
def cut(self) -> None:
    """Cut the current selection to the clipboard."""
    return self.get_widget().cut()
def copy(self) -> None:
    """Copy the current selection to the clipboard."""
    return self.get_widget().copy()
def paste(self) -> None:
    """Paste the clipboard contents at the cursor."""
    return self.get_widget().paste()
def select_all(self) -> None:
    """Select all text in the current editor."""
    return self.get_widget().select_all()
def find(self) -> None:
    """Show the find widget."""
    return self.get_widget().find()
def find_next(self) -> None:
    """Go to the next search match."""
    return self.get_widget().find_next()
def find_previous(self) -> None:
    """Go to the previous search match."""
    return self.get_widget().find_previous()
def replace(self) -> None:
    """Show the find/replace widget."""
    return self.get_widget().replace()
# ---- Private API
# ------------------------------------------------------------------------
# ---- Run related methods
def _register_run_configuration_metadata(self, metadata):
run = self.get_plugin(Plugins.Run, error=False)
if run is not None:
run.register_run_configuration_metadata(
self.get_widget(), metadata
)
def _deregister_run_configuration_metadata(self, file_id):
run = self.get_plugin(Plugins.Run, error=False)
if run is not None:
run.deregister_run_configuration_metadata(file_id)
def _get_currently_selected_run_configuration(self):
run = self.get_plugin(Plugins.Run, error=False)
if run is not None:
return run.get_currently_selected_configuration()
def _switch_focused_run_configuration(self, file_id):
run = self.get_plugin(Plugins.Run, error=False)
if run is not None:
run.switch_focused_run_configuration(file_id)
# ---- Completions related methods
def _register_file_completions(self, language, filename, codeeditor):
completions = self.get_plugin(Plugins.Completions, error=False)
status = None
fallback_only = False
if completions is not None:
status = (
completions.start_completion_services_for_language(
language.lower()
)
)
completions.register_file(
language.lower(), filename, codeeditor
)
fallback_only = completions.is_fallback_only(language.lower())
return (status, fallback_only)
def _send_completions_request(self, language, request, params):
completions = self.get_plugin(Plugins.Completions, error=False)
if completions is not None:
completions.send_request(language, request, params)
def _after_configuration_update(self, config):
completions = self.get_plugin(Plugins.Completions, error=False)
if completions is not None:
completions.after_configuration_update(config)
# ---- Projects related methods
def _start_project_workspace_services(self):
projects = self.get_plugin(Plugins.Projects, error=False)
if projects is not None:
projects.start_workspace_services()
def _get_project_filenames(self):
if self.get_widget().get_active_project_path():
projects = self.get_plugin(Plugins.Projects, error=False)
return projects.get_project_filenames()
def _on_project_loaded(self, path):
self.get_widget().update_active_project_path(path)
def _on_project_closed(self):
self.get_widget().update_active_project_path(None)
# ---- Debugger related methods
def _debugger_close_file(self, filename):
debugger = self.get_plugin(Plugins.Debugger, error=False)
if debugger is None:
return True
return debugger.can_close_file(filename)
# ---- Methods related to the Application plugin
# ------------------------------------------------------------------------
def _enable_file_action(self, action_name: str, enabled: bool) -> None:
"""
Enable or disable file action for this plugin.
"""
application = self.get_plugin(Plugins.Application, error=False)
if application:
application.enable_file_action(action_name, enabled, self.NAME)
def _enable_edit_action(self, action_name: str, enabled: bool) -> None:
"""Enable or disable edit action for this plugin."""
application = self.get_plugin(Plugins.Application, error=False)
if application:
application.enable_edit_action(action_name, enabled, self.NAME)
def _enable_search_action(self, action_name: str, enabled: bool) -> None:
"""Enable or disable search action for this plugin."""
application = self.get_plugin(Plugins.Application, error=False)
if application:
application.enable_search_action(action_name, enabled, self.NAME)
| Editor |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/d.py | {
"start": 1043,
"end": 2297
} | class ____(stlink_task):
pass
@extension('.d', '.di', '.D')
def d_hook(self, node):
    """Create the compilation task for a D source file.

    Registered for the '.d', '.di' and '.D' extensions. Produces either a
    plain 'd' task or a 'd_with_header' task (when the task generator has
    `generate_headers` set), whose extra output is the generated .di header.
    """
    # PE (Windows) toolchains emit .obj object files; everything else .o.
    ext = Utils.destos_to_binfmt(self.env.DEST_OS) == 'pe' and 'obj' or 'o'
    # Output name includes the task-generator index to avoid collisions
    # when the same source appears in several task generators.
    out = '%s.%d.%s' % (node.name, self.idx, ext)
    def create_compiled_task(self, name, node):
        # Closure over `out`: declares the object file next to the source.
        task = self.create_task(name, node, node.parent.find_or_declare(out))
        try:
            self.compiled_tasks.append(task)
        except AttributeError:
            # First compiled task on this generator: create the list lazily.
            self.compiled_tasks = [task]
        return task
    if getattr(self, 'generate_headers', None):
        tsk = create_compiled_task(self, 'd_with_header', node)
        # The header is an additional output of the same compile task.
        tsk.outputs.append(node.change_ext(self.env.DHEADER_ext))
    else:
        tsk = create_compiled_task(self, 'd', node)
    return tsk
@taskgen_method
def generate_header(self, filename):
    """Register *filename* so a D interface header is generated for it.

    Entries are consumed later by :func:`process_header`.
    """
    entry = [filename, self.install_path]
    try:
        self.header_lst.append(entry)
    except AttributeError:
        # First registration on this task generator: create the list.
        self.header_lst = [entry]
@feature('d')
def process_header(self):
    """Create a 'd_header' task (.di generation) for each registered header.

    Iterates the entries queued by `generate_header`; raises WafError if a
    registered source file cannot be found.
    """
    for i in getattr(self, 'header_lst', []):
        # i is [filename, install_path]; only the filename is used here.
        node = self.path.find_resource(i[0])
        if not node:
            raise Errors.WafError('file %r not found on d obj' % i[0])
        self.create_task('d_header', node, node.change_ext('.di'))
| dstlib |
python | getsentry__sentry | src/sentry/integrations/api/serializers/rest_framework/data_forwarder.py | {
"start": 633,
"end": 810
class ____(TypedDict, total=False):
    """Configuration payload for an AWS SQS data-forwarding destination.

    `total=False`: every key is optional in the incoming dict.
    """
    # URL of the target SQS queue.
    queue_url: str
    # AWS region the queue lives in.
    region: str
    # AWS credentials used to publish messages.
    access_key: str
    secret_key: str
    # Required for FIFO queues only — presumably None for standard queues;
    # TODO confirm against the serializer that consumes this dict.
    message_group_id: str | None
    # Optional S3 bucket — NOTE(review): looks like a large-payload
    # offload target; verify against the forwarder implementation.
    s3_bucket: str | None
python | readthedocs__readthedocs.org | readthedocs/domains/tests/test_tasks.py | {
"start": 586,
"end": 4909
} | class ____(TestCase):
def setUp(self):
self.user = get(User, email="user@example.com")
self.another_user = get(User, email="anotheruser@example.com")
self.project = get(Project, users=[self.user])
self.another_project = get(Project, users=[self.user, self.another_user])
self.domain = get(
Domain,
project=self.project,
ssl_status=SSL_STATUS_VALID,
domain="docs.domain.com",
)
self.domain_pending = get(
Domain,
project=self.project,
ssl_status=SSL_STATUS_PENDING,
domain="docs.domain2.com",
)
self.domain_invalid = get(
Domain,
project=self.another_project,
ssl_status=SSL_STATUS_INVALID,
domain="docs.domain3.com",
)
self.domain_skip = get(
Domain,
project=self.another_project,
ssl_status=SSL_STATUS_INVALID,
skip_validation=True,
domain="docs.domain4.com",
)
self.domain_recently_expired = get(
Domain,
project=self.another_project,
ssl_status=SSL_STATUS_PENDING,
domain="docs.domain5.com",
)
self.domain_recently_expired.validation_process_start -= timezone.timedelta(
days=settings.RTD_CUSTOM_DOMAINS_VALIDATION_PERIOD
)
self.domain_recently_expired.save()
self.domain_expired = get(
Domain,
project=self.another_project,
ssl_status=SSL_STATUS_PENDING,
domain="docs.domain6.com",
)
self.domain_expired.validation_process_start -= timezone.timedelta(
days=settings.RTD_CUSTOM_DOMAINS_VALIDATION_PERIOD + 10
)
self.domain_expired.save()
@mock.patch("readthedocs.notifications.email.send_email")
def test_email_pending_emails(self, send_email):
subject = "Pending configuration of custom domain"
email_pending_custom_domains.delay(number_of_emails=3)
self.assertEqual(send_email.call_count, 2)
kwargs = send_email.call_args_list[0][1]
self.assertEqual(kwargs["recipient"], self.user.email)
self.assertTrue(kwargs["subject"].startswith(subject))
self.assertIn(self.domain_recently_expired.domain, kwargs["subject"])
kwargs = send_email.call_args_list[1][1]
self.assertEqual(kwargs["recipient"], self.another_user.email)
self.assertTrue(kwargs["subject"].startswith(subject))
self.assertIn(self.domain_recently_expired.domain, kwargs["subject"])
@mock.patch("readthedocs.notifications.email.send_email")
def test_dont_send_email_on_given_days(self, send_email):
now = timezone.now()
days = [5, 8, 14, 16, 29, 31]
for day in days:
with mock.patch("django.utils.timezone.now") as nowmock:
nowmock.return_value = now + timezone.timedelta(days=day)
email_pending_custom_domains.delay(number_of_emails=3)
send_email.assert_not_called()
@mock.patch("readthedocs.notifications.email.send_email")
def test_send_email_on_given_days(self, send_email):
now = timezone.now()
days = [7, 15, 30]
for day in days:
send_email.reset_mock()
with mock.patch("django.utils.timezone.now") as nowmock:
nowmock.return_value = now + timezone.timedelta(days=day)
email_pending_custom_domains.delay(number_of_emails=3)
self.assertEqual(send_email.call_count, 3)
kwargs = send_email.call_args_list[0][1]
self.assertEqual(kwargs["recipient"], self.user.email)
self.assertIn(self.domain_pending.domain, kwargs["subject"])
kwargs = send_email.call_args_list[1][1]
self.assertEqual(kwargs["recipient"], self.user.email)
self.assertIn(self.domain_invalid.domain, kwargs["subject"])
kwargs = send_email.call_args_list[2][1]
self.assertEqual(kwargs["recipient"], self.another_user.email)
self.assertIn(self.domain_invalid.domain, kwargs["subject"])
@override_settings(RTD_ALLOW_ORGANIZATIONS=True)
| TestTasks |
python | keras-team__keras | keras/src/utils/file_utils_test.py | {
"start": 255,
"end": 1323
class ____(test_case.TestCase):
    """Tests for `file_utils.path_to_string` input handling."""

    def test_path_to_string_with_string_path(self):
        # A plain str must be returned unchanged.
        path = os.path.join(os.path.sep, "path", "to", "file.txt")
        string_path = file_utils.path_to_string(path)
        self.assertEqual(string_path, path)

    def test_path_to_string_with_PathLike_object(self):
        # BUG FIX: the original passed a plain str here, duplicating the
        # test above and never exercising the PathLike branch. Use an
        # actual os.PathLike object (pathlib.Path).
        import pathlib

        path = pathlib.Path(os.path.sep, "path", "to", "file.txt")
        string_path = file_utils.path_to_string(path)
        self.assertEqual(string_path, str(path))

    def test_path_to_string_with_non_string_typed_path_object(self):
        # Custom objects implementing __fspath__ must also be converted.
        class NonStringTypedPathObject:
            def __fspath__(self):
                return os.path.join(os.path.sep, "path", "to", "file.txt")

        path = NonStringTypedPathObject()
        string_path = file_utils.path_to_string(path)
        self.assertEqual(
            string_path, os.path.join(os.path.sep, "path", "to", "file.txt")
        )

    def test_path_to_string_with_none_path(self):
        # None is passed through unchanged, not stringified.
        string_path = file_utils.path_to_string(None)
        self.assertEqual(string_path, None)
| PathToStringTest |
python | numpy__numpy | numpy/_core/tests/test_numerictypes.py | {
"start": 5973,
"end": 6164
class ____(CreateValues):
    """Check the creation of heterogeneous arrays (plain, single row)"""
    # Plain (non-nested) structured dtype description used by the base tests.
    _descr = Pdescr
    # 0 → construct from a single record rather than a list of rows.
    multiple_rows = 0
    # Buffer matching _descr: the first record of the plain test data.
    _buffer = PbufferT[0]
| TestCreateValuesPlainSingle |
python | ipython__ipython | IPython/terminal/shortcuts/filters.py | {
"start": 4653,
"end": 10998
class ____(Filter):
    """A filter allowing to implement pass-through behaviour of keybindings.
    Prompt toolkit key processor dispatches only one event per binding match,
    which means that adding a new shortcut will suppress the old shortcut
    if the keybindings are the same (unless one is filtered out).
    To stop a shortcut binding from suppressing other shortcuts:
    - add the `pass_through` filter to list of filter, and
    - call `pass_through.reply(event)` in the shortcut handler.
    """
    def __init__(self):
        # True only while `reply` is re-feeding the key sequence, so the
        # re-dispatched keys skip this binding (see __call__).
        self._is_replying = False
    def reply(self, event: KeyPressEvent):
        # Re-feed the original key sequence so other bindings get a chance
        # to match it; the guard flag prevents infinite recursion into the
        # binding that called us.
        self._is_replying = True
        try:
            event.key_processor.reset()
            event.key_processor.feed_multiple(event.key_sequence)
            event.key_processor.process_keys()
        finally:
            # Always clear the flag, even if processing raised.
            self._is_replying = False
    def __call__(self):
        # Filter is inactive during the replay pass.
        return not self._is_replying
pass_through = PassThrough()
# these one is callable and re-used multiple times hence needs to be
# only defined once beforehand so that transforming back to human-readable
# names works well in the documentation.
default_buffer_focused = has_focus(DEFAULT_BUFFER)
KEYBINDING_FILTERS = {
"always": Always(),
# never is used for exposing commands which have no default keybindings
"never": Never(),
"has_line_below": has_line_below,
"has_line_above": has_line_above,
"is_cursor_at_the_end_of_line": is_cursor_at_the_end_of_line,
"has_selection": has_selection,
"has_suggestion": has_suggestion,
"vi_mode": vi_mode,
"vi_insert_mode": vi_insert_mode,
"emacs_insert_mode": emacs_insert_mode,
# https://github.com/ipython/ipython/pull/12603 argued for inclusion of
# emacs key bindings with a configurable `emacs_bindings_in_vi_insert_mode`
# toggle; when the toggle is on user can access keybindigns like `ctrl + e`
# in vi insert mode. Because some of the emacs bindings involve `escape`
# followed by another key, e.g. `escape` followed by `f`, prompt-toolkit
# needs to wait to see if there will be another character typed in before
# executing pure `escape` keybinding; in vi insert mode `escape` switches to
# command mode which is common and performance critical action for vi users.
# To avoid the delay users employ a workaround:
# https://github.com/ipython/ipython/issues/13443#issuecomment-1032753703
# which involves switching `emacs_bindings_in_vi_insert_mode` off.
#
# For the workaround to work:
# 1) end users need to toggle `emacs_bindings_in_vi_insert_mode` off
# 2) all keybindings which would involve `escape` need to respect that
# toggle by including either:
# - `vi_insert_mode & ebivim` for actions which have emacs keybindings
# predefined upstream in prompt-toolkit, or
# - `emacs_like_insert_mode` for actions which do not have existing
# emacs keybindings predefined upstream (or need overriding of the
# upstream bindings to modify behaviour), defined below.
"emacs_like_insert_mode": (vi_insert_mode & ebivim) | emacs_insert_mode,
"has_completions": has_completions,
"insert_mode": vi_insert_mode | emacs_insert_mode,
"default_buffer_focused": default_buffer_focused,
"search_buffer_focused": has_focus(SEARCH_BUFFER),
# `ebivim` stands for emacs bindings in vi insert mode
"ebivim": ebivim,
"supports_suspend": supports_suspend,
"is_windows_os": is_windows_os,
"auto_match": auto_match,
"focused_insert": (vi_insert_mode | emacs_insert_mode) & default_buffer_focused,
"not_inside_unclosed_string": not_inside_unclosed_string,
"readline_like_completions": readline_like_completions,
"preceded_by_paired_double_quotes": preceding_text(
lambda line: all_quotes_paired('"', line)
),
"preceded_by_paired_single_quotes": preceding_text(
lambda line: all_quotes_paired("'", line)
),
"preceded_by_raw_str_prefix": preceding_text(r".*(r|R)[\"'](-*)$"),
"preceded_by_two_double_quotes": preceding_text(r'^.*""$'),
"preceded_by_two_single_quotes": preceding_text(r"^.*''$"),
"followed_by_closing_paren_or_end": following_text(r"[,)}\]]|$"),
"preceded_by_opening_round_paren": preceding_text(r".*\($"),
"preceded_by_opening_bracket": preceding_text(r".*\[$"),
"preceded_by_opening_brace": preceding_text(r".*\{$"),
"preceded_by_double_quote": preceding_text('.*"$'),
"preceded_by_single_quote": preceding_text(r".*'$"),
"followed_by_closing_round_paren": following_text(r"^\)"),
"followed_by_closing_bracket": following_text(r"^\]"),
"followed_by_closing_brace": following_text(r"^\}"),
"followed_by_double_quote": following_text('^"'),
"followed_by_single_quote": following_text("^'"),
"navigable_suggestions": navigable_suggestions,
"cursor_in_leading_ws": cursor_in_leading_ws,
"pass_through": pass_through,
}
def eval_node(node: Union[ast.AST, None]):
    """Recursively evaluate a parsed filter expression into a filter object.

    Supports names (looked up in KEYBINDING_FILTERS) combined with the
    binary/unary operators whose dunders are registered in
    BINARY_OP_DUNDERS / UNARY_OP_DUNDERS.
    """
    if node is None:
        return None
    if isinstance(node, ast.Expression):
        return eval_node(node.body)
    if isinstance(node, ast.BinOp):
        # Evaluate operands first so their own errors surface before an
        # unknown-operator error.
        lhs = eval_node(node.left)
        rhs = eval_node(node.right)
        names = _find_dunder(node.op, BINARY_OP_DUNDERS)
        if not names:
            raise ValueError(f"Unknown binary operation: {node.op}")
        return getattr(lhs, names[0])(rhs)
    if isinstance(node, ast.UnaryOp):
        operand = eval_node(node.operand)
        names = _find_dunder(node.op, UNARY_OP_DUNDERS)
        if not names:
            raise ValueError(f"Unknown unary operation: {node.op}")
        return getattr(operand, names[0])()
    if isinstance(node, ast.Name):
        if node.id not in KEYBINDING_FILTERS:
            sep = "\n - "
            known_filters = sep.join(sorted(KEYBINDING_FILTERS))
            raise NameError(
                f"{node.id} is not a known shortcut filter."
                f" Known filters are: {sep}{known_filters}."
            )
        return KEYBINDING_FILTERS[node.id]
    raise ValueError("Unhandled node", ast.dump(node))
def filter_from_string(code: str):
expression = ast.parse(code, mode="eval")
return eval_node(expression)
__all__ = ["KEYBINDING_FILTERS", "filter_from_string"]
| PassThrough |
python | encode__starlette | starlette/datastructures.py | {
"start": 13086,
"end": 15397
} | class ____:
"""
An uploaded file included as part of the request data.
"""
def __init__(
self,
file: BinaryIO,
*,
size: int | None = None,
filename: str | None = None,
headers: Headers | None = None,
) -> None:
self.filename = filename
self.file = file
self.size = size
self.headers = headers or Headers()
# Capture max size from SpooledTemporaryFile if one is provided. This slightly speeds up future checks.
# Note 0 means unlimited mirroring SpooledTemporaryFile's __init__
self._max_mem_size = getattr(self.file, "_max_size", 0)
@property
def content_type(self) -> str | None:
return self.headers.get("content-type", None)
@property
def _in_memory(self) -> bool:
# check for SpooledTemporaryFile._rolled
rolled_to_disk = getattr(self.file, "_rolled", True)
return not rolled_to_disk
def _will_roll(self, size_to_add: int) -> bool:
# If we're not in_memory then we will always roll
if not self._in_memory:
return True
# Check for SpooledTemporaryFile._max_size
future_size = self.file.tell() + size_to_add
return bool(future_size > self._max_mem_size) if self._max_mem_size else False
async def write(self, data: bytes) -> None:
new_data_len = len(data)
if self.size is not None:
self.size += new_data_len
if self._will_roll(new_data_len):
await run_in_threadpool(self.file.write, data)
else:
self.file.write(data)
async def read(self, size: int = -1) -> bytes:
if self._in_memory:
return self.file.read(size)
return await run_in_threadpool(self.file.read, size)
async def seek(self, offset: int) -> None:
if self._in_memory:
self.file.seek(offset)
else:
await run_in_threadpool(self.file.seek, offset)
async def close(self) -> None:
if self._in_memory:
self.file.close()
else:
await run_in_threadpool(self.file.close)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(filename={self.filename!r}, size={self.size!r}, headers={self.headers!r})"
| UploadFile |
python | doocs__leetcode | solution/2200-2299/2225.Find Players With Zero or One Losses/Solution.py | {
"start": 0,
"end": 382
} | class ____:
def findWinners(self, matches: List[List[int]]) -> List[List[int]]:
cnt = Counter()
for winner, loser in matches:
if winner not in cnt:
cnt[winner] = 0
cnt[loser] += 1
ans = [[], []]
for x, v in sorted(cnt.items()):
if v < 2:
ans[v].append(x)
return ans
| Solution |
python | pypa__pip | src/pip/_vendor/pygments/filters/__init__.py | {
"start": 31761,
"end": 32670
} | class ____(Filter):
"""Convert keywords to lowercase or uppercase or capitalize them, which
means first letter uppercase, rest lowercase.
This can be useful e.g. if you highlight Pascal code and want to adapt the
code to your styleguide.
Options accepted:
`case` : string
The casing to convert keywords to. Must be one of ``'lower'``,
``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
case = get_choice_opt(options, 'case',
['lower', 'upper', 'capitalize'], 'lower')
self.convert = getattr(str, case)
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Keyword:
yield ttype, self.convert(value)
else:
yield ttype, value
| KeywordCaseFilter |
python | walkccc__LeetCode | solutions/3335. Total Characters in String After Transformations I/3335-2.py | {
"start": 0,
"end": 1449
} | class ____:
def lengthAfterTransformations(self, s: str, t: int) -> int:
MOD = 1_000_000_007
def matrixMult(A: list[list[int]], B: list[list[int]]) -> list[list[int]]:
"""Returns A * B."""
sz = len(A)
C = [[0] * sz for _ in range(sz)]
for i in range(sz):
for j in range(sz):
for k in range(sz):
C[i][j] += A[i][k] * B[k][j]
C[i][j] %= MOD
return C
def matrixPow(M: list[list[int]], n: int) -> list[list[int]]:
"""Returns M^n."""
if n == 0:
return [[1 if i == j else 0 # identity matrix
for j in range(len(M))]
for i in range(len(M))]
if n % 2 == 1:
return matrixMult(M, matrixPow(M, n - 1))
return matrixPow(matrixMult(M, M), n // 2)
# T[i][j] := the number of ways to transform ('a' + i) to ('a' + j)
T = self._getTransformationMatrix()
poweredT = matrixPow(T, t)
count = [0] * 26
lengths = [0] * 26
for c in s:
count[ord(c) - ord('a')] += 1
for i in range(26):
for j in range(26):
lengths[j] += count[i] * poweredT[i][j]
lengths[j] %= MOD
return sum(lengths) % MOD
def _getTransformationMatrix(self) -> list[list[int]]:
T = [[0] * 26 for _ in range(26)]
# 'z' -> 'ab'
T[25][0] = 1
T[25][1] = 1
# 'a' -> 'b', 'b' -> 'c', ..., 'y' -> 'z'
for i in range(25):
T[i][i + 1] = 1
return T
| Solution |
python | tensorflow__tensorflow | tensorflow/python/framework/type_spec_test.py | {
"start": 5336,
"end": 5510
} | class ____:
"""CompositeTensor containing a nest of tensors."""
def __init__(self, x):
self.nest = x
@type_spec_registry.register("tf.NestOfTensorsSpec")
| NestOfTensors |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/mysqldb.py | {
"start": 3442,
"end": 9886
} | class ____(MySQLDialect):
driver = "mysqldb"
supports_statement_cache = True
supports_unicode_statements = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_native_decimal = True
default_paramstyle = "format"
execution_ctx_cls = MySQLExecutionContext_mysqldb
statement_compiler = MySQLCompiler_mysqldb
preparer = MySQLIdentifierPreparer
server_version_info: tuple[int, ...]
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
self._mysql_dbapi_version = (
self._parse_dbapi_version(self.dbapi.__version__)
if self.dbapi is not None and hasattr(self.dbapi, "__version__")
else (0, 0, 0)
)
def _parse_dbapi_version(self, version: str) -> tuple[int, ...]:
m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", version)
if m:
return tuple(int(x) for x in m.group(1, 2, 3) if x is not None)
else:
return (0, 0, 0)
@util.langhelpers.memoized_property
def supports_server_side_cursors(self) -> bool:
try:
cursors = __import__("MySQLdb.cursors").cursors
self._sscursor = cursors.SSCursor
return True
except (ImportError, AttributeError):
return False
@classmethod
def import_dbapi(cls) -> DBAPIModule:
return __import__("MySQLdb")
def on_connect(self) -> Callable[[DBAPIConnection], None]:
super_ = super().on_connect()
def on_connect(conn: DBAPIConnection) -> None:
if super_ is not None:
super_(conn)
charset_name = conn.character_set_name()
if charset_name is not None:
cursor = conn.cursor()
cursor.execute("SET NAMES %s" % charset_name)
cursor.close()
return on_connect
def do_ping(self, dbapi_connection: DBAPIConnection) -> Literal[True]:
dbapi_connection.ping()
return True
def do_executemany(
self,
cursor: DBAPICursor,
statement: str,
parameters: _DBAPIMultiExecuteParams,
context: Optional[ExecutionContext] = None,
) -> None:
rowcount = cursor.executemany(statement, parameters)
if context is not None:
cast(MySQLExecutionContext, context)._rowcount = rowcount
def create_connect_args(
self, url: URL, _translate_args: Optional[dict[str, Any]] = None
) -> ConnectArgsType:
if _translate_args is None:
_translate_args = dict(
database="db", username="user", password="passwd"
)
opts = url.translate_connect_args(**_translate_args)
opts.update(url.query)
util.coerce_kw_type(opts, "compress", bool)
util.coerce_kw_type(opts, "connect_timeout", int)
util.coerce_kw_type(opts, "read_timeout", int)
util.coerce_kw_type(opts, "write_timeout", int)
util.coerce_kw_type(opts, "client_flag", int)
util.coerce_kw_type(opts, "local_infile", bool)
# Note: using either of the below will cause all strings to be
# returned as Unicode, both in raw SQL operations and with column
# types like String and MSString.
util.coerce_kw_type(opts, "use_unicode", bool)
util.coerce_kw_type(opts, "charset", str)
# Rich values 'cursorclass' and 'conv' are not supported via
# query string.
ssl = {}
keys = [
("ssl_ca", str),
("ssl_key", str),
("ssl_cert", str),
("ssl_capath", str),
("ssl_cipher", str),
("ssl_check_hostname", bool),
]
for key, kw_type in keys:
if key in opts:
ssl[key[4:]] = opts[key]
util.coerce_kw_type(ssl, key[4:], kw_type)
del opts[key]
if ssl:
opts["ssl"] = ssl
# FOUND_ROWS must be set in CLIENT_FLAGS to enable
# supports_sane_rowcount.
client_flag = opts.get("client_flag", 0)
client_flag_found_rows = self._found_rows_client_flag()
if client_flag_found_rows is not None:
client_flag |= client_flag_found_rows
opts["client_flag"] = client_flag
return [], opts
def _found_rows_client_flag(self) -> Optional[int]:
if self.dbapi is not None:
try:
CLIENT_FLAGS = __import__(
self.dbapi.__name__ + ".constants.CLIENT"
).constants.CLIENT
except (AttributeError, ImportError):
return None
else:
return CLIENT_FLAGS.FOUND_ROWS # type: ignore
else:
return None
def _extract_error_code(self, exception: DBAPIModule.Error) -> int:
return exception.args[0] # type: ignore[no-any-return]
def _detect_charset(self, connection: Connection) -> str:
"""Sniff out the character set in use for connection results."""
try:
# note: the SQL here would be
# "SHOW VARIABLES LIKE 'character_set%%'"
cset_name: Callable[[], str] = (
connection.connection.character_set_name
)
except AttributeError:
util.warn(
"No 'character_set_name' can be detected with "
"this MySQL-Python version; "
"please upgrade to a recent version of MySQL-Python. "
"Assuming latin1."
)
return "latin1"
else:
return cset_name()
def get_isolation_level_values(
self, dbapi_conn: DBAPIConnection
) -> tuple[IsolationLevel, ...]:
return (
"SERIALIZABLE",
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
"AUTOCOMMIT",
)
def detect_autocommit_setting(self, dbapi_conn: DBAPIConnection) -> bool:
return dbapi_conn.get_autocommit() # type: ignore[no-any-return]
def set_isolation_level(
self, dbapi_connection: DBAPIConnection, level: IsolationLevel
) -> None:
if level == "AUTOCOMMIT":
dbapi_connection.autocommit(True)
else:
dbapi_connection.autocommit(False)
super().set_isolation_level(dbapi_connection, level)
dialect = MySQLDialect_mysqldb
| MySQLDialect_mysqldb |
python | Pylons__pyramid | src/pyramid/httpexceptions.py | {
"start": 29462,
"end": 30051
} | class ____(HTTPClientError):
"""
subclass of :class:`~HTTPClientError`
The server SHOULD return a response with this status code if a
request included a Range request-header field, and none of the
range-specifier values in this field overlap the current extent
of the selected resource, and the request did not include an
If-Range request-header field.
code: 416, title: Request Range Not Satisfiable
"""
code = 416
title = 'Request Range Not Satisfiable'
explanation = 'The Range requested is not available.'
| HTTPRequestRangeNotSatisfiable |
python | cookiecutter__cookiecutter | cookiecutter/exceptions.py | {
"start": 2709,
"end": 2848
} | class ____(CookiecutterException):
"""
Exception for hook failures.
Raised when a hook script fails.
"""
| FailedHookException |
python | pandas-dev__pandas | pandas/core/arrays/boolean.py | {
"start": 7621,
"end": 13482
} | class ____(BaseMaskedArray):
"""
Array of boolean (True/False) data with missing values.
This is a pandas Extension array for boolean data, under the hood
represented by 2 numpy arrays: a boolean array with the data and
a boolean array with the mask (True indicating missing).
BooleanArray implements Kleene logic (sometimes called three-value
logic) for logical operations. See :ref:`boolean.kleene` for more.
To construct a BooleanArray from generic array-like input, use
:func:`pandas.array` specifying ``dtype="boolean"`` (see examples
below).
.. warning::
BooleanArray is considered experimental. The implementation and
parts of the API may change without warning.
Parameters
----------
values : numpy.ndarray
A 1-d boolean-dtype array with the data.
mask : numpy.ndarray
A 1-d boolean-dtype array indicating missing values (True
indicates missing).
copy : bool, default False
Whether to copy the `values` and `mask` arrays.
Attributes
----------
None
Methods
-------
None
Returns
-------
BooleanArray
See Also
--------
array : Create an array from data with the appropriate dtype.
BooleanDtype : Extension dtype for boolean data.
Series : One-dimensional ndarray with axis labels (including time series).
DataFrame : Two-dimensional, size-mutable, potentially heterogeneous tabular data.
Examples
--------
Create a BooleanArray with :func:`pandas.array`:
>>> pd.array([True, False, None], dtype="boolean")
<BooleanArray>
[True, False, <NA>]
Length: 3, dtype: boolean
"""
_TRUE_VALUES = {"True", "TRUE", "true", "1", "1.0"}
_FALSE_VALUES = {"False", "FALSE", "false", "0", "0.0"}
@classmethod
def _simple_new(cls, values: np.ndarray, mask: npt.NDArray[np.bool_]) -> Self:
result = super()._simple_new(values, mask)
result._dtype = BooleanDtype()
return result
def __init__(
self, values: np.ndarray, mask: np.ndarray, copy: bool = False
) -> None:
if not (isinstance(values, np.ndarray) and values.dtype == np.bool_):
raise TypeError(
"values should be boolean numpy array. Use "
"the 'pd.array' function instead"
)
self._dtype = BooleanDtype()
super().__init__(values, mask, copy=copy)
@property
def dtype(self) -> BooleanDtype:
return self._dtype
@classmethod
def _from_sequence_of_strings(
cls,
strings: list[str],
*,
dtype: ExtensionDtype,
copy: bool = False,
true_values: list[str] | None = None,
false_values: list[str] | None = None,
none_values: list[str] | None = None,
) -> BooleanArray:
true_values_union = cls._TRUE_VALUES.union(true_values or [])
false_values_union = cls._FALSE_VALUES.union(false_values or [])
if none_values is None:
none_values = []
def map_string(s) -> bool | None:
if s in true_values_union:
return True
elif s in false_values_union:
return False
elif s in none_values:
return None
else:
raise ValueError(f"{s} cannot be cast to bool")
scalars = np.array(strings, dtype=object)
mask = isna(scalars)
scalars[~mask] = list(map(map_string, scalars[~mask]))
return cls._from_sequence(scalars, dtype=dtype, copy=copy)
_HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_)
@classmethod
def _coerce_to_array(
cls, value, *, dtype: DtypeObj, copy: bool = False
) -> tuple[np.ndarray, np.ndarray]:
if dtype:
assert dtype == "boolean"
return coerce_to_array(value, copy=copy)
def _logical_method(self, other, op):
assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"}
other_is_scalar = lib.is_scalar(other)
mask = None
if isinstance(other, BooleanArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other, dtype="bool")
if other.ndim > 1:
return NotImplemented
other, mask = coerce_to_array(other, copy=False)
elif isinstance(other, np.bool_):
other = other.item()
if other_is_scalar and other is not libmissing.NA and not lib.is_bool(other):
raise TypeError(
"'other' should be pandas.NA or a bool. "
f"Got {type(other).__name__} instead."
)
if not other_is_scalar and len(self) != len(other):
raise ValueError("Lengths must match")
if op.__name__ in {"or_", "ror_"}:
result, mask = ops.kleene_or(self._data, other, self._mask, mask)
elif op.__name__ in {"and_", "rand_"}:
result, mask = ops.kleene_and(self._data, other, self._mask, mask)
else:
# i.e. xor, rxor
result, mask = ops.kleene_xor(self._data, other, self._mask, mask)
# i.e. BooleanArray
return self._maybe_mask_result(result, mask)
def _accumulate(
self, name: str, *, skipna: bool = True, **kwargs
) -> BaseMaskedArray:
data = self._data
mask = self._mask
if name in ("cummin", "cummax"):
op = getattr(masked_accumulations, name)
data, mask = op(data, mask, skipna=skipna, **kwargs)
return self._simple_new(data, mask)
else:
from pandas.core.arrays import IntegerArray
return IntegerArray(data.astype(int), mask)._accumulate(
name, skipna=skipna, **kwargs
)
| BooleanArray |
python | streamlit__streamlit | lib/tests/streamlit/elements/file_uploader_test.py | {
"start": 17324,
"end": 20091
} | class ____(DeltaGeneratorTestCase):
def test_stable_id_with_key(self):
"""Test that the widget ID is stable when a stable key is provided, unless whitelisted kwargs change."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
st.file_uploader(
label="Label 1",
key="file_uploader_key",
help="help 1",
width="stretch",
on_change=lambda: None,
args=("arg1", "arg2"),
kwargs={"kwarg1": "kwarg1"},
label_visibility="visible",
disabled=False,
# Whitelisted kwargs
type=["txt", "csv"],
accept_multiple_files=False,
)
c1 = self.get_delta_from_queue().new_element.file_uploader
id1 = c1.id
st.file_uploader(
label="Label 2",
key="file_uploader_key",
help="help 2",
width=300,
on_change=lambda: None,
args=("arg_1", "arg_2"),
kwargs={"kwarg_1": "kwarg_1"},
label_visibility="hidden",
disabled=True,
# Whitelisted kwargs (same as before)
type=["txt", "csv"],
accept_multiple_files=False,
)
c2 = self.get_delta_from_queue().new_element.file_uploader
id2 = c2.id
assert id1 == id2
@parameterized.expand(
[
("type", ["txt"], ["pdf", "doc"]),
("type", None, ["csv"]),
("accept_multiple_files", False, True),
("accept_multiple_files", False, "directory"),
]
)
def test_whitelisted_stable_key_kwargs(
self, kwarg_name: str, value1: object, value2: object
):
"""Changing whitelisted kwargs should change the ID even when a key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
base_kwargs = {
"label": "Label",
"key": "file_uploader_key2",
"type": ["txt"],
"accept_multiple_files": False,
}
base_kwargs[kwarg_name] = value1
st.file_uploader(**base_kwargs)
c1 = self.get_delta_from_queue().new_element.file_uploader
id1 = c1.id
base_kwargs[kwarg_name] = value2
st.file_uploader(**base_kwargs)
c2 = self.get_delta_from_queue().new_element.file_uploader
id2 = c2.id
assert id1 != id2
| FileUploaderStableIdTest |
python | pypa__warehouse | tests/unit/accounts/test_security_policy.py | {
"start": 5603,
"end": 21706
} | class ____:
def test_verify(self):
assert verifyClass(
ISecurityPolicy,
security_policy.SessionSecurityPolicy,
)
def test_noops(self):
policy = security_policy.SessionSecurityPolicy()
with pytest.raises(NotImplementedError):
policy.authenticated_userid(pretend.stub())
def test_forget_and_remember(self, monkeypatch):
request = pretend.stub()
userid = pretend.stub()
forgets = pretend.stub()
remembers = pretend.stub()
session_helper_obj = pretend.stub(
forget=pretend.call_recorder(lambda r, **kw: forgets),
remember=pretend.call_recorder(lambda r, uid, **kw: remembers),
)
session_helper_cls = pretend.call_recorder(lambda: session_helper_obj)
monkeypatch.setattr(
security_policy, "SessionAuthenticationHelper", session_helper_cls
)
policy = security_policy.SessionSecurityPolicy()
assert session_helper_cls.calls == [pretend.call()]
assert policy.forget(request, foo=None) == forgets
assert session_helper_obj.forget.calls == [pretend.call(request, foo=None)]
assert policy.remember(request, userid, foo=None) == remembers
assert session_helper_obj.remember.calls == [
pretend.call(request, userid, foo=None)
]
def test_identity_missing_route(self, monkeypatch):
session_helper_obj = pretend.stub()
session_helper_cls = pretend.call_recorder(lambda: session_helper_obj)
monkeypatch.setattr(
security_policy, "SessionAuthenticationHelper", session_helper_cls
)
policy = security_policy.SessionSecurityPolicy()
vary_cb = pretend.stub()
add_vary_cb = pretend.call_recorder(lambda *v: vary_cb)
monkeypatch.setattr(security_policy, "add_vary_callback", add_vary_cb)
request = pretend.stub(
add_response_callback=pretend.call_recorder(lambda cb: None),
matched_route=None,
banned=pretend.stub(by_ip=lambda ip_address: False),
remote_addr=REMOTE_ADDR,
)
assert policy.identity(request) is None
assert request.authentication_method == AuthenticationMethod.SESSION
assert session_helper_cls.calls == [pretend.call()]
assert add_vary_cb.calls == [pretend.call("Cookie")]
assert request.add_response_callback.calls == [pretend.call(vary_cb)]
@pytest.mark.parametrize(
"route_name",
[
"forklift.legacy.file_upload",
"api.echo",
],
)
def test_identity_invalid_route(self, route_name, monkeypatch):
session_helper_obj = pretend.stub()
session_helper_cls = pretend.call_recorder(lambda: session_helper_obj)
monkeypatch.setattr(
security_policy, "SessionAuthenticationHelper", session_helper_cls
)
policy = security_policy.SessionSecurityPolicy()
vary_cb = pretend.stub()
add_vary_cb = pretend.call_recorder(lambda *v: vary_cb)
monkeypatch.setattr(security_policy, "add_vary_callback", add_vary_cb)
request = pretend.stub(
add_response_callback=pretend.call_recorder(lambda cb: None),
matched_route=pretend.stub(name=route_name),
banned=pretend.stub(by_ip=lambda ip_address: False),
remote_addr=REMOTE_ADDR,
)
assert policy.identity(request) is None
assert request.authentication_method == AuthenticationMethod.SESSION
assert session_helper_cls.calls == [pretend.call()]
assert add_vary_cb.calls == [pretend.call("Cookie")]
assert request.add_response_callback.calls == [pretend.call(vary_cb)]
def test_identity_no_userid(self, monkeypatch):
session_helper_obj = pretend.stub(
authenticated_userid=pretend.call_recorder(lambda r: None)
)
session_helper_cls = pretend.call_recorder(lambda: session_helper_obj)
monkeypatch.setattr(
security_policy, "SessionAuthenticationHelper", session_helper_cls
)
policy = security_policy.SessionSecurityPolicy()
vary_cb = pretend.stub()
add_vary_cb = pretend.call_recorder(lambda *v: vary_cb)
monkeypatch.setattr(security_policy, "add_vary_callback", add_vary_cb)
request = pretend.stub(
add_response_callback=pretend.call_recorder(lambda cb: None),
matched_route=pretend.stub(name="a.permitted.route"),
banned=pretend.stub(by_ip=lambda ip_address: False),
remote_addr=REMOTE_ADDR,
)
assert policy.identity(request) is None
assert request.authentication_method == AuthenticationMethod.SESSION
assert session_helper_obj.authenticated_userid.calls == [pretend.call(request)]
assert session_helper_cls.calls == [pretend.call()]
assert add_vary_cb.calls == [pretend.call("Cookie")]
assert request.add_response_callback.calls == [pretend.call(vary_cb)]
def test_identity_no_user(self, monkeypatch):
userid = pretend.stub()
session_helper_obj = pretend.stub(
authenticated_userid=pretend.call_recorder(lambda r: userid)
)
session_helper_cls = pretend.call_recorder(lambda: session_helper_obj)
monkeypatch.setattr(
security_policy, "SessionAuthenticationHelper", session_helper_cls
)
policy = security_policy.SessionSecurityPolicy()
vary_cb = pretend.stub()
add_vary_cb = pretend.call_recorder(lambda *v: vary_cb)
monkeypatch.setattr(security_policy, "add_vary_callback", add_vary_cb)
user_service = pretend.stub(get_user=pretend.call_recorder(lambda uid: None))
request = pretend.stub(
add_response_callback=pretend.call_recorder(lambda cb: None),
matched_route=pretend.stub(name="a.permitted.route"),
find_service=pretend.call_recorder(lambda i, **kw: user_service),
banned=pretend.stub(by_ip=lambda ip_address: False),
remote_addr=REMOTE_ADDR,
)
assert policy.identity(request) is None
assert request.authentication_method == AuthenticationMethod.SESSION
assert session_helper_obj.authenticated_userid.calls == [pretend.call(request)]
assert session_helper_cls.calls == [pretend.call()]
assert request.find_service.calls == [pretend.call(IUserService, context=None)]
assert user_service.get_user.calls == [pretend.call(userid)]
assert add_vary_cb.calls == [pretend.call("Cookie")]
assert request.add_response_callback.calls == [pretend.call(vary_cb)]
def test_identity_password_outdated(self, monkeypatch):
userid = pretend.stub()
session_helper_obj = pretend.stub(
authenticated_userid=pretend.call_recorder(lambda r: userid)
)
session_helper_cls = pretend.call_recorder(lambda: session_helper_obj)
monkeypatch.setattr(
security_policy, "SessionAuthenticationHelper", session_helper_cls
)
policy = security_policy.SessionSecurityPolicy()
vary_cb = pretend.stub()
add_vary_cb = pretend.call_recorder(lambda *v: vary_cb)
monkeypatch.setattr(security_policy, "add_vary_callback", add_vary_cb)
user = pretend.stub()
timestamp = pretend.stub()
user_service = pretend.stub(
get_user=pretend.call_recorder(lambda uid: user),
get_password_timestamp=pretend.call_recorder(lambda uid: timestamp),
is_disabled=lambda uid: (False, None),
)
request = pretend.stub(
add_response_callback=pretend.call_recorder(lambda cb: None),
matched_route=pretend.stub(name="a.permitted.route"),
find_service=pretend.call_recorder(lambda i, **kw: user_service),
session=pretend.stub(
password_outdated=pretend.call_recorder(lambda ts: True),
invalidate=pretend.call_recorder(lambda: None),
flash=pretend.call_recorder(lambda *a, **kw: None),
),
banned=pretend.stub(by_ip=lambda ip_address: False),
remote_addr=REMOTE_ADDR,
)
assert policy.identity(request) is None
assert request.authentication_method == AuthenticationMethod.SESSION
assert session_helper_obj.authenticated_userid.calls == [pretend.call(request)]
assert session_helper_cls.calls == [pretend.call()]
assert request.find_service.calls == [pretend.call(IUserService, context=None)]
assert user_service.get_user.calls == [pretend.call(userid)]
assert request.session.password_outdated.calls == [pretend.call(timestamp)]
assert user_service.get_password_timestamp.calls == [pretend.call(userid)]
assert request.session.invalidate.calls == [pretend.call()]
assert request.session.flash.calls == [
pretend.call("Session invalidated by password change", queue="error")
]
assert add_vary_cb.calls == [pretend.call("Cookie")]
assert request.add_response_callback.calls == [pretend.call(vary_cb)]
def test_identity_is_disabled(self, monkeypatch):
userid = pretend.stub()
session_helper_obj = pretend.stub(
authenticated_userid=pretend.call_recorder(lambda r: userid)
)
session_helper_cls = pretend.call_recorder(lambda: session_helper_obj)
monkeypatch.setattr(
security_policy, "SessionAuthenticationHelper", session_helper_cls
)
policy = security_policy.SessionSecurityPolicy()
vary_cb = pretend.stub()
add_vary_cb = pretend.call_recorder(lambda *v: vary_cb)
monkeypatch.setattr(security_policy, "add_vary_callback", add_vary_cb)
user = pretend.stub()
timestamp = pretend.stub()
user_service = pretend.stub(
get_user=pretend.call_recorder(lambda uid: user),
get_password_timestamp=pretend.call_recorder(lambda uid: timestamp),
is_disabled=pretend.call_recorder(lambda uid: (True, "Said So!")),
)
request = pretend.stub(
add_response_callback=pretend.call_recorder(lambda cb: None),
matched_route=pretend.stub(name="a.permitted.route"),
find_service=pretend.call_recorder(lambda i, **kw: user_service),
session=pretend.stub(
password_outdated=pretend.call_recorder(lambda ts: True),
invalidate=pretend.call_recorder(lambda: None),
flash=pretend.call_recorder(lambda *a, **kw: None),
),
banned=pretend.stub(by_ip=lambda ip_address: False),
remote_addr=REMOTE_ADDR,
)
assert policy.identity(request) is None
assert request.authentication_method == AuthenticationMethod.SESSION
assert session_helper_obj.authenticated_userid.calls == [pretend.call(request)]
assert session_helper_cls.calls == [pretend.call()]
assert request.find_service.calls == [pretend.call(IUserService, context=None)]
assert user_service.get_user.calls == [pretend.call(userid)]
assert request.session.password_outdated.calls == []
assert user_service.get_password_timestamp.calls == []
assert user_service.is_disabled.calls == [pretend.call(userid)]
assert request.session.invalidate.calls == [pretend.call()]
assert request.session.flash.calls == [
pretend.call("Session invalidated", queue="error")
]
assert add_vary_cb.calls == [pretend.call("Cookie")]
assert request.add_response_callback.calls == [pretend.call(vary_cb)]
def test_identity(self, monkeypatch):
userid = pretend.stub()
session_helper_obj = pretend.stub(
authenticated_userid=pretend.call_recorder(lambda r: userid)
)
session_helper_cls = pretend.call_recorder(lambda: session_helper_obj)
monkeypatch.setattr(
security_policy, "SessionAuthenticationHelper", session_helper_cls
)
policy = security_policy.SessionSecurityPolicy()
vary_cb = pretend.stub()
add_vary_cb = pretend.call_recorder(lambda *v: vary_cb)
monkeypatch.setattr(security_policy, "add_vary_callback", add_vary_cb)
user = pretend.stub()
timestamp = pretend.stub()
user_service = pretend.stub(
get_user=pretend.call_recorder(lambda uid: user),
get_password_timestamp=pretend.call_recorder(lambda uid: timestamp),
is_disabled=lambda uid: (False, None),
)
request = pretend.stub(
add_response_callback=pretend.call_recorder(lambda cb: None),
matched_route=pretend.stub(name="a.permitted.route"),
find_service=pretend.call_recorder(lambda i, **kw: user_service),
session=pretend.stub(
password_outdated=pretend.call_recorder(lambda ts: False)
),
banned=pretend.stub(by_ip=lambda ip_address: False),
remote_addr=REMOTE_ADDR,
)
assert policy.identity(request).user is user
assert request.authentication_method == AuthenticationMethod.SESSION
assert session_helper_obj.authenticated_userid.calls == [pretend.call(request)]
assert session_helper_cls.calls == [pretend.call()]
assert request.find_service.calls == [pretend.call(IUserService, context=None)]
assert request.session.password_outdated.calls == [pretend.call(timestamp)]
assert user_service.get_password_timestamp.calls == [pretend.call(userid)]
assert user_service.get_user.calls == [pretend.call(userid)]
assert add_vary_cb.calls == [pretend.call("Cookie")]
assert request.add_response_callback.calls == [pretend.call(vary_cb)]
def test_identity_ip_banned(self, monkeypatch):
userid = pretend.stub()
session_helper_obj = pretend.stub(
authenticated_userid=pretend.call_recorder(lambda r: userid)
)
session_helper_cls = pretend.call_recorder(lambda: session_helper_obj)
monkeypatch.setattr(
security_policy, "SessionAuthenticationHelper", session_helper_cls
)
policy = security_policy.SessionSecurityPolicy()
vary_cb = pretend.stub()
add_vary_cb = pretend.call_recorder(lambda *v: vary_cb)
monkeypatch.setattr(security_policy, "add_vary_callback", add_vary_cb)
user = pretend.stub()
timestamp = pretend.stub()
user_service = pretend.stub(
get_user=pretend.call_recorder(lambda uid: user),
get_password_timestamp=pretend.call_recorder(lambda uid: timestamp),
)
request = pretend.stub(
add_response_callback=pretend.call_recorder(lambda cb: None),
matched_route=pretend.stub(name="a.permitted.route"),
find_service=pretend.call_recorder(lambda i, **kw: user_service),
session=pretend.stub(
password_outdated=pretend.call_recorder(lambda ts: False)
),
banned=pretend.stub(by_ip=lambda ip_address: True),
remote_addr=REMOTE_ADDR,
)
assert policy.identity(request) is None
assert request.authentication_method == AuthenticationMethod.SESSION
assert session_helper_obj.authenticated_userid.calls == []
assert session_helper_cls.calls == [pretend.call()]
assert request.find_service.calls == []
assert request.session.password_outdated.calls == []
assert user_service.get_password_timestamp.calls == []
assert user_service.get_user.calls == []
assert add_vary_cb.calls == [pretend.call("Cookie")]
assert request.add_response_callback.calls == [pretend.call(vary_cb)]
@pytest.mark.parametrize(
"policy_class",
[security_policy.SessionSecurityPolicy],
)
| TestSessionSecurityPolicy |
python | pallets__werkzeug | src/werkzeug/serving.py | {
"start": 2357,
"end": 4787
} | class ____(io.RawIOBase):
"""An input stream that handles Transfer-Encoding 'chunked'"""
def __init__(self, rfile: t.IO[bytes]) -> None:
self._rfile = rfile
self._done = False
self._len = 0
def readable(self) -> bool:
return True
def read_chunk_len(self) -> int:
try:
line = self._rfile.readline().decode("latin1")
_len = int(line.strip(), 16)
except ValueError as e:
raise OSError("Invalid chunk header") from e
if _len < 0:
raise OSError("Negative chunk length not allowed")
return _len
def readinto(self, buf: bytearray) -> int: # type: ignore
read = 0
while not self._done and read < len(buf):
if self._len == 0:
# This is the first chunk or we fully consumed the previous
# one. Read the next length of the next chunk
self._len = self.read_chunk_len()
if self._len == 0:
# Found the final chunk of size 0. The stream is now exhausted,
# but there is still a final newline that should be consumed
self._done = True
if self._len > 0:
# There is data (left) in this chunk, so append it to the
# buffer. If this operation fully consumes the chunk, this will
# reset self._len to 0.
n = min(len(buf), self._len)
# If (read + chunk size) becomes more than len(buf), buf will
# grow beyond the original size and read more data than
# required. So only read as much data as can fit in buf.
if read + n > len(buf):
buf[read:] = self._rfile.read(len(buf) - read)
self._len -= len(buf) - read
read = len(buf)
else:
buf[read : read + n] = self._rfile.read(n)
self._len -= n
read += n
if self._len == 0:
# Skip the terminating newline of a chunk that has been fully
# consumed. This also applies to the 0-sized final chunk
terminator = self._rfile.readline()
if terminator not in (b"\n", b"\r\n", b"\r"):
raise OSError("Missing chunk terminating newline")
return read
| DechunkedInput |
python | pypa__pipenv | pipenv/patched/pip/_internal/network/auth.py | {
"start": 1758,
"end": 2936
} | class ____(KeyRingBaseProvider):
"""Keyring interface which uses locally imported `keyring`"""
has_keyring = True
def __init__(self) -> None:
import keyring
self.keyring = keyring
def get_auth_info(self, url: str, username: Optional[str]) -> Optional[AuthInfo]:
# Support keyring's get_credential interface which supports getting
# credentials without a username. This is only available for
# keyring>=15.2.0.
if hasattr(self.keyring, "get_credential"):
logger.debug("Getting credentials from keyring for %s", url)
cred = self.keyring.get_credential(url, username)
if cred is not None:
return cred.username, cred.password
return None
if username is not None:
logger.debug("Getting password from keyring for %s", url)
password = self.keyring.get_password(url, username)
if password:
return username, password
return None
def save_auth_info(self, url: str, username: str, password: str) -> None:
self.keyring.set_password(url, username, password)
| KeyRingPythonProvider |
python | python__mypy | mypyc/test/test_external.py | {
"start": 223,
"end": 1832
} | class ____(unittest.TestCase):
# TODO: Get this to work on Windows.
# (Or don't. It is probably not a good use of time.)
@unittest.skipIf(sys.platform.startswith("win"), "rt tests don't work on windows")
def test_c_unit_test(self) -> None:
"""Run C unit tests in a subprocess."""
cppflags: list[str] = []
env = os.environ.copy()
if sys.platform == "darwin":
cppflags += ["-O0", "-mmacosx-version-min=10.10", "-stdlib=libc++"]
elif sys.platform == "linux":
cppflags += ["-O0"]
env["CPPFLAGS"] = " ".join(cppflags)
# Build Python wrapper for C unit tests.
with tempfile.TemporaryDirectory() as tmpdir:
status = subprocess.check_call(
[
sys.executable,
"setup.py",
"build_ext",
f"--build-lib={tmpdir}",
f"--build-temp={tmpdir}",
"--run-capi-tests",
],
env=env,
cwd=os.path.join(base_dir, "mypyc", "lib-rt"),
)
# Run C unit tests.
env = os.environ.copy()
if "GTEST_COLOR" not in os.environ:
env["GTEST_COLOR"] = "yes" # Use fancy colors
status = subprocess.call(
[sys.executable, "-c", "import sys, test_capi; sys.exit(test_capi.run_tests())"],
env=env,
cwd=tmpdir,
)
if status != 0:
raise AssertionError("make test: C unit test failure")
| TestExternal |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_project_views.py | {
"start": 13394,
"end": 20357
} | class ____(TestCase):
def setUp(self):
self.user = new(User, username="eric")
self.user.set_password("test")
self.user.save()
self.client.login(username="eric", password="test")
self.project = get(Project, slug="pip", users=[self.user])
def test_dashboard_number_of_queries(self):
# NOTE: create more than 15 projects, as we paginate by 15.
for i in range(30):
project = get(
Project,
slug=f"project-{i}",
users=[self.user],
)
version = project.versions.first()
version.active = True
version.built = True
version.save()
for _ in range(3):
get(
Build,
project=project,
version=version,
success=True,
state=BUILD_STATE_FINISHED,
)
# This number is bit higher, but for projects with lots of builds
# is better to have more queries than optimizing with a prefetch,
# see comment in prefetch_latest_build.
with self.assertNumQueries(26):
r = self.client.get(reverse(("projects_dashboard")))
assert r.status_code == 200
def test_versions_page(self):
self.project.versions.create(verbose_name="1.0")
response = self.client.get("/projects/pip/versions/")
self.assertEqual(response.status_code, 200)
# Test if the versions page works with a version that contains a slash.
# That broke in the past, see issue #1176.
self.project.versions.create(verbose_name="1.0/with-slash")
response = self.client.get("/projects/pip/versions/")
self.assertEqual(response.status_code, 200)
def test_delete_project(self):
response = self.client.get("/dashboard/pip/delete/")
self.assertEqual(response.status_code, 200)
# Mocked like this because the function is imported inside a class method
# https://stackoverflow.com/a/22201798
with mock.patch(
"readthedocs.projects.tasks.utils.clean_project_resources"
) as clean_project_resources:
response = self.client.post("/dashboard/pip/delete/")
self.assertEqual(response.status_code, 302)
self.assertFalse(Project.objects.filter(slug="pip").exists())
clean_project_resources.assert_called_once()
self.assertEqual(
clean_project_resources.call_args[0][0].slug, self.project.slug
)
def test_delete_superproject(self):
sub_proj = get(Project, slug="test-sub-project", users=[self.user])
self.assertFalse(self.project.subprojects.all().exists())
self.project.add_subproject(sub_proj)
response = self.client.get("/dashboard/pip/delete/")
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
'This project <a href="/dashboard/pip/subprojects/">has subprojects</a> under it. '
"Deleting this project will make them to become regular projects. "
"This will break the URLs of all its subprojects and they will be served normally as other projects.",
count=1,
html=True,
)
@mock.patch("readthedocs.projects.views.private.attach_webhook")
def test_integration_create(self, attach_webhook):
response = self.client.post(
reverse("projects_integrations_create", args=[self.project.slug]),
data={
"project": self.project.pk,
"integration_type": GitHubWebhook.GITHUB_WEBHOOK,
},
)
integration = GitHubWebhook.objects.filter(project=self.project)
self.assertTrue(integration.exists())
self.assertEqual(response.status_code, 302)
attach_webhook.assert_called_once_with(
project_pk=self.project.pk,
integration=integration.first(),
user_pk=None,
)
@mock.patch("readthedocs.projects.views.private.attach_webhook")
def test_integration_create_generic_webhook(self, attach_webhook):
response = self.client.post(
reverse("projects_integrations_create", args=[self.project.slug]),
data={
"project": self.project.pk,
"integration_type": GenericAPIWebhook.API_WEBHOOK,
},
)
integration = GenericAPIWebhook.objects.filter(project=self.project)
self.assertTrue(integration.exists())
self.assertEqual(response.status_code, 302)
attach_webhook.assert_not_called()
def test_integration_webhooks_sync_no_remote_repository(self):
self.project.has_valid_webhook = True
self.project.save()
integration = get(
GitHubWebhook,
project=self.project,
)
response = self.client.post(
reverse(
"projects_integrations_webhooks_sync",
kwargs={
"project_slug": self.project.slug,
"integration_pk": integration.pk,
},
),
)
self.project.refresh_from_db()
self.assertEqual(response.status_code, 302)
self.assertFalse(self.project.has_valid_webhook)
def test_remove_user(self):
user = get(User, username="test")
self.project.users.add(user)
self.assertEqual(self.project.users.count(), 2)
r = self.client.post(
reverse("projects_users_delete", args=(self.project.slug,)),
data={"username": "test"},
)
self.assertTrue(r.status_code, 302)
self.assertEqual(self.project.users.count(), 1)
self.assertEqual(self.project.users.last().username, "eric")
def test_remove_own_user(self):
user = get(User, username="test")
self.project.users.add(user)
self.assertEqual(self.project.users.count(), 2)
r = self.client.post(
reverse("projects_users_delete", args=(self.project.slug,)),
data={"username": "eric"},
)
self.assertTrue(r.status_code, 302)
self.assertEqual(self.project.users.count(), 1)
self.assertEqual(self.project.users.last().username, "test")
def test_remove_last_user(self):
self.assertEqual(self.project.users.count(), 1)
r = self.client.post(
reverse("projects_users_delete", args=(self.project.slug,)),
data={"username": "eric"},
)
self.assertTrue(r.status_code, 400)
self.assertEqual(self.project.users.count(), 1)
self.assertEqual(self.project.users.last().username, "eric")
@mock.patch("readthedocs.core.utils.trigger_build", mock.MagicMock())
@mock.patch("readthedocs.projects.tasks.builds.update_docs_task", mock.MagicMock())
| TestPrivateViews |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_python_ast_rule.py | {
"start": 15565,
"end": 18941
} | class ____:
"""Test integration with the full validation pipeline."""
def test_integration_valid_docstring(self):
"""Test integration with valid Python code blocks."""
docstring = """
Function with valid Python examples.
Args:
name: The name parameter
Returns:
A greeting string
Examples:
Basic usage:
.. code-block:: python
result = greet("Alice")
print(result)
"""
result = validate_docstring_text(docstring, "test.symbol")
assert result.is_valid()
assert not result.has_errors()
def test_integration_invalid_python_code(self):
"""Test integration with invalid Python code blocks."""
docstring = """
Function with broken Python examples.
Args:
name: The name parameter
Examples:
This example has a syntax error:
.. code-block:: python
def broken_example(
return greet("Alice")
"""
result = validate_docstring_text(docstring, "test.symbol")
assert not result.is_valid()
assert result.has_errors()
# Should have Python AST error
python_errors = [e for e in result.errors if "Python code block syntax error" in e]
assert len(python_errors) == 1
def test_integration_multiple_validation_rules(self):
"""Test that AST validation works alongside other validation rules."""
docstring = """
Function with multiple issues.
args: # Wrong case - should be "Args:"
name: The name parameter
Examples:
.. code-block:: python
def broken(
return "syntax error"
"""
result = validate_docstring_text(docstring, "test.symbol")
assert not result.is_valid()
# Should have both section header issues and Python syntax issues
all_messages = result.errors + result.warnings
# Check for both types of issues
has_section_issue = any(
"malformed section header" in msg.lower()
or "possible malformed section header" in msg.lower()
for msg in all_messages
)
has_python_issue = any(
"python code block syntax error" in msg.lower() for msg in all_messages
)
assert has_section_issue, f"Expected section header issue in: {all_messages}"
assert has_python_issue, f"Expected Python syntax issue in: {all_messages}"
def test_integration_python_and_rst_errors(self):
"""Test Python AST validation alongside RST syntax validation."""
docstring = """
Function with RST and Python issues.
.. code-block:: python
def broken_python(
return "syntax error"
"""
result = validate_docstring_text(docstring, "test.symbol")
assert not result.is_valid()
assert result.has_errors()
# Should have Python syntax error
error_text = " ".join(result.errors).lower()
assert "python code block syntax error" in error_text
| TestIntegrationWithValidationPipeline |
python | getsentry__sentry | tests/sentry/seer/autofix/test_autofix.py | {
"start": 26085,
"end": 30920
} | class ____(TestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
# Create events with real tag data
# Event 1: production environment with user_role admin
self.store_event(
data={
"fingerprint": ["group-1"],
"environment": "production",
"tags": {"user_role": "admin", "service": "api"},
"timestamp": before_now(minutes=1).isoformat(),
},
project_id=self.project.id,
)
# Event 2: production environment with user_role admin (duplicate to test counts)
event2 = self.store_event(
data={
"fingerprint": ["group-1"],
"environment": "production",
"tags": {"user_role": "admin", "service": "web"},
"timestamp": before_now(minutes=2).isoformat(),
},
project_id=self.project.id,
)
# Event 3: staging environment with user_role user
self.store_event(
data={
"fingerprint": ["group-1"],
"environment": "staging",
"tags": {"user_role": "user", "service": "api"},
"timestamp": before_now(minutes=3).isoformat(),
},
project_id=self.project.id,
)
# Event 4: development environment with user_role user
self.store_event(
data={
"fingerprint": ["group-1"],
"environment": "development",
"tags": {"user_role": "user", "service": "worker"},
"timestamp": before_now(minutes=4).isoformat(),
},
project_id=self.project.id,
)
self.group = event2.group
def test_get_all_tags_overview_basic(self) -> None:
"""Test basic functionality of getting all tags overview with real data."""
result = get_all_tags_overview(self.group)
assert result is not None
assert "tags_overview" in result
# Should have environment, user_role, and service tags, but not level since it's excluded
assert len(result["tags_overview"]) >= 3
# Find specific tags
tag_keys = {tag["key"]: tag for tag in result["tags_overview"]}
# Check environment tag (built-in Sentry tag)
assert "environment" in tag_keys
env_tag = tag_keys["environment"]
assert env_tag["name"] == "Environment"
assert env_tag["total_values"] == 4 # 4 events
# Should have production (2), staging (1), development (1)
env_values = {val["value"]: val for val in env_tag["top_values"]}
assert "production" in env_values
assert env_values["production"]["count"] == 2
assert env_values["production"]["percentage"] == "50%"
# Check custom tag
assert "user_role" in tag_keys
user_tag = tag_keys["user_role"]
assert user_tag["name"] == "User Role" # Should get proper label
assert user_tag["total_values"] == 4
user_values = {val["value"]: val for val in user_tag["top_values"]}
assert "admin" in user_values
assert "user" in user_values
assert user_values["admin"]["count"] == 2
assert user_values["user"]["count"] == 2
def test_get_all_tags_overview_percentage_calculation(self) -> None:
"""Test that percentage calculations work correctly."""
result = get_all_tags_overview(self.group)
assert result is not None
# Find environment tag (we know this exists from setUp)
env_tag = next(
(tag for tag in result["tags_overview"] if tag["key"] == "environment"), None
)
assert env_tag is not None
assert env_tag["total_values"] == 4 # 4 events from setUp
# Check that percentages add up correctly
env_values = {val["value"]: val for val in env_tag["top_values"]}
# Verify percentage calculation for known values
# Production should be 2/4 = 50%
assert "production" in env_values
production_val = env_values["production"]
assert production_val["count"] == 2
assert production_val["percentage"] == "50%"
# Development and staging should each be 1/4 = 25%
assert "development" in env_values
dev_val = env_values["development"]
assert dev_val["count"] == 1
assert dev_val["percentage"] == "25%"
assert "staging" in env_values
staging_val = env_values["staging"]
assert staging_val["count"] == 1
assert staging_val["percentage"] == "25%"
@requires_snuba
@pytest.mark.django_db
@with_feature("organizations:gen-ai-features")
@patch("sentry.seer.autofix.autofix.get_seer_org_acknowledgement", return_value=True)
| TestGetAllTagsOverview |
python | conda__conda | tests/conftest.py | {
"start": 4579,
"end": 9351
} | class ____:
@staticmethod
def single_platform_export(env: Environment) -> str:
return "\n".join(
(
"# This is a single-platform export",
f"name: {env.name}",
f"single-platform: {env.platform}",
"packages:",
*(f"- {pkg}" for pkg in env.requested_packages),
*(f"- {pkg}" for pkg in env.explicit_packages),
*(f"- pip::{pkg}" for pkg in env.external_packages.get("pip", [])),
)
)
@staticmethod
def multi_platform_export(envs: Iterable[Environment]) -> str:
envs = tuple(envs)
return "\n".join(
(
"# This is a multi-platform export",
f"name: {envs[0].name}",
"multi-platforms:",
*(f" - {env.platform}" for env in envs),
"packages:",
*(
f" - {pkg}"
for env in envs
for pkg in (
*env.requested_packages,
*env.explicit_packages,
*(
f"pip::{pkg}"
for pkg in env.external_packages.get("pip", [])
),
)
),
)
)
@plugins.hookimpl
def conda_environment_exporters(self) -> Iterable[CondaEnvironmentExporter]:
yield CondaEnvironmentExporter(
name="test-single-platform",
aliases=(),
default_filenames=(),
export=self.single_platform_export,
)
yield CondaEnvironmentExporter(
name="test-multi-platform",
aliases=(),
default_filenames=(),
multiplatform_export=self.multi_platform_export,
)
@pytest.fixture
def plugin_manager_with_exporters(
plugin_manager_with_reporter_backends: CondaPluginManager,
) -> CondaPluginManager:
plugin_manager_with_reporter_backends.load_plugins(
solvers,
*environment_exporters.plugins,
Exporters(),
)
plugin_manager_with_reporter_backends.load_entrypoints(APP_NAME)
return plugin_manager_with_reporter_backends
@pytest.fixture
def clear_conda_session_cache() -> Iterable[None]:
"""
We use this to clean up the class/function cache on various things in the
``conda.gateways.connection.session`` module.
"""
try:
del CondaSession._thread_local.sessions
except AttributeError:
pass
get_session.cache_clear()
yield
try:
del CondaSession._thread_local.sessions
except AttributeError:
pass
get_session.cache_clear()
@pytest.fixture
def clear_package_cache() -> Iterable[None]:
PackageCacheData.clear()
yield
PackageCacheData.clear()
@pytest.fixture(scope="function")
def plugin_config(mocker) -> tuple[type[Configuration], str]:
"""
Fixture to create a plugin configuration class that can be created and used in tests
"""
app_name = "TEST_APP_NAME"
class PluginTest(PluginConfig):
def get_descriptions(self) -> dict[str, str]:
return {"bar": "Test plugins.bar"}
PluginTest.add_plugin_setting("bar", PrimitiveParameter(""))
class MockContext(Configuration):
foo = ParameterLoader(PrimitiveParameter(""))
json = ParameterLoader(PrimitiveParameter(False))
def __init__(self, *args, **kwargs):
"""
Defines the bare minimum of context object properties to be compatible with the
rest of conda.
TODO: Depending on how this fixture is used, we may need to add more properties
"""
super().__init__(**kwargs)
self._set_env_vars(app_name)
self.no_plugins = False
self.log_level = logging.WARNING
self.active_prefix = ""
self.plugin_manager = mocker.MagicMock()
self.repodata_fns = ["repodata.json", "current_repodata.json"]
self.subdir = mocker.MagicMock()
@property
def plugins(self) -> PluginConfig:
return PluginTest(self.raw_data)
def get_descriptions(self) -> dict[str, str]:
return {
"foo": "Test foo",
"json": "Test json",
}
return MockContext, app_name
@pytest.fixture(scope="function")
def minimal_env(tmp_path: Path) -> Path:
"""
Provides a minimal environment that only contains the "magic" file identifying it as a
conda environment.
"""
meta_dir = tmp_path.joinpath("conda-meta")
meta_dir.mkdir()
(meta_dir / "history").touch()
return tmp_path
| Exporters |
python | docker__docker-py | docker/types/networks.py | {
"start": 2926,
"end": 4240
} | class ____(dict):
"""
Create an IPAM pool config dictionary to be added to the
``pool_configs`` parameter of
:py:class:`~docker.types.IPAMConfig`.
Args:
subnet (str): Custom subnet for this IPAM pool using the CIDR
notation. Defaults to ``None``.
iprange (str): Custom IP range for endpoints in this IPAM pool using
the CIDR notation. Defaults to ``None``.
gateway (str): Custom IP address for the pool's gateway.
aux_addresses (dict): A dictionary of ``key -> ip_address``
relationships specifying auxiliary addresses that need to be
allocated by the IPAM driver.
Example:
>>> ipam_pool = docker.types.IPAMPool(
subnet='124.42.0.0/16',
iprange='124.42.0.0/24',
gateway='124.42.0.254',
aux_addresses={
'reserved1': '124.42.1.1'
}
)
>>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool])
"""
def __init__(self, subnet=None, iprange=None, gateway=None,
aux_addresses=None):
self.update({
'Subnet': subnet,
'IPRange': iprange,
'Gateway': gateway,
'AuxiliaryAddresses': aux_addresses
})
| IPAMPool |
python | wandb__wandb | wandb/sdk/launch/runner/sagemaker_runner.py | {
"start": 4221,
"end": 15268
} | class ____(AbstractRunner):
"""Runner class, uses a project to create a SagemakerSubmittedRun."""
def __init__(
self,
api: Api,
backend_config: Dict[str, Any],
environment: AwsEnvironment,
registry: AbstractRegistry,
) -> None:
"""Initialize the SagemakerRunner.
Arguments:
api (Api): The API instance.
backend_config (Dict[str, Any]): The backend configuration.
environment (AwsEnvironment): The AWS environment.
Raises:
LaunchError: If the runner cannot be initialized.
"""
super().__init__(api, backend_config)
self.environment = environment
self.registry = registry
async def run(
self,
launch_project: LaunchProject,
image_uri: str,
) -> Optional[AbstractRun]:
"""Run a project on Amazon Sagemaker.
Arguments:
launch_project (LaunchProject): The project to run.
Returns:
Optional[AbstractRun]: The run instance.
Raises:
LaunchError: If the launch is unsuccessful.
"""
_logger.info("using AWSSagemakerRunner")
given_sagemaker_args = launch_project.resource_args.get("sagemaker")
if given_sagemaker_args is None:
raise LaunchError(
"No sagemaker args specified. Specify sagemaker args in resource_args"
)
default_output_path = self.backend_config.get("runner", {}).get(
"s3_output_path"
)
if default_output_path is not None and not default_output_path.startswith(
"s3://"
):
default_output_path = f"s3://{default_output_path}"
session = await self.environment.get_session()
client = await event_loop_thread_exec(session.client)("sts")
caller_id = client.get_caller_identity()
account_id = caller_id["Account"]
_logger.info(f"Using account ID {account_id}")
partition = await self.environment.get_partition()
role_arn = get_role_arn(
given_sagemaker_args, self.backend_config, account_id, partition
)
# Create a sagemaker client to launch the job.
sagemaker_client = session.client("sagemaker")
log_client = None
try:
log_client = session.client("logs")
except Exception as e:
wandb.termwarn(
f"Failed to connect to cloudwatch logs with error {str(e)}, logs will not be available"
)
# if the user provided the image they want to use, use that, but warn it won't have swappable artifacts
if (
given_sagemaker_args.get("AlgorithmSpecification", {}).get("TrainingImage")
is not None
):
sagemaker_args = build_sagemaker_args(
launch_project,
self._api,
role_arn,
launch_project.override_entrypoint,
launch_project.override_args,
MAX_ENV_LENGTHS[self.__class__.__name__],
given_sagemaker_args.get("AlgorithmSpecification", {}).get(
"TrainingImage"
),
default_output_path,
)
_logger.info(
f"Launching sagemaker job on user supplied image with args: {sagemaker_args}"
)
run = await launch_sagemaker_job(
launch_project, sagemaker_args, sagemaker_client, log_client
)
if self.backend_config[PROJECT_SYNCHRONOUS]:
await run.wait()
return run
_logger.info("Connecting to sagemaker client")
entry_point = (
launch_project.override_entrypoint or launch_project.get_job_entry_point()
)
command_args = []
if entry_point is not None:
command_args += entry_point.command
command_args += launch_project.override_args
if command_args:
command_str = " ".join(command_args)
wandb.termlog(
f"{LOG_PREFIX}Launching run on sagemaker with entrypoint: {command_str}"
)
else:
wandb.termlog(
f"{LOG_PREFIX}Launching run on sagemaker with user-provided entrypoint in image"
)
sagemaker_args = build_sagemaker_args(
launch_project,
self._api,
role_arn,
entry_point,
launch_project.override_args,
MAX_ENV_LENGTHS[self.__class__.__name__],
image_uri,
default_output_path,
)
_logger.info(f"Launching sagemaker job with args: {sagemaker_args}")
run = await launch_sagemaker_job(
launch_project, sagemaker_args, sagemaker_client, log_client
)
if self.backend_config[PROJECT_SYNCHRONOUS]:
await run.wait()
return run
def merge_image_uri_with_algorithm_specification(
algorithm_specification: Optional[Dict[str, Any]],
image_uri: Optional[str],
entrypoint_command: List[str],
args: Optional[List[str]],
) -> Dict[str, Any]:
"""Create an AWS AlgorithmSpecification.
AWS Sagemaker algorithms require a training image and an input mode. If the user
does not specify the specification themselves, define the spec minimally using these
two fields. Otherwise, if they specify the AlgorithmSpecification set the training
image if it is not set.
"""
if algorithm_specification is None:
algorithm_specification = {
"TrainingImage": image_uri,
"TrainingInputMode": "File",
}
else:
if image_uri:
algorithm_specification["TrainingImage"] = image_uri
if entrypoint_command:
algorithm_specification["ContainerEntrypoint"] = entrypoint_command
if args:
algorithm_specification["ContainerArguments"] = args
if algorithm_specification["TrainingImage"] is None:
raise LaunchError("Failed determine tag for training image")
return algorithm_specification
def build_sagemaker_args(
launch_project: LaunchProject,
api: Api,
role_arn: str,
entry_point: Optional[EntryPoint],
args: Optional[List[str]],
max_env_length: int,
image_uri: str,
default_output_path: Optional[str] = None,
) -> Dict[str, Any]:
sagemaker_args: Dict[str, Any] = {}
resource_args = launch_project.fill_macros(image_uri)
given_sagemaker_args: Optional[Dict[str, Any]] = resource_args.get("sagemaker")
if given_sagemaker_args is None:
raise LaunchError(
"No sagemaker args specified. Specify sagemaker args in resource_args"
)
if (
given_sagemaker_args.get("OutputDataConfig") is None
and default_output_path is not None
):
sagemaker_args["OutputDataConfig"] = {"S3OutputPath": default_output_path}
else:
sagemaker_args["OutputDataConfig"] = given_sagemaker_args.get(
"OutputDataConfig"
)
if sagemaker_args.get("OutputDataConfig") is None:
raise LaunchError(
"Sagemaker launcher requires an OutputDataConfig Sagemaker resource argument"
)
training_job_name = cast(
str, (given_sagemaker_args.get("TrainingJobName") or launch_project.run_id)
)
sagemaker_args["TrainingJobName"] = training_job_name
entry_cmd = entry_point.command if entry_point else []
sagemaker_args["AlgorithmSpecification"] = (
merge_image_uri_with_algorithm_specification(
given_sagemaker_args.get(
"AlgorithmSpecification",
given_sagemaker_args.get("algorithm_specification"),
),
image_uri,
entry_cmd,
args,
)
)
sagemaker_args["RoleArn"] = role_arn
camel_case_args = {
to_camel_case(key): item for key, item in given_sagemaker_args.items()
}
sagemaker_args = {
**camel_case_args,
**sagemaker_args,
}
if sagemaker_args.get("ResourceConfig") is None:
raise LaunchError(
"Sagemaker launcher requires a ResourceConfig resource argument"
)
if sagemaker_args.get("StoppingCondition") is None:
raise LaunchError(
"Sagemaker launcher requires a StoppingCondition resource argument"
)
given_env = given_sagemaker_args.get(
"Environment", sagemaker_args.get("environment", {})
)
calced_env = launch_project.get_env_vars_dict(api, max_env_length)
total_env = {**calced_env, **given_env}
sagemaker_args["Environment"] = total_env
# Add wandb tag
tags = sagemaker_args.get("Tags", [])
tags.append({"Key": "WandbRunId", "Value": launch_project.run_id})
sagemaker_args["Tags"] = tags
# remove args that were passed in for launch but not passed to sagemaker
sagemaker_args.pop("EcrRepoName", None)
sagemaker_args.pop("region", None)
sagemaker_args.pop("profile", None)
# clear the args that are None so they are not passed
filtered_args = {k: v for k, v in sagemaker_args.items() if v is not None}
return filtered_args
async def launch_sagemaker_job(
launch_project: LaunchProject,
sagemaker_args: Dict[str, Any],
sagemaker_client: "boto3.Client",
log_client: Optional["boto3.Client"] = None,
) -> SagemakerSubmittedRun:
training_job_name = sagemaker_args.get("TrainingJobName") or launch_project.run_id
create_training_job = event_loop_thread_exec(sagemaker_client.create_training_job)
resp = await create_training_job(**sagemaker_args)
if resp.get("TrainingJobArn") is None:
raise LaunchError("Failed to create training job when submitting to SageMaker")
run = SagemakerSubmittedRun(training_job_name, sagemaker_client, log_client)
wandb.termlog(
f"{LOG_PREFIX}Run job submitted with arn: {resp.get('TrainingJobArn')}"
)
url = f"https://{sagemaker_client.meta.region_name}.console.aws.amazon.com/sagemaker/home?region={sagemaker_client.meta.region_name}#/jobs/{training_job_name}"
wandb.termlog(f"{LOG_PREFIX}See training job status at: {url}")
return run
def get_role_arn(
sagemaker_args: Dict[str, Any],
backend_config: Dict[str, Any],
account_id: str,
partition: str,
) -> str:
"""Get the role arn from the sagemaker args or the backend config."""
role_arn = sagemaker_args.get("RoleArn") or sagemaker_args.get("role_arn")
if role_arn is None:
role_arn = backend_config.get("runner", {}).get("role_arn")
if role_arn is None or not isinstance(role_arn, str):
raise LaunchError(
"AWS sagemaker require a string RoleArn set this by adding a `RoleArn` key to the sagemaker"
"field of resource_args"
)
if role_arn.startswith(f"arn:{partition}:iam::"):
return role_arn # type: ignore
return f"arn:{partition}:iam::{account_id}:role/{role_arn}"
| SageMakerRunner |
python | huggingface__transformers | src/transformers/models/fsmt/modeling_fsmt.py | {
"start": 7634,
"end": 10098
} | class ____(PreTrainedModel):
config: FSMTConfig
base_model_prefix = "model"
@torch.no_grad()
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
init.normal_(module.weight, mean=0.0, std=std)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, SinusoidalPositionalEmbedding):
weight = module.get_embedding(*module.weight.shape, module.padding_idx)
init.copy_(module.weight, weight)
elif isinstance(module, nn.Embedding):
init.normal_(module.weight, mean=0.0, std=std)
# Here we need the check explicitly, as we slice the weight in the `zeros_` call, so it looses the flag
if module.padding_idx is not None and not getattr(module.weight, "_is_hf_initialized", False):
init.zeros_(module.weight[module.padding_idx])
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {
"attention_mask": input_ids.ne(pad_token),
"input_ids": input_ids,
}
return dummy_inputs
def _make_linear_from_emb(emb):
vocab_size, emb_size = emb.weight.shape
lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
lin_layer.weight.data = emb.weight.data
return lin_layer
# Helper Functions, mostly for making masks
def _check_shapes(shape_1, shape2):
if shape_1 != shape2:
raise AssertionError(f"shape mismatch: {shape_1} != {shape2}")
def shift_tokens_right(input_ids, pad_token_id):
"""Shift input ids one token to the right, and wrap the last non pad token (usually <eos>)."""
# replace possible -100 values in labels by `pad_token_id`
input_ids.masked_fill_(input_ids == -100, pad_token_id)
prev_output_tokens = input_ids.clone()
index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()
prev_output_tokens[:, 1:] = input_ids[:, :-1]
return prev_output_tokens
def make_padding_mask(input_ids, padding_idx=1):
"""True for pad tokens"""
padding_mask = input_ids.eq(padding_idx)
if not padding_mask.any():
padding_mask = None
return padding_mask
# Helper Modules
| PretrainedFSMTModel |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-chat/components.py | {
"start": 386,
"end": 1167
} | class ____(RecordExtractor):
"""
Unnesting nested bans: `visitor`, `ip_address`.
"""
def extract_records(
self,
response: requests.Response,
) -> Iterable[Mapping[str, Any]]:
response_data = response.json()
ip_address: List[Mapping[str, Any]] = response_data.get("ip_address", [])
visitor: List[Mapping[str, Any]] = response_data.get("visitor", [])
bans = ip_address + visitor
bans = sorted(
bans,
key=lambda x: date_time_parser.parse(date=x["created_at"], format="%Y-%m-%dT%H:%M:%SZ")
if x["created_at"]
else date_time_parser.parse(date=DatetimeParser._UNIX_EPOCH, format="%Y-%m-%dT%H:%M:%SZ"),
)
yield from bans
| ZendeskChatBansRecordExtractor |
python | django__django | tests/forms_tests/tests/test_formsets.py | {
"start": 72214,
"end": 76962
} | class ____(SimpleTestCase):
def test_no_data_error(self):
formset = ArticleFormSet({})
self.assertIs(formset.is_valid(), False)
self.assertEqual(
formset.non_form_errors(),
[
"ManagementForm data is missing or has been tampered with. "
"Missing fields: form-TOTAL_FORMS, form-INITIAL_FORMS. "
"You may need to file a bug report if the issue persists.",
],
)
self.assertEqual(formset.errors, [])
# Can still render the formset.
self.assertHTMLEqual(
str(formset),
'<ul class="errorlist nonfield">'
"<li>(Hidden field TOTAL_FORMS) This field is required.</li>"
"<li>(Hidden field INITIAL_FORMS) This field is required.</li>"
"</ul>"
"<div>"
'<input type="hidden" name="form-TOTAL_FORMS" id="id_form-TOTAL_FORMS">'
'<input type="hidden" name="form-INITIAL_FORMS" id="id_form-INITIAL_FORMS">'
'<input type="hidden" name="form-MIN_NUM_FORMS" id="id_form-MIN_NUM_FORMS">'
'<input type="hidden" name="form-MAX_NUM_FORMS" id="id_form-MAX_NUM_FORMS">'
"</div>\n",
)
def test_management_form_invalid_data(self):
data = {
"form-TOTAL_FORMS": "two",
"form-INITIAL_FORMS": "one",
}
formset = ArticleFormSet(data)
self.assertIs(formset.is_valid(), False)
self.assertEqual(
formset.non_form_errors(),
[
"ManagementForm data is missing or has been tampered with. "
"Missing fields: form-TOTAL_FORMS, form-INITIAL_FORMS. "
"You may need to file a bug report if the issue persists.",
],
)
self.assertEqual(formset.errors, [])
# Can still render the formset.
self.assertHTMLEqual(
str(formset),
'<ul class="errorlist nonfield">'
"<li>(Hidden field TOTAL_FORMS) Enter a whole number.</li>"
"<li>(Hidden field INITIAL_FORMS) Enter a whole number.</li>"
"</ul>"
"<div>"
'<input type="hidden" name="form-TOTAL_FORMS" value="two" '
'id="id_form-TOTAL_FORMS">'
'<input type="hidden" name="form-INITIAL_FORMS" value="one" '
'id="id_form-INITIAL_FORMS">'
'<input type="hidden" name="form-MIN_NUM_FORMS" id="id_form-MIN_NUM_FORMS">'
'<input type="hidden" name="form-MAX_NUM_FORMS" id="id_form-MAX_NUM_FORMS">'
"</div>\n",
)
def test_customize_management_form_error(self):
formset = ArticleFormSet(
{}, error_messages={"missing_management_form": "customized"}
)
self.assertIs(formset.is_valid(), False)
self.assertEqual(formset.non_form_errors(), ["customized"])
self.assertEqual(formset.errors, [])
def test_with_management_data_attrs_work_fine(self):
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "0",
}
formset = ArticleFormSet(data)
self.assertEqual(0, formset.initial_form_count())
self.assertEqual(1, formset.total_form_count())
self.assertTrue(formset.is_bound)
self.assertTrue(formset.forms[0].is_bound)
self.assertTrue(formset.is_valid())
self.assertTrue(formset.forms[0].is_valid())
self.assertEqual([{}], formset.cleaned_data)
def test_form_errors_are_caught_by_formset(self):
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-0-title": "Test",
"form-0-pub_date": "1904-06-16",
"form-1-title": "Test",
"form-1-pub_date": "", # <-- this date is missing but required
}
formset = ArticleFormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(
[{}, {"pub_date": ["This field is required."]}], formset.errors
)
def test_empty_forms_are_unbound(self):
data = {
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "0",
"form-0-title": "Test",
"form-0-pub_date": "1904-06-16",
}
unbound_formset = ArticleFormSet()
bound_formset = ArticleFormSet(data)
empty_forms = [unbound_formset.empty_form, bound_formset.empty_form]
# Empty forms should be unbound
self.assertFalse(empty_forms[0].is_bound)
self.assertFalse(empty_forms[1].is_bound)
# The empty forms should be equal.
self.assertHTMLEqual(empty_forms[0].as_p(), empty_forms[1].as_p())
@jinja2_tests
| TestIsBoundBehavior |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/backfill.py | {
"start": 8356,
"end": 8642
} | class ____(graphene.ObjectType):
start = graphene.NonNull(graphene.String)
end = graphene.NonNull(graphene.String)
class Meta:
name = "PartitionRange"
def __init__(self, start: str, end: str):
super().__init__(start=start, end=end)
| GraphenePartitionRange |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1029653,
"end": 1030123
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of UpdateEnvironment"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "environment")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
environment = sgqlc.types.Field("Environment", graphql_name="environment")
"""The updated environment."""
| UpdateEnvironmentPayload |
python | openai__openai-python | src/openai/types/fine_tuning/job_list_events_params.py | {
"start": 200,
"end": 400
} | class ____(TypedDict, total=False):
after: str
"""Identifier for the last event from the previous pagination request."""
limit: int
"""Number of events to retrieve."""
| JobListEventsParams |
python | google__jax | jax/_src/tpu_custom_call.py | {
"start": 4019,
"end": 4358
} | class ____(enum.Enum):
# No side effects, can be deduplicated / removed if unused.
PURE = "pure"
# Cannot be deduplicated, but can be removed if unused.
DATAFLOW_SIDE_EFFECTING = "dataflow_side_effecting"
# Cannot be deduplicated or removed.
SIDE_EFFECTING = "side_effecting"
@dataclasses.dataclass(frozen=True)
| TpuSideEffectType |
python | jmcnamara__XlsxWriter | xlsxwriter/test/vml/test_vml02.py | {
"start": 359,
"end": 2891
} | class ____(unittest.TestCase):
"""
Test assembling a complete Vml file.
"""
def test_assemble_xml_file(self):
"""Test writing a vml with no cell data."""
self.maxDiff = None
fh = StringIO()
vml = Vml()
vml._set_filehandle(fh)
button = ButtonType(1, 2, 17, 64, 1)
button.vertices = [2, 1, 0, 0, 3, 2, 0, 0, 128, 20, 64, 20]
vml._assemble_xml_file(
1,
1024,
None,
[button],
)
exp = _vml_to_list(
"""
<xml xmlns:v="urn:schemas-microsoft-com:vml" xmlns:o="urn:schemas-microsoft-com:office:office" xmlns:x="urn:schemas-microsoft-com:office:excel">
<o:shapelayout v:ext="edit">
<o:idmap v:ext="edit" data="1"/>
</o:shapelayout>
<v:shapetype id="_x0000_t201" coordsize="21600,21600" o:spt="201" path="m,l,21600r21600,l21600,xe">
<v:stroke joinstyle="miter"/>
<v:path shadowok="f" o:extrusionok="f" strokeok="f" fillok="f" o:connecttype="rect"/>
<o:lock v:ext="edit" shapetype="t"/>
</v:shapetype>
<v:shape id="_x0000_s1025" type="#_x0000_t201" style="position:absolute;margin-left:96pt;margin-top:15pt;width:48pt;height:15pt;z-index:1;mso-wrap-style:tight" o:button="t" fillcolor="buttonFace [67]" strokecolor="windowText [64]" o:insetmode="auto">
<v:fill color2="buttonFace [67]" o:detectmouseclick="t"/>
<o:lock v:ext="edit" rotation="t"/>
<v:textbox style="mso-direction-alt:auto" o:singleclick="f">
<div style="text-align:center">
<font face="Calibri" size="220" color="#000000">Button 1</font>
</div>
</v:textbox>
<x:ClientData ObjectType="Button">
<x:Anchor>2, 0, 1, 0, 3, 0, 2, 0</x:Anchor>
<x:PrintObject>False</x:PrintObject>
<x:AutoFill>False</x:AutoFill>
<x:FmlaMacro>[0]!Button1_Click</x:FmlaMacro>
<x:TextHAlign>Center</x:TextHAlign>
<x:TextVAlign>Center</x:TextVAlign>
</x:ClientData>
</v:shape>
</xml>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleVml |
python | pydantic__pydantic | pydantic-core/tests/serializers/test_pickling.py | {
"start": 2016,
"end": 2655
} | class ____:
__pydantic_serializer__: SchemaSerializer
__pydantic_complete__ = True
def test_schema_serializer_not_reused_when_unpickling() -> None:
s = SchemaSerializer(
core_schema.model_schema(
cls=Model,
schema=core_schema.model_fields_schema(fields={}, model_name='Model'),
config={'title': 'Model'},
ref='Model:123',
)
)
Model.__pydantic_serializer__ = s
assert 'Prebuilt' not in str(Model.__pydantic_serializer__)
reconstructed = pickle.loads(pickle.dumps(Model.__pydantic_serializer__))
assert 'Prebuilt' not in str(reconstructed)
| Model |
python | PrefectHQ__prefect | tests/cli/test_work_pool.py | {
"start": 29614,
"end": 31458
} | class ____:
async def test_provision_infra(self, monkeypatch, push_work_pool, prefect_client):
client_res = await prefect_client.read_work_pool(push_work_pool.name)
assert client_res.base_job_template != FAKE_DEFAULT_BASE_JOB_TEMPLATE
mock_provision = AsyncMock()
class MockProvisioner:
def __init__(self):
self._console = None
@property
def console(self):
return self._console
@console.setter
def console(self, value):
self._console = value
async def provision(self, *args, **kwargs):
await mock_provision(*args, **kwargs)
return FAKE_DEFAULT_BASE_JOB_TEMPLATE
monkeypatch.setattr(
"prefect.infrastructure.provisioners.get_infrastructure_provisioner_for_work_pool_type",
lambda *args: MockProvisioner(),
)
res = await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool provision-infra {push_work_pool.name}",
)
assert res.exit_code == 0
assert mock_provision.await_count == 1
# ensure work pool base job template was updated
client_res = await prefect_client.read_work_pool(push_work_pool.name)
assert client_res.base_job_template == FAKE_DEFAULT_BASE_JOB_TEMPLATE
async def test_provision_infra_unsupported(self, push_work_pool):
res = await run_sync_in_worker_thread(
invoke_and_assert,
f"work-pool provision-infrastructure {push_work_pool.name}",
)
assert res.exit_code == 0
assert (
"Automatic infrastructure provisioning is not supported for"
" 'push-work-pool:push' work pools." in res.output
)
| TestProvisionInfrastructure |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_poly_persistence.py | {
"start": 891,
"end": 2884
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global companies, people, engineers, managers, boss
companies = Table(
"companies",
metadata,
Column(
"company_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("name", String(50)),
)
people = Table(
"people",
metadata,
Column(
"person_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column(
"company_id",
Integer,
ForeignKey("companies.company_id"),
nullable=False,
),
Column("name", String(50)),
Column("type", String(30)),
)
engineers = Table(
"engineers",
metadata,
Column(
"person_id",
Integer,
ForeignKey("people.person_id"),
primary_key=True,
),
Column("status", String(30)),
Column("engineer_name", String(50)),
Column("primary_language", String(50)),
)
managers = Table(
"managers",
metadata,
Column(
"person_id",
Integer,
ForeignKey("people.person_id"),
primary_key=True,
),
Column("status", String(30)),
Column("manager_name", String(50)),
)
boss = Table(
"boss",
metadata,
Column(
"boss_id",
Integer,
ForeignKey("managers.person_id"),
primary_key=True,
),
Column("golf_swing", String(30)),
)
| PolymorphTest |
python | ansible__ansible | lib/ansible/plugins/doc_fragments/checksum_common.py | {
"start": 185,
"end": 995
} | class ____(object):
DOCUMENTATION = r"""
options:
checksum_algorithm:
description:
- Algorithm to determine checksum of file.
- Will throw an error if the host is unable to use specified algorithm.
- The remote host has to support the hashing method specified, V(md5)
can be unavailable if the host is FIPS-140 compliant.
- Availability might be restricted by the target system, for example FIPS systems won't allow md5 use
type: str
choices: [ md5, sha1, sha224, sha256, sha384, sha512 ]
default: sha1
aliases: [ checksum, checksum_algo ]
version_added: "2.0"
get_checksum:
description:
- Whether to return a checksum of the file.
type: bool
default: yes
"""
| ModuleDocFragment |
python | django__django | tests/model_fields/test_integerfield.py | {
"start": 8924,
"end": 9093
} | class ____(IntegerFieldTests):
model = SmallIntegerModel
documented_range = (-32768, 32767)
rel_db_type_class = models.SmallIntegerField
| SmallIntegerFieldTests |
python | PyCQA__pylint | tests/functional/u/unsubscriptable_value.py | {
"start": 1712,
"end": 1800
} | class ____(type):
def __getitem__(cls, key):
return key + key
| MetaSubscriptable |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/context/hook.py | {
"start": 7580,
"end": 12370
} | class ____(HookContext):
def __init__(
self,
resources: Mapping[str, Any],
op: Optional[Union[OpDefinition, PendingNodeInvocation]],
run_id: Optional[str],
job_name: Optional[str],
op_exception: Optional[Exception],
instance: Optional["DagsterInstance"],
):
from dagster._core.execution.build_resources import (
build_resources,
wrap_resources_for_execution,
)
from dagster._core.execution.context_creation_job import initialize_console_manager
self._op = None
if op is not None:
@graph(name="hook_context_container")
def temp_graph():
op()
self._op = temp_graph.nodes[0]
# Open resource context manager
self._resource_defs = wrap_resources_for_execution(resources)
self._resources_cm = build_resources(self._resource_defs)
self._resources = self._resources_cm.__enter__()
self._resources_contain_cm = isinstance(self._resources, IContainsGenerator)
self._run_id = run_id
self._job_name = job_name
self._op_exception = op_exception
self._instance = instance
self._log = initialize_console_manager(None)
self._cm_scope_entered = False
def __enter__(self):
self._cm_scope_entered = True
return self
def __exit__(self, *exc: Any):
self._resources_cm.__exit__(*exc)
def __del__(self):
if self._resources_contain_cm and not self._cm_scope_entered:
self._resources_cm.__exit__(None, None, None)
@property
def job_name(self) -> str:
return _check_property_on_test_context(
self, attr_str="_job_name", user_facing_name="job_name", param_on_builder="job_name"
)
@property
def run_id(self) -> str:
return _check_property_on_test_context(
self, attr_str="_run_id", user_facing_name="run_id", param_on_builder="run_id"
)
@property
def hook_def(self) -> HookDefinition:
raise DagsterInvalidPropertyError(_property_msg("hook_def", "property"))
@property
def op(self) -> Node:
return _check_property_on_test_context(
self, attr_str="_op", user_facing_name="op", param_on_builder="op"
)
@property
def step(self) -> ExecutionStep:
raise DagsterInvalidPropertyError(_property_msg("step", "property"))
@property
def step_key(self) -> str:
raise DagsterInvalidPropertyError(_property_msg("step_key", "property"))
@property
def required_resource_keys(self) -> set[str]:
raise DagsterInvalidPropertyError(_property_msg("hook_def", "property"))
@property
def resources(self) -> "Resources":
if self._resources_contain_cm and not self._cm_scope_entered:
raise DagsterInvariantViolationError(
"At least one provided resource is a generator, but attempting to access "
"resources outside of context manager scope. You can use the following syntax to "
"open a context manager: `with build_hook_context(...) as context:`"
)
return self._resources
@property
def op_config(self) -> Any:
raise DagsterInvalidPropertyError(_property_msg("op_config", "property"))
@property
def log(self) -> DagsterLogManager:
return self._log
@property
def op_exception(self) -> Optional[BaseException]:
return self._op_exception
@property
def op_output_values(self) -> Mapping[str, Union[Any, Mapping[str, Any]]]:
"""The computed output values.
Returns a dictionary where keys are output names and the values are:
* the output values in the normal case
* a dictionary from mapping key to corresponding value in the mapped case
"""
raise DagsterInvalidPropertyError(_property_msg("op_output_values", "method"))
@property
def op_output_metadata(self) -> Mapping[str, Union[Any, Mapping[str, Any]]]:
"""The applied output metadata.
Returns a dictionary where keys are output names and the values are:
* the applied output metadata in the normal case
* a dictionary from mapping key to corresponding metadata in the mapped case
"""
raise DagsterInvalidPropertyError(_property_msg("op_output_metadata", "method"))
@property
def instance(self) -> "DagsterInstance":
if not self._instance:
raise DagsterInvariantViolationError(
"Tried to access the HookContext instance, but no instance was provided to"
" `build_hook_context`."
)
return self._instance
| UnboundHookContext |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/integration/cloud/nios.py | {
"start": 288,
"end": 1986
} | class ____(CloudProvider):
"""Nios plugin. Sets up NIOS mock server for tests."""
# Default image to run the nios simulator.
#
# The simulator must be pinned to a specific version
# to guarantee CI passes with the version used.
#
# It's source source itself resides at:
# https://github.com/ansible/nios-test-container
DOCKER_IMAGE = 'quay.io/ansible/nios-test-container:7.0.0'
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.__container_from_env = os.environ.get('ANSIBLE_NIOSSIM_CONTAINER')
"""
Overrides target container, might be used for development.
Use ANSIBLE_NIOSSIM_CONTAINER=whatever_you_want if you want
to use other image. Omit/empty otherwise.
"""
self.image = self.__container_from_env or self.DOCKER_IMAGE
self.uses_docker = True
def setup(self) -> None:
"""Setup cloud resource before delegation and reg cleanup callback."""
super().setup()
if self._use_static_config():
self._setup_static()
else:
self._setup_dynamic()
def _setup_dynamic(self) -> None:
"""Spawn a NIOS simulator within docker container."""
nios_port = 443
ports = [
nios_port,
]
descriptor = run_support_container(
self.args,
self.platform,
self.image,
'nios-simulator',
ports,
)
if not descriptor:
return
self._set_cloud_config('NIOS_HOST', descriptor.name)
def _setup_static(self) -> None:
raise NotImplementedError()
| NiosProvider |
python | marshmallow-code__marshmallow | tests/test_schema.py | {
"start": 49150,
"end": 51311
} | class ____:
def test_errors_are_cleared_after_loading_collection(self):
def always_fail(val):
raise ValidationError("lol")
class MySchema(Schema):
foo = fields.Str(validate=always_fail)
schema = MySchema()
with pytest.raises(ValidationError) as excinfo:
schema.load([{"foo": "bar"}, {"foo": "baz"}], many=True)
errors = excinfo.value.messages
assert len(errors[0]["foo"]) == 1
assert len(errors[1]["foo"]) == 1
with pytest.raises(ValidationError) as excinfo:
schema.load({"foo": "bar"})
errors = excinfo.value.messages
assert len(errors["foo"]) == 1
def test_raises_error_with_list(self):
def validator(val):
raise ValidationError(["err1", "err2"])
class MySchema(Schema):
foo = fields.Raw(validate=validator)
s = MySchema()
errors = s.validate({"foo": 42})
assert errors["foo"] == ["err1", "err2"]
# https://github.com/marshmallow-code/marshmallow/issues/110
def test_raises_error_with_dict(self):
def validator(val):
raise ValidationError({"code": "invalid_foo"})
class MySchema(Schema):
foo = fields.Raw(validate=validator)
s = MySchema()
errors = s.validate({"foo": 42})
assert errors["foo"] == [{"code": "invalid_foo"}]
def test_ignored_if_not_in_only(self):
class MySchema(Schema):
a = fields.Raw()
b = fields.Raw()
@validates("a")
def validate_a(self, val, **kwargs):
raise ValidationError({"code": "invalid_a"})
@validates("b")
def validate_b(self, val, **kwargs):
raise ValidationError({"code": "invalid_b"})
s = MySchema(only=("b",))
errors = s.validate({"b": "data"})
assert errors == {"b": {"code": "invalid_b"}}
def test_schema_repr():
class MySchema(Schema):
name = fields.String()
ser = MySchema(many=True)
rep = repr(ser)
assert "MySchema" in rep
assert "many=True" in rep
| TestFieldValidation |
python | huggingface__transformers | src/transformers/models/gpt2/modeling_gpt2.py | {
"start": 46450,
"end": 52153
} | class ____(GPT2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = GPT2Model(config)
self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, SequenceClassifierOutputWithPast]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size, sequence_length = input_ids.shape[:2]
else:
batch_size, sequence_length = inputs_embeds.shape[:2]
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
if self.config.pad_token_id is None:
last_non_pad_token = -1
elif input_ids is not None:
# To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
else:
last_non_pad_token = -1
logger.warning_once(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutputWithPast(
loss=loss,
logits=pooled_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@auto_docstring
| GPT2ForSequenceClassification |
python | django__django | tests/inspectdb/models.py | {
"start": 5304,
"end": 5478
} | class ____(models.Model):
pk = models.CompositePrimaryKey("column_1", "column_2")
column_1 = models.IntegerField()
column_2 = models.IntegerField()
| CompositePKModel |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 307557,
"end": 312894
} | class ____(StatNode):
# raise statement
#
# exc_type ExprNode or None
# exc_value ExprNode or None
# exc_tb ExprNode or None
# cause ExprNode or None
#
# set in FlowControl
# in_try_block bool
child_attrs = ["exc_type", "exc_value", "exc_tb", "cause"]
is_terminator = True
builtin_exc_name = None
wrap_tuple_value = False
in_try_block = False
def analyse_expressions(self, env):
if self.exc_type:
exc_type = self.exc_type.analyse_types(env)
self.exc_type = exc_type.coerce_to_pyobject(env)
if self.exc_value:
exc_value = self.exc_value.analyse_types(env)
if self.wrap_tuple_value:
if exc_value.type is Builtin.tuple_type or not exc_value.type.is_builtin_type:
# prevent tuple values from being interpreted as argument value tuples
from .ExprNodes import TupleNode
exc_value = TupleNode(exc_value.pos, args=[exc_value.coerce_to_pyobject(env)], slow=True)
exc_value = exc_value.analyse_types(env, skip_children=True)
self.exc_value = exc_value.coerce_to_pyobject(env)
if self.exc_tb:
exc_tb = self.exc_tb.analyse_types(env)
self.exc_tb = exc_tb.coerce_to_pyobject(env)
if self.cause:
cause = self.cause.analyse_types(env)
self.cause = cause.coerce_to_pyobject(env)
# special cases for builtin exceptions
if self.exc_type and not self.exc_value and not self.exc_tb:
exc = self.exc_type
from . import ExprNodes
if (isinstance(exc, ExprNodes.SimpleCallNode) and
not (exc.args or (exc.arg_tuple is not None and exc.arg_tuple.args))):
exc = exc.function # extract the exception type
if exc.is_name and exc.entry.is_builtin:
from . import Symtab
self.builtin_exc_name = exc.name
if self.builtin_exc_name == 'MemoryError':
self.exc_type = None # has a separate implementation
elif (self.builtin_exc_name == 'StopIteration' and
env.is_local_scope and env.name == "__next__" and
env.parent_scope and env.parent_scope.is_c_class_scope and
not self.in_try_block):
# tp_iternext is allowed to return NULL without raising StopIteration.
# For the sake of simplicity, only allow this to happen when not in
# a try block
self.exc_type = None
return self
nogil_check = Node.gil_error
gil_message = "Raising exception"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
if self.builtin_exc_name == 'MemoryError':
code.putln('PyErr_NoMemory(); %s' % code.error_goto(self.pos))
return
elif self.builtin_exc_name == 'StopIteration' and not self.exc_type:
code.putln('%s = 1;' % Naming.error_without_exception_cname)
code.putln('%s;' % code.error_goto(None))
code.funcstate.error_without_exception = True
return
if self.exc_type:
self.exc_type.generate_evaluation_code(code)
type_code = self.exc_type.py_result()
if self.exc_type.is_name:
code.globalstate.use_entry_utility_code(self.exc_type.entry)
else:
type_code = "0"
if self.exc_value:
self.exc_value.generate_evaluation_code(code)
value_code = self.exc_value.py_result()
else:
value_code = "0"
if self.exc_tb:
self.exc_tb.generate_evaluation_code(code)
tb_code = self.exc_tb.py_result()
else:
tb_code = "0"
if self.cause:
self.cause.generate_evaluation_code(code)
cause_code = self.cause.py_result()
else:
cause_code = "0"
code.globalstate.use_utility_code(raise_utility_code)
code.putln(
"__Pyx_Raise(%s, %s, %s, %s);" % (
type_code,
value_code,
tb_code,
cause_code))
for obj in (self.exc_type, self.exc_value, self.exc_tb, self.cause):
if obj:
obj.generate_disposal_code(code)
obj.free_temps(code)
code.putln(
code.error_goto(self.pos))
def generate_function_definitions(self, env, code):
if self.exc_type is not None:
self.exc_type.generate_function_definitions(env, code)
if self.exc_value is not None:
self.exc_value.generate_function_definitions(env, code)
if self.exc_tb is not None:
self.exc_tb.generate_function_definitions(env, code)
if self.cause is not None:
self.cause.generate_function_definitions(env, code)
def annotate(self, code):
if self.exc_type:
self.exc_type.annotate(code)
if self.exc_value:
self.exc_value.annotate(code)
if self.exc_tb:
self.exc_tb.annotate(code)
if self.cause:
self.cause.annotate(code)
| RaiseStatNode |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/tests/common/test_connection.py | {
"start": 2484,
"end": 8408
} | class ____:
"""Tests for verifying the database connection and required extensions.
These tests exercise ``check_connection`` with various mocked cursor
responses to validate behavior for installed extensions, missing
extensions, version mismatches, and broken cursors.
"""
def test_it_works(self, connection: Connection) -> None:
"""Ensure ``check_connection`` returns None on a healthy connection."""
assert check_connection(connection) is None
@pytest.mark.parametrize(
["extension", "mock_cursor", "expected_result"],
[
(
Extension(ext_name="test_ext", ext_version="1.0", schema_name="public"),
MockCursor(
broken=False,
response={
"ext_name": "test_ext",
"ext_version": "1.0",
"schema_name": "public",
},
),
nullcontext(None),
),
(
Extension(ext_name="test_ext", ext_version="1.0", schema_name="public"),
MockCursor(broken=True, response=None),
pytest.raises(AssertionError, match="Connection check failed"),
),
(
Extension(ext_name="test_ext", ext_version="1.0", schema_name="public"),
MockCursor(broken=False, response=None),
pytest.raises(
RuntimeError,
match="Required extension 'test_ext' is not installed.",
),
),
(
Extension(ext_name="test_ext", ext_version="1.0", schema_name="public"),
MockCursor(broken=False, response={"ext_version": "wrong_version"}),
pytest.raises(
RuntimeError,
match="Required extension 'test_ext' version mismatch: expected 1.0, got wrong_version.",
),
),
(
Extension(ext_name="test_ext", ext_version="1.0", schema_name="public"),
MockCursor(
broken=False,
response={"ext_version": "1.0", "schema_name": "wrong_schema"},
),
pytest.raises(
RuntimeError,
match="Required extension 'test_ext' is not installed in the expected schema: expected public, got wrong_schema.",
),
),
],
ids=[
"extension-installed",
"broken-cursor",
"extension-not-installed",
"version-mismatch",
"schema-mismatch",
],
indirect=["mock_cursor"],
)
def test_mock_it_works(
self,
connection: Connection,
extension: Extension,
mock_cursor,
expected_result: nullcontext | pytest.RaisesExc,
) -> None:
"""Run parameterized checks of ``check_connection`` using mocked cursors.
Parameterization covers installed extension, broken cursor,
missing extension, version mismatch, and schema mismatch cases.
"""
with expected_result as e:
assert check_connection(connection, required_extensions=[extension]) == e
@pytest.fixture
def extension_creatable(
    connection: Connection, request: pytest.FixtureRequest
) -> Generator[Extension, Any, None]:
    """Fixture that attempts to create (and later drop) a DB extension.

    Uses the provided ``Extension`` instance via ``request.param`` and
    will skip the test if creation fails. After the test, the extension
    is dropped if it was not previously installed.
    """
    assert isinstance(request.param, Extension), "Expected an Extension instance."
    ext_already_installed = False
    with connection.cursor() as cursor:
        # Query pg_extension first so teardown knows whether the extension
        # pre-existed and must be left in place.
        cursor.execute(
            sql.SQL(
                """
                select extname, extversion
                from pg_extension
                where extname = %(ext_name)s
                """
            ),
            {"ext_name": request.param.ext_name},
        )
        result = cursor.fetchone()
        ext_already_installed = result is not None
        try:
            # Build CREATE EXTENSION dynamically: each optional clause
            # (schema / version / cascade) collapses to an empty SQL
            # fragment when the corresponding Extension field is falsy.
            # Identifiers and the version literal are composed via
            # psycopg's sql module, never by string interpolation.
            cursor.execute(
                sql.SQL(
                    """
                    create extension if not exists {ext_name}
                    with {schema_expr}
                    {version_expr}
                    {cascade_expr}
                    """
                ).format(
                    ext_name=sql.Identifier(request.param.ext_name),
                    schema_expr=sql.SQL("schema {schema_name}").format(
                        schema_name=sql.Identifier(request.param.schema_name)
                    )
                    if request.param.schema_name
                    else sql.SQL(""),
                    version_expr=sql.SQL("version {ext_version}").format(
                        ext_version=sql.Literal(request.param.ext_version)
                    )
                    if request.param.ext_version
                    else sql.SQL(""),
                    cascade_expr=sql.SQL("cascade")
                    if request.param.cascade
                    else sql.SQL(""),
                )
            )
        except Exception as e:
            # Creation commonly fails when the extension is not available on
            # the server; skip the test instead of failing it.
            pytest.skip(
                reason=f"Extension {request.param.ext_name} could not be created: {e}"
            )
    yield request.param
    # Teardown: only drop what this fixture itself created.
    if not ext_already_installed:
        with connection.cursor() as cursor:
            cursor.execute(
                sql.SQL(
                    """
                    drop extension if exists {ext_name}
                    """
                ).format(
                    ext_name=sql.Identifier(request.param.ext_name),
                )
            )
| TestCheckConnection |
python | joke2k__faker | faker/providers/company/ru_RU/__init__.py | {
"start": 514,
"end": 32960
} | class ____(CompanyProvider):
formats = (
"{{company_prefix}} «{{last_name}}»",
"{{company_prefix}} «{{last_name}} {{last_name}}»",
"{{company_prefix}} «{{last_name}}-{{last_name}}»",
"{{company_prefix}} «{{last_name}}, {{last_name}} и {{last_name}}»",
"{{last_name}} {{company_suffix}}",
"{{large_company}}",
)
company_prefixes = (
"РАО",
"АО",
"ИП",
"НПО",
"ЗАО",
"ООО",
"ОАО",
)
company_suffixes = (
"Инк",
"Инкорпорэйтед",
"и партнеры",
"Групп",
"Лтд",
"Лимитед",
)
# Source: https://www.rbc.ru/rbc500/
large_companies = (
"Газпром",
"ЛУКОЙЛ",
"Роснефть",
"Сбербанк России",
"Российские железные дороги",
"Ростех",
"Сургутнефтегаз",
"X5 Retail Group",
"ВТБ",
"Магнит",
"САФМАР",
"Росатом",
"Российские сети",
"Интер РАО",
"Транснефть",
"Татнефть",
"НОВАТЭК",
"Евраз",
"АФК Система",
"En +",
"НЛМК",
"Норникель",
"ГК Мегаполис",
"Газпромбанк",
"Русал",
"Аэрофлот — Российские авиалинии",
"Сибур Холдинг",
"Северсталь",
"СУЭК",
"ММК",
"Группа УГМК",
"Мобильные телесистемы",
"Металлоинвест",
"Лента",
"Объединенная авиастроительная корпорация",
"РусГидро",
"Сахалин Энерджи",
"Т Плюс",
"Группа М.Видео-Эльдорадо",
"Еврохим",
"ВымпелКом",
"Банковский холдинг Альфа-банка",
"Объединенная судостроительная корпорация",
"МегаФон",
"Ростелеком",
"ТМК",
"Славнефть",
"Тойота Мотор (Toyota)",
"Мечел",
"Автотор холдинг",
"Стройгазмонтаж",
"Дж.Т.И. Россия (JTI)",
"Торговая сеть Красное и Белое",
"АК Алроса",
"Дикси Групп",
"ВЭБ.РФ",
"ФМСМ (PMI)",
"Фольксваген Груп Рус",
"АвтоВАЗ",
"Леруа Мерлен Восток (Leroi Merlin)",
"Ашан (Auchan)",
"Россельхозбанк",
"ДНС Групп",
"ГК ТНС энерго",
"Протек",
"Группа компаний ПИК",
"Объединенная двигателестроительная корпорация",
"Независимая нефтегазовая компания",
"Merlion",
"ФосАгро",
"КМР и СНГ (KIA)",
"Катрен",
"Банк ФК Открытие",
"Корпорация Тактическое ракетное вооружение",
"Группа Рольф",
"ТАИФ-НК",
"Трансмашхолдинг",
"Метро Кэш энд Керри (Metro Cash & Carry)",
"Мостотрест",
"СОГАЗ",
"Эппл Рус (Apple)",
"Арктикгаз",
"Нижнекамскнефтехим",
"«Томскнефть» ВНК",
"Зарубежнефть",
"ЕвроСибЭнерго",
"Вертолеты России",
"Группа ГАЗ",
"Почта России",
"МУМТ (BAT)",
"Стройтранснефтегаз",
"КамАЗ",
"ФК Пульс",
"Полюс",
"Хендэ Мотор СНГ (Hyundai)",
"S7 Group",
"Ямал СПГ",
"Группа Содружество",
"ЧТПЗ",
"Иркутская нефтяная компания",
"Русснефть",
"Национальная компьютерная корпорация",
"Мерседес-Бенц Рус (Mercedes-Benz)",
"Русэнергосбыт",
"ОМК",
"Уралкалий",
"ГК Ташир",
"Компания Газ-Альянс",
"ФортеИнвест",
"Группа Мэйджор",
"Российская электроника",
"ГК СНС",
"Сибирский антрацит",
"Группа О'кей",
"Мосинжпроект",
"UCL Holding",
"Группа Илим",
"Московский кредитный банк",
"Группа Синара",
"Нефтиса",
"Объединенная компания Связной — Евросеть",
"Группа ЛСР",
"Т2 РТК Холдинг",
"НЗНП",
"АльфаСтрахование",
"Ланит",
"НПК Уралвагонзавод",
"Рено Россия (Renault)",
"Удмуртнефть",
"Нестле Россия (Nestle)",
"Райффайзенбанк (Raiffeisen)",
"Техкомпания Хуавэй (Huawei)",
"КДВ Групп",
"Яндекс",
"Мессояханефтегаз",
"БМВ Русланд Трейдинг (BMW)",
"Салым Петролеум",
"Данон (Danone)",
"ЮниКредит Банк (UniCredit)",
"ТД Риф",
"Мираторг",
"Группа Волга-Днепр",
"Вайлдберриз",
"Московский метрополитен",
"Полиметалл",
"Группа РЕСО",
"Пепсико холдингс",
"ГК Эфко",
"СДС-Уголь",
"ЛокоТех",
"ГК Автомир",
"Совкомбанк",
"ФСК Лидер",
"Марс (Mars)",
"Детский мир",
"Группа НПФ Благосостояние",
"Госкорпорация по ОрВД",
"Трансойл",
"ОХК Уралхим",
"Каспийский трубопроводный консорциум-Р",
"Тинькофф Банк",
"Fix Price",
"Промсвязьбанк",
"Акрон",
"Спортмастер",
"Проктер Энд Гэмбл. Дистрибьюторская компания (Procter & Gamble)",
"Eurasia Drilling Company",
"Группа Черкизово",
"ИКЕА Дом (INGKA)",
"Славянск Эко",
"Корпорация ВСМПО-АВИСМА",
"Росбанк (Societe General)",
"Монетка",
"Стройсервис",
"ГК Транстехсервис",
"Совкомфлот",
"ВСК",
"СБСВ-Ключавто",
"Ингосстрах",
"Сэтл групп",
"Гиперглобус (Bruch-Beteiligungs)",
"Технониколь",
"Металлсервис",
"Нефтехимсервис",
"Промышленно-металлургический холдинг",
"Урало-Сибирская металлургическая компания",
"Мария-Ра",
"Globaltrans",
"Кубанская нефтегазовая компания",
"Авиакомпания ЮТэйр",
"НПФ Газфонд пенсионные накопления",
"Русагро",
"Л'Этуаль",
"ЛГ Электроникс Рус (LG)",
"Каргилл (Cargill)",
"ВАД",
"Астон",
"Уральские авиалинии",
"Сталепромышленная компания",
"НИПИ НГ Петон",
"Бристоль",
"Уралвтормет",
"Нефтетранссервис",
"Казаньоргсинтез",
"Газпром бурение",
"ГК Агро-Белогорье",
"Фортум (Fortum)",
"ПК Балтика (Carlsbergfondet)",
"Авилон АГ",
"Шелл Нефть (Shell)",
"Юнипро (Uniper)",
"Технологии машиностроения (Техмаш)",
"НПК Объединенная вагонная компания",
"Велесстрой",
"ТД Интерторг",
"Юнилевер Русь (Unilever)",
"Солид-товарные рынки",
"Вольво Восток (AB Volvo)",
"Энел Россия",
"Марвел КТ",
"ГК Эталон",
"Металлокомплект-М",
"Группа Ренессанс Страхование",
"Военторг",
"Nordgold",
"Сибуглемет",
"Акционерный банк Россия",
"ДОМ.РФ",
"Форд Соллерс Холдинг",
"ИКЕА Торг (INGKA)",
"Макдоналдc (McDonald`s)",
"Кузбасская топливная компания",
"Хенкель Рус (Henkel)",
"Дон-Строй Инвест",
"Главное управление обустройства войск (ГУОВ)",
"СК Росгосстрах",
"Кока-Кола Эйчбиси Евразия (Coca-Cola)",
"Хоум Кредит энд Финанс Банк (PPF)",
"Гленкор Агро Мзк (Firada)",
"Mail.Ru Group",
"Монди СЛПК (Mondi)",
"НПО Алмаз",
"ММС Рус (Mitsubishi Motors)",
"Объединенные кондитеры",
"Комацу СНГ (Komatsu)",
"Национальная медиа группа",
"Агентство по страхованию вкладов (АСВ)",
"Татэнергосбыт",
"Куйбышевазот",
"Азбука вкуса",
"Трансбункер",
"Башкирская содовая компания",
"Инвестнефтетрейд",
"Inventive Retail Group",
"Самсунг Электроникс Рус Калуга (Samsung)",
"Крокус",
"Гугл (Google)",
"АСЦ-Холдинг",
"Новороссийский морской торговый порт",
"Швабе",
"Русская медная компания",
"Евроцемент груп",
"Мосводоканал",
"Международный аэропорт Шереметьево",
"Сегежа",
"Р-Фарм",
"Фармстандарт",
"Ростсельмаш",
"Транспортная группа FESCO",
"Компания Адамас",
"Метафракс",
"Джонсон & Джонсон (Johnson & Johnson)",
"Softline",
"Ягуар ленд ровер",
"Байер",
"Эркафарм",
"Фармперспектива",
"Банк Уралсиб",
"ВО Машиноимпорт",
"Кордиант",
"Новосталь",
"ВкусВилл",
"Л'Ореаль (L'Oreal)",
"DDS",
"ТОАЗ",
"Банк Санкт-Петербург",
"Группа агропредприятий Ресурс",
"Ярче!",
"Ренейссанс Констракшн (Ronesans Holding Anonim Sirketi)",
"Санофи Россия (Sanofi)",
"Группа ГМС",
"Северный ветер",
"БСС",
"Скания-Русь (Scania)",
"ГК Фаворит Моторс",
"Группа РТК",
"Фармкомплект",
"Нокиан Шина (Nokian)",
"ДСК Автобан",
"Омега Групп",
"Квадра",
"Roust",
"ГК Невада (Самбери)",
"Восточный экспресс банк",
"Верисел-трейдинг",
"Гознак",
"Фирма Агрокомплекс им. Ткачева",
"Банк Русский стандарт",
"Мазда Мотор Рус (Mazda)",
"Группа Газфонд",
"СТД Петрович",
"Беркс",
"Кари",
"Арконик СМЗ",
"Мон Дэлис (Mondelez)",
"Комус",
"Группа Агат",
"Великолукский мясокомбинат",
"Верный",
"СДС Азот",
"М Фэшн",
"Белгранкорм-холдинг",
"Группа Нэфис",
"ФГ Будущее",
"Глория Джинс",
"Билла (Rewe)",
"Государственная транспортная лизинговая компания",
"ФК Гранд Капитал",
"ЭС",
"Компания Металл Профиль",
"ГК Орими Трэйд",
"ГСЛ",
"Интернешнл Пейпер (International Paper)",
"Лаборатория Касперского",
"ПСМА Рус",
"Аптечная сеть 36,6",
"Тетра Пак (Tetra Pak)",
"Центральная пригородная пассажирская компания",
"Самараэнерго",
"Азур Эйр",
"Командор-Холдинг",
"Белуга Групп",
"ТД БелАЗ",
"Мосгортранс",
"Спар Миддл Волга",
"Холдинг Транспортные компоненты",
"Московский аэропорт Домодедово",
"Рулог (Havi)",
"Эйч Энд Эм (H&M)",
"Концерн Автоматика",
"Татэнерго",
"Трубная грузовая компания",
"Комос Групп",
"Первая тяжеловесная компания",
"ОМПК",
"НК Дулисьма",
"Ачимгаз",
"Новосибирскэнергосбыт",
"Компания СИМ-Авто",
"Ситибанк",
"Остин",
"Адидас (Adidas)",
"Ферреро Руссия (Ferrero)",
"Пермэнергосбыт",
"РКК Энергия",
"Свеза",
"Росжелдорпроект",
"Мазда Соллерс Мануфэкчуринг Рус",
"БСХ Бытовые приборы (BSH Hausgerate)",
"Московская биржа ММВБ-РТС",
"Русэнергоресурс",
"Компания Луис Дрейфус Восток (Louis Dreyfus)",
"ЭР-Телеком Холдинг",
"Соллерс",
"Объединенная энергетическая компания",
"Уральские локомотивы",
"ТМК Чермет",
"Загорский трубный завод",
"Элко Рус (Elko)",
"Архангельский ЦБК",
"Мособлгаз",
"ДК Рус",
"Энергосбытовая компания Восток",
"ГКНПЦ им. М.В.Хруничева",
"Металлоторг",
"Агросила Групп",
"Ман Трак Энд Бас Рус (Volkswagen)",
"Петербургский метрополитен",
"ТГК-2",
"Концерн Титан-2",
"Ренейссанс Хэви Индастрис Ronesans Endustri",
"Бургер Рус (Burger King)",
"Ozon",
"Сони Электроникс (Sony)",
"Продо",
"Продимекс-Холдинг",
"АвтоГермес",
"Railgo",
"Новотранс",
"Новикомбанк",
"Рив Гош",
"Сибирская горно-металлургическая компания",
"Сименс (Siemens)",
"Лига ставок",
"Банк Ак Барс",
"Группа Полипластик",
"Водоканал Санкт-Петербурга",
"РэйлАльянс",
"Российская телевизионная и радиовещательная сеть",
"Зерно-трейд",
"Ренессанс Кредит",
"Роберт Бош (Robert Bosch)",
"ВО Промсырьеимпорт",
"САП СНГ (SAP)",
"А Групп",
"Приосколье",
"Зара СНГ (Zara)",
"Модум-транс",
"Эбботт лэбораториз (Abbott Laboratories)",
"Группа Магнезит",
"Газпром автоматизация",
"Газэнергосервис",
"Независимая энергосбытовая компания Краснодарского края",
"Группа ЭПМ",
"Минудобрения",
"Либхерр-Русланд (Liebherr)",
"Восточная техника (Vost-Tech)",
"Первый канал",
"ГМК Сплав",
"ГК Автодилерство",
"НМЖК",
"ВГТРК",
"Неофарм",
"Роскосмос",
"Вита Лайн",
"Краснодарзернопродукт-Экспо",
"Алкоторг",
"Красцветмет",
"Касторама Рус (Castorama)",
"Деловые линии",
"ГВСУ по специальным объектам",
"ПКФ ДиПОС",
"Восток-Запад",
"Амурская нефтебаза",
"Юг Руси",
"Шнейдер Электрик (Schneider Electric)",
"Сингента (Chemchina)",
"Титан",
"Петропавловск",
"Фармимэкс",
"АБ Инбев Эфес (Anheuser-Busch Inbev)",
"ABI Product",
"Профитмед",
"ТД Агроторг",
"ТЭК СПБ",
"ТД Ункомтех",
"ОПХ (Heineken)",
"ТГК-16",
"Уральский банк реконструкции и развития",
"QIWI",
"СК Согласие",
"Группа Эссен",
"Втормет",
"Эссити (Essity)",
"Hoff (Домашний интерьер)",
"Сиско Солюшенз (Cisco)",
"ВО ЖДТ России",
"Купишуз (Lamoda)",
"Делл (Dell)",
"ПСК",
"Каменск-Уральский металлургический завод",
"Аргос",
"А.П.Р.",
"ГК 1520",
"Артис-Агро Экспорт",
"Луидор",
"Порше Руссланд (Porsche)",
"Денцу Эйджис Си Эс (Dentsu)",
"Эйвон Бьюти Продактс Компани (Avon)",
"РКЦ Прогресс",
"Силовые машины",
"АНГК",
"Корпорация Гринн",
"Фаберлик",
"Сибирская сервисная компания",
"Банк Возрождение",
"Отисифарм",
"Боэс Констракшн (Boes Construction)",
"Саткинский чугуноплавильный завод",
"Алтайвагон",
"ПТК",
"Щекиноазот",
"Волгоградэнергосбыт",
"Русский уголь",
"Трест КХМ",
"РМ Рейл",
"Восточная горнорудная компания",
"Группа Стройтрансгаз",
"БАСФ (BASF)",
"Мерида",
"Брок-Инвест-Сервис и К",
"Вирлпул Рус (Whirlpool)",
"Карелия Палп",
"Тева (Teva)",
"Media Direction Group",
"Якобс Дау Эгбертс Рус (Jacobs Douwe Egberts)",
"ГК Великан",
"Август",
"Транслом",
"ОТП Банк",
"РусВинил",
"Системный оператор Единой энергетической системы",
"АСР-Углесбыт",
"ЦЭНКИ",
"Транстрейдойл",
"Росморпорт",
"Газнефтетрэйдинг",
"Сладковско-Заречное",
"Кроношпан (Kronoplus)",
"ТЦ Кунцево Лимитед",
"СНПХ",
"Кимберли-Кларк (Kimberly-Clark)",
"Катерпиллар Евразия (Caterpillar)",
"Крок инкорпорейтед",
"Ашинский металлургический завод",
"Автодом",
"Международный центр",
"Мишлен (Michelin)",
"Картли",
"БелАЗ-24",
"Первый завод",
"ГК ЕКС",
"Петролеум Трейдинг",
"Нижфарм (Nidda Midco)",
"Импэкснефтехим",
"Вольво Карс (Zhejiang Geely)",
"Мосметрострой",
"ТЭК Мосэнерго",
"Борисхоф 1 (Inchcape)",
"ГК Титан",
"ПТК Уголь",
"Авторусь",
"Юг-Авто",
"Нова",
"Метрострой",
"Ресурс",
"Сетевая компания",
"РЕ Трэйдинг (LPP)",
"Углетранс",
"ЭйчПи Инк (HP Inc.)",
"ТК Шлюмберже (Schlumberger)",
"ГК Мега-Авто",
"Корпорация Электросевкавмонтаж",
"ГК Российские коммунальные системы",
"Запсибгазпром",
"Нефтепродукттрейд",
"Сатурн-Р",
"Завод имени Дегтярева",
"Такеда Фармасьютикалс (Takeda Pharmaceutical)",
"Слата супермаркет",
"Emex",
"САМ-МБ",
"171 Меридиан",
"Армтек",
"Центр финансовых технологий",
"Группа компаний Пионер",
"АХ Степь",
"Таграс (ТНГ-Групп)",
"Fonbet",
"Сандоз (Sandoz)",
"Берлин-Хеми А. Менарини (Berlin Chemie)",
"ГК Агропромкомплектация",
"МАКС",
"Компания Трасса",
"Башкирэнерго",
"Охрана Росгвардии",
"Гала-Форм",
"КРКА Фарма (KRKA)",
"Максидом",
"Нефтехимремстрой",
"Нефтьмагистраль",
"Авеста Фармацевтика (Baby Dream)",
"Старттех",
"Конар",
"Нортгаз",
"УГС",
"АББ (ABB)",
"Металлстандарт",
"Балтийская топливная компания",
"Мострансавто",
"Аксель-Моторс",
"Группа компаний МИЦ",
"ПК Борец",
"Европа",
"Сибирская аграрная группа",
"РТИ",
"Ферронордик машины (Ferronordic)",
"Южуралзолото ГК",
"Прогресс",
"Юг-Нефтепродукт",
"Камский кабель",
"Familia",
"Транскапиталбанк",
"А-Ойл",
"Сибтрейд",
"МТС-банк",
"Московская инженерно-строительная компания",
"Курганмашзавод",
"Вектрум-К",
"Морской терминал Тамань",
"Таркетт Рус (Tarkett)",
"Несте Санкт-Петербург (Neste)",
"Ново-Уренгойская газовая компания",
"Национальная нерудная компания",
"Октоблу (Decathlon)",
"Снежная Королева",
"Новартис Фарма (Novartis)",
"Магнолия",
"Техинком",
"Дочки-Сыночки",
"Астеллас Фарма",
"General Fueller",
"Автозаправочные комплексы Atan",
"Псковвтормет",
"Авиакомпания Икар",
)
catch_phrase_adj = (
(
"Автоматизированный",
"Автономный",
"Адаптивный",
"Амортизированный",
"Ассимилированный",
"Безопасный",
"Бизнес-ориентированный",
"Взаимовыгодный",
"Виртуальный",
"Глубокий",
"Горизонтальный",
"Делегируемый",
"Децентрализованный",
"Дублируемый",
"Инверсный",
"Инновационный",
"Интегрированный",
"Интуитивный",
"Качественный",
"Клиент-ориентированный",
"Контролируемый",
"Концептуальный",
"Корпоративный",
"Кросс-платформенный",
"Межгрупповой",
"Многогранный",
"Многоканальный",
"Многослойный",
"Многоуровневый",
"Модернизируемый",
"Настраиваемый",
"Новый",
"Общедоступный",
"Объектный",
"Обязательный",
"Оперативный",
"Оптимизированный",
"Опциональный",
"Организованный",
"Органичный",
"Ориентированный",
"Открытый",
"Оцифрованный",
"Переключаемый",
"Переосмысленный",
"Переработанный",
"Перспективный",
"Полный",
"Поэтапный",
"Превентивный",
"Программируемый",
"Прогрессивный",
"Продвинутый",
"Прочный",
"Разнообразный",
"Распределённый",
"Расширенный",
"Реализованный",
"Реконструируемый",
"Самодостаточный",
"Сбалансированный",
"Сетевой",
"Синхронизированный",
"Совместимый",
"Сокращенный",
"Сосредоточенный",
"Стабильный",
"Стратегический",
"Увеличенный",
"Удобный",
"Улучшенный",
"Улучшенный",
"Уменьшенный",
"Универсальный",
"Управляемый",
"Устойчивый",
"Фундаментальный",
"Функциональный",
"Цельный",
"Централизованный",
"Эксклюзивный",
"Элегантный",
"Эргономичный",
),
(
"аналитический",
"асимметричный",
"асинхронный",
"бездефектный",
"бескомпромиссный",
"веб-ориентированный",
"встречный",
"вторичный",
"высокоуровневый",
"гибкий",
"гибридный",
"глобальный",
"двунаправленный",
"действенный",
"динамичный",
"единообразный",
"заметный",
"инструктивный",
"интерактивный",
"исполнительный",
"итернациональный",
"клиент-серверный",
"контекстуальный",
"круглосуточный",
"логистический",
"локальный",
"максимальный",
"масштабируемый",
"методичный",
"многозадачный",
"мобильный",
"модульный",
"мультимедийный",
"наглядный",
"направленный",
"национальный",
"нейтральный",
"нестандартный",
"объектно-ориентированный",
"однородный",
"оптимальный",
"основной",
"отказостойкий",
"переходный",
"последовательный",
"потенциальный",
"пошаговый",
"прибыльный",
"приоритетный",
"промежуточный",
"радикальный",
"раздвоенный",
"региональный",
"связный",
"систематический",
"системный",
"составной",
"социальный",
"специализированный",
"статический",
"третичный",
"ультрасовременный",
"целостный",
"широкий",
"широкопрофильный",
"эвристический",
"экоцентричный",
"энергонезависимый",
"яркий",
),
)
catch_phrase_nouns_masc = (
"адаптер",
"алгоритм",
"альянс",
"анализатор",
"архив",
"веб-сайт",
"вызов",
"графический интерфейс",
"графический интерфейс пользователя",
"доступ",
"инструментарий",
"интерфейс",
"инфопосредник",
"искусственный интеллект",
"массив",
"модератор",
"мониторинг",
"набор инструкций",
"параллелизм",
"подход",
"портал",
"прогноз",
"продукт",
"проект",
"протокол",
"ресурс",
"системный движок",
"успех",
"фреймворк",
"хаб",
"эталон",
)
catch_phrase_nouns_fem = (
"архитектура",
"база данных",
"база знаний",
"вероятность",
"возможность",
"гибкость",
"защищенная линия",
"иерархия",
"инициатива",
"инфраструктура",
"кодировка",
"конгломерация",
"концепция",
"координация",
"локальная сеть",
"матрица",
"методология",
"миграция",
"модель",
"нейронная сеть",
"парадигма",
"поддержка",
"политика",
"проекция",
"производительность",
"прошивка",
"рабочая группа",
"реализация",
"сеть Интранет",
"сеть Экстранет",
"служба поддержки",
"служба техподдержки",
"способность",
"стандартизация",
"стратегия",
"структура",
"суперструктура",
"установка",
"фокус-группа",
"функциональность",
"функция",
"ценовая структура",
"эмуляция",
)
catch_phrase_nouns_neu = (
"взаимодействие",
"групповое программное обеспечение",
"интернет-решение",
"использование",
"межплатформенное программное обеспечение",
"оборудование",
"определение",
"отношение",
"приложение",
"программное обеспечение",
"решение",
"совершенствование процесса",
"сотрудничество",
"управление бюджетом",
"хранилище данных",
"шифрование",
"ядро",
)
bsWords = (
(
"Адаптация",
"Визуализация",
"Включение",
"Внедрение",
"Генерация",
"Инновация",
"Интеграция",
"Использование",
"Итерация",
"Конструирование",
"Координация",
"Культивация",
"Максимизация",
"Модернизация",
"Монетизация",
"Мотивация",
"Обеспечение",
"Объединение",
"Оптимизация",
"Освоение",
"Охват",
"Оцифровка",
"Перезагрузка",
"Переопределение",
"Переосмысление",
"Перепрофилирование",
"Переход",
"Преображение",
"Приспособление",
"Продление",
"Производство",
"Развитие",
"Разворачивание",
"Разработка",
"Распределение",
"Реализация",
"Революция",
"Синтез",
"Синхронизация",
"Сравнение",
"Трансформация",
"Увеличение",
"Управление",
"Ускорение",
"Формирование",
"Шкалирование",
"Эксплуатация",
),
(
"B2B",
"B2C",
"активных",
"безотказных",
"беспроводных",
"богатых",
"веб-ориентированных",
"вертикальных",
"виртуальных",
"глобальных",
"действенных",
"динамичных",
"заказных",
"индивидуальных",
"инновационных",
"интегрированных",
"интерактивных",
"интуитивных",
"концептуальных",
"корпоративных",
"критически важных",
"кроссплатформенных",
"круглогодичных",
"круглосуточных",
"лучших в своём роде",
"масштабируемых",
"мультимедийных",
"наглядных",
"надежных",
"онлайн и офлайн",
"ориентированных на пользователя",
"открытых",
"передовых",
"подробных",
"популярных",
"престижных",
"прибыльных",
"притягательных",
"прозрачных",
"распределённых",
"распространенных",
"расширяемых",
"революционных",
"сенсационных",
"серверных",
"сетевых",
"соблазнительных",
"совместных",
"современных",
"стандартных",
"стратегических",
"ультрасовременных",
"фронт-энд",
"целостных",
"цельных",
"эффективных",
),
(
"архитектур",
"аудиторий",
"веб-сервисов",
"взаимодействий",
"действий",
"диапазонов",
"знаний",
"инициатив",
"интернет-компаний",
"интернет-магазинов",
"интернет-продавцов",
"интернет-услуг",
"интерфейсов",
"инфопосредников",
"инфраструктур",
"каналов",
"методик",
"метрик",
"моделей",
"ниш",
"областей интереса",
"отношений",
"парадигм",
"партнерств",
"платформ",
"пользователей",
"порталов",
"приложений",
"результатов",
"решений",
"рынков",
"сетей",
"систем",
"систем снабжения",
"сообществ",
"схем",
"технологий",
"функций",
),
)
def catch_phrase(self) -> str:
"""
:example: 'Адаптивный и масштабируемый графический интерфейс'
"""
noun: str = self.random_element(
self.catch_phrase_nouns_masc + self.catch_phrase_nouns_fem + self.catch_phrase_nouns_neu
)
adj_first: str = self.random_element(self.catch_phrase_adj[0])
adj_second: str = self.random_element(self.catch_phrase_adj[1])
if noun in self.catch_phrase_nouns_fem:
adj_first = adj_first[:-2] + "ая"
adj_second = adj_second[:-2] + "ая"
elif noun in self.catch_phrase_nouns_neu:
adj_first = adj_first[:-2] + "ое"
adj_second = adj_second[:-2] + "ое"
return adj_first + " и " + adj_second + " " + noun
    def large_company(self) -> str:
        """Return the name of a large real-world Russian company.

        Drawn from the RBC-500 list stored in ``large_companies``.

        :example: 'АвтоВАЗ'
        """
        return self.random_element(self.large_companies)
    def company_prefix(self) -> str:
        """Return a Russian legal-form abbreviation (e.g. ООО, ЗАО, ИП).

        :example: 'ООО'
        """
        return self.random_element(self.company_prefixes)
def businesses_inn(self) -> str:
"""
Returns tax identification number for businesses (ru. идентификационный номер налогоплательщика, ИНН).
"""
region: str = "%02d" % self.random_int(min=1, max=92)
inspection: str = "%02d" % self.random_int(min=1, max=99)
tail: str = "%05d" % self.random_int(min=1, max=99999)
result: str = region + inspection + tail
return result + calculate_checksum(result)
def individuals_inn(self) -> str:
"""
Returns tax identification number for individuals (ru. идентификационный номер налогоплательщика, ИНН).
"""
region: str = "%02d" % self.random_int(min=1, max=92)
inspection: str = "%02d" % self.random_int(min=1, max=99)
tail: str = "%06d" % self.random_int(min=1, max=999999)
result: str = region + inspection + tail
result += calculate_checksum(result)
return result + calculate_checksum(result)
def businesses_ogrn(self) -> str:
"""
Returns primary state registration number for businesses
(ru. основной государственный регистрационный номер, ОГРН).
"""
sign: str = self.random_element(("1", "5"))
year: str = "%02d" % self.random_int(min=1, max=datetime.now().year - 2000)
region: str = "%02d" % self.random_int(min=1, max=92)
tail: str = "%07d" % self.random_int(min=1, max=9999999)
result: str = sign + year + region + tail
return result + str((int(result) % 11) % 10)
def individuals_ogrn(self) -> str:
"""
Returns primary state registration number for individuals
(ru. основной государственный регистрационный номер, ОГРН).
"""
year: str = "%02d" % self.random_int(min=1, max=datetime.now().year - 2000)
region: str = "%02d" % self.random_int(min=1, max=92)
tail: str = "%09d" % self.random_int(min=1, max=999999999)
result: str = "3" + year + region + tail
return result + str((int(result) % 13) % 10)
def kpp(self) -> str:
"""
Returns tax registration reason code (ru. код причины постановки на учет, КПП).
"""
region: str = "%02d" % self.random_int(min=1, max=92)
inspection: str = "%02d" % self.random_int(min=1, max=99)
reason: str = self.random_element(("01", "43", "44", "45"))
tail: str = "%03d" % self.random_int(min=1, max=999)
return region + inspection + reason + tail
def snils(self) -> str:
"""
Returns SNILS number (ru. СНИЛС).
"""
numbers: str = "%09d" % self.random_int(min=1, max=999999999)
return numbers + calculate_snils_checksum(numbers)
| Provider |
python | pytorch__pytorch | test/test_fx_passes.py | {
"start": 15190,
"end": 15627
class ____:
    # Subgraph-matcher fixture: `pattern` is an exact copy of `forward`,
    # so the whole graph is the single expected match.

    @staticmethod
    def forward(x):
        # `a` is negated once and consumed twice by the add — the shared
        # `neg` node is a single producer in the traced graph.
        a = torch.neg(x)
        return torch.add(a, a)

    @staticmethod
    def pattern(x):
        a = torch.neg(x)
        return torch.add(a, a)

    test_cases = [
        # match_output, match_placeholder, num_matches
        # Every flag combination yields exactly one match, since pattern
        # and graph are identical including output and placeholder nodes.
        TestCase(False, False, 1),
        TestCase(True, False, 1),
        TestCase(False, True, 1),
        TestCase(True, True, 1)
    ]
| SimpleFullGraphMatching |
python | huggingface__transformers | src/transformers/models/glm4v/video_processing_glm4v.py | {
"start": 2041,
"end": 10034
class ____(BaseVideoProcessor):
    """Video processor for GLM-4V.

    Samples frames at a target FPS (uniformly for long videos), resizes
    each video to patch-aligned dimensions via ``smart_resize``, then
    rescales/normalizes and flattens the result into temporal patches
    plus a per-video (t, h, w) grid descriptor.
    """

    resample = PILImageResampling.BICUBIC
    # `size` holds pixel-count budgets consumed by smart_resize
    # (min_pixels/max_pixels), not edge lengths.
    size = {"shortest_edge": 112 * 112, "longest_edge": 28 * 28 * 2 * 30000}
    max_image_size = {"longest_edge": 28 * 28 * 2 * 30000}
    image_mean = OPENAI_CLIP_MEAN
    image_std = OPENAI_CLIP_STD
    do_resize = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    do_sample_frames = True
    patch_size = 14
    temporal_patch_size = 2
    # Videos longer than this (seconds) are sampled uniformly rather than
    # at the requested FPS.
    max_duration = 300
    merge_size = 2
    valid_kwargs = Glm4vVideoProcessorInitKwargs
    num_frames = 16
    fps = 2
    model_input_names = ["pixel_values_videos", "video_grid_thw"]

    def __init__(self, **kwargs: Unpack[Glm4vVideoProcessorInitKwargs]):
        """Initialize and validate that ``size`` carries both pixel-budget keys."""
        super().__init__(**kwargs)
        if self.size is not None and (
            self.size.get("shortest_edge", None) is None or self.size.get("longest_edge", None) is None
        ):
            raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")

    def _further_process_kwargs(
        self,
        size: Optional[SizeDict] = None,
        **kwargs,
    ) -> dict:
        """
        Update kwargs that need further processing before being validated
        Can be overridden by subclasses to customize the processing of kwargs.
        """
        # Same invariant as __init__, re-checked for sizes passed per call.
        if size is not None and ("shortest_edge" not in size or "longest_edge" not in size):
            raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
        return super()._further_process_kwargs(size=size, **kwargs)

    def sample_frames(
        self,
        metadata: VideoMetadata,
        fps: Optional[Union[int, float]] = None,
        **kwargs,
    ):
        """
        Args:
            metadata (`VideoMetadata`):
                Metadata of the video containing information about total duration, fps and total number of frames.
            fps (`int` or `float`, *optional*):
                Target frames to sample per second. Defaults to `self.fps`.

        Returns:
            np.ndarray:
                Indices to sample video frames.
        """
        if metadata is None or getattr(metadata, "fps", None) is None:
            raise ValueError(
                "Asked to sample frames per second but no video metadata was provided which is required when sampling in GLM4V. "
                "Please pass in `VideoMetadata` object or set `do_sample_frames=False`"
            )
        total_frames = metadata.total_num_frames
        requested_fps = fps if fps is not None else self.fps
        max_frame_idx = total_frames - 1
        # Fall back to a frame-count-derived duration when metadata lacks one.
        duration = metadata.duration or round(max_frame_idx / metadata.fps) + 1
        if duration <= self.max_duration:
            # Short video: ceil-spaced indices at the requested rate,
            # clamped to the last valid frame.
            n = int(math.floor(duration * requested_fps))
            frame_indices = [min(max_frame_idx, int(math.ceil(i * metadata.fps / requested_fps))) for i in range(n)]
        else:
            # Long video: cap the sample count at max_duration * fps and
            # spread the samples uniformly over the full duration.
            num_samples = int(self.max_duration * requested_fps)
            if num_samples >= total_frames:
                frame_indices = list(range(total_frames))
            else:
                target_seconds = np.linspace(0, duration, num_samples, endpoint=True)
                frame_indices = [min(max_frame_idx, int(math.ceil(t * metadata.fps))) for t in target_seconds]
        # De-duplicate while preserving order, then pad to an even count —
        # presumably so frames pair up into temporal patches
        # (temporal_patch_size == 2); TODO confirm.
        seen, uniq = set(), []
        for idx in frame_indices:
            if idx not in seen:
                seen.add(idx)
                uniq.append(idx)
        if len(uniq) & 1:
            uniq.append(uniq[-1])
        return np.array(uniq)

    def _preprocess(
        self,
        videos: list[torch.Tensor],
        do_convert_rgb: bool = True,
        do_resize: bool = True,
        size: Optional[SizeDict] = None,
        interpolation: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: float = 1 / 255.0,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, list[float]]] = None,
        image_std: Optional[Union[float, list[float]]] = None,
        patch_size: Optional[int] = None,
        temporal_patch_size: Optional[int] = None,
        merge_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Resize, normalize and patchify a batch of videos.

        Returns a BatchFeature with `pixel_values_videos` (flattened
        patches, concatenated over all videos) and `video_grid_thw`
        (per-video [grid_t, grid_h, grid_w]).
        """
        # Group same-shaped videos so each group can be processed as one
        # batched tensor op.
        grouped_videos, grouped_videos_index = group_videos_by_shape(videos)
        resized_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            B, T, C, H, W = stacked_videos.shape
            num_frames, height, width = T, H, W
            if do_resize:
                # smart_resize picks dimensions divisible by
                # patch_size * merge_size within the pixel budget in `size`.
                resized_height, resized_width = smart_resize(
                    num_frames=num_frames,
                    height=height,
                    width=width,
                    temporal_factor=temporal_patch_size,
                    factor=patch_size * merge_size,
                    min_pixels=size.shortest_edge,
                    max_pixels=size.longest_edge,
                )
                # Fold the time axis into the batch axis for the 2D resize.
                stacked_videos = stacked_videos.view(B * T, C, H, W)
                stacked_videos = self.resize(
                    stacked_videos,
                    size=SizeDict(height=resized_height, width=resized_width),
                    interpolation=interpolation,
                )
                stacked_videos = stacked_videos.view(B, T, C, resized_height, resized_width)
            resized_videos_grouped[shape] = stacked_videos
        resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index)

        # Group videos by size for further processing
        # Needed in case do_resize is False, or resize returns videos with different sizes
        grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos)
        processed_videos_grouped = {}
        processed_grids = {}
        for shape, stacked_videos in grouped_videos.items():
            resized_height, resized_width = get_image_size(stacked_videos[0], channel_dim=ChannelDimension.FIRST)

            # Fused rescale and normalize
            stacked_videos = self.rescale_and_normalize(
                stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            patches = stacked_videos

            # Check that videos have `num_frames` divisible by `temporal_patch_size`;
            # if not, pad by repeating the last frame.
            if patches.shape[1] % temporal_patch_size != 0:
                repeats = patches[:, -1:].repeat(1, temporal_patch_size - 1, 1, 1, 1)
                patches = torch.cat([patches, repeats], dim=1)
            batch_size, grid_t, channel = patches.shape[:3]
            grid_t = grid_t // temporal_patch_size
            grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
            # Split spatial dims into (merge blocks x patches) and the time
            # dim into temporal patches, then flatten each patch into one
            # vector of channel * temporal_patch_size * patch_size^2 values.
            patches = patches.view(
                batch_size,
                grid_t,
                temporal_patch_size,
                channel,
                grid_h // merge_size,
                merge_size,
                patch_size,
                grid_w // merge_size,
                merge_size,
                patch_size,
            )
            patches = patches.permute(0, 1, 4, 7, 5, 8, 3, 2, 6, 9)
            flatten_patches = patches.reshape(
                batch_size,
                grid_t * grid_h * grid_w,
                channel * temporal_patch_size * patch_size * patch_size,
            )
            processed_videos_grouped[shape] = flatten_patches
            processed_grids[shape] = [[grid_t, grid_h, grid_w]] * batch_size
        processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)
        processed_grids = reorder_videos(processed_grids, grouped_videos_index)
        # Concatenate all videos' patches along the patch axis; grids keep
        # per-video shape information for the model.
        pixel_values_videos = torch.cat(processed_videos, dim=0)
        video_grid_thw = torch.tensor(processed_grids)
        data = {
            "pixel_values_videos": pixel_values_videos,
            "video_grid_thw": video_grid_thw,
        }
        return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["Glm4vVideoProcessor"]
| Glm4vVideoProcessor |
python | google__pytype | pytype/abstract/_interpreter_function.py | {
"start": 3594,
"end": 37319
} | class ____(_function_base.SignedFunction):
"""An abstract value representing a user-defined function.
Attributes:
name: Function name. Might just be something like "<lambda>".
code: A code object.
closure: Tuple of cells (cfg.Variable) containing the free variables this
closure binds to.
ctx: context.Context instance.
"""
@classmethod
def make(
cls,
name: str,
*,
def_opcode: "opcodes.Opcode",
code: "blocks.OrderedCode",
f_locals: _instances.LazyConcreteDict,
f_globals: _instances.LazyConcreteDict,
defaults,
kw_defaults,
closure,
annotations: "dict[str, _base.BaseValue]",
ctx: "context.Context",
):
"""Get an InterpreterFunction.
Things like anonymous functions and generator expressions are created
every time the corresponding code executes. Caching them makes it easier
to detect when the environment hasn't changed and a function call can be
optimized away.
Arguments:
name: Function name.
def_opcode: The opcode for the def statement
code: A code object.
f_locals: The locals used for name resolution.
f_globals: The globals used for name resolution.
defaults: Default arguments.
kw_defaults: Default arguments for kwonly parameters.
closure: The free variables this closure binds to.
annotations: Function annotations. Dict of name -> BaseValue.
ctx: context.Context instance.
Returns:
An InterpreterFunction.
"""
annotations = annotations or {}
overloads = ctx.vm.frame.overloads[name]
if f_locals == ctx.convert.unsolvable:
local_members = {}
else:
local_members = f_locals.members
key = (
name,
code,
_hash_all_dicts(
(f_globals.members, set(code.names)),
(local_members, set(local_members) - set(code.varnames)),
(
{
key: ctx.program.NewVariable([value], [], ctx.root_node)
for key, value in annotations.items()
},
None,
),
(
dict(
enumerate(
ctx.program.NewVariable([f], [], ctx.root_node)
for f in overloads
)
),
None,
),
(dict(enumerate(defaults)), None),
(dict(enumerate(closure or ())), None),
),
)
if key not in ctx.function_cache:
ctx.function_cache[key] = cls(
name,
def_opcode,
code,
f_locals,
f_globals,
defaults,
kw_defaults,
closure,
annotations,
overloads,
ctx,
)
elif closure:
# Reusing the old closure variables would lead to the closure containing
# future values, such as Deleted.
ctx.function_cache[key].closure = closure
f = ctx.function_cache[key]
ctx.vm.frame.functions_created_in_frame[f.name.rsplit(".")[-1]].append(f)
return f
  def __init__(
      self,
      name: str,
      def_opcode: "opcodes.Opcode",
      code: "blocks.OrderedCode",
      f_locals: _instances.LazyConcreteDict,
      f_globals: _instances.LazyConcreteDict,
      defaults,
      kw_defaults,
      closure,
      annotations: dict[str, pytd.Type],
      overloads,
      ctx: "context.Context",
  ) -> None:
    """Initialize an InterpreterFunction; prefer the caching factory method."""
    log.debug("Creating InterpreterFunction %r for %r", name, code.name)
    self.bound_class = _function_base.BoundInterpreterFunction
    # By CPython convention, a code object's first const is its docstring.
    self.doc = code.consts[0] if code.consts else None
    self.def_opcode = def_opcode
    self.code = code
    self.f_globals = f_globals
    self.f_locals = f_locals
    self.defaults = tuple(defaults)
    self.kw_defaults = kw_defaults
    self.closure = closure
    # Maps a call key (see _hash_call) to (return value, remaining depth).
    self._call_cache = {}
    self._call_records = []
    # TODO(b/78034005): Combine this and PyTDFunction.signatures into a single
    # way to handle multiple signatures that SignedFunction can also use.
    self._all_overloads = overloads
    self._active_overloads = overloads
    self.has_overloads = bool(overloads)
    self.is_overload = False  # will be set by typing_overlay.Overload.call
    self.posonlyarg_count = self.code.posonlyargcount
    self.nonstararg_count = self.code.argcount + self.code.kwonlyargcount
    signature = self._build_signature(name, annotations)
    super().__init__(signature, ctx)
    if not self.code.has_coroutine():
      # Sanity check: has_iterable_coroutine() is set by the types.coroutine
      # decorator, so it should always be False at function creation time.
      assert not self.code.has_iterable_coroutine()
    elif signature.has_return_annotation:
      # Wrap the declared return annotation into a parameterized coroutine
      # type so the function's result matches coroutine-typed annotations.
      params = {
          abstract_utils.T: ctx.convert.unsolvable,
          abstract_utils.T2: ctx.convert.unsolvable,
          abstract_utils.V: signature.annotations["return"],
      }
      coroutine_type = _classes.ParameterizedClass(
          ctx.convert.coroutine_type,
          params,
          ctx,  # pytype: disable=wrong-arg-types
      )
      signature.annotations["return"] = coroutine_type
    self._check_signature()
    self._update_signature_scope_from_closure()
    self.last_frame = None  # for BuildClass
    self._store_call_records = False
    self.is_class_builder = False  # Will be set by BuildClass.
    # Whether to cache the return value irrespective of call args
    self.cache_return = False
@contextlib.contextmanager
def record_calls(self):
"""Turn on recording of function calls. Used by analyze.py."""
old = self._store_call_records
self._store_call_records = True
yield
self._store_call_records = old
  def _check_signature(self) -> None:
    """Validate function signature."""
    # A FinalAnnotation anywhere in the signature is reported as an error.
    for ann in self.signature.annotations.values():
      if isinstance(ann, _typing.FinalAnnotation):
        self.ctx.errorlog.invalid_final_type(
            self.ctx.vm.simple_stack(self.def_opcode)
        )
    if not self.signature.has_return_annotation:
      return
    ret_type = self.signature.annotations["return"]
    # Check Generator/AsyncGenerator return type
    if self.code.has_generator():
      if not _matches_generator(ret_type):
        self.ctx.errorlog.bad_yield_annotation(
            self.ctx.vm.frames, self.signature.name, ret_type, is_async=False
        )
    elif self.code.has_async_generator():
      if not _matches_async_generator(ret_type):
        self.ctx.errorlog.bad_yield_annotation(
            self.ctx.vm.frames, self.signature.name, ret_type, is_async=True
        )
    elif ret_type.full_name in abstract_utils.TYPE_GUARDS:
      # Type-guard returns (TypeGuard/TypeIs) need a parameter to narrow and
      # exactly one type argument.
      valid = True
      if self.signature.mandatory_param_count() < 1:
        guard = ret_type.full_name
        self.ctx.errorlog.invalid_function_definition(
            self.ctx.vm.frames,
            f"A {guard} function must have at least one required parameter",
        )
        valid = False
      if not isinstance(ret_type, _classes.ParameterizedClass):
        self.ctx.errorlog.invalid_annotation(
            self.ctx.vm.frames, ret_type, "Expected 1 parameter, got 0"
        )
        valid = False
      if (
          valid
          and ret_type.name == "TypeIs"
          and self.signature.param_names[0] in self.signature.annotations
      ):
        # Check that the TypeIs parameter is consistent with the function's
        # input type.
        guard_type = ret_type.formal_type_parameters[abstract_utils.T]
        guard_var = guard_type.instantiate(self.ctx.root_node)
        input_type = self.signature.annotations[self.signature.param_names[0]]
        m = self.ctx.matcher(self.ctx.root_node).compute_one_match(
            guard_var, input_type
        )
        if not m.success:
          guard_pytd = pytd_utils.Print(guard_type.to_pytd_type_of_instance())
          input_pytd = pytd_utils.Print(input_type.to_pytd_type_of_instance())
          self.ctx.errorlog.invalid_function_definition(
              self.ctx.vm.frames,
              f"TypeIs[{guard_pytd}] is not consistent with input type "
              f"{input_pytd}",
          )
def _build_signature(
self, name: str, annotations: dict[str, pytd.Type]
) -> function.Signature:
"""Build a function.Signature object representing this function."""
vararg_name = None
kwarg_name = None
kwonly = set(self.code.varnames[self.code.argcount : self.nonstararg_count])
arg_pos = self.nonstararg_count
if self.has_varargs():
vararg_name = self.code.varnames[arg_pos]
arg_pos += 1
if self.has_kwargs():
kwarg_name = self.code.varnames[arg_pos]
arg_pos += 1
defaults = dict(
zip(self.get_positional_names()[-len(self.defaults) :], self.defaults)
)
defaults.update(self.kw_defaults)
return function.Signature(
name,
tuple(self.code.varnames[: self.code.argcount]),
self.posonlyarg_count,
vararg_name,
tuple(kwonly),
kwarg_name,
defaults,
annotations,
)
def _update_signature_scope_from_closure(self) -> None:
# If this is a nested function in an instance method and the nested function
# accesses 'self', then the first variable in the closure is 'self'. We use
# 'self' to update the scopes of any type parameters in the nested method's
# signature to the containing class.
if not self.closure:
return
maybe_instance = self.closure[0]
try:
instance = abstract_utils.get_atomic_value(
maybe_instance, _instance_base.Instance
)
except abstract_utils.ConversionError:
return
if isinstance(instance.cls, _classes.InterpreterClass):
self.update_signature_scope(instance.cls)
  def get_first_opcode(self) -> "opcodes.Opcode":
    """Return the first non-noop opcode of the function's code."""
    return self.code.get_first_opcode(skip_noop=True)

  def argcount(self, _) -> int:
    """Return the number of positional parameters; the argument is unused."""
    return self.code.argcount
def match_args(
self,
node: "cfg.CFGNode",
args: function.Args,
alias_map: "datatypes.UnionFind | None" = None,
match_all_views: bool = False,
):
if not self.signature.has_param_annotations:
return
return super().match_args(node, args, alias_map, match_all_views)
  def _inner_cls_check(self, last_frame: "state.Frame") -> None:
    """Check if the function and its nested class use same type parameter."""
    # get all type parameters from function annotations
    all_type_parameters = []
    for annot in self.signature.annotations.values():
      params = self.ctx.annotation_utils.get_type_parameters(annot)
      # Drop the scope so parameters from different scopes compare equal.
      all_type_parameters.extend(itm.with_scope(None) for itm in params)
    if all_type_parameters:
      for key, value in last_frame.f_locals.pyval.items():
        value = abstract_utils.get_atomic_value(
            value, default=self.ctx.convert.unsolvable
        )
        if (
            isinstance(value, _classes.InterpreterClass)
            and value.template
            and key == value.name
        ):
          # `value` is a nested class definition.
          inner_cls_types = value.collect_inner_cls_types()
          inner_cls_types.update(
              [(value, item.with_scope(None)) for item in value.template]
          )
          # Report errors in a deterministic order.
          for cls, item in sorted(inner_cls_types, key=lambda typ: typ[1].name):
            if item in all_type_parameters:
              self.ctx.errorlog.invalid_annotation(
                  self.ctx.vm.simple_stack(self.get_first_opcode()),
                  item,
                  (
                      "Function [%s] and its nested generic class [%s] cannot"
                      " use the same type variable %s"
                  )
                  % (self.full_name, cls.full_name, item.name),
              )
def signature_functions(self):
"""Get the functions that describe this function's signature."""
return self._active_overloads or [self]
def iter_signature_functions(
self,
) -> Generator["InterpreterFunction", None, None]:
"""Loop through signatures, setting each as the primary one in turn."""
if not self._all_overloads:
yield self
return
for f in self._all_overloads:
old_overloads = self._active_overloads
self._active_overloads = [f]
try:
yield f
finally:
self._active_overloads = old_overloads
@contextlib.contextmanager
def reset_overloads(self) -> Generator[None, None, None]:
if self._all_overloads == self._active_overloads:
yield
return
old_overloads = self._active_overloads
self._active_overloads = self._all_overloads
try:
yield
finally:
self._active_overloads = old_overloads
  def _find_matching_sig(
      self,
      node: "cfg.CFGNode",
      args: function.Args,
      alias_map: "datatypes.UnionFind | None",
  ) -> "tuple[function.Signature, list[matcher.GoodMatch], dict[str, cfg.Variable]]":
    """Match args against this function's signatures, trying each in turn.

    Args:
      node: The current CFG node.
      args: The passed arguments.
      alias_map: Optionally, aliasing information for type parameters.

    Returns:
      A (signature, substitutions, callargs) tuple for the first signature
      in signature_functions() that the arguments match.

    Raises:
      error_types.FailedFunctionCall: If no signature matches. The error
        raised is whichever compares greatest under FailedFunctionCall's
        ordering across all failed attempts.
    """
    error = None
    for f in self.signature_functions():
      try:
        # match_args and _map_args both do some matching, so together they fully
        # type-check the arguments.
        substs, callargs = f.match_and_map_args(node, args, alias_map)
      except error_types.FailedFunctionCall as e:
        if e > error:
          error = e
      else:
        # We use the first matching overload.
        return f.signature, substs, callargs
    raise error  # pylint: disable=raising-bad-type
def _set_callself_maybe_missing_members(self) -> None:
if self.ctx.callself_stack:
for b in self.ctx.callself_stack[-1].bindings:
b.data.maybe_missing_members = True
def _is_unannotated_contextmanager_exit(
self, func: _function_base.Function, args: function.Args
) -> bool:
"""Returns whether this is an unannotated contextmanager __exit__ method.
If this is a bound method named __exit__ that has no type annotations and is
passed four positional args and nothing else, then we assume that it is a
contextmanager's __exit__ method that needs annotations added.
Args:
func: A method binding for self.
args: Passed arguments.
"""
if not isinstance(func, _function_base.BoundInterpreterFunction):
return False
if not self.name.endswith(".__exit__"):
return False
if self.signature.has_param_annotations:
return False
return (
len(args.posargs) == 4
and not args.has_namedargs()
and not args.starargs
and not args.starstarargs
)
  def _fix_args_for_unannotated_contextmanager_exit(
      self, node: "cfg.CFGNode", func: "cfg.Binding", args: function.Args
  ) -> function.Args:
    """Adjust argument types for a contextmanager's __exit__ method.

    Args:
      node: The current CFG node.
      func: The binding for the function being called.
      args: The passed arguments.

    Returns:
      `args`, unchanged unless this looks like an unannotated __exit__ (see
      _is_unannotated_contextmanager_exit), in which case the three
      exception-info arguments are widened to cover both the exception and
      the no-exception (None, None, None) call.
    """
    if not self._is_unannotated_contextmanager_exit(func.data, args):
      return args
    # When a contextmanager is used in a 'with' statement, its __exit__ method
    # is implicitly called with either (None, None, None) or
    # (exc_type, exc_value, traceback) depending on whether an exception is
    # encountered. These two cases generate different bytecode, and our VM
    # always assumes no exception. But for analyzing __exit__, we should allow
    # for both possibilities.
    exception_type = self.ctx.convert.lookup_value("builtins", "BaseException")
    arg1 = self.ctx.program.NewVariable(
        [exception_type, self.ctx.convert.none], [], node
    )
    arg2 = exception_type.instantiate(node)
    arg2.AddBinding(self.ctx.convert.none, [], node)
    arg3 = self.ctx.program.NewVariable(
        [self.ctx.convert.unsolvable, self.ctx.convert.none], [], node
    )
    return function.Args(posargs=(args.posargs[0], arg1, arg2, arg3))
  def _hash_call(self, callargs, frame):
    """Compute a cache key for a call with the given arguments and frame.

    Args:
      callargs: The arguments the function is being called with.
      frame: The frame in which the call runs.

    Returns:
      An int key for self._call_cache. Calls on the object currently being
      analyzed (top of callself_stack), and all calls when skip_repeat_calls
      is off, get a fresh key so they are never deduplicated.
    """
    # Note that we ignore caching in __init__ calls, so that attributes are
    # set correctly.
    if self.cache_return:
      # cache-return is a pragma, and overrides any other heuristics
      # Return a fixed key that is unlikely to collide with the call-specific
      # key computed in the next branch.
      log.info("cache-return set for function %s", self.name)
      callkey = 0x12345678
    elif self.ctx.options.skip_repeat_calls and (
        "self" not in callargs
        or not self.ctx.callself_stack
        or callargs["self"].data != self.ctx.callself_stack[-1].data
    ):
      if frame.f_locals == self.ctx.convert.unsolvable:
        local_members = {}
      else:
        local_members = frame.f_locals.members
      # Key on the args plus the visible global/local environment so an
      # identical environment can reuse the cached result.
      callkey = _hash_all_dicts(
          (callargs, None),
          (frame.f_globals.members, set(self.code.names)),
          (local_members, set(local_members) - set(self.code.varnames)),
      )
    else:
      # Make the callkey the number of times this function has been called so
      # that no call has the same key as a previous one.
      callkey = len(self._call_cache)
    return callkey
  def _paramspec_signature(
      self,
      callable_type: _classes.ParameterizedClass,
      substs: "list[matcher.GoodMatch]",
  ) -> function.Signature | None:
    """Expand a ParamSpec-parameterized Callable into a concrete signature.

    Args:
      callable_type: A Callable annotation whose first type parameter is a
        ParamSpec, optionally wrapped in Concatenate.
      substs: Substitutions produced by the matcher.

    Returns:
      The expanded signature, or None if substs holds no entry for the
      ParamSpec.
    """
    # Unpack the paramspec substitution we have created in the matcher.
    rhs = callable_type.formal_type_parameters[0]
    if isinstance(rhs, _abstract.Concatenate):
      r_pspec = rhs.paramspec
      r_args = rhs.args
    else:
      r_pspec = rhs
      r_args = ()
    # TODO(b/217789659): Handle substs[] with multiple entries
    data = substs[0].get(r_pspec.name)  # pytype: disable=attribute-error
    if not data:
      return
    pspec_match = abstract_utils.get_atomic_value(data)
    return_value = callable_type.formal_type_parameters[abstract_utils.RET]
    return function.build_paramspec_signature(
        pspec_match, r_args, return_value, self.ctx
    )
  def _handle_paramspec(
      self,
      sig: function.Signature,
      annotations: "dict[str, _classes.CallableClass | _classes.ParameterizedClass]",
      substs: "list[matcher.GoodMatch]",
      callargs: function.Args,
  ) -> None:
    """Fill in ParamSpec vars in `annotations`, in place.

    Only runs when the return annotation is a ParamSpec-based Callable; in
    that case both the return annotation and any parameter annotations that
    are ParamSpec-based Callables are replaced with concrete Callable
    annotations built from the matcher's substitutions.

    Args:
      sig: The matched signature.
      annotations: The annotations dict to update in place.
      substs: Substitutions produced by the matcher.
      callargs: The passed arguments.
    """
    if not sig.has_return_annotation:
      return
    retval = sig.annotations["return"]
    if not (
        isinstance(retval, _abstract.CallableClass) and retval.has_paramspec()
    ):
      return
    ret_sig = self._paramspec_signature(retval, substs)
    if ret_sig:
      ret_annot = self.ctx.pytd_convert.signature_to_callable(ret_sig)
      annotations["return"] = ret_annot
    for name, _, annot in sig.iter_args(callargs):
      if isinstance(annot, _abstract.CallableClass) and annot.has_paramspec():
        param_sig = self._paramspec_signature(annot, substs)
        if param_sig:
          param_annot = self.ctx.pytd_convert.signature_to_callable(param_sig)
          annotations[name] = param_annot
  def call(
      self,
      node: "cfg.CFGNode",
      func: "cfg.Binding",
      args: function.Args,
      alias_map: "datatypes.UnionFind | None" = None,
      new_locals=False,
      frame_substs=(),
  ) -> "tuple[cfg.CFGNode, cfg.Variable]":
    """Call this function with the given arguments.

    Matches the args against the signature(s), then runs the function body
    in a new frame (or reuses a cached result for a repeated call).

    Args:
      node: The current CFG node.
      func: The binding through which this function is being called.
      args: The passed arguments.
      alias_map: Optionally, aliasing information for type parameters.
      new_locals: Whether the new frame should get a fresh locals dict.
      frame_substs: Extra type parameter substitutions for the new frame.

    Returns:
      A (node after the call, variable holding the return value) tuple.

    Raises:
      error_types.NotCallable: If this function is an overload; only the
        implementation may be called.
      error_types.FailedFunctionCall: If the args match no signature.
    """
    if self.is_overload:
      raise error_types.NotCallable(self)
    args = self._fix_args_for_unannotated_contextmanager_exit(node, func, args)
    args = args.simplify(node, self.ctx, self.signature)
    sig, substs, callargs = self._find_matching_sig(node, args, alias_map)
    if sig is not self.signature:
      # We've matched an overload; remap the callargs using the implementation
      # so that optional parameters, etc, are correctly defined.
      callargs = self._map_args(node, args)
    self_arg = sig.get_self_arg(callargs)
    annotation_substs = substs
    annotations = sig.annotations.copy()
    # Fill in any ParamSpec vars in the annotations
    self._handle_paramspec(sig, annotations, substs, args)
    # Adds type parameter substitutions from all containing classes. Note that
    # lower frames (ones closer to the end of self.ctx.vm.frames) take
    # precedence over higher ones.
    for frame in reversed(self.ctx.vm.frames):
      annotation_substs = abstract_utils.combine_substs(  # pytype: disable=wrong-arg-types
          frame.substs, annotation_substs
      )
    # Keep type parameters without substitutions, as they may be needed for
    # type-checking down the road.
    annotations = self.ctx.annotation_utils.sub_annotations(
        node, annotations, annotation_substs, instantiate_unbound=False
    )
    # At maximum depth, skip analysis and synthesize a return value from the
    # annotation (except for __init__, which must run to set attributes).
    if self.ctx.vm.is_at_maximum_depth() and not self.name.endswith(
        ".__init__"
    ):
      log.info("Maximum depth reached. Not analyzing %r", self.name)
      self._set_callself_maybe_missing_members()
      if "return" not in annotations:
        return node, self.ctx.new_unsolvable(node)
      ret = self.ctx.vm.init_class(node, annotations["return"])
      if self.is_unannotated_coroutine():
        ret = _instances.Coroutine(self.ctx, ret, node).to_variable(node)
      return node, ret
    first_arg = sig.get_first_arg(callargs)
    if first_arg and sig.has_return_annotation:
      typeguard_return = function.handle_typeguard(
          node,
          function.AbstractReturnType(annotations["return"], self.ctx),
          first_arg,
          self.ctx,
          func_name=self.name,
      )
    else:
      typeguard_return = None
    if sig.has_param_annotations:
      if self_arg:
        try:
          maybe_container = abstract_utils.get_atomic_value(self_arg)
        except abstract_utils.ConversionError:
          container = None
        else:
          cls = maybe_container.cls
          if (
              isinstance(cls, _classes.InterpreterClass)
              or isinstance(cls, _classes.ParameterizedClass)
              and isinstance(cls.base_cls, _classes.InterpreterClass)
          ):
            container = maybe_container
          else:
            container = None
      else:
        container = None
      # Initialize annotated parameters (skipping `self` for methods).
      for name in callargs:
        if name in annotations and (
            not self.is_attribute_of_class
            or self.argcount(node) == 0
            or name != sig.param_names[0]
        ):
          extra_key = (self.get_first_opcode(), name)
          node, callargs[name] = self.ctx.annotation_utils.init_annotation(
              node,
              name,
              annotations[name],
              container=container,
              extra_key=extra_key,
          )
    mutations = self._mutations_generator(node, self_arg, substs)
    node = abstract_utils.apply_mutations(node, mutations)
    if substs:
      frame_substs = tuple(itertools.chain(frame_substs, substs))
    try:
      frame = self.ctx.vm.make_frame(
          node,
          self.code,
          self.f_globals,
          self.f_locals,
          callargs,
          self.closure,
          new_locals=new_locals,
          func=func,
          first_arg=self_arg or first_arg,
          substs=frame_substs,
      )
    except self.ctx.vm.VirtualMachineRecursionError:
      # If we've encountered recursion in a constructor, then we have another
      # incompletely initialized instance of the same class (or a subclass) at
      # the same node. (See, e.g., testRecursiveConstructor and
      # testRecursiveConstructorSubclass in test_classes.ClassesTest.) If we
      # allow the VirtualMachineRecursionError to be raised, initialization of
      # that first instance will be aborted. Instead, mark this second instance
      # as incomplete.
      self._set_callself_maybe_missing_members()
      return node, self.ctx.new_unsolvable(node)
    caller_is_abstract = _check_classes(self_arg, lambda cls: cls.is_abstract)
    caller_is_protocol = _check_classes(self_arg, lambda cls: cls.is_protocol)
    # We should avoid checking the return value against any return annotation
    # when we are analyzing an attribute of a protocol or an abstract class's
    # abstract method.
    check_return = not (
        self.is_attribute_of_class and caller_is_protocol
    ) and not (caller_is_abstract and self.is_abstract)
    if sig.has_return_annotation or not check_return:
      frame.allowed_returns = annotations.get(
          "return", self.ctx.convert.unsolvable
      )
      frame.check_return = check_return
    callkey_pre = self._hash_call(callargs, frame)
    if callkey_pre in self._call_cache:
      old_ret, old_remaining_depth = self._call_cache[callkey_pre]
      # Optimization: This function has already been called, with the same
      # environment and arguments, so recycle the old return value.
      # We would want to skip this optimization and reanalyze the call if we can
      # traverse the function deeper.
      if self.ctx.vm.remaining_depth() > old_remaining_depth:
        # TODO(rechen): Reanalysis is necessary only if the VM was unable to
        # completely analyze the call with old_remaining_depth. For now, we can
        # get away with not checking for completion because of how severely
        # --quick constrains the maximum depth.
        log.info(
            "Reanalyzing %r because we can traverse deeper; "
            "remaining_depth = %d, old_remaining_depth = %d",
            self.name,
            self.ctx.vm.remaining_depth(),
            old_remaining_depth,
        )
      else:
        log.info("Skipping call to %r and using cached return", self.name)
        ret = typeguard_return or old_ret.AssignToNewVariable(node)
        if self._store_call_records:
          # Even if the call is cached, we might not have been recording it.
          self._call_records.append((callargs, ret, node))
        return node, ret
    if self.code.has_generator():
      generator = _instances.Generator(frame, self.ctx)
      # Run the generator right now, even though the program didn't call it,
      # because we need to know the contained type for further matching.
      node2, _ = generator.run_generator(node)
      if self.is_coroutine():
        # This function is a generator-based coroutine. We convert the return
        # value here even though byte_GET_AWAITABLE repeats the conversion so
        # that matching against a typing.Awaitable annotation succeeds.
        var = generator.get_instance_type_parameter(abstract_utils.V)
        ret = _instances.Coroutine(self.ctx, var, node2).to_variable(node2)
      else:
        ret = generator.to_variable(node2)
      node_after_call = node2
    elif self.code.has_async_generator():
      async_generator = _instances.AsyncGenerator(frame, self.ctx)
      node2, _ = async_generator.run_generator(node)
      node_after_call, ret = node2, async_generator.to_variable(node2)
    else:
      # If any parameters are annotated as Any, we add the annotations to the
      # new frame's dictionary of local variable annotations, so that
      # vm._apply_annotation will treat these as explicit Any annotations that
      # disable inference.
      annotated_locals = {}
      for name, annot in annotations.items():
        if name != "return" and annot == self.ctx.convert.unsolvable:
          annotated_locals[name] = abstract_utils.Local(
              node, self.get_first_opcode(), annot, callargs.get(name), self.ctx
          )
      # Log start and end of running the function frame, for quick profiling
      indent = " " * (len(self.ctx.vm.frames) - 1)
      log.info("%s Start running frame for %r", indent, self.name)
      node2, ret = self.ctx.vm.run_frame(frame, node, annotated_locals)
      log.info("%s Finished running frame for %r", indent, self.name)
      if self.is_unannotated_coroutine():
        ret = _instances.Coroutine(self.ctx, ret, node2).to_variable(node2)
      node_after_call = node2
      self._inner_cls_check(frame)
    # Recompute the callkey so that side effects are taken into account.
    callkey_post = self._hash_call(callargs, frame)
    self._call_cache[callkey_post] = ret, self.ctx.vm.remaining_depth()
    if self._store_call_records:
      self._call_records.append((callargs, ret, node_after_call))
    self.last_frame = frame
    return node_after_call, typeguard_return or ret
  def get_call_combinations(
      self, node: "cfg.CFGNode"
  ) -> "list[tuple[cfg.CFGNode, Mapping[str, cfg.Binding], cfg.Binding]]":
    """Get this function's call records.

    Returns (node, arg bindings, return binding) tuples, deduplicated by
    data and filtered to combinations visible at the post-call node. If
    there are no recorded calls (or none are visible), a single fallback
    combination of unsolvable values at `node` is returned.
    """
    all_combinations = []
    signature_data = set()
    for callargs, ret, node_after_call in self._call_records:
      try:
        combinations = cfg_utils.variable_product_dict(callargs)
      except cfg_utils.TooComplexError:
        # Too many arg combinations: collapse to one all-unsolvable entry.
        combination = {
            name: self.ctx.convert.unsolvable.to_binding(node_after_call)
            for name in callargs
        }
        combinations = [combination]
        ret = self.ctx.new_unsolvable(node_after_call)
      else:
        if any(
            retval == self.ctx.convert.unsolvable
            for retval in ret.Data(node_after_call)
        ):
          ret = self.ctx.new_unsolvable(node_after_call)
      for combination in combinations:
        for return_value in ret.bindings:
          values = list(combination.values()) + [return_value]
          data = tuple(v.data for v in values)
          if data in signature_data:
            # This combination yields a signature we already know is possible
            continue
          # Optimization: when only one combination exists, assume it's visible.
          if (
              len(combinations) == 1
              and len(ret.bindings) == 1
              or node_after_call.HasCombination(values)
          ):
            signature_data.add(data)
            all_combinations.append(
                (node_after_call, combination, return_value)
            )
    if not all_combinations:
      # Fallback: Generate signatures only from the definition of the
      # method, not the way it's being used.
      param_binding = self.ctx.convert.unsolvable.to_binding(node)
      params = collections.defaultdict(lambda: param_binding)
      ret = self.ctx.convert.unsolvable.to_binding(node)
      all_combinations.append((node, params, ret))
    return all_combinations
  def get_positional_names(self) -> list[str]:
    """Return the names of all positional parameters, in order."""
    return list(self.code.varnames[: self.code.argcount])

  def get_nondefault_params(self) -> Generator[tuple[str, bool], None, None]:
    """Yield (name, is_kwonly) for every non-star parameter."""
    for i in range(self.nonstararg_count):
      # Parameters at or past argcount are keyword-only.
      yield self.code.varnames[i], i >= self.code.argcount

  def get_kwonly_names(self) -> list[str]:
    """Return the names of all keyword-only parameters, in order."""
    return list(self.code.varnames[self.code.argcount : self.nonstararg_count])
def get_parameters(
self,
) -> Generator[tuple[str, pytd.ParameterKind, bool], None, None]:
default_pos = self.code.argcount - len(self.defaults)
i = 0
for name in self.get_positional_names():
if i < self.posonlyarg_count:
kind = pytd.ParameterKind.POSONLY
else:
kind = pytd.ParameterKind.REGULAR
yield name, kind, i >= default_pos
i += 1
for name in self.get_kwonly_names():
yield name, pytd.ParameterKind.KWONLY, name in self.kw_defaults
i += 1
  def has_varargs(self) -> bool:
    """Return True if the function accepts *args."""
    return self.code.has_varargs()

  def has_kwargs(self) -> bool:
    """Return True if the function accepts **kwargs."""
    return self.code.has_varkeywords()
  def property_get(
      self, callself: "cfg.Variable", is_class: bool = False
  ) -> _function_base.BoundFunction | _function_base.Function:
    """Bind this function to an object (see the base class).

    Also reports and strips any illegal annotation on the `self` parameter
    of an __init__ method, and marks all overloads as class attributes.
    """
    if self.name.endswith(".__init__") and self.signature.param_names:
      self_name = self.signature.param_names[0]
      # If `_has_self_annot` is True, then we've intentionally temporarily
      # annotated `self`; otherwise, a `self` annotation is illegal.
      if not self._has_self_annot and self_name in self.signature.annotations:
        self.ctx.errorlog.invalid_annotation(
            self.ctx.vm.simple_stack(self.get_first_opcode()),
            self.signature.annotations[self_name],
            details="Cannot annotate self argument of __init__",
            name=self_name,
        )
        self.signature.del_annotation(self_name)
    for f in self._all_overloads:
      f.is_attribute_of_class = True
    return super().property_get(callself, is_class)
  def is_coroutine(self):
    """Return True for async functions and types.coroutine-wrapped ones."""
    # has_iterable_coroutine() is set by the types.coroutine decorator.
    return self.code.has_coroutine() or self.code.has_iterable_coroutine()

  def is_unannotated_coroutine(self):
    """Return True for a coroutine that lacks a return annotation."""
    return self.is_coroutine() and not self.signature.has_return_annotation
def has_empty_body(self) -> bool:
# TODO(mdemello): Optimise this.
ops = list(self.code.code_iter)
if self.ctx.python_version >= (3, 12):
empty_body_ops = ["RESUME", "RETURN_CONST"]
op_with_ret_value = 1
elif self.ctx.python_version >= (3, 11):
empty_body_ops = ["RESUME", "LOAD_CONST", "RETURN_VALUE"]
op_with_ret_value = 1
else:
empty_body_ops = ["LOAD_CONST", "RETURN_VALUE"]
op_with_ret_value = 0
if len(ops) != len(empty_body_ops):
# This check isn't strictly necessary but prevents us from wastefully
# building a list of opcode names for a long method.
return False
if [op.name for op in ops] != empty_body_ops:
return False
return (
self.code.consts[
ops[op_with_ret_value].arg # pytype: disable=attribute-error
]
is None
)
def get_self_type_param(self) -> "_base.BaseValue | None":
if param := super().get_self_type_param():
return param
if self.is_overload:
return None
for f in self._all_overloads:
if param := f.get_self_type_param():
return param
return None
  @contextlib.contextmanager
  def set_self_annot(
      self, annot_class: "_base.BaseValue | None"
  ) -> Generator[None, None, None]:
    """Temporarily set the `self` annotation on this function.

    When active overloads exist, the annotation is applied to every active
    overload instead of to this function itself.
    """
    if self.is_overload or not self._active_overloads:
      with super().set_self_annot(annot_class):
        yield
      return
    with contextlib.ExitStack() as stack:
      # Enter every overload's context at once; ExitStack unwinds them all
      # when this context exits.
      for f in self._active_overloads:
        stack.enter_context(f.set_self_annot(annot_class))
      yield
| InterpreterFunction |
python | walkccc__LeetCode | solutions/1423. Maximum Points You Can Obtain from Cards/1423.py | {
"start": 0,
"end": 347
} | class ____:
def maxScore(self, cardPoints: list[int], k: int) -> int:
n = len(cardPoints)
summ = sum(cardPoints)
windowSum = sum(cardPoints[:n - k])
ans = summ - windowSum
for i in range(k):
windowSum -= cardPoints[i]
windowSum += cardPoints[i + n - k]
ans = max(ans, summ - windowSum)
return ans
| Solution |
python | aio-libs__aiohttp | tests/test_web_response.py | {
"start": 36601,
"end": 44723
} | class ____(io.IOBase):
    def __init__(self) -> None:
        # Chunks are stored in reverse: read() pops from the end, so the first
        # read returns b"test", followed by two empty reads.
        self._lines = [b"", b"", b"test"]

    def read(self, size: int = -1) -> bytes:
        # *size* is ignored; each call returns the next queued chunk.
        return self._lines.pop()
@pytest.mark.parametrize(
    "payload,expected",
    (
        ("test", "test"),
        (CustomIO(), "test"),
        (io.StringIO("test"), "test"),
        (io.TextIOWrapper(io.BytesIO(b"test")), "test"),
        (io.BytesIO(b"test"), "test"),
        (io.BufferedReader(io.BytesIO(b"test")), "test"),
        # expected=None marks payloads for which .text must raise TypeError.
        (async_iter(), None),
        (BodyPartReader(b"x", CIMultiDictProxy(CIMultiDict()), mock.Mock()), None),
        (
            mpwriter,
            "--x\r\nContent-Type: text/plain; charset=utf-8\r\nContent-Length: 4\r\n\r\ntest",
        ),
    ),
)
def test_payload_body_get_text(payload: object, expected: str | None) -> None:
    """Response.text decodes readable payloads and raises on async ones."""
    resp = web.Response(body=payload)
    if expected is None:
        with pytest.raises(TypeError):
            resp.text
    else:
        assert resp.text == expected
def test_response_set_content_length() -> None:
    # content_length is derived from the body; direct assignment is rejected.
    response = web.Response()
    with pytest.raises(RuntimeError):
        response.content_length = 1
async def test_send_headers_for_empty_body(
    buf: bytearray, writer: AbstractStreamWriter
) -> None:
    # Even a bodyless response emits a complete header block.
    request = make_request("GET", "/", writer=writer)
    response = web.Response()
    await response.prepare(request)
    await response.write_eof()
    lines = buf.decode("utf8").split("\r\n")
    assert len(lines) == 6
    assert lines[0] == "HTTP/1.1 200 OK"
    assert lines[1] == "Content-Length: 0"
    assert lines[2].startswith("Date: ")
    assert lines[3].startswith("Server: ")
    assert lines[4] == lines[5] == ""
async def test_render_with_body(buf: bytearray, writer: AbstractStreamWriter) -> None:
    # A bytes body adds Content-Length/Content-Type headers plus the payload.
    request = make_request("GET", "/", writer=writer)
    response = web.Response(body=b"data")
    await response.prepare(request)
    await response.write_eof()
    lines = buf.decode("utf8").split("\r\n")
    assert len(lines) == 7
    assert lines[0] == "HTTP/1.1 200 OK"
    assert lines[1] == "Content-Length: 4"
    assert lines[2] == "Content-Type: application/octet-stream"
    assert lines[3].startswith("Date: ")
    assert lines[4].startswith("Server: ")
    assert lines[5] == ""
    assert lines[6] == "data"
async def test_multiline_reason(buf: bytearray, writer: AbstractStreamWriter) -> None:
    # Header injection through the reason phrase must fail at construction.
    injected_reason = "Bad\r\nInjected-header: foo"
    with pytest.raises(ValueError, match=r"Reason cannot contain \\n"):
        web.Response(reason=injected_reason)
async def test_send_set_cookie_header(
    buf: bytearray, writer: AbstractStreamWriter
) -> None:
    # A cookie set before prepare() is emitted as a Set-Cookie header.
    response = web.Response()
    response.cookies["name"] = "value"
    request = make_request("GET", "/", writer=writer)
    await response.prepare(request)
    await response.write_eof()
    lines = buf.decode("utf8").split("\r\n")
    assert len(lines) == 7
    assert lines[0] == "HTTP/1.1 200 OK"
    assert lines[1] == "Content-Length: 0"
    assert lines[2] == "Set-Cookie: name=value"
    assert lines[3].startswith("Date: ")
    assert lines[4].startswith("Server: ")
    assert lines[5] == lines[6] == ""
async def test_consecutive_write_eof() -> None:
    # A second write_eof() must be a no-op: the payload is written only once.
    writer = mock.Mock()
    writer.write_eof = mock.AsyncMock()
    writer.write_headers = mock.AsyncMock()
    request = make_request("GET", "/", writer=writer)
    payload = b"data"
    response = web.Response(body=payload)
    await response.prepare(request)
    await response.write_eof()
    await response.write_eof()
    writer.write_eof.assert_called_once_with(payload)
def test_set_text_with_content_type() -> None:
    # Assigning .text must not clobber a previously set content type.
    response = web.Response()
    response.content_type = "text/html"
    response.text = "text"
    assert response.text == "text"
    assert response.body == b"text"
    assert response.content_type == "text/html"
def test_set_text_with_charset() -> None:
    # An explicit charset is used to encode subsequently assigned text.
    response = web.Response()
    response.content_type = "text/plain"
    response.charset = "KOI8-R"
    response.text = "текст"
    assert response.text == "текст"
    assert response.body == "текст".encode("koi8-r")
    assert response.charset == "koi8-r"
def test_default_content_type_in_stream_response() -> None:
    # With no body or explicit type, StreamResponse defaults to octet-stream.
    resp = web.StreamResponse()
    assert resp.content_type == "application/octet-stream"


def test_default_content_type_in_response() -> None:
    # Response gets the same default as StreamResponse.
    resp = web.Response()
    assert resp.content_type == "application/octet-stream"


def test_content_type_with_set_text() -> None:
    # A text body switches the default content type to text/plain.
    resp = web.Response(text="text")
    assert resp.content_type == "text/plain"


def test_content_type_with_set_body() -> None:
    # A bytes body keeps the octet-stream default.
    resp = web.Response(body=b"body")
    assert resp.content_type == "application/octet-stream"
def test_prepared_when_not_started() -> None:
    # A freshly created response has not been prepared yet.
    resp = web.StreamResponse()
    assert not resp.prepared


async def test_prepared_when_started() -> None:
    # prepare() flips the prepared flag.
    resp = web.StreamResponse()
    await resp.prepare(make_request("GET", "/"))
    assert resp.prepared


async def test_prepared_after_eof() -> None:
    # The flag stays set after the body has been fully written.
    resp = web.StreamResponse()
    await resp.prepare(make_request("GET", "/"))
    await resp.write(b"data")
    await resp.write_eof()
    assert resp.prepared


async def test_drain_before_start() -> None:
    # drain() requires prepare() to have been called first.
    resp = web.StreamResponse()
    with pytest.raises(AssertionError):
        await resp.drain()
async def test_changing_status_after_prepare_raises() -> None:
    # Once prepared, the status can no longer be changed.
    resp = web.StreamResponse()
    await resp.prepare(make_request("GET", "/"))
    with pytest.raises(AssertionError):
        resp.set_status(400)


def test_nonstr_text_in_ctor() -> None:
    # text= accepts str only; bytes must go through body=.
    with pytest.raises(TypeError):
        web.Response(text=b"data")  # type: ignore[arg-type]
def test_text_in_ctor_with_content_type() -> None:
    # An explicit content_type overrides the text/plain default for text.
    response = web.Response(text="data", content_type="text/html")
    assert response.text == "data"
    assert response.content_type == "text/html"
def test_text_in_ctor_with_content_type_header() -> None:
    # The charset in an explicit Content-Type header drives text encoding.
    resp = web.Response(
        text="текст", headers={"Content-Type": "text/html; charset=koi8-r"}
    )
    assert "текст".encode("koi8-r") == resp.body
    assert "text/html" == resp.content_type
    assert "koi8-r" == resp.charset


def test_text_in_ctor_with_content_type_header_multidict() -> None:
    # Same as above, with headers passed as a CIMultiDict.
    headers = CIMultiDict({"Content-Type": "text/html; charset=koi8-r"})
    resp = web.Response(text="текст", headers=headers)
    assert "текст".encode("koi8-r") == resp.body
    assert "text/html" == resp.content_type
    assert "koi8-r" == resp.charset


def test_body_in_ctor_with_content_type_header_multidict() -> None:
    # A pre-encoded bytes body is stored as-is; the header is still parsed.
    headers = CIMultiDict({"Content-Type": "text/html; charset=koi8-r"})
    resp = web.Response(body="текст".encode("koi8-r"), headers=headers)
    assert "текст".encode("koi8-r") == resp.body
    assert "text/html" == resp.content_type
    assert "koi8-r" == resp.charset
def test_text_with_empty_payload() -> None:
resp = web.Response(status=200)
assert resp.body is None
assert resp.text is None
def test_response_with_content_length_header_without_body() -> None:
resp = web.Response(headers={"Content-Length": "123"})
assert resp.content_length == 123
def test_response_with_immutable_headers() -> None:
resp = web.Response(
text="text", headers=CIMultiDictProxy(CIMultiDict({"Header": "Value"}))
)
assert resp.headers == {
"Header": "Value",
"Content-Type": "text/plain; charset=utf-8",
}
async def test_response_prepared_after_header_preparation() -> None:
req = make_request("GET", "/")
resp = web.StreamResponse()
await resp.prepare(req)
assert type(resp.headers["Server"]) is str
async def _strip_server(req: web.Request, res: web.Response) -> None:
assert "Server" in res.headers
if "Server" in res.headers:
del res.headers["Server"]
app = mock.create_autospec(web.Application, spec_set=True)
app.on_response_prepare = aiosignal.Signal(app)
app.on_response_prepare.append(_strip_server)
req = make_request("GET", "/", app=app)
resp = web.StreamResponse()
await resp.prepare(req)
assert "Server" not in resp.headers
def test_weakref_creation() -> None:
resp = web.Response()
weakref.ref(resp)
| CustomIO |
python | tensorflow__tensorflow | tensorflow/python/types/internal.py | {
"start": 1019,
"end": 1170
} | class ____(object):
"""Interface for internal isinstance checks to framework/type_spec.py.
This helps to avoid circular dependencies.
"""
| TypeSpec |
python | instagram__MonkeyType | monkeytype/type_checking_imports_transformer.py | {
"start": 5688,
"end": 7745
} | class ____(CSTTransformer):
def __init__(
self,
import_items_to_be_removed: List[ImportItem],
) -> None:
super().__init__()
self.import_items_to_be_removed = import_items_to_be_removed
def leave_Import(
self, original_node: Import, updated_node: Import
) -> Union[
BaseSmallStatement, FlattenSentinel[BaseSmallStatement], RemovalSentinel
]:
names_to_keep = []
for name in updated_node.names:
module_name = name.evaluated_name
found = False
for import_item in self.import_items_to_be_removed:
if import_item.module_name == module_name:
found = True
break
if not found:
names_to_keep.append(name.with_changes(comma=MaybeSentinel.DEFAULT))
if not names_to_keep:
return RemoveFromParent()
else:
return updated_node.with_changes(names=names_to_keep)
def leave_ImportFrom(
self, original_node: ImportFrom, updated_node: ImportFrom
) -> Union[
BaseSmallStatement, FlattenSentinel[BaseSmallStatement], RemovalSentinel
]:
if isinstance(updated_node.names, ImportStar):
return updated_node
names_to_keep = []
module_name = get_absolute_module_from_package_for_import(None, updated_node)
for name in updated_node.names:
name_value = name.name.value
found = False
for import_item in self.import_items_to_be_removed:
if (
import_item.module_name == module_name
and import_item.obj_name == name_value
):
found = True
break
if not found:
names_to_keep.append(name.with_changes(comma=MaybeSentinel.DEFAULT))
if not names_to_keep:
return RemoveFromParent()
else:
return updated_node.with_changes(names=names_to_keep)
| RemoveImportsTransformer |
python | dask__distributed | distributed/shuffle/_disk.py | {
"start": 3017,
"end": 8248
} | class ____(ShardsBuffer):
"""Accept, buffer, and write many small objects to many files
This takes in lots of small objects, writes them to a local directory, and
then reads them back when all writes are complete. It buffers these
objects in memory so that it can optimize disk access for larger writes.
**State**
- shards: dict[str, list[bytes]]
This is our in-memory buffer of data waiting to be written to files.
- sizes: dict[str, int]
The size of each list of shards. We find the largest and write data from that buffer
Parameters
----------
directory : str or pathlib.Path
Where to write and read data. Ideally points to fast disk.
memory_limiter : ResourceLimiter
Limiter for in-memory buffering (at most this much data)
before writes to disk occur. If the incoming data that has yet
to be processed exceeds this limit, then the buffer will block
until below the threshold. See :meth:`.write` for the
implementation of this scheme.
"""
def __init__(
self,
directory: str | pathlib.Path,
read: Callable[[pathlib.Path], tuple[Any, int]],
memory_limiter: ResourceLimiter,
):
super().__init__(
memory_limiter=memory_limiter,
# Disk is not able to run concurrently atm
concurrency_limit=1,
)
self.directory = pathlib.Path(directory)
self.directory.mkdir(exist_ok=True)
self._closed = False
self._read = read
self._directory_lock = ReadWriteLock()
@log_errors
async def _process(self, id: str, shards: list[Any]) -> None:
"""Write one buffer to file
This function was built to offload the disk IO, but since then we've
decided to keep this within the event loop (disk bandwidth should be
prioritized, and writes are typically small enough to not be a big
deal).
Most of the logic here is about possibly going back to a separate
thread, or about diagnostics. If things don't change much in the
future then we should consider simplifying this considerably and
dropping the write into communicate above.
"""
frames: Iterable[bytes | bytearray | memoryview]
if isinstance(shards[0], bytes):
# Manually serialized dataframes
frames = shards
serialize_meter_ctx: Any = empty_context
else:
# Unserialized numpy arrays
# Note: no calls to pickle_bytelist will happen until we actually start
# writing to disk below.
frames = concat(pickle_bytelist(shard) for shard in shards)
serialize_meter_ctx = context_meter.meter("serialize", func=thread_time)
with (
self._directory_lock.read(),
context_meter.meter("disk-write"),
serialize_meter_ctx,
):
# Consider boosting total_size a bit here to account for duplication
# We only need shared (i.e., read) access to the directory to write
# to a file inside of it.
if self._closed:
raise RuntimeError("Already closed")
try:
self._write_frames(frames, id)
except OSError as e:
if e.errno == errno.ENOSPC:
raise P2POutOfDiskError from e
raise
context_meter.digest_metric("disk-write", 1, "count")
context_meter.digest_metric("disk-write", sum(map(nbytes, frames)), "bytes")
def _write_frames(
self, frames: Iterable[bytes | bytearray | memoryview], id: str
) -> None:
with open(self.directory / str(id), mode="ab") as f:
f.writelines(frames)
def read(self, id: str) -> Any:
"""Read a complete file back into memory"""
self.raise_on_exception()
if not self._inputs_done:
raise RuntimeError("Tried to read from file before done.")
try:
with self._directory_lock.read():
if self._closed:
raise RuntimeError("Already closed")
fname = (self.directory / str(id)).resolve()
# Note: don't add `with context_meter.meter("p2p-disk-read"):` to
# measure seconds here, as it would shadow "p2p-get-output-cpu" and
# "p2p-get-output-noncpu". Also, for rechunk it would not measure
# the whole disk access, as _read returns memory-mapped buffers.
data, size = self._read(fname)
context_meter.digest_metric("p2p-disk-read", 1, "count")
context_meter.digest_metric("p2p-disk-read", size, "bytes")
except FileNotFoundError:
raise DataUnavailable(id)
if data:
self.bytes_read += size
return data
else:
raise DataUnavailable(id)
async def close(self) -> None:
await super().close()
with self._directory_lock.write():
self._closed = True
with contextlib.suppress(FileNotFoundError):
shutil.rmtree(self.directory)
| DiskShardsBuffer |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/integration/cloud/aws.py | {
"start": 510,
"end": 2646
} | class ____(CloudProvider):
"""AWS cloud provider plugin. Sets up cloud resources before delegation."""
def __init__(self, args: IntegrationConfig) -> None:
super().__init__(args)
self.uses_config = True
def filter(self, targets: tuple[IntegrationTarget, ...], exclude: list[str]) -> None:
"""Filter out the cloud tests when the necessary config and resources are not available."""
aci = self._create_ansible_core_ci()
if aci.available:
return
super().filter(targets, exclude)
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
super().setup()
aws_config_path = os.path.expanduser('~/.aws')
if os.path.exists(aws_config_path) and isinstance(self.args.controller, OriginConfig):
raise ApplicationError('Rename "%s" or use the --docker or --remote option to isolate tests.' % aws_config_path)
if not self._use_static_config():
self._setup_dynamic()
def _setup_dynamic(self) -> None:
"""Request AWS credentials through the Ansible Core CI service."""
display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
config = self._read_config_template()
aci = self._create_ansible_core_ci()
response = aci.start()
if not self.args.explain:
credentials = response['aws']['credentials']
values = dict(
ACCESS_KEY=credentials['access_key'],
SECRET_KEY=credentials['secret_key'],
SECURITY_TOKEN=credentials['session_token'],
REGION='us-east-1',
)
display.sensitive.add(values['SECRET_KEY'])
display.sensitive.add(values['SECURITY_TOKEN'])
config = self._populate_config_template(config, values)
self._write_config(config)
def _create_ansible_core_ci(self) -> AnsibleCoreCI:
"""Return an AWS instance of AnsibleCoreCI."""
return AnsibleCoreCI(self.args, CloudResource(platform='aws'))
| AwsCloudProvider |
python | getsentry__sentry | src/sentry/utils/snuba.py | {
"start": 14849,
"end": 14961
} | class ____(QueryExecutionError):
"""
Exception raised when a column is missing.
"""
| QueryMissingColumn |
python | sqlalchemy__sqlalchemy | test/dialect/mssql/test_types.py | {
"start": 8549,
"end": 20270
} | class ____(fixtures.TestBase):
def test_boolean(self):
"Exercise type specification for boolean type."
columns = [
# column type, args, kwargs, expected ddl
(Boolean, [], {}, "BIT")
]
metadata = MetaData()
table_args = ["test_mssql_boolean", metadata]
for index, spec in enumerate(columns):
type_, args, kw, res = spec
table_args.append(
Column("c%s" % index, type_(*args, **kw), nullable=None)
)
boolean_table = Table(*table_args)
dialect = mssql.dialect()
gen = dialect.ddl_compiler(dialect, schema.CreateTable(boolean_table))
for col in boolean_table.c:
index = int(col.name[1:])
testing.eq_(
gen.get_column_specification(col),
"%s %s" % (col.name, columns[index][3]),
)
self.assert_(repr(col))
def test_numeric(self):
"Exercise type specification and options for numeric types."
columns = [
# column type, args, kwargs, expected ddl
(types.NUMERIC, [], {}, "NUMERIC"),
(types.NUMERIC, [None], {}, "NUMERIC"),
(types.NUMERIC, [12, 4], {}, "NUMERIC(12, 4)"),
(types.Float, [], {}, "FLOAT"),
(types.Float, [None], {}, "FLOAT"),
(types.Float, [12], {}, "FLOAT(12)"),
(mssql.MSReal, [], {}, "REAL"),
(types.Double, [], {}, "DOUBLE PRECISION"),
(types.Double, [53], {}, "DOUBLE PRECISION"),
(types.Integer, [], {}, "INTEGER"),
(types.BigInteger, [], {}, "BIGINT"),
(mssql.MSTinyInteger, [], {}, "TINYINT"),
(types.SmallInteger, [], {}, "SMALLINT"),
]
metadata = MetaData()
table_args = ["test_mssql_numeric", metadata]
for index, spec in enumerate(columns):
type_, args, kw, res = spec
table_args.append(
Column("c%s" % index, type_(*args, **kw), nullable=None)
)
numeric_table = Table(*table_args)
dialect = mssql.dialect()
gen = dialect.ddl_compiler(dialect, schema.CreateTable(numeric_table))
for col in numeric_table.c:
index = int(col.name[1:])
testing.eq_(
gen.get_column_specification(col),
"%s %s" % (col.name, columns[index][3]),
)
self.assert_(repr(col))
def test_char(self):
"""Exercise COLLATE-ish options on string types."""
columns = [
(mssql.MSChar, [], {}, "CHAR"),
(mssql.MSChar, [1], {}, "CHAR(1)"),
(
mssql.MSChar,
[1],
{"collation": "Latin1_General_CI_AS"},
"CHAR(1) COLLATE Latin1_General_CI_AS",
),
(mssql.MSNChar, [], {}, "NCHAR"),
(mssql.MSNChar, [1], {}, "NCHAR(1)"),
(
mssql.MSNChar,
[1],
{"collation": "Latin1_General_CI_AS"},
"NCHAR(1) COLLATE Latin1_General_CI_AS",
),
(mssql.MSString, [], {}, "VARCHAR(max)"),
(mssql.MSString, [1], {}, "VARCHAR(1)"),
(
mssql.MSString,
[1],
{"collation": "Latin1_General_CI_AS"},
"VARCHAR(1) COLLATE Latin1_General_CI_AS",
),
(mssql.MSNVarchar, [], {}, "NVARCHAR(max)"),
(mssql.MSNVarchar, [1], {}, "NVARCHAR(1)"),
(
mssql.MSNVarchar,
[1],
{"collation": "Latin1_General_CI_AS"},
"NVARCHAR(1) COLLATE Latin1_General_CI_AS",
),
(mssql.MSText, [], {}, "TEXT"),
(
mssql.MSText,
[],
{"collation": "Latin1_General_CI_AS"},
"TEXT COLLATE Latin1_General_CI_AS",
),
(mssql.MSNText, [], {}, "NTEXT"),
(
mssql.MSNText,
[],
{"collation": "Latin1_General_CI_AS"},
"NTEXT COLLATE Latin1_General_CI_AS",
),
]
metadata = MetaData()
table_args = ["test_mssql_charset", metadata]
for index, spec in enumerate(columns):
type_, args, kw, res = spec
table_args.append(
Column("c%s" % index, type_(*args, **kw), nullable=None)
)
charset_table = Table(*table_args)
dialect = mssql.dialect()
gen = dialect.ddl_compiler(dialect, schema.CreateTable(charset_table))
for col in charset_table.c:
index = int(col.name[1:])
testing.eq_(
gen.get_column_specification(col),
"%s %s" % (col.name, columns[index][3]),
)
self.assert_(repr(col))
@testing.combinations(
# column type, args, kwargs, expected ddl
(mssql.MSDateTime, [], {}, "DATETIME", None),
(types.DATE, [], {}, "DATE", None),
(types.Date, [], {}, "DATE", None),
(types.Date, [], {}, "DATETIME", MS_2005_VERSION),
(mssql.MSDate, [], {}, "DATE", None),
(mssql.MSDate, [], {}, "DATETIME", MS_2005_VERSION),
(types.TIME, [], {}, "TIME", None),
(types.Time, [], {}, "TIME", None),
(mssql.MSTime, [], {}, "TIME", None),
(mssql.MSTime, [1], {}, "TIME(1)", None),
(types.Time, [], {}, "DATETIME", MS_2005_VERSION),
(mssql.MSTime, [], {}, "TIME", None),
(mssql.MSSmallDateTime, [], {}, "SMALLDATETIME", None),
(mssql.MSDateTimeOffset, [], {}, "DATETIMEOFFSET", None),
(mssql.MSDateTimeOffset, [1], {}, "DATETIMEOFFSET(1)", None),
(mssql.MSDateTime2, [], {}, "DATETIME2", None),
(mssql.MSDateTime2, [0], {}, "DATETIME2(0)", None),
(mssql.MSDateTime2, [1], {}, "DATETIME2(1)", None),
(mssql.MSTime, [0], {}, "TIME(0)", None),
(mssql.MSDateTimeOffset, [0], {}, "DATETIMEOFFSET(0)", None),
(types.DateTime, [], {"timezone": True}, "DATETIMEOFFSET", None),
(types.DateTime, [], {"timezone": False}, "DATETIME", None),
argnames="type_, args, kw, res, server_version",
)
@testing.combinations((True,), (False,), argnames="use_type_descriptor")
@testing.combinations(
("base",), ("pyodbc",), ("pymssql",), argnames="driver"
)
def test_dates(
self, type_, args, kw, res, server_version, use_type_descriptor, driver
):
"Exercise type specification for date types."
if driver == "base":
from sqlalchemy.dialects.mssql import base
dialect = base.MSDialect()
elif driver == "pyodbc":
from sqlalchemy.dialects.mssql import pyodbc
dialect = pyodbc.dialect()
elif driver == "pymssql":
from sqlalchemy.dialects.mssql import pymssql
dialect = pymssql.dialect()
else:
assert False
if server_version:
dialect.server_version_info = server_version
else:
dialect.server_version_info = MS_2008_VERSION
metadata = MetaData()
typ = type_(*args, **kw)
if use_type_descriptor:
typ = dialect.type_descriptor(typ)
col = Column("date_c", typ, nullable=None)
date_table = Table("test_mssql_dates", metadata, col)
gen = dialect.ddl_compiler(dialect, schema.CreateTable(date_table))
testing.eq_(
gen.get_column_specification(col),
"%s %s"
% (
col.name,
res,
),
)
self.assert_(repr(col))
def test_large_type_deprecation(self):
d1 = mssql.dialect(deprecate_large_types=True)
d2 = mssql.dialect(deprecate_large_types=False)
d3 = mssql.dialect()
d3.server_version_info = (11, 0)
d3._setup_version_attributes()
d4 = mssql.dialect()
d4.server_version_info = (10, 0)
d4._setup_version_attributes()
for dialect in (d1, d3):
eq_(str(Text().compile(dialect=dialect)), "VARCHAR(max)")
eq_(str(UnicodeText().compile(dialect=dialect)), "NVARCHAR(max)")
eq_(str(LargeBinary().compile(dialect=dialect)), "VARBINARY(max)")
for dialect in (d2, d4):
eq_(str(Text().compile(dialect=dialect)), "TEXT")
eq_(str(UnicodeText().compile(dialect=dialect)), "NTEXT")
eq_(str(LargeBinary().compile(dialect=dialect)), "IMAGE")
def test_money(self):
"""Exercise type specification for money types."""
columns = [
(mssql.MSMoney, [], {}, "MONEY"),
(mssql.MSSmallMoney, [], {}, "SMALLMONEY"),
]
metadata = MetaData()
table_args = ["test_mssql_money", metadata]
for index, spec in enumerate(columns):
type_, args, kw, res = spec
table_args.append(
Column("c%s" % index, type_(*args, **kw), nullable=None)
)
money_table = Table(*table_args)
dialect = mssql.dialect()
gen = dialect.ddl_compiler(dialect, schema.CreateTable(money_table))
for col in money_table.c:
index = int(col.name[1:])
testing.eq_(
gen.get_column_specification(col),
"%s %s" % (col.name, columns[index][3]),
)
self.assert_(repr(col))
def test_binary(self):
"Exercise type specification for binary types."
columns = [
# column type, args, kwargs, expected ddl
(mssql.MSBinary, [], {}, "BINARY"),
(mssql.MSBinary, [10], {}, "BINARY(10)"),
(types.BINARY, [], {}, "BINARY"),
(types.BINARY, [10], {}, "BINARY(10)"),
(mssql.MSVarBinary, [], {}, "VARBINARY(max)"),
(mssql.MSVarBinary, [10], {}, "VARBINARY(10)"),
(types.VARBINARY, [10], {}, "VARBINARY(10)"),
(types.VARBINARY, [], {}, "VARBINARY(max)"),
(
mssql.MSVarBinary,
[],
{"filestream": True},
"VARBINARY(max) FILESTREAM",
),
(mssql.MSImage, [], {}, "IMAGE"),
(mssql.IMAGE, [], {}, "IMAGE"),
(types.LargeBinary, [], {}, "IMAGE"),
]
metadata = MetaData()
table_args = ["test_mssql_binary", metadata]
for index, spec in enumerate(columns):
type_, args, kw, res = spec
table_args.append(
Column("c%s" % index, type_(*args, **kw), nullable=None)
)
binary_table = Table(*table_args)
dialect = mssql.dialect()
gen = dialect.ddl_compiler(dialect, schema.CreateTable(binary_table))
for col in binary_table.c:
index = int(col.name[1:])
testing.eq_(
gen.get_column_specification(col),
"%s %s" % (col.name, columns[index][3]),
)
self.assert_(repr(col))
def test_VARBINARY_init(self):
d = mssql.dialect()
t = mssql.MSVarBinary(length=None, filestream=True)
eq_(str(t.compile(dialect=d)), "VARBINARY(max) FILESTREAM")
t = mssql.MSVarBinary(length="max", filestream=True)
eq_(str(t.compile(dialect=d)), "VARBINARY(max) FILESTREAM")
with expect_raises_message(
ValueError, "length must be None or 'max' when setting filestream"
):
mssql.MSVarBinary(length=1000, filestream=True)
| TypeDDLTest |
python | facelessuser__soupsieve | tests/test_level4/test_has.py | {
"start": 90,
"end": 4183
} | class ____(util.TestCase):
"""Test has selectors."""
MARKUP = """
<div id="0" class="aaaa">
<p id="1" class="bbbb"></p>
<p id="2" class="cccc"></p>
<p id="3" class="dddd"></p>
<div id="4" class="eeee">
<div id="5" class="ffff">
<div id="6" class="gggg">
<p id="7" class="hhhh"></p>
<p id="8" class="iiii zzzz"></p>
<p id="9" class="jjjj"></p>
<div id="10" class="kkkk">
<p id="11" class="llll zzzz"></p>
</div>
</div>
</div>
</div>
</div>
"""
MARKUP2 = """
<div id="0" class="aaaa">
<p id="1" class="bbbb"></p>
</div>
<div id="2" class="cccc">
<p id="3" class="dddd"></p>
</div>
<div id="4" class="eeee">
<p id="5" class="ffff"></p>
</div>
<div id="6" class="gggg">
<p id="7" class="hhhh"></p>
</div>
<div id="8" class="iiii">
<p id="9" class="jjjj"></p>
<span id="10"></span>
</div>
"""
def test_has_descendant(self):
"""Test has descendant."""
self.assert_selector(
self.MARKUP,
'div:not(.aaaa):has(.kkkk > p.llll)',
['4', '5', '6'],
flags=util.HTML
)
def test_has_next_sibling(self):
"""Test has next sibling."""
self.assert_selector(
self.MARKUP,
'p:has(+ .dddd:has(+ div .jjjj))',
['2'],
flags=util.HTML
)
def test_has_subsequent_sibling(self):
"""Test has subsequent sibling."""
self.assert_selector(
self.MARKUP,
'p:has(~ .jjjj)',
['7', '8'],
flags=util.HTML
)
def test_has_child(self):
"""Test has2."""
self.assert_selector(
self.MARKUP2,
'div:has(> .bbbb)',
['0'],
flags=util.HTML
)
def test_has_case(self):
"""Test has case insensitive."""
self.assert_selector(
self.MARKUP,
'div:NOT(.aaaa):HAS(.kkkk > p.llll)',
['4', '5', '6'],
flags=util.HTML
)
def test_has_mixed(self):
"""Test has mixed."""
self.assert_selector(
self.MARKUP2,
'div:has(> .bbbb, .ffff, .jjjj)',
['0', '4', '8'],
flags=util.HTML
)
self.assert_selector(
self.MARKUP2,
'div:has(.ffff, > .bbbb, .jjjj)',
['0', '4', '8'],
flags=util.HTML
)
def test_has_nested_pseudo(self):
"""Test has with nested pseudo."""
self.assert_selector(
self.MARKUP2,
'div:has(> :not(.bbbb, .ffff, .jjjj))',
['2', '6', '8'],
flags=util.HTML
)
self.assert_selector(
self.MARKUP2,
'div:not(:has(> .bbbb, .ffff, .jjjj))',
['2', '6'],
flags=util.HTML
)
def test_has_no_match(self):
"""Test has with a non-matching selector."""
self.assert_selector(
self.MARKUP2,
'div:has(:paused)',
[],
flags=util.HTML
)
def test_has_empty(self):
"""Test has with empty slot due to multiple commas."""
self.assert_raises('div:has()', SelectorSyntaxError)
def test_invalid_incomplete_has(self):
"""Test `:has()` fails with just a combinator."""
self.assert_raises(':has(>)', SelectorSyntaxError)
def test_invalid_has_double_combinator(self):
"""Test `:has()` fails with consecutive combinators."""
self.assert_raises(':has(>> has a)', SelectorSyntaxError)
self.assert_raises(':has(> has, >> a)', SelectorSyntaxError)
self.assert_raises(':has(> has >> a)', SelectorSyntaxError)
def test_invalid_has_trailing_combinator(self):
"""Test `:has()` fails with trailing combinator."""
self.assert_raises(':has(> has >)', SelectorSyntaxError)
| TestHas |
python | huggingface__transformers | examples/pytorch/language-modeling/run_mlm.py | {
"start": 5631,
"end": 28510
} | class ____:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[int] = field(
default=5,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
max_seq_length: Optional[int] = field(
default=None,
metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated."
)
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
line_by_line: bool = field(
default=False,
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"})
def __post_init__(self):
if self.streaming:
require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`")
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
if extension not in ["csv", "json", "txt"]:
raise ValueError("`train_file` should be a csv, a json or a txt file.")
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
if extension not in ["csv", "json", "txt"]:
raise ValueError("`validation_file` should be a csv, a json or a txt file.")
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+ f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub
#
# For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this
# behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
token=model_args.token,
streaming=data_args.streaming,
trust_remote_code=model_args.trust_remote_code,
)
if "validation" not in raw_datasets:
raw_datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
token=model_args.token,
streaming=data_args.streaming,
trust_remote_code=model_args.trust_remote_code,
)
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
token=model_args.token,
streaming=data_args.streaming,
trust_remote_code=model_args.trust_remote_code,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if extension == "txt":
extension = "text"
raw_datasets = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
token=model_args.token,
)
# If no validation data is there, validation_split_percentage will be used to divide the dataset.
if "validation" not in raw_datasets:
raw_datasets["validation"] = load_dataset(
extension,
data_files=data_files,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
token=model_args.token,
)
raw_datasets["train"] = load_dataset(
extension,
data_files=data_files,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
token=model_args.token,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"token": model_args.token,
"trust_remote_code": model_args.trust_remote_code,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
logger.info(f"New config: {config}")
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"token": model_args.token,
"trust_remote_code": model_args.trust_remote_code,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script. "
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
dtype = model_args.dtype if model_args.dtype in ["auto", None] else getattr(torch, model_args.dtype)
model = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
dtype=dtype,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForMaskedLM.from_config(config, trust_remote_code=model_args.trust_remote_code)
# We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
# on a small vocab and want a smaller embedding size, remove this test.
embedding_size = model.get_input_embeddings().weight.shape[0]
if len(tokenizer) > embedding_size:
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = list(raw_datasets["train"].features)
else:
column_names = list(raw_datasets["validation"].features)
text_column_name = "text" if "text" in column_names else column_names[0]
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
if data_args.line_by_line:
# When using line_by_line, we just tokenize each nonempty line.
padding = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(examples):
# Remove empty lines
examples[text_column_name] = [
line for line in examples[text_column_name] if len(line) > 0 and not line.isspace()
]
return tokenizer(
examples[text_column_name],
padding=padding,
truncation=True,
max_length=max_seq_length,
# We use this option because DataCollatorForLanguageModeling (see below) is more efficient when it
# receives the `special_tokens_mask`.
return_special_tokens_mask=True,
)
with training_args.main_process_first(desc="dataset map tokenization"):
if not data_args.streaming:
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=[text_column_name],
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on dataset line_by_line",
)
else:
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
remove_columns=[text_column_name],
)
else:
# Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
# We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more
# efficient when it receives the `special_tokens_mask`.
def tokenize_function(examples):
return tokenizer(examples[text_column_name], return_special_tokens_mask=True)
with training_args.main_process_first(desc="dataset map tokenization"):
if not data_args.streaming:
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on every text in dataset",
)
else:
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
remove_columns=column_names,
)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of
# max_seq_length.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, and if the total_length < max_seq_length we exclude this batch and return an empty dict.
# We could add padding if the model supported it instead of this drop, you can customize this part to your needs.
total_length = (total_length // max_seq_length) * max_seq_length
# Split by chunks of max_len.
result = {
k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]
for k, t in concatenated_examples.items()
}
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
# remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
# might be slower to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/process#map
with training_args.main_process_first(desc="grouping texts together"):
if not data_args.streaming:
tokenized_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
desc=f"Grouping texts in chunks of {max_seq_length}",
)
else:
tokenized_datasets = tokenized_datasets.map(
group_texts,
batched=True,
)
if training_args.do_train:
if "train" not in tokenized_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = tokenized_datasets["train"]
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
if training_args.do_eval:
if "validation" not in tokenized_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = tokenized_datasets["validation"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
def preprocess_logits_for_metrics(logits, labels):
if isinstance(logits, tuple):
# Depending on the model and config, logits may contain extra tensors,
# like past_key_values, but logits always come first
logits = logits[0]
return logits.argmax(dim=-1)
metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)
def compute_metrics(eval_preds):
preds, labels = eval_preds
# preds have the same shape as the labels, after the argmax(-1) has been calculated
# by preprocess_logits_for_metrics
labels = labels.reshape(-1)
preds = preds.reshape(-1)
mask = labels != -100
labels = labels[mask]
preds = preds[mask]
return metric.compute(predictions=preds, references=labels)
# Data collator
# This one will take care of randomly masking the tokens.
pad_to_multiple_of_8 = data_args.line_by_line and training_args.fp16 and not data_args.pad_to_max_length
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer,
mlm_probability=data_args.mlm_probability,
pad_to_multiple_of=8 if pad_to_multiple_of_8 else None,
)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
processing_class=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.do_eval and not is_torch_xla_available() else None,
preprocess_logits_for_metrics=preprocess_logits_for_metrics
if training_args.do_eval and not is_torch_xla_available()
else None,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
try:
perplexity = math.exp(metrics["eval_loss"])
except OverflowError:
perplexity = float("inf")
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "fill-mask"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| DataTrainingArguments |
python | pytorch__pytorch | test/inductor/test_cpu_select_algorithm.py | {
"start": 4206,
"end": 4778
} | class ____(TestCase):
def _check_amx_counter(self, vec_amx):
if vec_amx:
self.assertTrue(counters["inductor"]["cpp_micro_gemm_amx_counter"] > 0)
else:
self.assertEqual(counters["inductor"]["cpp_micro_gemm_amx_counter"], 0)
def _check_brgemm_counter(self, vec_amx):
if vec_amx and torch.cpu._is_amx_fp16_supported():
self.assertTrue(counters["inductor"]["cpp_micro_brgemm_counter"] > 0)
else:
self.assertEqual(counters["inductor"]["cpp_micro_brgemm_counter"], 0)
| BaseTestSelectAlgorithm |
python | allegroai__clearml | clearml/backend_api/services/v2_23/queues.py | {
"start": 80916,
"end": 82290
} | class ____(Request):
"""
:param queue: Queue id
:type queue: str
:param task: Task id
:type task: str
"""
_service = "queues"
_action = "move_task_to_back"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"queue": {"description": "Queue id", "type": "string"},
"task": {"description": "Task id", "type": "string"},
},
"required": ["queue", "task"],
"type": "object",
}
def __init__(self, queue: str, task: str, **kwargs: Any) -> None:
super(MoveTaskToBackRequest, self).__init__(**kwargs)
self.queue = queue
self.task = task
@schema_property("queue")
def queue(self) -> str:
return self._property_queue
@queue.setter
def queue(self, value: str) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
| MoveTaskToBackRequest |
python | encode__httpx | httpx/_transports/default.py | {
"start": 3944,
"end": 8667
} | class ____(BaseTransport):
def __init__(
self,
verify: ssl.SSLContext | str | bool = True,
cert: CertTypes | None = None,
trust_env: bool = True,
http1: bool = True,
http2: bool = False,
limits: Limits = DEFAULT_LIMITS,
proxy: ProxyTypes | None = None,
uds: str | None = None,
local_address: str | None = None,
retries: int = 0,
socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
) -> None:
import httpcore
proxy = Proxy(url=proxy) if isinstance(proxy, (str, URL)) else proxy
ssl_context = create_ssl_context(verify=verify, cert=cert, trust_env=trust_env)
if proxy is None:
self._pool = httpcore.ConnectionPool(
ssl_context=ssl_context,
max_connections=limits.max_connections,
max_keepalive_connections=limits.max_keepalive_connections,
keepalive_expiry=limits.keepalive_expiry,
http1=http1,
http2=http2,
uds=uds,
local_address=local_address,
retries=retries,
socket_options=socket_options,
)
elif proxy.url.scheme in ("http", "https"):
self._pool = httpcore.HTTPProxy(
proxy_url=httpcore.URL(
scheme=proxy.url.raw_scheme,
host=proxy.url.raw_host,
port=proxy.url.port,
target=proxy.url.raw_path,
),
proxy_auth=proxy.raw_auth,
proxy_headers=proxy.headers.raw,
ssl_context=ssl_context,
proxy_ssl_context=proxy.ssl_context,
max_connections=limits.max_connections,
max_keepalive_connections=limits.max_keepalive_connections,
keepalive_expiry=limits.keepalive_expiry,
http1=http1,
http2=http2,
socket_options=socket_options,
)
elif proxy.url.scheme in ("socks5", "socks5h"):
try:
import socksio # noqa
except ImportError: # pragma: no cover
raise ImportError(
"Using SOCKS proxy, but the 'socksio' package is not installed. "
"Make sure to install httpx using `pip install httpx[socks]`."
) from None
self._pool = httpcore.SOCKSProxy(
proxy_url=httpcore.URL(
scheme=proxy.url.raw_scheme,
host=proxy.url.raw_host,
port=proxy.url.port,
target=proxy.url.raw_path,
),
proxy_auth=proxy.raw_auth,
ssl_context=ssl_context,
max_connections=limits.max_connections,
max_keepalive_connections=limits.max_keepalive_connections,
keepalive_expiry=limits.keepalive_expiry,
http1=http1,
http2=http2,
)
else: # pragma: no cover
raise ValueError(
"Proxy protocol must be either 'http', 'https', 'socks5', or 'socks5h',"
f" but got {proxy.url.scheme!r}."
)
def __enter__(self: T) -> T: # Use generics for subclass support.
self._pool.__enter__()
return self
def __exit__(
self,
exc_type: type[BaseException] | None = None,
exc_value: BaseException | None = None,
traceback: TracebackType | None = None,
) -> None:
with map_httpcore_exceptions():
self._pool.__exit__(exc_type, exc_value, traceback)
def handle_request(
self,
request: Request,
) -> Response:
assert isinstance(request.stream, SyncByteStream)
import httpcore
req = httpcore.Request(
method=request.method,
url=httpcore.URL(
scheme=request.url.raw_scheme,
host=request.url.raw_host,
port=request.url.port,
target=request.url.raw_path,
),
headers=request.headers.raw,
content=request.stream,
extensions=request.extensions,
)
with map_httpcore_exceptions():
resp = self._pool.handle_request(req)
assert isinstance(resp.stream, typing.Iterable)
return Response(
status_code=resp.status,
headers=resp.headers,
stream=ResponseStream(resp.stream),
extensions=resp.extensions,
)
def close(self) -> None:
self._pool.close()
| HTTPTransport |
python | celery__celery | t/unit/app/test_schedules.py | {
"start": 9469,
"end": 19208
} | class ____:
def crontab(self, *args, **kwargs):
return crontab(*args, **dict(kwargs, app=self.app))
def next_occurrence(self, crontab, now):
crontab.nowfun = lambda: now
return now + crontab.remaining_estimate(now)
def test_next_minute(self):
next = self.next_occurrence(
self.crontab(), datetime(2010, 9, 11, 14, 30, 15),
)
assert next == datetime(2010, 9, 11, 14, 31)
def test_not_next_minute(self):
next = self.next_occurrence(
self.crontab(), datetime(2010, 9, 11, 14, 59, 15),
)
assert next == datetime(2010, 9, 11, 15, 0)
def test_this_hour(self):
next = self.next_occurrence(
self.crontab(minute=[5, 42]), datetime(2010, 9, 11, 14, 30, 15),
)
assert next == datetime(2010, 9, 11, 14, 42)
def test_not_this_hour(self):
next = self.next_occurrence(
self.crontab(minute=[5, 10, 15]),
datetime(2010, 9, 11, 14, 30, 15),
)
assert next == datetime(2010, 9, 11, 15, 5)
def test_today(self):
next = self.next_occurrence(
self.crontab(minute=[5, 42], hour=[12, 17]),
datetime(2010, 9, 11, 14, 30, 15),
)
assert next == datetime(2010, 9, 11, 17, 5)
def test_not_today(self):
next = self.next_occurrence(
self.crontab(minute=[5, 42], hour=[12]),
datetime(2010, 9, 11, 14, 30, 15),
)
assert next == datetime(2010, 9, 12, 12, 5)
def test_weekday(self):
next = self.next_occurrence(
self.crontab(minute=30, hour=14, day_of_week='sat'),
datetime(2010, 9, 11, 14, 30, 15),
)
assert next == datetime(2010, 9, 18, 14, 30)
def test_not_weekday(self):
next = self.next_occurrence(
self.crontab(minute=[5, 42], day_of_week='mon-fri'),
datetime(2010, 9, 11, 14, 30, 15),
)
assert next == datetime(2010, 9, 13, 0, 5)
def test_monthyear(self):
next = self.next_occurrence(
self.crontab(minute=30, hour=14, month_of_year='oct', day_of_month=18),
datetime(2010, 9, 11, 14, 30, 15),
)
assert next == datetime(2010, 10, 18, 14, 30)
def test_not_monthyear(self):
next = self.next_occurrence(
self.crontab(minute=[5, 42], month_of_year='nov-dec', day_of_month=13),
datetime(2010, 9, 11, 14, 30, 15),
)
assert next == datetime(2010, 11, 13, 0, 5)
def test_monthday(self):
next = self.next_occurrence(
self.crontab(minute=30, hour=14, day_of_month=18),
datetime(2010, 9, 11, 14, 30, 15),
)
assert next == datetime(2010, 9, 18, 14, 30)
def test_not_monthday(self):
next = self.next_occurrence(
self.crontab(minute=[5, 42], day_of_month=29),
datetime(2010, 1, 22, 14, 30, 15),
)
assert next == datetime(2010, 1, 29, 0, 5)
def test_weekday_monthday(self):
next = self.next_occurrence(
self.crontab(minute=30, hour=14,
day_of_week='mon', day_of_month=18),
datetime(2010, 1, 18, 14, 30, 15),
)
assert next == datetime(2010, 10, 18, 14, 30)
def test_monthday_not_weekday(self):
next = self.next_occurrence(
self.crontab(minute=[5, 42], day_of_week='sat', day_of_month=29),
datetime(2010, 1, 29, 0, 5, 15),
)
assert next == datetime(2010, 5, 29, 0, 5)
def test_weekday_not_monthday(self):
next = self.next_occurrence(
self.crontab(minute=[5, 42], day_of_week='mon', day_of_month=18),
datetime(2010, 1, 11, 0, 5, 15),
)
assert next == datetime(2010, 1, 18, 0, 5)
def test_not_weekday_not_monthday(self):
next = self.next_occurrence(
self.crontab(minute=[5, 42], day_of_week='mon', day_of_month=18),
datetime(2010, 1, 10, 0, 5, 15),
)
assert next == datetime(2010, 1, 18, 0, 5)
def test_leapday(self):
next = self.next_occurrence(
self.crontab(minute=30, hour=14, day_of_month=29),
datetime(2012, 1, 29, 14, 30, 15),
)
assert next == datetime(2012, 2, 29, 14, 30)
def test_not_leapday(self):
next = self.next_occurrence(
self.crontab(minute=30, hour=14, day_of_month=29),
datetime(2010, 1, 29, 14, 30, 15),
)
assert next == datetime(2010, 3, 29, 14, 30)
def test_weekmonthdayyear(self):
next = self.next_occurrence(
self.crontab(minute=30, hour=14, day_of_week='fri',
day_of_month=29, month_of_year=1),
datetime(2010, 1, 22, 14, 30, 15),
)
assert next == datetime(2010, 1, 29, 14, 30)
def test_monthdayyear_not_week(self):
next = self.next_occurrence(
self.crontab(minute=[5, 42], day_of_week='wed,thu',
day_of_month=29, month_of_year='1,4,7'),
datetime(2010, 1, 29, 14, 30, 15),
)
assert next == datetime(2010, 4, 29, 0, 5)
def test_weekdaymonthyear_not_monthday(self):
next = self.next_occurrence(
self.crontab(minute=30, hour=14, day_of_week='fri',
day_of_month=29, month_of_year='1-10'),
datetime(2010, 1, 29, 14, 30, 15),
)
assert next == datetime(2010, 10, 29, 14, 30)
def test_weekmonthday_not_monthyear(self):
next = self.next_occurrence(
self.crontab(minute=[5, 42], day_of_week='fri',
day_of_month=29, month_of_year='2-10'),
datetime(2010, 1, 29, 14, 30, 15),
)
assert next == datetime(2010, 10, 29, 0, 5)
def test_weekday_not_monthdayyear(self):
next = self.next_occurrence(
self.crontab(minute=[5, 42], day_of_week='mon',
day_of_month=18, month_of_year='2-10'),
datetime(2010, 1, 11, 0, 5, 15),
)
assert next == datetime(2010, 10, 18, 0, 5)
def test_monthday_not_weekdaymonthyear(self):
next = self.next_occurrence(
self.crontab(minute=[5, 42], day_of_week='mon',
day_of_month=29, month_of_year='2-4'),
datetime(2010, 1, 29, 0, 5, 15),
)
assert next == datetime(2010, 3, 29, 0, 5)
def test_monthyear_not_weekmonthday(self):
next = self.next_occurrence(
self.crontab(minute=[5, 42], day_of_week='mon',
day_of_month=29, month_of_year='2-4'),
datetime(2010, 2, 28, 0, 5, 15),
)
assert next == datetime(2010, 3, 29, 0, 5)
def test_not_weekmonthdayyear(self):
next = self.next_occurrence(
self.crontab(minute=[5, 42], day_of_week='fri,sat',
day_of_month=29, month_of_year='2-10'),
datetime(2010, 1, 28, 14, 30, 15),
)
assert next == datetime(2010, 5, 29, 0, 5)
def test_invalid_specification(self):
# *** WARNING ***
# This test triggers an infinite loop in case of a regression
with pytest.raises(RuntimeError):
self.next_occurrence(
self.crontab(day_of_month=31, month_of_year=4),
datetime(2010, 1, 28, 14, 30, 15),
)
def test_leapyear(self):
next = self.next_occurrence(
self.crontab(minute=30, hour=14, day_of_month=29, month_of_year=2),
datetime(2012, 2, 29, 14, 30),
)
assert next == datetime(2016, 2, 29, 14, 30)
def test_day_after_dst_end(self):
# Test for #1604 issue with region configuration using DST
tzname = "Europe/Paris"
self.app.timezone = tzname
tz = ZoneInfo(tzname)
crontab = self.crontab(minute=0, hour=9)
# Set last_run_at Before DST end
last_run_at = datetime(2017, 10, 28, 9, 0, tzinfo=tz)
# Set now after DST end
now = datetime(2017, 10, 29, 7, 0, tzinfo=tz)
crontab.nowfun = lambda: now
next = now + crontab.remaining_estimate(last_run_at)
assert next.utcoffset().seconds == 3600
assert next == datetime(2017, 10, 29, 9, 0, tzinfo=tz)
def test_day_after_dst_start(self):
# Test for #1604 issue with region configuration using DST
tzname = "Europe/Paris"
self.app.timezone = tzname
tz = ZoneInfo(tzname)
crontab = self.crontab(minute=0, hour=9)
# Set last_run_at Before DST start
last_run_at = datetime(2017, 3, 25, 9, 0, tzinfo=tz)
# Set now after DST start
now = datetime(2017, 3, 26, 7, 0, tzinfo=tz)
crontab.nowfun = lambda: now
next = now + crontab.remaining_estimate(last_run_at)
assert next.utcoffset().seconds == 7200
assert next == datetime(2017, 3, 26, 9, 0, tzinfo=tz)
def test_negative_utc_timezone_with_day_of_month(self):
# UTC-8
tzname = "America/Los_Angeles"
self.app.timezone = tzname
tz = ZoneInfo(tzname)
# set day_of_month to test on _delta_to_next
crontab = self.crontab(minute=0, day_of_month='27-31')
# last_run_at: '2023/01/28T23:00:00-08:00'
last_run_at = datetime(2023, 1, 28, 23, 0, tzinfo=tz)
# now: '2023/01/29T00:00:00-08:00'
now = datetime(2023, 1, 29, 0, 0, tzinfo=tz)
crontab.nowfun = lambda: now
next = now + crontab.remaining_estimate(last_run_at)
assert next == datetime(2023, 1, 29, 0, 0, tzinfo=tz)
| test_crontab_remaining_estimate |
python | pytorch__pytorch | torch/_subclasses/meta_utils.py | {
"start": 30348,
"end": 88856
} | class ____(Generic[_TensorT]):
def __init__(self, *, copy_data: bool = False) -> None:
# Maps MetaStorageId to UntypedStorage
self.storage_memo: weakref.WeakValueDictionary[
MetaStorageId, torch.UntypedStorage
] = weakref.WeakValueDictionary()
# Maps MetaTensorId to torch.Tensor (typically a meta tensor or
# FakeTensor)
self.tensor_memo: weakref.WeakValueDictionary[MetaTensorId, _TensorT] = (
weakref.WeakValueDictionary()
)
self.hit = 0
self.miss = 0
self.del_hook = None
self.arg_cnt = 0
# Ensures real_storage/real_tensor are populated on the resulting
# metaified storage/tensor. The naming of this attribute is load
# bearing: FakeTensor relies on real tensor being set to exactly this
# value
self.copy_data = copy_data
self.describer = MetaTensorDescriber(copy_data=copy_data)
def successful(self) -> bool:
return self.hit > 0 and self.miss == 0
def get_tensor_memo(self, t: MetaTensorDesc) -> Optional[torch.Tensor]:
return self.tensor_memo.get(t.id, None)
def _checked_get_tensor_memo(self, t: MetaTensorDesc) -> _TensorT:
r = self.tensor_memo.get(t.id, None)
assert r is not None
return r
def set_tensor_memo(self, t: MetaTensorDesc, v: _TensorT) -> None:
self.tensor_memo[t.id] = v
def get_storage_memo(self, s: MetaStorageDesc) -> Optional[torch.UntypedStorage]:
return self.storage_memo.get(s.id, None)
def set_storage_memo(self, s: MetaStorageDesc, v: torch.UntypedStorage) -> None:
self.storage_memo[s.id] = v
def meta_storage(
self,
s: MetaStorageDesc,
callback: Callable[[Callable[[], torch.Tensor]], _TensorT],
) -> torch.UntypedStorage:
# If we are fakeifying a tensor that has a secretly-zero-sized storage,
# Need to make sure to resize the meta storage too.
if (memo := self.get_storage_memo(s)) is None:
r_s = callback(
lambda: torch.empty(s.size, dtype=torch.uint8, device="meta"),
).untyped_storage()
if self.copy_data:
# NB: no_dispatch is needed because internally storage copy is
# implemented as Tensor operations
with torch.no_grad(), no_dispatch():
assert s.data is not None
_set_real_storage(r_s, s.data.clone())
self.set_storage_memo(s, r_s)
return r_s
else:
return memo
@classmethod
def _checked_cast_tensor_t(cls, t: torch.Tensor) -> _TensorT:
# TODO: how to check _TensorT?
return typing.cast(_TensorT, t)
@classmethod
def _identity_callable(
cls,
t: Callable[[], torch.Tensor],
device: Optional[Union[torch.device, str]] = None,
) -> _TensorT:
return cls._checked_cast_tensor_t(t())
@classmethod
def _backward_error(cls, t: _TensorT) -> _TensorT:
errfn = torch._C._functions.DelayedError(
"Internal error: Tried to backward() through example input",
1,
)
err = errfn(t)
return typing.cast(_TensorT, err)
# This function assumes that it's possible to do the conversion
# NB: name here is used in a conventional way by Dynamo; it corresponds
# precisely to the Source.name() of the tensor we're fakeifying and
# corresponds to a valid Python expression. When we construct sub-names
# as part of this process, we will maintain this invariant! (Even though
# other users of this may not need it this property to be upheld.)
def meta_tensor(
self,
t: MetaTensorDesc,
shape_env: Optional[ShapeEnv],
callback_: _MetaTensorCallback[_TensorT],
source: Optional[Source],
symbolic_context: Optional[SymbolicContext],
) -> _TensorT:
callback: _MetaTensorCallbackOptDevice = functools.partial(
callback_, device=t.device
)
if source is None:
from torch._dynamo.source import ConstantSource
# TODO: make a dedicated UnknownSource for this?
source = ConstantSource(
f"__meta_utils_unknown_tensor{len(self.tensor_memo)}"
)
msg = (
" This indicates you set no_dispatch() before calling into this"
" function. This is an error: we may be creating fake tensors and"
" will perform operations on them which need fake tensor mode to"
" be active. You will segfault if you are in a no_dispatch() block."
)
assert not torch._C._dispatch_tls_local_exclude_set().has(
torch._C.DispatchKey.Python
), msg
self.arg_cnt += 1
# When we make as_strided calls, we end up generating a guard
# that the new as_strided tensor is in bounds for the old storage
# for the base (since as_strided calls can "bust" out of their
# bounding box.) This guard is unnecessary: if a user is able
# to provide us a tensor with the view base setup this way, we
# don't need to produce a guard, because the fact that they
# were able to produce the view base means its in bounds.
#
# Now, ordinarily, this guard would be harmless. However, the
# generated guard refers to variables bound on the base variable.
# At the moment, Dynamo doesn't actually guard on x._base, because
# according to Voz this results in a lot of spurious invalidations,
# and also if the user doesn't directly make use of _base, its
# pointless anyway (because programs should be parametric over
# whether or not the input tensor is a view or not--unless you're
# mutating the input, but that's a whole 'nother ballgame). So
# for expediency, we suppress these guards so we don't have to
# deal with this (yet, anyway.)
#
# NB: An old version of this code suppressed guards for ALL operations
# happening during meta conversion, not just as_strided calls.
# This is too aggressive: we do duck sizing and 0/1 simplification
# as we allocate variables, and we do need to register guards for
# these cases.
maybe_suppress: Callable[[], Any] = contextlib.nullcontext
if shape_env is not None:
maybe_suppress = shape_env.suppress_guards
def sym_sizes_strides_storage_offset(
t: MetaTensorDesc,
src: torch._guards.Source,
symbolic_context: Optional[
torch.fx.experimental.symbolic_shapes.SymbolicContext
] = symbolic_context,
) -> tuple[tuple[int, ...], tuple[int, ...], int]:
assert t.stride is not None
if shape_env is not None:
fake_mode = t.fake_mode
if fake_mode is not None and fake_mode.shape_env is shape_env:
# Don't reallocate the sizes; the shape envs are the same,
# so reuse the old sizes/strides/etc
return (t.size, t.stride, t.storage_offset)
else:
# TODO: deduplicate this
t_size = tuple(
shape_env._maybe_specialize_sym_int_with_hint(sz)
for sz in t.size
)
t_stride = tuple(
shape_env._maybe_specialize_sym_int_with_hint(sd)
for sd in t.stride
)
t_storage_offset = shape_env._maybe_specialize_sym_int_with_hint(
t.storage_offset
)
return shape_env._create_symbolic_sizes_strides_storage_offset(
t_size,
t_stride,
t_storage_offset,
[d in t.dynamo_dynamic_indices for d in range(t.ndim)],
src,
symbolic_context=symbolic_context,
hint_overrides=t.dynamo_hint_overrides,
)
else:
return (t.size, t.stride, t.storage_offset)
def empty_create(
inner_t: MetaTensorDesc,
inner_src: torch._guards.Source,
symbolic_context: Optional[
torch.fx.experimental.symbolic_shapes.SymbolicContext
] = symbolic_context,
) -> torch.Tensor:
(
inner_sizes,
inner_strides,
_inner_storage_offset,
) = sym_sizes_strides_storage_offset(inner_t, inner_src, symbolic_context)
return torch.empty_strided(
inner_sizes,
inner_strides,
dtype=inner_t.dtype,
device="meta",
)
# Creates a subclass instance with empty inner tensors according to the specified
# symbolic context.
def empty_create_subclass(
t: MetaTensorDesc,
outer_size: tuple[int, ...],
outer_stride: tuple[int, ...],
symbolic_context: Optional[
torch.fx.experimental.symbolic_shapes.SymbolicContext
] = symbolic_context,
source: Optional[torch._guards.Source] = source,
) -> _TensorT:
from torch._dynamo.source import AttrSource
from torch.fx.experimental.symbolic_shapes import SubclassSymbolicContext
assert t.attrs is not None
assert t.type is not None
# NB: t.ctx could be None if the subclass in question has no
# meaningful context
# Note: transform_subclass will use __tensor_unflatten__ to generate
# a fresh subclass wrapper with outer sizes / strides according to the
# outer symbolic context (passed in to this function). Inner size / stride
# / storage offset symbols are allocated according to the appropriate inner
# symbolic contexts, after which the checks in transform_subclass() will
# relate them to the outer metadata as possible.
#
# Morally, the code here is same as transform_subclass, but we've
# written it from scratch to read EmptyCreateSubclass
outer_size = outer_size if outer_size is not None else t.size
# pyrefly: ignore [bad-assignment]
outer_stride = outer_stride if outer_stride is not None else t.stride
assert symbolic_context is None or isinstance(
symbolic_context, SubclassSymbolicContext
)
def _empty_create_subclass(
t: MetaTensorDesc,
outer_size: Optional[tuple[int, ...]],
outer_stride: Optional[tuple[int, ...]],
symbolic_context: Optional[
torch.fx.experimental.symbolic_shapes.SymbolicContext
],
callback: _MetaTensorCallbackOptDevice[_TensorT],
source: torch._guards.Source,
) -> _TensorT:
# We are hitting plain meta_desc tensor so actually
# create a tensor here.
if t.attrs is None:
return self.meta_tensor(
t,
shape_env,
callback,
source,
symbolic_context,
)
inner_tensors = {}
for attr, meta_tensor_desc in t.attrs.items():
current_context = None
if symbolic_context is not None:
assert isinstance(symbolic_context, SubclassSymbolicContext)
if (
current_context_ := symbolic_context.inner_contexts[attr]
) is not None:
current_context = _checked_cast(
torch.fx.experimental.symbolic_shapes.SymbolicContext,
current_context_,
)
current_source = AttrSource(source, attr)
inner_callback = functools.partial(
callback, device=meta_tensor_desc.device
)
new_empty_tensor = _empty_create_subclass(
meta_tensor_desc,
meta_tensor_desc.size,
meta_tensor_desc.stride,
current_context,
inner_callback,
current_source,
)
inner_tensors[attr] = new_empty_tensor
assert t.type is not None
return t.type.__tensor_unflatten__( # type: ignore[attr-defined]
inner_tensors, t.ctx, outer_size, outer_stride
)
assert source is not None
sub = _empty_create_subclass(
t, outer_size, outer_stride, symbolic_context, callback, source
)
# NB: Purposefully guard here to simplify the inner / outer symbols.
# Using sym_eq() for symbolic comparison can result in an expression that's too
# difficult to guard on, so we use == here.
assert sub.shape == outer_size, (
f"Expected return value from {t.type}__tensor_unflatten__() to have "
f"shape equal to {outer_size}, but got: {sub.shape}"
)
assert sub.stride() == outer_stride, (
f"Expected return value from {t.type}__tensor_unflatten__() to have "
f"stride equal to {outer_stride}, but got: {sub.stride()}"
)
return sub
# Returns an all-dynamic symbolic context used for metafying the given tensor with
# fully dynamic dims. This is useful when fake-ifying intermediate tensors in
# closed-over ViewFunc state, as we don't have symbolic contexts for them, but we
# don't want to over-specialize during view replay.
def all_dynamic_symbolic_context(
t: MetaTensorDesc,
source: torch._guards.Source,
shape_env: Optional[torch.fx.experimental.symbolic_shapes.ShapeEnv],
callback: _MetaTensorCallback[_TensorT],
) -> torch.fx.experimental.symbolic_shapes.SymbolicContext:
"""Build a symbolic context marking every dim of ``t`` as DYNAMIC.

Recurses into the view base (``t.base``) and, for traceable wrapper
subclasses, into each inner tensor, so the whole tensor tree is treated
as fully dynamic.  Used when fake-ifying tensors for which no recorded
symbolic context exists, to avoid over-specialization (see the comment
block above this function).  ``callback`` is not used directly here; it
is only threaded through the recursive calls.
"""
from torch._dynamo.source import AttrSource
from torch.fx.experimental.symbolic_shapes import (
DimDynamic,
StatelessSymbolicContext,
SubclassSymbolicContext,
)
# Views also need a context for their base; its source is derived from
# this tensor's source via the `_base` attribute.
view_base_context: Optional[
torch.fx.experimental.symbolic_shapes.SymbolicContext
] = None
if t.is_view:
assert t.base is not None
view_base_context = all_dynamic_symbolic_context(
t.base, AttrSource(source, "_base"), shape_env, callback
)
t_symbolic_context: torch.fx.experimental.symbolic_shapes.SymbolicContext
# Every dimension is dynamic; no range constraints are imposed.
t_dynamic_sizes = [DimDynamic.DYNAMIC] * t.ndim
if t.is_traceable_wrapper_subclass:
assert t.attrs is not None
# One all-dynamic context per inner tensor of the wrapper subclass.
inner_contexts: dict[
str, torch.fx.experimental.symbolic_shapes.SymbolicContext
] = {}
for attr, inner in t.attrs.items():
assert isinstance(attr, str)
inner_contexts[attr] = all_dynamic_symbolic_context(
inner, AttrSource(source, attr), shape_env, callback
)
t_symbolic_context = SubclassSymbolicContext(
dynamic_sizes=t_dynamic_sizes,
constraint_sizes=[None] * t.ndim,
inner_contexts=inner_contexts,  # type: ignore[arg-type]
tensor_source=source,
view_base_context=view_base_context,
)
else:
t_symbolic_context = StatelessSymbolicContext(
dynamic_sizes=t_dynamic_sizes,
constraint_sizes=[None] * t.ndim,
view_base_context=view_base_context,
)
return t_symbolic_context
# Returns a fake-ified version of an input view tensor t, given an already fake-ified
# base. At a high level, we want two things:
# 1. fake_t should have the same view relationship to the given fake base as the
# input t has to its _base.
# 2. fake_t should have symbolic sizes / strides / storage offset according to the
# appropriate symbolic context (i.e. from the automatic dynamic algorithm).
#
# We currently take different strategies across view types:
# * For dense -> dense views, accomplish both (1) and (2) simultaneously via an
# as_strided() call on the fake-ified base, passing symbolic metadata.
# * For views involving subclasses, perform view replay using view funcs to
# achieve (1). It's necessary for (2) to swap out any closed-over state in
# the view funcs with symbolicized SymInts and fake-ified tensors. Doing this
# avoids specialization (and thus over-eager simplification of symbols) that
# could occur during view replay on the fake-ified base.
#
# Examples:
# * t.unsqueeze(-1) with dense t is a dense -> dense view. It can be modeled
# with an as_strided() call on the fake base passing symbolic metadata.
# * sub.select(dim=0, index=3) is a subclass -> subclass view. The index arg
# is made symbolic to avoid invalid specialization and view replay is then
# done to reconstruct the view.
# * _nested_from_jagged(values, offsets) is a dense -> subclass view
# that returns a subclass instance from a dense values tensor. The offsets
# tensor is closed over in the view func, as it can be considered view metadata.
# First, the offsets tensor is fake-ified according to the inner symbolic
# context and with the correct relationship to the outer size / stride metadata.
# Then view replay is done, swapping in the fake offsets so the view replay output
# is fully fake with no invalid specialization.
def view_from_base(
base: _TensorT,
t: MetaTensorDesc,
shape_env: Optional[
torch.fx.experimental.symbolic_shapes.ShapeEnv
] = shape_env,
) -> _TensorT:
"""Recreate the view tensor ``t`` as a view of the fake-ified ``base``.

Dense -> dense views are rebuilt with ``as_strided`` using symbolic
sizes/strides; anything involving a traceable wrapper subclass is
rebuilt via view replay (``t.view_func``), after swapping closed-over
SymInts / real tensors for symbolic / fake equivalents (see the long
strategy comment preceding this function).
"""
with enable_python_dispatcher():
# fake-ify t's metadata according to the outer symbolic context
(sizes, strides, storage_offset) = sym_sizes_strides_storage_offset(
t, source
)
if (
not t.is_traceable_wrapper_subclass
and not is_traceable_wrapper_subclass(base)
):
# Dense -> Dense view case uses as_strided() to construct view relationship.
# TODO: Change this logic to use view replay for consistency?
# It's likely there is no view func available.
with maybe_suppress():
return self._checked_cast_tensor_t(
base.as_strided(sizes, strides, storage_offset)
)
from torch._dynamo.source import EphemeralSource
from torch.fx.experimental.symbolic_shapes import (
StatelessSymbolicContext,
sym_eq,
)
# Called by view replay for each closed-over (non-symbolic) SymInt;
# returns either the int unchanged or an ephemeral symbolic stand-in.
def symint_visitor_fn(s: int) -> int:
nonlocal symbolic_context
from torch.fx.experimental.symbolic_shapes import DimDynamic
all_static_sizes = (
symbolic_context is not None
and isinstance(symbolic_context, StatelessSymbolicContext)
and all(
x is DimDynamic.STATIC
for x in symbolic_context.dynamic_sizes
)
)
# Can't just rely on shape env being None - dynamo always initializes it
if all_static_sizes or shape_env is None:
return s
# NB: The symbol here is expected to be simplified out because we a priori
# allocate inner and outer symbols according to the appropriate symbolic
# contexts and prefer those over this symbol during symbol simplification
# (via usage of EphemeralSource below). This -shouldn't- happen, but if
# this symbol somehow leaks out beyond the view tensor's shape metadata, our
# assumption of it being simplified out will fail and it may be guarded on,
# which will hard error.
sym_source = EphemeralSource("symint_visitor_fn")
symbol = shape_env.create_symbol(s, sym_source, positive=None)
return shape_env.create_symintnode(
symbol, hint=s, source=sym_source
)
# Maps real inner-tensor ids -> their fake counterparts, filled below.
real_to_fake_mapping = {}
if t.is_traceable_wrapper_subclass:
assert t.attrs is not None
# NB: t.ctx could be None if the subclass in question has no
# meaningful context
assert t.type is not None
# Fake-ify t naively here; this is only done so we can get fake-ified inner
# tensors with the correct relationships to the outer sizes / strides for use
# in view replay. It's done beforehand here because it's not easy to do when
# visiting tensors one-by-one during view replay.
#
# Example:
# Consider a Dense -> NJT view. NJT has (values, offsets) components and we
# want a view of values with the offsets closed over. As the offsets component
# is needed to describe the output view, it's important that it's fakeified
# correctly.
fake_t: _TensorT = empty_create_subclass(
t, outer_size=sizes, outer_stride=strides
)
attrs, _ = fake_t.__tensor_flatten__()  # type: ignore[attr-defined]
for attr in attrs:
real_to_fake_mapping[t.attrs[attr].id] = getattr(fake_t, attr)
# Called by view replay for each closed-over real tensor; returns the
# pre-built fake inner tensor, or fake-ifies other state all-dynamic.
def tensor_visitor_fn(
visited_t: torch.Tensor,
# These arguments are never passed, we just use them to close
# over these relevant values
shape_env: Optional[
torch.fx.experimental.symbolic_shapes.ShapeEnv
] = shape_env,
callback: _MetaTensorCallbackOptDevice[_TensorT] = callback,
) -> torch.Tensor:
# It's possible to close over an undefined tensor (e.g. NJT's lengths).
if visited_t is None:
# pyrefly: ignore [bad-return]
return None
# NB: visited_t being a Tensor here is very naughty! Should
# have already been described
# Fake inner tensors of view subclasses will come from the mapping built above.
visited_id = self.describer.get_tensor_id(visited_t)
fake_visited_t = real_to_fake_mapping.get(visited_id)
if fake_visited_t is not None:
return fake_visited_t
visited_desc = self.describer.describe_tensor(visited_t)
# For other closed-over tensor state, fake-ify it as all dynamic with an
# ephemeral source. This avoids invalid specialization during view replay.
# If we find that in practice the usage of ephemeral sources isn't enough
# to guarantee that we don't have guards on these symbols, we may need to
# explicitly suppress guards (as is done for _base in the dense -> dense
# view case).
temp_source = EphemeralSource("tensor_visitor_fn")
return self.meta_tensor(
visited_desc,
shape_env,
callback,
temp_source,
all_dynamic_symbolic_context(
visited_desc, temp_source, shape_env, callback
),
)
# Replay the view, swapping out any non-symbolic SymInts or real tensors
# for symbolic SymInts or fake tensors.
assert t.view_func is not None
# NB: we do NOT suppress guards here, we need to remove ephemeral
# sources
fake_t = t.view_func.apply(
t, base, symint_visitor_fn, tensor_visitor_fn
)
# Ensure the output has symbolic shapes according to the outer symbolic context.
# These checks should simplify out any symbols created for closed-over view func
# SymInts.
torch._check(sym_eq(fake_t.size(), sizes))
torch._check(sym_eq(fake_t.stride(), strides))
torch._check(sym_eq(fake_t.storage_offset(), storage_offset))
return fake_t
if self.get_tensor_memo(t) is None:
GRAD_TENSOR_SENTINEL_VALUE = -2
with torch.inference_mode(t.is_inference):
if t.is_sparse:
is_leaf = t.is_leaf
# The lambda function below is similar to
# `t.to(device='meta')` except the latter
# preserves nnz value
r = callback(
lambda: torch.ops.aten._sparse_coo_tensor_with_dims(
t.sparse_dim,
t.dense_dim,
t.size,
dtype=t.dtype,
layout=torch.sparse_coo,
device="meta",
)
)
if self.copy_data:
# Pray that sparse clone doesn't lose information
assert t.data is not None
with torch.no_grad(), no_dispatch():
assert _is_fake_tensor(r)
r.real_tensor = _safe_clone(t.data)
assert safe_is_leaf(r), "the callback you passed in doesn't detach"
# Note [is_coalesced is dispatched]
# Strangely enough, is_coalesced() is a dispatched operator,
# which means that it will get caught by fake tensor mode.
# Ordinarily this would error, but there's some logic in
# fake tensor ensure this doesn't happen.
r._coalesced_(bool(t.is_coalesced))
if t.requires_grad:
r.requires_grad = True
if t.requires_grad and not is_leaf:
# This should probably use DelayedError,
# but clone is fine for now for sparse tensors.
# (DelayedError does not work for sparse because it causes
# the Fake sparse tensor to "lose" its fakeness)
r = self._checked_cast_tensor_t(r.clone())
with torch.enable_grad():
r._coalesced_(bool(t.is_coalesced))
elif is_sparse_compressed_layout(t.layout):
is_leaf = t.is_leaf
if t.layout in {torch.sparse_bsr, torch.sparse_bsc}:
assert t.sparse_dim is not None
assert t.dense_dim is not None
assert t.values is not None
batch_dim = t.ndim - t.sparse_dim - t.dense_dim
blocksize = t.values.shape[batch_dim + 1 : batch_dim + 3]
else:
blocksize = ()
if t.layout in {torch.sparse_csr, torch.sparse_bsr}:
assert t.crow_indices is not None
index_dtype = t.crow_indices.dtype
else:
assert t.ccol_indices is not None
index_dtype = t.ccol_indices.dtype
r = callback(
lambda: torch.ops.aten._sparse_compressed_tensor_with_dims(
0,
t.dense_dim,
t.shape,
blocksize,
index_dtype,
layout=t.layout,
dtype=t.dtype,
device="meta",
)
)
if self.copy_data:
# Pray sparse clone doesn't lose information
assert t.data is not None
with torch.no_grad(), no_dispatch():
assert _is_fake_tensor(r)
r.real_tensor = _safe_clone(t.data)
assert safe_is_leaf(r), "the callback you passed in doesn't detach"
if t.requires_grad:
r.requires_grad = True
if t.requires_grad and not is_leaf:
# pyrefly: ignore [bad-argument-type]
r = self._backward_error(r)
elif t.is_nested and not t.is_traceable_wrapper_subclass:
# TODO: Handle this better in Dynamo?
# There are checks there now, but this can still be triggered by a dense
# tensor graph input that is a view of a strided NT.
from torch._dynamo.exc import unimplemented
# NOTE this graph break will NOT be present in Dynamo's graph break registry
unimplemented(
gb_type="attempted to apply meta conversion to strided nested tensor",
context=str(t),
explanation="This is not supported.",
hints=[],
)
elif t.is_mkldnn:
is_leaf = t.is_leaf
(
sizes,
strides,
_storage_offset,
) = sym_sizes_strides_storage_offset(t, source)
# TODO: This doesn't seem right, where's the MKLDNN'ness
# lol
r = callback(
lambda: torch.empty_strided(
sizes, strides, dtype=t.dtype, device="meta"
)
)
if self.copy_data:
with torch.no_grad(), no_dispatch():
assert t.size is not None
assert t.stride is not None
assert _is_fake_tensor(r)
r.real_tensor = torch.empty_strided(
t.size, t.stride, dtype=t.dtype, device=t.device
)
assert t.data is not None
_safe_copy(r.real_tensor, t.data)
assert safe_is_leaf(r), "the callback you passed in doesn't detach"
if t.requires_grad:
r.requires_grad = True
if t.requires_grad and not is_leaf:
# pyrefly: ignore [bad-argument-type]
r = self._backward_error(r)
elif t.is_functorch_wrapped:
if t.is_view:
from torch._dynamo.exc import unimplemented
unimplemented(
gb_type="attempted to apply meta conversion to view functorch tensor",
context=str(t),
explanation="This is not supported.",
hints=[],
)
# Wraps a functorch tensor class (BatchedTensor, GradTrackingTensor)
# in a FakeTensor
def _to_fake_tensor(t: MetaTensorDesc) -> _TensorT:
# TODO: why aren't the recursive calls going to
# meta_tensor
r: _TensorT
if t.is_batchedtensor:
assert t.unwrapped is not None
assert t.level is not None
assert t.bdim is not None
ft = _to_fake_tensor(t.unwrapped)
lvl = t.level
bdim = t.bdim
# You cannot create functorch tensors without
# having the ambient funtorch interpreter stack
# available, as the level refers to things in the
# stack
with torch._functorch.pyfunctorch.temporarily_restore_interpreter_stack(
t.functorch_stack
):
r = self._checked_cast_tensor_t(
_add_batch_dim(ft, bdim, lvl)
)
elif t.is_gradtrackingtensor:
assert t.unwrapped is not None
assert t.level is not None
disable_functorch = torch._C._DisableFuncTorch
with disable_functorch():
ft = _to_fake_tensor(t.unwrapped)
lvl = t.level
if lvl == GRAD_TENSOR_SENTINEL_VALUE:
r = ft
else:
with torch._functorch.pyfunctorch.temporarily_restore_interpreter_stack(
t.functorch_stack
):
r = self._checked_cast_tensor_t(
torch._C._functorch._wrap_for_grad(ft, lvl),
)
is_leaf = t.is_leaf
if t.requires_grad and safe_is_leaf(r):
r.requires_grad = True
elif t.requires_grad and not is_leaf:
r = self._backward_error(r)
elif t.is_functional:
assert t.unwrapped is not None
assert t.current_level is not None
ft = self.meta_tensor(
t.unwrapped,
shape_env,
callback,
# NB: reuse these exactly, we treat the
# functional tensor as "invisible".
# TODO: Actually this all probably doesn't
# work, take a closer look.
source,
symbolic_context,
)
r = self._checked_cast_tensor_t(
_wrap_functional_tensor(ft, t.current_level),
)
# TODO: is_leaf/requires_grad?
else:
assert t.stride is not None
sizes = t.size
strides = t.stride
r = callback(
lambda: torch.empty_strided(
sizes,
strides,
dtype=t.dtype,
device="meta",
),
# device="meta",
)
if self.copy_data:
with torch.no_grad(), no_dispatch():
r.real_tensor = torch.empty_strided( # type: ignore[attr-defined]
t.size,
t.stride,
dtype=t.dtype,
device=t.device,
)
assert t.data is not None
_safe_copy(r.real_tensor, t.data) # type: ignore[attr-defined]
# pyrefly: ignore [bad-return]
return r
r = _to_fake_tensor(t)
elif t.is_functional and t.device.type not in ["xla", "lazy"]:
assert t.unwrapped is not None
assert not t.is_functorch_wrapped # handled above
unwrapped = self.meta_tensor(
t.unwrapped,
shape_env,
callback,
source,
symbolic_context,
)
r = self._checked_cast_tensor_t(
torch._to_functional_tensor(unwrapped)
)
torch._mirror_autograd_meta_to(t.autograd_meta_from, r) # type: ignore[attr-defined]
elif t.is_view:
# Construct views in two steps: recursively meta-fy their
# base, and then create view(s) off that. NB: doing it
# directly from storage is WRONG because this won't cause
# version counters to get shared.
assert t.base is not None
base_symbolic_context = None
if shape_env and symbolic_context is not None:
from torch.fx.experimental.symbolic_shapes import (
StatelessSymbolicContext,
)
assert isinstance(symbolic_context, StatelessSymbolicContext)
# NB: This should generally be set when the input is a view,
# but the exception right now is for fake-ifying grads, which is
# a work in progress.
if symbolic_context.view_base_context is not None:
base_symbolic_context = symbolic_context.view_base_context
base = self.meta_tensor(
t.base,
shape_env,
callback,
torch._dynamo.source.AttrSource(source, "_base"),
base_symbolic_context,
)
def is_c_of_r(
complex_dtype: torch.dtype, real_dtype: torch.dtype
) -> bool:
return (
utils.is_complex_dtype(complex_dtype)
and utils.corresponding_real_dtype(complex_dtype)
== real_dtype
)
# In some situations, MetaConverter may be called in a
# context where autograd is disabled. For the _is_view
# assert to pass, we have to setup the autograd view
# metadata anyway. Do this by reenabling the
# ADInplaceOrView key. This is kind of a hack.
old_exclude = torch._C._dispatch_tls_is_dispatch_key_excluded(
torch._C.DispatchKey.ADInplaceOrView
)
torch._C._dispatch_tls_set_dispatch_key_excluded(
torch._C.DispatchKey.ADInplaceOrView, False
)
try:
if base.dtype == t.dtype:
pass
elif is_c_of_r(base.dtype, t.dtype):
base = self._checked_cast_tensor_t(torch.view_as_real(base))
elif is_c_of_r(t.dtype, base.dtype):
base = self._checked_cast_tensor_t(
torch.view_as_complex(base)
)
else:
# This is not guaranteed to succeed. If it fails, it
# means there is another dtype-converting view function
# that hasn't been handled here
base = self._checked_cast_tensor_t(base.view(t.dtype))
# This is very tricky. Naively, you might expect this
# to hold:
#
# if t.requires_grad and not safe_is_leaf(t)
# assert t._base.requires_grad
#
# But it's not true! As you can see in the following
# program:
#
# x = torch.zeros(4)
# y = x.view(1, 4)
# y.requires_grad = True
# z = y.view(1, 1, 4)
# assert z._base is x
#
# So we may have to do *two* views out of the base to
# recreate this situation.
if t.is_leaf:
# Leaf views that track view metadata are created by
# creating a view inside a no_grad block
with torch.no_grad():
r = view_from_base(base, t)
# As it's a leaf, we can directly assign requires_grad
r.requires_grad = t.requires_grad
else:
if t.base.requires_grad == t.requires_grad:
# Easy case, just run the view op
with torch.enable_grad():
r = view_from_base(base, t)
# NB: We don't actually faithfully replicate
# autograd connectivity, but that doesn't matter
# today. See following for more info:
# https://gist.github.com/soulitzer/e03f015b314c3f5fcf80888c69390913
else:
# Obscure case. Create a leaf view and give it the
# correct requires_grad, then do the final view.
# NB: Can't have a non-leaf without requiring grad!
assert t.requires_grad
with torch.no_grad(), enable_python_dispatcher():
mid = self._checked_cast_tensor_t(
base.view(base.shape)
)
mid.requires_grad = t.requires_grad
with torch.enable_grad():
r = view_from_base(mid, t)
# The CreationMeta influences whether or not inplace
# mutation is an error or not. So we need to make
# sure we properly propagate this as well.
assert t.creation_meta is not None
torch._C._autograd._set_creation_meta(r, t.creation_meta)
finally:
torch._C._dispatch_tls_set_dispatch_key_excluded(
torch._C.DispatchKey.ADInplaceOrView, old_exclude
)
r.fake_device = t.device # type: ignore[attr-defined]
else:
is_leaf = t.is_leaf
# Graph-Break for wrapped tensors
if (
not (t.is_batchedtensor or t.is_gradtrackingtensor)
and t.is_functorch_wrapped
) or t.is_legacy_batchedtensor:
# pyrefly: ignore [bad-return]
return NotImplemented
(
sizes,
strides,
storage_offset,
) = sym_sizes_strides_storage_offset(t, source, symbolic_context)
# If we have a subclass that desugars into dense tensors,
# perform our callback on each inner tensor.
if t.is_traceable_wrapper_subclass:
r = empty_create_subclass(
t, outer_size=sizes, outer_stride=strides
)
else:
r = callback(
lambda: torch.empty_strided(
sizes,
strides,
dtype=t.dtype,
device="meta",
)
)
if self.copy_data:
with torch.no_grad(), no_dispatch():
assert t.size is not None
assert t.stride is not None
assert _is_fake_tensor(r)
r.real_tensor = torch.empty_strided(
t.size, t.stride, dtype=t.dtype, device=t.device
)
_safe_copy(r.real_tensor, t.data)
assert safe_is_leaf(r), "the callback you passed in doesn't detach"
if t.requires_grad:
r.requires_grad = t.requires_grad
if not is_leaf:
# Fake up some autograd history.
# Note: we *used* to call .clone() here to mock up some autograd history.
# This is bad for subclasses.
# Consider the case where you have a wrapper subclass that is contiguous,
# but its inner tensor is noncontiguous().
# .clone() (or other ops) will have the side effect of changing
# the metadata of the inner tensor.
# So instead, we now have a dedicated fn to set autograd history,
# without inadvertently changing other metadata.
# pyrefly: ignore [bad-argument-type]
r = self._backward_error(r)
s = t.storage
assert s is not None
if s.id not in self.storage_memo and (
r.is_nested
or (
r.stride() == strides
and r.storage_offset() == storage_offset
)
):
# You're normal and happy, install the fresh storage into the memo
self.set_storage_memo(s, r.untyped_storage())
if self.copy_data:
assert _is_fake_tensor(r)
assert r.real_tensor is not None
_set_real_storage(
r.untyped_storage(), r.real_tensor.untyped_storage()
)
else:
# You're in crazy town; somehow you gave us a tensor
# that wasn't a view, but had nonzero storage offset,
# nontrivial strides (such that clone() couldn't
# preserve them), or already aliases with another
# tensor's storage. The most typical way to end
# up here is with set_. So use set_ to bludgeon this
# in.
r_s = self.meta_storage(s, callback=callback)
# NB: In principle, this should always work, but there
# is some subtle difference in the autograd metadata
# that means we will backprop the set_ call, even if
# r is declared as an input to grad.
# See https://github.com/pytorch/pytorch/issues/87956
# for the reproducer.
# NB: The in_kernel_invocation_manager here is necessary
# for fake tensor. If we run the set_ call with fake
# tensor on, r will improperly report that it is NOT a
# meta tensor but a cpu tensor, and then the set_ call
# will fail due to device mismatch. no_dispatch() is
# not enough, because the fake tensor will still claim
# to be a CPU tensor and you'll end up in the CPU
# kernel. Arguably this is a hack; a cleaner way to
# solve this is to have a FakeStorage concept which
# would report it's CPU device--no problem now! But
# this is difficult to do because we don't have storage
# subclasses. Relevant test is
# DynamicShapesFunctionTests::test_add_dynamic_shapes in
# test/dynamo/test_dynamic_shapes.py
maybe_fake_mgr: AbstractContextManager[None] = (
contextlib.nullcontext()
)
from torch._subclasses.fake_tensor import (
in_kernel_invocation_manager,
maybe_get_fake_mode,
)
mb_fake_mode = maybe_get_fake_mode(r)
if mb_fake_mode is not None:
maybe_fake_mgr = in_kernel_invocation_manager(mb_fake_mode)
with torch.no_grad(), maybe_suppress():
with maybe_fake_mgr:
r.set_(r_s, storage_offset, sizes, strides)
if self.copy_data:
with torch.no_grad(), no_dispatch():
assert _is_fake_tensor(r)
assert r.real_tensor is not None
assert t.stride is not None
r.real_tensor.set_(
_get_real_storage(r_s),
t.storage_offset,
t.size,
t.stride,
)
if t.grad is not None:
from torch._dynamo.source import AttrSource
# TODO: Use a valid grad-specific symbolic context instead of recycling
# the one from t. This isn't correct if e.g. t._is_view() != t.grad._is_view().
# pyrefly: ignore [unbound-name]
r.grad = self.meta_tensor(
t.grad,
shape_env,
callback,
AttrSource(source, "grad"),
symbolic_context,
)
# pyrefly: ignore [unbound-name]
torch._C._set_conj(r, t.is_conj)
# pyrefly: ignore [unbound-name]
torch._C._set_neg(r, t.is_neg)
# This can be skipped if necessary for performance reasons
skip_leaf = (
t.is_gradtrackingtensor and t.level == GRAD_TENSOR_SENTINEL_VALUE
)
# pyrefly: ignore [unbound-name]
assert_metadata_eq(assert_eq, t, r, skip_symbolic=True, skip_leaf=skip_leaf)
# Thanks to storage resizing, it's possible to end up with a tensor
# that advertises a real size, but has a storage that actually has zero bytes.
# Need to reflect this in the generated FakeTensor.
from torch.fx.experimental.symbolic_shapes import guard_or_false
if t.storage is not None and guard_or_false(t.storage.size == 0):
# pyrefly: ignore [unbound-name]
r.untyped_storage().resize_(0)
if t.is_parameter:
# pyrefly: ignore [unbound-name]
r._is_param = True
# See Note: [Creating symbolic nested int]
if t.nested_int is not None:
# pyrefly: ignore [unbound-name]
assert _is_fake_tensor(r)
# pyrefly: ignore [unbound-name]
r.nested_int_memo = r.fake_mode.create_symbolic_nested_int(
nt_tensor_id=t.nested_int
)
# pyrefly: ignore [bad-argument-type, unbound-name]
self.set_tensor_memo(t, r)
return self._checked_get_tensor_memo(t)
def __call__(
self,
t: torch.Tensor,
shape_env: Optional[ShapeEnv] = None,
*,
callback: Optional[_MetaTensorCallback[_TensorT]] = None,
source: Optional[Source] = None,
symbolic_context: Optional[SymbolicContext] = None,
# Controls whether or not we should dump the tensor metadata to structured logs
# when source is not None. Because we refakify after Dynamo is done,
# we don't want to dump info again from AOTAutograd, it is redundant.
trace: bool = True,
) -> _TensorT:
"""Convert a real tensor ``t`` into its meta/fake equivalent.

Returns ``NotImplemented`` for unsupported inputs (lazy tensors,
quantized tensors, views of sparse tensors, and generic non-Tensor
tensor-likes) and passes non-tensor values through unchanged.
Otherwise describes ``t`` via ``self.describer`` and delegates to
``meta_tensor`` with ambient functionalization/functorch modes
suspended, so the conversion behaves like re-fakeifying in a fresh
process.
"""
callback_: _MetaTensorCallback[_TensorT]
if callback is None:
callback_ = self._identity_callable
else:
callback_ = callback
# TODO: zero tensors? We appear to have eliminated them by
# excluding complex for now
# Filter out cases we don't support
# TODO: This can probably be simplified quite a bit
if isinstance(t, torch.Tensor):
if (
# Lazy tensors are not supported. Note that XLA is
# implemented on top of lazy tensor, not excluded here; we
# have some special handling for it; this is for XLA Dynamo
# integration
t.device.type == "lazy"
or
# Quantization is not supported
t.is_quantized
or
# Views out of sparse tensors not currently supported (plain
# sparse is supported though)
(t._is_view() and t._base is not None and t._base.is_sparse)
):
self.miss += 1
# pyrefly: ignore [bad-return]
return NotImplemented
else:
self.hit += 1
elif torch.overrides.is_tensor_like(t):
self.miss += 1
# pyrefly: ignore [bad-return]
return NotImplemented
else:
# non-Tensor types don't count as hit or miss
return t
# Tracing only makes sense when there is a source to attribute the
# metadata to.
if source is None:
trace = False
# Describe the tensor. NB: do NOT disable ambient modes, we may need
# to query them when figuring out what to put in here
t_desc = self.describer.describe_tensor(t, trace=trace)
if trace:
assert source is not None
trace_structured(
"describe_source",
metadata_fn=lambda: {
"describer_id": self.describer.id,
"id": t_desc.id,
"source": source.name(),
},
)
# Do the meta-fication. Here, we disable all the ambient modes, to
# better simulate what would be like to re-fakeify from a fresh
# process
with contextlib.ExitStack() as exit_stack:
exit_stack.enter_context(torch._dispatch.python.suspend_functionalization())
st = peek_interpreter_stack()
if st is not None:
exit_stack.enter_context(
torch._functorch.pyfunctorch.temporarily_clear_interpreter_stack()
)
r = self.meta_tensor(
t_desc,
shape_env,
callback_,
source,
symbolic_context,
)
if type(t) is torch.nn.Parameter:
# NB: Cannot directly use Parameter constructor
# because that would force a detach, not desirable
r._is_param = True
# TODO: return the description for later
return r
import torch._prims_common as utils
| MetaConverter |
python | kamyu104__LeetCode-Solutions | Python/open-the-lock.py | {
"start": 183,
"end": 1040
} | class ____(object):
def openLock(self, deadends, target):
    """
    :type deadends: List[str]
    :type target: str
    :rtype: int

    Level-by-level BFS over the 10**4 lock states; `depth` counts turns.
    Returns the minimum number of single-wheel turns, or -1 if the target
    is unreachable.
    """
    dead = set(deadends)
    q = ["0000"]
    lookup = {"0000"}
    depth = 0
    while q:
        next_q = []
        for node in q:
            # Bug fix: the deadend check must come BEFORE the target
            # check. A deadend state freezes the lock, so a dead target
            # (including "0000" being both start and deadend) must yield
            # -1, not the current depth.
            if node in dead:
                continue
            if node == target:
                return depth
            # range (not the Python-2-only xrange) keeps this working on
            # both Python 2 and 3.
            for i in range(4):
                n = int(node[i])
                for d in (-1, 1):
                    nn = (n + d) % 10
                    neighbor = node[:i] + str(nn) + node[i + 1:]
                    if neighbor not in lookup:
                        lookup.add(neighbor)
                        next_q.append(neighbor)
        q = next_q
        depth += 1
    return -1
| Solution |
python | fastapi__sqlmodel | sqlmodel/sql/_expression_select_cls.py | {
"start": 411,
"end": 1121
} | class ____(_Select[Tuple[_T]]):
# Opt this subclass into SQLAlchemy's compiled-SQL statement cache; the
# overrides below do not alter how the statement compiles.
inherit_cache = True
# NOTE: these overrides only refine typing: they accept the wider `bool`
# element in the Union (hence the `type: ignore` at each delegation site,
# since the base class is annotated for column expressions only) and
# return `Self` so chained calls keep the subclass type. All real work is
# delegated unchanged to the SQLAlchemy base class.
def where(self, *whereclause: Union[_ColumnExpressionArgument[bool], bool]) -> Self:
"""Return a new `Select` construct with the given expression added to
its `WHERE` clause, joined to the existing clause via `AND`, if any.
"""
return super().where(*whereclause)  # type: ignore[arg-type]
def having(self, *having: Union[_ColumnExpressionArgument[bool], bool]) -> Self:
"""Return a new `Select` construct with the given expression added to
its `HAVING` clause, joined to the existing clause via `AND`, if any.
"""
return super().having(*having)  # type: ignore[arg-type]
| SelectBase |
python | walkccc__LeetCode | solutions/2977. Minimum Cost to Convert String II/2977.py | {
"start": 0,
"end": 1904
} | class ____:
def minimumCost(
self,
source: str,
target: str,
original: list[str],
changed: list[str],
cost: list[int],
) -> int:
subLengths = set(len(s) for s in original)
subToId = self._getSubToId(original, changed)
subCount = len(subToId)
# dist[u][v] := the minimum distance to change the substring with id u to
# the substring with id v
dist = [[math.inf for _ in range(subCount)] for _ in range(subCount)]
# dp[i] := the minimum cost to change the first i letters of `source` into
# `target`, leaving the suffix untouched
dp = [math.inf for _ in range(len(source) + 1)]
for a, b, c in zip(original, changed, cost):
u = subToId[a]
v = subToId[b]
dist[u][v] = min(dist[u][v], c)
for k in range(subCount):
for i in range(subCount):
if dist[i][k] < math.inf:
for j in range(subCount):
if dist[k][j] < math.inf:
dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
dp[0] = 0
for i, (s, t) in enumerate(zip(source, target)):
if dp[i] == math.inf:
continue
if s == t:
dp[i + 1] = min(dp[i + 1], dp[i])
for subLength in subLengths:
if i + subLength > len(source):
continue
subSource = source[i:i + subLength]
subTarget = target[i:i + subLength]
if subSource not in subToId or subTarget not in subToId:
continue
u = subToId[subSource]
v = subToId[subTarget]
if dist[u][v] != math.inf:
dp[i + subLength] = min(dp[i + subLength], dp[i] + dist[u][v])
return -1 if dp[len(source)] == math.inf else dp[len(source)]
def _getSubToId(self, original: str, changed: str) -> dict[str, int]:
subToId = {}
for s in original + changed:
if s not in subToId:
subToId[s] = len(subToId)
return subToId
| Solution |
python | walkccc__LeetCode | solutions/2945. Find Maximum Non-decreasing Array Length/2945.py | {
"start": 0,
"end": 921
} | class ____:
def findMaximumLength(self, nums: list[int]) -> int:
n = len(nums)
INF = 10_000_000_000
# prefix[i] := the sum of the first i nums
prefix = list(itertools.accumulate(nums, initial=0))
# dp[i] := the maximum number of elements in the increasing
# sequence after processing the first i nums
dp = [0] * (n + 1)
# last[i] := the last sum after processing the first i nums
last = [0] + [INF] * n
for i in range(n):
j = self._findIndex(i, prefix, last)
dp[i + 1] = max(dp[i], dp[j] + 1)
last[i + 1] = prefix[i + 1] - prefix[j]
return dp[n]
def _findIndex(self, i: int, prefix: list[int], last: list[int]) -> int:
"""Returns the index in [0..i].
Returns the maximum index j in [0..i] s.t.
prefix[i + 1] - prefix[j] >= last[j].
"""
for j in range(i, -1, -1):
if prefix[i + 1] - prefix[j] >= last[j]:
return j
| Solution |
python | kamyu104__LeetCode-Solutions | Python/masking-personal-information.py | {
"start": 29,
"end": 502
} | class ____(object):
def maskPII(self, S):
    """
    :type S: str
    :rtype: str

    Masks an email (keep first/last letter of the local part, lowercase
    everything) or a phone number (keep the last four digits; prefix
    country-code digits as asterisks).
    """
    if '@' in S:
        first, after = S.split('@')
        return "{}*****{}@{}".format(first[0], first[-1], after).lower()
    # Bug fix: the original did `filter(lambda x: x.isdigit(), S)` and then
    # sliced/len'd the result, which only works on Python 2 where filter()
    # returns a string. A list comprehension + join works on both 2 and 3.
    digits = [c for c in S if c.isdigit()]
    local = "***-***-{}".format("".join(digits[-4:]))
    if len(digits) == 10:
        return local
    # Extra leading digits are the country code: one '*' per digit.
    return "+{}-{}".format('*' * (len(digits) - 10), local)
| Solution |
python | openai__openai-python | src/openai/types/responses/response_computer_tool_call.py | {
"start": 1028,
"end": 1364
} | class ____(BaseModel):
# Literal tag fixed to "double_click" — presumably the discriminator used
# when this model appears in a union of action types (confirm at use site).
# NOTE(review): the field name shadows the builtin `type` within the class
# body; this is conventional for pydantic models.
type: Literal["double_click"]
"""Specifies the event type.
For a double click action, this property is always set to `double_click`.
"""
x: int
"""The x-coordinate where the double click occurred."""
y: int
"""The y-coordinate where the double click occurred."""
| ActionDoubleClick |
python | sphinx-doc__sphinx | sphinx/writers/latex.py | {
"start": 1617,
"end": 1725
} | class ____(nodes.footnote):
"""Footnotes that are collected are assigned this class.

Marker subclass of ``nodes.footnote``: it adds no behavior of its own and
only re-tags footnote nodes, presumably so later writer passes can tell
already-collected footnotes apart from ordinary ones -- confirm against
the visitor code that consumes it.
"""
| collected_footnote |
python | streamlit__streamlit | lib/tests/streamlit/runtime/state/widgets_test.py | {
"start": 23470,
"end": 24989
} | class ____(DeltaGeneratorTestCase):
# Runs once per (name, widget factory) pair in WIDGET_ELEMENTS: builds the
# widget while intercepting register_widget_from_metadata (wrapped, not
# stubbed, so real registration still happens) to inspect the metadata the
# element passed in.
@parameterized.expand(WIDGET_ELEMENTS)
def test_register_widget_called_with_valid_value_type(
self, _element_name: str, widget_func: ELEMENT_PRODUCER
):
with patch(
"streamlit.runtime.state.widgets.register_widget_from_metadata",
wraps=register_widget_from_metadata,
) as patched_register_widget_from_metadata:
widget_func()
assert patched_register_widget_from_metadata.call_count == 1
widget_metadata_arg: WidgetMetadata = (
patched_register_widget_from_metadata.call_args[0][0]
)
# value_type must be one of the declared ValueFieldName literals ...
assert widget_metadata_arg.value_type in get_args(ValueFieldName)
# test that the value_type also maps to a protobuf field
assert widget_metadata_arg.value_type in WidgetState.DESCRIPTOR.fields_by_name
def test_raises_exception_with_on_change_and_callbacks(self):
"""Test that `register_widget` raises an exception when both `on_change`
and `callbacks` are provided.
"""
# The two callback mechanisms are mutually exclusive by API contract.
with pytest.raises(errors.StreamlitAPIException):
register_widget(
"el_id",
deserializer=lambda x: x,
serializer=lambda x: x,
ctx=None,
on_change_handler=lambda: None,
callbacks={"change": lambda: None},
value_type="bool_value",
)
@patch("streamlit.runtime.Runtime.exists", new=MagicMock(return_value=True))
| RegisterWidgetsTest |
python | pydata__xarray | xarray/coding/common.py | {
"start": 596,
"end": 1709
} | class ____:
"""Base class for encoding and decoding transformations on variables.
We use coders for transforming variables between xarray's data model and
a format suitable for serialization. For example, coders apply CF
conventions for how data should be represented in netCDF files.
Subclasses should implement encode() and decode(), which should satisfy
the identity ``coder.decode(coder.encode(variable)) == variable``. If any
options are necessary, they should be implemented as arguments to the
__init__ method.
The optional name argument to encode() and decode() exists solely for the
sake of better error messages, and should correspond to the name of
variables in the underlying store.
"""
def encode(self, variable: Variable, name: T_Name = None) -> Variable:
"""Convert an encoded variable to a decoded variable"""
raise NotImplementedError()
def decode(self, variable: Variable, name: T_Name = None) -> Variable:
"""Convert a decoded variable to an encoded variable"""
raise NotImplementedError()
| VariableCoder |
python | keras-team__keras | keras/src/tree/tree_api.py | {
"start": 824,
"end": 14559
} | class ____:
"""Special value for use with `traverse()`."""
pass
@keras_export("keras.tree.is_nested")
def is_nested(structure):
"""Checks if a given structure is nested.
Examples:
>>> keras.tree.is_nested(42)
False
>>> keras.tree.is_nested({"foo": 42})
True
Args:
structure: A structure to check.
Returns:
`True` if a given structure is nested, i.e. is a sequence, a mapping,
or a namedtuple, and `False` otherwise.
"""
return tree_impl.is_nested(structure)
@keras_export("keras.tree.traverse")
def traverse(func, structure, top_down=True):
"""Traverses the given nested structure, applying the given function.
The traversal is depth-first. If `top_down` is True (default), parents
are returned before their children (giving the option to avoid traversing
into a sub-tree).
Examples:
>>> v = []
>>> keras.tree.traverse(v.append, [(1, 2), [3], {"a": 4}], top_down=True)
[(1, 2), [3], {'a': 4}]
>>> v
[[(1, 2), [3], {'a': 4}], (1, 2), 1, 2, [3], 3, {'a': 4}, 4]
>>> v = []
>>> keras.tree.traverse(v.append, [(1, 2), [3], {"a": 4}], top_down=False)
[(1, 2), [3], {'a': 4}]
>>> v
[1, 2, (1, 2), 3, [3], 4, {'a': 4}, [(1, 2), [3], {'a': 4}]]
Args:
func: The function to be applied to each sub-nest of the structure.
When traversing top-down:
If `func(subtree) is None` the traversal continues into the
sub-tree.
If `func(subtree) is not None` the traversal does not continue
into the sub-tree. The sub-tree will be replaced by `func(subtree)`
in the returned structure (to replace the sub-tree with `None`, use
the special value `MAP_TO_NONE`).
When traversing bottom-up:
If `func(subtree) is None` the traversed sub-tree is returned
unaltered.
If `func(subtree) is not None` the sub-tree will be replaced by
`func(subtree)` in the returned structure (to replace the sub-tree
with None, use the special value `MAP_TO_NONE`).
structure: The structure to traverse.
top_down: If True, parent structures will be visited before their
children.
Returns:
The structured output from the traversal.
Raises:
TypeError: If `func` is not callable.
"""
return tree_impl.traverse(func, structure, top_down=top_down)
@keras_export("keras.tree.flatten")
def flatten(structure):
"""Flattens a possibly nested structure into a list.
In the case of dict instances, the sequence consists of the values,
sorted by key to ensure deterministic behavior. However, instances of
`collections.OrderedDict` are handled differently: their sequence order is
used instead of the sorted keys. The same convention is followed in
`pack_sequence_as`. This correctly unflattens dicts and `OrderedDict` after
they have been flattened, or vice-versa.
Dictionaries with non-sortable keys are not supported.
Examples:
>>> keras.tree.flatten([[1, 2, 3], [4, [5], [[6]]]])
[1, 2, 3, 4, 5, 6]
>>> keras.tree.flatten(None)
[None]
>>> keras.tree.flatten(1)
[1]
>>> keras.tree.flatten({100: 'world!', 6: 'Hello'})
['Hello', 'world!']
Args:
structure: An arbitrarily nested structure.
Returns:
A list, the flattened version of the input `structure`.
"""
return tree_impl.flatten(structure)
@keras_export("keras.tree.flatten_with_path")
def flatten_with_path(structure):
"""Flattens a possibly nested structure into a list.
This is a variant of flattens() which produces a
list of pairs: `(path, item)`. A path is a tuple of indices and/or keys
which uniquely identifies the position of the corresponding item.
Dictionaries with non-sortable keys are not supported.
Examples:
>>> keras.flatten_with_path([{"foo": 42}])
[((0, 'foo'), 42)]
Args:
structure: An arbitrarily nested structure.
Returns:
A list of `(path, item)` pairs corresponding to the flattened
version of the input `structure`.
"""
return tree_impl.flatten_with_path(structure)
@keras_export("keras.tree.map_structure")
def map_structure(func, *structures, none_is_leaf=True):
"""Maps `func` through given structures.
Examples:
>>> structure = [[1], [2], [3]]
>>> keras.tree.map_structure(lambda v: v**2, structure)
[[1], [4], [9]]
>>> keras.tree.map_structure(lambda x, y: x * y, structure, structure)
[[1], [4], [9]]
>>> Foo = collections.namedtuple('Foo', ['a', 'b'])
>>> structure = Foo(a=1, b=2)
>>> keras.tree.map_structure(lambda v: v * 2, structure)
Foo(a=2, b=4)
Args:
func: A callable that accepts as many arguments as there are structures.
*structures: Arbitrarily nested structures of the same layout.
none_is_leaf: If True, `func` will be called on `None` leaves. If False,
`None` values are not passed to `func` and are returned in the
output directly.
Returns:
A new structure with the same layout as the given ones.
Raises:
TypeError: If `structures` is empty or `func` is not callable.
ValueError: If there is more than one items in `structures` and some of
the nested structures don't match according to the rules of
`assert_same_structure`.
"""
return tree_impl.map_structure(func, *structures, none_is_leaf=none_is_leaf)
@keras_export("keras.tree.map_structure_up_to")
def map_structure_up_to(shallow_structure, func, *structures):
"""Maps `func` through given structures up to `shallow_structure`.
This is a variant of `map_structure` which only maps the given structures
up to `shallow_structure`. All further nested components are retained as-is.
Examples:
>>> shallow_structure = [None, None]
>>> structure = [[1, 1], [2, 2]]
>>> keras.tree.map_structure_up_to(shallow_structure, len, structure)
[2, 2]
>>> shallow_structure = [None, [None, None]]
>>> keras.tree.map_structure_up_to(shallow_structure, str, structure)
['[1, 1]', ['2', '2']]
Args:
shallow_structure: A structure with layout common to all `structures`.
func: A callable that accepts as many arguments as there are structures.
*structures: Arbitrarily nested structures of the same layout.
Returns:
A new structure with the same layout as `shallow_structure`.
Raises:
TypeError: If `structures` is empty or `func` is not callable.
ValueError: If one of the items in `structures` doesn't match the
nested structure of `shallow_structure` according to the rules of
`assert_same_structure`. Items in `structures` are allowed to be
nested deeper than `shallow_structure`, but they cannot be
shallower.
"""
return tree_impl.map_structure_up_to(shallow_structure, func, *structures)
@keras_export("keras.tree.assert_same_structure")
def assert_same_structure(a, b, check_types=None):
"""Asserts that two structures are nested in the same way.
This function verifies that the nested structures match. The leafs can be of
any type. At each level, the structures must be of the same type and have
the same number of elements. Instances of `dict`, `OrderedDict` and
`defaultdict` are all considered the same as long as they have the same set
of keys. However, `list`, `tuple`, `namedtuple` and `deque` are not the same
structures. Two namedtuples with identical fields and even identical names
are not the same structures.
Examples:
>>> keras.tree.assert_same_structure([(0, 1)], [(2, 3)])
>>> Foo = collections.namedtuple('Foo', ['a', 'b'])
>>> AlsoFoo = collections.namedtuple('Foo', ['a', 'b'])
>>> keras.tree.assert_same_structure(Foo(0, 1), Foo(2, 3))
>>> keras.tree.assert_same_structure(Foo(0, 1), AlsoFoo(2, 3))
Traceback (most recent call last):
...
ValueError: The two structures don't have the same nested structure.
...
Args:
a: an arbitrarily nested structure.
b: an arbitrarily nested structure.
check_types: Deprecated. The behavior of this flag was inconsistent, it
no longer has any effect. For a looser check, use
`assert_same_paths` instead, which considers `list`, `tuple`,
`namedtuple` and `deque` as matching structures.
Raises:
ValueError: If the two structures `a` and `b` don't match.
"""
if check_types is not None:
if check_types:
warnings.warn(
"The `check_types` argument is deprecated and no longer has "
"any effect, please remove.",
DeprecationWarning,
stacklevel=2,
)
else:
warnings.warn(
"The `check_types` argument is deprecated and no longer has "
"any effect. For a looser check, use "
"`keras.tree.assert_same_paths()`, which considers `list`, "
"`tuple`, `namedtuple` and `deque` as matching",
DeprecationWarning,
stacklevel=2,
)
return tree_impl.assert_same_structure(a, b)
@keras_export("keras.tree.assert_same_paths")
def assert_same_paths(a, b):
"""Asserts that two structures have identical paths in their tree structure.
This function verifies that two nested structures have the same paths.
Unlike `assert_same_structure`, this function only checks the paths
and ignores the collection types.
For Sequences, to path is the index: 0, 1, 2, etc. For Mappings, the path is
the key, for instance "a", "b", "c". Note that namedtuples also use indices
and not field names for the path.
Examples:
>>> keras.tree.assert_same_paths([0, 1], (2, 3))
>>> Point1 = collections.namedtuple('Point1', ['x', 'y'])
>>> Point2 = collections.namedtuple('Point2', ['x', 'y'])
>>> keras.tree.assert_same_paths(Point1(0, 1), Point2(2, 3))
Args:
a: an arbitrarily nested structure.
b: an arbitrarily nested structure.
Raises:
ValueError: If the paths in structure `a` don't match the paths in
structure `b`. The error message will include the specific paths
that differ.
"""
return tree_impl.assert_same_paths(a, b)
@keras_export("keras.tree.pack_sequence_as")
def pack_sequence_as(structure, flat_sequence):
"""Returns a given flattened sequence packed into a given structure.
If `structure` is an atom, `flat_sequence` must be a single-item list; in
this case the return value is `flat_sequence[0]`.
If `structure` is or contains a dict instance, the keys will be sorted to
pack the flat sequence in deterministic order. However, instances of
`collections.OrderedDict` are handled differently: their sequence order is
used instead of the sorted keys. The same convention is followed in
`flatten`. This correctly repacks dicts and `OrderedDicts` after they have
been flattened, or vice-versa.
Dictionaries with non-sortable keys are not supported.
Examples:
>>> structure = {"key3": "", "key1": "", "key2": ""}
>>> flat_sequence = ["value1", "value2", "value3"]
>>> keras.tree.pack_sequence_as(structure, flat_sequence)
{"key3": "value3", "key1": "value1", "key2": "value2"}
>>> structure = (("a", "b"), ("c", "d", "e"), "f")
>>> flat_sequence = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
>>> keras.tree.pack_sequence_as(structure, flat_sequence)
((1.0, 2.0), (3.0, 4.0, 5.0), 6.0)
>>> structure = {"key3": {"c": ("alpha", "beta"), "a": ("gamma")},
... "key1": {"e": "val1", "d": "val2"}}
>>> flat_sequence = ["val2", "val1", 3.0, 1.0, 2.0]
>>> keras.tree.pack_sequence_as(structure, flat_sequence)
{'key3': {'c': (1.0, 2.0), 'a': 3.0}, 'key1': {'e': 'val1', 'd': 'val2'}}
>>> structure = ["a"]
>>> flat_sequence = [np.array([[1, 2], [3, 4]])]
>>> keras.tree.pack_sequence_as(structure, flat_sequence)
[array([[1, 2],
[3, 4]])]
>>> structure = ["a"]
>>> flat_sequence = [keras.ops.ones([2, 2])]
>>> keras.tree.pack_sequence_as(structure, flat_sequence)
[array([[1., 1.],
[1., 1.]]]
Args:
structure: Arbitrarily nested structure.
flat_sequence: Flat sequence to pack.
Returns:
`flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
TypeError: If `flat_sequence` is not iterable.
ValueError: If `flat_sequence` cannot be repacked as `structure`; for
instance, if `flat_sequence` has too few or too many elements.
"""
return tree_impl.pack_sequence_as(structure, flat_sequence)
@keras_export("keras.tree.lists_to_tuples")
def lists_to_tuples(structure):
"""Returns the structure with list instances changed to tuples.
Args:
structure: Arbitrarily nested structure.
Returns:
The same structure but with tuples instead of lists.
"""
return tree_impl.lists_to_tuples(structure)
@keras_export("keras.tree.map_shape_structure")
def map_shape_structure(func, structure):
"""Variant of keras.tree.map_structure that operates on shape tuples.
Tuples containing ints and Nones are considered shapes and passed to `func`.
Args:
structure: Arbitrarily nested structure.
Returns:
The same structure with `func` applied.
"""
return tree_impl.map_shape_structure(func, structure)
| MAP_TO_NONE |
python | walkccc__LeetCode | solutions/1763. Longest Nice Substring/1763.py | {
"start": 0,
"end": 551
} | class ____:
def longestNiceSubstring(self, s: str) -> str:
if len(s) < 2:
return ''
seen = set(s)
for i, c in enumerate(s):
# If both upper and lower case letters exists in the string, keep moving,
# else take the erroneous character as a partition and check for its left
# and right parts to be nice strings.
if c.swapcase() not in seen:
prefix = self.longestNiceSubstring(s[:i])
suffix = self.longestNiceSubstring(s[i + 1:])
return max(prefix, suffix, key=len)
return s
| Solution |
python | django-haystack__django-haystack | haystack/fields.py | {
"start": 15609,
"end": 15675
} | class ____(FacetField, MultiValueField):
pass
| FacetMultiValueField |
python | Textualize__textual | tests/directory_tree/test_early_show_root.py | {
"start": 87,
"end": 573
} | class ____(App[None]):
def compose(self) -> ComposeResult:
tree = DirectoryTree(".")
tree.show_root = True
yield tree
async def test_managed_to_set_show_root_before_mounted() -> None:
"""https://github.com/Textualize/textual/issues/2363"""
async with DirectoryTreeApp().run_test() as pilot:
assert isinstance(pilot.app.query_one(DirectoryTree), DirectoryTree)
assert pilot.app.query_one(DirectoryTree).show_root is True
| DirectoryTreeApp |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.