language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
huggingface__transformers
src/transformers/models/granite_speech/modeling_granite_speech.py
{ "start": 5032, "end": 8536 }
class ____(nn.Module): """Attention for conformer blocks using Shaw's relative positional embeddings. See the following [paper](https://huggingface.co/papers/1803.02155) for more details. """ def __init__(self, config: GraniteSpeechEncoderConfig): super().__init__() inner_dim = config.dim_head * config.num_heads self.max_pos_emb = config.max_pos_emb self.context_size = config.context_size self.num_heads = config.num_heads self.dim_head = config.dim_head self.scale = self.dim_head**-0.5 self.pre_norm = nn.LayerNorm(config.hidden_dim) self.to_q = nn.Linear(config.hidden_dim, inner_dim, bias=False) self.to_kv = nn.Linear(config.hidden_dim, inner_dim * 2, bias=False) self.to_out = nn.Linear(inner_dim, config.hidden_dim) self.rel_pos_emb = nn.Embedding(2 * self.max_pos_emb + 1, self.dim_head) self.dropout = nn.Dropout(config.dropout) if self.context_size <= 0 or self.context_size > self.max_pos_emb: raise ValueError("Context size is either less than 0 or exceeds the max_pos_emb") def forward(self, hidden_states: torch.Tensor, attention_dists: torch.Tensor) -> torch.Tensor: hidden_states = self.pre_norm(hidden_states) bsz, num_features, _ = hidden_states.shape num_blocks = math.ceil(num_features / self.context_size) remainder = num_features % self.context_size if remainder > 0: # right padding to reach block size hidden_states = torch.nn.functional.pad(hidden_states, (0, 0, 0, self.context_size - remainder)) query_states = self.to_q(hidden_states) key_states, value_states = self.to_kv(hidden_states).chunk(2, dim=-1) query_states = query_states.reshape(bsz, num_blocks, self.context_size, self.num_heads, -1).transpose(2, 3) key_states = key_states.reshape(bsz, num_blocks, self.context_size, self.num_heads, -1).transpose(2, 3) value_states = value_states.reshape(bsz, num_blocks, self.context_size, self.num_heads, -1).transpose(2, 3) # shaw's relative positional embedding rel_pos_emb = self.rel_pos_emb(attention_dists) # alternative computation of `pos_attn` - for 
readability # rel_pos_emb_expanded = rel_pos_emb.view([1, 1, 1] + list(rel_pos_emb.shape)) # pos_attn = torch.sum(query_states.unsqueeze(-2) * rel_pos_emb_expanded, dim=-1) * self.scale # einsum implementation of pos_attn - gives x30 speedup over the alternative # TODO (@avihu111) find a fast alternative to einsum pos_attn = torch.einsum("b m h c d, c r d -> b m h c r", query_states, rel_pos_emb) * self.scale if remainder > 0: # masked attention in the extended block mask = torch.ones(self.context_size, self.context_size, dtype=bool, device=hidden_states.device) mask[:remainder, :remainder] = 0 mask_value = -torch.finfo(pos_attn.dtype).max pos_attn[:, -1, :].masked_fill_(mask, mask_value) with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH): out = F.scaled_dot_product_attention( query_states, key_states, value_states, attn_mask=pos_attn, scale=self.scale ) out = out.transpose(2, 3).reshape(bsz, hidden_states.shape[1], -1) out = self.to_out(out[:, :num_features, :]) return self.dropout(out)
GraniteSpeechConformerAttention
python
huggingface__transformers
src/transformers/models/deformable_detr/modeling_deformable_detr.py
{ "start": 15129, "end": 18547 }
class ____(nn.Module): """ Convolutional backbone, using either the AutoBackbone API or one from the timm library. nn.BatchNorm2d layers are replaced by DeformableDetrFrozenBatchNorm2d as defined above. """ def __init__(self, config): super().__init__() self.config = config # For backwards compatibility we have to use the timm library directly instead of the AutoBackbone API if config.use_timm_backbone: # We default to values which were previously hard-coded. This enables configurability from the config # using backbone arguments, while keeping the default behavior the same. requires_backends(self, ["timm"]) kwargs = getattr(config, "backbone_kwargs", {}) kwargs = {} if kwargs is None else kwargs.copy() out_indices = kwargs.pop("out_indices", (2, 3, 4) if config.num_feature_levels > 1 else (4,)) num_channels = kwargs.pop("in_chans", config.num_channels) if config.dilation: kwargs["output_stride"] = kwargs.get("output_stride", 16) backbone = create_model( config.backbone, pretrained=config.use_pretrained_backbone, features_only=True, out_indices=out_indices, in_chans=num_channels, **kwargs, ) else: backbone = load_backbone(config) # replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone self.intermediate_channel_sizes = ( self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels ) backbone_model_type = None if config.backbone is not None: backbone_model_type = config.backbone elif config.backbone_config is not None: backbone_model_type = config.backbone_config.model_type else: raise ValueError("Either `backbone` or `backbone_config` should be provided in the config") if "resnet" in backbone_model_type: for name, parameter in self.model.named_parameters(): if config.use_timm_backbone: if "layer2" not in name and "layer3" not in name and "layer4" not in name: parameter.requires_grad_(False) else: if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name: 
parameter.requires_grad_(False) # Copied from transformers.models.detr.modeling_detr.DetrConvEncoder.forward with Detr->DeformableDetr def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): # send pixel_values through the model to get list of feature maps features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps out = [] for feature_map in features: # downsample pixel_mask to match shape of corresponding feature_map mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] out.append((feature_map, mask)) return out # Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->DeformableDetr
DeformableDetrConvEncoder
python
geekcomputers__Python
venv/Lib/site-packages/pip/_internal/operations/freeze.py
{ "start": 8718, "end": 9864 }
class ____: def __init__( self, name: str, req: str, editable: bool, comments: Iterable[str] = (), ) -> None: self.name = name self.canonical_name = canonicalize_name(name) self.req = req self.editable = editable self.comments = comments @classmethod def from_dist(cls, dist: BaseDistribution) -> "FrozenRequirement": editable = dist.editable if editable: req, comments = _get_editable_info(dist) else: comments = [] direct_url = dist.direct_url if direct_url: # if PEP 610 metadata is present, use it req = direct_url_as_pep440_direct_reference(direct_url, dist.raw_name) else: # name==version requirement req = _format_as_name_version(dist) return cls(dist.raw_name, req, editable, comments=comments) def __str__(self) -> str: req = self.req if self.editable: req = f"-e {req}" return "\n".join(list(self.comments) + [str(req)]) + "\n"
FrozenRequirement
python
sympy__sympy
sympy/simplify/gammasimp.py
{ "start": 17551, "end": 18485 }
class ____(Function): @classmethod def eval(cls, a, b): if b.is_Integer: if not b: return S.One n = int(b) if n > 0: return Mul(*[a + i for i in range(n)]) elif n < 0: return 1/Mul(*[a - i for i in range(1, -n + 1)]) else: if b.is_Add: c, _b = b.as_coeff_Add() if c.is_Integer: if c > 0: return _rf(a, _b)*_rf(a + _b, c) elif c < 0: return _rf(a, _b)/_rf(a + _b + c, -c) if a.is_Add: c, _a = a.as_coeff_Add() if c.is_Integer: if c > 0: return _rf(_a, b)*_rf(_a + b, c)/_rf(_a, c) elif c < 0: return _rf(_a, b)*_rf(_a + c, -c)/_rf(_a + b + c, -c)
_rf
python
pydantic__pydantic
pydantic/v1/errors.py
{ "start": 10548, "end": 10689 }
class ____(_NumberBoundError): code = 'number.not_gt' msg_template = 'ensure this value is greater than {limit_value}'
NumberNotGtError
python
numpy__numpy
numpy/f2py/tests/test_array_from_pyobj.py
{ "start": 11439, "end": 23717 }
class ____: @pytest.fixture(autouse=True, scope="class", params=_type_names) def setup_type(self, request): request.cls.type = Type(request.param) request.cls.array = lambda self, dims, intent, obj: Array( Type(request.param), dims, intent, obj) @property def num2seq(self): if self.type.NAME.startswith('STRING'): elsize = self.type.elsize return ['1' * elsize, '2' * elsize] return [1, 2] @property def num23seq(self): if self.type.NAME.startswith('STRING'): elsize = self.type.elsize return [['1' * elsize, '2' * elsize, '3' * elsize], ['4' * elsize, '5' * elsize, '6' * elsize]] return [[1, 2, 3], [4, 5, 6]] def test_in_from_2seq(self): a = self.array([2], intent.in_, self.num2seq) assert not a.has_shared_memory() def test_in_from_2casttype(self): for t in self.type.cast_types(): obj = np.array(self.num2seq, dtype=t.dtype) a = self.array([len(self.num2seq)], intent.in_, obj) if t.elsize == self.type.elsize: assert a.has_shared_memory(), repr((self.type.dtype, t.dtype)) else: assert not a.has_shared_memory() @pytest.mark.parametrize("write", ["w", "ro"]) @pytest.mark.parametrize("order", ["C", "F"]) @pytest.mark.parametrize("inp", ["2seq", "23seq"]) def test_in_nocopy(self, write, order, inp): """Test if intent(in) array can be passed without copies""" seq = getattr(self, "num" + inp) obj = np.array(seq, dtype=self.type.dtype, order=order) obj.setflags(write=(write == 'w')) a = self.array(obj.shape, ((order == 'C' and intent.in_.c) or intent.in_), obj) assert a.has_shared_memory() def test_inout_2seq(self): obj = np.array(self.num2seq, dtype=self.type.dtype) a = self.array([len(self.num2seq)], intent.inout, obj) assert a.has_shared_memory() try: a = self.array([2], intent.in_.inout, self.num2seq) except TypeError as msg: if not str(msg).startswith( "failed to initialize intent(inout|inplace|cache) array"): raise else: raise SystemError("intent(inout) should have failed on sequence") def test_f_inout_23seq(self): obj = np.array(self.num23seq, dtype=self.type.dtype, 
order="F") shape = (len(self.num23seq), len(self.num23seq[0])) a = self.array(shape, intent.in_.inout, obj) assert a.has_shared_memory() obj = np.array(self.num23seq, dtype=self.type.dtype, order="C") shape = (len(self.num23seq), len(self.num23seq[0])) try: a = self.array(shape, intent.in_.inout, obj) except ValueError as msg: if not str(msg).startswith( "failed to initialize intent(inout) array"): raise else: raise SystemError( "intent(inout) should have failed on improper array") def test_c_inout_23seq(self): obj = np.array(self.num23seq, dtype=self.type.dtype) shape = (len(self.num23seq), len(self.num23seq[0])) a = self.array(shape, intent.in_.c.inout, obj) assert a.has_shared_memory() def test_in_copy_from_2casttype(self): for t in self.type.cast_types(): obj = np.array(self.num2seq, dtype=t.dtype) a = self.array([len(self.num2seq)], intent.in_.copy, obj) assert not a.has_shared_memory() def test_c_in_from_23seq(self): a = self.array( [len(self.num23seq), len(self.num23seq[0])], intent.in_, self.num23seq) assert not a.has_shared_memory() def test_in_from_23casttype(self): for t in self.type.cast_types(): obj = np.array(self.num23seq, dtype=t.dtype) a = self.array( [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) assert not a.has_shared_memory() def test_f_in_from_23casttype(self): for t in self.type.cast_types(): obj = np.array(self.num23seq, dtype=t.dtype, order="F") a = self.array( [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) if t.elsize == self.type.elsize: assert a.has_shared_memory() else: assert not a.has_shared_memory() def test_c_in_from_23casttype(self): for t in self.type.cast_types(): obj = np.array(self.num23seq, dtype=t.dtype) a = self.array( [len(self.num23seq), len(self.num23seq[0])], intent.in_.c, obj) if t.elsize == self.type.elsize: assert a.has_shared_memory() else: assert not a.has_shared_memory() def test_f_copy_in_from_23casttype(self): for t in self.type.cast_types(): obj = np.array(self.num23seq, 
dtype=t.dtype, order="F") a = self.array( [len(self.num23seq), len(self.num23seq[0])], intent.in_.copy, obj) assert not a.has_shared_memory() def test_c_copy_in_from_23casttype(self): for t in self.type.cast_types(): obj = np.array(self.num23seq, dtype=t.dtype) a = self.array( [len(self.num23seq), len(self.num23seq[0])], intent.in_.c.copy, obj) assert not a.has_shared_memory() def test_in_cache_from_2casttype(self): for t in self.type.all_types(): if t.elsize != self.type.elsize: continue obj = np.array(self.num2seq, dtype=t.dtype) shape = (len(self.num2seq), ) a = self.array(shape, intent.in_.c.cache, obj) assert a.has_shared_memory() a = self.array(shape, intent.in_.cache, obj) assert a.has_shared_memory() obj = np.array(self.num2seq, dtype=t.dtype, order="F") a = self.array(shape, intent.in_.c.cache, obj) assert a.has_shared_memory() a = self.array(shape, intent.in_.cache, obj) assert a.has_shared_memory(), repr(t.dtype) try: a = self.array(shape, intent.in_.cache, obj[::-1]) except ValueError as msg: if not str(msg).startswith( "failed to initialize intent(cache) array"): raise else: raise SystemError( "intent(cache) should have failed on multisegmented array") def test_in_cache_from_2casttype_failure(self): for t in self.type.all_types(): if t.NAME == 'STRING': # string elsize is 0, so skipping the test continue if t.elsize >= self.type.elsize: continue is_int = np.issubdtype(t.dtype, np.integer) if is_int and int(self.num2seq[0]) > np.iinfo(t.dtype).max: # skip test if num2seq would trigger an overflow error continue obj = np.array(self.num2seq, dtype=t.dtype) shape = (len(self.num2seq), ) try: self.array(shape, intent.in_.cache, obj) # Should succeed except ValueError as msg: if not str(msg).startswith( "failed to initialize intent(cache) array"): raise else: raise SystemError( "intent(cache) should have failed on smaller array") def test_cache_hidden(self): shape = (2, ) a = self.array(shape, intent.cache.hide, None) assert a.arr.shape == shape shape = (2, 
3) a = self.array(shape, intent.cache.hide, None) assert a.arr.shape == shape shape = (-1, 3) try: a = self.array(shape, intent.cache.hide, None) except ValueError as msg: if not str(msg).startswith( "failed to create intent(cache|hide)|optional array"): raise else: raise SystemError( "intent(cache) should have failed on undefined dimensions") def test_hidden(self): shape = (2, ) a = self.array(shape, intent.hide, None) assert a.arr.shape == shape assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) shape = (2, 3) a = self.array(shape, intent.hide, None) assert a.arr.shape == shape assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"] shape = (2, 3) a = self.array(shape, intent.c.hide, None) assert a.arr.shape == shape assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"] shape = (-1, 3) try: a = self.array(shape, intent.hide, None) except ValueError as msg: if not str(msg).startswith( "failed to create intent(cache|hide)|optional array"): raise else: raise SystemError( "intent(hide) should have failed on undefined dimensions") def test_optional_none(self): shape = (2, ) a = self.array(shape, intent.optional, None) assert a.arr.shape == shape assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) shape = (2, 3) a = self.array(shape, intent.optional, None) assert a.arr.shape == shape assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"] shape = (2, 3) a = self.array(shape, intent.c.optional, None) assert a.arr.shape == shape assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"] def test_optional_from_2seq(self): obj = self.num2seq shape = (len(obj), ) a = self.array(shape, intent.optional, obj) assert a.arr.shape == shape assert not 
a.has_shared_memory() def test_optional_from_23seq(self): obj = self.num23seq shape = (len(obj), len(obj[0])) a = self.array(shape, intent.optional, obj) assert a.arr.shape == shape assert not a.has_shared_memory() a = self.array(shape, intent.optional.c, obj) assert a.arr.shape == shape assert not a.has_shared_memory() def test_inplace(self): obj = np.array(self.num23seq, dtype=self.type.dtype) assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] shape = obj.shape a = self.array(shape, intent.inplace, obj) assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) a.arr[1][2] = 54 assert obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype) assert a.arr is obj assert obj.flags["FORTRAN"] # obj attributes are changed inplace! assert not obj.flags["CONTIGUOUS"] def test_inplace_from_casttype(self): for t in self.type.cast_types(): if t is self.type: continue obj = np.array(self.num23seq, dtype=t.dtype) assert obj.dtype.type == t.type assert obj.dtype.type is not self.type.type assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] shape = obj.shape a = self.array(shape, intent.inplace, obj) assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) a.arr[1][2] = 54 assert obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype) assert a.arr is obj assert obj.flags["FORTRAN"] # obj attributes changed inplace! assert not obj.flags["CONTIGUOUS"] assert obj.dtype.type is self.type.type # obj changed inplace!
TestSharedMemory
python
viewflow__viewflow
viewflow/workflow/migrations/0006_i18n.py
{ "start": 204, "end": 4728 }
class ____(migrations.Migration): dependencies = [ ("viewflow", "0005_rename_flowcls"), ] operations = [ migrations.AlterModelOptions( name="process", options={ "verbose_name_plural": "Process list", "ordering": ["-created"], "verbose_name": "Process", }, ), migrations.AlterModelOptions( name="task", options={ "verbose_name_plural": "Tasks", "ordering": ["-created"], "verbose_name": "Task", }, ), migrations.AlterField( model_name="process", name="created", field=models.DateTimeField(auto_now_add=True, verbose_name="Created"), ), migrations.AlterField( model_name="process", name="finished", field=models.DateTimeField(blank=True, null=True, verbose_name="Finished"), ), migrations.AlterField( model_name="process", name="flow_class", field=viewflow.workflow.fields.FlowReferenceField( max_length=250, verbose_name="Flow" ), ), migrations.AlterField( model_name="process", name="status", field=models.CharField(default="NEW", max_length=50, verbose_name="Status"), ), migrations.AlterField( model_name="task", name="comments", field=models.TextField(blank=True, null=True, verbose_name="Comments"), ), migrations.AlterField( model_name="task", name="created", field=models.DateTimeField(auto_now_add=True, verbose_name="Created"), ), migrations.AlterField( model_name="task", name="external_task_id", field=models.CharField( blank=True, null=True, db_index=True, max_length=50, verbose_name="External Task ID", ), ), migrations.AlterField( model_name="task", name="finished", field=models.DateTimeField(blank=True, null=True, verbose_name="Finished"), ), migrations.AlterField( model_name="task", name="flow_task", field=viewflow.workflow.fields.TaskReferenceField( max_length=255, verbose_name="Task" ), ), migrations.AlterField( model_name="task", name="flow_task_type", field=models.CharField(max_length=50, verbose_name="Type"), ), migrations.AlterField( model_name="task", name="owner", field=models.ForeignKey( null=True, to=settings.AUTH_USER_MODEL, blank=True, verbose_name="Owner", 
on_delete=models.CASCADE, ), ), migrations.AlterField( model_name="task", name="owner_permission", field=models.CharField( blank=True, null=True, max_length=255, verbose_name="Permission" ), ), migrations.AlterField( model_name="task", name="previous", field=models.ManyToManyField( related_name="leading", to="viewflow.Task", verbose_name="Previous" ), ), migrations.AlterField( model_name="task", name="process", field=models.ForeignKey( to="viewflow.Process", verbose_name="Process", on_delete=models.CASCADE ), ), migrations.AlterField( model_name="task", name="started", field=models.DateTimeField(blank=True, null=True, verbose_name="Started"), ), migrations.AlterField( model_name="task", name="status", field=models.CharField( default="NEW", db_index=True, max_length=50, verbose_name="Status" ), ), migrations.AlterField( model_name="task", name="token", field=viewflow.workflow.fields.TokenField( default=viewflow.workflow.token.Token("start"), max_length=150, verbose_name="Token", ), ), ]
Migration
python
dagster-io__dagster
python_modules/dagster/dagster/_config/pythonic_config/resource.py
{ "start": 4337, "end": 5613 }
class ____(NestedResourcesResourceDefinition): def __init__( self, configurable_resource_cls: type, resource_fn: ResourceFunction, config_schema: Any, description: Optional[str], nested_resources: Mapping[str, Any], nested_partial_resources: Mapping[str, Any], dagster_maintained: bool = False, ): super().__init__( resource_fn=resource_fn, config_schema=config_schema, description=description, ) self._configurable_resource_cls = configurable_resource_cls self._nested_partial_resources = nested_partial_resources self._nested_resources = nested_resources self._dagster_maintained = dagster_maintained @property def configurable_resource_cls(self) -> type: return self._configurable_resource_cls @property def nested_resources( self, ) -> Mapping[str, Any]: return self._nested_resources @property def nested_partial_resources( self, ) -> Mapping[str, "CoercibleToResource"]: return self._nested_partial_resources def _is_dagster_maintained(self) -> bool: return self._dagster_maintained
ConfigurableResourceFactoryResourceDefinition
python
doocs__leetcode
solution/2800-2899/2850.Minimum Moves to Spread Stones Over Grid/Solution2.py
{ "start": 0, "end": 756 }
class ____: def minimumMoves(self, grid: List[List[int]]) -> int: def cal(a: tuple, b: tuple) -> int: return abs(a[0] - b[0]) + abs(a[1] - b[1]) left, right = [], [] for i in range(3): for j in range(3): if grid[i][j] == 0: left.append((i, j)) else: for _ in range(grid[i][j] - 1): right.append((i, j)) n = len(left) f = [inf] * (1 << n) f[0] = 0 for i in range(1, 1 << n): k = i.bit_count() for j in range(n): if i >> j & 1: f[i] = min(f[i], f[i ^ (1 << j)] + cal(left[k - 1], right[j])) return f[-1]
Solution
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/control_flow/control_flow_ops_py_test.py
{ "start": 6274, "end": 173921 }
class ____(test.TestCase, parameterized.TestCase): @test_util.run_v1_only("b/120545219") def testRefIdentity(self): with self.cached_session(): v = variable_v1.VariableV1(7) v = control_flow_ops._Identity(v) op = state_ops.assign(v, 9) v2 = control_flow_ops.with_dependencies([op], v) self.assertTrue(isinstance(v2, tensor_lib.Tensor)) self.evaluate(variables.global_variables_initializer()) self.assertEqual(9, self.evaluate(v2)) @test_util.run_v1_only("b/120545219") def testRefEnter(self): with self.cached_session(): v = variable_v1.VariableV1(7) enter_v = control_flow_ops._Enter(v, "foo_1", is_constant=True) nine = constant_op.constant(9) enter_nine = gen_control_flow_ops.enter(nine, "foo_1") op = state_ops.assign(enter_v, enter_nine) v2 = control_flow_ops.with_dependencies([op], enter_v) v3 = control_flow_ops.exit(v2) self.evaluate(variables.global_variables_initializer()) self.assertEqual(9, self.evaluate(v3)) @test_util.run_v1_only("b/120545219") def testRefSwitch(self): with self.cached_session(): v = variable_v1.VariableV1(7) p = constant_op.constant(True) v1 = control_flow_ops._SwitchRefOrTensor(v._ref(), p) # pylint: disable=protected-access v2 = state_ops.assign(v1[1], 9) self.evaluate(variables.global_variables_initializer()) self.assertEqual(9, self.evaluate(v2)) def testEnterMulExit(self): with self.cached_session(): data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data") enter_data = gen_control_flow_ops.enter(data, "foo_1", False) five = constant_op.constant(5) enter_five = gen_control_flow_ops.enter(five, "foo_1", False) mul_op = math_ops.multiply(enter_data, enter_five) exit_op = control_flow_ops.exit(mul_op) result = self.evaluate(exit_op) self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result) @test_util.run_deprecated_v1 def testEnterShapePropagation(self): with self.cached_session(): v = variables.Variable([0.0, 0.0], dtype=dtypes.float32) # If is_constant=True, the shape information should be propagated. 
enter_v_constant = gen_control_flow_ops.enter( v, "frame1", is_constant=True) self.assertEqual(enter_v_constant.shape, [2]) # Otherwise, the shape should be unknown. enter_v_non_constant = gen_control_flow_ops.enter( v, "frame2", is_constant=False) self.assertEqual(enter_v_non_constant.shape, None) @test_util.run_v1_only("b/120545219") def testSwitchMergeIndexedSlices(self): with self.cached_session(): values = constant_op.constant([1, 2, 3, 4, 5, 6]) indices = constant_op.constant([0, 2, 4, 6, 8, 10]) data = indexed_slices.IndexedSlices(values, indices) pred = ops.convert_to_tensor(True) switch_op = control_flow_ops.switch(data, pred) merge_op = control_flow_ops.merge(switch_op)[0] val = merge_op.values ind = merge_op.indices self.assertAllEqual(np.arange(1, 7), val) self.assertAllEqual(np.arange(0, 12, 2), ind) @test_util.run_v1_only("b/120545219") def testSwitchDeadBranch(self): with self.cached_session(): data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data") ports = ops.convert_to_tensor(True, name="ports") switch_op = control_flow_ops.switch(data, ports) dead_branch = array_ops.identity(switch_op[0]) with self.assertRaisesWithPredicateMatch( errors_impl.InvalidArgumentError, lambda e: "Retval[0] does not have value" in str(e)): self.evaluate(dead_branch) @test_util.run_v1_only("b/120545219") def testSwitchMergeLess(self): with self.cached_session(): data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data") zero = ops.convert_to_tensor(0) one = ops.convert_to_tensor(1) less_op = math_ops.less(zero, one) switch_op = control_flow_ops.switch(data, less_op) merge_op = control_flow_ops.merge(switch_op)[0] result = self.evaluate(merge_op) self.assertAllEqual(np.arange(1, 7), result) @test_util.run_v1_only("b/120545219") def testSwitchMergeAddIdentity(self): with self.cached_session(): data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data") ports = ops.convert_to_tensor(False, name="ports") switch_op = control_flow_ops.switch(data, ports) one = 
constant_op.constant(1) add_op = math_ops.add(switch_op[0], one) id_op = array_ops.identity(switch_op[1]) merge_op = control_flow_ops.merge([add_op, id_op])[0] result = self.evaluate(merge_op) self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result) @test_util.run_v1_only("b/120545219") def testSwitchMergeAddMul(self): with self.cached_session(): data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data") ports = ops.convert_to_tensor(True, name="ports") switch_op = control_flow_ops.switch(data, ports) one = constant_op.constant(1) add_op = math_ops.add(switch_op[0], one) five = constant_op.constant(5) mul_op = math_ops.multiply(switch_op[1], five) merge_op = control_flow_ops.merge([add_op, mul_op])[0] result = self.evaluate(merge_op) self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result) @test_util.run_v1_only("b/120545219") def testLoop_false(self): with self.cached_session(): false = ops.convert_to_tensor(False) n = constant_op.constant(10) enter_false = gen_control_flow_ops.enter(false, "foo_1", False) enter_n = gen_control_flow_ops.enter(n, "foo_1", False) merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0] switch_n = control_flow_ops.switch(merge_n, enter_false) exit_n = control_flow_ops.exit(switch_n[0]) next_n = control_flow_ops.next_iteration(switch_n[0]) merge_n.op._update_input(1, next_n) result = self.evaluate(exit_n) self.assertAllEqual(10, result) @test_util.run_deprecated_v1 def testLoop_1(self): with self.cached_session(): zero = constant_op.constant(0) one = constant_op.constant(1) n = constant_op.constant(10) enter_i = gen_control_flow_ops.enter(zero, "foo", False) enter_one = gen_control_flow_ops.enter(one, "foo", True) enter_n = gen_control_flow_ops.enter(n, "foo", True) with ops.device(test.gpu_device_name()): merge_i = control_flow_ops.merge([enter_i, enter_i])[0] less_op = math_ops.less(merge_i, enter_n) cond_op = control_flow_ops.loop_cond(less_op) switch_i = 
control_flow_ops.switch(merge_i, cond_op) add_i = math_ops.add(switch_i[1], enter_one) next_i = control_flow_ops.next_iteration(add_i) merge_i.op._update_input(1, next_i) exit_i = control_flow_ops.exit(switch_i[0]) result = self.evaluate(exit_i) self.assertAllEqual(10, result) @test_util.run_v1_only("b/120545219") def testLoop_2(self): with self.cached_session(): zero = constant_op.constant(0) one = constant_op.constant(1) n = constant_op.constant(10) enter_i = gen_control_flow_ops.enter(zero, "foo", False) enter_one = gen_control_flow_ops.enter(one, "foo", True) enter_n = gen_control_flow_ops.enter(n, "foo", True) merge_i = control_flow_ops.merge([enter_i, enter_i])[0] less_op = math_ops.less(merge_i, enter_n) cond_op = control_flow_ops.loop_cond(less_op) switch_i = control_flow_ops.switch(merge_i, cond_op) add_i = math_ops.add(switch_i[1], enter_one) with ops.device(test.gpu_device_name()): next_i = control_flow_ops.next_iteration(add_i) merge_i.op._update_input(1, next_i) exit_i = control_flow_ops.exit(switch_i[0]) result = self.evaluate(exit_i) self.assertAllEqual(10, result) @test_util.run_v1_only("b/120545219") def testDifferentFrame(self): with self.cached_session(): data = array_ops.placeholder(dtypes.float32, shape=[]) enter_1 = gen_control_flow_ops.enter(data, "foo_1", False) enter_2 = gen_control_flow_ops.enter(data, "foo_2", False) res = math_ops.add(enter_1, enter_2) with self.assertRaisesOpError("has inputs from different frames"): res.eval(feed_dict={data: 1.0}) @test_util.run_deprecated_v1 def testCondBool(self): values = constant_op.constant(10) fn1 = lambda: math_ops.add(values, 1) fn2 = lambda: math_ops.subtract(values, 1) with self.assertRaisesRegex(TypeError, "must not be a Python bool"): _ = tf_cond.cond(False, fn1, fn2) @test_util.run_deprecated_v1 def testCondInt(self): p = array_ops.placeholder(dtypes.bool, shape=[]) v = constant_op.constant(10) fn1 = lambda: math_ops.add(v, 1) fn2 = lambda: math_ops.subtract(v, 1) y = tf_cond.cond(p, fn1, 
fn2) grad = gradients_impl.gradients(y, [v]) self.assertAllEqual([None], grad) def testCondOutputShape(self): x = constant_op.constant(1.0) b = tf_cond.cond( constant_op.constant(True), lambda: math_ops.square(x), lambda: math_ops.subtract(x, 1.)) self.assertEqual(b.shape, tensor_shape.TensorShape([])) @test_util.run_v1_only("b/120545219") def testFetchable(self): with self.cached_session() as sess: x = array_ops.placeholder(dtypes.float32) tf_cond.cond( constant_op.constant(True), lambda: x + 2, lambda: x + 0) graph = ops.get_default_graph() for op in graph.get_operations(): for t in op.inputs: if graph.is_fetchable(t.op): sess.run(t, feed_dict={x: 3}) else: with self.assertRaisesRegex(ValueError, "has been marked as not fetchable"): sess.run(t, feed_dict={x: 3}) @test_util.disable_control_flow_v2("Not relevant") @test_util.run_v1_only("b/120545219") def testFeedable(self): with self.cached_session() as sess: c = constant_op.constant(2) i0 = constant_op.constant(0) r = while_loop_tf.while_loop( lambda i: i < 1000, lambda i: math_ops.square(c) + i, [i0]) self.assertEqual(1000, r.eval(feed_dict={i0: 0})) feedable_tensors = all_feedables() for t in feedable_tensors: sess.run(r, feed_dict={t: 3}) graph = ops.get_default_graph() for op in graph.get_operations(): for t in op.inputs: if t not in feedable_tensors and t.dtype is dtypes.int32: with self.assertRaisesRegex(ValueError, "may not be fed"): sess.run(r, feed_dict={t: 3}) @test_util.run_v1_only("b/120545219") def testCondIndexedSlices(self): with self.cached_session(): values = constant_op.constant([10]) indices = constant_op.constant([0]) x = indexed_slices.IndexedSlices(values, indices) pred = math_ops.less(1, 2) fn1 = lambda: indexed_slices.IndexedSlices( math_ops.add(x.values, 1), indices) fn2 = lambda: indexed_slices.IndexedSlices( math_ops.subtract(x.values, 1), indices) r = tf_cond.cond(pred, fn1, fn2) val = r.values ind = r.indices self.assertAllEqual([11], val) self.assertAllEqual([0], ind) def 
testCondMismatchedIndexedSlices(self): @eager_def_function.function def foo(): values = constant_op.constant([10]) indices = constant_op.constant([0]) x = indexed_slices.IndexedSlices(values, indices) with self.assertRaisesRegex(TypeError, "Cannot reconcile tf.cond 0-th outputs"): tf_cond.cond( constant_op.constant(True), lambda: indexed_slices.IndexedSlices( math_ops.add(x.values, 1), indices), lambda: math_ops.add(x.values, 1), indices) foo() def testCondSparseTensor(self): values = constant_op.constant([2.0, 4.0], name="values") indices = constant_op.constant([[0], [3]], dtype=dtypes.int64, name="indices") shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape") x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape) pred = math_ops.less(1, 2) fn1 = lambda: sparse_tensor.SparseTensor( indices + 1, x.values + 1, dense_shape=shape) fn2 = lambda: sparse_tensor.SparseTensor( indices, x.values - 1, dense_shape=shape) r = tf_cond.cond(pred, fn1, fn2) self.assertAllEqual([3.0, 5.0], r.values) self.assertAllEqual([[1], [4]], r.indices) self.assertAllEqual(r.values.get_shape(), (2,)) def testCondRaggedTensor(self): rt = ragged_factory_ops.constant([[1, 2], [3], [4, 5, 6]]) pred = math_ops.less(1, 2) fn1 = lambda: array_ops.concat([rt + 2, [[100]]], axis=0) fn2 = lambda: rt[:2] - 2 result = tf_cond.cond(pred, fn1, fn2) self.assertAllEqual([3, 4, 5, 6, 7, 8, 100], result.values) self.assertAllEqual([0, 2, 3, 6, 7], result.row_splits) @test_util.run_v1_only("b/120545219") def testCondResource(self): with self.cached_session(): rv = resource_variable_ops.ResourceVariable(True) self.evaluate(variables.global_variables_initializer()) t = ops.convert_to_tensor(1.0) def case(): assign = resource_variable_ops.assign_variable_op(rv.handle, False) with ops.control_dependencies([assign]): return array_ops.identity(t) self.assertEqual( 1.0, self.evaluate(tf_cond.cond(rv, case, lambda: t))) @test_util.run_deprecated_v1 def testCondResourceGradShape(self): 
rv1 = resource_variable_ops.ResourceVariable([1.0, 2.0]) rv2 = resource_variable_ops.ResourceVariable([3.0, 4.0]) pred = constant_op.constant(True) result = tf_cond.cond(pred, lambda: rv1, lambda: rv2) grads = gradients_impl.gradients(result, [rv1, rv2]) self.assertAllEqual(grads[0].shape.as_list(), [2]) self.assertAllEqual(grads[1].shape.as_list(), [2]) @test_util.run_v1_only("b/120545219") def testCondWithTensorArrayGrad(self): with self.cached_session() as sess: with ops.device(test.gpu_device_name()): pred = array_ops.placeholder(dtypes.bool, []) x = constant_op.constant([1.0, 2.0, 3.0]) y = tf_cond.cond( pred, lambda: map_fn.map_fn(lambda z: z * 2.0, x), lambda: constant_op.constant([1.0, 1.0, 1.0])) g = gradients_impl.gradients(y, x)[0] self.assertAllEqual(sess.run(g, {pred: True}), [2.0, 2.0, 2.0]) self.assertAllEqual(sess.run(g, {pred: False}), [0.0, 0.0, 0.0]) @test_util.run_v1_only("b/120545219") def testCondIndexedSlicesDifferentTypes(self): with self.cached_session(): values = constant_op.constant([10]) i_32 = ops.convert_to_tensor([0], name="one", dtype=dtypes.int32) i_64 = ops.convert_to_tensor([0], name="one", dtype=dtypes.int64) x = indexed_slices.IndexedSlices(values, i_32) pred = math_ops.less(1, 2) fn1 = lambda: indexed_slices.IndexedSlices( math_ops.add(x.values, 1), i_32) fn2 = lambda: indexed_slices.IndexedSlices( math_ops.subtract(x.values, 1), i_64) r = tf_cond.cond(pred, fn1, fn2) val = r.values ind = r.indices self.assertAllEqual([11], val) self.assertAllEqual([0], ind) self.assertTrue(ind.dtype == np.int64) @test_util.run_v1_only("b/120545219") def testCondColocation(self): with self.session(): with ops.device("/cpu:0"): v = variables.Variable(7.0) x = constant_op.constant(10.0) pred = math_ops.less(1.0, 2.0) fn1 = lambda: math_ops.add(v, 1.0) fn2 = lambda: math_ops.subtract(x, 1.0) r = tf_cond.cond(pred, fn1, fn2) for op in x.graph.get_operations(): if op.name == "cond/Add/Switch": self.assertDeviceEqual(op.device, "/cpu:0") def 
_testCond_1(self, use_gpu): with self.cached_session(use_gpu=use_gpu): x = constant_op.constant(10) pred = math_ops.less(1, 2) fn1 = lambda: math_ops.add(x, 1) fn2 = lambda: math_ops.subtract(x, 1) r = tf_cond.cond(pred, fn1, fn2) result = self.evaluate(r) self.assertAllEqual(11, result) def testCond_1(self): self._testCond_1(use_gpu=False) # TODO(b/116526896): Enable GPU tests. # self._testCond_1(use_gpu=True) def testCond_2(self): with self.cached_session(): x = constant_op.constant(10) r = tf_cond.cond( math_ops.less(1, 0), lambda: math_ops.add(x, 1), lambda: math_ops.subtract(x, 1)) result = self.evaluate(r) self.assertAllEqual(9, result) def testCond_3(self): with self.cached_session(): x = constant_op.constant(10) pred = math_ops.less(1, 2) fn1 = lambda: math_ops.add(x, 1) fn2 = lambda: math_ops.subtract(x, 1) fn3 = lambda: math_ops.add(tf_cond.cond(pred, fn1, fn2), 1) r = tf_cond.cond(pred, fn3, fn2) result = self.evaluate(r) self.assertAllEqual(12, result) @test_util.run_in_graph_and_eager_modes def testCondPruning(self): v1 = variables.Variable(7) v2 = variables.Variable(7) v3 = variables.Variable(7) def f(): age = constant_op.constant(3) max_age = constant_op.constant(2) pred = math_ops.greater(age, max_age) fn1 = lambda: [state_ops.assign(v1, 1).op, state_ops.assign(v2, 2).op] fn2 = lambda: [state_ops.assign(v3, 3).op, constant_op.constant(10).op] r = tf_cond.cond(pred, fn1, fn2) self.assertEqual(len(r), 2) return r[1] f_defun = eager_def_function.function(f) if not context.executing_eagerly(): with self.cached_session(): self.evaluate(variables.global_variables_initializer()) result = self.evaluate(f()) self.assertEqual(True, result) # Only second cond result was fetched, so v1 assign shouldn't run. self.assertEqual(7, self.evaluate(v1)) self.assertEqual(2, self.evaluate(v2)) self.assertEqual(7, self.evaluate(v3)) result = f_defun() self.assertEqual(True, self.evaluate(result)) # Both v1 and v2 branch assignments should be run in defun. 
self.assertEqual(1, self.evaluate(v1)) self.assertEqual(2, self.evaluate(v2)) self.assertEqual(7, self.evaluate(v3)) def testCond_5(self): with self.cached_session(): alive = constant_op.constant(True, name="alive") count = constant_op.constant(0, name="count") def body(i): return tf_cond.cond( alive, lambda: [math_ops.less(i, 3), math_ops.add(count, 1)], lambda: [alive, count]) for i in range(10): alive, count = body(i) self.assertAllEqual(4, self.evaluate(count)) @test_util.run_v1_only("b/120545219") def testCond_6(self): with self.cached_session(): v1 = variables.Variable([7]) age = constant_op.constant(3) pred = math_ops.greater(age, 4) fn1 = lambda: age fn2 = lambda: v1 r = tf_cond.cond(pred, fn1, fn2) self.evaluate(variables.global_variables_initializer()) result = self.evaluate(r) self.assertAllEqual(np.array([7]), result) def testCond_7(self): with self.cached_session() as sess: x = constant_op.constant(10) y = constant_op.constant(200) pred = math_ops.less(1, 2) fn1 = lambda: [math_ops.add(x, 1), math_ops.add(x, 2)] fn2 = lambda: [y, y] r = tf_cond.cond(pred, fn1, fn2) self.assertAllEqual([11, 12], self.evaluate(r)) @parameterized.parameters(dtypes.float32, dtypes.float64) @test_util.run_v1_only("Uses tf.gradients") def testCondResourceGrad(self, dtype): init = constant_op.constant([7.], dtype=dtype) v1 = variables.Variable(init) age = constant_op.constant(3., dtype=dtype) pred = math_ops.greater(age, 4.) fn1 = lambda: age fn2 = lambda: v1 r = tf_cond.cond(pred, fn1, fn2) grad = gradients_impl.gradients(r, v1)[0] self.evaluate(variables.global_variables_initializer()) self.assertAllEqual(grad, [1.]) @test_util.run_gpu_only @test_util.run_deprecated_v1 def testCond_Device(self): x = constant_op.constant(-10.) # True branch function defined outside of device scope def true_fn(): return math_ops.exp(x) with ops.device("CPU:0"): r = tf_cond.cond( constant_op.constant(True), true_fn, lambda: 0.) 
self.assertIn("cpu", r.device.lower()) with session.Session() as sess: options = config_pb2.RunOptions(output_partition_graphs=True) run_metadata = config_pb2.RunMetadata() sess.run(r, options=options, run_metadata=run_metadata) # We expect that everything runs on CPU, even if GPU is available. self.assertEqual(len(run_metadata.partition_graphs), 1) def _count_matching_switch_nodes_on_device(self, run_metadata, device_str, dtype): # Returns the number of Switch nodes with type dtype placed on # `device_str`. device_graphs = [ g for g in run_metadata.partition_graphs if device_str in g.node[0].device ] self.assertLen(device_graphs, 1) switch_nodes = [ n for n in device_graphs[0].node if n.op == "Switch" and n.attr["T"].type == dtype.as_datatype_enum ] return len(switch_nodes) @test_util.run_gpu_only @test_util.run_deprecated_v1 def testCondSwitchColocatedWithInputWhenInputExplicitlyPlacedOnCPU(self): x = array_ops.placeholder(dtypes.float32) # `arg` is used in the cond then branch so a Switch node is created for it. # We test that the Switch node gets placed on the same device as `arg`. # We force `arg` to be on CPU here. with ops.device("CPU:0"): arg = x + 10. def true_fn(): with ops.device("CPU:0"): return arg + 1 r = tf_cond.cond(constant_op.constant(True), true_fn, lambda: 0.) # Disable Loop_optimizer grappler pass for this test because it replaces # Switch with Identity when it's part of a dead branch. config = config_pb2.ConfigProto() config.graph_options.rewrite_options.loop_optimization = ( rewriter_config_pb2.RewriterConfig.OFF) with self.session(config=config) as sess: run_metadata = config_pb2.RunMetadata() options = config_pb2.RunOptions(output_partition_graphs=True) sess.run( r, feed_dict={x: -10.}, options=options, run_metadata=run_metadata) self.assertLen(run_metadata.partition_graphs, 2) # Check that the Switch for `arg` gets placed on CPU. 
self.assertEqual( self._count_matching_switch_nodes_on_device(run_metadata, "CPU", dtypes.float32), 1) self.assertEqual( self._count_matching_switch_nodes_on_device(run_metadata, "GPU", dtypes.float32), 0) @test_util.run_gpu_only @test_util.run_deprecated_v1 def testCondSwitchColocatedWithInputWhenInputPlacedOnCPU(self): x = array_ops.placeholder(dtypes.float32) # `arg` is used in the cond then branch so a Switch node is created for it. # We test that the Switch node gets placed on the same device as `arg`. # Since arg is a dataset (and only has a CPU kernel), it gets placed on CPU # by placer. arg = dataset_ops.Dataset.range(8) def true_fn(): return cardinality.cardinality(arg) r = tf_cond.cond( constant_op.constant(True), true_fn, lambda: constant_op.constant(0, dtypes.int64)) # Disable Loop_optimizer grappler pass for this test because it replaces # Switch with Identity when it's part of a dead branch. config = config_pb2.ConfigProto() config.graph_options.rewrite_options.loop_optimization = ( rewriter_config_pb2.RewriterConfig.OFF) with session.Session(config=config) as sess: run_metadata = config_pb2.RunMetadata() options = config_pb2.RunOptions(output_partition_graphs=True) sess.run( r, feed_dict={x: -10.}, options=options, run_metadata=run_metadata) self.assertLen(run_metadata.partition_graphs, 2) # Check that the Switch for `arg` gets placed on CPU. self.assertEqual( self._count_matching_switch_nodes_on_device(run_metadata, "CPU", dtypes.variant), 1) self.assertEqual( self._count_matching_switch_nodes_on_device(run_metadata, "GPU", dtypes.variant), 0) @test_util.run_gpu_only @test_util.run_deprecated_v1 def testCondSwitchColocatedWithInputWhenInputOnGPU(self): x = array_ops.placeholder(dtypes.float32) # `arg` is used in the cond then branch so a Switch node is created for it. # We test that the Switch node gets placed on the same device as `arg`. # Note: `arg` gets placed on GPU by default by the placer. arg = x + 10. 
def true_fn(): with ops.device("CPU:0"): return arg + 1 r = tf_cond.cond(constant_op.constant(True), true_fn, lambda: 0.) # Disable Loop_optimizer grappler pass for this test because it replaces # Switch with Identity when it's part of a dead branch. config = config_pb2.ConfigProto() config.graph_options.rewrite_options.loop_optimization = ( rewriter_config_pb2.RewriterConfig.OFF) with session.Session(config=config) as sess: run_metadata = config_pb2.RunMetadata() options = config_pb2.RunOptions(output_partition_graphs=True) sess.run( r, feed_dict={x: -10.}, options=options, run_metadata=run_metadata) self.assertEqual(len(run_metadata.partition_graphs), 2) # Check that the Switch for `arg` gets placed on GPU. self.assertEqual( self._count_matching_switch_nodes_on_device(run_metadata, "CPU", dtypes.float32), 0) self.assertEqual( self._count_matching_switch_nodes_on_device(run_metadata, "GPU", dtypes.float32), 1) def testCondAccessTrueBranchTensorInFalseBranchRaises(self): @eager_def_function.function def f(): c = constant_op.constant(1.) inputs = {"c": c} def true_fn(inputs): inputs["c"] = array_ops.identity(inputs["c"], name="true_branch") return inputs["c"] def false_fn(inputs): return array_ops.identity(inputs["c"]) pred = constant_op.constant(True) return tf_cond.cond( pred, lambda: true_fn(inputs), lambda: false_fn(inputs)) prefix = "cond/" if context.executing_eagerly() else "" with self.assertRaisesRegex( ValueError, "Tensor %strue_branch:0 in true_fn is accessed from false_fn." % prefix): f() def testSwitchCaseAccessBranch1TensorInBranch4Raises(self): @eager_def_function.function def f(): c = constant_op.constant(1.) 
inputs = {"c": c} def br1_fn(inputs): inputs["c"] = array_ops.identity(inputs["c"], name="br1_identity") return inputs["c"] def br4_fn(inputs): return array_ops.identity(inputs["c"]) def other_fn(): return array_ops.identity(c) return control_flow_switch_case.switch_case( constant_op.constant(2), [ other_fn, lambda: br1_fn(inputs), other_fn, other_fn, lambda: br4_fn(inputs) ]) prefix = "switch_case/indexed_case/" if context.executing_eagerly() else "" with self.assertRaisesRegex( ValueError, "Tensor %sbr1_identity:0 in branch 1 is " "accessed from branch 4." % prefix): f() def testCondListOutput(self): with self.cached_session() as sess: x = constant_op.constant(10) y = constant_op.constant(200) pred = math_ops.less(1, 2) fn1 = lambda: [math_ops.add(x, y), math_ops.add(x, y)] fn2 = lambda: [y, y] r = tf_cond.cond(pred, fn1, fn2) test_result = self.evaluate(r) self.assertListEqual([210, 210], test_result) def testTupleOutput(self): with self.cached_session() as sess: x = constant_op.constant(10) y = constant_op.constant(200) pred = math_ops.less(1, 2) fn1 = lambda: (math_ops.add(x, y), math_ops.add(x, y)) fn2 = lambda: (y, y) r = tf_cond.cond(pred, fn1, fn2) test_result = self.evaluate(r) self.assertTupleEqual((210, 210), test_result) def testDictOutput(self): with self.cached_session() as sess: x = constant_op.constant(10) y = constant_op.constant(200) pred = math_ops.less(1, 2) fn1 = lambda: {"a": math_ops.add(x, y), "b": math_ops.add(x, y)} fn2 = lambda: {"a": y, "b": y} r = tf_cond.cond(pred, fn1, fn2) test_result = self.evaluate(r) self.assertDictEqual({"a": 210, "b": 210}, test_result) def testEmbeddedListOutput(self): x = constant_op.constant(10) y = constant_op.constant(200) pred = math_ops.less(1, 2) fn1 = lambda: [[math_ops.add(x, y), math_ops.add(x, y)]] fn2 = lambda: [[y, y]] # Pass strict=True flag as cond_v2 allows for tensors to be # in nested output structures as singletons r = tf_cond.cond(pred, fn1, fn2, strict=True) test_result = self.evaluate(r) 
self.assertListEqual([[210, 210]], test_result) def testEmbeddedTupleOutput(self): with self.cached_session() as sess: x = constant_op.constant(10) y = constant_op.constant(200) pred = math_ops.less(1, 2) fn1 = lambda: ((math_ops.add(x, y), math_ops.add(x, y))) fn2 = lambda: ((y, y)) r = tf_cond.cond(pred, fn1, fn2) test_result = self.evaluate(r) self.assertTupleEqual(((210, 210)), test_result) def testEmbeddedDictOutput(self): with self.cached_session() as sess: x = constant_op.constant(10) y = constant_op.constant(200) pred = math_ops.less(1, 2) fn1 = lambda: {"a": {"c": math_ops.add(x, y)}, "b": {"d": math_ops.add(x, y)}} fn2 = lambda: {"a": {"c": y}, "b": {"d": y}} r = tf_cond.cond(pred, fn1, fn2) test_result = self.evaluate(r) self.assertDictEqual({"a": {"c": 210}, "b": {"d": 210}}, test_result) @test_util.run_v1_only("b/120545219") def testCheckNestedOutputStruct(self): with self.cached_session() as sess: x = constant_op.constant(10) y = constant_op.constant(200) pred = math_ops.less(1, 2) fn1 = lambda: {"a": math_ops.add(x, y), "b": math_ops.add(x, y)} fn2 = lambda: {"c": y, "d": y} v1_msg = "The two structures don't have the same nested structure" v2_msg = ("true_fn and false_fn arguments to tf.cond must have the same " "number, type, and overall structure of return values.") with self.assertRaisesRegex( TypeError if control_flow_util.ENABLE_CONTROL_FLOW_V2 else ValueError, v2_msg if control_flow_util.ENABLE_CONTROL_FLOW_V2 else v1_msg): tf_cond.cond(pred, fn1, fn2) @test_util.run_v1_only("b/120545219") def testCondWithControl(self): with self.cached_session() as sess: control_holder = array_ops.placeholder(dtypes.float32, shape=()) a = constant_op.constant(3) def true_branch(): with ops.control_dependencies([control_holder]): _ = a + 1 return a + 2 r = tf_cond.cond( constant_op.constant(True), true_branch, lambda: constant_op.constant(1)) result = sess.run(r, feed_dict={control_holder: 5.}) self.assertEqual(5, result) @test_util.run_v1_only("b/120545219") 
def testUninitializedRefIdentity(self): with self.cached_session() as sess: v = gen_state_ops.variable( shape=[1], dtype=dtypes.float32, name="v", container="", shared_name="") inited = state_ops.is_variable_initialized(v) v_f, v_t = control_flow_ops.ref_switch(v, inited) # Both v_f and v_t are uninitialized references. However, an actual use # of the reference in the 'true' branch in the 'tf.identity' op will # not 'fire' when v is uninitialized, so this is a valid construction. # This test tests that ref_identity allows uninitialized ref as input # so that this construction is allowed. v_f_op = gen_array_ops.ref_identity(v_f) v_t_op = gen_array_ops.ref_identity(v_t) with ops.control_dependencies([v_f_op]): assign_v = state_ops.assign(v, [1.0]) with ops.control_dependencies([v_t_op]): orig_v = array_ops.identity(v) merged_op = control_flow_ops.merge([assign_v, orig_v]) self.assertAllEqual([1.0], self.evaluate(merged_op.output)) def testCondSwitchIdentity(self): # Make sure the recv identity is not removed by optimization. with session.Session(config=opt_cfg()) as sess: pred = constant_op.constant(True) def fn1(): return control_flow_ops.no_op() def fn2(): return control_flow_assert.Assert(False, ["Wrong branch!!!"]) r = tf_cond.cond(pred, fn1, fn2) self.evaluate(r) def testCondRecvIdentity(self): # Make sure the switch identity is not removed by optimization. with session.Session(config=opt_cfg()) as sess: with ops.device(test.gpu_device_name()): pred = constant_op.constant(True) def fn1(): return control_flow_ops.no_op() def fn2(): with ops.device("/cpu:0"): return control_flow_assert.Assert(False, ["Wrong branch!!!"]) r = tf_cond.cond(pred, fn1, fn2) self.evaluate(r) @test_util.run_deprecated_v1 @test_util.enable_control_flow_v2 def testDisableLoweringSwitchMerge(self): if test_util.is_gpu_available(): self.skipTest( "Single threaded executor doesn't support partitioned graphs. " "Skipping GPU test.") # Make pred feedable to ensure we don't constant-fold it out. 
run_opts = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata_no_lowering = config_pb2.RunMetadata() run_metadata_with_lowering = config_pb2.RunMetadata() config = opt_cfg(do_constant_folding=False) pred = array_ops.placeholder_with_default( constant_op.constant(True), shape=()) r = tf_cond.cond(pred, lambda: True, lambda: False) with session.Session(config=config) as sess: r_value = sess.run( r, options=run_opts, run_metadata=run_metadata_with_lowering) self.assertEqual(r_value, True) # Use the single threaded executor, which disables control flow lowering. config.experimental.executor_type = "SINGLE_THREADED_EXECUTOR" with session.Session(config=config) as sess: r_value = sess.run( r, options=run_opts, run_metadata=run_metadata_no_lowering) self.assertEqual(r_value, True) self.assertTrue( # pylint: disable=g-complex-comprehension any("switch" in ns.node_name for dev_stat in run_metadata_with_lowering.step_stats.dev_stats for ns in dev_stat.node_stats)) self.assertTrue( # pylint: disable=g-complex-comprehension all("switch" not in ns.node_name for dev_stat in run_metadata_no_lowering.step_stats.dev_stats for ns in dev_stat.node_stats)) @test_util.run_v1_only("b/120545219") def testCondGrad_1(self): with self.cached_session(): x = constant_op.constant(10.0, name="x") pred = math_ops.less(1, 2) fn1 = lambda: array_ops.identity(x) fn2 = lambda: array_ops.identity(x) r = tf_cond.cond(pred, fn1, fn2) grad = gradients_impl.gradients(r, [x])[0] self.assertAllEqual(1.0, self.evaluate(grad)) @test_util.run_deprecated_v1 @test_util.enable_control_flow_v2 def testCondComputeGradAfterSessRunFails(self): with self.cached_session(): x = constant_op.constant(10.0, name="x") pred = math_ops.less(1, 2) def true_fn(): a = x * x return a * a def false_fn(): return x * x r = tf_cond.cond(pred, true_fn, false_fn) self.assertAllEqual(r, 10000.) 
grad = gradients_impl.gradients(r, [x])[0] with self.assertRaisesRegex( errors_impl.InvalidArgumentError, r"Connecting to invalid output 1 of source node cond which has 1 " r"outputs. Try using " "tf.compat.v1.experimental.output_all_intermediates\(True\)."): self.evaluate(grad) @test_util.run_deprecated_v1 @test_util.enable_output_all_intermediates def testCondComputeGradAfterSessRun(self): with self.cached_session(): x = constant_op.constant(10.0, name="x") pred = math_ops.less(1, 2) def true_fn(): a = x * x return a * a def false_fn(): return x * x r = tf_cond.cond(pred, true_fn, false_fn) self.assertAllEqual(r, 10000.) grad = gradients_impl.gradients(r, [x])[0] self.assertAllEqual(grad, 4000.) @test_util.run_deprecated_v1 @test_util.enable_output_all_intermediates def testNestedCondComputeGradAfterSessRun(self): with self.cached_session(): x = constant_op.constant(10.0, name="x") pred = math_ops.less(1, 2) def true_fn(): def inner_true_fn(): a = x * x return a * a def inner_false_fn(): return x * x return tf_cond.cond( constant_op.constant(True), inner_true_fn, inner_false_fn) def false_fn(): return x * x r = tf_cond.cond(pred, true_fn, false_fn) self.assertAllEqual(r, 10000.) grad = gradients_impl.gradients(r, [x])[0] self.assertAllEqual(grad, 4000.) 
  @test_util.run_deprecated_v1
  def testCondGrad_2(self):
    # The gradient follows whichever branch the fed predicate value selects.
    with self.cached_session():
      c = array_ops.placeholder(dtypes.int32, shape=[])
      x = constant_op.constant(10.0)
      pred = math_ops.less(c, 2)
      fn1 = lambda: math_ops.multiply(x, 42.0)
      fn2 = lambda: math_ops.multiply(x, 3.0)
      r = tf_cond.cond(pred, fn1, fn2)

      grad = gradients_impl.gradients(r, [x])[0]
      self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
      self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))

  @test_util.disable_control_flow_v2(
      "b/110550782 (gradient w.r.t external variable)")
  @test_util.run_deprecated_v1
  def testCondGrad_3(self):
    # Gradient computed *inside* a cond branch w.r.t. a tensor defined
    # outside the cond.
    with self.cached_session():
      c = array_ops.placeholder(dtypes.int32, shape=[])
      ox = constant_op.constant(10.0)
      pred = math_ops.less(c, 2)

      def fn1(x):
        m = x * x
        return gradients_impl.gradients(m, [ox])[0]

      fn2 = lambda: math_ops.multiply(ox, 3.0)
      y = math_ops.multiply(7.0, ox)
      r = tf_cond.cond(pred, lambda: fn1(y), fn2)

      # True branch: d((7*ox)^2)/d(ox) = 98*ox = 980 at ox=10.
      # False branch: 3*ox = 30.
      self.assertAllEqual(980.0, r.eval(feed_dict={c: 1}))
      self.assertAllEqual(30.0, r.eval(feed_dict={c: 3}))

  @test_util.run_deprecated_v1
  def testCondGradMultiDevice(self):
    # First and second derivatives of a cond when forward pass, gradient and
    # grad-of-grad are each placed on different (virtual) CPU devices.
    config = config_pb2.ConfigProto(device_count={"CPU": 2},
                                    allow_soft_placement=True)
    with self.cached_session(config=config) as sess:
      pred = array_ops.placeholder(dtypes.bool, [])
      x = array_ops.placeholder(dtypes.float32)
      y = array_ops.placeholder(dtypes.float32)

      with ops.device("/cpu:0"):
        z = tf_cond.cond(pred, lambda: x * y * 2.0, lambda: 2.0)

      with ops.device("/cpu:1"):
        grad = gradients_impl.gradients(z, x)[0]

      with ops.device("/cpu:0"):
        grad_grad = gradients_impl.gradients(grad, x)[0]

      self.assertEqual(sess.run(grad, {pred: True, x: 1.0, y: 2.0}), 4.0)
      self.assertEqual(sess.run(grad, {pred: False, x: 1.0, y: 2.0}), 0.0)

      # v1 control flow gets None second derivative for some reason.
      if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
        self.assertIsNone(grad_grad)
        return

      self.assertEqual(sess.run(grad_grad, {pred: True, x: 1.0, y: 2.0}), 0.0)
      self.assertEqual(sess.run(grad_grad, {pred: False, x: 1.0, y: 2.0}), 0.0)

  @test_util.run_v1_only("b/120545219")
  def testNestedCond_Simple(self):
    # Gradients through a cond nested in another cond; every branch is
    # identity, so the gradient is 1 regardless of the predicates.
    with self.cached_session():
      x = constant_op.constant(0., name="X")
      y = tf_cond.cond(
          constant_op.constant(True), lambda: x,
          lambda: tf_cond.cond(x < 1., lambda: x, lambda: x))
      result = gradients_impl.gradients(y, x)[0]
      self.assertEqual(1.0, self.evaluate(result))

      z = tf_cond.cond(
          constant_op.constant(False), lambda: x,
          lambda: tf_cond.cond(x < 1., lambda: x, lambda: x))
      result = gradients_impl.gradients(z, x)[0]
      self.assertEqual(1.0, self.evaluate(result))

  @test_util.run_v1_only("b/120545219")
  def testCondGrad_Gather(self):
    with self.cached_session() as sess:
      v1 = variables.Variable([1.0, 42.0])
      c = array_ops.placeholder(dtypes.int32, shape=[])
      pred = math_ops.less(c, 2)
      fn1 = lambda: array_ops.identity(v1)
      fn2 = lambda: array_ops.gather(v1, [1, 1])
      r = tf_cond.cond(pred, fn1, fn2)
      # The following `grad` is a Tensor since it is the aggregation of an
      # IndexedSlice and a Tensor. It is an `IndexedSlices` with control flow
      # v2.
      grad = gradients_impl.gradients(r, [v1])[0]
      self.evaluate(variables.global_variables_initializer())
      if control_flow_util.ENABLE_CONTROL_FLOW_V2:
        self.assertIsInstance(grad, indexed_slices.IndexedSlices)
      # True branch (identity): every element gets gradient 1.
      grad_value = sess.run(grad, feed_dict={c: 1})
      self.assertAllEqual(gradient_checker_v2._to_numpy(grad_value), [1.0, 1.0])

      # False branch gathers index 1 twice, so element 1 accumulates grad 2.
      grad_value = sess.run(grad, feed_dict={c: 3})
      self.assertAllEqual(
          gradient_checker_v2._to_numpy(grad_value), [0.0, 2.0])

  @test_util.run_deprecated_v1
  def testCondGrad_ResourceVarSparseRead(self):
    # NOTE(skyewm): this test is interesting because the
    # ResourceVariable.sparse_read gradient function returns IndexedSlices.
    var = resource_variable_ops.ResourceVariable(
        np.ones((4, 2), dtype=np.float32))
    x = constant_op.constant(1.0)
    r = tf_cond.cond(
        constant_op.constant(True),
        lambda: x * math_ops.reduce_sum(var.sparse_read([1, 2])),
        lambda: constant_op.constant(np.zeros((2, 3)), dtype=dtypes.float32))
    grad = gradients_impl.gradients(r, var)[0]

    self.evaluate(variables.global_variables_initializer())
    grad_val = self.evaluate(grad)
    self.assertIsInstance(grad_val, indexed_slices.IndexedSlicesValue)
    # Only the sparse-read rows 1 and 2 receive gradient.
    self.assertAllEqual(gradient_checker_v2._to_numpy(grad_val),
                        [[0., 0.], [1., 1.], [1., 1.], [0., 0.]])

  def testCondGrad_MultiGather(self):
    # NOTE(skyewm): this test is interesting because the array_ops.gather and
    # ResourceVariable.sparse_read gradient functions returns IndexedSlices.
    var = resource_variable_ops.ResourceVariable(
        np.ones((4, 2), dtype=np.float32))
    x1 = constant_op.constant(np.ones((3, 3), dtype=np.float32))
    x2 = constant_op.constant(2.0)

    def true_fn():
      y1 = var.sparse_read([1, 2])
      y2 = array_ops.gather(x1, [2]) * x2
      y3 = x2 * [1., 1., 1.]
      return y1, y2, y3

    def false_fn():
      y1 = np.zeros((2, 2), dtype=np.float32)
      y2 = array_ops.gather(x1, [2]) * x2
      y3 = array_ops.gather(x1, [2])
      return y1, y2, y3

    @eager_def_function.function
    def foo():
      r = tf_cond.cond(constant_op.constant(True), true_fn, false_fn)
      return gradients_impl.gradients(r, [var, x1, x2])

    grad = foo()
    self.evaluate(variables.global_variables_initializer())
    var_grad, x1_grad, x2_grad = self.evaluate(grad)
    self.assertIsInstance(var_grad, indexed_slices.IndexedSlicesValue)
    self.assertAllEqual(gradient_checker_v2._to_numpy(var_grad),
                        [[0., 0.], [1., 1.], [1., 1.], [0., 0]])
    self.assertIsInstance(x1_grad, indexed_slices.IndexedSlicesValue)
    self.assertAllEqual(gradient_checker_v2._to_numpy(x1_grad),
                        [[0., 0., 0.], [0., 0., 0.], [2., 2., 2.]])
    # NOTE(review): this re-asserts x1_grad's type (already checked above);
    # possibly a copy-paste meant for x2_grad. Kept unchanged in this
    # documentation-only pass.
    self.assertIsInstance(x1_grad, indexed_slices.IndexedSlicesValue)
    self.assertEqual(gradient_checker_v2._to_numpy(x2_grad), 6.)
  @test_util.run_v1_only("b/120545219")
  def testCondPredicateTensor(self):
    """Regression test for lowering predicate from non-first output of an op."""

    @eager_def_function.function
    def foo():
      return constant_op.constant("foo"), constant_op.constant(True)

    # The predicate is foo()'s *second* output.
    r = tf_cond.cond(foo()[1], lambda: 1.0, lambda: 2.0)
    self.assertEqual(self.evaluate(r), 1.0)

  @test_util.run_v1_only("Tests Session.run() pruning logic.")
  def testCondFeedConstantPredicate(self):
    with self.cached_session() as sess:
      value = constant_op.constant(37.0)
      predicate = constant_op.constant(True)
      cond_output = tf_cond.cond(
          predicate, lambda: constant_op.constant(0.0), lambda: value)
      result = array_ops.identity(cond_output)
      # Even a constant predicate can be overridden via feed_dict.
      self.assertEqual(37.0, sess.run(result, feed_dict={predicate: False}))
      self.assertEqual(0.0, sess.run(result, feed_dict={predicate: True}))
      self.assertEqual(0.0, sess.run(result))

  @test_util.run_v1_only("Tests Session.run() pruning logic.")
  def testCondFeedPlaceholderWithDefaultPredicate(self):
    with self.cached_session() as sess:
      value = constant_op.constant(37.0)
      predicate = array_ops.placeholder_with_default(
          constant_op.constant(True), [])
      cond_output = tf_cond.cond(
          predicate, lambda: constant_op.constant(0.0), lambda: value)
      result = array_ops.identity(cond_output)
      self.assertAllEqual(37.0, sess.run(result, feed_dict={predicate: False}))
      self.assertAllEqual(0.0, sess.run(result, feed_dict={predicate: True}))
      self.assertAllEqual(0.0, sess.run(result))

  def testCondTensorDeps(self):
    # A function capturing a control dependency on an outer tensor must still
    # trace successfully.
    t = array_ops.identity(1.)

    @eager_def_function.function
    def f():
      with ops.control_dependencies([t]):
        return array_ops.identity(2.)

    f.get_concrete_function()

  @test_util.run_in_graph_and_eager_modes
  def testCondAutoControlDeps(self):
    # Checks which enqueue_print_op side effects survive: v1 graph pruning
    # keeps only "C" (a control dependency of the returned value); defuns
    # under control flow v2 auto-insert control deps so "A", "B", "C" all run
    # in program order; wrap_function prunes again.
    if test_util.is_gpu_available():
      self.skipTest("b/128676188 causes OOM on opensource gpu tests")

    print_prefix = "testCondAutoControlDeps: "

    def branch_fn():
      enqueue_print_op("A")
      enqueue_print_op("B")
      with ops.control_dependencies([enqueue_print_op("C")]):
        return constant_op.constant(10)

    def build_cond():
      return tf_cond.cond(
          constant_op.constant(True), branch_fn, lambda: 0)

    def build_nested_cond():
      return tf_cond.cond(
          constant_op.constant(True), build_cond, lambda: 0)

    # In v1 graph mode, pruning should make only "C" print.
    if not context.executing_eagerly():
      with self.cached_session():
        with self.captureWritesToStream(sys.stderr) as printed:
          self.assertEqual(self.evaluate(build_cond()), 10)
        self.assertEqual(["C"], filter_test_messages(printed.contents()))

        with self.captureWritesToStream(sys.stderr) as printed:
          self.assertEqual(self.evaluate(build_nested_cond()), 10)
        self.assertEqual(["C"], filter_test_messages(printed.contents()))

    # In defuns, all prints should execute in program order.
    # This doesn't work with legacy control flow.
    if control_flow_util.ENABLE_CONTROL_FLOW_V2:

      @eager_def_function.function
      def cond():
        return build_cond()

      with self.captureWritesToStream(sys.stderr) as printed:
        self.assertEqual(self.evaluate(cond()), 10)
      self.assertEqual(["A", "B", "C"],
                       filter_test_messages(printed.contents()))

      @eager_def_function.function
      def nested_cond():
        return build_nested_cond()

      with self.captureWritesToStream(sys.stderr) as printed:
        self.assertEqual(self.evaluate(nested_cond()), 10)
      self.assertEqual(["A", "B", "C"],
                       filter_test_messages(printed.contents()))

    # wrap_function should prune.
    def pruned_cond():
      return build_cond()

    pruned_cond = wrap_function.wrap_function(pruned_cond, [])

    with self.captureWritesToStream(sys.stderr) as printed:
      self.assertEqual(self.evaluate(pruned_cond()), 10)
    self.assertEqual(["C"], filter_test_messages(printed.contents()))

    def pruned_nested_cond():
      return build_nested_cond()

    pruned_nested_cond = wrap_function.wrap_function(pruned_nested_cond, [])

    with self.captureWritesToStream(sys.stderr) as printed:
      self.assertEqual(self.evaluate(pruned_nested_cond()), 10)
    self.assertEqual(["C"], filter_test_messages(printed.contents()))

  @test_util.run_in_graph_and_eager_modes
  @test_util.disable_tfrt("b/179459136")
  def testWhileAutoControlDeps(self):
    # Same auto-control-dependency check as testCondAutoControlDeps, but for
    # while_loop: pruning keeps only "D"; defuns run "A","B","C","D" per
    # iteration (with a final extra "A" from the failing cond evaluation).
    # Legacy while_loop fails this test because it produces deprecation notices
    # in stderr.
    if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
      return

    def cond(i, unused_x):
      enqueue_print_op("A")
      return i < 2

    def body(i, x):
      enqueue_print_op("B")
      with ops.control_dependencies([enqueue_print_op("C")]):
        x = array_ops.identity(x)
      with ops.control_dependencies([enqueue_print_op("D")]):
        return i + 1, x

    def build_while():
      return while_loop_tf.while_loop(
          cond, body, [constant_op.constant(0), constant_op.constant(0)])

    def build_nested_while():
      return tf_cond.cond(
          constant_op.constant(True), build_while, lambda: [0, 0])

    # In v1 graph mode, pruning should make only "D" print.
    if not context.executing_eagerly():
      with self.cached_session():
        with self.captureWritesToStream(sys.stderr) as printed:
          self.assertEqual(self.evaluate(build_while()[0]), 2)
        self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))

        with self.captureWritesToStream(sys.stderr) as printed:
          self.assertEqual(self.evaluate(build_nested_while()[0]), 2)
        self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))

    # In defuns, all prints should execute in program order.
    @eager_def_function.function
    def while_loop():
      return build_while()[0]

    with self.captureWritesToStream(sys.stderr) as printed:
      self.assertEqual(self.evaluate(while_loop()), 2)
    self.assertEqual(["A", "B", "C", "D", "A", "B", "C", "D", "A"],
                     filter_test_messages(printed.contents()))

    @eager_def_function.function
    def nested_while_loop():
      return build_nested_while()[0]

    with self.captureWritesToStream(sys.stderr) as printed:
      self.assertEqual(self.evaluate(nested_while_loop()), 2)
    self.assertEqual(["A", "B", "C", "D", "A", "B", "C", "D", "A"],
                     filter_test_messages(printed.contents()))

    # wrap_function should prune.
    def pruned_while():
      return build_while()[0]

    pruned_while = wrap_function.wrap_function(pruned_while, [])

    with self.captureWritesToStream(sys.stderr) as printed:
      self.assertEqual(self.evaluate(pruned_while()), 2)
    self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))

    def pruned_nested_while():
      return build_nested_while()[0]

    pruned_nested_while = wrap_function.wrap_function(pruned_nested_while, [])

    with self.captureWritesToStream(sys.stderr) as printed:
      self.assertEqual(self.evaluate(pruned_nested_while()), 2)
    self.assertEqual(["D", "D"], filter_test_messages(printed.contents()))

  # Microbenchmark: 256,000 iterations/s.
  def testWhile_1(self):
    # Minimal while_loop: count from 0 to 10000.
    with self.cached_session():
      n = constant_op.constant(0)
      c = lambda x: math_ops.less(x, 10000)
      b = lambda x: math_ops.add(x, 1)
      r = while_loop_tf.while_loop(c, b, [n], parallel_iterations=20)
      self.assertEqual(10000, self.evaluate(r))

  @test_util.run_v1_only("b/120545219")
  def testWhileExternalControlDependencies(self):
    with self.cached_session():
      v = variables.Variable(0.0)
      self.evaluate(v.initializer)
      increment = v.assign_add(1.0).read_value()

      def body_fn(i):
        with ops.control_dependencies([increment]):
          return i + 1

      result = while_loop_tf.while_loop(
          cond=lambda i: i < 2, body=body_fn, loop_vars=[1])
      self.assertAllEqual(result, 2)
      # The externally-created increment runs once, not once per iteration.
      self.assertAllEqual(v.read_value(), 1.0)

  @test_util.run_v1_only("b/120545219")
  def testWhileExternalControlDependenciesNoInput(self):
    with self.cached_session():
      v = variables.Variable(0.0)
      self.evaluate(v.initializer)
      # TODO(apassos): figure out why the reading is necessary here.
      increment = v.assign_add(1.0).read_value()

      def body_fn(unused_i):
        with ops.control_dependencies([increment]):
          return constant_op.constant(5, name="five")

      result = while_loop_tf.while_loop(
          cond=lambda i: i < 5, body=body_fn, loop_vars=[0])
      self.evaluate(result)
      self.assertAllEqual(self.evaluate(v), 1.0)

  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileWithRefs_1(self):
    # Ref-dtype loop variables must keep their ref dtype through the loop.
    with self.cached_session() as sess:
      x = variable_v1.VariableV1(0)._ref()  # pylint: disable=protected-access
      i = constant_op.constant(0)
      c = lambda i, x: math_ops.less(i, 100)

      self.assertEqual(x.dtype, dtypes.int32_ref)

      def b(i, x):
        self.assertEqual(x.dtype, dtypes.int32_ref)
        return (i + 1, gen_array_ops.ref_identity(x))

      r = while_loop_tf.while_loop(c, b, [i, x], parallel_iterations=5)

      self.evaluate(variables.global_variables_initializer())

      self.assertEqual(r[0].dtype, dtypes.int32)
      self.assertEqual(r[1].dtype, dtypes.int32_ref)

      value_i, value_x = self.evaluate(r)

    self.assertEqual(100, value_i)
    self.assertEqual(0, value_x)

  def testWhile_2(self):
    # isum is a file-level helper; sums 0..9 starting from 0 -> 45.
    with self.cached_session():
      s = constant_op.constant(0)
      r = isum(s)
      self.assertAllEqual(45, self.evaluate(r))

  def testWhileWithMaximumIterations(self):
    with self.cached_session():
      s = constant_op.constant([1, 2, 3, 4, 5])
      r = isum(s, maximum_iterations=3)
      self.assertAllEqual([1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3],
                          self.evaluate(r))

  @test_util.run_v1_only("b/120545219")
  def testWhileWithMaximumIterationsAndSingleArgument(self):
    with self.cached_session():
      # maximum_iterations=1 caps the loop despite the cond allowing 3.
      r = while_loop_tf.while_loop(
          lambda i: i < 3, lambda i: i + 1, [0], maximum_iterations=1)
      self.assertEqual(1, self.evaluate(r))

  @test_util.run_v1_only("b/120545219")
  def testXLAGradInLoop(self):
    # We have an optimization that moves certain reduction ops, this test makes
    # sure we don't do that for XLA ops.

    # Use dynamic inputs, which triggers the creation of "BroadcastGradientArgs"
    # and "Shape" op.
    input1 = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
    input2 = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])

    def cond(i1, i2):
      return False

    def body(i1, i2):
      return math_ops.add(i1, i2), math_ops.add(i1, i2)

    xla_context = control_flow_ops.XLAControlFlowContext()
    xla_context.Enter()

    out1, _ = while_loop_tf.while_loop(
        cond, body, (input1, input2), maximum_iterations=2)
    # Building the gradient is what creates BroadcastGradientArgs nodes.
    g = gradients_impl.gradients(out1, [input1])

    for op in out1.graph.get_operations():
      # Test that the "Shape" is directly passed to BroadcastGradientArgs
      # instead of being pushed to the stack.
      if op.type == "BroadcastGradientArgs":
        self.assertEqual(op.inputs[0].op.type, "Shape")
        self.assertEqual(op.inputs[1].op.type, "Shape")
    xla_context.Exit()

  @test_util.disable_control_flow_v2("b/115776323 (max_iters)")
  @test_util.run_v1_only("b/120545219")
  def testSingleNestedMaximumIterationsWhileLoopGradientInXLAContext(self):
    v = constant_op.constant(1.0)

    def training_loop_with_gradient(i):
      out = while_loop_tf.while_loop(
          lambda i_, _: i_ < 3,
          lambda i_, j: [i_ + 1, j * v], [0, 1.0],
          maximum_iterations=i)
      g = gradients_impl.gradients(out, v)
      with ops.control_dependencies(g):
        return i + 1

    xla_context = control_flow_ops.XLAControlFlowContext()
    xla_context.Enter()
    # Create training loop, ensure we can call gradient() of
    # while_loop inside the training loop.
    loop = while_loop_tf.while_loop(
        lambda i: i < 3, training_loop_with_gradient, [0])
    xla_context.Exit()

    loop_execute = array_ops.identity(loop)  # Because loop is not fetchable.

    # Should execute without issue.
    self.assertEqual(3, self.evaluate(loop_execute))

  @test_util.run_v1_only("b/120545219")
  def testInvalidMaximumIterationsWhileLoopGradientInXLAContext(self):
    if control_flow_util.ENABLE_CONTROL_FLOW_V2:
      self.skipTest("WhileV2 does lazy evaluation of maximum_iterations")
    v = constant_op.constant(1.0)

    def inner_body(i, x):
      out = while_loop_tf.while_loop(
          lambda i, _: i < 3,
          lambda i, j: [i + 1, j * v], [0, x],
          maximum_iterations=i)
      return out

    def create_while_loop(maximum_iterations=None):
      return while_loop_tf.while_loop(
          lambda i, _: i < 3,
          inner_body, [0, 1.0],
          maximum_iterations=maximum_iterations)

    loop_no_xla = create_while_loop(maximum_iterations=5)
    # maximum_iterations is fine outside of an XLA scope
    gs = gradients_impl.gradients(loop_no_xla, v)
    self.evaluate(gs)  # This should execute without error.

    xla_context = control_flow_ops.XLAControlFlowContext()
    xla_context.Enter()
    loop_no_maxiter = create_while_loop()
    loop_with_maxiter = create_while_loop(maximum_iterations=2)
    xla_context.Exit()

    # Inside XLA, a missing maximum_iterations makes gradient accumulation
    # impossible...
    with self.assertRaisesRegex(
        ValueError,
        r"Cannot create a gradient accumulator for tensor '.+' inside "
        r"XLA while_loop because maximum_iterations was not passed to "
        r"the tf.while_loop call \('.+'\)."):
      _ = gradients_impl.gradients(loop_no_maxiter, v)

    # ...and the *inner* loop's dynamic maximum_iterations (the outer loop
    # counter) is not statically known.
    with self.assertRaisesRegex(
        ValueError,
        r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
        r"while_loop. maximum_iterations tensor '.+' for while_loop context "
        r"'.+' must be statically known \(e.g. a constant value or known "
        r"shape dimension\), or be defined at or outside the while loop "
        r"context '.*' \(currently defined in '.*'\)"):
      _ = gradients_impl.gradients(loop_with_maxiter, v)

  @test_util.run_v1_only("b/120545219")
  def testInvalidMaximumIterationsFromSiblingContextWhileLoopInXLAContext(self):
    v = constant_op.constant(1.0)

    def create_while_loop():
      max_iter_holder = []

      def create_mi():
        # The placeholder is created inside a cond branch, i.e. a *sibling*
        # context of the while loop that will consume it.
        max_iter_holder.append(array_ops.placeholder(dtypes.int32, shape=()))
        return 1.0

      _ = tf_cond.cond(
          constant_op.constant(True), create_mi, create_mi)

      return while_loop_tf.while_loop(
          lambda i, _: i < 3,
          lambda i, x: (i + 1, v * x), (0, 1.0),
          maximum_iterations=max_iter_holder[0])

    if control_flow_util.ENABLE_CONTROL_FLOW_V2:
      xla_context = control_flow_ops.XLAControlFlowContext()
      xla_context.Enter()
      with self.assertRaisesRegex(ValueError,
                                  r"must be from the same graph.*"):
        loop = create_while_loop()
      xla_context.Exit()
    else:
      xla_context = control_flow_ops.XLAControlFlowContext()
      xla_context.Enter()
      loop = create_while_loop()
      xla_context.Exit()
      with self.assertRaisesRegex(
          ValueError,
          r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
          r"while_loop. maximum_iterations tensor '.*Placeholder:0' for "
          r"while_loop context '.+' must be statically known \(e.g. a constant "
          r"value or known shape dimension\), or be defined at or outside the "
          r"while loop context '' \(currently defined in 'cond/.+'\)"):
        _ = gradients_impl.gradients(loop, v)

  @test_util.run_v1_only("b/120545219")
  def testNestedWhileLoopWithMaxItersFromOuterContextInXLAContext(self):
    if test_util.is_gpu_available():
      self.skipTest("b/128646372, b/128645947 fails in opensource build")

    v = constant_op.constant(1.0)

    p = array_ops.placeholder(dtype=dtypes.int32)

    def mid_body_builder(iterations):

      def mid_body(i, x):
        r = while_loop_tf.while_loop(
            lambda *_: True,
            lambda i, x: (i + 1, v * x), (0, x),
            maximum_iterations=iterations,
            name="inner")
        return (i + 1, gradients_impl.gradients(x + r[1], v)[0])

      return mid_body

    def outer_body(i, x):
      iterations = array_ops.size(p, name="iterations")
      return (i + 1, x + while_loop_tf.while_loop(
          lambda *_: True,
          mid_body_builder(iterations), (0, x),
          maximum_iterations=iterations,
          name="mid")[1])

    def create_while_loop():
      with ops.device("/cpu:0"):
        r = while_loop_tf.while_loop(
            lambda *_: True,
            outer_body, (0, 1.0),
            maximum_iterations=5,
            name="outer")
        return array_ops.identity(r[1])

    xla_context = control_flow_ops.XLAControlFlowContext()
    xla_context.Enter()
    final_with_xla_context = create_while_loop()
    xla_context.Exit()

    final_without_xla_context = create_while_loop()

    with self.session(use_gpu=False) as sess:
      opts = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata_without_xla_context = config_pb2.RunMetadata()
      run_metadata = config_pb2.RunMetadata()

      final_value_without_xla_context = sess.run(
          final_without_xla_context,
          feed_dict={p: [0, 0, 0]},
          options=opts,
          run_metadata=run_metadata_without_xla_context)

      final_value_with_xla_context = sess.run(
          final_with_xla_context,
          feed_dict={p: [0, 0, 0]},
          options=opts,
          run_metadata=run_metadata)

      if control_flow_util.ENABLE_CONTROL_FLOW_V2:
        # With while_v2 on xla, run_metadata only contains the unlowered While
        # op so node_stats does not have statistics for the pushes. So as a
        # loose check we check the pushes in the lowered version.
        for dev in run_metadata_without_xla_context.step_stats.dev_stats:
          if "/device:CPU" in dev.device:
            node_stats = dev.node_stats
            stack_push_count = len([
                x for x in node_stats
                if re.match(r".*TensorListPushBack_?\d*", x.node_name)
            ])
      else:
        for dev in run_metadata.step_stats.dev_stats:
          if "/device:CPU" in dev.device:
            node_stats = dev.node_stats
            stack_push_op = "StackPushV2"
            stack_push_count = len(
                [x for x in node_stats
                 if x.node_name.endswith("StackPushV2")])

      # Pushes to the stack = product of maximum_iterations values;
      # the last two "3"s comes from size(p), when p == [0, 0, 0].
      self.assertEqual(stack_push_count, 5 * 3 * 3, str(node_stats))

      self.assertAllClose(final_value_with_xla_context,
                          final_value_without_xla_context)

  # Have more than 10 parallel iterations and hence exercise k-bound
  # most of the time.
  @test_util.run_deprecated_v1
  def testWhile_3(self):
    with self.cached_session():

      def compute(i, m, c, o):
        m, c = [math_ops.add(m, 1), math_ops.add(c, 1)]
        o = math_ops.add(o, m)
        o = math_ops.add(o, c)
        i = math_ops.add(i, 1)
        return [i, m, c, o]

      i = ops.convert_to_tensor(0)
      m = ops.convert_to_tensor(0)
      c = ops.convert_to_tensor(0)
      o = ops.convert_to_tensor(0)
      d = ops.convert_to_tensor(100)
      r = while_loop_tf.while_loop(
          lambda i, m, c, o: math_ops.less(i, d), compute, [i, m, c, o])
      result = r[3]
      # o accumulates 2 * sum(1..100) = 10100.
      self.assertAllEqual(10100, result)

  @test_util.run_deprecated_v1
  def testWhile_4(self):
    with self.cached_session():

      def compute(i, m, c, o):
        m, c = [array_ops.gather(x, i), array_ops.gather(x, i)]
        o = math_ops.add(o, m)
        o = math_ops.add(o, c)
        i = math_ops.add(i, 1)
        return [i, m, c, o]

      i = ops.convert_to_tensor(0)
      m = ops.convert_to_tensor(0)
      c = ops.convert_to_tensor(0)
      o = ops.convert_to_tensor(0)
      x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
      s = array_ops.size(x)
      r = while_loop_tf.while_loop(
          lambda i, m, c, o: math_ops.less(i, s), compute, [i, m, c, o])
      result = r[3]
      # o accumulates 2 * sum(x) = 42.
      self.assertAllEqual(42, result)
  @test_util.run_v1_only("b/120545219")
  def testWhile_5(self):
    # Loop variables with shapes that change each iteration require
    # unknown-shape invariants.
    with self.cached_session():

      def compute(i, c, o):
        c = array_ops.strided_slice(x, array_ops.expand_dims(i, 0),
                                    [1] + array_ops.expand_dims(i, 0))
        o = array_ops.concat([o, c], 0)
        i = math_ops.add(i, 1)
        return [i, c, o]

      i = ops.convert_to_tensor(0)
      c = ops.convert_to_tensor([0])
      o = ops.convert_to_tensor([0])
      x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
      s = array_ops.size(x)
      r = while_loop_tf.while_loop(
          lambda i, c, o: math_ops.less(i, s), compute, [i, c, o], [
              i.get_shape(),
              tensor_shape.unknown_shape(),
              tensor_shape.unknown_shape(),
          ],
      )
      result = r[2]
      self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)

  @test_util.run_gpu_only
  @test_util.run_deprecated_v1
  def testWhile_Device(self):

    # Body function defined outside of device scope
    def body(x):
      return math_ops.exp(x)

    with ops.device("CPU:0"):
      r = while_loop_tf.while_loop(
          lambda x: x < 10, body, [constant_op.constant(-10.0)])
      self.assertIn("cpu", r.device.lower())

    with session.Session() as sess:
      options = config_pb2.RunOptions(output_partition_graphs=True)
      run_metadata = config_pb2.RunMetadata()
      sess.run(r, options=options, run_metadata=run_metadata)

      # We expect that everything runs on CPU, even if GPU is available.
      self.assertEqual(len(run_metadata.partition_graphs), 1)

  @test_util.disable_control_flow_v2("b/116338794 (buffer_reuse)")
  @test_util.run_v1_only("b/120545219")
  def testBufferForwarding(self):
    # Counts unique output-buffer allocations across the whole run to confirm
    # the executor forwards/reuses buffers between loop iterations.
    run_options = config_pb2.RunOptions(
        trace_level=config_pb2.RunOptions.FULL_TRACE)
    run_metadata = config_pb2.RunMetadata()

    with self.cached_session() as sess:
      with ops.device("/cpu:0"):
        c = constant_op.constant(2)
        i0 = constant_op.constant(0)
        r = while_loop_tf.while_loop(
            lambda i: i < 1000, lambda i: math_ops.square(c) + i, [i0])
      r_val = sess.run(r, options=run_options, run_metadata=run_metadata)
      self.assertEqual(1000, r_val)
      self.assertTrue(run_metadata.HasField("step_stats"))
      unique_allocs = set()
      for node_stat in run_metadata.step_stats.dev_stats[0].node_stats:
        for output in node_stat.output:
          unique_allocs.add(
              output.tensor_description.allocation_description.ptr)
      # Prior to cl/147536680, the number of unique allocations was about 1005.
      self.assertLess(len(unique_allocs), 756)

  def _testWhile_Gpu_1(self, use_gpu):
    # Helper shared by testWhile_Gpu_1; runs the loop on CPU or GPU.
    with self.cached_session(use_gpu=use_gpu):
      n = constant_op.constant(1.0)
      c = lambda x: math_ops.less(x, 10.0)
      b = lambda x: math_ops.add(x, 1.0)
      r = while_loop_tf.while_loop(c, b, [n])
      self.assertAllClose(10.0, self.evaluate(r))

  def testWhile_Gpu_1(self):
    self._testWhile_Gpu_1(use_gpu=False)
    self._testWhile_Gpu_1(use_gpu=True)

  def _testWhile_Gpu_2(self, use_gpu):
    # Like _testWhile_Gpu_1 but the body pins its op to CPU, exercising
    # cross-device loop bodies.
    with self.cached_session(use_gpu=use_gpu):
      n = constant_op.constant(1.0)
      c = lambda x: math_ops.less(x, 10.0)

      def b(x):
        with ops.device("/cpu:0"):
          return math_ops.add(x, 1.0)

      r = while_loop_tf.while_loop(c, b, [n])
      self.assertAllClose(10.0, self.evaluate(r))

  def testWhile_Gpu_2(self):
    self._testWhile_Gpu_2(use_gpu=False)
    self._testWhile_Gpu_2(use_gpu=True)

  def testWhileShape(self):
    # tile doubles each dimension per iteration, so the result shape is
    # only known at run time; an unknown-shape invariant is required.
    with self.cached_session():
      i = constant_op.constant(0)
      m = array_ops.ones([2, 2])
      c = lambda i, j: math_ops.less(i, 2)

      def _b(i, j):
        new_i = math_ops.add(i, 1)
        new_j = array_ops.tile(j, [2, 2])
        return [new_i, new_j]

      r = while_loop_tf.while_loop(
          c, _b, [i, m],
          [i.get_shape(), tensor_shape.unknown_shape()])
      r = r[1] * array_ops.ones([8, 8])
      self.assertAllEqual(np.ones((8, 8)), self.evaluate(r))

  @test_util.disable_control_flow_v2("b/131265085")
  @test_util.run_v1_only("b/131265085")
  def testWhileBadShape(self):
    x = constant_op.constant([2.0, 4.0], name="values")
    i = constant_op.constant(0)
    c = lambda i, _: math_ops.less(i, 10)
    b = lambda i, x: [i + 1, x + 1]
    with self.assertRaisesRegex(ValueError, "is not compatible with"):
      # Shape of x is [2], but we specify a shape of [5].
      while_loop_tf.while_loop(
          c, b, [i, x], [i.shape, tensor_shape.TensorShape([5])])

  @test_util.run_in_graph_and_eager_modes
  def testWhileBadBodyReturn(self):
    x = constant_op.constant([2.0, 4.0], name="values")
    i = constant_op.constant(0)
    c = lambda i, *x: math_ops.less(i, 10)

    # body accepts N values and returns N+1 values.
    b = lambda i, *x: (i, i) + x

    with self.assertRaisesRegex(
        ValueError,
        "The two structures don't have the same nested structure."):
      while_loop_tf.while_loop(c, b, [i, x])

  @test_util.run_deprecated_v1
  def testWhileWithNonTensorInput_Scalar(self):
    # Plain Python scalar loop vars are converted to tensors.
    with self.cached_session():
      n = 0
      c = lambda x: x < 10000
      b = lambda x: x + 1
      r = while_loop_tf.while_loop(c, b, [n], parallel_iterations=20)
      self.assertEqual(10000, self.evaluate(r))

  def testWhileWithNonTensorInput_Vector(self):
    with self.cached_session():
      n = np.array([0])  # Note, [0] would not work here; that is a list
      c = lambda x: x[0] < 10000
      b = lambda x: array_ops_stack.stack([x[0] + 1])
      r = while_loop_tf.while_loop(c, b, [n], parallel_iterations=20)
      self.assertEqual([10000], self.evaluate(r))

  def testWhileShapeInference(self):
    # concat doubles dim 0 each iteration; invariant [None, 2] allows it and
    # the inferred result shape stays compatible with the final [8, 2].
    with self.cached_session():
      i = constant_op.constant(0)
      m = array_ops.ones([2, 2])
      c = lambda i, j: math_ops.less(i, 2)

      def b(i, j):
        new_i = math_ops.add(i, 1)
        new_j = array_ops.concat([j, j], 0)
        return [new_i, new_j]

      r = while_loop_tf.while_loop(
          c, b, [i, m],
          [i.get_shape(), tensor_shape.TensorShape([None, 2])])
      self.assertTrue(r[1].shape.is_compatible_with([8, 2]))

  @test_util.run_v1_only("b/120545219")
  def testWhileShapeInferenceBadShape(self):
    # Without a shape invariant, a shape change across iterations is an error.
    with self.cached_session():
      i = constant_op.constant(0)
      m = array_ops.ones([2, 2])
      c = lambda i, j: math_ops.less(i, 2)
      b = lambda i, j: [i + 1, array_ops.concat([j, j], 0)]
      with self.assertRaisesRegex(
          ValueError,
          r".*\(2, 2\).*\(4, 2\) after one iteration\. To allow the shape to "
          r"vary across iterations, use the `shape_invariants` argument of "
          r"tf.while_loop to specify a less-specific shape\."):
        while_loop_tf.while_loop(c, b, [i, m])

  def testWhileShapeInferenceSparseTensor(self):
    # Shape inference for SparseTensor loop vars under default and explicit
    # shape invariants, with bodies that keep, grow, or change the rank of
    # the component tensors.
    values = constant_op.constant([2.0, 4.0], name="values")
    indices = constant_op.constant([[0], [3]],
                                   dtype=dtypes.int64,
                                   name="indices")
    shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
    i = constant_op.constant(0)
    x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)

    def c(i, _):
      return i < 10

    def b1(i, x):  # modifies values. (shape of components is not changed.)
      return [
          i + 1,
          sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
      ]

    def b2(i, x):  # adds new values. (shape of components is changed.)
      return [
          i + 1,
          sparse_ops.sparse_add(
              x,
              sparse_tensor.SparseTensor(
                  indices=math_ops.cast(
                      array_ops.fill([1, 1], i), dtypes.int64),
                  values=array_ops.fill([1], 1.0),
                  dense_shape=x.dense_shape))
      ]

    def b3(i, x):  # modifies rank. (shape of all components is changed.)
      return [
          i + 1,
          sparse_tensor.SparseTensor(
              array_ops.concat([x.indices, [[i], [i]]], axis=1),
              x.values * 2.0,
              array_ops.concat([x.dense_shape, [10]], axis=0))
      ]

    def check_shapes(r, indices, values, dense_shape):
      self.assertTrue(r.indices.shape.is_compatible_with(indices))
      self.assertTrue(r.values.shape.is_compatible_with(values))
      self.assertTrue(r.dense_shape.shape.is_compatible_with(dense_shape))

    # Default shape invariant; b1 only modifies values.
    _, r = while_loop_tf.while_loop(c, b1, [i, x])
    check_shapes(r, indices=[None, 1], values=[None], dense_shape=[1])

    # Default shape invariant; b2 adds new values
    _, r = while_loop_tf.while_loop(c, b2, [i, x])
    check_shapes(r, indices=[None, 1], values=[None], dense_shape=[1])

    # Explicit shape invariant, allowing any rank; b1 only modifies values.
    _, r = while_loop_tf.while_loop(
        c, b1, [i, x],
        [i.get_shape(), tensor_shape.TensorShape([None])])
    check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])

    # Explicit shape invariant, allowing any rank; b3 modifies rank.
    _, r = while_loop_tf.while_loop(
        c, b3, [i, x],
        [i.get_shape(), tensor_shape.TensorShape([None])])
    check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])

    # Shape invariant with ndims=None. Technically, this isn't supported
    # according to the docs, but we support it for backwards compatibility.
    _, r = while_loop_tf.while_loop(
        c, b1, [i, x],
        [i.get_shape(), tensor_shape.TensorShape(None)])
    check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])
    _, r = while_loop_tf.while_loop(
        c, b3, [i, x],
        [i.get_shape(), tensor_shape.TensorShape(None)])
    check_shapes(r, indices=[None, None], values=[None], dense_shape=[None])

  @test_util.disable_control_flow_v2("b/131265085")
  @test_util.run_v1_only("b/131265085")
  def testWhileBadShapeSparseTensor(self):
    values = constant_op.constant([2.0, 4.0], name="values")
    indices = constant_op.constant([[0], [3]],
                                   dtype=dtypes.int64,
                                   name="indices")
    shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
    i = constant_op.constant(0)
    x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
    c = lambda i, _: i < 10
    b1 = lambda i, x: [i+1, x]

    def b2(i, x):  # modifies rank. (shape of all components is changed.)
      return [
          i + 1,
          sparse_tensor.SparseTensor(
              array_ops.concat([x.indices, [[i], [i]]], axis=1),
              x.values * 2.0,
              array_ops.concat([x.dense_shape, [10]], axis=0))
      ]

    # Explicit shape invariant, with a specific (incompatible) rank.
    with self.assertRaisesRegex(ValueError, "is not compatible with"):
      while_loop_tf.while_loop(
          c, b1, [i, x],
          [i.get_shape(), tensor_shape.TensorShape([5])])

    # Default shape invariant, but b2 modifies rank (which is not allowed).
    with self.assertRaises(ValueError):
      while_loop_tf.while_loop(c, b2, [i, x])

  def testWhileShapeInferenceIndexedSlices(self):
    with self.cached_session():
      values = constant_op.constant([[2.0, 4.0], [3.0, 5.0]], name="values")
      indices = constant_op.constant([0, 3], name="indices")
      shape = constant_op.constant([10, 2], name="dense_shape")
      i = constant_op.constant(0)
      x = indexed_slices.IndexedSlices(values, indices, dense_shape=shape)

      def c(i, _):
        return i < 10

      def b(i, x):
        return [
            i + 1,
            indexed_slices.IndexedSlices(x.values * 2.0, x.indices,
                                         x.dense_shape)
        ]

      _, r = while_loop_tf.while_loop(c, b, [i, x])
      self.assertEqual(r.dense_shape.get_shape()[0], 2)
      self.assertEqual(r.values.get_shape(), tensor_shape.TensorShape([2, 2]))

      _, r = while_loop_tf.while_loop(
          c, b, [i, x],
          [i.get_shape(), tensor_shape.TensorShape([None, 2])])
      self.assertEqual(r.dense_shape.get_shape()[0], 2)
      self.assertTrue(r.values.get_shape().is_compatible_with([None, 2]))

  @test_util.disable_control_flow_v2("b/131265085")
  @test_util.run_v1_only("b/131265085")
  def testWhileBadShapeIndexedSlices(self):
    # NOTE(review): despite the name this builds a SparseTensor (apparently
    # copied from the SparseTensor variant above), and the cond returns the
    # constant 10 rather than a comparison. Neither affects the behavior
    # under test: the shape-invariant validation fires at graph-construction
    # time, before the cond or body ever run. Kept unchanged here.
    values = constant_op.constant([2.0, 4.0], name="values")
    indices = constant_op.constant([[0], [3]],
                                   dtype=dtypes.int64,
                                   name="indices")
    shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
    i = constant_op.constant(0)
    x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
    c = lambda i, _: 10
    b = lambda i, x: [i+1, x]

    # Explicit shape invariant, with a specific (incompatible) rank.
    with self.assertRaisesRegex(ValueError, "is not compatible with"):
      while_loop_tf.while_loop(
          c, b, [i, x],
          [i.get_shape(), tensor_shape.TensorShape([5])])

  def testWhileShapeInferenceRaggedTensor(self):
    # NOTE(review): this method continues past the end of this chunk; only
    # the portion visible here is documented.
    i = constant_op.constant(0)
    x = ragged_factory_ops.constant([[1, 2], [3], [4, 5, 6]])
    c = lambda i, _: i < 10

    def b1(i, x):  # Adds new values to rows (but doesn't create new rows)
      return [
          i + 1,
          array_ops.concat([x, x], axis=1)
      ]

    def b2(i, x):  # Adds new rows.
      return [
          i + 1,
          array_ops.concat([x, x], axis=0)
      ]

    def check_shapes(r, values, splits):
      self.assertTrue(r.values.shape.is_compatible_with(values))
      self.assertTrue(r.row_splits.shape.is_compatible_with(splits))

    # Default shape invariant; b1 adds new values to rows.
    _, r = while_loop_tf.while_loop(c, b1, [i, x])
    check_shapes(r, values=[None], splits=[4])

    # Default shape invariant; b2 adds new rows (not allowed).
    if not context.executing_eagerly():
      with self.assertRaises(ValueError):
        _, r = while_loop_tf.while_loop(c, b2, [i, x])

    # Explicit shape invariant; b1 adds new values to rows.
    # (deprecated: use TensorShape instead of RaggedTensorSpec)
    _, r = while_loop_tf.while_loop(
        c, b1, [i, x],
        [i.get_shape(), tensor_shape.TensorShape([None, None])])
    check_shapes(r, values=[None], splits=[None])

    # Explicit shape invariant; b1 adds new values to rows.
    _, r = while_loop_tf.while_loop(c, b1, [i, x], [
        i.get_shape(),
        ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32)
    ])
    check_shapes(r, values=[None], splits=[None])

    # Explicit shape invariant; b2 adds new rows.
_, r = while_loop_tf.while_loop(c, b2, [i, x], [ i.get_shape(), ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32) ]) check_shapes(r, values=[None], splits=[None]) def testWhileShapeInferenceRaggedTensorRaggedRank2(self): i = constant_op.constant(0) x = ragged_factory_ops.constant([[[1, 2], [3], [4, 5, 6]], [[], [8, 9, 10]]]) c = lambda i, _: i < 10 def b(i, x): return [ i + 1, array_ops.concat([x, x[..., i:i+1]], axis=-1) ] _, r = while_loop_tf.while_loop(c, b, [i, x]) self.assertEqual(r.row_splits.shape.as_list(), [3]) self.assertIn(r.values.row_splits.shape.as_list(), ([6], [None])) self.assertIn(r.values.values.shape.as_list(), ([49], [None])) def testWhileShapeInvariantTensorSpec(self): i = constant_op.constant(0) x = constant_op.constant([1]) c = lambda i, _: i < 10 b = lambda i, x: (i + 1, array_ops_stack.stack([x, x])) shape_invariants = [ tensor_lib.TensorSpec([], dtype=dtypes.int32), tensor_lib.TensorSpec(None, dtype=dtypes.int32)] while_loop_tf.while_loop(c, b, [i, x], shape_invariants) # TODO(b/131265085) Remove this decorator when bug is fixed. @test_util.build_as_function_and_v1_graph def testWhileShapeInvariantWrongTypeSpecType(self): c = lambda i, _: i < 10 b = lambda i, x: (i + 1, x) i = constant_op.constant(0) x = sparse_tensor.SparseTensor([[0]], [1.0], [10]) shape_invariants = [ tensor_lib.TensorSpec([], dtype=dtypes.int32), sparse_tensor.SparseTensorSpec([None])] while_loop_tf.while_loop(c, b, [i, x], shape_invariants) x2 = constant_op.constant([1]) with self.assertRaises(TypeError): while_loop_tf.while_loop(c, b, [i, x2], shape_invariants) x3 = ragged_factory_ops.constant([[1, 2], [3]]) with self.assertRaises(TypeError): while_loop_tf.while_loop(c, b, [i, x3], shape_invariants) i2 = constant_op.constant(0.0) with self.assertRaises(TypeError): while_loop_tf.while_loop(c, b, [i2, x], shape_invariants) # TODO(b/131265085) Remove this decorator when bug is fixed. 
  @test_util.build_as_function_and_v1_graph
  def testWhileShapeInvariantBadType(self):
    # Shape invariants that are not shapes/specs at all must be rejected.
    i = constant_op.constant(0)
    x = constant_op.constant([1])
    c = lambda i, _: i < 10
    b = lambda i, x: (i + 1, x)
    with self.assertRaises((ValueError, TypeError)):
      while_loop_tf.while_loop(c, b, [i, x], ["foo", "bar"])

  def _testNestedWhile_1(self, use_gpu):
    # Inner loop pinned to CPU inside an outer loop; checks the combined
    # result 225 = 5 iterations of adding cpu_sum(0) == 45.
    with self.cached_session(use_gpu=use_gpu):
      n = constant_op.constant(0)

      def cpu_sum(s):
        c = lambda i, s: math_ops.less(i, 10)

        def b(i, s):
          i1 = math_ops.add(i, 1)
          with ops.device("/cpu:0"):
            s1 = math_ops.add(i, s)
          return i1, s1

        _, r_s = while_loop_tf.while_loop(c, b, [n, s])
        return r_s

      c = lambda x: math_ops.less(x, 200)
      b = lambda x: math_ops.add(x, cpu_sum(n))
      r = while_loop_tf.while_loop(c, b, [n])
      self.assertEqual(225, self.evaluate(r))

  def testNestedWhile_1(self):
    self._testNestedWhile_1(use_gpu=False)
    self._testNestedWhile_1(use_gpu=True)

  def _testNestedWhile_2(self, use_gpu):
    # Test the cases that A -> Enter and Exit -> A are partitioned.
    with self.cached_session(use_gpu=use_gpu):
      s0 = constant_op.constant(2.0)

      def inner_loop(s):
        c = lambda s: math_ops.less(s, 20.0)

        def b(s):
          s1 = math_ops.add(s, s)
          return s1

        r_s = while_loop_tf.while_loop(c, b, [s], parallel_iterations=1)
        return r_s

      outer_c = lambda x: math_ops.less(x, 3000.0)

      def outer_b(x):
        x = logging_ops.Print(x, [x])  # Edge "Print -> Enter" is partitioned
        x = inner_loop(x)
        with ops.device("/cpu:0"):
          x = math_ops.square(x)  # Edge "Exit -> Square" is partitioned
        return x

      r = while_loop_tf.while_loop(
          outer_c, outer_b, [s0], parallel_iterations=1)
      self.assertEqual(1048576.0, self.evaluate(r))

  def testNestedWhile_2(self):
    self._testNestedWhile_2(use_gpu=False)
    self._testNestedWhile_2(use_gpu=True)

  @test_util.run_v1_only("b/120545219")
  def testWhileWithControl_1(self):
    # Control dependency on a loop variable inside the body.
    with self.cached_session():
      n = constant_op.constant(0)
      r = constant_op.constant(0)
      condition = lambda n_, r_: math_ops.less(n_, 10)

      def body(n_, r_):
        n_ = math_ops.add(n_, 1)
        with r_.graph.control_dependencies([r_]):
          r_ = constant_op.constant(12)
        return [n_, r_]

      res = while_loop_tf.while_loop(
          condition, body, [n, r], parallel_iterations=1)
      self.assertAllEqual(12, res[1])

  @test_util.run_deprecated_v1
  def testWhileWithControl_2(self):
    # Same as _1 but the controlled constant is also the loop condition input.
    with self.cached_session():
      r = constant_op.constant(0)
      condition = lambda r_: math_ops.less(r_, 10)

      def body(r_):
        with r_.graph.control_dependencies([r_]):
          r_ = constant_op.constant(12)
        return [r_]

      res = while_loop_tf.while_loop(
          condition, body, [r], parallel_iterations=1)
      self.assertAllEqual(12, self.evaluate(res))

  @test_util.run_v1_only("b/120545219")
  def testWhileWithControl_3(self):
    # Control dependency from outside the loop on a captured constant.
    with self.cached_session() as sess:
      b = array_ops.placeholder(dtypes.bool)
      c = constant_op.constant(1)
      x0 = constant_op.constant(0)
      with ops.control_dependencies([b]):
        r = while_loop_tf.while_loop(lambda x: x < 10, lambda x: x + c, [x0])
      self.assertEqual(10, sess.run(r, {b: True}))

  @test_util.run_v1_only("b/120545219")
  def testWhileWithControl_4(self):
    # As _3, but the captured constant passes through an identity in the body.
    with self.cached_session() as sess:
      b = array_ops.placeholder(dtypes.bool)
      c = constant_op.constant(1)
      x0 = constant_op.constant(0)
      with ops.control_dependencies([b]):
        r = while_loop_tf.while_loop(
            lambda x: x < 10, lambda x: x + array_ops.identity(c), [x0])
      self.assertEqual(10, sess.run(r, {b: True}))

  @test_util.run_v1_only("b/120545219")
  def testWhileWithControl_5(self):
    # Control dependency on an external placeholder applied inside the body.
    with self.cached_session() as sess:
      b = array_ops.placeholder(dtypes.bool)
      c = constant_op.constant(1)
      x0 = constant_op.constant(0)

      def body(x):
        with ops.control_dependencies([b]):
          return x + c

      r = while_loop_tf.while_loop(lambda x: x < 10, body, [x0])
      self.assertEqual(10, sess.run(r, {b: True}))

  def testWhileCondWithControl(self):
    # Ensure that no control edges by an outer control dependency context are
    # added to nodes inside cond/while contexts.
          math_ops.subtract(x, one))
      # pylint: enable=undefined-variable
      r = while_loop_tf.while_loop(c, b, [i])
      self.assertAllEqual(10, self.evaluate(r))

  def testWhileCond_2(self):
    # A cond inside the loop body; the true branch drives the counter.
    with self.cached_session():
      n = ops.convert_to_tensor(0, name="n")
      c = lambda x: math_ops.less(x, 10)
      b = lambda x: tf_cond.cond(
          constant_op.constant(True), lambda: math_ops.add(x, 1), lambda: n)
      r = while_loop_tf.while_loop(c, b, [n])
      self.assertAllEqual(10, self.evaluate(r))

  def testWhileCond_3(self):
    # As _2, but the cond predicate is itself a computed tensor.
    with self.cached_session():
      n = ops.convert_to_tensor(0)
      c = lambda x: math_ops.less(x, 10)
      # pylint: disable=undefined-variable
      # for OSS build
      b = lambda x: tf_cond.cond(math_ops.less(0, 1),
                                 lambda: math_ops.add(x, 1),
                                 lambda: math_ops.subtract(x, 1))
      # pylint: enable=undefined-variable
      r = while_loop_tf.while_loop(c, b, [n])
      self.assertAllEqual(10, self.evaluate(r))

  @test_util.run_deprecated_v1
  def testWhileCondGradMultiDevice(self):
    # First and second-order gradients of a cond-in-while graph with the
    # forward pass, gradient, and gradient-of-gradient on different devices.
    config = config_pb2.ConfigProto(device_count={"CPU": 2},
                                    allow_soft_placement=True)
    with self.cached_session(config=config) as sess:
      pred = array_ops.placeholder(dtypes.bool, [])
      x_init = constant_op.constant(1.0)

      with ops.device("/cpu:0"):
        z = while_loop_tf.while_loop(
            lambda i, _: i < 3,
            lambda i, x: (i + 1,
                          tf_cond.cond(pred, lambda: x * 2.0, lambda: 10.0)),
            [0, x_init])

      with ops.device("/cpu:1"):
        grad = gradients_impl.gradients(z, x_init)[0]

      with ops.device("/cpu:0"):
        grad_grad = gradients_impl.gradients(grad, x_init)[0]

      self.assertEqual(sess.run(grad, {pred: True}), 8.0)
      self.assertEqual(sess.run(grad, {pred: False}), 0.0)

      # Second-order gradients are only checked under control flow v2.
      if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
        return

      self.assertEqual(sess.run(grad_grad, {pred: True}), 0.0)
      self.assertEqual(sess.run(grad_grad, {pred: False}), 0.0)

  # NOTE: It is ok to have parallel_iterations > 1
  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_deprecated_v1
  def testWhileUpdateVariable_1(self):
    # scatter_update on a ref variable from inside the loop body, with the
    # update forced to run before the counter increment.
    with self.cached_session():
      select = variables.Variable([3.0, 4.0, 5.0])
      n = constant_op.constant(0)

      def loop_iterator(j):
        return math_ops.less(j, 3)

      def loop_body(j):
        ns = state_ops.scatter_update(select, j, 10.0)
        nj = math_ops.add(j, 1)
        op = control_flow_ops.group(ns)
        nj = control_flow_ops.with_dependencies([op], nj)
        return [nj]

      r = while_loop_tf.while_loop(
          loop_iterator, loop_body, [n], parallel_iterations=1)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(3, self.evaluate(r))
      result = self.evaluate(select)
      self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)

  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileUpdateVariable_2(self):
    # As _1, but two variables are updated per iteration.
    with self.cached_session():
      select1 = variables.Variable([3.0, 4.0, 5.0])
      select2 = variables.Variable([3.0, 4.0, 5.0])
      n = constant_op.constant(0)

      def loop_iterator(j):
        return math_ops.less(j, 3)

      def loop_body(j):
        ns1 = state_ops.scatter_update(select1, j, 10.0)
        ns2 = state_ops.scatter_update(select2, j, 10.0)
        nj = math_ops.add(j, 1)
        op = control_flow_ops.group(ns1, ns2)
        nj = control_flow_ops.with_dependencies([op], nj)
        return [nj]

      r = while_loop_tf.while_loop(
          loop_iterator, loop_body, [n], parallel_iterations=1)
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(3, self.evaluate(r))
      result1 = self.evaluate(select1)
      self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)
      result2 = self.evaluate(select2)
      self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2)

  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileUpdateVariable_3(self):
    # The scatter_update result is itself carried as a loop variable.
    with self.cached_session():
      select = variables.Variable([3.0, 4.0, 5.0])
      n = constant_op.constant(0)

      def loop_iterator(j, _):
        return math_ops.less(j, 3)

      def loop_body(j, _):
        ns = state_ops.scatter_update(select, j, 10.0)
        nj = math_ops.add(j, 1)
        return [nj, ns]

      r = while_loop_tf.while_loop(
          loop_iterator, loop_body,
          [n, array_ops.identity(select)],
          parallel_iterations=1)
      self.evaluate(variables.global_variables_initializer())
      result = r[1]
      self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)

  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileUpdateVariable_4(self):
    # An assign_add op created *outside* the loop is re-run by the body.
    with self.cached_session():
      var_a = variables.Variable(0, name="a")
      var_b = variables.Variable(0, name="b")
      self.evaluate(variables.global_variables_initializer())

      c = constant_op.constant(0, name="c")
      asn1 = state_ops.assign_add(var_a, 1, name="a_add")

      # Loop condition
      def pred(i):
        return math_ops.less(i, 10)

      # Loop body
      def loop_body(i):
        asn2 = state_ops.assign_add(var_b, asn1, name="b_add")
        with ops.control_dependencies([asn2]):
          ni = math_ops.add(i, 1, name="i_add")
        return ni

      lpa = while_loop_tf.while_loop(
          pred, loop_body, [c], parallel_iterations=1)

      self.assertEqual(0, self.evaluate(var_b))
      self.evaluate(lpa)  # Run the loop
      self.assertEqual(10, self.evaluate(var_b))

  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileUpdateVariable_5(self):
    with self.cached_session():
      # Create some variables.
      var_a = variables.Variable(0, name="a")
      var_b = variables.Variable(0, name="b")
      self.evaluate(variables.global_variables_initializer())

      # Change condition to check var_b
      def pred(_):
        return math_ops.less(var_b, 10)

      # Change body to increment var_b
      def loop_body(i):
        asn1 = state_ops.assign_add(
            var_a, constant_op.constant(1), name="a_add")
        asn2 = state_ops.assign_add(
            var_b, constant_op.constant(1), name="b_add")
        with ops.control_dependencies([asn1, asn2]):
          inc_b = array_ops.identity(var_b)
        return inc_b

      lpa = while_loop_tf.while_loop(
          pred, loop_body, [var_b], parallel_iterations=1, name="loop")

      self.assertEqual(0, self.evaluate(var_b))
      self.evaluate(lpa)  # Run the loop
      self.assertEqual(10, self.evaluate(var_a))
      self.assertEqual(10, self.evaluate(var_b))

  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileUpdateVariable_6(self):
    with self.cached_session():
      # Create some variables.
      var_a = variables.Variable(0, name="a")
      var_b = variables.Variable(0, name="b")
      c = constant_op.constant(0)
      self.evaluate(variables.global_variables_initializer())

      # Loop condition
      def pred(i):
        return math_ops.less(i, 10)

      # Loop body: var_b accumulates var_a, which grows by 1 each iteration,
      # so var_b ends at 1 + 2 + ... + 10 = 55.
      def loop_body(i):
        asn1 = state_ops.assign_add(var_a, 1, name="a_add")
        with ops.control_dependencies([asn1]):
          asn2 = state_ops.assign_add(var_b, var_a, name="b_add")
        with ops.control_dependencies([asn2]):
          ni = math_ops.add(i, 1, name="i_add")
        return ni

      lpa = while_loop_tf.while_loop(
          pred, loop_body, [c], parallel_iterations=1, name="loop")

      self.assertEqual(0, self.evaluate(var_b))
      self.evaluate(lpa)  # Run the loop
      self.assertEqual(55, self.evaluate(var_b))
      self.assertEqual(10, self.evaluate(var_a))

  @test_util.run_v1_only("b/120545219")
  def testWhileQueue_1(self):
    # Enqueue the loop counter each iteration; dequeue after the loop.
    with self.cached_session():
      q = data_flow_ops.FIFOQueue(-1, dtypes.int32)
      i = constant_op.constant(0)

      def c(i):
        return math_ops.less(i, 10)

      def b(i):
        ni = math_ops.add(i, 1)
        ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
        return ni

      r = while_loop_tf.while_loop(c, b, [i], parallel_iterations=1)
      self.assertEqual([10], self.evaluate(r))
      for i in range(10):
        self.assertEqual([i], self.evaluate(q.dequeue()))

  @test_util.run_v1_only("b/120545219")
  def testWhileTimeOut(self):
    # An infinite loop must trip the 1ms session timeout.
    run_options = config_pb2.RunOptions(timeout_in_ms=1)
    with self.cached_session() as sess:
      n = constant_op.constant(0)
      c = lambda x: True
      b = lambda x: math_ops.add(x, 1)
      r = while_loop_tf.while_loop(c, b, [n])
      with self.assertRaises(errors_impl.DeadlineExceededError):
        sess.run(r, options=run_options)

  @test_util.disable_control_flow_v2("b/117119329 (stack)")
  @test_util.run_v1_only("b/120545219")
  def testWhileStack_1(self):
    # Push 0..9 onto a stack in one loop, then pop and sum in a second loop;
    # 0 + 1 + ... + 9 == 45.
    with self.cached_session():
      s = gen_data_flow_ops.stack_v2(-1, dtypes.int32, stack_name="foo")
      i = constant_op.constant(0)

      def c(i):
        return math_ops.less(i, 10)

      def b(i):
        ni = math_ops.add(i, 1)
        ni = control_flow_ops.with_dependencies(
            [gen_data_flow_ops.stack_push_v2(s, i)], ni)
        return ni

      r = while_loop_tf.while_loop(c, b, [i], parallel_iterations=1)

      x = constant_op.constant(0)

      def c1(i, _):
        return math_ops.greater(i, 0)

      def b1(i, x):
        ni = math_ops.subtract(i, 1)
        nx = x + gen_data_flow_ops.stack_pop_v2(s, dtypes.int32)
        return [ni, nx]

      _, rx = while_loop_tf.while_loop(
          c1, b1, [r, x],
          [r.get_shape(), tensor_shape.unknown_shape()],
          parallel_iterations=1)
      self.assertEqual(45, self.evaluate(rx))

  def _testWhileGrad_ColocateGradients(self, colocate):
    # Verifies device placement of gradient ops with and without
    # colocate_gradients_with_ops.
    gpu_dev_name = test.gpu_device_name() if test.is_gpu_available(
    ) else "/device:CPU:0"

    graph = ops.Graph()
    with graph.as_default():
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)

      def b(x):
        with ops.device(gpu_dev_name):
          return math_ops.square(x)

      loop = while_loop_tf.while_loop(c, b, [v], parallel_iterations=1)
      r = gradients_impl.gradients(
          loop, v, colocate_gradients_with_ops=colocate)[0]

    r_ops = graph.get_operations()
    r_devices = [(op.name, op.device) for op in r_ops]

    self.assertTrue(any("Square" in op.name for op in r_ops))

    for (name, dev) in r_devices:
      if not colocate and name.endswith("Square"):
        # Only forward graph contain gpu in Square device
        self.assertTrue(gpu_dev_name in dev)
      elif colocate and "Square" in name:
        # Forward and backward graphs contain gpu in Square/Square_grad devices
        self.assertTrue(gpu_dev_name in dev)
      else:
        self.assertFalse(gpu_dev_name in dev)

    with self.session(graph=graph) as sess:
      self.assertAllClose(1024.0, self.evaluate(r))

  @test_util.disable_control_flow_v2("b/116351701 (colocation)")
  @test_util.run_v1_only("b/120545219")
  def testWhileGrad_ColocateGradients(self):
    self._testWhileGrad_ColocateGradients(colocate=False)
    self._testWhileGrad_ColocateGradients(colocate=True)

  @test_util.run_v1_only("b/120545219")
  def testWhileGrad_Square(self):
    # d/dv of ((v^2)^2)^2 at v=2 (loop squares until >= 100) is 1024.
    with self.cached_session():
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      b = math_ops.square
      r = while_loop_tf.while_loop(c, b, [v], parallel_iterations=1)
      r = tf_cond.cond(math_ops.less(1, 2), lambda: r, lambda: v)

      r = gradients_impl.gradients(r, v)[0]
      self.assertAllClose(1024.0, self.evaluate(r))

  @test_util.run_v1_only("b/120545219")
  def testWhileGrad_Shape(self):
    # Gradient wrt a placeholder of unknown length captured by the body.
    with self.cached_session():
      x = array_ops.placeholder(dtypes.float32, shape=[None])
      v = constant_op.constant([2.0], name="v")
      n = constant_op.constant(0, name="n")
      c = lambda i, v: math_ops.less(i, 5)
      b = lambda i, v: [i + 1, math_ops.multiply(x, v)]
      r = while_loop_tf.while_loop(
          c, b, [n, v],
          [n.get_shape(), tensor_shape.unknown_shape()],
          parallel_iterations=1)

      r = gradients_impl.gradients(r[1], x)[0]
      self.assertEqual([None], r.get_shape().as_list())
      self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]}))

  @test_util.run_deprecated_v1
  def testWhileGrad_BaseShape(self):
    # Loop whose condition is always False; the gradient still flows through
    # the (unexecuted) loop plus the y = x^2 term.
    with self.cached_session() as sess:
      x = array_ops.placeholder(dtypes.float32, [None])
      v0 = constant_op.constant([2.0, 2.0], name="v")
      c = lambda v: constant_op.constant(False)
      b = lambda v: math_ops.multiply(v, x)
      r = while_loop_tf.while_loop(c, b, [v0])
      y = math_ops.square(x)

      r = gradients_impl.gradients([r, y], x)[0]
      self.assertAllClose([2.0, 4.0], sess.run(r, feed_dict={x: [1.0, 2.0]}))

  @test_util.run_deprecated_v1
  @test_util.enable_output_all_intermediates
  def testWhileGradAfterSessionRun(self):
    # Gradients requested *after* the forward loop has already been evaluated.
    v0 = constant_op.constant(2.)
    r = while_loop_tf.while_loop(
        lambda _: True, lambda v: v * v, [v0], maximum_iterations=3)

    self.assertAllEqual(r, 256.)
    grad = gradients_impl.gradients(r, v0)[0]
    self.assertAllClose(grad, 1024.)

  @test_util.run_deprecated_v1
  @test_util.enable_output_all_intermediates
  def testNestedWhileGradAfterSessionRun(self):
    # As above, but with a nested while loop in the body.
    v0 = constant_op.constant(2.)

    def body(v):
      inner_v0 = constant_op.constant(1.)
      return while_loop_tf.while_loop(
          lambda _: True, lambda x: x * v, [inner_v0], maximum_iterations=2)

    r = while_loop_tf.while_loop(
        lambda _: True, body, [v0], maximum_iterations=3)

    self.assertAllEqual(r, 256.)
    grad = gradients_impl.gradients(r, v0)[0]
    self.assertAllClose(grad, 1024.)

  @test_util.run_v1_only("b/120545219")
  def testWhileGrad_MultipleUses(self):
    # The loop output is used twice (r * r) before differentiating.
    with self.cached_session():
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      b = math_ops.square
      r = while_loop_tf.while_loop(c, b, [v], parallel_iterations=1)
      r = math_ops.multiply(r, r)

      r = gradients_impl.gradients(r, v)[0]
      self.assertEqual(524288.0, self.evaluate(r))

  @test_util.run_v1_only("b/120545219")
  def testWhileGrad_LoopAdd(self):
    # The loop output is added to itself before differentiating.
    with self.cached_session():
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      b = math_ops.square
      r = while_loop_tf.while_loop(c, b, [v], parallel_iterations=1)
      r = math_ops.add(r, r)

      r = gradients_impl.gradients(r, v)[0]
      self.assertAllClose(2048.0, self.evaluate(r))

  def _testWhileGrad_Mul(self, use_gpu, p_iters):
    # Gradients wrt both the loop variable and a captured constant, under
    # varying parallel_iterations.
    with self.cached_session(use_gpu=use_gpu) as sess:
      a = constant_op.constant(3.0, name="a")
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      b = lambda v: math_ops.multiply(v, a)
      r = while_loop_tf.while_loop(c, b, [v],
                                   parallel_iterations=p_iters)

      grad_a, grad_v = gradients_impl.gradients(r, [a, v])
      grad_a_val, grad_v_val = self.evaluate([grad_a, grad_v])
      self.assertAllClose(216.0, grad_a_val)
      self.assertAllClose(81.0, grad_v_val)

  @test_util.run_deprecated_v1
  def testWhileGrad_Mul(self):
    self._testWhileGrad_Mul(use_gpu=False, p_iters=1)
    self._testWhileGrad_Mul(use_gpu=False, p_iters=10)
    self._testWhileGrad_Mul(use_gpu=True, p_iters=1)
    self._testWhileGrad_Mul(use_gpu=True, p_iters=10)

  def testWhileGradInControlDeps(self):
    # Gradients computed inside a control_dependencies block on the loop
    # output; expected value is d/dx (x^4) = 4 x^3 at x=2.

    @eager_def_function.function
    def f():
      x_init = constant_op.constant(2.)
      loop_cond = lambda i, x: math_ops.less(i, 2)
      loop_body = lambda i, x: [i + 1, x**2]
      _, x = while_loop_tf.while_loop(loop_cond, loop_body, [0, x_init])
      with ops.control_dependencies([x]):
        (grad,) = gradients_impl.gradients(x, x_init)
        return grad

    self.assertAllEqual(f(), 4. * 2.**3)  # 4 * x_init ^ 3

  @test_util.run_deprecated_v1
  def testTfFunctionInV1WhileLoop(self):
    # This test specifically tests that creating a Const node inside a
    # tf.function inside a v1 while_loop while inlining is turned on works.
    config = opt_cfg()
    assert config.graph_options.optimizer_options.do_function_inlining
    with session.Session(config=config):

      @eager_def_function.function
      def loop_body(i):
        # Here we create the const.
        return i + 1.

      loop_cond = lambda i: True
      x = while_loop_tf.while_loop(
          loop_cond, loop_body, [0.], maximum_iterations=5)
      self.assertAllEqual(x, 5.)
def _testNestedWhileCondWhileGrad(self, use_gpu): with self.cached_session(use_gpu=use_gpu): v = constant_op.constant(1.0) def inner_loop(s): z = constant_op.constant(0) c = lambda i, x: math_ops.less(i, 4) b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)] return while_loop_tf.while_loop(c, b, [z, s]) c = lambda x: math_ops.less(x, 128.0) def b(x): return tf_cond.cond( constant_op.constant(True), lambda: math_ops.square(inner_loop(x)[1]), lambda: math_ops.multiply(x, 2.0)) r = while_loop_tf.while_loop(c, b, [v]) r = gradients_impl.gradients(r, v)[0] self.assertAllClose(512.0, self.evaluate(r)) @test_util.run_deprecated_v1 def testNestedWhileCondWhileGrad(self): self._testNestedWhileCondWhileGrad(use_gpu=False) @test_util.run_deprecated_v1 def testNestedWhileCondWhileGradGpu(self): self._testNestedWhileCondWhileGrad(use_gpu=True) @test_util.run_v1_only("b/120545219") def testWhileGrad_Variable(self): with self.cached_session(): a = variables.Variable(3.0) v = constant_op.constant(2.0, name="v") c = lambda v: math_ops.less(v, 100.0) b = lambda v: math_ops.multiply(v, a) r = while_loop_tf.while_loop(c, b, [v], parallel_iterations=1) r = gradients_impl.gradients(r, a) self.evaluate(variables.global_variables_initializer()) self.assertAllClose(216.0, r[0]) @test_util.run_deprecated_v1 def testWhileGrad_ResourceVariable(self): with self.cached_session(): a = resource_variable_ops.ResourceVariable(3.0) v = constant_op.constant(2.0, name="v") c = lambda v: math_ops.less(v, 100.0) b = lambda v: math_ops.multiply(v, a) r = while_loop_tf.while_loop(c, b, [v], parallel_iterations=1) g = gradients_impl.gradients(r, a) self.evaluate(variables.global_variables_initializer()) self.assertAllClose(216.0, g[0]) def testWhileGrad_EagerResourceVariable(self): with context.eager_mode(): a = resource_variable_ops.ResourceVariable( np.ones([2, 2], dtype=np.float32)) v = constant_op.constant(1.0) @eager_def_function.function def fn(): r = while_loop_tf.while_loop( lambda i, 
_: i < 2, lambda i, x: (i + 1, x * math_ops.reduce_sum(a) * v), [0, 1.0])[1] return gradients_impl.gradients(r, [v])[0] self.assertEqual(self.evaluate(fn()), 32.) def testWhileGrad_ResourceVarInFunctionCall(self): @eager_def_function.function def foo(x, var): return x + math_ops.reduce_sum(var.sparse_read([1, 3])) @eager_def_function.function def bar(var): r = while_loop_tf.while_loop( lambda i, _: i < 2, lambda i, x: (i + 1, foo(x, var)), [0, 0.0])[1] return gradients_impl.gradients(r, var)[0] var = resource_variable_ops.ResourceVariable([1., 2., 3., 4.]) self.evaluate(variables.global_variables_initializer()) grad = self.evaluate(bar(var)) self.assertAllEqual(gradient_checker_v2._to_numpy(grad), [0., 2., 0., 2.]) def testWhileGrad_ResourceVarInNestedFunctionCall(self): @eager_def_function.function def foo(x, var): return x + math_ops.reduce_sum(var.sparse_read([1, 3])) @eager_def_function.function def foo2(x, var): return foo(x, var) @eager_def_function.function def bar(var): r = while_loop_tf.while_loop( lambda i, _: i < 2, lambda i, x: (i + 1, foo2(x, var)), [0, 0.0])[1] return gradients_impl.gradients(r, var)[0] var = resource_variable_ops.ResourceVariable([1., 1., 1., 1.]) self.evaluate(variables.global_variables_initializer()) grad = self.evaluate(bar(var)) self.assertAllEqual(gradient_checker_v2._to_numpy(grad), [0., 2., 0., 2.]) def testWhileGrad_ResourceVarInLoopInFunctionCall(self): if test.is_gpu_available(): self.skipTest("b/128635252") @eager_def_function.function def foo(x, var): return while_loop_tf.while_loop( lambda j, _: j < 3, lambda j, y: (j + 1, y + math_ops.reduce_sum(var.sparse_read([1, 2]))), [0, x])[1] @eager_def_function.function def bar(var): r = while_loop_tf.while_loop( lambda i, _: i < 2, lambda i, x: (i + 1, foo(x, var)), [0, 0.0])[1] return gradients_impl.gradients(r, var)[0] var = resource_variable_ops.ResourceVariable([1., 1., 1., 1.]) self.evaluate(variables.global_variables_initializer()) grad = self.evaluate(bar(var)) 
self.assertAllEqual(gradient_checker_v2._to_numpy(grad), [0., 6., 6., 0.]) def testWhileCondGrad_ResourceVarInFunctionCall(self): @eager_def_function.function def foo(x, var): return x + var.sparse_read([1])[0] def body(i, x): return (i + 1, tf_cond.cond( math_ops.equal(i % 2, 0), lambda: foo(x, var1), lambda: foo(x, var2))) @eager_def_function.function def bar(var1, var2): r = while_loop_tf.while_loop(lambda i, _: i < 4, body, [0, 0.0]) return gradients_impl.gradients(r, [var1, var2]) var1 = resource_variable_ops.ResourceVariable([1., 2., 3.]) var2 = resource_variable_ops.ResourceVariable([4., 5.]) self.evaluate(variables.global_variables_initializer()) grads = self.evaluate(bar(var1, var2)) self.assertAllEqual(gradient_checker_v2._to_numpy(grads[0]), [0., 2., 0.]) self.assertAllEqual(gradient_checker_v2._to_numpy(grads[1]), [0., 2.]) @test_util.run_deprecated_v1 def testWhileGrad_ResourceVarSparseRead(self): # NOTE(skyewm): this test is interesting because the gradient is the # aggregation result of IndexedSlices and Tensors. var = resource_variable_ops.ResourceVariable(np.ones(5), dtype=dtypes.float32) r = while_loop_tf.while_loop( lambda i, _: i < 3, lambda i, x: (i + 1, x * math_ops.reduce_sum(var.sparse_read([1, 3]))), [0, constant_op.constant(1.0)])[1] grad = gradients_impl.gradients(r, var)[0] self.evaluate(variables.global_variables_initializer()) grad_val = self.evaluate(grad) arr = gradient_checker_v2._to_numpy(grad_val) self.assertAllEqual(arr, [0., 12., 0., 12., 0.]) @test_util.run_deprecated_v1 def testWhileGrad_MultiResourceVarSparseRead(self): # NOTE(skyewm): this test is interesting because the gradient is the # aggregation result of IndexedSlices and Tensors. var1 = resource_variable_ops.ResourceVariable(np.ones(5), dtype=dtypes.float32) var2 = resource_variable_ops.ResourceVariable(np.ones(3), dtype=dtypes.float32) x1_init = constant_op.constant([0., 0.]) x2_init = constant_op.constant(1.) x3_init = constant_op.constant(1.) 
# NOTE(review): mangled-extraction chunk; tokens reproduced as-is, indentation
# reconstructed. This opens with the body of testWhileGrad_MultiResourceVarSparseRead,
# whose start is on the previous line of the chunk.
    def body(i, unused_x1, x2, x3):
      y1 = var1.sparse_read([1, 3])
      y2 = x2 * 2
      y3 = x3 * math_ops.reduce_sum(var2.sparse_read([0]))
      return i + 1, y1, y2, y3

    r = while_loop_tf.while_loop(
        lambda i, x1, x2, x3: i < 3, body,
        [0, x1_init, x2_init, x3_init])[1:]

    var1_grad, var2_grad = gradients_impl.gradients(r, [var1, var2])

    self.evaluate(variables.global_variables_initializer())
    var1_grad_val = self.evaluate(var1_grad)
    var2_grad_val = self.evaluate(var2_grad)
    self.assertAllEqual(gradient_checker_v2._to_numpy(var1_grad_val),
                        [0., 1., 0., 1., 0.])
    self.assertAllEqual(gradient_checker_v2._to_numpy(var2_grad_val),
                        [3., 0., 0.])

  def testWhileGrad_Gather(self):
    # NOTE(skyewm): this test is interesting because the gather gradient
    # function returns an IndexedSlices.
    @tf_function_in_tf2
    def fn():
      x = constant_op.constant([1., 1., 1., 1., 1.])
      y = while_loop_tf.while_loop(
          lambda i, _: i < 3,
          lambda i, x: (i + 1, x + array_ops.gather(x, [0])),
          [0, x[:1]])[1]
      z = y * 3.0
      grad = gradients_impl.gradients(z, x)[0]
      return y, grad

    y, grad = fn()
    self.assertEqual(self.evaluate(y), 8.)
    self.assertAllEqual(self.evaluate(grad), [24., 0., 0., 0., 0.])

  def testWhileGrad_GatherNoFanOut(self):
    # NOTE(skyewm): this test is interesting because the gather gradient
    # function returns an IndexedSlices.
    @tf_function_in_tf2
    def fn():
      x = constant_op.constant([1., 1., 1., 1., 1.])
      y = while_loop_tf.while_loop(
          lambda i, _: i < 3,
          lambda i, x: (i + 1, array_ops.gather(x, [0])),
          [0, x[:1]])[1]
      z = y * 3.0
      grad = gradients_impl.gradients(z, x)[0]
      return y, grad

    y, grad = fn()
    self.assertEqual(self.evaluate(y), 1.)
# NOTE(review): mangled-extraction chunk; tokens reproduced as-is, indentation
# reconstructed. Opens with the final assertion of testWhileGrad_GatherNoFanOut.
    self.assertAllEqual(self.evaluate(grad), [3., 0., 0., 0., 0.])

  @test_util.run_v1_only("b/120545219")
  def testWhileGradInCond(self):

    with self.cached_session():
      n = ops.convert_to_tensor(1.0, name="n")
      x = array_ops.placeholder(dtypes.float32, shape=None)
      c = lambda n: math_ops.less(n, 10.0)
      b = lambda n: math_ops.add(n, x)

      def fn1():
        r = while_loop_tf.while_loop(c, b, [n],
                                     [tensor_shape.unknown_shape()])
        return gradients_impl.gradients(r, x)[0]

      r = tf_cond.cond(math_ops.less(1, 2), fn1, lambda: x)
      self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))

  @test_util.disable_control_flow_v2("b/116340060")
  @test_util.run_v1_only("b/120545219")
  def testGradInWhileWrtInitialLoopVal(self):
    with self.cached_session():
      x = array_ops.placeholder(dtypes.float32, shape=(), name="x")
      y = x + 1

      def body(i, v):
        z = v * 2
        return i + 1, gradients_impl.gradients(z, x)[0]

      with self.assertRaisesRegex(
          ValueError,
          "Cannot compute gradient inside while loop with respect to op 'x'. "
          "We do not support taking the gradient wrt or through the initial "
          "value of a loop variable. Gradients can be computed through "
          "loop invariants or wrt the input parameters to the loop body."):
        while_loop_tf.while_loop(lambda i, x: i < 3, body, [0, y])

  @test_util.run_v1_only("b/120545219")
  def testWhileGradInWhile(self):
    with self.cached_session():
      n = ops.convert_to_tensor(1.0, name="n")
      x = array_ops.placeholder(dtypes.float32, shape=None)
      c = lambda n: math_ops.less(n, 10.0)
      b = lambda n: math_ops.add(n, x)

      def b1(n):
        r = while_loop_tf.while_loop(c, b, [n],
                                     [tensor_shape.unknown_shape()])
        return gradients_impl.gradients(r, x)

      r = while_loop_tf.while_loop(
          lambda n: n < 6.0, b1, [n],
          [tensor_shape.unknown_shape()])
      self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))

  @test_util.run_v1_only("b/120545219")
  def testCondGradInNestedWhiles(self):

    def outer_body(i, x):
      _, x = while_loop_tf.while_loop(
          lambda j, x: j < 3, inner_body, [0, 0.0])
      return i + 1, x

    def inner_body(j, x):
      y = tf_cond.cond(math_ops.less(x, 1), lambda: 2 * x, lambda: x)
      return j + 1, gradients_impl.gradients(y, x)[0]

    i, x = while_loop_tf.while_loop(lambda i, x: i < 3, outer_body, [0, 0.0])

    with self.cached_session() as sess:
      i_val, x_val = self.evaluate([i, x])
      self.assertEqual(i_val, 3)
      self.assertAllClose(x_val, 1.0)

  @test_util.run_gpu_only
  def testGpuResourceAccess(self):
    with ops.device(test.gpu_device_name()):
      var = resource_variable_ops.ResourceVariable(constant_op.constant(3.0))

    @eager_def_function.function
    def foo():
      return while_loop_tf.while_loop(
          lambda i, _: i < 3,
          lambda i, x: (i + 1,
                        tf_cond.cond(
                            constant_op.constant(True),
                            lambda: x + var,
                            lambda: x)),
          [0, 0.0])[1]

    self.evaluate(variables.global_variables_initializer())
    self.assertEqual(self.evaluate(foo()), 9.0)

  def testNestedResourceAccess(self):
    var = resource_variable_ops.ResourceVariable(constant_op.constant(3.0))

    @eager_def_function.function
    def test_fn():
      x = constant_op.constant(0.0)
      r = while_loop_tf.while_loop(
          # Outer loop condition
          lambda i, y: i < 2,
          # Outer loop body
          lambda i, y: (
              i + 1,
              y + tf_cond.cond(
                  constant_op.constant(True),
                  # True branch
                  lambda: while_loop_tf.while_loop(
                      # Inner loop condition
                      lambda j, z: j < 3,
                      # Inner loop body
                      lambda j, z: (j + 1, z + math_ops.square(var)),
                      # Inner initial loop value
                      [0, y])[1],
                  # False branch
                  lambda: (0.0))),
          # Outer initial loop value
          [0, x])[1]

      grad = gradients_impl.gradients(r, x)[0]
      return r, grad

    self.evaluate(variables.global_variables_initializer())
    r, grad = self.evaluate(test_fn())
    # 2 * 3 * 3^2
    self.assertEqual(r, 81.0)
    # v1 control flow gets the wrong answer!!!
    # Gradient computation:
    #   f(x) = x + 3^2
    #   inner_loop(x) = f(f(f(x))) = x + 3*3^2 = x + 27
    #   g(x) = x + inner_loop(x) = 2x + 27
    #   outer_loop(x) = g(g(x)) = 4x + 81
    #   outer_loop'(x) = 4
    # Note that v1 control flow gets 4.0 as well if the cond is removed.
    if control_flow_util.ENABLE_CONTROL_FLOW_V2:
      self.assertEqual(grad, 4.0)

  def testWhile_NestedInput(self):
    with self.cached_session() as sess:
      named = collections.namedtuple("named", ("a", "b"))
      loop_vars = [
          named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
          (constant_op.constant(2.0), constant_op.constant(3.0)),
          constant_op.constant(4.0)
      ]
      c = lambda lv0, _1, _2: lv0.a < 100.0

      def b(lv0, lv1, lv2):
        lv0 = named(a=lv0.a + 1, b=lv0.b)
        lv1 = (lv1[0] + 1, lv1[1])
        lv2 += 2
        return [lv0, lv1, lv2]

      r = while_loop_tf.while_loop(c, b, loop_vars)

      self.assertTrue(isinstance(r, list))
      self.assertTrue(isinstance(r[0], named))
      self.assertTrue(isinstance(r[1], tuple))
      self.assertTrue(isinstance(r[2], tensor_lib.Tensor))

      r_flattened = nest.flatten(r)
      self.assertEqual([100.0, 1.0, 102.0, 3.0, 4.0 + 100 * 2.0],
                       self.evaluate(r_flattened))

  @test_util.run_v1_only("b/120545219")
  def testWhile_NestedBadArityFails(self):
    with self.cached_session():
      named = collections.namedtuple("named", ("a", "b"))
      loop_vars = [
          named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
          (constant_op.constant(2.0), constant_op.constant(3.0)),
          constant_op.constant(4.0)
      ]
      c = lambda lv0, _1, _2: lv0.a < 100.0

      def b(lv0, lv1, _):
        return [lv0, lv1]

      with self.assertRaisesRegex(ValueError, "the same number of elements"):
        while_loop_tf.while_loop(c, b, loop_vars)

  @test_util.run_v1_only("b/120545219")
  def testWhileGrad_ys_xs(self):
    with self.cached_session():
      x = constant_op.constant(3.0, name="x")
      y = constant_op.constant(2.0, name="y")

      c = lambda x, y: math_ops.less(x, 100.0)

      def b(x, y):
        y1 = math_ops.add(x, y)
        x1 = math_ops.multiply(x, y1)
        return x1, y1

      rx, ry = while_loop_tf.while_loop(c, b, [x, y], parallel_iterations=1)

      r = gradients_impl.gradients([rx, ry], x)
      self.assertAllClose(304.0, r[0])
      r = gradients_impl.gradients([rx, ry], y)
      self.assertAllClose(124.0, r[0])
      r = gradients_impl.gradients([rx], x)
      self.assertAllClose(295.0, r[0])
      r = gradients_impl.gradients([rx], y)
      self.assertAllClose(120.0, r[0])

  @test_util.run_deprecated_v1
  def testWhileGrad_Dependency(self):
    with self.cached_session():
      i = constant_op.constant(0, name="i")
      x = constant_op.constant(2.0, name="x")

      c = lambda i, x: math_ops.less(i, 10)

      def b(i, x):
        x = math_ops.multiply(x, 2.0)
        i = math_ops.add(i, 1)
        return i, x

      ri, rx = while_loop_tf.while_loop(c, b, [i, x], parallel_iterations=1)

      r = gradients_impl.gradients([ri, rx], x)
      self.assertAllClose(1024.0, r[0])
      r = gradients_impl.gradients([rx], x)
      self.assertAllClose(1024.0, r[0])

  @test_util.run_v1_only("b/120545219")
  def testWhileGrad_NoGradient(self):
    with self.cached_session():
      v = constant_op.constant(2.0, name="v")
      c = lambda v: math_ops.less(v, 100.0)
      b = math_ops.square
      r = while_loop_tf.while_loop(c, b, [v], back_prop=False)
      r = math_ops.add(r, v)
      r = gradients_impl.gradients(r, v)
      self.assertAllClose(1.0, r[0])

  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileGrad_NoDependency(self):
    with self.cached_session() as sess:
      variable = variables.Variable(array_ops.ones([2, 3]))
      duration = array_ops.zeros([], dtype=dtypes.int32)

      def cond(duration, tensor, _):
        del tensor
        return duration < 10

      def body(duration, tensor, _):
        return (duration + 1, tensor, tensor)

      loop_vars = [duration, variable, variable]
      tensors = while_loop_tf.while_loop(
          cond=cond, body=body, loop_vars=loop_vars)
      cost = math_ops.reduce_sum(tensors[2])
      grad = gradients_impl.gradients(cost, [variable])
      self.evaluate(variables.global_variables_initializer())
      self.assertAllClose(np.ones([2, 3]), sess.run(grad[0]))

  @test_util.run_deprecated_v1
  def testWhileGrad_Const(self):
    with self.cached_session() as sess:
      c0 = constant_op.constant(0.0, name="c0")
      c1 = constant_op.constant(1.0, name="c1")
      duration = constant_op.constant(0, name="t")

      def cond(duration, _):
        return duration < 1

      def body(duration, _):
        return duration + 1, c1

      loop_vars = [duration, c0]
      tensors = while_loop_tf.while_loop(
          cond=cond, body=body, loop_vars=loop_vars)
      cost = math_ops.reduce_sum(tensors[1])
      grad = gradients_impl.gradients(cost, [c0])
      self.assertAllClose(0.0, sess.run(grad[0]))

  @test_util.run_v1_only("b/120545219")
  def testWhileGrad_SerialTwoLoops(self):
    with self.cached_session():
      i = constant_op.constant(0, name="i")
      x = constant_op.constant(2.0, name="x")

      c = lambda i, x: math_ops.less(i, 5)

      def b(i, x):
        x = math_ops.multiply(x, 2.0)
        i = math_ops.add(i, 1)
        return i, x

      _, rx = while_loop_tf.while_loop(c, b, [i, x], parallel_iterations=1)
      _, rx = while_loop_tf.while_loop(c, b, [i, rx], parallel_iterations=1)

      r = gradients_impl.gradients([rx], x)
      self.assertAllClose(1024.0, r[0])

  @test_util.run_v1_only("b/120545219")
  def testWhileGrad_ParallelTwoLoops(self):
    with self.cached_session():
      i = constant_op.constant(0, name="i")
      x = constant_op.constant(2.0, name="x")

      c = lambda i, x: math_ops.less(i, 5)

      def b(i, x):
        x = math_ops.multiply(x, 2.0)
        i = math_ops.add(i, 1)
        return i, x

      _, r1 = while_loop_tf.while_loop(c, b, [i, x], parallel_iterations=1)
      _, r2 = while_loop_tf.while_loop(c, b, [i, x], parallel_iterations=1)
      rx = math_ops.add(r1, r2)

      r = gradients_impl.gradients([rx], x)
      self.assertAllClose(64.0, r[0])
# NOTE(review): mangled-extraction chunk; tokens reproduced as-is, indentation
# reconstructed.
  @test_util.run_v1_only("b/120545219")
  def testWhileGrad_OneOutputWithControlDependencyOnSecond(self):
    with self.cached_session():
      i = constant_op.constant(0, name="i")
      x = constant_op.constant(1.0, name="x")
      y = constant_op.constant(1.0, name="y")
      c = lambda i, *_: math_ops.less(i, 1, name="cond_less")

      def b(i, xi, yi):
        # return (i + 1, xi, xi + yi)
        return (math_ops.add(i, 1, name="inc"),
                array_ops.identity(xi, name="xi"),
                math_ops.add(xi, yi, name="xi_plus_yi"))

      _, x_f, y_f = while_loop_tf.while_loop(c, b, [i, x, y])
      with ops.control_dependencies([x_f]):
        y_f_d = array_ops.identity(y_f, name="y_f_d")

      self.assertAllClose(2.0, self.evaluate(y_f_d))  # y_f_d = 1.0 + 1.0
      g = gradients_impl.gradients([y_f_d], [x])[0]
      self.assertTrue(g is not None)
      self.assertAllClose(1.0,
                          self.evaluate(g))  # y_f_d = x + 1.0, dy_f_d/dx = 1.0

  def _testNestedWhileGrad_Simple(self, use_gpu):
    with self.cached_session(use_gpu=use_gpu):
      v = constant_op.constant(1.0)

      def inner_loop(s):
        c = lambda x: math_ops.less(x, 4.0)
        b = lambda x: math_ops.multiply(x, 2.0)
        return while_loop_tf.while_loop(c, b, [s])

      c = lambda x: math_ops.less(x, 2.0)
      b = lambda x: math_ops.multiply(inner_loop(x), 2.0)
      r = while_loop_tf.while_loop(c, b, [v])

      r = gradients_impl.gradients(r, v)[0]
      self.assertAllClose(8.0, self.evaluate(r))

  @test_util.run_deprecated_v1
  def testNestedWhileGrad_Simple(self):
    self._testNestedWhileGrad_Simple(use_gpu=False)
    self._testNestedWhileGrad_Simple(use_gpu=True)

  @test_util.run_v1_only("b/120545219")
  def testNestedWhileGrad_SerialInner(self):
    with self.cached_session():
      v = constant_op.constant(1.0)

      def inner_loop1(s):
        z = constant_op.constant(0)
        c = lambda i, x: math_ops.less(i, 4)
        b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
        return while_loop_tf.while_loop(c, b, [z, s])

      def inner_loop2(s):
        z = constant_op.constant(0)
        c = lambda i, x: math_ops.less(i, 4)
        b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
        return while_loop_tf.while_loop(c, b, [z, s])

      c = lambda x: math_ops.less(x, 128.0)
      b = lambda x: inner_loop2(inner_loop1(x)[1])[1]
      r = while_loop_tf.while_loop(c, b, [v])

      r = gradients_impl.gradients(r, v)[0]
      self.assertAllClose(256.0, self.evaluate(r))

  @test_util.run_deprecated_v1
  def testNestedWhileGrad_ParallelInner(self):
    with self.cached_session():
      v = constant_op.constant(1.0)

      def inner_loop1(s):
        z = constant_op.constant(0)
        c = lambda i, x: math_ops.less(i, 4)
        b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
        return while_loop_tf.while_loop(c, b, [z, s])

      def inner_loop2(s):
        z = constant_op.constant(0)
        c = lambda i, x: math_ops.less(i, 4)
        b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
        return while_loop_tf.while_loop(c, b, [z, s])

      c = lambda x: math_ops.less(x, 128.0)
      b = lambda x: math_ops.multiply(inner_loop1(x)[1], inner_loop2(x)[1])
      r = while_loop_tf.while_loop(c, b, [v])

      r = gradients_impl.gradients(r, v)[0]
      self.assertAllClose(512.0, self.evaluate(r))

  @test_util.run_v1_only("b/120545219")
  def testNestedWhileGrad_ParallelIterations(self):
    # Make sure the stack pushes and pops of an inner loop are executed in
    # the sequential order of the iterations of its outer loop.
# NOTE(review): mangled-extraction chunk; tokens reproduced as-is, indentation
# reconstructed. Opens with the body of testNestedWhileGrad_ParallelIterations,
# whose header is on the previous line of the chunk.
    with self.cached_session() as sess:

      def inner_loop(t):
        fn = lambda n: n + math_ops.square(var)
        return map_fn.map_fn(fn=fn, elems=t, parallel_iterations=10)

      def outer_loop(inp):
        return map_fn.map_fn(
            fn=inner_loop, elems=inp, parallel_iterations=10)

      var = variables.Variable(constant_op.constant(3.0))
      inp = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
      res = outer_loop(inp)
      optimizer = adam.AdamOptimizer(learning_rate=0.001)
      train_op = optimizer.minimize(
          math_ops.reduce_mean(math_ops.square(res)))
      self.evaluate(variables.global_variables_initializer())
      self.evaluate(train_op)
      self.assertAllClose(2.999, var.read_value())

  def _testWhileCondGrad_Simple(self, use_gpu):
    with self.cached_session(use_gpu=use_gpu):
      v = ops.convert_to_tensor(2.0, name="v")
      n = ops.convert_to_tensor(100.0, name="n")
      one = ops.convert_to_tensor(1.0, name="one")
      c = lambda x: math_ops.less(x, n)
      # pylint: disable=undefined-variable
      # for OSS build
      b = lambda x: tf_cond.cond(constant_op.constant(True),
                                 lambda: math_ops.square(x),
                                 lambda: math_ops.subtract(x, one))
      # pylint: enable=undefined-variable
      r = while_loop_tf.while_loop(c, b, [v])
      r = gradients_impl.gradients(r, v)[0]
      self.assertAllClose(1024.0, self.evaluate(r))

  @test_util.run_deprecated_v1
  def testWhileCondGrad_Simple(self):
    self._testWhileCondGrad_Simple(use_gpu=False)
    self._testWhileCondGrad_Simple(use_gpu=True)

  @test_util.run_deprecated_v1
  def testWhileCondGrad_UnknownShape(self):
    with self.cached_session() as sess:
      v = array_ops.placeholder(dtypes.float32)
      n = ops.convert_to_tensor(100.0, name="n")
      one = ops.convert_to_tensor(1.0, name="one")
      c = lambda x: math_ops.less(x, n)
      # pylint: disable=undefined-variable
      # for OSS build
      b = lambda x: tf_cond.cond(constant_op.constant(True),
                                 lambda: math_ops.square(x),
                                 lambda: math_ops.subtract(x, one))
      # pylint: enable=undefined-variable
      r = while_loop_tf.while_loop(c, b, [v])
      r = gradients_impl.gradients(r, v)[0]
      r = sess.run(r, feed_dict={v: 2.0})
      self.assertAllClose(1024.0, r)

  @test_util.run_deprecated_v1
  def testWhileGrad_Concat(self):
    with self.cached_session() as sess:
      x = variable_scope.get_variable("x", initializer=[[1., 2.]])
      i0 = constant_op.constant(0)
      h0 = array_ops.zeros([0, 2])

      def condition(i, _):
        return i < 2

      def body(i, h):
        return i + 1, array_ops.concat([h, x], 0)

      _, h = while_loop_tf.while_loop(
          condition, body, [i0, h0],
          [i0.get_shape(), tensor_shape.TensorShape([None, 2])])
      s = math_ops.reduce_sum(h)

      optimizer = gradient_descent.GradientDescentOptimizer(0.01)
      op = optimizer.minimize(s)

      self.evaluate(variables.global_variables_initializer())
      self.evaluate(op)
      self.assertAllClose([[0.98000002, 1.98000002]], self.evaluate(x))

  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileWithRefsWithGradients_1(self):
    with self.cached_session() as sess:
      x = variable_v1.VariableV1(0.)._ref()  # pylint: disable=protected-access
      i = constant_op.constant(0)
      c = lambda i, x: math_ops.less(i, 10)

      self.assertEqual(x.dtype, dtypes.float32_ref)

      def body(i, x):
        self.assertEqual(x.dtype, dtypes.float32_ref)
        return [i + 1, gen_array_ops.ref_identity(x)]

      r = while_loop_tf.while_loop(c, body, [i, x], parallel_iterations=5)

      grad_ys = [variable_v1.VariableV1(73)._ref()]  # pylint: disable=protected-access
      grad = gradients_impl.gradients([r[1]], [x], grad_ys=grad_ys)

      self.evaluate(variables.global_variables_initializer())

      self.assertEqual(r[0].dtype, dtypes.int32)
      self.assertEqual(r[1].dtype, dtypes.float32_ref)

      value_i, value_x, value_x_grad = sess.run(r + grad)

      self.assertEqual(10, value_i)
      self.assertEqual(0, value_x)
      self.assertEqual(73, value_x_grad)

  @test_util.deprecated_graph_mode_only
  def testWhileGrad_IndexedSlices(self):
    with self.cached_session():
      values = constant_op.constant([2.0, 4.0], name="values")
      indices = constant_op.constant([0, 3], name="indices")
      shape = constant_op.constant([10], name="dense_shape")
      i = constant_op.constant(0)
      x = indexed_slices.IndexedSlices(values, indices, dense_shape=shape)

      def c(i, _):
        return i < 10

      def b(i, x):
        return [
            i + 1,
            indexed_slices.IndexedSlices(x.values * 2.0, x.indices,
                                         x.dense_shape)
        ]

      _, r = while_loop_tf.while_loop(c, b, [i, x])
      r = gradients_impl.gradients(r.values, values)[0]
      self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r))

  @test_util.deprecated_graph_mode_only
  def testWhileGrad_SparseTensor(self):
    with self.cached_session():
      values = constant_op.constant([2.0, 4.0], name="values")
      indices = constant_op.constant(
          [[0], [3]], dtype=dtypes.int64, name="indices")
      shape = constant_op.constant(
          [10], dtype=dtypes.int64, name="dense_shape")
      i = constant_op.constant(0)
      x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)

      def c(i, _):
        return i < 10

      def b(i, x):
        return [
            i + 1,
            sparse_tensor.SparseTensor(x.indices, x.values * 2.0,
                                       x.dense_shape)
        ]

      _, r = while_loop_tf.while_loop(c, b, [i, x])
      r = gradients_impl.gradients(r.values, values)[0]
      self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r))

  @test_util.deprecated_graph_mode_only
  def testCallGradInLoop(self):
    with self.cached_session() as sess:
      i0 = constant_op.constant(0)
      params = constant_op.constant(5.0)
      params_1 = math_ops.square(params)

      def c(i, _):
        return i < 10

      def b(i, x):
        data = constant_op.constant([1.0, 2.0, 3.0])
        data = math_ops.multiply(data, params_1)
        x1 = x + gradients_impl.gradients(data, params)[0]
        return i + 1, x1

      output_grad = while_loop_tf.while_loop(
          c, b, [i0, constant_op.constant(0.0)])
      self.assertAllClose(600.0, self.evaluate(output_grad)[1])

  @test_util.run_deprecated_v1
  def testWhileAndTensorArray(self):
    with self.cached_session() as sess:
      param = constant_op.constant(2.0)
      n0 = constant_op.constant(0)
      y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")

      def c(i, _):
        return i < 10

      def b(i, y):
        return [
            i + 1,
            map_fn.map_fn(lambda x: math_ops.multiply(x, param), y)
        ]

      r = while_loop_tf.while_loop(c, b, [n0, y0], parallel_iterations=1)
      r = gradients_impl.gradients(r, param)[0]
      self.assertAllClose(107520.0, self.evaluate(r))

  @test_util.run_deprecated_v1
  def testNestedWhileAndTensorArray(self):
    n = constant_op.constant(3.0)

    def Body(row, ta):

      def InnerBody(row, col, ta):
        # Note: row and col are 1-based.
        ta = ta.write(
            math_ops.cast(n * (row - 1.) + col - 1., dtypes.int32),
            row * col)
        return row, col + 1., ta

      ta = while_loop_tf.while_loop(
          lambda _, col, _1: col <= n,
          InnerBody, [row, constant_op.constant(1.), ta],
          return_same_structure=False)[2]
      return row + 1., ta

    ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=9)
    ta = while_loop_tf.while_loop(
        lambda row, _: row <= n,
        Body, [constant_op.constant(1.), ta],
        return_same_structure=False)[1]

    output = array_ops.reshape(ta.stack(), [3, 3])
    self.assertAllEqual(
        self.evaluate(output), [[1., 2., 3.], [2., 4., 6.], [3., 6., 9.]])
    # TODO(b/117675481): This does not work with current TA. Enable with new TA.
    # grad = gradients_impl.gradients(output, [n])
    # self.assertEqual(self.evaluate(grad), 3.5)

  @test_util.run_deprecated_v1
  def testWhileGrad_StopGrad(self):
    with self.cached_session():
      x = constant_op.constant(3.0, name="x")
      y = constant_op.constant(2.0, name="y")

      c = lambda x, y: math_ops.less(x, 100.0)

      def b(x, y):
        y1 = math_ops.square(y)
        x1 = math_ops.add(math_ops.square(x), y1)
        return x1, y1

      rx, ry = while_loop_tf.while_loop(c, b, [x, y])

      r = gradients_impl.gradients(rx, y)[0]
      self.assertEqual(136.0, self.evaluate(r))
      r = gradients_impl.gradients(ry, y)[0]
      self.assertEqual(32.0, self.evaluate(r))

      r = gradients_impl.gradients(array_ops.stop_gradient(rx), y)[0]
      self.assertEqual(r, None)
      r = gradients_impl.gradients(array_ops.stop_gradient(ry), y)[0]
      self.assertEqual(r, None)

      r = gradients_impl.gradients(
          array_ops.stop_gradient(math_ops.square(rx)), y)[0]
      self.assertEqual(r, None)
      r = gradients_impl.gradients(
          array_ops.stop_gradient(math_ops.add(rx, ry)), x)[0]
      self.assertEqual(r, None)
      r = gradients_impl.gradients(
          array_ops.stop_gradient(math_ops.add(rx, ry)), y)[0]
      self.assertEqual(r, None)

      r = gradients_impl.gradients(math_ops.add(rx, ry), y)[0]
      self.assertEqual(168.0, self.evaluate(r))
      r = gradients_impl.gradients(
          math_ops.add(rx, array_ops.stop_gradient(ry)), y)[0]
      self.assertEqual(136.0, self.evaluate(r))
      r = gradients_impl.gradients(
          math_ops.add(array_ops.stop_gradient(rx), ry), y)[0]
      self.assertEqual(32.0, self.evaluate(r))

  @test_util.run_deprecated_v1
  def testWhileGrad_StopGradInside(self):
    with self.cached_session():
      x = constant_op.constant(3.0, name="x")
      y = constant_op.constant(2.0, name="y")

      c = lambda x, y: math_ops.less(x, 100.0)

      def b(x, y):
        y1 = array_ops.stop_gradient(math_ops.square(y))
        x1 = math_ops.add(math_ops.square(x), y1)
        return x1, y1

      rx, _ = while_loop_tf.while_loop(c, b, [x, y])

      r = gradients_impl.gradients(rx, y)[0]
      self.assertAllClose(0.0, self.evaluate(r))
      r = gradients_impl.gradients(rx, x)[0]
      self.assertAllClose(156.0, self.evaluate(r))

  @test_util.run_deprecated_v1
  def testWhileGrad_StopGradInsideNoShape(self):
    with self.cached_session() as sess:
      x = array_ops.placeholder(dtypes.float32)
      y = array_ops.placeholder(dtypes.float32)

      c = lambda x, y: math_ops.less(math_ops.reduce_sum(x), 100.0)

      def b(x, y):
        y1 = array_ops.stop_gradient(math_ops.square(y, name="stopped"))
        x1 = math_ops.add(math_ops.square(x), y1)
        return x1, y1

      rx, _ = while_loop_tf.while_loop(c, b, [x, y])

      grad_y = gradients_impl.gradients(rx, y)[0]
      grad_x = gradients_impl.gradients(rx, x)[0]
      feed_dict = {x: [3.0, 4.0], y: [2.0, 3.0]}
      self.assertAllClose([0.0, 0.0], sess.run(grad_y, feed_dict=feed_dict))
      self.assertAllClose([156.0, 400.0],
                          sess.run(grad_x, feed_dict=feed_dict))
      name = "gradients/while/stopped_grad"
      all_ops = x.graph.get_operations()
      self.assertFalse(any(name in op.name for op in all_ops))

  @test_util.run_deprecated_v1
  def testWhileGradGradFail(self):
    theta = variables.Variable(initial_value=1.)
# NOTE(review): mangled-extraction chunk; tokens reproduced as-is, indentation
# reconstructed. Opens with the body of testWhileGradGradFail, whose header is
# on the previous line of the chunk.
    def fn(prev, x):
      return prev + x * theta

    result = functional_ops.scan(fn, np.array([1., 2., 3.], dtype=np.float32))
    grad_theta = gradients_impl.gradients(result, theta)
    if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
      with self.assertRaisesRegex(TypeError, "Second-order gradient"):
        gradients_impl.gradients(grad_theta, theta)
    grad_theta_stopped = array_ops.stop_gradient(grad_theta)
    gradients_impl.gradients(grad_theta_stopped, theta)

  @test_util.run_deprecated_v1
  def testStopGradOnWhileGrad(self):
    with self.cached_session():
      x = constant_op.constant(2.0, name="x")
      y = constant_op.constant(2.0, name="y")

      c = lambda x: math_ops.less(x, 100.0)
      b = lambda x: math_ops.multiply(x, y)
      rx = while_loop_tf.while_loop(c, b, [x])

      rg = gradients_impl.gradients(rx, y)[0]
      rg = array_ops.stop_gradient(rg)
      r = math_ops.add(math_ops.square(y), rx)
      r = math_ops.add(r, rg)
      r = gradients_impl.gradients(r, y)[0]
      self.assertEqual(388.0, self.evaluate(r))

  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_deprecated_v1
  def testWhileGradientWithNontrainablePath1(self):
    q = variables.Variable([7., 8.])

    def cond(_, y):
      del y
      return False

    def body(x, _):
      return x, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)

    _, y = while_loop_tf.while_loop(cond, body, (math_ops.argmin(q), 0.))
    dy_dq, = gradients_impl.gradients(y, q)
    self.assertIsNotNone(dy_dq)
    with self.cached_session() as sess:
      self.evaluate(q.initializer)
      self.assertAllClose([0., 0.], self.evaluate(dy_dq))

  @test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
  @test_util.run_v1_only("b/120545219")
  def testWhileGradientWithNontrainablePath2(self):
    q = variables.Variable([7., 8.])

    def cond(_, y):
      return math_ops.equal(y, 0.)

    def body(x, _):
      zero = constant_op.constant(0, dtype=dtypes.int64)
      return zero, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)

    _, y = while_loop_tf.while_loop(cond, body, (math_ops.argmin(q), 0.))
    dy_dq, = gradients_impl.gradients(y, q)
    self.assertIsNotNone(dy_dq)
    with self.cached_session() as sess:
      self.evaluate(q.initializer)
      self.assertAllClose([1., 1.], self.evaluate(dy_dq))

  @test_util.run_v1_only("b/120545219")
  def testIssue16504(self):
    c = constant_op.constant(np.arange(100), dtype=dtypes.float32)
    w = variables.Variable(
        initial_value=np.ones(100), dtype=dtypes.float32) / 100
    k = variables.Variable(0, dtype=dtypes.int32)
    chg_w = constant_op.constant(np.inf, dtype=dtypes.float32)

    def cond(k, _, chg_w):
      return math_ops.logical_and(k < 10, chg_w > 1e-3)

    def body(k, w, chg_w):
      grad, = gradients_impl.gradients(-math_ops.reduce_sum(w * c), w)
      w_n = w * math_ops.exp(-0.1 * grad)
      w_n /= math_ops.reduce_sum(w_n)
      chg_w = (
          math_ops.reduce_sum(math_ops.abs(w_n - w)) / math_ops.reduce_sum(
              math_ops.abs(w)))
      return k + 1, w_n, chg_w

    _, w, _ = while_loop_tf.while_loop(cond, body, [k, w, chg_w])
    grad, = gradients_impl.gradients(w, c)
    self.assertIsNotNone(grad)

  @test_util.run_v1_only("b/120545219")
  def testStopGradMultiFlows(self):
    with self.cached_session():

      def body(i, y, r):
        x = variable_scope.get_variable(
            "x",
            shape=(),
            dtype=dtypes.float32,
            initializer=init_ops.ones_initializer())
        y *= x
        return [i + 1, y, r + math_ops.reduce_sum(y)]

      i0 = constant_op.constant(0)
      y0 = array_ops.ones(5)
      r0 = constant_op.constant(0.0)
      cond = lambda i, y, r: i < 1
      _, _, r = while_loop_tf.while_loop(
          cond, body, [i0, y0, r0], back_prop=True)

      vars_ = variables.global_variables()
      grads = linalg_ops.norm(gradients_impl.gradients(r, vars_)[0])
      z = math_ops.add(r, array_ops.stop_gradient(math_ops.reduce_sum(grads)))
      result = gradients_impl.gradients(z, vars_)[0]
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(5.0, self.evaluate(result))
@test_util.run_v1_only("b/120545219") def testOneValueCond(self): with self.cached_session(): c = array_ops.placeholder(dtypes.int32, shape=[]) one = ops.convert_to_tensor(1, name="one") two = ops.convert_to_tensor(2, name="two") p = math_ops.greater_equal(c, 1) i = tf_cond.cond(p, lambda: one, lambda: two) self.assertTrue(isinstance(i, tensor_lib.Tensor)) # True case: c = 2 is >= 1 self.assertEqual([1], i.eval(feed_dict={c: 2})) # False case: c = 0 is not >= 1 self.assertEqual([2], i.eval(feed_dict={c: 0})) @test_util.run_deprecated_v1 def testExampleCond(self): with self.cached_session(): x = ops.convert_to_tensor([-2.0, 2.0], name="x") d = array_ops.placeholder(dtypes.int32, shape=[]) def l2(): return math_ops.sqrt(math_ops.reduce_sum(math_ops.square(x))) def l1(): return math_ops.reduce_sum(math_ops.abs(x)) i = tf_cond.cond(math_ops.equal(d, 2), l2, l1) self.assertAllClose(4.0, i.eval(feed_dict={d: 1})) self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2})) @test_util.run_v1_only("b/120545219") def testCase(self): with self.cached_session(): x = constant_op.constant(1) y = constant_op.constant(2) z = constant_op.constant(3) f1 = lambda: constant_op.constant(17) f2 = lambda: constant_op.constant(23) f3 = lambda: constant_op.constant(-1) r1 = control_flow_case.case({ x < y: f1, x > z: f2 }, default=f3, exclusive=True) self.assertAllEqual(r1, 17) r2 = control_flow_case.case([(y > z, f1), (y > x, f2)], default=f3) self.assertAllEqual(r2, 23) # Duplicate events can happen, first one is selected r3 = control_flow_case.case([(x < y, f1), (x < y, f2)], default=f3) self.assertAllEqual(r3, 17) # Duplicate events cause an error if exclusive = True r4 = control_flow_case.case([(x < y, f1), (x < y, f2)], default=f3, exclusive=True) with self.assertRaisesOpError("Input error:"): self.evaluate(r4) # Check that the default is called if none of the others are r5 = control_flow_case.case({x > y: f1}, default=f3) self.assertAllEqual(r5, -1) ran_once = [False, False, 
False] def break_run_twice(ix): def _break(): ran_once[ix] = True return constant_op.constant(ix) return _break # Should not fail - each conditional gets called exactly once # except default. Default gets called twice: once to create an # empty output and once for the actual cond switch. r6 = control_flow_case.case([(x < y, break_run_twice(0)), (x > y, break_run_twice(1))], default=lambda: constant_op.constant(2)) self.assertAllEqual(r6, 0) @test_util.run_v1_only("b/120545219") def testCaseSideEffects(self): with self.cached_session() as sess: v0 = variables.Variable(-1) v1 = variables.Variable(-1) v2 = variables.Variable(-1) a = lambda: control_flow_ops.with_dependencies([state_ops.assign(v0, 0)], 0) b = lambda: control_flow_ops.with_dependencies([state_ops.assign(v1, 1)], 1) c = lambda: control_flow_ops.with_dependencies([state_ops.assign(v2, 2)], 2) x = constant_op.constant(1) y = constant_op.constant(2) r0 = control_flow_case.case(((x < y, a), (x > y, b)), default=c, exclusive=True) r1 = control_flow_case.case(((x > y, a), (x < y, b)), default=c, exclusive=True) r2 = control_flow_case.case(((x > y, a), (x > y, b)), default=c, exclusive=True) self.evaluate(variables.global_variables_initializer()) self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3) self.assertEqual(2, self.evaluate(r2)) self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1, -1, 2]) self.evaluate(variables.global_variables_initializer()) self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3) self.assertEqual(1, self.evaluate(r1)) self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1, 1, -1]) self.evaluate(variables.global_variables_initializer()) self.assertAllEqual(self.evaluate([v0, v1, v2]), [-1] * 3) self.assertEqual(0, self.evaluate(r0)) self.assertAllEqual(self.evaluate([v0, v1, v2]), [0, -1, -1]) @test_util.disable_control_flow_v2("b/113324949 (ref vars)") @test_util.run_v1_only("b/120545219") def testOneOpCond(self): with self.cached_session(): v = variables.Variable(0) c = 
ops.convert_to_tensor(0) one = ops.convert_to_tensor(1) two = ops.convert_to_tensor(2) p = math_ops.greater_equal(c, 1) def a(): return state_ops.assign(v, one) def b(): return state_ops.assign(v, two) i = tf_cond.cond(p, a, b) self.assertTrue(isinstance(i, tensor_lib.Tensor)) self.evaluate(variables.global_variables_initializer()) self.assertEqual(0, self.evaluate(v)) # True case: c = 2 is >= 1, v is set to 1. self.assertEqual(1, i.eval(feed_dict={c.name: 2})) self.assertEqual(1, self.evaluate(v)) # False case: c = 0 is not >= 1, v is set to 2. self.assertEqual(2, i.eval(feed_dict={c.name: 0})) self.assertEqual(2, self.evaluate(v)) @test_util.run_v1_only("b/120545219") def testWithOpsDependencies(self): with self.cached_session() as sess: v = variable_v1.VariableV1(0.0) c = constant_op.constant(10) # Fetching v directly will result in an uninitialized error with self.assertRaisesOpError("Attempting to use uninitialized value"): self.evaluate([c, v]) # Use a control dependency to ensure init_variable is run # while asking for c real_v = control_flow_ops.with_dependencies( name="real_tensor", output_tensor=v._ref(), # pylint: disable=protected-access dependencies=[v.initializer]) c_val, real_v_val = self.evaluate([c, real_v]) # Ensure the result of 'real_c' is the same as 'c' self.assertAllEqual(10, c_val) # Ensure that 'v' is initialized self.assertAllClose(0.0, real_v_val) @test_util.run_v1_only("b/120545219") def testWithTensorDependencies(self): with self.cached_session(): v = variable_v1.VariableV1(0.0) c1 = constant_op.constant(10) c2 = constant_op.constant(20) # c1_with_init_v depends on the init op for v c1_with_init_v = control_flow_ops.with_dependencies( name="c1_with_init_v", output_tensor=c1, dependencies=[v.initializer]) # c2_with_c1 depends on the value of c1_with_init_v c2_with_c1_dep = control_flow_ops.with_dependencies( name="c2_with_c1_dep", output_tensor=c2, dependencies=[c1_with_init_v]) # Fetching v directly will result in an uninitialized error 
with self.assertRaisesOpError("Attempting to use uninitialized value"): self.evaluate(v) # Get the value of 'c2_with_c1_dep', which should cause 'v' # to be initialized. self.assertAllEqual(20, self.evaluate(c2_with_c1_dep)) # Ensure that 'v' is initialized self.assertAllClose(0.0, self.evaluate(v)) @test_util.run_v1_only("b/120545219") def testWithIndexedSlicesDependencies(self): with self.cached_session(): v = variable_v1.VariableV1( np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32)) v_at_1 = indexed_slices.IndexedSlices(v, constant_op.constant([1])) gather_v_at_1 = array_ops.gather(v_at_1.values, v_at_1.indices) v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer], v_at_1) gather_v_at_1_after_init = array_ops.gather(v_at_1_after_init.values, v_at_1_after_init.indices) # Fetching gather_v_at_1 will result in an uninitialized error with self.assertRaisesOpError("Attempting to use uninitialized value"): self.evaluate(gather_v_at_1) # Getting gather_v_at_1_after_init will work, and initialize v. self.assertAllEqual([[10.0, 11.0]], self.evaluate(gather_v_at_1_after_init)) # Double check that 'v' is initialized self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], self.evaluate(v)) def testDependenciesDevice(self): with ops.Graph().as_default(): # device set on tensor => same device on dep. with ops.device("/job:ps"): vd = variable_v1.VariableV1([0.0]) with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd) self.assertTrue("/job:ps" in with_vd_dep.device) # No device set on tensor => no device on dep. vnod = variable_v1.VariableV1([0.0]) with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer], vnod) self.assertDeviceEqual(None, with_vnod_dep.device) # device set on tensor, default device on graph => default device on dep. 
vdef = variable_v1.VariableV1([0.0], name="vdef") with ops.device("/job:worker/device:GPU:1"): with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer], vdef) # The device is empty, but the colocation constraint is set. self.assertDeviceEqual("", with_vdef_dep.device) self.assertEqual([b"loc:@vdef"], with_vdef_dep.op.colocation_groups()) @test_util.run_v1_only("b/120545219") def testGroup(self): with self.cached_session() as sess: v1 = variable_v1.VariableV1([0.0]) v2 = variable_v1.VariableV1([1.0]) # Group init1 and init2 and run. init = control_flow_ops.group(v1.initializer, v2.initializer) # Fetching v1 directly will result in an uninitialized error with self.assertRaisesOpError("Attempting to use uninitialized value"): self.evaluate(v1) # Runs "init" before fetching v1 and v2. init.run() v1_val, v2_val = self.evaluate([v1, v2]) # Ensure that v1 and v2 are initialized self.assertAllClose([0.0], v1_val) self.assertAllClose([1.0], v2_val) @test_util.run_v1_only("b/120545219") def testGroupEmpty(self): op = control_flow_ops.group() self.assertEqual(op.type, "NoOp") self.assertEqual(op.control_inputs, []) @test_util.run_deprecated_v1 def testMergeShapes(self): # All inputs unknown. p1 = array_ops.placeholder(dtypes.float32) p2 = array_ops.placeholder(dtypes.float32) p3 = array_ops.placeholder(dtypes.float32) m, index = control_flow_ops.merge([p1, p2, p3]) self.assertIs(None, m.get_shape().ndims) self.assertEqual([], index.get_shape()) # All inputs known with different ranks. p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2]) p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2, 3]) m, index = control_flow_ops.merge([p1, p2]) self.assertIs(None, m.get_shape().ndims) self.assertEqual([], index.get_shape()) # All inputs known with some dimensions different. 
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2]) p2 = array_ops.placeholder(dtypes.float32, shape=[2, 1]) m, index = control_flow_ops.merge([p1, p2]) self.assertEqual([None, None], m.get_shape().as_list()) self.assertEqual([], index.get_shape()) p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2]) p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2]) m, index = control_flow_ops.merge([p1, p2]) self.assertEqual([None, 2], m.get_shape().as_list()) self.assertEqual([], index.get_shape()) p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2]) p2 = array_ops.placeholder(dtypes.float32, shape=[2, 2]) m, index = control_flow_ops.merge([p1, p2]) self.assertEqual([None, 2], m.get_shape().as_list()) self.assertEqual([], index.get_shape()) # All inputs known with same dimensions. p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2]) p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2]) m, index = control_flow_ops.merge([p1, p2]) self.assertEqual([1, 2], m.get_shape().as_list()) self.assertEqual([], index.get_shape()) p1 = array_ops.placeholder(dtypes.float32, shape=[None, 2]) p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2]) m, index = control_flow_ops.merge([p1, p2]) self.assertEqual([None, 2], m.get_shape().as_list()) self.assertEqual([], index.get_shape()) p1 = array_ops.placeholder(dtypes.float32, shape=[None, None]) p2 = array_ops.placeholder(dtypes.float32, shape=[None, None]) m, index = control_flow_ops.merge([p1, p2]) self.assertEqual([None, None], m.get_shape().as_list()) self.assertEqual([], index.get_shape()) @test_util.run_v1_only("b/120545219") def testRefSelect(self): index = array_ops.placeholder(dtypes.int32) # All inputs unknown. 
p1 = array_ops.placeholder(dtypes.float32) p2 = array_ops.placeholder(dtypes.float32) p3 = array_ops.placeholder(dtypes.float32) v1 = variable_v1.VariableV1(p1, validate_shape=False) v2 = variable_v1.VariableV1(p2, validate_shape=False) v3 = variable_v1.VariableV1(p3, validate_shape=False) self.assertIs(None, v1.get_shape().ndims) s = control_flow_ops.ref_select(index, [v1, v2, v3]) self.assertIs(None, s.get_shape().ndims) # All inputs known but different. v1 = variable_v1.VariableV1([[1, 2]]) v2 = variable_v1.VariableV1([[2], [1]]) s = control_flow_ops.ref_select(index, [v1, v2]) self.assertIs(None, s.get_shape().ndims) # All inputs known and same. v1 = variable_v1.VariableV1([[1, 2]]) v2 = variable_v1.VariableV1([[1, 2]]) s = control_flow_ops.ref_select(index, [v1, v2]) self.assertEqual([1, 2], s.get_shape()) # Possibly the same but not guaranteed. v1 = variable_v1.VariableV1([[1., 2.]]) p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2]) v2 = variable_v1.VariableV1(p2, validate_shape=False) s = control_flow_ops.ref_select(index, [v1, v2]) self.assertEqual(None, s.get_shape()) @test_util.run_deprecated_v1 def testRunLoopTensor(self): with self.cached_session() as sess: tensor_list = [] def condition(t): return t < constant_op.constant(5) def body(_): tensor_list.append(constant_op.constant(5)) return constant_op.constant(10) result = while_loop_tf.while_loop( condition, body, [constant_op.constant(4)]) self.assertEqual(10, self.evaluate(result)) # Ensure that we cannot run a tensor that escapes the loop body # accidentally. 
with self.assertRaises(ValueError): sess.run(tensor_list[0]) @test_util.run_v1_only("b/120545219") def testWhilePyFuncBasic(self): def func(x): return np.square(x) with self.cached_session(): r = while_loop_tf.while_loop( lambda i, v: i < 4, lambda i, v: [i + 1, script_ops.py_func(func, [v], [dtypes.float32])[0]], [constant_op.constant(0), constant_op.constant(2.0, dtypes.float32)], [tensor_shape.unknown_shape(), tensor_shape.unknown_shape()]) self.assertEqual(self.evaluate(r[1]), 65536.0) @test_util.run_v1_only("b/120545219") def testWhileFuncBasic(self): @function.Defun(dtypes.float32) def func(x): return math_ops.square(math_ops.square(x)) with self.cached_session(): x = constant_op.constant(2.0, dtypes.float32) r = while_loop_tf.while_loop( lambda i, v: i < 2, lambda i, v: [i + 1, func(v)], [constant_op.constant(0), x], [tensor_shape.unknown_shape(), tensor_shape.unknown_shape()]) grad = gradients_impl.gradients(r, x)[0] self.assertEqual(self.evaluate(r[1]), 65536.0) self.assertEqual(self.evaluate(grad), 524288.0) # while_v2 does not have stacks. 
if not control_flow_util.ENABLE_CONTROL_FLOW_V2: self.assertEqual( len([op for op in x.graph.get_operations() if op.type == "StackV2" ]), 1) @test_util.run_v1_only("b/120545219") def testQIntSwitchMerge(self): with self.cached_session(force_gpu=test.is_gpu_available()) as sess: constant_qint = constant_op.constant(np.array([42]), dtypes.qint8) cond = constant_op.constant(True, dtypes.bool) v_f, v_t = control_flow_ops.switch(constant_qint, cond) result = control_flow_ops.merge([v_f, v_t]) self.evaluate(result) @test_util.run_v1_only("b/120545219") def testQIntRefSwitchMerge(self): with self.cached_session(use_gpu=test.is_gpu_available()) as sess: var_qint = gen_state_ops.variable( shape=[1], dtype=dtypes.qint8, name="v", container="", shared_name="") assign_op = state_ops.assign( var_qint, constant_op.constant(np.array([42]), dtypes.qint8)) self.evaluate(assign_op) cond = constant_op.constant(True, dtypes.bool) v_f, v_t = control_flow_ops.ref_switch(var_qint, cond) result = control_flow_ops.ref_merge([v_f, v_t]) self.evaluate(result) @test_util.run_v1_only("b/120545219") def testUInt64SwitchMerge(self): with self.cached_session(force_gpu=test.is_gpu_available()) as sess: constant_uint64 = constant_op.constant(np.array([42]), dtypes.uint64) cond = constant_op.constant(True, dtypes.bool) v_f, v_t = control_flow_ops.switch(constant_uint64, cond) result = control_flow_ops.merge([v_f, v_t]) self.evaluate(result) def testSwitchEagerMode(self): if not context.executing_eagerly(): return input_data = [1, 2, 3, 4] vf, vt = control_flow_ops.switch(input_data, False) self.assertAllEqual(vf, input_data) self.assertAllEqual(vt, []) @test_util.run_deprecated_v1 def testQIntArgAndRet(self): @function.Defun(dtypes.qint8) def func(x): return x with self.cached_session(force_gpu=test.is_gpu_available()) as sess: qint = constant_op.constant(np.array([42]), dtypes.qint8) result = func(qint) self.evaluate(result) def testSparseIdentity(self): st1 = sparse_tensor.SparseTensor([[0, 5]], 
['x'], [10, 10]) st2 = control_flow_ops._Identity(st1) self.assertAllEqual(st1.indices, st2.indices) self.assertAllEqual(st1.values, st2.values) self.assertAllEqual(st1.dense_shape, st2.dense_shape) def testSparseEnterExit(self): st1 = sparse_tensor.SparseTensor([[0, 5]], ['x'], [10, 10]) st2 = control_flow_ops._Enter(st1, "foo_1") st3 = control_flow_ops.exit(st2) self.assertAllEqual(st1.indices, st3.indices) self.assertAllEqual(st1.values, st3.values) self.assertAllEqual(st1.dense_shape, st3.dense_shape) def _buildWhileWithShapeInvariants(self, shape_invariants): r = constant_op.constant([1, 2]) def cond(_): return False def body(_): return constant_op.constant([1]) return while_loop_tf.while_loop( cond, body, [r], shape_invariants=shape_invariants) def testWhileOutputShapeWithShapeInvariantsUnknownRank(self): @eager_def_function.function def runTest(): while_output = self._buildWhileWithShapeInvariants( [tensor_shape.TensorShape(None)]) self.assertIsNone(while_output.shape.rank) runTest() def testWhileOutputShapeWithShapeInvariantsPartialShape(self): @eager_def_function.function def runTest(): while_output = self._buildWhileWithShapeInvariants( [tensor_shape.TensorShape([None])]) self.assertAllEqual(while_output.shape.as_list(), [None]) runTest() def testFunctionInWhile(self): @eager_def_function.function def body(x): return x + 1 r = while_loop_tf.while_loop(lambda x: x < 5, body, [0]) self.assertAllEqual(r, 5.)
ControlFlowTest
python
langchain-ai__langchain
libs/core/tests/unit_tests/_api/test_deprecation.py
{ "start": 12987, "end": 17315 }
class ____(BaseModel): @deprecated(since="2.0.0", removal="3.0.0") def deprecated_method(self) -> str: """Original doc.""" return "This is a deprecated method." def test_deprecated_method_pydantic() -> None: """Test deprecated method.""" with warnings.catch_warnings(record=True) as warning_list: warnings.simplefilter("always") obj = MyModel() assert obj.deprecated_method() == "This is a deprecated method." assert len(warning_list) == 1 warning = warning_list[0].message assert str(warning) == ( "The method `MyModel.deprecated_method` was deprecated in " "tests 2.0.0 and will be removed in 3.0.0" ) doc = obj.deprecated_method.__doc__ assert isinstance(doc, str) assert doc.startswith("!!! deprecated") def test_raise_error_for_bad_decorator() -> None: """Verify that errors raised on init rather than on use.""" # Should not specify both `alternative` and `alternative_import` with pytest.raises( ValueError, match="Cannot specify both alternative and alternative_import" ): @deprecated(since="2.0.0", alternative="NewClass", alternative_import="hello") def deprecated_function() -> str: """Original doc.""" return "This is a deprecated function." def test_rename_parameter() -> None: """Test rename parameter.""" @rename_parameter(since="2.0.0", removal="3.0.0", old="old_name", new="new_name") def foo(new_name: str) -> str: """Original doc.""" return new_name with warnings.catch_warnings(record=True) as warning_list: warnings.simplefilter("always") assert foo(old_name="hello") == "hello" # type: ignore[call-arg] assert len(warning_list) == 1 assert foo(new_name="hello") == "hello" assert foo("hello") == "hello" assert foo.__doc__ == "Original doc." 
with pytest.raises(TypeError): foo(meow="hello") # type: ignore[call-arg] with pytest.raises(TypeError): assert foo("hello", old_name="hello") # type: ignore[call-arg] with pytest.raises(TypeError): assert foo(old_name="goodbye", new_name="hello") # type: ignore[call-arg] async def test_rename_parameter_for_async_func() -> None: """Test rename parameter.""" @rename_parameter(since="2.0.0", removal="3.0.0", old="old_name", new="new_name") async def foo(new_name: str) -> str: """Original doc.""" return new_name with warnings.catch_warnings(record=True) as warning_list: warnings.simplefilter("always") assert await foo(old_name="hello") == "hello" # type: ignore[call-arg] assert len(warning_list) == 1 assert await foo(new_name="hello") == "hello" assert await foo("hello") == "hello" assert foo.__doc__ == "Original doc." with pytest.raises(TypeError): await foo(meow="hello") # type: ignore[call-arg] with pytest.raises(TypeError): assert await foo("hello", old_name="hello") # type: ignore[call-arg] with pytest.raises(TypeError): assert await foo(old_name="a", new_name="hello") # type: ignore[call-arg] def test_rename_parameter_method() -> None: """Test that it works for a method.""" class Foo: @rename_parameter( since="2.0.0", removal="3.0.0", old="old_name", new="new_name" ) def a(self, new_name: str) -> str: return new_name foo = Foo() with warnings.catch_warnings(record=True) as warning_list: warnings.simplefilter("always") assert foo.a(old_name="hello") == "hello" # type: ignore[call-arg] assert len(warning_list) == 1 assert str(warning_list[0].message) == ( "The parameter `old_name` of `a` was deprecated in 2.0.0 and will be " "removed " "in 3.0.0 Use `new_name` instead." ) assert foo.a(new_name="hello") == "hello" assert foo.a("hello") == "hello" with pytest.raises(TypeError): foo.a(meow="hello") # type: ignore[call-arg] with pytest.raises(TypeError): assert foo.a("hello", old_name="hello") # type: ignore[call-arg]
MyModel
python
huggingface__transformers
src/transformers/models/tvp/processing_tvp.py
{ "start": 759, "end": 1030 }
class ____(ProcessingKwargs, total=False): _defaults = { "text_kwargs": { "truncation": True, "padding": "max_length", "pad_to_max_length": True, "return_token_type_ids": False, }, }
TvpProcessorKwargs
python
huggingface__transformers
src/transformers/models/phimoe/modular_phimoe.py
{ "start": 10978, "end": 12631 }
class ____(nn.Module): """ This implementation is strictly equivalent to standard MoE with full capacity (no dropped tokens). It's faster since it formulates MoE operations in terms of block-sparse operations to accommodate imbalanced assignments of tokens to experts, whereas standard MoE either (1) drop tokens at the cost of reduced performance or (2) set capacity factor to number of experts and thus waste computation and memory on padding. """ def __init__(self, config): super().__init__() self.hidden_dim = config.hidden_size self.ffn_dim = config.intermediate_size self.num_experts = config.num_local_experts self.top_k = config.num_experts_per_tok self.router = PhimoeTopKRouter(config) self.experts = PhimoeExperts(config) self.input_jitter_noise = config.input_jitter_noise def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: batch_size, sequence_length, hidden_dim = hidden_states.shape if self.training and self.input_jitter_noise > 0: hidden_states *= torch.empty_like(hidden_states).uniform_( 1.0 - self.input_jitter_noise, 1.0 + self.input_jitter_noise ) batch_size, sequence_length, hidden_dim = hidden_states.shape hidden_states = hidden_states.reshape(-1, hidden_dim) routing_weights, selected_experts = self.router(hidden_states) final_hidden_states = self.experts(hidden_states, selected_experts, routing_weights) return final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
PhimoeSparseMoeBlock
python
google__jax
tests/pmap_test.py
{ "start": 87514, "end": 89309 }
class ____(jtu.JaxTestCase): # TODO(apaszke) @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({ "testcase_name": f"{shapes}_{vmap_in_axes}_{vmap_out_axes}_{pmap_in_axes}_{pmap_out_axes}", "shapes": shapes, "vmap_in_axes": vmap_in_axes, "vmap_out_axes": vmap_out_axes, "pmap_in_axes": pmap_in_axes, "pmap_out_axes": pmap_out_axes } for arg_shapes in s(compatible_shapes) for num_args in s(range(1, 4)) for shapes in s(list(it.combinations_with_replacement(arg_shapes, num_args))) for vmap_in_axes in s(all_bdims(*shapes, pmap=False)) for pmap_in_axes in s(all_bdims(*shapes, pmap=True)) for vmap_out_axes in s(out_bdims(shapes[0], False)) for pmap_out_axes in s(out_bdims(shapes[0], True)) ))) def testVmapOfPmap(self, shapes, vmap_in_axes, pmap_in_axes, vmap_out_axes, pmap_out_axes): vmapped_size = 3 pmapped_size = jax.device_count() rng = jtu.rand_default(self.rng()) def fun(*args): return sum(args) final_shapes = map(partial(add_bdim, vmapped_size), vmap_in_axes, map(partial(add_bdim, pmapped_size), pmap_in_axes, shapes)) def args_slice(vi, pi): return args_slicer(args_slicer(args, vmap_in_axes)(vi), pmap_in_axes)(pi) args = [rng(shape, jnp.float32) for shape in final_shapes] ans = vmap(pmap(fun, in_axes=pmap_in_axes, out_axes=pmap_out_axes), in_axes=vmap_in_axes, out_axes=vmap_out_axes)(*args) expected = np.stack( [np.stack([fun(*args_slice(vi, pi)) for pi in range(pmapped_size)], axis=pmap_out_axes) for vi in range(vmapped_size)], axis=vmap_out_axes) self.assertAllClose(ans, expected) @jtu.pytest_mark_if_available('multiaccelerator')
VmapOfPmapTest
python
crytic__slither
slither/detectors/reentrancy/reentrancy_events.py
{ "start": 583, "end": 7236 }
class ____(Reentrancy): ARGUMENT = "reentrancy-events" HELP = "Reentrancy vulnerabilities leading to out-of-order Events" IMPACT = DetectorClassification.LOW CONFIDENCE = DetectorClassification.MEDIUM WIKI = ( "https://github.com/crytic/slither/wiki/Detector-Documentation#reentrancy-vulnerabilities-3" ) WIKI_TITLE = "Reentrancy vulnerabilities" # region wiki_description WIKI_DESCRIPTION = """ Detects [reentrancies](https://github.com/trailofbits/not-so-smart-contracts/tree/master/reentrancy) that allow manipulation of the order or value of events.""" # endregion wiki_description # region wiki_exploit_scenario WIKI_EXPLOIT_SCENARIO = """ ```solidity contract ReentrantContract { function f() external { if (BugReentrancyEvents(msg.sender).counter() == 1) { BugReentrancyEvents(msg.sender).count(this); } } } contract Counter { uint public counter; event Counter(uint); } contract BugReentrancyEvents is Counter { function count(ReentrantContract d) external { counter += 1; d.f(); emit Counter(counter); } } contract NoReentrancyEvents is Counter { function count(ReentrantContract d) external { counter += 1; emit Counter(counter); d.f(); } } ``` If the external call `d.f()` re-enters `BugReentrancyEvents`, the `Counter` events will be incorrect (`Counter(2)`, `Counter(2)`) whereas `NoReentrancyEvents` will correctly emit (`Counter(1)`, `Counter(2)`). This may cause issues for offchain components that rely on the values of events e.g. checking for the amount deposited to a bridge.""" # endregion wiki_exploit_scenario WIKI_RECOMMENDATION = "Apply the [`check-effects-interactions` pattern](https://docs.soliditylang.org/en/latest/security-considerations.html#re-entrancy)." 
STANDARD_JSON = False def find_reentrancies(self) -> DefaultDict[FindingKey, Set[FindingValue]]: result = defaultdict(set) for contract in self.contracts: for f in contract.functions_and_modifiers_declared: if not f.is_reentrant: continue for node in f.nodes: # dead code if self.KEY not in node.context: continue if node.context[self.KEY].calls: if not any(n != node for n in node.context[self.KEY].calls): continue # calls are ordered finding_key = FindingKey( function=node.function, calls=to_hashable(node.context[self.KEY].calls), send_eth=to_hashable(node.context[self.KEY].send_eth), ) finding_vars = { FindingValue( e, e.node, tuple(sorted(nodes, key=lambda x: x.node_id)), ) for (e, nodes) in node.context[self.KEY].events.items() } if finding_vars: result[finding_key] |= finding_vars return result def _detect(self) -> List[Output]: # pylint: disable=too-many-branches """""" super()._detect() reentrancies = self.find_reentrancies() results = [] result_sorted = sorted(list(reentrancies.items()), key=lambda x: x[0][0].name) for (func, calls, send_eth), events in result_sorted: calls = sorted(list(set(calls)), key=lambda x: x[0].node_id) send_eth = sorted(list(set(send_eth)), key=lambda x: x[0].node_id) events = sorted(events, key=lambda x: (str(x.variable.name), x.node.node_id)) info = ["Reentrancy in ", func, ":\n"] info += ["\tExternal calls:\n"] for (call_info, calls_list) in calls: info += ["\t- ", call_info, "\n"] for call_list_info in calls_list: if call_list_info != call_info: info += ["\t\t- ", call_list_info, "\n"] if calls != send_eth and send_eth: info += ["\tExternal calls sending eth:\n"] for (call_info, calls_list) in send_eth: info += ["\t- ", call_info, "\n"] for call_list_info in calls_list: if call_list_info != call_info: info += ["\t\t- ", call_list_info, "\n"] info += ["\tEvent emitted after the call(s):\n"] for finding_value in events: info += ["\t- ", finding_value.node, "\n"] for other_node in finding_value.nodes: if other_node != 
finding_value.node: info += ["\t\t- ", other_node, "\n"] # Create our JSON result res = self.generate_result(info) # Add the function with the re-entrancy first res.add(func) # Add all underlying calls in the function which are potentially problematic. for (call_info, calls_list) in calls: res.add(call_info, {"underlying_type": "external_calls"}) for call_list_info in calls_list: if call_list_info != call_info: res.add( call_list_info, {"underlying_type": "external_calls_sending_eth"}, ) # # If the calls are not the same ones that send eth, add the eth sending nodes. if calls != send_eth: for (call_info, calls_list) in send_eth: res.add(call_info, {"underlying_type": "external_calls_sending_eth"}) for call_list_info in calls_list: if call_list_info != call_info: res.add( call_list_info, {"underlying_type": "external_calls_sending_eth"}, ) for finding_value in events: res.add(finding_value.node, {"underlying_type": "event"}) for other_node in finding_value.nodes: if other_node != finding_value.node: res.add(other_node, {"underlying_type": "event"}) # Append our result results.append(res) return results
ReentrancyEvent
python
kubernetes-client__python
kubernetes/client/api/node_v1_api.py
{ "start": 543, "end": 95659 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_runtime_class(self, body, **kwargs): # noqa: E501 """create_runtime_class # noqa: E501 create a RuntimeClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_runtime_class(body, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param V1RuntimeClass body: (required) :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. 
The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: V1RuntimeClass If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.create_runtime_class_with_http_info(body, **kwargs) # noqa: E501 def create_runtime_class_with_http_info(self, body, **kwargs): # noqa: E501 """create_runtime_class # noqa: E501 create a RuntimeClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_runtime_class_with_http_info(body, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param V1RuntimeClass body: (required) :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(V1RuntimeClass, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. 
""" local_var_params = locals() all_params = [ 'body', 'pretty', 'dry_run', 'field_manager', 'field_validation' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method create_runtime_class" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'body' is set if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501 local_var_params['body'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `body` when calling `create_runtime_class`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501 query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501 if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501 query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501 if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501 query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501 if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501 query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501 # Authentication setting auth_settings = ['BearerToken'] # noqa: E501 return self.api_client.call_api( 
'/apis/node.k8s.io/v1/runtimeclasses', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1RuntimeClass', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def delete_collection_runtime_class(self, **kwargs): # noqa: E501 """delete_collection_runtime_class # noqa: E501 delete collection of RuntimeClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_runtime_class(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. 
Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. 
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. 
The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param V1DeleteOptions body: :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.delete_collection_runtime_class_with_http_info(**kwargs) # noqa: E501 def delete_collection_runtime_class_with_http_info(self, **kwargs): # noqa: E501 """delete_collection_runtime_class # noqa: E501 delete collection of RuntimeClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_runtime_class_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str pretty: If 'true', then the output is pretty printed. 
Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. 
Defaults to a per object value if not specified. zero means delete immediately. :param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. 
The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. 
Defaults to unset :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param V1DeleteOptions body: :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. 
It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [ 'pretty', '_continue', 'dry_run', 'field_selector', 'grace_period_seconds', 'ignore_store_read_error_with_cluster_breaking_potential', 'label_selector', 'limit', 'orphan_dependents', 'propagation_policy', 'resource_version', 'resource_version_match', 'send_initial_events', 'timeout_seconds', 'body' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method delete_collection_runtime_class" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501 query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501 if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501 query_params.append(('continue', local_var_params['_continue'])) # noqa: E501 if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501 query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501 if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501 query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501 if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501 query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501 if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and 
local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501 query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501 if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501 query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501 if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501 query_params.append(('limit', local_var_params['limit'])) # noqa: E501 if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501 query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501 if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501 query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501 if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501 query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501 if 'resource_version_match' in local_var_params and local_var_params['resource_version_match'] is not None: # noqa: E501 query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501 if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501 query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501 if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501 query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in local_var_params: body_params = 
        local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor'])  # noqa: E501

        # Authentication setting
        auth_settings = ['BearerToken']  # noqa: E501

        # DELETE against the whole runtimeclasses collection; the API server
        # answers with a V1Status object describing the outcome.
        return self.api_client.call_api(
            '/apis/node.k8s.io/v1/runtimeclasses', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1Status',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def delete_runtime_class(self, name, **kwargs):  # noqa: E501
        """delete_runtime_class  # noqa: E501

        delete a RuntimeClass  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_runtime_class(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the RuntimeClass (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: V1Status
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Thin convenience wrapper: force data-only return and delegate to the
        # *_with_http_info variant, which does the real work.
        kwargs['_return_http_data_only'] = True
        return self.delete_runtime_class_with_http_info(name, **kwargs)  # noqa: E501

    def delete_runtime_class_with_http_info(self, name, **kwargs):  # noqa: E501
        """delete_runtime_class  # noqa: E501

        delete a RuntimeClass  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_runtime_class_with_http_info(name, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str name: name of the RuntimeClass (required)
        :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).
        :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
        :param bool ignore_store_read_error_with_cluster_breaking_potential: if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it can not be retrieved from the underlying storage successfully because of a) its data can not be transformed e.g. decryption failure, or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it
        :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
        :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
        :param V1DeleteOptions body:
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
""" local_var_params = locals() all_params = [ 'name', 'pretty', 'dry_run', 'grace_period_seconds', 'ignore_store_read_error_with_cluster_breaking_potential', 'orphan_dependents', 'propagation_policy', 'body' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method delete_runtime_class" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'name' is set if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501 local_var_params['name'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `name` when calling `delete_runtime_class`") # noqa: E501 collection_formats = {} path_params = {} if 'name' in local_var_params: path_params['name'] = local_var_params['name'] # noqa: E501 query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501 query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501 if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501 query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501 if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501 query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501 if 'ignore_store_read_error_with_cluster_breaking_potential' in local_var_params and local_var_params['ignore_store_read_error_with_cluster_breaking_potential'] is not None: # noqa: E501 query_params.append(('ignoreStoreReadErrorWithClusterBreakingPotential', local_var_params['ignore_store_read_error_with_cluster_breaking_potential'])) # noqa: E501 if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not 
None: # noqa: E501 query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501 if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501 query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501 # Authentication setting auth_settings = ['BearerToken'] # noqa: E501 return self.api_client.call_api( '/apis/node.k8s.io/v1/runtimeclasses/{name}', 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1Status', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def get_api_resources(self, **kwargs): # noqa: E501 """get_api_resources # noqa: E501 get available resources # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. 
:return: V1APIResourceList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.get_api_resources_with_http_info(**kwargs) # noqa: E501 def get_api_resources_with_http_info(self, **kwargs): # noqa: E501 """get_api_resources # noqa: E501 get available resources # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_api_resources_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. 
""" local_var_params = locals() all_params = [ ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method get_api_resources" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501 # Authentication setting auth_settings = ['BearerToken'] # noqa: E501 return self.api_client.call_api( '/apis/node.k8s.io/v1/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1APIResourceList', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def list_runtime_class(self, **kwargs): # noqa: E501 """list_runtime_class # noqa: E501 list or watch objects of kind RuntimeClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_runtime_class(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). 
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. 
If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. 
In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. 
:return: V1RuntimeClassList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.list_runtime_class_with_http_info(**kwargs) # noqa: E501 def list_runtime_class_with_http_info(self, **kwargs): # noqa: E501 """list_runtime_class # noqa: E501 list or watch objects of kind RuntimeClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_runtime_class_with_http_info(async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). :param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. 
This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param str resource_version_match: resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details. Defaults to unset :param bool send_initial_events: `sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched. When `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan is interpreted as \"data at least as new as the provided `resourceVersion`\" and the bookmark event is send when the state is synced to a `resourceVersion` at least as fresh as the one provided by the ListOptions. 
If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the bookmark event is send when the state is synced at least to the moment when request started being processed. - `resourceVersionMatch` set to any other value or unset Invalid error is returned. Defaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(V1RuntimeClassList, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. 
""" local_var_params = locals() all_params = [ 'pretty', 'allow_watch_bookmarks', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'resource_version_match', 'send_initial_events', 'timeout_seconds', 'watch' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method list_runtime_class" % key ) local_var_params[key] = val del local_var_params['kwargs'] collection_formats = {} path_params = {} query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501 query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501 if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501 query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501 if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501 query_params.append(('continue', local_var_params['_continue'])) # noqa: E501 if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501 query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501 if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501 query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501 if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501 query_params.append(('limit', local_var_params['limit'])) # noqa: E501 if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501 query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501 if 'resource_version_match' in local_var_params and 
local_var_params['resource_version_match'] is not None: # noqa: E501 query_params.append(('resourceVersionMatch', local_var_params['resource_version_match'])) # noqa: E501 if 'send_initial_events' in local_var_params and local_var_params['send_initial_events'] is not None: # noqa: E501 query_params.append(('sendInitialEvents', local_var_params['send_initial_events'])) # noqa: E501 if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501 query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501 if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501 query_params.append(('watch', local_var_params['watch'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch', 'application/cbor-seq']) # noqa: E501 # Authentication setting auth_settings = ['BearerToken'] # noqa: E501 return self.api_client.call_api( '/apis/node.k8s.io/v1/runtimeclasses', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1RuntimeClassList', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def patch_runtime_class(self, name, body, **kwargs): # noqa: E501 """patch_runtime_class # noqa: E501 partially update the specified RuntimeClass # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_runtime_class(name, body, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str name: name of the RuntimeClass (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. 
This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: V1RuntimeClass If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.patch_runtime_class_with_http_info(name, body, **kwargs) # noqa: E501 def patch_runtime_class_with_http_info(self, name, body, **kwargs): # noqa: E501 """patch_runtime_class # noqa: E501 partially update the specified RuntimeClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_runtime_class_with_http_info(name, body, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str name: name of the RuntimeClass (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. 
It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(V1RuntimeClass, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [ 'name', 'body', 'pretty', 'dry_run', 'field_manager', 'field_validation', 'force' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method patch_runtime_class" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'name' is set if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501 local_var_params['name'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `name` when calling `patch_runtime_class`") # noqa: E501 # verify the required parameter 'body' is set if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501 local_var_params['body'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `body` when calling `patch_runtime_class`") # noqa: E501 collection_formats = {} path_params = {} if 'name' in local_var_params: path_params['name'] = local_var_params['name'] # noqa: E501 query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501 query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501 if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501 query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501 if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501 query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501 if 'field_validation' in local_var_params and 
local_var_params['field_validation'] is not None: # noqa: E501 query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501 if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501 query_params.append(('force', local_var_params['force'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml', 'application/apply-patch+cbor']) # noqa: E501 # Authentication setting auth_settings = ['BearerToken'] # noqa: E501 return self.api_client.call_api( '/apis/node.k8s.io/v1/runtimeclasses/{name}', 'PATCH', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1RuntimeClass', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def read_runtime_class(self, name, **kwargs): # noqa: E501 """read_runtime_class # noqa: E501 read the specified RuntimeClass # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_runtime_class(name, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str name: name of the RuntimeClass (required) :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: V1RuntimeClass If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.read_runtime_class_with_http_info(name, **kwargs) # noqa: E501 def read_runtime_class_with_http_info(self, name, **kwargs): # noqa: E501 """read_runtime_class # noqa: E501 read the specified RuntimeClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_runtime_class_with_http_info(name, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str name: name of the RuntimeClass (required) :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. 
It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(V1RuntimeClass, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. """ local_var_params = locals() all_params = [ 'name', 'pretty' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method read_runtime_class" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'name' is set if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501 local_var_params['name'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `name` when calling `read_runtime_class`") # noqa: E501 collection_formats = {} path_params = {} if 'name' in local_var_params: path_params['name'] = local_var_params['name'] # noqa: E501 query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501 query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501 # Authentication setting auth_settings = ['BearerToken'] # noqa: E501 return self.api_client.call_api( '/apis/node.k8s.io/v1/runtimeclasses/{name}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1RuntimeClass', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 
_preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats) def replace_runtime_class(self, name, body, **kwargs): # noqa: E501 """replace_runtime_class # noqa: E501 replace the specified RuntimeClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_runtime_class(name, body, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str name: name of the RuntimeClass (required) :param V1RuntimeClass body: (required) :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. 
The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: V1RuntimeClass If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True return self.replace_runtime_class_with_http_info(name, body, **kwargs) # noqa: E501 def replace_runtime_class_with_http_info(self, name, body, **kwargs): # noqa: E501 """replace_runtime_class # noqa: E501 replace the specified RuntimeClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_runtime_class_with_http_info(name, body, async_req=True) >>> result = thread.get() :param async_req bool: execute request asynchronously :param str name: name of the RuntimeClass (required) :param V1RuntimeClass body: (required) :param str pretty: If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget). :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :param str field_validation: fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered. :param _return_http_data_only: response data without head status code and headers :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: tuple(V1RuntimeClass, status_code(int), headers(HTTPHeaderDict)) If the method is called asynchronously, returns the request thread. 
""" local_var_params = locals() all_params = [ 'name', 'body', 'pretty', 'dry_run', 'field_manager', 'field_validation' ] all_params.extend( [ 'async_req', '_return_http_data_only', '_preload_content', '_request_timeout' ] ) for key, val in six.iteritems(local_var_params['kwargs']): if key not in all_params: raise ApiTypeError( "Got an unexpected keyword argument '%s'" " to method replace_runtime_class" % key ) local_var_params[key] = val del local_var_params['kwargs'] # verify the required parameter 'name' is set if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501 local_var_params['name'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `name` when calling `replace_runtime_class`") # noqa: E501 # verify the required parameter 'body' is set if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501 local_var_params['body'] is None): # noqa: E501 raise ApiValueError("Missing the required parameter `body` when calling `replace_runtime_class`") # noqa: E501 collection_formats = {} path_params = {} if 'name' in local_var_params: path_params['name'] = local_var_params['name'] # noqa: E501 query_params = [] if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501 query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501 if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501 query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501 if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501 query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501 if 'field_validation' in local_var_params and local_var_params['field_validation'] is not None: # noqa: E501 query_params.append(('fieldValidation', local_var_params['field_validation'])) # noqa: E501 header_params = {} form_params = [] local_var_files = {} 
body_params = None if 'body' in local_var_params: body_params = local_var_params['body'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/cbor']) # noqa: E501 # Authentication setting auth_settings = ['BearerToken'] # noqa: E501 return self.api_client.call_api( '/apis/node.k8s.io/v1/runtimeclasses/{name}', 'PUT', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='V1RuntimeClass', # noqa: E501 auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501 _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)
NodeV1Api
python
dask__distributed
distributed/utils_test.py
{ "start": 79388, "end": 80897 }
class ____(Worker): """Custom worker class which does not update `scheduler_delay`. This worker class is useful for some tests which make time comparisons using times reported from workers. See also -------- no_time_resync padded_time """ @property def scheduler_delay(self): return 0 @scheduler_delay.setter def scheduler_delay(self, value): pass @pytest.fixture() def no_time_resync(): """Temporarily disable the automatic resync of distributed.metrics._WindowsTime which, every 10 minutes, can cause time() to go backwards a few milliseconds. On Linux, MacOSX, and Windows with Python 3.13+ this fixture is a no-op. See also -------- NoSchedulerDelayWorker padded_time """ if isinstance(time, _WindowsTime): time() # Initialize or refresh delta bak = time.__self__.next_resync time.__self__.next_resync = float("inf") yield time.__self__.next_resync = bak else: yield async def padded_time(before=0.05, after=0.05): """Sample time(), preventing millisecond-magnitude corrections in the wall clock from disrupting monotonicity tests (t0 < t1 < t2 < ...). This prevents frequent flakiness on Windows and, more rarely, in Linux and MacOSX. See also -------- NoSchedulerDelayWorker no_time_resync """ await asyncio.sleep(before) t = time() await asyncio.sleep(after) return t
NoSchedulerDelayWorker
python
weaviate__weaviate-python-client
weaviate/collections/queries/near_image/query/async_.py
{ "start": 310, "end": 457 }
class ____( Generic[Properties, References], _NearImageQueryExecutor[ConnectionAsync, Properties, References], ): pass
_NearImageQueryAsync
python
SmileyChris__easy-thumbnails
easy_thumbnails/tests/test_namers.py
{ "start": 71, "end": 224 }
class ____: def __init__(self, basedir='', subdir=''): self.thumbnail_basedir = basedir self.thumbnail_subdir = subdir
FakeThumbnailer
python
davidhalter__jedi
jedi/inference/base_value.py
{ "start": 10860, "end": 11618 }
class ____(HelperValueMixin): @safe_property def name(self): from jedi.inference.names import ValueName wrapped_name = self._wrapped_value.name if wrapped_name.tree_name is not None: return ValueName(self, wrapped_name.tree_name) else: from jedi.inference.compiled import CompiledValueName return CompiledValueName(self, wrapped_name.string_name) @classmethod @inference_state_as_method_param_cache() def create_cached(cls, inference_state, *args, **kwargs): return cls(*args, **kwargs) def __getattr__(self, name): assert name != '_wrapped_value', 'Problem with _get_wrapped_value' return getattr(self._wrapped_value, name)
_ValueWrapperBase
python
gevent__gevent
src/greentest/3.13/test_threading.py
{ "start": 73710, "end": 76571 }
class ____(unittest.TestCase): def check_interrupt_main_with_signal_handler(self, signum): def handler(signum, frame): 1/0 old_handler = signal.signal(signum, handler) self.addCleanup(signal.signal, signum, old_handler) with self.assertRaises(ZeroDivisionError): _thread.interrupt_main() def check_interrupt_main_noerror(self, signum): handler = signal.getsignal(signum) try: # No exception should arise. signal.signal(signum, signal.SIG_IGN) _thread.interrupt_main(signum) signal.signal(signum, signal.SIG_DFL) _thread.interrupt_main(signum) finally: # Restore original handler signal.signal(signum, handler) @requires_gil_enabled("gh-118433: Flaky due to a longstanding bug") def test_interrupt_main_subthread(self): # Calling start_new_thread with a function that executes interrupt_main # should raise KeyboardInterrupt upon completion. def call_interrupt(): _thread.interrupt_main() t = threading.Thread(target=call_interrupt) with self.assertRaises(KeyboardInterrupt): t.start() t.join() t.join() def test_interrupt_main_mainthread(self): # Make sure that if interrupt_main is called in main thread that # KeyboardInterrupt is raised instantly. 
with self.assertRaises(KeyboardInterrupt): _thread.interrupt_main() def test_interrupt_main_with_signal_handler(self): self.check_interrupt_main_with_signal_handler(signal.SIGINT) self.check_interrupt_main_with_signal_handler(signal.SIGTERM) def test_interrupt_main_noerror(self): self.check_interrupt_main_noerror(signal.SIGINT) self.check_interrupt_main_noerror(signal.SIGTERM) def test_interrupt_main_invalid_signal(self): self.assertRaises(ValueError, _thread.interrupt_main, -1) self.assertRaises(ValueError, _thread.interrupt_main, signal.NSIG) self.assertRaises(ValueError, _thread.interrupt_main, 1000000) @threading_helper.reap_threads def test_can_interrupt_tight_loops(self): cont = [True] started = [False] interrupted = [False] def worker(started, cont, interrupted): iterations = 100_000_000 started[0] = True while cont[0]: if iterations: iterations -= 1 else: return pass interrupted[0] = True t = threading.Thread(target=worker,args=(started, cont, interrupted)) t.start() while not started[0]: pass cont[0] = False t.join() self.assertTrue(interrupted[0])
InterruptMainTests
python
jazzband__django-waffle
waffle/tests/test_testutils.py
{ "start": 9333, "end": 9540 }
class ____(OverrideSwitchOnClassTestsMixin, TestCase): """ Run tests with Django TestCase """ @override_switch('foo', active=False)
OverrideSwitchOnClassTestCase
python
getsentry__sentry
tests/sentry/api/serializers/test_fields.py
{ "start": 290, "end": 441 }
class ____(serializers.Serializer): b_field = serializers.CharField(max_length=64) d_field = serializers.CharField(max_length=64)
ChildSerializer
python
huggingface__transformers
tests/models/xlm_roberta_xl/test_modeling_xlm_roberta_xl.py
{ "start": 13880, "end": 31476 }
class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLModel, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": XLMRobertaXLModel, "fill-mask": XLMRobertaXLForMaskedLM, "question-answering": XLMRobertaXLForQuestionAnswering, "text-classification": XLMRobertaXLForSequenceClassification, "text-generation": XLMRobertaXLForCausalLM, "token-classification": XLMRobertaXLForTokenClassification, "zero-shot": XLMRobertaXLForSequenceClassification, } if is_torch_available() else {} ) model_split_percents = [0.5, 0.85, 0.95] # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if pipeline_test_case_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False # Overwriting to add `is_decoder` flag def prepare_config_and_inputs_for_generate(self, batch_size=2): config, inputs = super().prepare_config_and_inputs_for_generate(batch_size) config.is_decoder = True return config, inputs def setUp(self): self.model_tester = XLMRobertaXLModelTester(self) self.config_tester = ConfigTester(self, config_class=XLMRobertaXLConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): ( config, input_ids, 
token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_create_position_ids_respects_padding_index(self): """This is a regression test for https://github.com/huggingface/transformers/issues/1761 The position ids should be masked with the embedding object's padding index. 
Therefore, the first available non-padding position index is XLMRobertaXLEmbeddings.padding_idx + 1 """ config = self.model_tester.prepare_config_and_inputs()[0] model = XLMRobertaXLEmbeddings(config=config) input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]]) expected_positions = torch.as_tensor( [[0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx]] ) position_ids = XLMRobertaXLEmbeddings.create_position_ids_from_input_ids(input_ids, model.padding_idx) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) def test_create_position_ids_from_inputs_embeds(self): """This is a regression test for https://github.com/huggingface/transformers/issues/1761 The position ids should be masked with the embedding object's padding index. Therefore, the first available non-padding position index is XLMRobertaXLEmbeddings.padding_idx + 1 """ config = self.model_tester.prepare_config_and_inputs()[0] embeddings = XLMRobertaXLEmbeddings(config=config) inputs_embeds = torch.empty(2, 4, 30) expected_single_positions = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions]) position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds, embeddings.padding_idx) self.assertEqual(position_ids.shape, expected_positions.shape) self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) def attention_mask_padding_matches_padding_free_with_position_ids( self, attn_implementation: str, fa_kwargs: bool = False ): """ Overwritten to account for the embeddings that rely on position ids. 
""" if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") max_new_tokens = 30 support_flag = { "sdpa": "_supports_sdpa", "flash_attention_2": "_supports_flash_attn", "flash_attention_3": "_supports_flash_attn", } for model_class in self.all_generative_model_classes: if attn_implementation != "eager" and not getattr(model_class, support_flag[attn_implementation]): self.skipTest(f"{model_class.__name__} does not support {attn_implementation}") # can't infer if new attn mask API is supported by assume that only model with attention backend support it if not model_class._supports_attention_backend: self.skipTest(f"{model_class.__name__} does not support new attention mask API") if model_class._is_stateful: # non-transformer models most probably have no packing support self.skipTest(f"{model_class.__name__} doesn't support packing!") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.is_encoder_decoder: self.skipTest("Model is an encoder-decoder") if 0 not in inputs_dict.get("attention_mask", []) or "attention_mask" not in inputs_dict: self.skipTest("Model dummy inputs should contain padding in their attention mask") if "input_ids" not in inputs_dict or inputs_dict["input_ids"].ndim != 2: self.skipTest("Model dummy inputs should contain text input ids") # make sure that all models have enough positions for generation dummy_input_ids = inputs_dict["input_ids"] if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input_ids.shape[1] + 1 model = model_class(config) if "position_ids" not in inspect.signature(model.forward).parameters: self.skipTest("Model does not support position_ids") if (not fa_kwargs) and "position_ids" not in inspect.signature(model.forward).parameters: continue # this model doesn't accept position ids as input with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # Drop all keys except for 
the minimal set. Hard to manipulate with multimodals etc inputs_dict = {k: v for k, v in inputs_dict.items() if k in ["input_ids", "attention_mask"]} # Ensure left padding, to adapt for some models if 0 in inputs_dict["attention_mask"][:, -1]: inputs_dict["attention_mask"] = inputs_dict["attention_mask"].flip(1) dummy_attention_mask = inputs_dict["attention_mask"] dummy_input_ids[~dummy_attention_mask.bool()] = config.get_text_config().pad_token_id # Main difference to other models, we need to prepare position ids according to the attention mask # as we use it to extract embeddings that rely on the correct position - naively increasing sequences do # not suffice anymore atp. The solution here calculates an increasing sequences for all 1s and puts 0s else. inputs_dict["position_ids"] = ((inputs_dict["attention_mask"] == 1).long().cumsum(dim=1) - 1) * ( inputs_dict["attention_mask"] == 1 ).long() model = ( model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, attn_implementation=attn_implementation, ) .to(torch_device) .eval() ) if fa_kwargs: # flatten features = [ {"input_ids": i[a.bool()].tolist()} for i, a in zip(dummy_input_ids, dummy_attention_mask) ] # add position_ids + fa_kwargs data_collator = DataCollatorWithFlattening(return_tensors="pt", return_flash_attn_kwargs=True) batch = data_collator(features) padfree_inputs_dict = { k: t.to(torch_device) if torch.is_tensor(t) else t for k, t in batch.items() } else: # create packed position_ids position_ids = ( torch.cat([torch.arange(length) for length in dummy_attention_mask.sum(1).tolist()]) .long() .unsqueeze(0) .to(torch_device) ) padfree_inputs_dict = { "input_ids": dummy_input_ids[dummy_attention_mask.bool()].unsqueeze(0), "position_ids": position_ids, } # We need to do simple forward without cache in order to trigger packed SDPA/flex/eager attention path res_padded = model(**inputs_dict, use_cache=False) res_padfree = model(**padfree_inputs_dict, use_cache=False) logits_padded = 
res_padded.logits[dummy_attention_mask.bool()] logits_padfree = res_padfree.logits[0] # acceptable numerical instability tol = torch.finfo(torch.bfloat16).eps torch.testing.assert_close(logits_padded, logits_padfree, rtol=tol, atol=tol) def flash_attn_inference_equivalence( self, attn_implementation: str, padding_side: str, atol: float = 4e-2, rtol: float = 4e-2 ): r""" Overwritten to enforce decoder behavior as the model is very easily influenced by slight changes in the mask. One major reason for the high fluctuations is the extra layernom at the end of the model which shifts the logits a lot. """ if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.is_decoder = True model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation=attn_implementation ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.bfloat16) model.to(torch_device) dummy_input = inputs_dict[model.main_input_name][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: dummy_attention_mask = dummy_attention_mask[:1] if padding_side == "left": dummy_attention_mask[:, 1:] = 1 dummy_attention_mask[:, :1] = 0 else: dummy_attention_mask[:, :-1] = 1 dummy_attention_mask[:, -1:] = 0 # no attention mask processed_inputs = { model.main_input_name: dummy_input, "output_hidden_states": True, } if model.config.is_encoder_decoder: processed_inputs["decoder_input_ids"] = inputs_dict.get("decoder_input_ids", dummy_input)[:1] prepared_inputs = self._prepare_for_class(processed_inputs, model_class) 
prepared_inputs = { k: v.to(torch_device) if isinstance(v, torch.Tensor) else v for k, v in prepared_inputs.items() } outputs = model(**prepared_inputs) outputs_fa = model_fa(**prepared_inputs) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa, logits, atol=atol, rtol=rtol) # with attention mask if dummy_attention_mask is not None: processed_inputs["attention_mask"] = dummy_attention_mask if model.config.is_encoder_decoder: processed_inputs["decoder_attention_mask"] = dummy_attention_mask prepared_inputs = self._prepare_for_class(processed_inputs, model_class) prepared_inputs = { k: v.to(torch_device) if isinstance(v, torch.Tensor) else v for k, v in prepared_inputs.items() } outputs = model(**prepared_inputs) outputs_fa = model_fa(**prepared_inputs) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) if padding_side == "left": assert torch.allclose(logits_fa[1:], logits[1:], atol=atol, rtol=rtol) # check with inference + dropout model.train() _ = model_fa(**prepared_inputs) else: assert torch.allclose(logits_fa[:-1], logits[:-1], atol=atol, rtol=rtol) @unittest.skip("XLM Roberta XL has some higher fluctuations, skipping for now (norm issue)") def test_flash_attn_2_inference_equivalence_right_padding(self): pass @unittest.skip("XLM Roberta XL doesn't work for some reason, FIXME") def test_eager_padding_matches_padding_free_with_position_ids(self): pass @unittest.skip("XLM Roberta XL doesn't work for some reason, FIXME") def test_sdpa_padding_matches_padding_free_with_position_ids(self): pass @require_torch
XLMRobertaXLModelTest
python
getsentry__sentry
tests/sentry/tasks/test_commit_context.py
{ "start": 1936, "end": 4228 }
class ____(TestCase): def setUp(self) -> None: self.project = self.create_project() self.repo = Repository.objects.create( organization_id=self.organization.id, name="example", integration_id=self.integration.id, ) self.code_mapping = self.create_code_mapping( repo=self.repo, project=self.project, stack_root="sentry/", source_root="sentry/", ) self.commit_author = self.create_commit_author(project=self.project, user=self.user) self.commit = self.create_commit( project=self.project, repo=self.repo, author=self.commit_author, key="asdfwreqr", message="placeholder commit message", ) self.group = self.create_group( project=self.project, message="Kaboom!", first_release=self.release ) self.event = self.store_event( data={ "message": "Kaboom!", "platform": "python", "timestamp": before_now(seconds=10).isoformat(), "stacktrace": { "frames": [ { "function": "handle_set_commits", "abs_path": "/usr/src/sentry/src/sentry/tasks.py", "module": "sentry.tasks", "in_app": False, "lineno": 30, "filename": "sentry/tasks.py", }, None, { "function": "set_commits", "abs_path": "/usr/src/sentry/src/sentry/models/release.py", "module": "sentry.models.release", "in_app": True, "lineno": 39, "filename": "sentry/models/release.py", }, ] }, "tags": {"sentry:release": self.release.version}, "fingerprint": ["put-me-in-the-control-group"], }, project_id=self.project.id, )
TestCommitContextIntegration
python
cython__cython
tests/run/class_scope.py
{ "start": 32, "end": 188 }
class ____(object): """ >>> MethodRedef().a(5) 7 """ def a(self, i): return i+1 def a(self, i): return i+2
MethodRedef
python
kamyu104__LeetCode-Solutions
Python/maximize-the-distance-between-points-on-a-square.py
{ "start": 4971, "end": 6262 }
class ____(object): def maxDistance(self, side, points, k): """ :type side: int :type points: List[List[int]] :type k: int :rtype: int """ def binary_search_right(left, right, check): while left <= right: mid = left + (right-left)//2 if not check(mid): right = mid-1 else: left = mid+1 return right def check(d): for i in xrange(len(points)): j = i for _ in xrange(k-1): j = bisect.bisect_left(p, p[j]+d, lo=j+1, hi=i+len(points)) if j == i+len(points): break else: if p[i+len(points)]-p[j] >= d: return True return False p = [] for x, y in points: if x == 0: p.append(0*side+y) elif y == side: p.append(1*side+x) elif x == side: p.append(2*side+(side-y)) else: p.append(3*side+(side-x)) p.sort() p += [x+4*side for x in p] return binary_search_right(1, 4*side//k, check)
Solution4
python
walkccc__LeetCode
solutions/1618. Maximum Font to Fit a Sentence in a Screen/1618.py
{ "start": 324, "end": 1095 }
class ____: def maxFont( self, text: str, w: int, h: int, fonts: list[int], fontInfo: 'FontInfo', ) -> int: count = collections.Counter(text) l = 0 r = len(fonts) - 1 while l < r: m = (l + r + 1) // 2 if fontInfo.getHeight( fonts[m]) <= h and self._getWidthSum( count, fonts[m], fontInfo) <= w: l = m else: r = m - 1 return fonts[l] if self._getWidthSum(count, fonts[l], fontInfo) <= w else -1 def _getWidthSum( self, count: list[int], font: int, fontInfo: 'FontInfo', ) -> int: width = 0 for c in string.ascii_lowercase: width += count[c] * fontInfo.getWidth(font, c) return width
Solution
python
python-jsonschema__jsonschema
jsonschema/tests/test_utils.py
{ "start": 2273, "end": 4163 }
class ____(TestCase): def test_equal_lists(self): list_1 = ["a", "b", "c"] list_2 = ["a", "b", "c"] self.assertTrue(equal(list_1, list_2)) def test_equal_lists_with_nan(self): list_1 = ["a", nan, "c"] list_2 = ["a", nan, "c"] self.assertTrue(equal(list_1, list_2)) def test_unsorted_lists(self): list_1 = ["a", "b", "c"] list_2 = ["b", "b", "a"] self.assertFalse(equal(list_1, list_2)) def test_first_list_larger(self): list_1 = ["a", "b", "c"] list_2 = ["a", "b"] self.assertFalse(equal(list_1, list_2)) def test_second_list_larger(self): list_1 = ["a", "b"] list_2 = ["a", "b", "c"] self.assertFalse(equal(list_1, list_2)) def test_list_with_none_unequal(self): list_1 = ["a", "b", None] list_2 = ["a", "b", "c"] self.assertFalse(equal(list_1, list_2)) list_1 = ["a", "b", None] list_2 = [None, "b", "c"] self.assertFalse(equal(list_1, list_2)) def test_list_with_none_equal(self): list_1 = ["a", None, "c"] list_2 = ["a", None, "c"] self.assertTrue(equal(list_1, list_2)) def test_empty_list(self): list_1 = [] list_2 = [] self.assertTrue(equal(list_1, list_2)) def test_one_none(self): list_1 = None list_2 = [] self.assertFalse(equal(list_1, list_2)) def test_same_list(self): list_1 = ["a", "b", "c"] self.assertTrue(equal(list_1, list_1)) def test_equal_nested_lists(self): list_1 = ["a", ["b", "c"], "d"] list_2 = ["a", ["b", "c"], "d"] self.assertTrue(equal(list_1, list_2)) def test_unequal_nested_lists(self): list_1 = ["a", ["b", "c"], "d"] list_2 = ["a", [], "c"] self.assertFalse(equal(list_1, list_2))
TestListEqual
python
huggingface__transformers
src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py
{ "start": 11251, "end": 12223 }
class ____(nn.Module): # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerFeatureProjection.__init__ def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.feature_projection_input_dim, eps=config.layer_norm_eps) self.projection = nn.Linear(config.feature_projection_input_dim, config.hidden_size) self.dropout = nn.Dropout(config.speech_encoder_dropout) def forward(self, hidden_states): # non-projected hidden states are needed for quantization norm_hidden_states = self.layer_norm(hidden_states.to(self.layer_norm.weight.dtype)) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TConformerFeedForward with SeamlessM4T->SeamlessM4Tv2
SeamlessM4Tv2ConformerFeatureProjection
python
ansible__ansible
test/units/cli/test_cli.py
{ "start": 3880, "end": 17591 }
class ____(unittest.TestCase): def setUp(self): self.fake_loader = DictDataLoader({}) self.tty_patcher = patch('ansible.cli.sys.stdin.isatty', return_value=True) self.mock_isatty = self.tty_patcher.start() self.display_v_patcher = patch('ansible.cli.display.verbosity', return_value=6) self.mock_display_v = self.display_v_patcher.start() cli.display.verbosity = 5 def tearDown(self): self.tty_patcher.stop() self.display_v_patcher.stop() cli.display.verbosity = 0 def test(self): res = cli.CLI.setup_vault_secrets(None, None, auto_prompt=False) self.assertIsInstance(res, list) @patch('ansible.cli.get_file_vault_secret') def test_password_file(self, mock_file_secret): filename = '/dev/null/secret' mock_file_secret.return_value = MagicMock(bytes=b'file1_password', vault_id='file1', filename=filename) res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, vault_ids=['secret1@%s' % filename, 'secret2'], vault_password_files=[filename]) self.assertIsInstance(res, list) matches = vault.match_secrets(res, ['secret1']) self.assertIn('secret1', [x[0] for x in matches]) match = matches[0][1] self.assertEqual(match.bytes, b'file1_password') @patch('ansible.cli.PromptVaultSecret') def test_prompt(self, mock_prompt_secret): mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', vault_id='prompt1') res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, vault_ids=['prompt1@prompt'], ask_vault_pass=True, auto_prompt=False) self.assertIsInstance(res, list) matches = vault.match_secrets(res, ['prompt1']) self.assertIn('prompt1', [x[0] for x in matches]) match = matches[0][1] self.assertEqual(match.bytes, b'prompt1_password') @patch('ansible.cli.PromptVaultSecret') def test_prompt_no_tty(self, mock_prompt_secret): self.mock_isatty.return_value = False mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', vault_id='prompt1', name='bytes_should_be_prompt1_password', spec=vault.PromptVaultSecret) res = 
cli.CLI.setup_vault_secrets(loader=self.fake_loader, vault_ids=['prompt1@prompt'], ask_vault_pass=True, auto_prompt=False) self.assertIsInstance(res, list) self.assertEqual(len(res), 2) matches = vault.match_secrets(res, ['prompt1']) self.assertIn('prompt1', [x[0] for x in matches]) self.assertEqual(len(matches), 1) @patch('ansible.cli.get_file_vault_secret') @patch('ansible.cli.PromptVaultSecret') def test_prompt_no_tty_and_password_file(self, mock_prompt_secret, mock_file_secret): self.mock_isatty.return_value = False mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', vault_id='prompt1') filename = '/dev/null/secret' mock_file_secret.return_value = MagicMock(bytes=b'file1_password', vault_id='file1', filename=filename) res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, vault_ids=['prompt1@prompt', 'file1@/dev/null/secret'], ask_vault_pass=True) self.assertIsInstance(res, list) matches = vault.match_secrets(res, ['file1']) self.assertIn('file1', [x[0] for x in matches]) self.assertNotIn('prompt1', [x[0] for x in matches]) match = matches[0][1] self.assertEqual(match.bytes, b'file1_password') def _assert_ids(self, vault_id_names, res, password=b'prompt1_password'): self.assertIsInstance(res, list) len_ids = len(vault_id_names) matches = vault.match_secrets(res, vault_id_names) self.assertEqual(len(res), len_ids, 'len(res):%s does not match len_ids:%s' % (len(res), len_ids)) self.assertEqual(len(matches), len_ids) for index, prompt in enumerate(vault_id_names): self.assertIn(prompt, [x[0] for x in matches]) # simple mock, same password/prompt for each mock_prompt_secret self.assertEqual(matches[index][1].bytes, password) @patch('ansible.cli.PromptVaultSecret') def test_multiple_prompts(self, mock_prompt_secret): mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', vault_id='prompt1') res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, vault_ids=['prompt1@prompt', 'prompt2@prompt'], ask_vault_pass=False) 
vault_id_names = ['prompt1', 'prompt2'] self._assert_ids(vault_id_names, res) @patch('ansible.cli.PromptVaultSecret') def test_multiple_prompts_and_ask_vault_pass(self, mock_prompt_secret): self.mock_isatty.return_value = False mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', vault_id='prompt1') res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, vault_ids=['prompt1@prompt', 'prompt2@prompt', 'prompt3@prompt_ask_vault_pass'], ask_vault_pass=True) # We provide some vault-ids and secrets, so auto_prompt shouldn't get triggered, # so there is vault_id_names = ['prompt1', 'prompt2', 'prompt3', 'default'] self._assert_ids(vault_id_names, res) @patch('ansible.cli.C') @patch('ansible.cli.get_file_vault_secret') @patch('ansible.cli.PromptVaultSecret') def test_default_file_vault(self, mock_prompt_secret, mock_file_secret, mock_config): mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', vault_id='default') mock_file_secret.return_value = MagicMock(bytes=b'file1_password', vault_id='default') mock_config.DEFAULT_VAULT_PASSWORD_FILE = '/dev/null/faux/vault_password_file' mock_config.DEFAULT_VAULT_IDENTITY = 'default' res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, vault_ids=[], create_new_password=False, ask_vault_pass=False) self.assertIsInstance(res, list) matches = vault.match_secrets(res, ['default']) # --vault-password-file/DEFAULT_VAULT_PASSWORD_FILE is higher precendce than prompts # if the same vault-id ('default') regardless of cli order since it didn't matter in 2.3 self.assertEqual(matches[0][1].bytes, b'file1_password') self.assertEqual(len(matches), 1) VaultSecretsContext._current = None res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, vault_ids=[], create_new_password=False, ask_vault_pass=True, auto_prompt=True) self.assertIsInstance(res, list) matches = vault.match_secrets(res, ['default']) self.assertEqual(matches[0][1].bytes, b'file1_password') self.assertEqual(matches[1][1].bytes, 
b'prompt1_password') self.assertEqual(len(matches), 2) @patch('ansible.cli.get_file_vault_secret') @patch('ansible.cli.PromptVaultSecret') def test_default_file_vault_identity_list(self, mock_prompt_secret, mock_file_secret): default_vault_ids = ['some_prompt@prompt', 'some_file@/dev/null/secret'] mock_prompt_secret.return_value = MagicMock(bytes=b'some_prompt_password', vault_id='some_prompt') filename = '/dev/null/secret' mock_file_secret.return_value = MagicMock(bytes=b'some_file_password', vault_id='some_file', filename=filename) vault_ids = default_vault_ids res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, vault_ids=vault_ids, create_new_password=False, ask_vault_pass=True) self.assertIsInstance(res, list) matches = vault.match_secrets(res, ['some_file']) # --vault-password-file/DEFAULT_VAULT_PASSWORD_FILE is higher precendce than prompts # if the same vault-id ('default') regardless of cli order since it didn't matter in 2.3 self.assertEqual(matches[0][1].bytes, b'some_file_password') matches = vault.match_secrets(res, ['some_prompt']) self.assertEqual(matches[0][1].bytes, b'some_prompt_password') @patch('ansible.cli.PromptVaultSecret') def test_prompt_just_ask_vault_pass(self, mock_prompt_secret): mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', vault_id='default') res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, vault_ids=[], create_new_password=False, ask_vault_pass=True) self.assertIsInstance(res, list) match = vault.match_secrets(res, ['default'])[0][1] self.assertEqual(match.bytes, b'prompt1_password') @patch('ansible.cli.PromptVaultSecret') def test_prompt_new_password_ask_vault_pass(self, mock_prompt_secret): mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', vault_id='default') res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, vault_ids=[], create_new_password=True, ask_vault_pass=True) self.assertIsInstance(res, list) match = vault.match_secrets(res, ['default'])[0][1] 
self.assertEqual(match.bytes, b'prompt1_password') @patch('ansible.cli.PromptVaultSecret') def test_prompt_new_password_vault_id_prompt(self, mock_prompt_secret): mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', vault_id='some_vault_id') res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, vault_ids=['some_vault_id@prompt'], create_new_password=True, ask_vault_pass=False) self.assertIsInstance(res, list) match = vault.match_secrets(res, ['some_vault_id'])[0][1] self.assertEqual(match.bytes, b'prompt1_password') @patch('ansible.cli.PromptVaultSecret') def test_prompt_new_password_vault_id_prompt_ask_vault_pass(self, mock_prompt_secret): mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', vault_id='default') res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, vault_ids=['some_vault_id@prompt_ask_vault_pass'], create_new_password=True, ask_vault_pass=False) self.assertIsInstance(res, list) match = vault.match_secrets(res, ['some_vault_id'])[0][1] self.assertEqual(match.bytes, b'prompt1_password') @patch('ansible.cli.PromptVaultSecret') def test_prompt_new_password_vault_id_prompt_ask_vault_pass_ask_vault_pass(self, mock_prompt_secret): mock_prompt_secret.return_value = MagicMock(bytes=b'prompt1_password', vault_id='default') res = cli.CLI.setup_vault_secrets(loader=self.fake_loader, vault_ids=['some_vault_id@prompt_ask_vault_pass'], create_new_password=True, ask_vault_pass=True) self.assertIsInstance(res, list) match = vault.match_secrets(res, ['some_vault_id'])[0][1] self.assertEqual(match.bytes, b'prompt1_password')
TestCliSetupVaultSecrets
python
pytorch__pytorch
torch/_inductor/template_heuristics/triton.py
{ "start": 95002, "end": 95280 }
class ____(MMTemplateConfigMixin, MTIAConfigHeuristic): """Standard MM template heuristic for MTIA""" @register_template_heuristic(mm_template.uid, "mtia", op_name="addmm") @register_template_heuristic(bmm_template.uid, "mtia", op_name="baddbmm")
MTIAMMTemplateConfigHeuristic
python
Textualize__textual
tests/snapshot_tests/snapshot_apps/data_table_style_order.py
{ "start": 622, "end": 1716 }
class ____(App): """Regression test snapshot app which ensures that styles are layered on top of each other correctly in the DataTable. In this example, the colour of the text in the cells under the cursor should not be red, because the CSS should be applied on top.""" CSS = """ DataTable {margin-bottom: 1;} DataTable > .datatable--cursor { color: $accent; background: $success; text-style: bold italic; } """ def compose(self) -> ComposeResult: priorities: list[ tuple[Literal["css", "renderable"], Literal["css", "renderable"]] ] = [ ("css", "css"), ("css", "renderable"), ("renderable", "renderable"), ("renderable", "css"), ] for foreground, background in priorities: yield Label(f"Foreground is {foreground!r}, background is {background!r}:") table = make_datatable(foreground, background) yield table app = DataTableCursorStyles() if __name__ == "__main__": app.run()
DataTableCursorStyles
python
pandas-dev__pandas
asv_bench/benchmarks/tslibs/normalize.py
{ "start": 344, "end": 1209 }
class ____: params = [ _sizes, _tzs, ] param_names = ["size", "tz"] def setup(self, size, tz): # use an array that will have is_date_array_normalized give True, # so we do not short-circuit early. dti = pd.date_range("2016-01-01", periods=10, tz=tz).repeat(size // 10) self.i8data = dti.asi8 if size == 10**6 and tz is tzlocal_obj: # tzlocal is cumbersomely slow, so skip to keep runtime in check raise NotImplementedError def time_normalize_i8_timestamps(self, size, tz): # 10 i.e. NPY_FR_ns normalize_i8_timestamps(self.i8data, tz, 10) def time_is_date_array_normalized(self, size, tz): # TODO: cases with different levels of short-circuiting # 10 i.e. NPY_FR_ns is_date_array_normalized(self.i8data, tz, 10)
Normalize
python
huggingface__transformers
src/transformers/models/timesfm/modeling_timesfm.py
{ "start": 11651, "end": 12147 }
class ____(PreTrainedModel): config: TimesFmConfig base_model_prefix = "timesfm" _no_split_modules = ["TimesFmDecoderLayer"] main_input_name = "past_values" input_modalities = ("time",) _supports_sdpa = True @torch.no_grad() def _init_weights(self, module): super()._init_weights(module) if isinstance(module, TimesFmAttention): # Initialize scaling parameter init.ones_(module.scaling) @auto_docstring
TimesFmPreTrainedModel
python
tensorflow__tensorflow
tensorflow/python/keras/engine/training_distributed_v1.py
{ "start": 29105, "end": 29780 }
class ____(training_utils_v1.TrainingLoop): """Training loop for distribution strategy with multiple worker.""" def __init__(self, single_worker_loop): self._single_worker_loop = single_worker_loop def fit(self, *args, **kwargs): return _train_with_multi_worker(self._single_worker_loop.fit)( *args, **kwargs) def evaluate(self, *args, **kwargs): return _train_with_multi_worker(self._single_worker_loop.evaluate)( *args, **kwargs) def predict(self, *args, **kwargs): # Currently predict is still using the single worker implementation. return self._single_worker_loop.predict(*args, **kwargs)
DistributionMultiWorkerTrainingLoop
python
airbytehq__airbyte
airbyte-integrations/connectors/source-instagram/unit_tests/integration/test_users.py
{ "start": 1415, "end": 2187 }
class ____(TestCase): @staticmethod def _read(config_: ConfigBuilder, expecting_exception: bool = False) -> EntrypointOutput: return read_output( config_builder=config_, stream_name=_STREAM_NAME, sync_mode=SyncMode.full_refresh, expecting_exception=expecting_exception, ) @HttpMocker() def test_read_records(self, http_mocker: HttpMocker) -> None: http_mocker.get( get_account_request().build(), get_account_response(), ) http_mocker.get( _get_request().build(), _get_response().with_record(_record()).build(), ) output = self._read(config_=config()) assert len(output.records) == 1
TestFullRefresh
python
huggingface__transformers
src/transformers/masking_utils.py
{ "start": 64904, "end": 66448 }
class ____(torch.Tensor): def __new__(cls, data, style=None): # Create a new instance of AttentionMask as a Tensor cls.style = style return torch.Tensor._make_subclass(cls, data, require_grad=False) def __init__(self, data): # You can initialize any additional metadata here if needed pass def to_string(self, grid_size=(20, 40), limit=4): """Returns a string representation of the block mask.""" dense_mask = self *batch_dims, num_rows, num_cols = dense_mask.shape total_vis = [] for idx, batch_idx in enumerate(itertools.product(*[range(i) for i in batch_dims])): if idx == limit: total_vis.append("...") total_vis.append("To print out more, set AttentionMask.to_string(limit=N)") total_vis.append("You can also index (AttentionMask[batch, head]) to choose a specific batch or head") break block_vis = tensor_to_mask_visual(dense_mask[batch_idx], grid_size=grid_size, style=self.style) total_vis.append(block_vis) total_vis.append(f"torch.Tensor(shape={tuple(self.shape)}, dtype={self.dtype})") return "\n".join(total_vis) def __repr__(self): return self.to_string() def __str__(self): return self.to_string() @classmethod def from_tensor(cls, tensor: torch.Tensor, style: Optional[str] = None) -> "AttentionMask": res = cls(tensor) res.style = style return res
AttentionMask
python
spyder-ide__spyder
external-deps/spyder-remote-services/spyder_remote_services/services/files/handlers.py
{ "start": 7235, "end": 7542 }
class ____(BaseFSHandler): @web.authenticated @authorized def post(self): path = self.get_path_argument("path") truncate = (self.get_argument("truncate", "true").lower() == "true") result = self.fs_touch(path, truncate=truncate) self.write_json(result)
TouchHandler
python
mwaskom__seaborn
seaborn/_core/plot.py
{ "start": 7467, "end": 33453 }
class ____: """ An interface for declaratively specifying statistical graphics. Plots are constructed by initializing this class and adding one or more layers, comprising a `Mark` and optional `Stat` or `Move`. Additionally, faceting variables or variable pairings may be defined to divide the space into multiple subplots. The mappings from data values to visual properties can be parametrized using scales, although the plot will try to infer good defaults when scales are not explicitly defined. The constructor accepts a data source (a :class:`pandas.DataFrame` or dictionary with columnar values) and variable assignments. Variables can be passed as keys to the data source or directly as data vectors. If multiple data-containing objects are provided, they will be index-aligned. The data source and variables defined in the constructor will be used for all layers in the plot, unless overridden or disabled when adding a layer. The following variables can be defined in the constructor: {known_properties} The `data`, `x`, and `y` variables can be passed as positional arguments or using keywords. Whether the first positional argument is interpreted as a data source or `x` variable depends on its type. The methods of this class return a copy of the instance; use chaining to build up a plot through multiple calls. Methods can be called in any order. Most methods only add information to the plot spec; no actual processing happens until the plot is shown or saved. It is also possible to compile the plot without rendering it to access the lower-level representation. 
""" config = PlotConfig() _data: PlotData _layers: list[Layer] _scales: dict[str, Scale] _shares: dict[str, bool | str] _limits: dict[str, tuple[Any, Any]] _labels: dict[str, str | Callable[[str], str]] _theme: dict[str, Any] _facet_spec: FacetSpec _pair_spec: PairSpec _figure_spec: dict[str, Any] _subplot_spec: dict[str, Any] _layout_spec: dict[str, Any] def __init__( self, *args: DataSource | VariableSpec, data: DataSource = None, **variables: VariableSpec, ): if args: data, variables = self._resolve_positionals(args, data, variables) unknown = [x for x in variables if x not in PROPERTIES] if unknown: err = f"Plot() got unexpected keyword argument(s): {', '.join(unknown)}" raise TypeError(err) self._data = PlotData(data, variables) self._layers = [] self._scales = {} self._shares = {} self._limits = {} self._labels = {} self._theme = {} self._facet_spec = {} self._pair_spec = {} self._figure_spec = {} self._subplot_spec = {} self._layout_spec = {} self._target = None def _resolve_positionals( self, args: tuple[DataSource | VariableSpec, ...], data: DataSource, variables: dict[str, VariableSpec], ) -> tuple[DataSource, dict[str, VariableSpec]]: """Handle positional arguments, which may contain data / x / y.""" if len(args) > 3: err = "Plot() accepts no more than 3 positional arguments (data, x, y)." 
raise TypeError(err) if ( isinstance(args[0], (abc.Mapping, pd.DataFrame)) or hasattr(args[0], "__dataframe__") ): if data is not None: raise TypeError("`data` given by both name and position.") data, args = args[0], args[1:] if len(args) == 2: x, y = args elif len(args) == 1: x, y = *args, None else: x = y = None for name, var in zip("yx", (y, x)): if var is not None: if name in variables: raise TypeError(f"`{name}` given by both name and position.") # Keep coordinates at the front of the variables dict # Cast type because we know this isn't a DataSource at this point variables = {name: cast(VariableSpec, var), **variables} return data, variables def __add__(self, other): if isinstance(other, Mark) or isinstance(other, Stat): raise TypeError("Sorry, this isn't ggplot! Perhaps try Plot.add?") other_type = other.__class__.__name__ raise TypeError(f"Unsupported operand type(s) for +: 'Plot' and '{other_type}") def _repr_png_(self) -> tuple[bytes, dict[str, float]] | None: if Plot.config.display["format"] != "png": return None return self.plot()._repr_png_() def _repr_svg_(self) -> str | None: if Plot.config.display["format"] != "svg": return None return self.plot()._repr_svg_() def _clone(self) -> Plot: """Generate a new object with the same information as the current spec.""" new = Plot() # TODO any way to enforce that data does not get mutated? 
new._data = self._data new._layers.extend(self._layers) new._scales.update(self._scales) new._shares.update(self._shares) new._limits.update(self._limits) new._labels.update(self._labels) new._theme.update(self._theme) new._facet_spec.update(self._facet_spec) new._pair_spec.update(self._pair_spec) new._figure_spec.update(self._figure_spec) new._subplot_spec.update(self._subplot_spec) new._layout_spec.update(self._layout_spec) new._target = self._target return new def _theme_with_defaults(self) -> dict[str, Any]: theme = self.config.theme.copy() theme.update(self._theme) return theme @property def _variables(self) -> list[str]: variables = ( list(self._data.frame) + list(self._pair_spec.get("variables", [])) + list(self._facet_spec.get("variables", [])) ) for layer in self._layers: variables.extend(v for v in layer["vars"] if v not in variables) # Coerce to str in return to appease mypy; we know these will only # ever be strings but I don't think we can type a DataFrame that way yet return [str(v) for v in variables] def on(self, target: Axes | SubFigure | Figure) -> Plot: """ Provide existing Matplotlib figure or axes for drawing the plot. When using this method, you will also need to explicitly call a method that triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot` first to compile the plot without rendering it. Parameters ---------- target : Axes, SubFigure, or Figure Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add artists without otherwise modifying the figure. Otherwise, subplots will be created within the space of the given :class:`matplotlib.figure.Figure` or :class:`matplotlib.figure.SubFigure`. Examples -------- .. 
include:: ../docstrings/objects.Plot.on.rst """ accepted_types: tuple # Allow tuple of various length accepted_types = ( mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure ) accepted_types_str = ( f"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}" ) if not isinstance(target, accepted_types): err = ( f"The `Plot.on` target must be an instance of {accepted_types_str}. " f"You passed an instance of {target.__class__} instead." ) raise TypeError(err) new = self._clone() new._target = target return new def add( self, mark: Mark, *transforms: Stat | Move, orient: str | None = None, legend: bool = True, label: str | None = None, data: DataSource = None, **variables: VariableSpec, ) -> Plot: """ Specify a layer of the visualization in terms of mark and data transform(s). This is the main method for specifying how the data should be visualized. It can be called multiple times with different arguments to define a plot with multiple layers. Parameters ---------- mark : :class:`Mark` The visual representation of the data to use in this layer. transforms : :class:`Stat` or :class:`Move` Objects representing transforms to be applied before plotting the data. Currently, at most one :class:`Stat` can be used, and it must be passed first. This constraint will be relaxed in the future. orient : "x", "y", "v", or "h" The orientation of the mark, which also affects how transforms are computed. Typically corresponds to the axis that defines groups for aggregation. The "v" (vertical) and "h" (horizontal) options are synonyms for "x" / "y", but may be more intuitive with some marks. When not provided, an orientation will be inferred from characteristics of the data and scales. legend : bool Option to suppress the mark/mappings for this layer from the legend. label : str A label to use for the layer in the legend, independent of any mappings. data : DataFrame or dict Data source to override the global source provided in the constructor. 
variables : data vectors or identifiers Additional layer-specific variables, including variables that will be passed directly to the transforms without scaling. Examples -------- .. include:: ../docstrings/objects.Plot.add.rst """ if not isinstance(mark, Mark): msg = f"mark must be a Mark instance, not {type(mark)!r}." raise TypeError(msg) # TODO This API for transforms was a late decision, and previously Plot.add # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances. # It will take some work to refactor the internals so that Stat and Move are # treated identically, and until then well need to "unpack" the transforms # here and enforce limitations on the order / types. stat: Optional[Stat] move: Optional[List[Move]] error = False if not transforms: stat, move = None, None elif isinstance(transforms[0], Stat): stat = transforms[0] move = [m for m in transforms[1:] if isinstance(m, Move)] error = len(move) != len(transforms) - 1 else: stat = None move = [m for m in transforms if isinstance(m, Move)] error = len(move) != len(transforms) if error: msg = " ".join([ "Transforms must have at most one Stat type (in the first position),", "and all others must be a Move type. Given transform type(s):", ", ".join(str(type(t).__name__) for t in transforms) + "." ]) raise TypeError(msg) new = self._clone() new._layers.append({ "mark": mark, "stat": stat, "move": move, # TODO it doesn't work to supply scalars to variables, but it should "vars": variables, "source": data, "legend": legend, "label": label, "orient": {"v": "x", "h": "y"}.get(orient, orient), # type: ignore }) return new def pair( self, x: VariableSpecList = None, y: VariableSpecList = None, wrap: int | None = None, cross: bool = True, ) -> Plot: """ Produce subplots by pairing multiple `x` and/or `y` variables. Parameters ---------- x, y : sequence(s) of data vectors or identifiers Variables that will define the grid of subplots. 
wrap : int When using only `x` or `y`, "wrap" subplots across a two-dimensional grid with this many columns (when using `x`) or rows (when using `y`). cross : bool When False, zip the `x` and `y` lists such that the first subplot gets the first pair, the second gets the second pair, etc. Otherwise, create a two-dimensional grid from the cartesian product of the lists. Examples -------- .. include:: ../docstrings/objects.Plot.pair.rst """ # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows # This may also be possible by setting `wrap=1`, but is that too unobvious? # TODO PairGrid features not currently implemented: diagonals, corner pair_spec: PairSpec = {} axes = {"x": [] if x is None else x, "y": [] if y is None else y} for axis, arg in axes.items(): if isinstance(arg, (str, int)): err = f"You must pass a sequence of variable keys to `{axis}`" raise TypeError(err) pair_spec["variables"] = {} pair_spec["structure"] = {} for axis in "xy": keys = [] for i, col in enumerate(axes[axis]): key = f"{axis}{i}" keys.append(key) pair_spec["variables"][key] = col if keys: pair_spec["structure"][axis] = keys if not cross and len(axes["x"]) != len(axes["y"]): err = "Lengths of the `x` and `y` lists must match with cross=False" raise ValueError(err) pair_spec["cross"] = cross pair_spec["wrap"] = wrap new = self._clone() new._pair_spec.update(pair_spec) return new def facet( self, col: VariableSpec = None, row: VariableSpec = None, order: OrderSpec | dict[str, OrderSpec] = None, wrap: int | None = None, ) -> Plot: """ Produce subplots with conditional subsets of the data. Parameters ---------- col, row : data vectors or identifiers Variables used to define subsets along the columns and/or rows of the grid. Can be references to the global data source passed in the constructor. order : list of strings, or dict with dimensional keys Define the order of the faceting variables. 
wrap : int When using only `col` or `row`, wrap subplots across a two-dimensional grid with this many subplots on the faceting dimension. Examples -------- .. include:: ../docstrings/objects.Plot.facet.rst """ variables: dict[str, VariableSpec] = {} if col is not None: variables["col"] = col if row is not None: variables["row"] = row structure = {} if isinstance(order, dict): for dim in ["col", "row"]: dim_order = order.get(dim) if dim_order is not None: structure[dim] = list(dim_order) elif order is not None: if col is not None and row is not None: err = " ".join([ "When faceting on both col= and row=, passing `order` as a list" "is ambiguous. Use a dict with 'col' and/or 'row' keys instead." ]) raise RuntimeError(err) elif col is not None: structure["col"] = list(order) elif row is not None: structure["row"] = list(order) spec: FacetSpec = { "variables": variables, "structure": structure, "wrap": wrap, } new = self._clone() new._facet_spec.update(spec) return new # TODO def twin()? def scale(self, **scales: Scale) -> Plot: """ Specify mappings from data units to visual properties. Keywords correspond to variables defined in the plot, including coordinate variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.). A number of "magic" arguments are accepted, including: - The name of a transform (e.g., `"log"`, `"sqrt"`) - The name of a palette (e.g., `"viridis"`, `"muted"`) - A tuple of values, defining the output range (e.g. `(1, 5)`) - A dict, implying a :class:`Nominal` scale (e.g. `{"a": .2, "b": .5}`) - A list of values, implying a :class:`Nominal` scale (e.g. `["b", "r"]`) For more explicit control, pass a scale spec object such as :class:`Continuous` or :class:`Nominal`. Or pass `None` to use an "identity" scale, which treats data values as literally encoding visual properties. Examples -------- .. 
include:: ../docstrings/objects.Plot.scale.rst """ new = self._clone() new._scales.update(scales) return new def share(self, **shares: bool | str) -> Plot: """ Control sharing of axis limits and ticks across subplots. Keywords correspond to variables defined in the plot, and values can be boolean (to share across all subplots), or one of "row" or "col" (to share more selectively across one dimension of a grid). Behavior for non-coordinate variables is currently undefined. Examples -------- .. include:: ../docstrings/objects.Plot.share.rst """ new = self._clone() new._shares.update(shares) return new def limit(self, **limits: tuple[Any, Any]) -> Plot: """ Control the range of visible data. Keywords correspond to variables defined in the plot, and values are a `(min, max)` tuple (where either can be `None` to leave unset). Limits apply only to the axis; data outside the visible range are still used for any stat transforms and added to the plot. Behavior for non-coordinate variables is currently undefined. Examples -------- .. include:: ../docstrings/objects.Plot.limit.rst """ new = self._clone() new._limits.update(limits) return new def label( self, *, title: str | None = None, legend: str | None = None, **variables: str | Callable[[str], str] ) -> Plot: """ Control the labels and titles for axes, legends, and subplots. Additional keywords correspond to variables defined in the plot. Values can be one of the following types: - string (used literally; pass "" to clear the default label) - function (called on the default label) For coordinate variables, the value sets the axis label. For semantic variables, the value sets the legend title. For faceting variables, `title=` modifies the subplot-specific label, while `col=` and/or `row=` add a label for the faceting variable. When using a single subplot, `title=` sets its title. The `legend=` parameter sets the title for the "layer" legend (i.e., when using `label` in :meth:`Plot.add`). Examples -------- .. 
include:: ../docstrings/objects.Plot.label.rst """ new = self._clone() if title is not None: new._labels["title"] = title if legend is not None: new._labels["legend"] = legend new._labels.update(variables) return new def layout( self, *, size: tuple[float, float] | Default = default, engine: str | None | Default = default, extent: tuple[float, float, float, float] | Default = default, ) -> Plot: """ Control the figure size and layout. .. note:: Default figure sizes and the API for specifying the figure size are subject to change in future "experimental" releases of the objects API. The default layout engine may also change. Parameters ---------- size : (width, height) Size of the resulting figure, in inches. Size is inclusive of legend when using pyplot, but not otherwise. engine : {{"tight", "constrained", "none"}} Name of method for automatically adjusting the layout to remove overlap. The default depends on whether :meth:`Plot.on` is used. extent : (left, bottom, right, top) Boundaries of the plot layout, in fractions of the figure size. Takes effect through the layout engine; exact results will vary across engines. Note: the extent includes axis decorations when using a layout engine, but it is exclusive of them when `engine="none"`. Examples -------- .. include:: ../docstrings/objects.Plot.layout.rst """ # TODO add an "auto" mode for figsize that roughly scales with the rcParams # figsize (so that works), but expands to prevent subplots from being squished # Also should we have height=, aspect=, exclusive with figsize? Or working # with figsize when only one is defined? new = self._clone() if size is not default: new._figure_spec["figsize"] = size if engine is not default: new._layout_spec["engine"] = engine if extent is not default: new._layout_spec["extent"] = extent return new # TODO def legend (ugh) def theme(self, config: Mapping[str, Any], /) -> Plot: """ Control the appearance of elements in the plot. .. 
note:: The API for customizing plot appearance is not yet finalized. Currently, the only valid argument is a dict of matplotlib rc parameters. (This dict must be passed as a positional argument.) It is likely that this method will be enhanced in future releases. Matplotlib rc parameters are documented on the following page: https://matplotlib.org/stable/tutorials/introductory/customizing.html Examples -------- .. include:: ../docstrings/objects.Plot.theme.rst """ new = self._clone() rc = mpl.RcParams(config) new._theme.update(rc) return new def save(self, loc, **kwargs) -> Plot: """ Compile the plot and write it to a buffer or file on disk. Parameters ---------- loc : str, path, or buffer Location on disk to save the figure, or a buffer to write into. kwargs Other keyword arguments are passed through to :meth:`matplotlib.figure.Figure.savefig`. """ # TODO expose important keyword arguments in our signature? with theme_context(self._theme_with_defaults()): self._plot().save(loc, **kwargs) return self def show(self, **kwargs) -> None: """ Compile the plot and display it by hooking into pyplot. Calling this method is not necessary to render a plot in notebook context, but it may be in other environments (e.g., in a terminal). After compiling the plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters). Unlike other :class:`Plot` methods, there is no return value. This should be the last method you call when specifying a plot. """ # TODO make pyplot configurable at the class level, and when not using, # import IPython.display and call on self to populate cell output? # Keep an eye on whether matplotlib implements "attaching" an existing # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024 self.plot(pyplot=True).show(**kwargs) def plot(self, pyplot: bool = False) -> Plotter: """ Compile the plot spec and return the Plotter object. 
""" with theme_context(self._theme_with_defaults()): return self._plot(pyplot) def _plot(self, pyplot: bool = False) -> Plotter: # TODO if we have _target object, pyplot should be determined by whether it # is hooked into the pyplot state machine (how do we check?) plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults()) # Process the variable assignments and initialize the figure common, layers = plotter._extract_data(self) plotter._setup_figure(self, common, layers) # Process the scale spec for coordinate variables and transform their data coord_vars = [v for v in self._variables if re.match(r"^x|y", v)] plotter._setup_scales(self, common, layers, coord_vars) # Apply statistical transform(s) plotter._compute_stats(self, layers) # Process scale spec for semantic variables and coordinates computed by stat plotter._setup_scales(self, common, layers) # TODO Remove these after updating other methods # ---- Maybe have debug= param that attaches these when True? plotter._data = common plotter._layers = layers # Process the data for each layer and add matplotlib artists for layer in layers: plotter._plot_layer(self, layer) # Add various figure decorations plotter._make_legend(self) plotter._finalize_figure(self) return plotter # ---- The plot compilation engine ---------------------------------------------- #
Plot
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/conditional1.py
{ "start": 231, "end": 304 }
class ____: def __bool__(self) -> bool: return True
ReturnsBool
python
ray-project__ray
python/ray/tune/integration/keras.py
{ "start": 359, "end": 582 }
class ____: """Deprecated. Use :class:`ray.train.tensorflow.keras.ReportCheckpointCallback` instead.""" def __new__(cls, *args, **kwargs): raise DeprecationWarning(_DEPRECATION_MESSAGE)
TuneReportCallback
python
scikit-learn__scikit-learn
sklearn/externals/array_api_compat/torch/_info.py
{ "start": 264, "end": 11889 }
class ____: """ Get the array API inspection namespace for PyTorch. The array API inspection namespace defines the following functions: - capabilities() - default_device() - default_dtypes() - dtypes() - devices() See https://data-apis.org/array-api/latest/API_specification/inspection.html for more details. Returns ------- info : ModuleType The array API inspection namespace for PyTorch. Examples -------- >>> info = xp.__array_namespace_info__() >>> info.default_dtypes() {'real floating': numpy.float64, 'complex floating': numpy.complex128, 'integral': numpy.int64, 'indexing': numpy.int64} """ __module__ = 'torch' def capabilities(self): """ Return a dictionary of array API library capabilities. The resulting dictionary has the following keys: - **"boolean indexing"**: boolean indicating whether an array library supports boolean indexing. Always ``True`` for PyTorch. - **"data-dependent shapes"**: boolean indicating whether an array library supports data-dependent output shapes. Always ``True`` for PyTorch. See https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html for more details. See Also -------- __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices Returns ------- capabilities : dict A dictionary of array API library capabilities. Examples -------- >>> info = xp.__array_namespace_info__() >>> info.capabilities() {'boolean indexing': True, 'data-dependent shapes': True, 'max dimensions': 64} """ return { "boolean indexing": True, "data-dependent shapes": True, "max dimensions": 64, } def default_device(self): """ The default device used for new PyTorch arrays. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices Returns ------- device : Device The default device used for new PyTorch arrays. 
Examples -------- >>> info = xp.__array_namespace_info__() >>> info.default_device() device(type='cpu') Notes ----- This method returns the static default device when PyTorch is initialized. However, the *current* device used by creation functions (``empty`` etc.) can be changed at runtime. See Also -------- https://github.com/data-apis/array-api/issues/835 """ return torch.device("cpu") def default_dtypes(self, *, device=None): """ The default data types used for new PyTorch arrays. Parameters ---------- device : Device, optional The device to get the default data types for. Unused for PyTorch, as all devices use the same default dtypes. Returns ------- dtypes : dict A dictionary describing the default data types used for new PyTorch arrays. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.dtypes, __array_namespace_info__.devices Examples -------- >>> info = xp.__array_namespace_info__() >>> info.default_dtypes() {'real floating': torch.float32, 'complex floating': torch.complex64, 'integral': torch.int64, 'indexing': torch.int64} """ # Note: if the default is set to float64, the devices like MPS that # don't support float64 will error. We still return the default_dtype # value here because this error doesn't represent a different default # per-device. default_floating = torch.get_default_dtype() default_complex = torch.complex64 if default_floating == torch.float32 else torch.complex128 default_integral = torch.int64 return { "real floating": default_floating, "complex floating": default_complex, "integral": default_integral, "indexing": default_integral, } def _dtypes(self, kind): bool = torch.bool int8 = torch.int8 int16 = torch.int16 int32 = torch.int32 int64 = torch.int64 uint8 = torch.uint8 # uint16, uint32, and uint64 are present in newer versions of pytorch, # but they aren't generally supported by the array API functions, so # we omit them from this function. 
float32 = torch.float32 float64 = torch.float64 complex64 = torch.complex64 complex128 = torch.complex128 if kind is None: return { "bool": bool, "int8": int8, "int16": int16, "int32": int32, "int64": int64, "uint8": uint8, "float32": float32, "float64": float64, "complex64": complex64, "complex128": complex128, } if kind == "bool": return {"bool": bool} if kind == "signed integer": return { "int8": int8, "int16": int16, "int32": int32, "int64": int64, } if kind == "unsigned integer": return { "uint8": uint8, } if kind == "integral": return { "int8": int8, "int16": int16, "int32": int32, "int64": int64, "uint8": uint8, } if kind == "real floating": return { "float32": float32, "float64": float64, } if kind == "complex floating": return { "complex64": complex64, "complex128": complex128, } if kind == "numeric": return { "int8": int8, "int16": int16, "int32": int32, "int64": int64, "uint8": uint8, "float32": float32, "float64": float64, "complex64": complex64, "complex128": complex128, } if isinstance(kind, tuple): res = {} for k in kind: res.update(self.dtypes(kind=k)) return res raise ValueError(f"unsupported kind: {kind!r}") @cache def dtypes(self, *, device=None, kind=None): """ The array API data types supported by PyTorch. Note that this function only returns data types that are defined by the array API. Parameters ---------- device : Device, optional The device to get the data types for. Unused for PyTorch, as all devices use the same dtypes. kind : str or tuple of str, optional The kind of data types to return. If ``None``, all data types are returned. If a string, only data types of that kind are returned. If a tuple, a dictionary containing the union of the given kinds is returned. The following kinds are supported: - ``'bool'``: boolean data types (i.e., ``bool``). - ``'signed integer'``: signed integer data types (i.e., ``int8``, ``int16``, ``int32``, ``int64``). 
- ``'unsigned integer'``: unsigned integer data types (i.e., ``uint8``, ``uint16``, ``uint32``, ``uint64``). - ``'integral'``: integer data types. Shorthand for ``('signed integer', 'unsigned integer')``. - ``'real floating'``: real-valued floating-point data types (i.e., ``float32``, ``float64``). - ``'complex floating'``: complex floating-point data types (i.e., ``complex64``, ``complex128``). - ``'numeric'``: numeric data types. Shorthand for ``('integral', 'real floating', 'complex floating')``. Returns ------- dtypes : dict A dictionary mapping the names of data types to the corresponding PyTorch data types. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.devices Examples -------- >>> info = xp.__array_namespace_info__() >>> info.dtypes(kind='signed integer') {'int8': numpy.int8, 'int16': numpy.int16, 'int32': numpy.int32, 'int64': numpy.int64} """ res = self._dtypes(kind) for k, v in res.copy().items(): try: torch.empty((0,), dtype=v, device=device) except: del res[k] return res @cache def devices(self): """ The devices supported by PyTorch. Returns ------- devices : list[Device] The devices supported by PyTorch. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes Examples -------- >>> info = xp.__array_namespace_info__() >>> info.devices() [device(type='cpu'), device(type='mps', index=0), device(type='meta')] """ # Torch doesn't have a straightforward way to get the list of all # currently supported devices. 
To do this, we first parse the error # message of torch.device to get the list of all possible types of # device: try: torch.device('notadevice') raise AssertionError("unreachable") # pragma: nocover except RuntimeError as e: # The error message is something like: # "Expected one of cpu, cuda, ipu, xpu, mkldnn, opengl, opencl, ideep, hip, ve, fpga, ort, xla, lazy, vulkan, mps, meta, hpu, mtia, privateuseone device type at start of device string: notadevice" devices_names = e.args[0].split('Expected one of ')[1].split(' device type')[0].split(', ') # Next we need to check for different indices for different devices. # device(device_name, index=index) doesn't actually check if the # device name or index is valid. We have to try to create a tensor # with it (which is why this function is cached). devices = [] for device_name in devices_names: i = 0 while True: try: a = torch.empty((0,), device=torch.device(device_name, index=i)) if a.device in devices: break devices.append(a.device) except: break i += 1 return devices
__array_namespace_info__
python
fluentpython__example-code-2e
15-more-types/protocol/random/erp.py
{ "start": 87, "end": 326 }
class ____(Generic[T]): def __init__(self, items: Iterable[T]) -> None: self._items: List[T] = list(items) random.shuffle(self._items) def pop_random(self) -> T: return self._items.pop()
EnterpriserRandomPopper
python
bokeh__bokeh
tests/unit/bokeh/embed/test_util__embed.py
{ "start": 20410, "end": 21511 }
class ____: @patch('bokeh.embed.util.standalone_docs_json_and_render_items') def test_delgation(self, mock_sdjari: MagicMock) -> None: p1 = SomeModel() p2 = SomeModel() d = Document() d.add_root(p1) d.add_root(p2) # ignore error unpacking None mock result, just checking to see that # standalone_docs_json_and_render_items is called as expected try: beu.standalone_docs_json([p1, p2]) except ValueError: pass mock_sdjari.assert_called_once_with([p1, p2]) def test_output(self) -> None: p1 = SomeModel() p2 = SomeModel() d = Document() d.add_root(p1) d.add_root(p2) out = beu.standalone_docs_json([p1, p2]) expected = beu.standalone_docs_json_and_render_items([p1, p2])[0] assert list(out.values()) ==list(expected.values()) #----------------------------------------------------------------------------- # Private API #-----------------------------------------------------------------------------
Test_standalone_docs_json
python
scikit-image__scikit-image
src/skimage/_shared/utils.py
{ "start": 21393, "end": 36621 }
class ____: """Decorate a deprecated function and warn when it is called. Adapted from <http://wiki.python.org/moin/PythonDecoratorLibrary>. Parameters ---------- deprecated_version : str The package version when the deprecation was introduced. removed_version : str The package version in which the deprecated function will be removed. hint : str, optional A hint on how to address this deprecation, e.g., "Use `skimage.submodule.alternative_func` instead." stacklevel : {None, int}, optional If None, the decorator attempts to detect the appropriate stacklevel for the deprecation warning automatically. This can fail, e.g., due to decorating a closure, in which case you can set the stacklevel manually here. The outermost decorator should have stacklevel 2, the next inner one stacklevel 3, etc. Examples -------- >>> @deprecate_func( ... deprecated_version="1.0.0", ... removed_version="1.2.0", ... hint="Use `bar` instead." ... ) ... def foo(): ... pass Calling ``foo`` will warn with:: FutureWarning: `foo` is deprecated since version 1.0.0 and will be removed in version 1.2.0. Use `bar` instead. """ def __init__( self, *, deprecated_version, removed_version=None, hint=None, stacklevel=None ): self.deprecated_version = deprecated_version self.removed_version = removed_version self.hint = hint self.stacklevel = stacklevel def __call__(self, func): message = ( f"`{func.__name__}` is deprecated since version {self.deprecated_version}" ) if self.removed_version: message += f" and will be removed in version {self.removed_version}." if self.hint: # Prepend space and make sure it closes with "." message += f" {self.hint.rstrip('.')}." 
@functools.wraps(func) def wrapped(*args, **kwargs): stacklevel = ( self.stacklevel if self.stacklevel is not None else _warning_stacklevel(func) ) warnings.warn(message, category=FutureWarning, stacklevel=stacklevel) return func(*args, **kwargs) # modify docstring to display deprecation warning doc = f'**Deprecated:** {message}' if wrapped.__doc__ is None: wrapped.__doc__ = doc else: wrapped.__doc__ = doc + '\n\n ' + wrapped.__doc__ return wrapped def _deprecate_estimate(func, class_name=None): """Deprecate ``estimate`` method.""" class_name = func.__qualname__.split('.')[0] if class_name is None else class_name return deprecate_func( deprecated_version="0.26", removed_version="2.2", hint=f"Please use `{class_name}.from_estimate` class constructor instead.", stacklevel=2, )(func) def _deprecate_inherited_estimate(cls): """Deprecate inherited ``estimate`` instance method. This needs a class decorator so we can correctly specify the class of the `from_estimate` class method in the deprecation message. """ def estimate(self, *args, **kwargs): return self._estimate(*args, **kwargs) is None # The inherited method will always be wrapped by deprecator. inherited_meth = getattr(cls, 'estimate').__wrapped__ estimate.__doc__ = inherited_meth.__doc__ estimate.__signature__ = inspect.signature(inherited_meth) cls.estimate = _deprecate_estimate(estimate, cls.__name__) return cls def _update_from_estimate_docstring(cls): """Fix docstring for inherited ``from_estimate`` class method. Even for classes that inherit the `from_estimate` method, and do not override it, we nevertheless need to change the *docstring* of the `from_estimate` method to point the user to the current (inheriting) class, rather than the class in which the method is defined (the inherited class). This needs a class decorator so we can modify the docstring of the new class method. CPython currently does not allow us to modify class method docstrings by updating ``__doc__``. 
""" inherited_cmeth = getattr(cls, 'from_estimate') def from_estimate(cls, *args, **kwargs): return inherited_cmeth(*args, **kwargs) inherited_class_name = inherited_cmeth.__qualname__.split('.')[-2] from_estimate.__doc__ = inherited_cmeth.__doc__.replace( inherited_class_name, cls.__name__ ) from_estimate.__signature__ = inspect.signature(inherited_cmeth) cls.from_estimate = classmethod(from_estimate) return cls def get_bound_method_class(m): """Return the class for a bound method.""" return m.im_class if sys.version < '3' else m.__self__.__class__ def safe_as_int(val, atol=1e-3): """ Attempt to safely cast values to integer format. Parameters ---------- val : scalar or iterable of scalars Number or container of numbers which are intended to be interpreted as integers, e.g., for indexing purposes, but which may not carry integer type. atol : float Absolute tolerance away from nearest integer to consider values in ``val`` functionally integers. Returns ------- val_int : NumPy scalar or ndarray of dtype `np.int64` Returns the input value(s) coerced to dtype `np.int64` assuming all were within ``atol`` of the nearest integer. Notes ----- This operation calculates ``val`` modulo 1, which returns the mantissa of all values. Then all mantissas greater than 0.5 are subtracted from one. Finally, the absolute tolerance from zero is calculated. If it is less than ``atol`` for all value(s) in ``val``, they are rounded and returned in an integer array. Or, if ``val`` was a scalar, a NumPy scalar type is returned. If any value(s) are outside the specified tolerance, an informative error is raised. Examples -------- >>> safe_as_int(7.0) 7 >>> safe_as_int([9, 4, 2.9999999999]) array([9, 4, 3]) >>> safe_as_int(53.1) Traceback (most recent call last): ... ValueError: Integer argument required but received 53.1, check inputs. 
>>> safe_as_int(53.01, atol=0.01) 53 """ mod = np.asarray(val) % 1 # Extract mantissa # Check for and subtract any mod values > 0.5 from 1 if mod.ndim == 0: # Scalar input, cannot be indexed if mod > 0.5: mod = 1 - mod else: # Iterable input, now ndarray mod[mod > 0.5] = 1 - mod[mod > 0.5] # Test on each side of nearest int if not np.allclose(mod, 0, atol=atol): raise ValueError(f'Integer argument required but received {val}, check inputs.') return np.round(val).astype(np.int64) def check_shape_equality(*images): """Check that all images have the same shape""" image0 = images[0] if not all(image0.shape == image.shape for image in images[1:]): raise ValueError('Input images must have the same dimensions.') return def slice_at_axis(sl, axis): """ Construct tuple of slices to slice an array in the given dimension. Parameters ---------- sl : slice The slice for the given dimension. axis : int The axis to which `sl` is applied. All other dimensions are left "unsliced". Returns ------- sl : tuple of slices A tuple with slices matching `shape` in length. Examples -------- >>> slice_at_axis(slice(None, 3, -1), 1) (slice(None, None, None), slice(None, 3, -1), Ellipsis) """ return (slice(None),) * axis + (sl,) + (...,) def reshape_nd(arr, ndim, dim): """Reshape a 1D array to have n dimensions, all singletons but one. Parameters ---------- arr : array, shape (N,) Input array ndim : int Number of desired dimensions of reshaped array. dim : int Which dimension/axis will not be singleton-sized. Returns ------- arr_reshaped : array, shape ([1, ...], N, [1,...]) View of `arr` reshaped to the desired shape. 
Examples -------- >>> rng = np.random.default_rng() >>> arr = rng.random(7) >>> reshape_nd(arr, 2, 0).shape (7, 1) >>> reshape_nd(arr, 3, 1).shape (1, 7, 1) >>> reshape_nd(arr, 4, -1).shape (1, 1, 1, 7) """ if arr.ndim != 1: raise ValueError("arr must be a 1D array") new_shape = [1] * ndim new_shape[dim] = -1 return np.reshape(arr, new_shape) def check_nD(array, ndim, arg_name='image'): """ Verify an array meets the desired ndims and array isn't empty. Parameters ---------- array : array-like Input array to be validated ndim : int or iterable of ints Allowable ndim or ndims for the array. arg_name : str, optional The name of the array in the original function. """ array = np.asanyarray(array) msg_incorrect_dim = "The parameter `%s` must be a %s-dimensional array" msg_empty_array = "The parameter `%s` cannot be an empty array" if isinstance(ndim, int): ndim = [ndim] if array.size == 0: raise ValueError(msg_empty_array % (arg_name)) if array.ndim not in ndim: raise ValueError( msg_incorrect_dim % (arg_name, '-or-'.join([str(n) for n in ndim])) ) def convert_to_float(image, preserve_range): """Convert input image to float image with the appropriate range. Parameters ---------- image : ndarray Input image. preserve_range : bool Determines if the range of the image should be kept or transformed using img_as_float. Also see https://scikit-image.org/docs/dev/user_guide/data_types.html Notes ----- * Input images with `float32` data type are not upcast. Returns ------- image : ndarray Transformed version of the input. """ if image.dtype == np.float16: return image.astype(np.float32) if preserve_range: # Convert image to double only if it is not single or double # precision float if image.dtype.char not in 'df': image = image.astype(float) else: from ..util.dtype import img_as_float image = img_as_float(image) return image def _validate_interpolation_order(image_dtype, order): """Validate and return spline interpolation's order. 
Parameters ---------- image_dtype : dtype Image dtype. order : {None, int}, optional The order of the spline interpolation. The order has to be in the range 0-5. If ``None`` assume order 0 for Boolean images, otherwise 1. See `skimage.transform.warp` for detail. Returns ------- order : int if input order is None, returns 0 if image_dtype is bool and 1 otherwise. Otherwise, image_dtype is checked and input order is validated accordingly (order > 0 is not supported for bool image dtype) """ if order is None: return 0 if image_dtype == bool else 1 if order < 0 or order > 5: raise ValueError("Spline interpolation order has to be in the range 0-5.") if image_dtype == bool and order != 0: raise ValueError( "Input image dtype is bool. Interpolation is not defined " "with bool data type. Please set order to 0 or explicitly " "cast input image to another data type." ) return order def _to_np_mode(mode): """Convert padding modes from `ndi.correlate` to `np.pad`.""" mode_translation_dict = dict(nearest='edge', reflect='symmetric', mirror='reflect') if mode in mode_translation_dict: mode = mode_translation_dict[mode] return mode def _to_ndimage_mode(mode): """Convert from `numpy.pad` mode name to the corresponding ndimage mode.""" mode_translation_dict = dict( constant='constant', edge='nearest', symmetric='reflect', reflect='mirror', wrap='wrap', ) if mode not in mode_translation_dict: raise ValueError( f"Unknown mode: '{mode}', or cannot translate mode. The " f"mode should be one of 'constant', 'edge', 'symmetric', " f"'reflect', or 'wrap'. See the documentation of numpy.pad for " f"more info." ) return _fix_ndimage_mode(mode_translation_dict[mode]) def _fix_ndimage_mode(mode): # SciPy 1.6.0 introduced grid variants of constant and wrap which # have less surprising behavior for images. 
Use these when available grid_modes = {'constant': 'grid-constant', 'wrap': 'grid-wrap'} return grid_modes.get(mode, mode) new_float_type = { # preserved types np.float32().dtype.char: np.float32, np.float64().dtype.char: np.float64, np.complex64().dtype.char: np.complex64, np.complex128().dtype.char: np.complex128, # altered types np.float16().dtype.char: np.float32, 'g': np.float64, # np.float128 ; doesn't exist on windows 'G': np.complex128, # np.complex256 ; doesn't exist on windows } def _supported_float_type(input_dtype, allow_complex=False): """Return an appropriate floating-point dtype for a given dtype. float32, float64, complex64, complex128 are preserved. float16 is promoted to float32. complex256 is demoted to complex128. Other types are cast to float64. Parameters ---------- input_dtype : np.dtype or tuple of np.dtype The input dtype. If a tuple of multiple dtypes is provided, each dtype is first converted to a supported floating point type and the final dtype is then determined by applying `np.result_type` on the sequence of supported floating point types. allow_complex : bool, optional If False, raise a ValueError on complex-valued inputs. Returns ------- float_type : dtype Floating-point dtype for the image. """ if isinstance(input_dtype, tuple): return np.result_type(*(_supported_float_type(d) for d in input_dtype)) input_dtype = np.dtype(input_dtype) if not allow_complex and input_dtype.kind == 'c': raise ValueError("complex valued input is not supported") return new_float_type.get(input_dtype.char, np.float64) def identity(image, *args, **kwargs): """Returns the first argument unmodified.""" return image def as_binary_ndarray(array, *, variable_name): """Return `array` as a numpy.ndarray of dtype bool. Raises ------ ValueError: An error including the given `variable_name` if `array` can not be safely cast to a boolean array. 
""" array = np.asarray(array) if array.dtype != bool: if np.any((array != 1) & (array != 0)): raise ValueError( f"{variable_name} array is not of dtype boolean or " f"contains values other than 0 and 1 so cannot be " f"safely cast to boolean array." ) return np.asarray(array, dtype=bool)
deprecate_func
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/memberAccess4.py
{ "start": 622, "end": 704 }
class ____(Protocol): def must_have(self) -> None: pass
HasItemProtocol2
python
tensorflow__tensorflow
tensorflow/python/compiler/tensorrt/test/reshape_transpose_test.py
{ "start": 3497, "end": 4536 }
class ____(trt_test.TfTrtIntegrationTestBase): def GraphFn(self, inp): # Add a block with compatible transposes. compatible_transpose = array_ops.transpose( inp, [0, 3, 1, 2], name="transpose-1") compatible_transpose = array_ops.transpose( compatible_transpose, [0, 2, 3, 1], name="transposeback") return array_ops.identity(compatible_transpose, name="output_0") def GetParams(self): return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 24, 24, 2]], [[100, 24, 24, 2]]) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return { "TRTEngineOp_000": [ "transpose-1", "transpose-1/perm", "transposeback", "transposeback/perm" ] } def ShouldRunTest(self, run_params): """Whether to run the test.""" return (not trt_test.IsQuantizationMode(run_params.precision_mode) and not run_params.dynamic_engine), "test static engine and non-INT8"
TransposeTest
python
dagster-io__dagster
python_modules/dagster/dagster_tests/execution_tests/pipes_tests/test_threaded_message_reader.py
{ "start": 1129, "end": 6878 }
class ____(PipesThreadedMessageReader): def __init__(self, *, log_readers, path: Optional[str] = None): self.path = path self.file_position = 0 super().__init__(log_readers=log_readers) def on_launched(self, launched_payload: PipesLaunchedData) -> None: if "path" in launched_payload.get("extras", {}): self.path = launched_payload["extras"]["path"] super().on_launched(launched_payload) def messages_are_readable(self, params: PipesParams) -> bool: if self.path is not None: return os.path.exists(self.path) else: return False @contextmanager def get_params(self) -> Iterator[PipesParams]: yield {PipesDefaultMessageWriter.STDIO_KEY: PipesDefaultMessageWriter.STDOUT} def download_messages( # pyright: ignore[reportIncompatibleMethodOverride] self, cursor: Optional[int], params: PipesParams ) -> Optional[tuple[int, str]]: if cursor is None: cursor = 0 assert self.path is not None with open(self.path) as file: file.seek(cursor) chunk = file.read() if chunk: return (file.tell(), chunk) def no_messages_debug_text(self) -> str: return "Attempted to read messages by extracting them from a file." 
def test_file_log_reader(tmp_path_factory, capsys): logs_dir = tmp_path_factory.mktemp("logs") log_path = os.path.join(logs_dir, "test.log") reader = PipesFileLogReader(path=log_path, target_stream=sys.stdout) is_session_closed = threading.Event() assert not reader.target_is_readable({}), "Should not be able to read without the file existing" with open(log_path, "w") as file: file.write("1\n") assert reader.target_is_readable({}), "Should be able to read after the file was created" reader.start({}, is_session_closed) with open(log_path, "a") as file: file.write("2\n") file.write("3\n") is_session_closed.set() reader.stop() time.sleep(1) assert capsys.readouterr().out == "1\n2\n3\n" def test_file_message_reader(tmp_path_factory, capsys): logs_dir = tmp_path_factory.mktemp("logs") messages_path = os.path.join(logs_dir, "messages.txt") log_path_1 = os.path.join(logs_dir, "test_1.log") log_path_2 = os.path.join(logs_dir, "test_2.log") log_path_3 = os.path.join(logs_dir, "test_3.log") log_reader_1 = PipesFileLogReader( path=log_path_1, target_stream=sys.stdout, ) log_reader_2 = PipesFileLogReader( path=log_path_2, target_stream=sys.stderr, ) # this one is used to test delayed PipesLogReader submission log_reader_3 = PipesFileLogReader( path=log_path_3, target_stream=sys.stderr, ) reader = PipesFileMessageReader( log_readers=[ log_reader_1, log_reader_2, ] ) @dg.asset def my_asset(context: AssetExecutionContext): with dg.open_pipes_session( context=context, message_reader=reader, context_injector=dg.PipesEnvContextInjector() ) as session: assert not reader.messages_are_readable({}) new_params = { "path": messages_path, } session.report_launched({"extras": new_params}) def log_event(message: str): with open(messages_path, "a") as file: file.write(message + "\n") def log_line_1(message: str): with open(log_path_1, "a") as file: file.write(message + "\n") def log_line_2(message: str): with open(log_path_2, "a") as file: file.write(message + "\n") def log_line_3(message: 
str): with open(log_path_3, "a") as file: file.write(message + "\n") log_line_1("Hello 1") log_event(json.dumps(_make_message(method="opened", params={}))) assert reader.messages_are_readable({}) log_event( json.dumps( _make_message(method="log", params={"message": "Hello!", "level": "INFO"}) ) ) log_event( json.dumps( _make_message( method="report_asset_materialization", params={ "asset_key": "my_asset", "metadata": {"foo": {"raw_value": "bar", "type": "text"}}, "data_version": "alpha", }, ) ) ) log_event(json.dumps(_make_message(method="closed", params={}))) log_line_1("Bye 1") log_line_2("Hello 2") log_line_2("Bye 2") log_line_3("Hello 3") reader.add_log_reader(log_reader_3) log_line_3("Bye 3") return session.get_results() result = dg.materialize([my_asset]) assert result.success mats = result.get_asset_materialization_events() assert len(mats) == 1 mat = mats[0] assert mat.asset_key == dg.AssetKey(["my_asset"]) assert mat.materialization.metadata["foo"].value == "bar" assert mat.materialization.tags[DATA_VERSION_TAG] == "alpha" # pyright: ignore[reportOptionalSubscript] captured = capsys.readouterr() assert "Hello 1" in captured.out assert "Bye 1" in captured.out assert "Hello 2" in captured.err assert "Bye 2" in captured.err assert "Hello 3" in captured.err assert "Bye 3" in captured.err
PipesFileMessageReader
python
conda__conda
conda/models/channel.py
{ "start": 1984, "end": 13682 }
class ____(metaclass=ChannelType): """ Channel: scheme <> auth <> location <> token <> channel <> subchannel <> platform <> package_filename Package Spec: channel <> subchannel <> namespace <> package_name """ _cache_ = {} @staticmethod def _reset_state() -> None: Channel._cache_ = {} def __init__( self, scheme: str | None = None, auth: str | None = None, location: str | None = None, token: str | None = None, name: str | None = None, platform: str | None = None, package_filename: str | None = None, ): self.scheme = scheme self.auth = auth self.location = location self.token = token self.name = name or "" self.platform = platform self.package_filename = package_filename @property def channel_location(self) -> str | None: return self.location @property def channel_name(self) -> str: return self.name @property def subdir(self) -> str | None: return self.platform @staticmethod def from_url(url: str) -> Channel: return parse_conda_channel_url(url) @staticmethod def from_channel_name(channel_name: str) -> Channel: return _get_channel_for_name(channel_name) @staticmethod def from_value(value: str | None) -> Channel: """Construct a new :class:`Channel` from a single value. Args: value: Anyone of the following forms: `None`, or one of the special strings "<unknown>", "None:///<unknown>", or "None": represents the unknown channel, used for packages with unknown origin. A URL including a scheme like ``file://`` or ``https://``: represents a channel URL. A local directory path: represents a local channel; relative paths must start with ``./``. A package file (i.e. the path to a file ending in ``.conda`` or ``.tar.bz2``): represents a channel for a single package A known channel name: represents a known channel, e.g. from the users ``.condarc`` file or the global configuration. Returns: A channel object. 
""" if value in (None, "<unknown>", "None:///<unknown>", "None"): return Channel(name=UNKNOWN_CHANNEL) value = ensure_text_type(value) if has_scheme(value): if value.startswith("file:"): value = win_path_backout(value) return Channel.from_url(value) elif is_path(value): return Channel.from_url(path_to_url(value)) elif is_package_file(value): if value.startswith("file:"): value = win_path_backout(value) return Channel.from_url(value) else: # at this point assume we don't have a bare (non-scheme) url # e.g. this would be bad: repo.anaconda.com/pkgs/free _stripped, platform = split_platform(context.known_subdirs, value) if _stripped in context.custom_multichannels: return MultiChannel( _stripped, context.custom_multichannels[_stripped], platform ) else: return Channel.from_channel_name(value) @staticmethod def make_simple_channel( channel_alias: Channel, channel_url: str, name: str | None = None ) -> Channel: ca = channel_alias test_url, scheme, auth, token = split_scheme_auth_token(channel_url) if name and scheme: return Channel( scheme=scheme, auth=auth, location=test_url, token=token, name=name.strip("/"), ) if scheme: if ca.location and test_url.startswith(ca.location): location, name = ca.location, test_url.replace(ca.location, "", 1) else: url_parts = urlparse(test_url) location = str(Url(hostname=url_parts.hostname, port=url_parts.port)) name = url_parts.path or "" return Channel( scheme=scheme, auth=auth, location=location, token=token, name=name.strip("/"), ) else: return Channel( scheme=ca.scheme, auth=ca.auth, location=ca.location, token=ca.token, name=name and name.strip("/") or channel_url.strip("/"), ) @property def canonical_name(self) -> str: try: return self.__canonical_name except AttributeError: pass for multiname, channels in context.custom_multichannels.items(): for channel in channels: if self.name == channel.name: cn = self.__canonical_name = multiname return cn for that_name in context.custom_channels: if self.name and tokenized_startswith( 
self.name.split("/"), that_name.split("/") ): cn = self.__canonical_name = self.name return cn if any( alias.location == self.location for alias in ( context.channel_alias, *context.migrated_channel_aliases, ) ): cn = self.__canonical_name = self.name return cn # fall back to the equivalent of self.base_url # re-defining here because base_url for MultiChannel is None if self.scheme: cn = self.__canonical_name = ( f"{self.scheme}://{join_url(self.location, self.name)}" ) return cn else: cn = self.__canonical_name = join_url(self.location, self.name).lstrip("/") return cn def urls( self, with_credentials: bool = False, subdirs: Iterable[str] | None = None, ) -> list[str]: """Generate URLs for this channel across specified platforms. Args: with_credentials: If True, include authentication credentials (token, auth) in URLs. subdirs: Specific platform subdirs to generate URLs for. If None, uses the channel's platform (if defined) or falls back to `context.subdirs`. If this is explicitly provided, overrides any platform defined in the channel. Examples: >>> channel = Channel("conda-forge") >>> channel.urls() # Uses context.subdirs ['https://conda.anaconda.org/conda-forge/linux-64', 'https://conda.anaconda.org/conda-forge/noarch'] >>> channel = Channel("conda-forge/linux-aarch64") >>> channel.urls() # Uses channel's platform ['https://conda.anaconda.org/conda-forge/linux-aarch64', 'https://conda.anaconda.org/conda-forge/noarch'] >>> channel.urls(subdirs=("osx-64", "noarch")) ['https://conda.anaconda.org/conda-forge/osx-64', 'https://conda.anaconda.org/conda-forge/noarch'] Returns: list[str]: List of URLs for accessing this channel's specified subdirectories. """ if subdirs is not None and not isiterable(subdirs): raise ValueError( f"`subdirs` must be an iterable of strings. Got: {subdirs}." 
) if self.canonical_name == UNKNOWN_CHANNEL: return Channel(DEFAULTS_CHANNEL_NAME).urls(with_credentials, subdirs) base = [self.location] if with_credentials and self.token: base.extend(["t", self.token]) base.append(self.name) base = join_url(*base) def _platforms() -> Iterator[str]: # kwargs 'subdir' takes precedence, if passed explicitly if subdirs is not None: yield from subdirs elif self.platform: yield self.platform if self.platform != "noarch": yield "noarch" else: yield from context.subdirs bases = (join_url(base, p) for p in _platforms()) if with_credentials and self.auth: return [f"{self.scheme}://{self.auth}@{b}" for b in bases] else: return [f"{self.scheme}://{b}" for b in bases] def url(self, with_credentials: bool = False) -> str | None: if self.canonical_name == UNKNOWN_CHANNEL: return None base = [self.location] if with_credentials and self.token: base.extend(["t", self.token]) base.append(self.name) if self.platform: base.append(self.platform) if self.package_filename: base.append(self.package_filename) else: first_non_noarch = next( (s for s in context.subdirs if s != "noarch"), "noarch" ) base.append(first_non_noarch) base = join_url(*base) if with_credentials and self.auth: return f"{self.scheme}://{self.auth}@{base}" else: return f"{self.scheme}://{base}" @property def base_url(self) -> str | None: if self.canonical_name == UNKNOWN_CHANNEL: return None return f"{self.scheme}://{join_url(self.location, self.name)}" @property def base_urls(self) -> tuple[str | None, ...]: return (self.base_url,) @property def subdir_url(self) -> str: url = self.url(True) if self.package_filename and url: url = url.rsplit("/", 1)[0] return url @property def channels(self) -> tuple[Channel, ...]: return (self,) def __str__(self) -> str: base = self.base_url or self.name if self.subdir: return join_url(base, self.subdir) else: return base def __repr__(self) -> str: return 'Channel("%s")' % ( join_url(self.name, self.subdir) if self.subdir else self.name ) def 
__eq__(self, other: Any) -> bool: if isinstance(other, Channel): return ( self.location == other.location and self.name == other.name and self.platform == other.platform ) else: try: _other = Channel(other) return ( self.location == _other.location and self.name == _other.name and self.platform == _other.platform ) except Exception as e: log.debug("%r", e) return False def __hash__(self) -> int: return hash((self.location, self.name, self.platform)) def __nonzero__(self) -> bool: return any((self.location, self.name)) def __bool__(self) -> bool: return self.__nonzero__() def __json__(self) -> dict[str, Any]: return self.__dict__ @property def url_channel_wtf(self) -> tuple[str | None, str]: return self.base_url, self.canonical_name def dump(self) -> dict[str, Any]: return { "scheme": self.scheme, "auth": self.auth, "location": self.location, "token": self.token, "name": self.name, "platform": self.platform, "package_filename": self.package_filename, }
Channel
python
google__jax
jax/_src/scipy/spatial/transform.py
{ "start": 7462, "end": 17634 }
class ____(typing.NamedTuple): """Spherical Linear Interpolation of Rotations. JAX implementation of :class:`scipy.spatial.transform.Slerp`. Examples: Create a Slerp instance from a series of rotations: >>> import math >>> from jax.scipy.spatial.transform import Rotation, Slerp >>> rots = jnp.array([[90, 0, 0], ... [0, 45, 0], ... [0, 0, -30]]) >>> key_rotations = Rotation.from_euler('zxy', rots, degrees=True) >>> key_times = [0, 1, 2] >>> slerp = Slerp.init(key_times, key_rotations) >>> times = [0, 0.5, 1, 1.5, 2] >>> interp_rots = slerp(times) >>> interp_rots.as_euler('zxy') Array([[ 1.5707963e+00, 0.0000000e+00, 0.0000000e+00], [ 8.5309029e-01, 3.8711953e-01, 1.7768645e-01], [-2.3841858e-07, 7.8539824e-01, 0.0000000e+00], [-5.6668043e-02, 3.9213133e-01, -2.8347540e-01], [ 0.0000000e+00, 0.0000000e+00, -5.2359891e-01]], dtype=float32) """ times: Array timedelta: Array rotations: Rotation rotvecs: Array @classmethod def init(cls, times: Array, rotations: Rotation): if not isinstance(rotations, Rotation): raise TypeError("`rotations` must be a `Rotation` instance.") if rotations.single or len(rotations) == 1: raise ValueError("`rotations` must be a sequence of at least 2 rotations.") times = jnp.asarray(times, dtype=rotations.quat.dtype) if times.ndim != 1: raise ValueError("Expected times to be specified in a 1 " "dimensional array, got {} " "dimensions.".format(times.ndim)) if times.shape[0] != len(rotations): raise ValueError("Expected number of rotations to be equal to " "number of timestamps given, got {} rotations " "and {} timestamps.".format(len(rotations), times.shape[0])) timedelta = jnp.diff(times) # if jnp.any(timedelta <= 0): # this causes a concretization error... 
# raise ValueError("Times must be in strictly increasing order.") new_rotations = Rotation(rotations.as_quat()[:-1]) return cls( times=times, timedelta=timedelta, rotations=new_rotations, rotvecs=(new_rotations.inv() * Rotation(rotations.as_quat()[1:])).as_rotvec()) def __call__(self, times: Array): """Interpolate rotations.""" compute_times = jnp.asarray(times, dtype=self.times.dtype) if compute_times.ndim > 1: raise ValueError("`times` must be at most 1-dimensional.") single_time = compute_times.ndim == 0 compute_times = jnp.atleast_1d(compute_times) ind = jnp.maximum(jnp.searchsorted(self.times, compute_times) - 1, 0) alpha = (compute_times - self.times[ind]) / self.timedelta[ind] result = (self.rotations[ind] * Rotation.from_rotvec(self.rotvecs[ind] * alpha[:, None])) if single_time: return result[0] return result @functools.partial(jnp_vectorize.vectorize, signature='(m,m),(m),()->(m)') def _apply(matrix: Array, vector: Array, inverse: bool) -> Array: return jnp.where(inverse, matrix.T, matrix) @ vector @functools.partial(jnp_vectorize.vectorize, signature='(m)->(n,n)') def _as_matrix(quat: Array) -> Array: x = quat[0] y = quat[1] z = quat[2] w = quat[3] x2 = x * x y2 = y * y z2 = z * z w2 = w * w xy = x * y zw = z * w xz = x * z yw = y * w yz = y * z xw = x * w return jnp.array([[+ x2 - y2 - z2 + w2, 2 * (xy - zw), 2 * (xz + yw)], [2 * (xy + zw), - x2 + y2 - z2 + w2, 2 * (yz - xw)], [2 * (xz - yw), 2 * (yz + xw), - x2 - y2 + z2 + w2]]) @functools.partial(jnp_vectorize.vectorize, signature='(m)->(n)') def _as_mrp(quat: Array) -> Array: sign = jnp.where(quat[3] < 0, -1., 1.) denominator = 1. + sign * quat[3] return sign * quat[:3] / denominator @functools.partial(jnp_vectorize.vectorize, signature='(m),()->(n)') def _as_rotvec(quat: Array, degrees: bool) -> Array: quat = jnp.where(quat[3] < 0, -quat, quat) # w > 0 to ensure 0 <= angle <= pi angle = 2. 
* jnp.arctan2(_vector_norm(quat[:3]), quat[3]) angle2 = angle * angle small_scale = 2 + angle2 / 12 + 7 * angle2 * angle2 / 2880 large_scale = angle / jnp.sin(angle / 2) scale = jnp.where(angle <= 1e-3, small_scale, large_scale) scale = jnp.where(degrees, jnp.rad2deg(scale), scale) return scale * jnp.array(quat[:3]) @functools.partial(jnp_vectorize.vectorize, signature='(n),(n)->(n)') def _compose_quat(p: Array, q: Array) -> Array: cross = jnp.cross(p[:3], q[:3]) return jnp.array([p[3]*q[0] + q[3]*p[0] + cross[0], p[3]*q[1] + q[3]*p[1] + cross[1], p[3]*q[2] + q[3]*p[2] + cross[2], p[3]*q[3] - p[0]*q[0] - p[1]*q[1] - p[2]*q[2]]) @functools.partial(jnp_vectorize.vectorize, signature='(m),(l),(),()->(n)') def _compute_euler_from_quat(quat: Array, axes: Array, extrinsic: bool, degrees: bool) -> Array: angle_first = jnp.where(extrinsic, 0, 2) angle_third = jnp.where(extrinsic, 2, 0) axes = jnp.where(extrinsic, axes, axes[::-1]) i = axes[0] j = axes[1] k = axes[2] symmetric = i == k k = jnp.where(symmetric, 3 - i - j, k) sign = jnp.array((i - j) * (j - k) * (k - i) // 2, dtype=quat.dtype) eps = 1e-7 a = jnp.where(symmetric, quat[3], quat[3] - quat[j]) b = jnp.where(symmetric, quat[i], quat[i] + quat[k] * sign) c = jnp.where(symmetric, quat[j], quat[j] + quat[3]) d = jnp.where(symmetric, quat[k] * sign, quat[k] * sign - quat[i]) angles = jnp.empty(3, dtype=quat.dtype) angles = angles.at[1].set(2 * jnp.arctan2(jnp.hypot(c, d), jnp.hypot(a, b))) case = jnp.where(jnp.abs(angles[1] - np.pi) <= eps, 2, 0) case = jnp.where(jnp.abs(angles[1]) <= eps, 1, case) half_sum = jnp.arctan2(b, a) half_diff = jnp.arctan2(d, c) angles = angles.at[0].set(jnp.where(case == 1, 2 * half_sum, 2 * half_diff * jnp.where(extrinsic, -1, 1))) # any degenerate case angles = angles.at[angle_first].set(jnp.where(case == 0, half_sum - half_diff, angles[angle_first])) angles = angles.at[angle_third].set(jnp.where(case == 0, half_sum + half_diff, angles[angle_third])) angles = 
angles.at[angle_third].set(jnp.where(symmetric, angles[angle_third], angles[angle_third] * sign)) angles = angles.at[1].set(jnp.where(symmetric, angles[1], angles[1] - np.pi / 2)) angles = (angles + np.pi) % (2 * np.pi) - np.pi return jnp.where(degrees, jnp.rad2deg(angles), angles) def _elementary_basis_index(axis: str) -> int: if axis == 'x': return 0 elif axis == 'y': return 1 elif axis == 'z': return 2 raise ValueError(f"Expected axis to be from ['x', 'y', 'z'], got {axis}") @functools.partial(jnp_vectorize.vectorize, signature=('(m),(m),(),()->(n)')) def _elementary_quat_compose(angles: Array, axes: Array, intrinsic: bool, degrees: bool) -> Array: angles = jnp.where(degrees, jnp.deg2rad(angles), angles) result = _make_elementary_quat(axes[0], angles[0]) for idx in range(1, len(axes)): quat = _make_elementary_quat(axes[idx], angles[idx]) result = jnp.where(intrinsic, _compose_quat(result, quat), _compose_quat(quat, result)) return result @functools.partial(jnp_vectorize.vectorize, signature=('(m),()->(n)')) def _from_rotvec(rotvec: Array, degrees: bool) -> Array: rotvec = jnp.where(degrees, jnp.deg2rad(rotvec), rotvec) angle = _vector_norm(rotvec) angle2 = angle * angle small_scale = scale = 0.5 - angle2 / 48 + angle2 * angle2 / 3840 large_scale = jnp.sin(angle / 2) / angle scale = jnp.where(angle <= 1e-3, small_scale, large_scale) return jnp.hstack([scale * rotvec, jnp.cos(angle / 2)]) @functools.partial(jnp_vectorize.vectorize, signature=('(m,m)->(n)')) def _from_matrix(matrix: Array) -> Array: matrix_trace = matrix[0, 0] + matrix[1, 1] + matrix[2, 2] decision = jnp.array([matrix[0, 0], matrix[1, 1], matrix[2, 2], matrix_trace], dtype=matrix.dtype) choice = jnp.argmax(decision) i = choice j = (i + 1) % 3 k = (j + 1) % 3 quat_012 = jnp.empty(4, dtype=matrix.dtype) quat_012 = quat_012.at[i].set(1 - decision[3] + 2 * matrix[i, i]) quat_012 = quat_012.at[j].set(matrix[j, i] + matrix[i, j]) quat_012 = quat_012.at[k].set(matrix[k, i] + matrix[i, k]) quat_012 = 
quat_012.at[3].set(matrix[k, j] - matrix[j, k]) quat_3 = jnp.empty(4, dtype=matrix.dtype) quat_3 = quat_3.at[0].set(matrix[2, 1] - matrix[1, 2]) quat_3 = quat_3.at[1].set(matrix[0, 2] - matrix[2, 0]) quat_3 = quat_3.at[2].set(matrix[1, 0] - matrix[0, 1]) quat_3 = quat_3.at[3].set(1 + decision[3]) quat = jnp.where(choice != 3, quat_012, quat_3) return _normalize_quaternion(quat) @functools.partial(jnp_vectorize.vectorize, signature='(m)->(n)') def _from_mrp(mrp: Array) -> Array: mrp_squared_plus_1 = jnp.dot(mrp, mrp) + 1 return jnp.hstack([2 * mrp[:3], (2 - mrp_squared_plus_1)]) / mrp_squared_plus_1 @functools.partial(jnp_vectorize.vectorize, signature='(n)->(n)') def _inv(quat: Array) -> Array: return quat * jnp.array([-1, -1, -1, 1], dtype=quat.dtype) @functools.partial(jnp_vectorize.vectorize, signature='(n)->()') def _magnitude(quat: Array) -> Array: return 2. * jnp.arctan2(_vector_norm(quat[:3]), jnp.abs(quat[3])) @functools.partial(jnp_vectorize.vectorize, signature='(),()->(n)') def _make_elementary_quat(axis: int, angle: Array) -> Array: quat = jnp.zeros(4, dtype=angle.dtype) quat = quat.at[3].set(jnp.cos(angle / 2.)) quat = quat.at[axis].set(jnp.sin(angle / 2.)) return quat @functools.partial(jnp_vectorize.vectorize, signature='(n)->(n)') def _normalize_quaternion(quat: Array) -> Array: return quat / _vector_norm(quat) @functools.partial(jnp_vectorize.vectorize, signature='(n)->()') def _vector_norm(vector: Array) -> Array: return jnp.sqrt(jnp.dot(vector, vector)) @functools.partial(jnp_vectorize.vectorize, signature='(n)->(n)') def _make_canonical(quat: Array) -> Array: is_neg = quat < 0 is_zero = quat == 0 neg = ( is_neg[3] | (is_zero[3] & is_neg[0]) | (is_zero[3] & is_zero[0] & is_neg[1]) | (is_zero[3] & is_zero[0] & is_zero[1] & is_neg[2]) ) return jnp.where(neg, -quat, quat)
Slerp
python
walkccc__LeetCode
solutions/148. Sort List/148.py
{ "start": 0, "end": 1063 }
class ____: def sortList(self, head: ListNode) -> ListNode: def split(head: ListNode, k: int) -> ListNode: while k > 1 and head: head = head.next k -= 1 rest = head.next if head else None if head: head.next = None return rest def merge(l1: ListNode, l2: ListNode) -> tuple: dummy = ListNode(0) tail = dummy while l1 and l2: if l1.val > l2.val: l1, l2 = l2, l1 tail.next = l1 l1 = l1.next tail = tail.next tail.next = l1 if l1 else l2 while tail.next: tail = tail.next return dummy.next, tail length = 0 curr = head while curr: length += 1 curr = curr.next dummy = ListNode(0, head) k = 1 while k < length: curr = dummy.next tail = dummy while curr: l = curr r = split(l, k) curr = split(r, k) mergedHead, mergedTail = merge(l, r) tail.next = mergedHead tail = mergedTail k *= 2 return dummy.next
Solution
python
django__django
django/contrib/gis/admin/options.py
{ "start": 134, "end": 627 }
class ____: gis_widget = OSMWidget gis_widget_kwargs = {} def formfield_for_dbfield(self, db_field, request, **kwargs): if isinstance(db_field, models.GeometryField) and ( db_field.dim < 3 or self.gis_widget.supports_3d ): kwargs["widget"] = self.gis_widget(**self.gis_widget_kwargs) return db_field.formfield(**kwargs) else: return super().formfield_for_dbfield(db_field, request, **kwargs)
GeoModelAdminMixin
python
PyCQA__pylint
tests/functional/e/enum_subclasses.py
{ "start": 1235, "end": 1544 }
class ____(TestBase): """Tests the false positive for enums.""" a = auto() b = auto() test_enum = TestEnum.a assert test_enum.hello_pylint() == test_enum.name # Check combinations of Flag members using the bitwise operators (&, |, ^, ~) # https://github.com/pylint-dev/pylint/issues/7381
TestEnum
python
PrefectHQ__prefect
src/prefect/serializers.py
{ "start": 7831, "end": 9225 }
class ____(Serializer[D]): """ Wraps another serializer, compressing its output. Uses `lzma` by default. See `compressionlib` for using alternative libraries. Attributes: serializer: The serializer to use before compression. compressionlib: The import path of a compression module to use. Must have methods `compress(bytes) -> bytes` and `decompress(bytes) -> bytes`. level: If not null, the level of compression to pass to `compress`. """ type: str = Field(default="compressed", frozen=True) serializer: Serializer[D] compressionlib: str = "lzma" @field_validator("serializer", mode="before") def validate_serializer(cls, value: Union[str, Serializer[D]]) -> Serializer[D]: return cast_type_names_to_serializers(value) @field_validator("compressionlib") def check_compressionlib(cls, value: str) -> str: return validate_compressionlib(value) def dumps(self, obj: D) -> bytes: blob = self.serializer.dumps(obj) compressor = from_qualified_name(self.compressionlib) return base64.encodebytes(compressor.compress(blob)) def loads(self, blob: bytes) -> D: compressor = from_qualified_name(self.compressionlib) uncompressed = compressor.decompress(base64.decodebytes(blob)) return self.serializer.loads(uncompressed)
CompressedSerializer
python
PyCQA__pylint
tests/functional/i/invalid/invalid_hash_returned.py
{ "start": 826, "end": 956 }
class ____: """ __hash__ returns a float""" def __hash__(self): # [invalid-hash-returned] return 1.11
ThirdBadHash
python
great-expectations__great_expectations
contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_crc32.py
{ "start": 1566, "end": 3845 }
class ____(ColumnMapExpectation): """Expect column values to be hashes that match valid CRC32 format.""" # These examples will be shown in the public gallery. # They will also be executed as unit tests for your Expectation. examples = [ { "data": { "well_formed_crc32": [ "35a4c0a4", "1e51313b", # crc32 hash of "great_expectations" "5c2c3127", "178b56a5", "871cb9cf", ], "malformed_crc32": [ "", "ab12", "1e51313ba", "1e51313x", "This is not valid crc32", ], }, "tests": [ { "title": "basic_positive_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "well_formed_crc32"}, "out": {"success": True}, }, { "title": "basic_negative_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "malformed_crc32"}, "out": {"success": False}, }, ], } ] # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. map_metric = "column_values.valid_crc32" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False success_keys = ("mostly",) # This dictionary contains default values for any parameters that should have default values default_kwarg_values = {} # This object contains metadata for display in the public Gallery library_metadata = { "maturity": "experimental", "tags": ["experimental", "typed-entities"], "contributors": [ "@sp1thas", ], } if __name__ == "__main__": ExpectColumnValuesToBeValidCrc32().print_diagnostic_checklist()
ExpectColumnValuesToBeValidCrc32
python
walkccc__LeetCode
solutions/1882. Process Tasks Using Servers/1882.py
{ "start": 0, "end": 842 }
class ____: def assignTasks(self, servers: list[int], tasks: list[int]) -> list[int]: ans = [] free = [] # (weight, index, freeTime) used = [] # (freeTime, weight, index) for i, weight in enumerate(servers): heapq.heappush(free, (weight, i, 0)) for i, executionTime in enumerate(tasks): # i := the current time # Poll all servers that'll be free at time i. while used and used[0][0] <= i: curr = heapq.heappop(used) heapq.heappush(free, (curr[1], curr[2], curr[0])) if free: curr = heapq.heappop(free) ans.append(curr[1]) heapq.heappush(used, (i + executionTime, curr[0], curr[1])) else: curr = heapq.heappop(used) ans.append(curr[2]) heapq.heappush(used, (curr[0] + executionTime, curr[1], curr[2])) return ans
Solution
python
geekcomputers__Python
BlackJack_game/blackjack_rr.py
{ "start": 695, "end": 865 }
class ____: def __init__(self, suit, rank): self.suit = suit self.rank = rank def __str__(self): return self.rank + " of " + self.suit
Card
python
tensorflow__tensorflow
tensorflow/python/ops/sparse_ops.py
{ "start": 36689, "end": 141691 }
class ____: def __repr__(self): # This is needed to make documentation without fully qualified module paths return "KeywordRequired()" @tf_export(v1=["sparse.split", "sparse_split"]) @deprecation.deprecated_endpoints("sparse_split") @deprecation.deprecated_args( None, "split_dim is deprecated, use axis instead", "split_dim") def sparse_split(keyword_required=KeywordRequired(), sp_input=None, num_split=None, axis=None, name=None, split_dim=None): """Split a `SparseTensor` into `num_split` tensors along `axis`. If the `sp_input.dense_shape[axis]` is not an integer multiple of `num_split` each slice starting from 0:`shape[axis] % num_split` gets extra one dimension. For example, if `axis = 1` and `num_split = 2` and the input is: input_tensor = shape = [2, 7] [ a d e ] [b c ] Graphically the output tensors are: output_tensor[0] = [ a ] [b c ] output_tensor[1] = [ d e ] [ ] Args: keyword_required: Python 2 standin for * (temporary for argument reorder) sp_input: The `SparseTensor` to split. num_split: A Python integer. The number of ways to split. axis: A 0-D `int32` `Tensor`. The dimension along which to split. Must be in range [-rank, rank), where rank is the number of dimensions in the input `SparseTensor`. name: A name for the operation (optional). split_dim: Deprecated old name for axis. Returns: `num_split` `SparseTensor` objects resulting from splitting `value`. Raises: TypeError: If `sp_input` is not a `SparseTensor`. ValueError: If the deprecated `split_dim` and `axis` are both non None. 
""" if not isinstance(keyword_required, KeywordRequired): raise ValueError("Keyword arguments are required for this function.") if sp_input is None: raise ValueError("sp_input is required") if num_split is None: raise ValueError("num_split is required") if axis is None: raise ValueError("axis is required") axis = deprecation.deprecated_argument_lookup("axis", axis, "split_dim", split_dim) sp_input = _convert_to_sparse_tensor(sp_input) output_inds, output_vals, output_shapes = ( gen_sparse_ops.sparse_split( axis, sp_input.indices, sp_input.values, sp_input.dense_shape, num_split, name=name)) sparse_tensors = [] for i in range(0, num_split): sparse_tensors.append( sparse_tensor.SparseTensor(output_inds[i], output_vals[i], output_shapes[i])) return sparse_tensors @tf_export("sparse.split", v1=[]) def sparse_split_v2(sp_input=None, num_split=None, axis=None, name=None): """Split a `SparseTensor` into `num_split` tensors along `axis`. If the `sp_input.dense_shape[axis]` is not an integer multiple of `num_split` each slice starting from 0:`shape[axis] % num_split` gets extra one dimension. For example: >>> indices = [[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]] >>> values = [1, 2, 3, 4, 5] >>> t = tf.sparse.SparseTensor(indices=indices, values=values, ... 
dense_shape=[2, 7]) >>> tf.sparse.to_dense(t) <tf.Tensor: shape=(2, 7), dtype=int32, numpy= array([[0, 0, 1, 0, 2, 3, 0], [4, 5, 0, 0, 0, 0, 0]], dtype=int32)> >>> output = tf.sparse.split(sp_input=t, num_split=2, axis=1) >>> tf.sparse.to_dense(output[0]) <tf.Tensor: shape=(2, 4), dtype=int32, numpy= array([[0, 0, 1, 0], [4, 5, 0, 0]], dtype=int32)> >>> tf.sparse.to_dense(output[1]) <tf.Tensor: shape=(2, 3), dtype=int32, numpy= array([[2, 3, 0], [0, 0, 0]], dtype=int32)> >>> output = tf.sparse.split(sp_input=t, num_split=2, axis=0) >>> tf.sparse.to_dense(output[0]) <tf.Tensor: shape=(1, 7), dtype=int32, numpy=array([[0, 0, 1, 0, 2, 3, 0]], dtype=int32)> >>> tf.sparse.to_dense(output[1]) <tf.Tensor: shape=(1, 7), dtype=int32, numpy=array([[4, 5, 0, 0, 0, 0, 0]], dtype=int32)> >>> output = tf.sparse.split(sp_input=t, num_split=2, axis=-1) >>> tf.sparse.to_dense(output[0]) <tf.Tensor: shape=(2, 4), dtype=int32, numpy= array([[0, 0, 1, 0], [4, 5, 0, 0]], dtype=int32)> >>> tf.sparse.to_dense(output[1]) <tf.Tensor: shape=(2, 3), dtype=int32, numpy= array([[2, 3, 0], [0, 0, 0]], dtype=int32)> Args: sp_input: The `SparseTensor` to split. num_split: A Python integer. The number of ways to split. axis: A 0-D `int32` `Tensor`. The dimension along which to split. Must be in range [-rank, rank), where rank is the number of dimensions in the input `SparseTensor`. name: A name for the operation (optional). Returns: `num_split` `SparseTensor` objects resulting from splitting `value`. Raises: TypeError: If `sp_input` is not a `SparseTensor`. """ return sparse_split(sp_input=sp_input, num_split=num_split, axis=axis, name=name, split_dim=None) @tf_export("sparse.slice", v1=["sparse.slice", "sparse_slice"]) @deprecation.deprecated_endpoints("sparse_slice") def sparse_slice(sp_input, start, size, name=None): """Slice a `SparseTensor` based on the `start` and `size`. 
For example, if the input is input_tensor = shape = [2, 7] [ a d e ] [b c ] Graphically the output tensors are: sparse.slice([0, 0], [2, 4]) = shape = [2, 4] [ a ] [b c ] sparse.slice([0, 4], [2, 3]) = shape = [2, 3] [ d e ] [ ] Args: sp_input: The `SparseTensor` to split. start: 1-D. tensor represents the start of the slice. size: 1-D. tensor represents the size of the slice. name: A name for the operation (optional). Returns: A `SparseTensor` objects resulting from splicing. Raises: TypeError: If `sp_input` is not a `SparseTensor`. """ sp_input = _convert_to_sparse_tensor(sp_input) start = ops.convert_to_tensor(start, dtypes.int64) size = ops.convert_to_tensor(size, dtypes.int64) with ops.name_scope(name, "SparseSlice", [sp_input]) as name: output_indices, output_values, output_shape = gen_sparse_ops.sparse_slice( sp_input.indices, sp_input.values, sp_input.dense_shape, start, size, name=name) return sparse_tensor.SparseTensor(output_indices, output_values, output_shape) @tf_export(v1=["sparse_to_dense"]) @dispatch.add_dispatch_support @deprecation.deprecated( None, "Create a `tf.sparse.SparseTensor` and use `tf.sparse.to_dense` instead.") def sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value=0, validate_indices=True, name=None): """Converts a sparse representation into a dense tensor. Builds an array `dense` with shape `output_shape` such that ```python # If sparse_indices is scalar dense[i] = (i == sparse_indices ? sparse_values : default_value) # If sparse_indices is a vector, then for each i dense[sparse_indices[i]] = sparse_values[i] # If sparse_indices is an n by d matrix, then for each i in [0, n) dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i] ``` All other values in `dense` are set to `default_value`. If `sparse_values` is a scalar, all sparse indices are set to this single value. Indices should be sorted in lexicographic order, and indices must not contain any repeats. 
If `validate_indices` is True, these properties are checked during execution. Args: sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`. `sparse_indices[i]` contains the complete index where `sparse_values[i]` will be placed. output_shape: A 1-D `Tensor` of the same type as `sparse_indices`. Shape of the dense output tensor. sparse_values: A 0-D or 1-D `Tensor`. Values corresponding to each row of `sparse_indices`, or a scalar value to be used for all sparse indices. default_value: A 0-D `Tensor` of the same type as `sparse_values`. Value to set for indices not specified in `sparse_indices`. Defaults to zero. validate_indices: A boolean value. If True, indices are checked to make sure they are sorted in lexicographic order and that there are no repeats. name: A name for the operation (optional). Returns: Dense `Tensor` of shape `output_shape`. Has the same type as `sparse_values`. """ return gen_sparse_ops.sparse_to_dense( sparse_indices, output_shape, sparse_values, default_value=default_value, validate_indices=validate_indices, name=name) @tf_export("sparse.reduce_max", v1=[]) def sparse_reduce_max_v2( sp_input, axis=None, keepdims=None, output_is_sparse=False, name=None): """Computes `tf.sparse.maximum` of elements across dimensions of a SparseTensor. This is the reduction operation for the elementwise `tf.sparse.maximum` op. This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor` if `output_is_sparse` is `False`, or a `SparseTensor` if `output_is_sparse` is `True`. Note: A gradient is not defined for this function, so it can't be used in training models that need gradient descent. Reduces `sp_input` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. 
If `axis` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, similar to the indexing rules in Python. The values not defined in `sp_input` don't participate in the reduce max, as opposed to be implicitly assumed 0 -- hence it can return negative values for sparse `axis`. But, in case there are no values in `axis`, it will reduce to 0. See second example below. For example: # 'x' represents [[1, ?, 2] # [?, 3, ?]] # where ? is implicitly-zero. >>> x = tf.sparse.SparseTensor([[0, 0], [0, 2], [1, 1]], [1, 2, 3], [2, 3]) >>> tf.sparse.reduce_max(x) <tf.Tensor: shape=(), dtype=int32, numpy=3> >>> tf.sparse.reduce_max(x, 0) <tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 3, 2], dtype=int32)> >>> tf.sparse.reduce_max(x, 1) <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 3], dtype=int32)> >>> tf.sparse.reduce_max(x, 1, keepdims=True) <tf.Tensor: shape=(2, 1), dtype=int32, numpy= array([[2], [3]], dtype=int32)> >>> tf.sparse.reduce_max(x, [0, 1]) <tf.Tensor: shape=(), dtype=int32, numpy=3> # 'y' represents [[-7, ?] # [ 4, 3] # [ ?, ?] >>> y = tf.sparse.SparseTensor([[0, 0,], [1, 0], [1, 1]], [-7, 4, 3], ... [3, 2]) >>> tf.sparse.reduce_max(y, 1) <tf.Tensor: shape=(3,), dtype=int32, numpy=array([-7, 4, 0], dtype=int32)> Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If `None` (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. output_is_sparse: If true, returns a `SparseTensor` instead of a dense `Tensor` (the default). name: A name for the operation (optional). Returns: The reduced Tensor or the reduced SparseTensor if `output_is_sparse` is True. 
""" if keepdims is None: keepdims = False if output_is_sparse: output_ind, output_val, output_shape = ( gen_sparse_ops.sparse_reduce_max_sparse( sp_input.indices, sp_input.values, sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis), keepdims, name=name)) return sparse_tensor.SparseTensor(output_ind, output_val, output_shape) return gen_sparse_ops.sparse_reduce_max( sp_input.indices, sp_input.values, sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis), keepdims, name=name) @tf_export(v1=["sparse.reduce_max", "sparse_reduce_max"]) @deprecation.deprecated_endpoints("sparse_reduce_max") @deprecation.deprecated_args( None, "keep_dims is deprecated, use keepdims instead", "keep_dims") @deprecation.deprecated_args( None, "reduction_axes is deprecated, use axis instead", "reduction_axes") def sparse_reduce_max(sp_input, axis=None, keepdims=None, reduction_axes=None, keep_dims=None): """Computes `tf.sparse.maximum` of elements across dimensions of a SparseTensor. This is the reduction operation for the elementwise `tf.sparse.maximum` op. This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor` instead of a sparse one. Note: A gradient is not defined for this function, so it can't be used in training models that need gradient descent. Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_axes`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `reduction_axes` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, similar to the indexing rules in Python. The values not defined in `sp_input` don't participate in the reduce max, as opposed to be implicitly assumed 0 -- hence it can return negative values for sparse `reduction_axes`. 
But, in case there are no values in `reduction_axes`, it will reduce to 0. See second example below. For example: # 'x' represents [[1, ?, 2] # [?, 3, ?]] # where ? is implicitly-zero. >>> x = tf.sparse.SparseTensor([[0, 0], [0, 2], [1, 1]], [1, 2, 3], [2, 3]) >>> tf.sparse.reduce_max(x) <tf.Tensor: shape=(), dtype=int32, numpy=3> >>> tf.sparse.reduce_max(x, 0) <tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 3, 2], dtype=int32)> >>> tf.sparse.reduce_max(x, 1) <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 3], dtype=int32)> >>> tf.sparse.reduce_max(x, 1, keepdims=True) <tf.Tensor: shape=(2, 1), dtype=int32, numpy= array([[2], [3]], dtype=int32)> >>> tf.sparse.reduce_max(x, [0, 1]) <tf.Tensor: shape=(), dtype=int32, numpy=3> # 'y' represents [[-7, ?] # [ 4, 3] # [ ?, ?] >>> y = tf.sparse.SparseTensor([[0, 0,], [1, 0], [1, 1]], [-7, 4, 3], ... [3, 2]) >>> tf.sparse.reduce_max(y, 1) <tf.Tensor: shape=(3,), dtype=int32, numpy=array([-7, 4, 0], dtype=int32)> Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If `None` (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. reduction_axes: Deprecated name of `axis`. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced Tensor. 
""" keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, "keep_dims", keep_dims) axis = deprecation.deprecated_argument_lookup("axis", axis, "reduction_axes", reduction_axes) if keepdims is None: keepdims = False return gen_sparse_ops.sparse_reduce_max( sp_input.indices, sp_input.values, sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis), keepdims) @tf_export(v1=["sparse.reduce_max_sparse", "sparse_reduce_max_sparse"]) @deprecation.deprecated_endpoints("sparse_reduce_max_sparse") @deprecation.deprecated_args( None, "keep_dims is deprecated, use keepdims instead", "keep_dims") def sparse_reduce_max_sparse(sp_input, axis=None, keepdims=None, reduction_axes=None, keep_dims=None): """Computes the max of elements across dimensions of a SparseTensor. This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_max()`. In contrast to SparseReduceSum, this Op returns a SparseTensor. Note: A gradient is not defined for this function, so it can't be used in training models that need gradient descent. Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_axes`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `reduction_axes` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python. Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If `None` (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. reduction_axes: Deprecated name of axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced SparseTensor. 
""" keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, "keep_dims", keep_dims) axis = deprecation.deprecated_argument_lookup("axis", axis, "reduction_axes", reduction_axes) if keepdims is None: keepdims = False output_ind, output_val, output_shape = ( gen_sparse_ops.sparse_reduce_max_sparse( sp_input.indices, sp_input.values, sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis), keepdims)) return sparse_tensor.SparseTensor(output_ind, output_val, output_shape) @tf_export("sparse.reduce_sum", v1=[]) def sparse_reduce_sum_v2( sp_input, axis=None, keepdims=None, output_is_sparse=False, name=None): """Computes `tf.sparse.add` of elements across dimensions of a SparseTensor. This is the reduction operation for the elementwise `tf.sparse.add` op. This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor` if `output_is_sparse` is `False`, or a `SparseTensor` if `output_is_sparse` is `True`. Note: if `output_is_sparse` is True, a gradient is not defined for this function, so it can't be used in training models that need gradient descent. Reduces `sp_input` along the dimensions given in `axis`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `axis` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, similar to the indexing rules in Python. For example: # 'x' represents [[1, ?, 1] # [?, 1, ?]] # where ? is implicitly-zero. 
>>> x = tf.sparse.SparseTensor([[0, 0], [0, 2], [1, 1]], [1, 1, 1], [2, 3]) >>> tf.sparse.reduce_sum(x) <tf.Tensor: shape=(), dtype=int32, numpy=3> >>> tf.sparse.reduce_sum(x, 0) <tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 1, 1], dtype=int32)> >>> tf.sparse.reduce_sum(x, 1) # Can also use -1 as the axis <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 1], dtype=int32)> >>> tf.sparse.reduce_sum(x, 1, keepdims=True) <tf.Tensor: shape=(2, 1), dtype=int32, numpy= array([[2], [1]], dtype=int32)> >>> tf.sparse.reduce_sum(x, [0, 1]) <tf.Tensor: shape=(), dtype=int32, numpy=3> Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If `None` (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. output_is_sparse: If true, returns a `SparseTensor` instead of a dense `Tensor` (the default). name: A name for the operation (optional). Returns: The reduced Tensor or the reduced SparseTensor if `output_is_sparse` is True. 
""" if keepdims is None: keepdims = False if output_is_sparse: output_ind, output_val, output_shape = ( gen_sparse_ops.sparse_reduce_sum_sparse( sp_input.indices, sp_input.values, sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis), keepdims, name=name)) return sparse_tensor.SparseTensor(output_ind, output_val, output_shape) return gen_sparse_ops.sparse_reduce_sum( sp_input.indices, sp_input.values, sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis), keepdims, name=name) @tf_export(v1=["sparse.reduce_sum", "sparse_reduce_sum"]) @deprecation.deprecated_endpoints("sparse_reduce_sum") @deprecation.deprecated_args( None, "keep_dims is deprecated, use keepdims instead", "keep_dims") @deprecation.deprecated_args( None, "reduction_axes is deprecated, use axis instead", "reduction_axes") def sparse_reduce_sum(sp_input, axis=None, keepdims=None, reduction_axes=None, keep_dims=None): """Computes `tf.sparse.add` of elements across dimensions of a SparseTensor. This is the reduction operation for the elementwise `tf.sparse.add` op. This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor` instead of a sparse one. Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_axes`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `reduction_axes` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, similar to the indexing rules in Python. For example: # 'x' represents [[1, ?, 1] # [?, 1, ?]] # where ? is implicitly-zero. 
>>> x = tf.sparse.SparseTensor([[0, 0], [0, 2], [1, 1]], [1, 1, 1], [2, 3]) >>> tf.sparse.reduce_sum(x) <tf.Tensor: shape=(), dtype=int32, numpy=3> >>> tf.sparse.reduce_sum(x, 0) <tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 1, 1], dtype=int32)> >>> tf.sparse.reduce_sum(x, 1) # Can also use -1 as the axis <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 1], dtype=int32)> >>> tf.sparse.reduce_sum(x, 1, keepdims=True) <tf.Tensor: shape=(2, 1), dtype=int32, numpy= array([[2], [1]], dtype=int32)> >>> tf.sparse.reduce_sum(x, [0, 1]) <tf.Tensor: shape=(), dtype=int32, numpy=3> Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If `None` (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. reduction_axes: Deprecated name of `axis`. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced Tensor. """ keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, "keep_dims", keep_dims) axis = deprecation.deprecated_argument_lookup("axis", axis, "reduction_axes", reduction_axes) if keepdims is None: keepdims = False return gen_sparse_ops.sparse_reduce_sum( sp_input.indices, sp_input.values, sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis), keepdims) @tf_export(v1=["sparse.reduce_sum_sparse", "sparse_reduce_sum_sparse"]) @deprecation.deprecated_endpoints("sparse_reduce_sum_sparse") @deprecation.deprecated_args( None, "keep_dims is deprecated, use keepdims instead", "keep_dims") def sparse_reduce_sum_sparse(sp_input, axis=None, keepdims=None, reduction_axes=None, keep_dims=None): """Computes the sum of elements across dimensions of a SparseTensor. This Op takes a SparseTensor and is the sparse counterpart to `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a SparseTensor. Note: A gradient is not defined for this function, so it can't be used in training models that need gradient descent. 
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each entry in `reduction_axes`. If `keepdims` is true, the reduced dimensions are retained with length 1. If `reduction_axes` has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python. Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If `None` (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. reduction_axes: Deprecated name of axis. keep_dims: Deprecated alias for `keepdims`. Returns: The reduced SparseTensor. """ keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims, "keep_dims", keep_dims) axis = deprecation.deprecated_argument_lookup("axis", axis, "reduction_axes", reduction_axes) if keepdims is None: keepdims = False output_ind, output_val, output_shape = ( gen_sparse_ops.sparse_reduce_sum_sparse( sp_input.indices, sp_input.values, sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis), keepdims)) return sparse_tensor.SparseTensor(output_ind, output_val, output_shape) @tf_export("sparse.to_dense", v1=["sparse.to_dense", "sparse_tensor_to_dense"]) @deprecation.deprecated_endpoints("sparse_tensor_to_dense") def sparse_tensor_to_dense(sp_input, default_value=None, validate_indices=True, name=None): """Converts a `SparseTensor` into a dense tensor. For this sparse tensor with three non-empty values: >>> sp_input = tf.sparse.SparseTensor( ... dense_shape=[3, 5], ... values=[7, 8, 9], ... indices =[[0, 1], ... [0, 3], ... [2, 0]]) The output will be a dense `[3, 5]` tensor with values: >>> tf.sparse.to_dense(sp_input).numpy() array([[0, 7, 0, 8, 0], [0, 0, 0, 0, 0], [9, 0, 0, 0, 0]], dtype=int32) Note: Indices must be without repeats. 
This is only tested if `validate_indices` is `True`. Args: sp_input: The input `SparseTensor`. default_value: Scalar value to set for indices not specified in `sp_input`. Defaults to zero. validate_indices: A boolean value. If `True`, indices are checked to make sure they are sorted in lexicographic order and that there are no repeats. name: A name prefix for the returned tensors (optional). Returns: A dense tensor with shape `sp_input.dense_shape` and values specified by the non-empty values in `sp_input`. Indices not in `sp_input` are assigned `default_value`. Raises: TypeError: If `sp_input` is not a `SparseTensor`. """ sp_input = _convert_to_sparse_tensor(sp_input) if default_value is None: default_value = array_ops.zeros([], dtype=sp_input.dtype) return gen_sparse_ops.sparse_to_dense( sp_input.indices, sp_input.dense_shape, sp_input.values, default_value=default_value, validate_indices=validate_indices, name=name) @tf_export( "sparse.to_indicator", v1=["sparse.to_indicator", "sparse_to_indicator"]) @deprecation.deprecated_endpoints("sparse_to_indicator") def sparse_to_indicator(sp_input, vocab_size, name=None): """Converts a `SparseTensor` of ids into a dense bool indicator tensor. The last dimension of `sp_input.indices` is discarded and replaced with the values of `sp_input`. If `sp_input.dense_shape = [D0, D1, ..., Dn, K]`, then `output.shape = [D0, D1, ..., Dn, vocab_size]`, where output[d_0, d_1, ..., d_n, sp_input[d_0, d_1, ..., d_n, k]] = True and False elsewhere in `output`. For example, if `sp_input.dense_shape = [2, 3, 4]` with non-empty values: [0, 0, 0]: 0 [0, 1, 0]: 10 [1, 0, 3]: 103 [1, 1, 1]: 150 [1, 1, 2]: 149 [1, 1, 3]: 150 [1, 2, 1]: 121 and `vocab_size = 200`, then the output will be a `[2, 3, 200]` dense bool tensor with False everywhere except at positions (0, 0, 0), (0, 1, 10), (1, 0, 103), (1, 1, 149), (1, 1, 150), (1, 2, 121). Note that repeats are allowed in the input SparseTensor. 
This op is useful for converting `SparseTensor`s into dense formats for compatibility with ops that expect dense tensors. The input `SparseTensor` must be in row-major order. Args: sp_input: A `SparseTensor` with `values` property of type `int32` or `int64`. vocab_size: A scalar int64 Tensor (or Python int) containing the new size of the last dimension, `all(0 <= sp_input.values < vocab_size)`. name: A name prefix for the returned tensors (optional) Returns: A dense bool indicator tensor representing the indices with specified value. Raises: TypeError: If `sp_input` is not a `SparseTensor`. """ sp_input = _convert_to_sparse_tensor(sp_input) with ops.name_scope(name, "SparseToIndicator", [sp_input]) as name: num_entries = array_ops.shape(sp_input.indices)[0] new_values = array_ops.fill(array_ops.expand_dims(num_entries, 0), True) sp_values = sparse_tensor.SparseTensor(sp_input.indices, new_values, sp_input.dense_shape) sp_new = sparse_merge_impl(sp_input, sp_values, vocab_size, name) # validate_indices may be False because we allow duplicates in new_indices: # repeated indices are allowed when creating an indicator matrix. return sparse_tensor_to_dense( sp_new, default_value=False, validate_indices=False, name=name) @tf_export(v1=["sparse.merge", "sparse_merge"]) @deprecation.deprecated(None, "No similar op available at this time.") def sparse_merge(sp_ids, sp_values, vocab_size, name=None, already_sorted=False): """Combines a batch of feature ids and values into a single `SparseTensor`. The most common use case for this function occurs when feature ids and their corresponding values are stored in `Example` protos on disk. `parse_example` will return a batch of ids and a batch of values, and this function joins them into a single logical `SparseTensor` for use in functions such as `sparse_tensor_dense_matmul`, `sparse_to_dense`, etc. 
The `SparseTensor` returned by this function has the following properties: - `indices` is equivalent to `sp_ids.indices` with the last dimension discarded and replaced with `sp_ids.values`. - `values` is simply `sp_values.values`. - If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then `output.shape = [D0, D1, ..., Dn, vocab_size]`. For example, consider the following feature vectors: ```python vector1 = [-3, 0, 0, 0, 0, 0] vector2 = [ 0, 1, 0, 4, 1, 0] vector3 = [ 5, 0, 0, 9, 0, 0] ``` These might be stored sparsely in the following Example protos by storing only the feature ids (column number if the vectors are treated as a matrix) of the non-zero elements and the corresponding values: ```python examples = [Example(features={ "ids": Feature(int64_list=Int64List(value=[0])), "values": Feature(float_list=FloatList(value=[-3]))}), Example(features={ "ids": Feature(int64_list=Int64List(value=[1, 4, 3])), "values": Feature(float_list=FloatList(value=[1, 1, 4]))}), Example(features={ "ids": Feature(int64_list=Int64List(value=[0, 3])), "values": Feature(float_list=FloatList(value=[5, 9]))})] ``` The result of calling parse_example on these examples will produce a dictionary with entries for "ids" and "values". Passing those two objects to this function along with vocab_size=6, will produce a `SparseTensor` that sparsely represents all three instances. Namely, the `indices` property will contain the coordinates of the non-zero entries in the feature matrix (the first dimension is the row number in the matrix, i.e., the index within the batch, and the second dimension is the column number, i.e., the feature id); `values` will contain the actual values. `shape` will be the shape of the original matrix, i.e., (3, 6). 
For our example above, the output will be equal to: ```python SparseTensor(indices=[[0, 0], [1, 1], [1, 3], [1, 4], [2, 0], [2, 3]], values=[-3, 1, 4, 1, 5, 9], dense_shape=[3, 6]) ``` This method generalizes to higher-dimensions by simply providing a list for both the sp_ids as well as the vocab_size. In this case the resulting `SparseTensor` has the following properties: - `indices` is equivalent to `sp_ids[0].indices` with the last dimension discarded and concatenated with `sp_ids[0].values, sp_ids[1].values, ...`. - `values` is simply `sp_values.values`. - If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then `output.shape = [D0, D1, ..., Dn] + vocab_size`. Args: sp_ids: A single `SparseTensor` with `values` property of type `int32` or `int64` or a Python list of such `SparseTensor`s or a list thereof. sp_values: A `SparseTensor` of any type. vocab_size: A scalar `int64` Tensor (or Python int) containing the new size of the last dimension, `all(0 <= sp_ids.values < vocab_size)`. Or a list thereof with `all(0 <= sp_ids[i].values < vocab_size[i])` for all `i`. name: A name prefix for the returned tensors (optional) already_sorted: A boolean to specify whether the per-batch values in `sp_values` are already sorted. If so skip sorting, False by default (optional). Returns: A `SparseTensor` compactly representing a batch of feature ids and values, useful for passing to functions that expect such a `SparseTensor`. Raises: TypeError: If `sp_values` is not a `SparseTensor`. Or if `sp_ids` is neither a `SparseTensor` nor a list thereof. Or if `vocab_size` is not a `Tensor` or a Python int and `sp_ids` is a `SparseTensor`. Or if `vocab_size` is not a or list thereof and `sp_ids` is a list. ValueError: If `sp_ids` and `vocab_size` are lists of different lengths. 
""" return sparse_merge_impl(sp_ids, sp_values, vocab_size, name, already_sorted) def sparse_merge_impl(sp_ids, sp_values, vocab_size, name=None, already_sorted=False): """Internal implementation for sparse_merge to avoid deprecation warnings.""" if isinstance(sp_ids, sparse_tensor.SparseTensorValue) or isinstance( sp_ids, sparse_tensor.SparseTensor): sp_ids = [sp_ids] if not (isinstance(vocab_size, tensor_lib.Tensor) or isinstance(vocab_size, numbers.Integral)): raise TypeError("vocab_size has to be a Tensor or Python int. Found %s" % type(vocab_size)) vocab_size = [vocab_size] else: if not isinstance(sp_ids, collections_abc.Iterable): raise TypeError("sp_ids has to be a SparseTensor or list thereof. " "Found %s" % type(sp_ids)) if not isinstance(vocab_size, collections_abc.Iterable): raise TypeError("vocab_size has to be a list of Tensors or Python ints. " "Found %s" % type(vocab_size)) for dim in vocab_size: if not (isinstance( dim, tensor_lib.Tensor) or isinstance(dim, numbers.Integral)): raise TypeError( "vocab_size has to be a list of Tensors or Python ints. 
@tf_export("sparse.retain", v1=["sparse.retain", "sparse_retain"])
@deprecation.deprecated_endpoints("sparse_retain")
def sparse_retain(sp_input, to_retain):
  """Retains specified non-empty values within a `SparseTensor`.

  Keeps exactly the non-empty values of `sp_input` whose position in
  `to_retain` is True, dropping the rest.  E.g. for a `[4, 5]` input with
  values `a@[0,1], b@[0,3], c@[2,0], d@[3,1]` and
  `to_retain = [True, False, False, True]`, the result has values
  `a@[0,1]` and `d@[3,1]` and the same dense shape.

  Args:
    sp_input: The input `SparseTensor` with `N` non-empty elements.
    to_retain: A bool vector of length `N` with `M` true values.

  Returns:
    A `SparseTensor` with the same shape as the input and `M` non-empty
    elements corresponding to the true positions in `to_retain`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  to_retain = ops.convert_to_tensor(to_retain)

  # Static shape checking: the mask must be rank-1 and, when the number of
  # non-empty values is statically known, of matching length.
  mask_shape = to_retain.get_shape()
  mask_shape.assert_has_rank(1)
  if sp_input.values.get_shape().dims is not None:
    sp_input.values.get_shape().dims[0].assert_is_compatible_with(
        tensor_shape.dimension_at_index(mask_shape, 0))

  # Positions of the True entries select which indices/values survive.
  keep_positions = array_ops.reshape(array_ops.where_v2(to_retain), [-1])
  kept_indices = array_ops.gather(sp_input.indices, keep_positions)
  kept_values = array_ops.gather(sp_input.values, keep_positions)
  return sparse_tensor.SparseTensor(kept_indices, kept_values,
                                    array_ops.identity(sp_input.dense_shape))
@tf_export(
    "sparse.reset_shape", v1=["sparse.reset_shape", "sparse_reset_shape"])
@deprecation.deprecated_endpoints("sparse_reset_shape")
def sparse_reset_shape(sp_input, new_shape=None):
  """Resets the shape of a `SparseTensor` with indices and values unchanged.

  If `new_shape` is None, returns a copy of `sp_input` with its shape reset
  to the tight bounding box of `sp_input` (a shape of all zeros if `sp_input`
  has no values).

  If `new_shape` is provided, it must be larger or equal in every dimension
  compared to the shape of `sp_input`; the returned SparseTensor then has
  shape `new_shape` with indices and values unchanged.

  For example, consider a `sp_input` with shape [2, 3, 5] and values
  a@[0,0,1], b@[0,1,0], c@[0,2,2], d@[1,0,3]:

  - `new_shape = [3, 7]` is an error (rank mismatch): a ValueError during
    graph construction if both shapes are known, otherwise an OpError at
    run time.
  - `new_shape = [2, 3, 6]` is fine (every dimension >= [2, 3, 5]).
  - `new_shape = [2, 3, 4]` is an error: the third dimension is smaller than
    the original shape [2, 3, 5] (an `InvalidArgumentError` is raised).
  - `new_shape = None` yields the tight bounding box shape [2, 3, 4].

  Args:
    sp_input: The input `SparseTensor`.
    new_shape: None or a vector representing the new shape for the returned
      `SparseTensor`.

  Returns:
    A `SparseTensor` with indices and values unchanged from `sp_input`. Its
    shape is `new_shape` if that is set. Otherwise it is the tight bounding
    box of `sp_input`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
    ValueError: If `new_shape` represents a tensor with a different rank from
      that of `sp_input` (if shapes are known when the graph is constructed).
    ValueError: If `new_shape` is determined during graph build to have
      dimension sizes that are too small.
    OpError:
      - If `new_shape` has dimension sizes that are too small.
      - If shapes are not known during graph construction time, and during run
        time it is found out that the ranks do not match.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  in_indices = array_ops.identity(sp_input.indices)
  in_values = array_ops.identity(sp_input.values)
  in_shape = array_ops.identity(sp_input.dense_shape)

  if new_shape is None:
    # Tight bounding box: max index per dimension plus one, clamped at zero
    # so an all-empty input yields an all-zero shape.
    dim_low_bound = math_ops.reduce_max(in_indices, axis=0)
    output_shape_tensor = math_ops.maximum(
        array_ops.constant(0, dtype=dtypes.int64),
        math_ops.add(dim_low_bound, array_ops.ones_like(in_shape)))
  else:
    output_shape_tensor = ops.convert_to_tensor(new_shape)
    output_shape_tensor.get_shape().assert_has_rank(1)
    output_shape_tensor = math_ops.cast(output_shape_tensor, dtypes.int64)
    # For cases when shape is known during graph construction, this catches
    # the error before the sparse_tensor.SparseTensor catches it.
    if output_shape_tensor.get_shape().rank is not None:
      output_shape_tensor.get_shape().dims[0].assert_is_compatible_with(
          in_shape.get_shape().dims[0])

    output_shape_tensor_const = tensor_util.constant_value(output_shape_tensor)
    # For cases where all shapes are known during graph construction
    if (output_shape_tensor_const is not None and
        sp_input.get_shape().is_fully_defined()):
      in_shape_const = np.array(sp_input.get_shape().as_list())
      if not np.all(in_shape_const <= output_shape_tensor_const):
        # BUGFIX: the format arguments were previously swapped -- the message
        # labels name new_shape first, so pass the requested shape first and
        # the input shape second.
        raise ValueError(
            "Requested new_shape should have dimension sizes >= sp_input.shape."
            " Found new_shape (%s), sp_input.shape (%s)." %
            (output_shape_tensor_const, in_shape_const))
      output_shape_tensor = output_shape_tensor_const
    else:
      # For cases where shape is not known during graph construction.
      output_shape_tensor = control_flow_ops.with_dependencies([
          check_ops.assert_equal(
              array_ops.shape(in_shape), array_ops.shape(output_shape_tensor))
      ], output_shape_tensor)
      output_shape_tensor = control_flow_ops.with_dependencies(
          [check_ops.assert_less_equal(in_shape, output_shape_tensor)],
          output_shape_tensor)

  return sparse_tensor.SparseTensor(in_indices, in_values, output_shape_tensor)
@tf_export(
    "sparse.fill_empty_rows",
    v1=["sparse.fill_empty_rows", "sparse_fill_empty_rows"])
@deprecation.deprecated_endpoints("sparse_fill_empty_rows")
def sparse_fill_empty_rows(sp_input, default_value, name=None):
  """Fills empty rows in the input 2-D `SparseTensor` with a default value.

  Adds an entry with value `default_value` at index `[row, 0]` for every row
  of the input that has no values, leaving the dense shape unchanged.  The
  output is in row-major order.  Also returns an indicator vector such that
  `empty_row_indicator[i]` is True iff row `i` was empty.

  Args:
    sp_input: A `SparseTensor` with shape `[N, M]`.
    default_value: The value to fill for empty rows, with the same type as
      `sp_input.`
    name: A name prefix for the returned tensors (optional)

  Returns:
    sp_ordered_output: A `SparseTensor` with shape `[N, M]`, and with all
      empty rows filled in with `default_value`.
    empty_row_indicator: A bool vector of length `N` indicating whether each
      input row was empty.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  with ops.name_scope(name, "SparseFillEmptyRows", [sp_input]):
    # Match the fill value's dtype to the input values before dispatching.
    fill_value = ops.convert_to_tensor(
        default_value, dtype=sp_input.values.dtype)
    # The reverse-index map output of the kernel is not part of this API.
    filled_indices, filled_values, row_is_empty, _ = (
        gen_sparse_ops.sparse_fill_empty_rows(
            indices=sp_input.indices,
            values=sp_input.values,
            dense_shape=sp_input.dense_shape,
            default_value=fill_value))
    filled = sparse_tensor.SparseTensor(
        indices=filled_indices,
        values=filled_values,
        dense_shape=sp_input.dense_shape)
    return filled, row_is_empty
@tf_export(v1=["io.serialize_sparse", "serialize_sparse"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("serialize_sparse")
def serialize_sparse(sp_input, name=None, out_type=dtypes.string):
  """Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object.

  TF1 endpoint; delegates to `serialize_sparse_v2`, which takes the same
  arguments in a different order.

  Args:
    sp_input: The input `SparseTensor`.
    name: A name prefix for the returned tensors (optional).
    out_type: The `dtype` to use for serialization.

  Returns:
    A 3-vector (1-D `Tensor`), with each column representing the serialized
    `SparseTensor`'s indices, values, and shape (respectively).

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  return serialize_sparse_v2(sp_input, out_type=out_type, name=name)
@tf_export("io.serialize_sparse", v1=[])
@dispatch.add_dispatch_support
def serialize_sparse_v2(sp_input, out_type=dtypes.string, name=None):
  """Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object.

  Args:
    sp_input: The input `SparseTensor`.
    out_type: The `dtype` to use for serialization.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A 3-vector (1-D `Tensor`), with each column representing the serialized
    `SparseTensor`'s indices, values, and shape (respectively).

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  st = _convert_to_sparse_tensor(sp_input)
  return gen_sparse_ops.serialize_sparse(
      st.indices, st.values, st.dense_shape, name=name, out_type=out_type)


@tf_export(v1=["io.serialize_many_sparse", "serialize_many_sparse"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("serialize_many_sparse")
def serialize_many_sparse(sp_input, name=None, out_type=dtypes.string):
  """Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`.

  TF1 endpoint; delegates to `serialize_many_sparse_v2`.  The input must
  have rank `R > 1` with the first dimension treated as the minibatch
  dimension; elements must be sorted in increasing order of that dimension.
  Each row of the output serializes a rank `R-1` `SparseTensor`.

  Args:
    sp_input: The input rank `R` `SparseTensor`.
    name: A name prefix for the returned tensors (optional).
    out_type: The `dtype` to use for serialization.

  Returns:
    A matrix (2-D `Tensor`) with `N` rows and `3` columns. Each column
    represents serialized `SparseTensor`'s indices, values, and shape
    (respectively).

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  return serialize_many_sparse_v2(sp_input, out_type=out_type, name=name)
@tf_export("io.serialize_many_sparse", v1=[])
@dispatch.add_dispatch_support
def serialize_many_sparse_v2(sp_input, out_type=dtypes.string, name=None):
  """Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`.

  The `SparseTensor` must have rank `R` greater than 1, and the first
  dimension is treated as the minibatch dimension.  Elements of the
  `SparseTensor` must be sorted in increasing order of this first dimension.
  The serialized objects going into each row of the output `Tensor` have
  rank `R-1`; the minibatch size `N` is extracted from `sparse_shape[0]`.

  Args:
    sp_input: The input rank `R` `SparseTensor`.
    out_type: The `dtype` to use for serialization.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A matrix (2-D `Tensor`) with `N` rows and `3` columns. Each column
    represents serialized `SparseTensor`'s indices, values, and shape
    (respectively).

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  st = _convert_to_sparse_tensor(sp_input)
  return gen_sparse_ops.serialize_many_sparse(
      st.indices, st.values, st.dense_shape, name=name, out_type=out_type)


def deserialize_sparse(serialized_sparse, dtype, rank=None, name=None):
  """Deserialize `SparseTensor` objects.

  `serialized_sparse` must have shape `[?, ?, ..., ?, 3]`, where the last
  dimension stores serialized `SparseTensor` objects and the other N
  dimensions (N >= 0) correspond to a batch.  The ranks of the original
  objects must all match; the result's rank is that rank plus N, with the
  new leading dimensions matching the batch sizes.  Shape values for the
  original dimensions are the max across the inputs.  Indices are assumed
  in standard lexicographic order; if not, run `SparseReorder` afterwards.

  Args:
    serialized_sparse: The serialized `SparseTensor` objects. The last
      dimension must have 3 columns.
    dtype: The `dtype` of the serialized `SparseTensor` objects.
    rank: (optional) Python int, the rank of the `SparseTensor` objects.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A `SparseTensor` representing the deserialized `SparseTensor` objects.
  """
  idx, vals, shape = gen_sparse_ops.deserialize_sparse(
      serialized_sparse, dtype, name=name)
  # Feed static rank information back in when the caller supplied it; with
  # rank=None these set_shape calls are no-ops on the static shape.
  idx.set_shape([None, rank])
  shape.set_shape([rank])
  return sparse_tensor.SparseTensor(idx, vals, shape)
@tf_export(
    "io.deserialize_many_sparse",
    v1=["io.deserialize_many_sparse", "deserialize_many_sparse"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("deserialize_many_sparse")
def deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None):
  """Deserialize and concatenate `SparseTensors` from a serialized minibatch.

  `serialized_sparse` must be a string matrix of shape `[N x 3]` where `N` is
  the minibatch size and the rows are packed outputs of `serialize_sparse`.
  The original `SparseTensor` ranks must all match; the result has rank one
  higher (the new leading row dimension has size `N`), and its shape values
  for the remaining dimensions are the max across the inputs.  Indices are
  assumed in standard lexicographic order; if not, run `sparse.reorder` on
  the result.

  Args:
    serialized_sparse: 2-D `Tensor` of type `string` of shape `[N, 3]`. The
      serialized and packed `SparseTensor` objects.
    dtype: The `dtype` of the serialized `SparseTensor` objects.
    rank: (optional) Python int, the rank of the `SparseTensor` objects.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A `SparseTensor` representing the deserialized `SparseTensor`s,
    concatenated along the `SparseTensor`s' first dimension.

    All of the serialized `SparseTensor`s must have had the same rank and
    type.
  """
  idx, vals, shape = gen_sparse_ops.deserialize_many_sparse(
      serialized_sparse, dtype, name=name)
  # Feed static rank information back in when available (no-op if rank=None).
  idx.set_shape([None, rank])
  shape.set_shape([rank])
  return sparse_tensor.SparseTensor(idx, vals, shape)
@tf_export("sparse.sparse_dense_matmul",
           v1=["sparse.sparse_dense_matmul", "sparse.matmul",
               "sparse_tensor_dense_matmul"])
@deprecation.deprecated_endpoints("sparse_tensor_dense_matmul")
def sparse_tensor_dense_matmul(sp_a,
                               b,
                               adjoint_a=False,
                               adjoint_b=False,
                               name=None):
  """Multiply SparseTensor (or dense Matrix) (of rank 2) "A" by dense matrix
  (or SparseTensor) "B".

  Exactly one of the inputs must be a `SparseTensor`; the other must be a
  dense matrix.  For optimal performance the sparse operand should be sorted
  in lexicographically increasing order when `adjoint_a == False` (use
  `sparse.reorder` if unsure), or in column-major order when
  `adjoint_a == True`.

  This op tends to perform well when `A` is sparse, when the column size of
  the product is small (e.g. matrix-vector multiplication), or when
  `sp_a.dense_shape` takes on large values.  If the sparse operand would fit
  densified, the product column count is large, or the density exceeds
  roughly 15%, consider densifying and using `tf.matmul(a_is_sparse=True)`
  instead.  If you need sparse gradients, or your sparse data comes as
  separate id/value `SparseTensor`s, consider `tf.nn.embedding_lookup_sparse`.

  Args:
    sp_a: SparseTensor (or dense Matrix) A, of rank 2.
    b: dense Matrix (or SparseTensor) B, with the same dtype as sp_a.
    adjoint_a: Use the adjoint of A in the matrix multiply.  If A is complex,
      this is transpose(conj(A)).  Otherwise it's transpose(A).
    adjoint_b: Use the adjoint of B in the matrix multiply.  If B is complex,
      this is transpose(conj(B)).  Otherwise it's transpose(B).
    name: A name prefix for the returned tensors (optional)

  Returns:
    A dense matrix (pseudo-code in dense np.matrix notation):
      `A = A.H if adjoint_a else A`
      `B = B.H if adjoint_b else B`
      `return A*B`
  """
  if isinstance(b, (sparse_tensor.SparseTensor,
                    sparse_tensor.SparseTensorValue)):
    # Only sparse-times-dense is supported natively.  For dense * sparse we
    # use the identity AB = (B'A')' and recurse with the operands flipped,
    # folding the outer transposes into the adjoint flags where possible.
    if adjoint_a != adjoint_b:
      return array_ops.transpose(
          sparse_tensor_dense_matmul(b, sp_a, adjoint_a, adjoint_b))
    return array_ops.transpose(
        sparse_tensor_dense_matmul(
            b, sp_a, adjoint_a=not adjoint_a, adjoint_b=not adjoint_b))

  sp_a = _convert_to_sparse_tensor(sp_a)
  with ops.name_scope(name, "SparseTensorDenseMatMul",
                      [sp_a.indices, sp_a.values, b]) as name:
    b = ops.convert_to_tensor(b, name="b")
    return gen_sparse_ops.sparse_tensor_dense_mat_mul(
        a_indices=sp_a.indices,
        a_values=sp_a.values,
        a_shape=sp_a.dense_shape,
        b=b,
        adjoint_a=adjoint_a,
        adjoint_b=adjoint_b)
@tf_export("sparse.softmax", v1=["sparse.softmax", "sparse_softmax"])
@deprecation.deprecated_endpoints("sparse_softmax")
def sparse_softmax(sp_input, name=None):
  """Applies softmax to a batched N-D `SparseTensor`.

  The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
  (where `N >= 2`), with indices sorted in canonical lexicographic order.

  This is equivalent to applying `tf.nn.softmax()` to each innermost logical
  `[B, C]` submatrix along the size-C dimension, except that *the implicitly
  zero elements do not participate*: the softmax is computed over the
  explicit values only, and the result keeps exactly the same non-zero
  indices and shape as the input.

  Example:

  >>> st = tf.sparse.from_dense(
  ...   [[[0., np.e], [1., 0.]],
  ...    [[np.e, 0.], [np.e, np.e]]])
  >>> res = tf.sparse.softmax(st)
  >>> tf.sparse.to_dense(res)
  <tf.Tensor: shape=(2, 2, 2), dtype=float32, numpy=
  array([[[0. , 1. ],
          [1. , 0. ]],
         [[1. , 0. ],
          [0.5, 0.5]]], dtype=float32)>

  Args:
    sp_input: N-D `SparseTensor`, where `N >= 2`.
    name: optional name of the operation.

  Returns:
    output: N-D `SparseTensor` representing the results.
  """
  with ops.name_scope(name, "SparseSoftmax",
                      [sp_input.indices, sp_input.values]) as name:
    softmax_vals = gen_sparse_ops.sparse_softmax(sp_input.indices,
                                                 sp_input.values,
                                                 sp_input.dense_shape)
    # Indices and shape are unchanged; only the values are transformed.
    return sparse_tensor.SparseTensor(sp_input.indices, softmax_vals,
                                      sp_input.dense_shape)
@tf_export("sparse.maximum", v1=["sparse.maximum", "sparse_maximum"])
@deprecation.deprecated_endpoints("sparse_maximum")
def sparse_maximum(sp_a, sp_b, name=None):
  """Returns the element-wise max of two SparseTensors.

  Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

  Example:

  >>> sp_zero = tf.sparse.SparseTensor([[0]], [0], [7])
  >>> sp_one = tf.sparse.SparseTensor([[1]], [1], [7])
  >>> res = tf.sparse.maximum(sp_zero, sp_one)
  >>> res.values
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([0, 1], dtype=int32)>

  The reduction version of this elementwise operation is
  `tf.sparse.reduce_max`

  Args:
    sp_a: a `SparseTensor` operand whose dtype is real, and indices
      lexicographically ordered.
    sp_b: the other `SparseTensor` operand with the same requirements (and
      the same shape).
    name: optional name of the operation.

  Returns:
    output: the output SparseTensor.
  """
  scope_inputs = [sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]
  with ops.name_scope(name, "SparseSparseMaximum", scope_inputs) as name:
    max_indices, max_values = gen_sparse_ops.sparse_sparse_maximum(
        sp_a.indices,
        sp_a.values,
        sp_a.dense_shape,
        sp_b.indices,
        sp_b.values,
        sp_b.dense_shape,
        name=name)
  # Both operands share a dense shape, so either one can supply it.
  return sparse_tensor.SparseTensor(max_indices, max_values, sp_a.dense_shape)
@tf_export("sparse.minimum", v1=["sparse.minimum", "sparse_minimum"])
@deprecation.deprecated_endpoints("sparse_minimum")
def sparse_minimum(sp_a, sp_b, name=None):
  """Returns the element-wise min of two SparseTensors.

  Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

  Example:

  >>> sp_zero = tf.sparse.SparseTensor([[0]], [0], [7])
  >>> sp_one = tf.sparse.SparseTensor([[1]], [1], [7])
  >>> res = tf.sparse.minimum(sp_zero, sp_one)
  >>> res.values
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([0, 0], dtype=int32)>

  Args:
    sp_a: a `SparseTensor` operand whose dtype is real, and indices
      lexicographically ordered.
    sp_b: the other `SparseTensor` operand with the same requirements (and
      the same shape).
    name: optional name of the operation.

  Returns:
    output: the output SparseTensor.
  """
  scope_inputs = [sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]
  with ops.name_scope(name, "SparseSparseMinimum", scope_inputs) as name:
    min_indices, min_values = gen_sparse_ops.sparse_sparse_minimum(
        sp_a.indices,
        sp_a.values,
        sp_a.dense_shape,
        sp_b.indices,
        sp_b.values,
        sp_b.dense_shape,
        name=name)
  # Both operands share a dense shape, so either one can supply it.
  return sparse_tensor.SparseTensor(min_indices, min_values, sp_a.dense_shape)


@tf_export("sparse.transpose", v1=["sparse.transpose", "sparse_transpose"])
@deprecation.deprecated_endpoints("sparse_transpose")
def sparse_transpose(sp_input, perm=None, name=None):
  """Transposes a `SparseTensor`.

  This is the sparse version of `tf.transpose`.  Dimension `i` of the output
  corresponds to input dimension `perm[i]`.  If `perm` is not given, it
  defaults to `(n-1, ..., 0)` where `n` is the input rank, so a 2-D input
  gets a regular matrix transpose.  For batches of sparse matrices with the
  batch in dimension 0, use `perm=[0, 2, 1]`.

  Args:
    sp_input: The input `SparseTensor`.
    perm: A permutation vector of the dimensions of `sp_input`.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A transposed `SparseTensor`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  with ops.name_scope(name, "SparseTranspose", [sp_input]) as name:
    if perm is None:
      # Default: reverse the dimension order.  Prefer the statically known
      # rank so perm itself is a constant when possible.
      if sp_input.shape.rank is not None:
        static_rank = sp_input.shape.rank
        perm = (static_rank - 1) - np.arange(0, static_rank, 1)
      else:
        dynamic_rank = array_ops.rank(sp_input)
        perm = (dynamic_rank - 1) - math_ops.range(0, dynamic_rank, 1)

    # Permute the columns of the indices matrix.
    permuted_indices = array_ops.transpose(
        array_ops.gather(array_ops.transpose(sp_input.indices), perm))

    # Compute the permuted dense shape statically when both perm and the
    # input shape are fully known; otherwise fall back to a gather op.
    static_perm = tensor_util.constant_value(ops.convert_to_tensor(perm))
    if static_perm is not None and sp_input.get_shape().is_fully_defined():
      input_dims = sp_input.get_shape().as_list()
      permuted_shape = [input_dims[p] for p in static_perm]
    else:
      permuted_shape = array_ops.gather(sp_input.dense_shape, perm)

    permuted = sparse_tensor.SparseTensor(permuted_indices, sp_input.values,
                                          permuted_shape)
    # Restore canonical (row-major) index ordering after the permutation.
    return sparse_reorder(permuted)


@tf_export("sparse.map_values", v1=[])
@dispatch.add_dispatch_support
def map_values(op, *args, **kwargs):
  """Applies `op` to the `.values` tensor of one or more `SparseTensor`s.

  Replaces any `SparseTensor` in `args` or `kwargs` with its `values` tensor
  (which contains the non-default values for the SparseTensor), and then
  calls `op`.  Returns a `SparseTensor` built from the input SparseTensors'
  `indices`, `dense_shape`, and the value returned by `op`.  If several
  `SparseTensor`s are passed, they must have equal `indices` and dense
  shapes.

  Note: implicit zeros remain implicit (e.g. `map_values(tf.add, s, 5)` does
  not touch them), but any *explicit* zeros stored in the tensor are mapped.

  Args:
    op: The operation that should be applied to the SparseTensor `values`.
      `op` is typically an element-wise operation (such as math_ops.add), but
      any operation that preserves the shape can be used.
    *args: Arguments for `op`.
    **kwargs: Keyword arguments for `op`.

  Returns:
    A `SparseTensor` whose `indices` and `dense_shape` matches the `indices`
    and `dense_shape` of all input `SparseTensor`s.

  Raises:
    ValueError: If args contains no `SparseTensor`, or if the `indices` or
      `dense_shape`s of the input `SparseTensor`s are not equal.
  """
  found_sparse = []
  plain_args = _replace_sparse_with_values(args, found_sparse)
  plain_kwargs = _replace_sparse_with_values(kwargs, found_sparse)
  if not found_sparse:
    raise ValueError("No SparseTensor in argument list of map_values")

  with ops.control_dependencies(_assert_sparse_compatible(found_sparse)):
    # The compatibility assertions guarantee identical indices/dense_shape
    # across all sparse inputs, so the first one can supply the structure.
    mapped = op(*plain_args, **plain_kwargs)
    return sparse_tensor.SparseTensor(found_sparse[0].indices, mapped,
                                      found_sparse[0].dense_shape)
Only the values in the SparseTensor's `values` tensor are counted, missing zeros are ignored. If `minlength` and `maxlength` are not given, returns a vector with length `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise. >>> data = tf.sparse.SparseTensor( ... indices=[[0, 3], [1, 7], [2, 4], [3, 0], ... [4, 9], [5, 1], [6, 8], [7, 2]], ... values=[1,1,2,3,2,4,4,5], ... dense_shape=[8, 10]) >>> tf.math.bincount(data) <tf.Tensor: ... numpy=array([0, 2, 2, 1, 2, 1], dtype=int32)> Vector length = Maximum element in vector `values` is 5. Adding 1, which is 6 will be the vector length. Each bin value in the output indicates number of occurrences of the particular index. Here, index 1 in output has a value 2. This indicates value 1 occurs two times in `values`. **Bin-counting with weights** >>> indices=[[0, 3], [1, 7], [2, 4], [3, 0], [4, 9], [5, 1], [6, 8], [7, 2]] >>> data = tf.sparse.SparseTensor( ... indices=indices, ... values=[1,1,2,3,2,4,4,5], ... dense_shape=[8, 10]) >>> weights = tf.sparse.SparseTensor( ... indices=indices, ... values=[1,5,0,1,0,5,4,5], ... dense_shape=[8, 10]) >>> tf.math.bincount(data, weights=weights) <tf.Tensor: ... numpy=array([0, 6, 0, 1, 9, 5], dtype=int32)> When `weights` is specified, bins will be incremented by the corresponding weight instead of 1. Here, index 1 in output has a value 6. This is the summation of `weights` corresponding to the value in `values` (i.e. for index 1, the first two data values are 1 so the first two weights, 1 and 5, are summed). On GPU, `bincount` with weights is only supported when `axis=0` and XLA is enabled (typically when a function decorated with `@tf.function(jit_compile=True)`). **Bin-counting matrix rows independently** This example uses `axis=-1` with a 2 dimensional input and returns a `Tensor` with bincounting where axis 0 is **not** flattened, i.e. an independent bincount for each matrix row. >>> data = tf.sparse.SparseTensor( ... indices=[[0, 3], [0, 7], [1, 4], [1, 0], ... 
[1, 9], [2, 1], [2, 8], [2, 2]], ... values=[1,1,2,3,2,4,4,5], ... dense_shape=[3, 10]) >>> tf.math.bincount(data, axis=-1) <tf.Tensor: shape=(3, 6), dtype=int32, numpy= array([[0, 2, 0, 0, 0, 0], [0, 0, 2, 1, 0, 0], [0, 0, 0, 0, 2, 1]], dtype=int32)> **Bin-counting with binary_output** This example gives binary output instead of counting the occurrence. >>> data = tf.sparse.SparseTensor( ... indices=[[0, 3], [0, 7], [1, 4], [1, 0], ... [1, 9], [2, 1], [2, 8], [2, 2]], ... values=[1,1,2,3,2,4,4,5], ... dense_shape=[3, 10]) >>> tf.math.bincount(data, axis=-1, binary_output=True) <tf.Tensor: shape=(3, 6), dtype=int32, numpy= array([[0, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1]], dtype=int32)> **Missing zeros in SparseTensor** Note that missing zeros (implict zeros) in SparseTensor are **NOT** counted. This supports cases such as `0` in the values tensor indicates that index/id `0`is present and a missing zero indicates that no index/id is present. If counting missing zeros is desired, there are workarounds. For the `axis=0` case, the number of missing zeros can computed by subtracting the number of elements in the SparseTensor's `values` tensor from the number of elements in the dense shape, and this difference can be added to the first element of the output of `bincount`. For all cases, the SparseTensor can be converted to a dense Tensor with `tf.sparse.to_dense` before calling `tf.math.bincount`. >>> data = tf.sparse.SparseTensor( ... indices=[[0, 3], [1, 7], [2, 4], [3, 0], ... [4, 9], [5, 1], [6, 8], [7, 2]], ... values=[1,1,2,3,2,4,4,5], ... dense_shape=[8, 10]) >>> counts = tf.math.bincount(data, dtype=tf.int64) >>> dense_size = tf.math.reduce_prod(data.dense_shape) >>> missing_zeros = dense_size - tf.size(data.values, out_type=tf.int64) >>> tf.concat([[counts[0] + missing_zeros], counts[1:]], 0) <tf.Tensor: ... numpy=array([72, 2, 2, 1, 2, 1])> >>> data = tf.sparse.SparseTensor( ... indices=[[0, 3], [1, 7], [2, 4], [3, 0], ... 
[4, 9], [5, 1], [6, 8], [7, 2]], ... values=[1,1,2,3,2,4,4,5], ... dense_shape=[8, 10]) >>> tf.math.bincount(tf.sparse.to_dense(data), dtype=tf.int64) <tf.Tensor: ... numpy=array([72, 2, 2, 1, 2, 1])> Args: arr: A SparseTensor whose values should be counted. These tensors must have a rank of 2 if `axis=-1`. weights: If non-None, must be a SparseTensor with the same dense shape and same indices as `arr`. For each value in `arr`, the bin will be incremented by the corresponding weight instead of 1. If non-None, `binary_output` must be False. minlength: If given, ensures the output has length at least `minlength`, padding with zeros at the end if necessary. maxlength: If given, skips values in `arr` that are equal or greater than `maxlength`, ensuring that the output has length at most `maxlength`. dtype: If `weights` is None, determines the type of the output bins. name: A name scope for the associated operations (optional). axis: The axis to slice over. Axes at and below `axis` will be flattened before bin counting. Currently, only `0`, and `-1` are supported. If None, all axes will be flattened (identical to passing `0`). XLA does not support `axis=-1`. binary_output: If True, this op will output 1 instead of the number of times a token appears (equivalent to one_hot + reduce_any instead of one_hot + reduce_add). Defaults to False. Returns: A vector with the same dtype as `weights` or the given `dtype` containing the bincount values. Raises: `InvalidArgumentError` if negative values are provided as an input. """ name = "bincount" if name is None else name with ops.name_scope(name): if weights is not None and binary_output: raise ValueError("Arguments `binary_output` and `weights` are mutually " "exclusive. Please specify only one.") if not arr.dtype.is_integer: arr = math_ops.cast(arr, dtypes.int32) if axis is None: axis = 0 if axis not in [0, -1]: raise ValueError(f"Unsupported value for argument axis={axis}. 
Only 0 and" " -1 are currently supported.") total_size = array_ops.size(arr) array_is_nonempty = total_size > 0 # For the case where all values are implicit zeros, reduce_max # returns the integer closest to negative infinity. max_value = math_ops.maximum(math_ops.reduce_max(arr.values), -1) output_size = math_ops.cast(array_is_nonempty, arr.dtype) * (max_value + 1) if minlength is not None: minlength = ops.convert_to_tensor( minlength, name="minlength", dtype=arr.dtype) output_size = gen_math_ops.maximum(minlength, output_size) if maxlength is not None: maxlength = ops.convert_to_tensor( maxlength, name="maxlength", dtype=arr.dtype) output_size = gen_math_ops.minimum(maxlength, output_size) if axis == 0: if weights is not None: weights = validate_sparse_weights(arr, weights, dtype) arr = arr.values if isinstance(arr, sparse_tensor.SparseTensor): # axis != 0 case weights = validate_sparse_weights(arr, weights, dtype) return gen_math_ops.sparse_bincount( indices=arr.indices, values=arr.values, dense_shape=arr.dense_shape, size=output_size, weights=weights, binary_output=binary_output) else: # axis == 0 case weights = bincount_ops.validate_dense_weights(arr, weights, dtype) return gen_math_ops.dense_bincount( input=arr, size=output_size, weights=weights, binary_output=binary_output) @tf_export("sparse.bincount") @dispatch.add_dispatch_support def sparse_bincount(values, weights=None, axis=0, minlength=None, maxlength=None, binary_output=False, name=None): """Count the number of times an integer value appears in a tensor. This op takes an N-dimensional `Tensor`, `RaggedTensor`, or `SparseTensor`, and returns an N-dimensional int64 SparseTensor where element `[i0...i[axis], j]` contains the number of times the value `j` appears in slice `[i0...i[axis], :]` of the input tensor. Currently, only N=0 and N=-1 are supported. Args: values: A Tensor, RaggedTensor, or SparseTensor whose values should be counted. These tensors must have a rank of 2 if `axis=-1`. 
weights: If non-None, must be the same shape as `arr`. If `arr` is a SparseTensor, `weights` must be a SparseTensor with the same dense shape and same indices as `arr`. For each value in `value`, the bin will be incremented by the corresponding weight instead of 1. axis: The axis to slice over. Axes at and below `axis` will be flattened before bin counting. Currently, only `0`, and `-1` are supported. If None, all axes will be flattened (identical to passing `0`). minlength: If given, ensures the output has length at least `minlength`, padding with zeros at the end if necessary. maxlength: If given, skips values in `values` that are equal or greater than `maxlength`, ensuring that the output has length at most `maxlength`. binary_output: If True, this op will output 1 instead of the number of times a token appears (equivalent to one_hot + reduce_any instead of one_hot + reduce_add). Defaults to False. name: A name for this op. Returns: A SparseTensor with `output.shape = values.shape[:axis] + [N]`, where `N` is * `maxlength` (if set); * `minlength` (if set, and `minlength > reduce_max(values)`); * `0` (if `values` is empty); * `reduce_max(values) + 1` otherwise. Raises: `InvalidArgumentError` if negative values are provided as an input. Examples: **Bin-counting every item in individual batches** This example takes an input (which could be a Tensor, RaggedTensor, or SparseTensor) and returns a SparseTensor where the value of (i,j) is the number of times value j appears in batch i. >>> data = np.array([[10, 20, 30, 20], [11, 101, 11, 10001]], dtype=np.int64) >>> tf.sparse.bincount(data, axis=-1) SparseTensor(indices=tf.Tensor( [[ 0 10] [ 0 20] [ 0 30] [ 1 11] [ 1 101] [ 1 10001]], shape=(6, 2), dtype=int64), values=tf.Tensor([1 2 1 2 1 1], shape=(6,), dtype=int64), dense_shape=tf.Tensor([ 2 10002], shape=(2,), dtype=int64)) This example shows a sparse tensor input. Missing zeros are not counted. >>> data = tf.sparse.SparseTensor( ... 
indices=[[0, 3], [0, 7], [0, 8], [0, 11], ... [1, 9], [1, 11], [1, 18], [1, 27]], ... values=[10, 20, 30, 20, 11, 101, 11, 10001], ... dense_shape=[2, 30]) >>> tf.sparse.bincount(data, axis=-1) SparseTensor(indices=tf.Tensor( [[ 0 10] [ 0 20] [ 0 30] [ 1 11] [ 1 101] [ 1 10001]], shape=(6, 2), dtype=int64), values=tf.Tensor([1 2 1 2 1 1], shape=(6,), dtype=int32), dense_shape=tf.Tensor([ 2 10002], shape=(2,), dtype=int64)) **Bin-counting with defined output shape** This example takes an input (which could be a Tensor, RaggedTensor, or SparseTensor) and returns a SparseTensor where the value of (i,j) is the number of times value j appears in batch i. However, all values of j above 'maxlength' are ignored. The dense_shape of the output sparse tensor is set to 'minlength'. Note that, while the input is identical to the example above, the value '10001' in batch item 2 is dropped, and the dense shape is [2, 500] instead of [2,10002] or [2, 102]. >>> minlength = maxlength = 500 >>> data = np.array([[10, 20, 30, 20], [11, 101, 11, 10001]], dtype=np.int64) >>> tf.sparse.bincount( ... data, axis=-1, minlength=minlength, maxlength=maxlength) SparseTensor(indices=tf.Tensor( [[ 0 10] [ 0 20] [ 0 30] [ 1 11] [ 1 101]], shape=(5, 2), dtype=int64), values=tf.Tensor([1 2 1 2 1], shape=(5,), dtype=int64), dense_shape=tf.Tensor([ 2 500], shape=(2,), dtype=int64)) **Binary bin-counting** This example takes an input (which could be a Tensor, RaggedTensor, or SparseTensor) and returns a SparseTensor where (i,j) is 1 if the value j appears in batch i at least once and is 0 otherwise. Note that, even though some values (like 20 in batch 1 and 11 in batch 2) appear more than once, the 'values' tensor is all 1s. 
>>> data = np.array([[10, 20, 30, 20], [11, 101, 11, 10001]], dtype=np.int64) >>> tf.sparse.bincount(data, binary_output=True, axis=-1) SparseTensor(indices=tf.Tensor( [[ 0 10] [ 0 20] [ 0 30] [ 1 11] [ 1 101] [ 1 10001]], shape=(6, 2), dtype=int64), values=tf.Tensor([1 1 1 1 1 1], shape=(6,), dtype=int64), dense_shape=tf.Tensor([ 2 10002], shape=(2,), dtype=int64)) **Weighted bin-counting** This example takes two inputs - a values tensor and a weights tensor. These tensors must be identically shaped, and have the same row splits or indices in the case of RaggedTensors or SparseTensors. When performing a weighted count, the op will output a SparseTensor where the value of (i, j) is the sum of the values in the weight tensor's batch i in the locations where the values tensor has the value j. In this case, the output dtype is the same as the dtype of the weights tensor. >>> data = np.array([[10, 20, 30, 20], [11, 101, 11, 10001]], dtype=np.int64) >>> weights = [[2, 0.25, 15, 0.5], [2, 17, 3, 0.9]] >>> tf.sparse.bincount(data, weights=weights, axis=-1) SparseTensor(indices=tf.Tensor( [[ 0 10] [ 0 20] [ 0 30] [ 1 11] [ 1 101] [ 1 10001]], shape=(6, 2), dtype=int64), values=tf.Tensor([2. 0.75 15. 5. 17. 0.9], shape=(6,), dtype=float32), dense_shape=tf.Tensor([ 2 10002], shape=(2,), dtype=int64)) """ with ops.name_scope(name, "count", [values, weights]): if not isinstance(values, sparse_tensor.SparseTensor): values = tensor_conversion.convert_to_tensor_v2_with_dispatch( values, name="values") if weights is not None: # Note that `weights` is not used for dispatch and if there is a type # mismatch between `values` and `weights`, `weights` can be a RaggedTensor # (or potentially some other kind of CompositeTensor) where conversion # to a dense tensor fails. 
if not isinstance(weights, composite_tensor.CompositeTensor): weights = tensor_conversion.convert_to_tensor_v2_with_dispatch( weights, name="weights") if weights is not None and binary_output: raise ValueError("Arguments `binary_output` and `weights` are mutually " "exclusive. Please specify only one.") if axis is None: axis = 0 if axis not in [0, -1]: raise ValueError(f"Unsupported value for argument axis={axis}. Only 0 and" " -1 are currently supported.") minlength_value = minlength if minlength is not None else -1 maxlength_value = maxlength if maxlength is not None else -1 if axis == 0: if isinstance(values, sparse_tensor.SparseTensor): if weights is not None: weights = validate_sparse_weights(values, weights) values = values.values else: if weights is not None: weights = array_ops.reshape(weights, [-1]) values = array_ops.reshape(values, [-1]) if isinstance(values, sparse_tensor.SparseTensor): weights = validate_sparse_weights(values, weights) c_ind, c_val, c_shape = gen_count_ops.sparse_count_sparse_output( values.indices, values.values, values.dense_shape, weights, minlength=minlength_value, maxlength=maxlength_value, binary_output=binary_output) else: weights = bincount_ops.validate_dense_weights(values, weights) c_ind, c_val, c_shape = gen_count_ops.dense_count_sparse_output( values, weights=weights, minlength=minlength_value, maxlength=maxlength_value, binary_output=binary_output) return sparse_tensor.SparseTensor(c_ind, c_val, c_shape) def validate_sparse_weights(values, weights, dtype=None): """Validates the passed weight tensor or creates an empty one.""" if weights is None: if dtype: return array_ops.constant([], dtype=dtype) return array_ops.constant([], dtype=values.values.dtype) if not isinstance(weights, sparse_tensor.SparseTensor): raise ValueError( "Argument `weights` must be a SparseTensor if `values` is a " f"SparseTensor. 
Received weights={weights} of type: " f"{type(weights).__name__}") checks = [] if weights.dense_shape is not values.dense_shape: checks.append( check_ops.assert_equal( weights.dense_shape, values.dense_shape, message="'weights' and 'values' must have the same dense shape.")) if weights.indices is not values.indices: checks.append( check_ops.assert_equal( weights.indices, values.indices, message="'weights' and 'values' must have the same indices.") ) if checks: with ops.control_dependencies(checks): weights = array_ops.identity(weights.values) else: weights = weights.values return weights def _assert_sparse_compatible(sparse_tensors): """Check that all of `sparse_tensors` have same `indices` and `dense_shape`. Args: sparse_tensors: A list of sparse tensors. Returns: An op to be used as a control dependency. """ checks = [] first = sparse_tensors[0] for t in sparse_tensors[1:]: checks.append( check_ops.assert_equal( first.dense_shape, t.dense_shape, message="Mismatched shapes!")) checks.append( check_ops.assert_equal( first.indices, t.indices, message="Mismatched indices!")) return checks def _replace_sparse_with_values(value, sparse_list): """Replace `SparseTensor`s with their values in `value` Each `SparseTensor` in `value` is replaced by its `values` tensor, and collects all `SparseTensor`s in `sparse_list`. Args: value: A structure of `Tensor`s and `SparseTensor`s sparse_list: A list. Output parameter that collects all `SparseTensor`s in `value`. Returns: `value` with each SparseTensor replaced by its `.value` attribute. """ flat_vals = nest.flatten(value, expand_composites=False) new_vals = [] for v in flat_vals: if isinstance(v, sparse_tensor.SparseTensor): sparse_list.append(v) new_vals.append(v.values) else: new_vals.append(v) return nest.pack_sequence_as(value, new_vals, expand_composites=False) def _add_sparse_to_tensors_map(sp_input, container=None, shared_name=None, name=None): """Add a `SparseTensor` to a `SparseTensorsMap` and return its handle. 
Args: sp_input: The input `SparseTensor`. container: The container for the underlying `SparseTensorsMap` (optional). shared_name: The shared name for the underlying `SparseTensorsMap` (optional, defaults to the name of the newly created op). name: A name prefix for the returned tensors (optional). Returns: A string 1-vector (1D `Tensor`), with the single element representing the a unique handle to a `SparseTensor` stored by the `SparseTensorMap` underlying this op. Raises: TypeError: If `sp_input` is not a `SparseTensor`. """ sp_input = _convert_to_sparse_tensor(sp_input) return gen_sparse_ops.add_sparse_to_tensors_map( sp_input.indices, sp_input.values, sp_input.dense_shape, container=container, shared_name=shared_name, name=name) def _add_many_sparse_to_tensors_map(sp_input, container=None, shared_name=None, name=None): """Add a minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles. The `SparseTensor` must have rank `R` greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the `SparseTensor` must be sorted in increasing order of this first dimension. The serialized `SparseTensor` objects going into each row of the output `Tensor` will have rank `R-1`. The minibatch size `N` is extracted from `sparse_shape[0]`. Args: sp_input: The input rank `R` `SparseTensor`. container: The container for the underlying `SparseTensorsMap` (optional). shared_name: The shared name for the underlying `SparseTensorsMap` (optional, defaults to the name of the newly created op). name: A name prefix for the returned tensors (optional). Returns: A string matrix (2-D `Tensor`) with `N` rows and `1` column. Each row represents a unique handle to a `SparseTensor` stored by the `SparseTensorMap` underlying this op. Raises: TypeError: If `sp_input` is not a `SparseTensor`. 
""" sp_input = _convert_to_sparse_tensor(sp_input) return gen_sparse_ops.add_many_sparse_to_tensors_map( sp_input.indices, sp_input.values, sp_input.dense_shape, container=container, shared_name=shared_name, name=name) def _take_many_sparse_from_tensors_map(sparse_map_op, sparse_handles, rank=None, name=None): """Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. The input `sparse_handles` must be a string matrix of shape `[N, 1]` where `N` is the minibatch size and the rows correspond to packed outputs of `add_sparse_to_tensors_map`. The ranks of the original `SparseTensor` objects must all match. When the final `SparseTensor` is created, it has rank one higher than the ranks of the incoming `SparseTensor` objects (they have been concatenated along a new row dimension). The output `SparseTensor` object's shape values for all dimensions but the first are the max across the input `SparseTensor` objects' shape values for the corresponding dimensions. Its first shape value is `N`, the minibatch size. The input `SparseTensor` objects' indices are assumed ordered in standard lexicographic order. If this is not the case, after this step run `sparse.reorder` to restore index ordering. For example, if the serialized input is a `[2, 3]` matrix representing two original `SparseTensor` objects: index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] and index = [ 2] [10] values = [4, 5] shape = [30] then the final deserialized `SparseTensor` will be: index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] shape = [2 50] Args: sparse_map_op: The `Operation` that created the original handles. Usually this is, e.g., `add_sparse_to_tensors_map(...).op`. sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`. The serialized and packed `SparseTensor` objects. rank: (optional) Python int, the rank of the `SparseTensor` objects. 
name: A name prefix for the returned tensors (optional) Returns: A `SparseTensor` representing the deserialized `SparseTensor`s, concatenated along the `SparseTensor`s' first dimension. All of the serialized `SparseTensor`s must have had the same rank and type. """ if not isinstance(sparse_map_op, ops.Operation): raise TypeError("sparse_map_op be an Operation") if sparse_map_op.type not in ("AddSparseToTensorsMap", "AddManySparseToTensorsMap"): raise TypeError( "sparse_map_op must be one of AddSparseToTensorsMap or " "AddSparseToTensorsMap. Instead, found `%s`." % sparse_map_op.type) with ops.colocate_with(sparse_map_op): shared_name = sparse_map_op.get_attr("shared_name") or sparse_map_op.name output_indices, output_values, output_shape = ( gen_sparse_ops.take_many_sparse_from_tensors_map( sparse_handles, dtype=sparse_map_op.get_attr("T"), container=sparse_map_op.get_attr("container"), shared_name=shared_name, name=name)) # Feed rank data back in, if available output_indices.set_shape([None, rank]) output_shape.set_shape([rank]) return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
KeywordRequired
python
python-excel__xlwt
xlwt/BIFFRecords.py
{ "start": 46843, "end": 47672 }
class ____(BiffRecord): """ Record DIMENSIONS, BIFF8: Offset Size Contents 0 4 Index to first used row 4 4 Index to last used row, increased by 1 8 2 Index to first used column 10 2 Index to last used column, increased by 1 12 2 Not used """ _REC_ID = 0x0200 def __init__(self, first_used_row, last_used_row, first_used_col, last_used_col): if first_used_row > last_used_row or first_used_col > last_used_col: # Special case: empty worksheet first_used_row = first_used_col = 0 last_used_row = last_used_col = -1 self._rec_data = pack('<2L3H', first_used_row, last_used_row + 1, first_used_col, last_used_col + 1, 0x00)
DimensionsRecord
python
langchain-ai__langchain
libs/partners/anthropic/tests/unit_tests/middleware/test_file_search.py
{ "start": 8068, "end": 14924 }
class ____: """Tests for filesystem-backed grep search.""" def test_grep_content_mode(self) -> None: """Test grep with content output mode.""" middleware = StateFileSearchMiddleware() state: AnthropicToolsState = { "messages": [], "text_editor_files": { "/src/main.py": { "content": ["def foo():", " pass", "def bar():"], "created_at": "2025-01-01T00:00:00", "modified_at": "2025-01-01T00:00:00", }, }, } result = middleware._handle_grep_search( pattern=r"def \w+\(\):", path="/", include=None, output_mode="content", state=state, ) assert isinstance(result, str) lines = result.split("\n") assert len(lines) == 2 assert lines[0] == "/src/main.py:1:def foo():" assert lines[1] == "/src/main.py:3:def bar():" def test_grep_count_mode(self) -> None: """Test grep with count output mode.""" middleware = StateFileSearchMiddleware() state: AnthropicToolsState = { "messages": [], "text_editor_files": { "/src/main.py": { "content": ["TODO: fix this", "print('hello')", "TODO: add tests"], "created_at": "2025-01-01T00:00:00", "modified_at": "2025-01-01T00:00:00", }, "/src/utils.py": { "content": ["TODO: implement"], "created_at": "2025-01-01T00:00:00", "modified_at": "2025-01-01T00:00:00", }, }, } result = middleware._handle_grep_search( pattern=r"TODO", path="/", include=None, output_mode="count", state=state ) assert isinstance(result, str) lines = result.split("\n") assert "/src/main.py:2" in lines assert "/src/utils.py:1" in lines def test_grep_with_include_filter(self) -> None: """Test grep with include file pattern filter.""" middleware = StateFileSearchMiddleware() state: AnthropicToolsState = { "messages": [], "text_editor_files": { "/src/main.py": { "content": ["import os"], "created_at": "2025-01-01T00:00:00", "modified_at": "2025-01-01T00:00:00", }, "/src/main.ts": { "content": ["import os from 'os'"], "created_at": "2025-01-01T00:00:00", "modified_at": "2025-01-01T00:00:00", }, }, } result = middleware._handle_grep_search( pattern="import", path="/", include="*.py", 
output_mode="files_with_matches", state=state, ) assert isinstance(result, str) assert "/src/main.py" in result assert "/src/main.ts" not in result def test_grep_with_brace_expansion_filter(self) -> None: """Test grep with brace expansion in include filter.""" middleware = StateFileSearchMiddleware() state: AnthropicToolsState = { "messages": [], "text_editor_files": { "/src/main.ts": { "content": ["const x = 1"], "created_at": "2025-01-01T00:00:00", "modified_at": "2025-01-01T00:00:00", }, "/src/App.tsx": { "content": ["const y = 2"], "created_at": "2025-01-01T00:00:00", "modified_at": "2025-01-01T00:00:00", }, "/src/main.py": { "content": ["z = 3"], "created_at": "2025-01-01T00:00:00", "modified_at": "2025-01-01T00:00:00", }, }, } result = middleware._handle_grep_search( pattern="const", path="/", include="*.{ts,tsx}", output_mode="files_with_matches", state=state, ) assert isinstance(result, str) assert "/src/main.ts" in result assert "/src/App.tsx" in result assert "/src/main.py" not in result def test_grep_with_base_path(self) -> None: """Test grep with base path restriction.""" middleware = StateFileSearchMiddleware() state: AnthropicToolsState = { "messages": [], "text_editor_files": { "/src/main.py": { "content": ["import foo"], "created_at": "2025-01-01T00:00:00", "modified_at": "2025-01-01T00:00:00", }, "/tests/test.py": { "content": ["import foo"], "created_at": "2025-01-01T00:00:00", "modified_at": "2025-01-01T00:00:00", }, }, } result = middleware._handle_grep_search( pattern="import", path="/src", include=None, output_mode="files_with_matches", state=state, ) assert isinstance(result, str) assert "/src/main.py" in result assert "/tests/test.py" not in result def test_grep_no_matches(self) -> None: """Test grep with no matching content.""" middleware = StateFileSearchMiddleware() state: AnthropicToolsState = { "messages": [], "text_editor_files": { "/src/main.py": { "content": ["print('hello')"], "created_at": "2025-01-01T00:00:00", "modified_at": 
"2025-01-01T00:00:00", }, }, } result = middleware._handle_grep_search( pattern=r"TODO", path="/", include=None, output_mode="files_with_matches", state=state, ) assert isinstance(result, str) assert result == "No matches found" def test_grep_invalid_regex(self) -> None: """Test grep with invalid regex pattern.""" middleware = StateFileSearchMiddleware() state: AnthropicToolsState = { "messages": [], "text_editor_files": {}, } result = middleware._handle_grep_search( pattern=r"[unclosed", path="/", include=None, output_mode="files_with_matches", state=state, ) assert isinstance(result, str) assert "Invalid regex pattern" in result
TestFilesystemGrepSearch
python
dagster-io__dagster
python_modules/dagster/dagster_tests/execution_tests/pipes_tests/test_threaded_message_reader.py
{ "start": 515, "end": 1129 }
class ____(PipesChunkedLogReader): def __init__(self, *, path: str, interval: float = 10, target_stream: TextIO): super().__init__(interval=interval, target_stream=target_stream) self.path = path self.file_position = 0 def target_is_readable(self, params: PipesParams) -> bool: return os.path.exists(self.path) def download_log_chunk(self, params: PipesParams) -> str: with open(self.path) as file: file.seek(self.file_position) chunk = file.read() self.file_position = file.tell() return chunk
PipesFileLogReader
python
getsentry__sentry
src/sentry/profiles/flamegraph.py
{ "start": 1147, "end": 1376 }
class ____(TypedDict): project_id: int profiler_id: str chunk_id: str thread_id: NotRequired[str] start: NotRequired[str] end: NotRequired[str] transaction_id: NotRequired[str]
ContinuousProfileCandidate
python
FactoryBoy__factory_boy
examples/django_demo/generic_foreignkey/factories.py
{ "start": 855, "end": 998 }
class ____(TaggedItemFactory): content_object = factory.SubFactory(GroupFactory) class Meta: model = TaggedItem
TaggedGroupFactory
python
pytest-dev__pytest
testing/example_scripts/unittest/test_setup_skip_class.py
{ "start": 185, "end": 310 }
class ____(unittest.TestCase): @classmethod def setUpClass(cls): assert 0 @unittest.skip("skip all tests")
Base
python
fluentpython__example-code-2e
14-inheritance/diamond.py
{ "start": 896, "end": 1081 }
class ____(Root): # <2> def ping(self): print(f'{self}.ping() in A') super().ping() def pong(self): print(f'{self}.pong() in A') super().pong()
A
python
openai__openai-python
src/openai/resources/webhooks.py
{ "start": 4158, "end": 7820 }
class ____(AsyncAPIResource): def unwrap( self, payload: str | bytes, headers: HeadersLike, *, secret: str | None = None, ) -> UnwrapWebhookEvent: """Validates that the given payload was sent by OpenAI and parses the payload.""" if secret is None: secret = self._client.webhook_secret self.verify_signature(payload=payload, headers=headers, secret=secret) body = payload.decode("utf-8") if isinstance(payload, bytes) else payload return cast( UnwrapWebhookEvent, construct_type( type_=UnwrapWebhookEvent, value=json.loads(body), ), ) def verify_signature( self, payload: str | bytes, headers: HeadersLike, *, secret: str | None = None, tolerance: int = 300, ) -> None: """Validates whether or not the webhook payload was sent by OpenAI. Args: payload: The webhook payload headers: The webhook headers secret: The webhook secret (optional, will use client secret if not provided) tolerance: Maximum age of the webhook in seconds (default: 300 = 5 minutes) """ if secret is None: secret = self._client.webhook_secret if secret is None: raise ValueError( "The webhook secret must either be set using the env var, OPENAI_WEBHOOK_SECRET, " "on the client class, OpenAI(webhook_secret='123'), or passed to this function" ) from None signature_header = get_required_header(headers, "webhook-signature") timestamp = get_required_header(headers, "webhook-timestamp") webhook_id = get_required_header(headers, "webhook-id") # Validate timestamp to prevent replay attacks try: timestamp_seconds = int(timestamp) except ValueError: raise InvalidWebhookSignatureError("Invalid webhook timestamp format") from None now = int(time.time()) if now - timestamp_seconds > tolerance: raise InvalidWebhookSignatureError("Webhook timestamp is too old") from None if timestamp_seconds > now + tolerance: raise InvalidWebhookSignatureError("Webhook timestamp is too new") from None # Extract signatures from v1,<base64> format # The signature header can have multiple values, separated by spaces. 
# Each value is in the format v1,<base64>. We should accept if any match. signatures: list[str] = [] for part in signature_header.split(): if part.startswith("v1,"): signatures.append(part[3:]) else: signatures.append(part) # Decode the secret if it starts with whsec_ if secret.startswith("whsec_"): decoded_secret = base64.b64decode(secret[6:]) else: decoded_secret = secret.encode() body = payload.decode("utf-8") if isinstance(payload, bytes) else payload # Prepare the signed payload (OpenAI uses webhookId.timestamp.payload format) signed_payload = f"{webhook_id}.{timestamp}.{body}" expected_signature = base64.b64encode( hmac.new(decoded_secret, signed_payload.encode(), hashlib.sha256).digest() ).decode() # Accept if any signature matches if not any(hmac.compare_digest(expected_signature, sig) for sig in signatures): raise InvalidWebhookSignatureError("The given webhook signature does not match the expected signature")
AsyncWebhooks
python
apache__airflow
airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_hitl.py
{ "start": 17025, "end": 24342 }
class ____: @pytest.mark.usefixtures("sample_hitl_detail") def test_should_respond_200_with_existing_response( self, test_client: TestClient, expected_sample_hitl_detail_dict: dict[str, Any], ) -> None: with assert_queries_count(3): response = test_client.get("/dags/~/dagRuns/~/hitlDetails") assert response.status_code == 200 assert response.json() == { "hitl_details": [expected_sample_hitl_detail_dict], "total_entries": 1, } @pytest.mark.usefixtures("sample_hitl_details") @pytest.mark.parametrize( ("params", "expected_ti_count"), [ # ti related filter ({"dag_id_pattern": "hitl_dag"}, 5), ({"dag_id_pattern": "other_Dag_"}, 3), ({"task_id": "hitl_task_0"}, 1), ({"task_id_pattern": "another_hitl"}, 3), ({"map_index": -1}, 8), ({"map_index": 1}, 0), ({"state": "deferred"}, 5), ({"state": "success"}, 3), # hitl detail related filter ({"subject_search": "This is subject"}, 5), ({"body_search": "this is"}, 8), ({"response_received": False}, 5), ({"response_received": True}, 3), ({"responded_by_user_id": ["test"]}, 3), ({"responded_by_user_name": ["test"]}, 3), ( {"created_at_gte": from_datetime_to_zulu_without_ms(DEFAULT_CREATED_AT + timedelta(days=1))}, 0, ), ( {"created_at_lte": from_datetime_to_zulu_without_ms(DEFAULT_CREATED_AT - timedelta(days=1))}, 0, ), ( { "created_at_gte": from_datetime_to_zulu_without_ms(DEFAULT_CREATED_AT), "created_at_lte": from_datetime_to_zulu_without_ms(DEFAULT_CREATED_AT), }, 5, ), ], ids=[ "dag_id_pattern_hitl_dag", "dag_id_pattern_other_dag", "task_id", "task_id_pattern", "map_index_none", "map_index_1", "ti_state_deferred", "ti_state_success", "subject", "body", "response_not_received", "response_received", "responded_by_user_id", "responded_by_user_name", "created_at_gte", "created_at_lte", "created_at", ], ) def test_should_respond_200_with_existing_response_and_query( self, test_client: TestClient, params: dict[str, Any], expected_ti_count: int, ) -> None: with assert_queries_count(3): response = 
test_client.get("/dags/~/dagRuns/~/hitlDetails", params=params) assert response.status_code == 200 assert response.json()["total_entries"] == expected_ti_count assert len(response.json()["hitl_details"]) == expected_ti_count @pytest.mark.usefixtures("sample_hitl_details") def test_should_respond_200_with_existing_response_and_concrete_query( self, test_client: TestClient, ) -> None: response = test_client.get("/dags/hitl_dag_0/dagRuns/hitl_run_0/hitlDetails") assert response.status_code == 200 assert response.json() == { "hitl_details": [ { "task_instance": mock.ANY, "options": ["Approve", "Reject"], "subject": "This is subject 0", "body": "this is body 0", "defaults": ["Approve"], "multiple": False, "params": {"input_1": {"value": 1, "schema": {}, "description": None}}, "assigned_users": [], "created_at": DEFAULT_CREATED_AT.isoformat().replace("+00:00", "Z"), "responded_by_user": None, "responded_at": None, "chosen_options": None, "params_input": {}, "response_received": False, } ], "total_entries": 1, } @pytest.mark.usefixtures("sample_hitl_details") @pytest.mark.parametrize("asc_desc_mark", ["", "-"], ids=["asc", "desc"]) @pytest.mark.parametrize( ("key", "get_key_lambda"), [ # ti key ("ti_id", lambda x: x["task_instance"]["id"]), ("dag_id", lambda x: x["task_instance"]["dag_id"]), ("run_id", lambda x: x["task_instance"]["dag_run_id"]), ("run_after", lambda x: x["task_instance"]["run_after"]), ("rendered_map_index", lambda x: x["task_instance"]["rendered_map_index"]), ("task_instance_operator", lambda x: x["task_instance"]["operator_name"]), ("task_instance_state", lambda x: x["task_instance"]["state"]), # htil key ("subject", itemgetter("subject")), ("responded_at", itemgetter("responded_at")), ("created_at", itemgetter("created_at")), ], ids=[ # ti key "ti_id", "dag_id", "run_id", "run_after", "rendered_map_index", "task_instance_operator", "task_instance_state", # htil key "subject", "responded_at", "created_at", ], ) def 
test_should_respond_200_with_existing_response_and_order_by( self, test_client: TestClient, asc_desc_mark: str, key: str, get_key_lambda: Callable, ) -> None: reverse = asc_desc_mark == "-" response = test_client.get( "/dags/~/dagRuns/~/hitlDetails", params={"order_by": f"{asc_desc_mark}{key}"} ) data = response.json() hitl_details = data["hitl_details"] assert response.status_code == 200 assert data["total_entries"] == 8 assert len(hitl_details) == 8 sorted_hitl_details = sorted( hitl_details, key=lambda x: ( # pull none to the last no matter it's asc or desc (get_key_lambda(x) is not None) if reverse else (get_key_lambda(x) is None), get_key_lambda(x), x["task_instance"]["id"], ), reverse=reverse, ) assert hitl_details == sorted_hitl_details def test_should_respond_200_without_response(self, test_client: TestClient) -> None: response = test_client.get("/dags/~/dagRuns/~/hitlDetails") assert response.status_code == 200 assert response.json() == { "hitl_details": [], "total_entries": 0, } def test_should_respond_401(self, unauthenticated_test_client: TestClient) -> None: response = unauthenticated_test_client.get("/dags/~/dagRuns/~/hitlDetails") assert response.status_code == 401 def test_should_respond_403(self, unauthorized_test_client: TestClient) -> None: response = unauthorized_test_client.get("/dags/~/dagRuns/~/hitlDetails") assert response.status_code == 403
TestGetHITLDetailsEndpoint
python
facebookresearch__faiss
faiss/gpu/test/test_gpu_index_ivfsq.py
{ "start": 6398, "end": 7029 }
class ____(unittest.TestCase): def test_fp16(self): do_multi_test(faiss.ScalarQuantizer.QT_fp16) def test_8bit(self): do_multi_test(faiss.ScalarQuantizer.QT_8bit) def test_8bit_uniform(self): do_multi_test(faiss.ScalarQuantizer.QT_8bit_uniform) def test_6bit(self): do_multi_test(faiss.ScalarQuantizer.QT_6bit) def test_4bit(self): do_multi_test(faiss.ScalarQuantizer.QT_4bit) def test_4bit_uniform(self): do_multi_test(faiss.ScalarQuantizer.QT_4bit_uniform) def test_8bit_direct(self): do_multi_test(faiss.ScalarQuantizer.QT_8bit_direct)
TestSQ
python
getsentry__sentry
src/sentry/utils/concurrent.py
{ "start": 9285, "end": 10722 }
class ____: """\ Coordinates a set of ``Future`` objects (either from ``concurrent.futures``, or otherwise API compatible), and allows for attaching a callback when all futures have completed execution. """ def __init__(self, futures): self.__pending = set(futures) self.__completed = set() self.__callbacks = [] self.__lock = threading.Lock() for future in futures: future.add_done_callback(self.__mark_completed) def __iter__(self): with self.__lock: futures = self.__pending | self.__completed return iter(futures) def __execute_callback(self, callback): try: callback(self) except Exception as error: logger.warning("Error when calling callback %r: %s", callback, error, exc_info=True) def __mark_completed(self, future): with self.__lock: self.__pending.remove(future) self.__completed.add(future) remaining = len(self.__pending) if remaining == 0: for callback in self.__callbacks: self.__execute_callback(callback) def add_done_callback(self, callback): with self.__lock: remaining = len(self.__pending) if remaining > 0: self.__callbacks.append(callback) if remaining == 0: self.__execute_callback(callback)
FutureSet
python
jmcnamara__XlsxWriter
xlsxwriter/test/worksheet/test_cond_format06.py
{ "start": 345, "end": 3622 }
class ____(unittest.TestCase): """ Test assembling a complete Worksheet file. """ def test_assemble_xml_file(self): """Test writing a worksheet with conditional formatting.""" self.maxDiff = None fh = StringIO() worksheet = Worksheet() worksheet._set_filehandle(fh) worksheet.select() worksheet.write("A1", 10) worksheet.write("A2", 20) worksheet.write("A3", 30) worksheet.write("A4", 40) worksheet.conditional_format( "A1:A4", { "type": "top", "value": 15, "format": None, }, ) worksheet.conditional_format( "A1:A4", { "type": "bottom", "value": 16, "format": None, }, ) worksheet.conditional_format( "A1:A4", { "type": "top", "criteria": "%", "value": 17, "format": None, }, ) worksheet.conditional_format( "A1:A4", { "type": "bottom", "criteria": "%", "value": 18, "format": None, }, ) worksheet._assemble_xml_file() exp = _xml_to_list( """ <?xml version="1.0" encoding="UTF-8" standalone="yes"?> <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships"> <dimension ref="A1:A4"/> <sheetViews> <sheetView tabSelected="1" workbookViewId="0"/> </sheetViews> <sheetFormatPr defaultRowHeight="15"/> <sheetData> <row r="1" spans="1:1"> <c r="A1"> <v>10</v> </c> </row> <row r="2" spans="1:1"> <c r="A2"> <v>20</v> </c> </row> <row r="3" spans="1:1"> <c r="A3"> <v>30</v> </c> </row> <row r="4" spans="1:1"> <c r="A4"> <v>40</v> </c> </row> </sheetData> <conditionalFormatting sqref="A1:A4"> <cfRule type="top10" priority="1" rank="15"/> <cfRule type="top10" priority="2" bottom="1" rank="16"/> <cfRule type="top10" priority="3" percent="1" rank="17"/> <cfRule type="top10" priority="4" percent="1" bottom="1" rank="18"/> </conditionalFormatting> <pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/> </worksheet> """ ) got = _xml_to_list(fh.getvalue()) self.assertEqual(exp, got)
TestAssembleWorksheet
python
apache__airflow
providers/redis/tests/integration/redis/hooks/test_redis.py
{ "start": 931, "end": 1536 }
class ____: def test_real_ping(self): hook = RedisHook(redis_conn_id="redis_default") redis = hook.get_conn() assert redis.ping(), "Connection to Redis with PING works." def test_real_get_and_set(self): hook = RedisHook(redis_conn_id="redis_default") redis = hook.get_conn() assert redis.set("test_key", "test_value"), "Connection to Redis with SET works." assert redis.get("test_key") == b"test_value", "Connection to Redis with GET works." assert redis.delete("test_key") == 1, "Connection to Redis with DELETE works."
TestRedisHook
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 683996, "end": 684311 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("cursor", "node") cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor") node = sgqlc.types.Field("UserStatus", graphql_name="node")
UserStatusEdge
python
walkccc__LeetCode
solutions/3272. Find the Count of Good Integers/3272.py
{ "start": 0, "end": 953 }
class ____: def countGoodIntegers(self, n: int, k: int) -> int: halfLength = (n + 1) // 2 minHalf = 10**(halfLength - 1) maxHalf = 10**halfLength ans = 0 seen = set() for num in range(minHalf, maxHalf): palindrome = str(num) + str(num)[::-1][n % 2:] sortedDigits = ''.join(sorted(palindrome)) if int(palindrome) % k != 0 or sortedDigits in seen: continue seen.add(sortedDigits) digitCount = collections.Counter(palindrome) # Leading zeros are not allowed, so the first digit is special. firstDigitChoices = n - digitCount['0'] permutations = firstDigitChoices * math.factorial(n - 1) # For each repeated digit, divide by the factorial of the frequency since # permutations that swap identical digits don't create a new number. for freq in digitCount.values(): permutations //= math.factorial(freq) ans += permutations return ans
Solution
python
pytorch__pytorch
test/test_dataloader.py
{ "start": 13912, "end": 18452 }
class ____(TestCase): def test_empty(self): with self.assertRaisesRegex( ValueError, "At least one dataset should be passed" ): StackDataset() def test_mixed(self): with self.assertRaisesRegex(ValueError, "Supported either"): StackDataset( TensorDataset(torch.randn(15, 10)), a=TensorDataset(torch.randn(10, 15)) ) def test_size_mismatch(self): with self.assertRaisesRegex(ValueError, "Size mismatch between datasets"): StackDataset( TensorDataset(torch.randn(15, 10)), TensorDataset(torch.randn(10, 15)) ) with self.assertRaisesRegex(ValueError, "Size mismatch between datasets"): StackDataset( a=TensorDataset(torch.randn(15, 10)), b=TensorDataset(torch.randn(10, 15)), ) def test_len(self): source = StackDataset( TensorDataset(torch.randn(15, 10)), TensorDataset(torch.randn(15)) ) self.assertEqual(len(source), 15) source = StackDataset(TensorDataset(torch.randn(15, 10))) self.assertEqual(len(source), 15) source = StackDataset( a=TensorDataset(torch.randn(15, 10)), b=TensorDataset(torch.randn(15)) ) self.assertEqual(len(source), 15) source = StackDataset(a=TensorDataset(torch.randn(15, 10))) self.assertEqual(len(source), 15) def test_single(self): t = TensorDataset(torch.randn(15, 10)) source = StackDataset(t) for i in range(15): self.assertEqual(t[i], source[i][0]) source = StackDataset(a=t) for i in range(15): self.assertEqual(t[i], source[i]["a"]) def test_getitem(self): t = TensorDataset(torch.randn(15, 10)) l = TensorDataset(torch.randn(15, 5, 4)) source = StackDataset(t, l) for i in range(15): self.assertEqual(t[i], source[i][0]) self.assertEqual(l[i], source[i][1]) source = StackDataset(a=t, b=l) for i in range(15): self.assertEqual(t[i], source[i]["a"]) self.assertEqual(l[i], source[i]["b"]) def test_getitems(self): class GetItemsDataset(Dataset): def __init__(self) -> None: self.data = torch.randn(4) def __getitem__(self, item): return self.data[item] def __getitems__(self, items): return self.data[items] def __len__(self): return 4 t = GetItemsDataset() l = [1, 
2, 3, 4] source = StackDataset(t, l) batch = source.__getitems__([0, 1, 2, 3]) for i in range(4): self.assertEqual(t[i], batch[i][0]) self.assertEqual(l[i], batch[i][1]) source = StackDataset(t=t, l=l) batch = source.__getitems__([0, 1, 2, 3]) for i in range(4): self.assertEqual(t[i], batch[i]["t"]) self.assertEqual(l[i], batch[i]["l"]) def test_getitems_raises_index_error(self): class GetItemsDataset(Dataset): def __init__(self) -> None: self.data = torch.randn(4) def __getitem__(self, item): return self.data[item] def __getitems__(self, items): return self.data[items] def __len__(self): return 4 t = GetItemsDataset() l = [1, 2, 3, 4] source = StackDataset(t, l) with self.assertRaises(IndexError): source.__getitems__([0, 4]) def test_getitems_value_error(self): class GetItemsDataset(Dataset): def __init__(self) -> None: self.data = torch.randn(4) def __getitem__(self, item): return self.data[item] def __getitems__(self, items): return self.data[items][:-1] # return less def __len__(self): return 4 t = GetItemsDataset() l = [1, 2, 3, 4] source = StackDataset(t, l) with self.assertRaisesRegex( ValueError, "Nested dataset's output size mismatch. Expected 4, got 3" ): source.__getitems__([0, 1, 2, 3]) @unittest.skipIf( TEST_WITH_TSAN, "Fails with TSAN with the following error: starting new threads after multi-threaded " "fork is not supported. Dying (set die_after_fork=0 to override)", )
TestStackDataset
python
pydata__xarray
xarray/core/_aggregations.py
{ "start": 615, "end": 45421 }
class ____: __slots__ = () def reduce( self, func: Callable[..., Any], dim: Dims = None, *, axis: int | Sequence[int] | None = None, keep_attrs: bool | None = None, keepdims: bool = False, **kwargs: Any, ) -> Self: raise NotImplementedError() def count( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``count`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``count``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``count`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``count`` applied to its data and the indicated dimension(s) removed See Also -------- pandas.DataFrame.count dask.dataframe.DataFrame.count Dataset.count DataArray.count :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt <xarray.DataTree> Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a' Data variables: foo (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> dt.count() <xarray.DataTree> Group: / Dimensions: () Data variables: foo int64 8B 5 """ return self.reduce( duck_array_ops.count, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def all( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``all`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``all``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``all`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``all`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.all dask.array.all Dataset.all DataArray.all :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict( ... foo=( ... "time", ... np.array([True, True, True, True, True, False], dtype=bool), ... ) ... ), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt <xarray.DataTree> Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a' Data variables: foo (time) bool 6B True True True True True False >>> dt.all() <xarray.DataTree> Group: / Dimensions: () Data variables: foo bool 1B False """ return self.reduce( duck_array_ops.array_all, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def any( self, dim: Dims = None, *, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``any`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``any``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``any`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``any`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.any dask.array.any Dataset.any DataArray.any :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict( ... foo=( ... "time", ... np.array([True, True, True, True, True, False], dtype=bool), ... ) ... ), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt <xarray.DataTree> Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 
2001-06-30 labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a' Data variables: foo (time) bool 6B True True True True True False >>> dt.any() <xarray.DataTree> Group: / Dimensions: () Data variables: foo bool 1B True """ return self.reduce( duck_array_ops.array_any, dim=dim, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def max( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``max`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``max``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``max`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``max`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.max dask.array.max Dataset.max DataArray.max :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... 
) >>> dt <xarray.DataTree> Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a' Data variables: foo (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> dt.max() <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B 3.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.max(skipna=False) <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B nan """ return self.reduce( duck_array_ops.max, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def min( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``min`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``min``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``min`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``min`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.min dask.array.min Dataset.min DataArray.min :ref:`agg` User guide on reduction or aggregation operations. Examples -------- >>> dt = xr.DataTree( ... 
xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt <xarray.DataTree> Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a' Data variables: foo (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> dt.min() <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.min(skipna=False) <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B nan """ return self.reduce( duck_array_ops.min, dim=dim, skipna=skipna, numeric_only=False, keep_attrs=keep_attrs, **kwargs, ) def mean( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``mean`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``mean``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``mean`` on this object's data. These could include dask-specific kwargs like ``split_every``. 
Returns ------- reduced : DataTree New DataTree with ``mean`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.mean dask.array.mean Dataset.mean DataArray.mean :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt <xarray.DataTree> Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a' Data variables: foo (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> dt.mean() <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B 1.6 Use ``skipna`` to control whether NaNs are ignored. >>> dt.mean(skipna=False) <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B nan """ return self.reduce( duck_array_ops.mean, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def prod( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``prod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``prod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). 
min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``prod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``prod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.prod dask.array.prod Dataset.prod DataArray.prod :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt <xarray.DataTree> Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a' Data variables: foo (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> dt.prod() <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.prod(skipna=False) <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B nan Specify ``min_count`` for finer control over when NaNs are ignored. 
>>> dt.prod(skipna=True, min_count=2) <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B 0.0 """ return self.reduce( duck_array_ops.prod, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def sum( self, dim: Dims = None, *, skipna: bool | None = None, min_count: int | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``sum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``sum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). min_count : int or None, optional The required number of valid values to perform the operation. If fewer than min_count non-NA values are present the result will be NA. Only used if skipna is set to True or defaults to True for the array's dtype. Changed in version 0.17.0: if specified on an integer array and skipna=True, the result will be a float array. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``sum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``sum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.sum dask.array.sum Dataset.sum DataArray.sum :ref:`agg` User guide on reduction or aggregation operations. 
Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt <xarray.DataTree> Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a' Data variables: foo (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> dt.sum() <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B 8.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.sum(skipna=False) <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B nan Specify ``min_count`` for finer control over when NaNs are ignored. >>> dt.sum(skipna=True, min_count=2) <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B 8.0 """ return self.reduce( duck_array_ops.sum, dim=dim, skipna=skipna, min_count=min_count, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def std( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``std`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``std``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). 
ddof : int, default: 0 “Delta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``std`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``std`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.std dask.array.std Dataset.std DataArray.std :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt <xarray.DataTree> Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a' Data variables: foo (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> dt.std() <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B 1.02 Use ``skipna`` to control whether NaNs are ignored. >>> dt.std(skipna=False) <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B nan Specify ``ddof=1`` for an unbiased estimate. 
>>> dt.std(skipna=True, ddof=1) <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B 1.14 """ return self.reduce( duck_array_ops.std, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def var( self, dim: Dims = None, *, skipna: bool | None = None, ddof: int = 0, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``var`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``var``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). ddof : int, default: 0 “Delta Degrees of Freedom”: the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``var`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``var`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.var dask.array.var Dataset.var DataArray.var :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... 
pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt <xarray.DataTree> Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a' Data variables: foo (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> dt.var() <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B 1.04 Use ``skipna`` to control whether NaNs are ignored. >>> dt.var(skipna=False) <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B nan Specify ``ddof=1`` for an unbiased estimate. >>> dt.var(skipna=True, ddof=1) <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B 1.3 """ return self.reduce( duck_array_ops.var, dim=dim, skipna=skipna, ddof=ddof, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def median( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``median`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``median``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``median`` on this object's data. 
These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``median`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.median dask.array.median Dataset.median DataArray.median :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt <xarray.DataTree> Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a' Data variables: foo (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> dt.median() <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B 2.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.median(skipna=False) <xarray.DataTree> Group: / Dimensions: () Data variables: foo float64 8B nan """ return self.reduce( duck_array_ops.median, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumsum( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``cumsum`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumsum``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). 
By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumsum`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``cumsum`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumsum dask.array.cumsum Dataset.cumsum DataArray.cumsum DataTree.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt <xarray.DataTree> Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a' Data variables: foo (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> dt.cumsum() <xarray.DataTree> Group: / Dimensions: (time: 6) Dimensions without coordinates: time Data variables: foo (time) float64 48B 1.0 3.0 6.0 6.0 8.0 8.0 Use ``skipna`` to control whether NaNs are ignored. 
>>> dt.cumsum(skipna=False) <xarray.DataTree> Group: / Dimensions: (time: 6) Dimensions without coordinates: time Data variables: foo (time) float64 48B 1.0 3.0 6.0 6.0 8.0 nan """ return self.reduce( duck_array_ops.cumsum, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, ) def cumprod( self, dim: Dims = None, *, skipna: bool | None = None, keep_attrs: bool | None = None, **kwargs: Any, ) -> Self: """ Reduce this DataTree's data by applying ``cumprod`` along some dimension(s). Parameters ---------- dim : str, Iterable of Hashable, "..." or None, default: None Name of dimension[s] along which to apply ``cumprod``. For e.g. ``dim="x"`` or ``dim=["x", "y"]``. If "..." or None, will reduce over all dimensions. skipna : bool or None, optional If True, skip missing values (as marked by NaN). By default, only skips missing values for float dtypes; other dtypes either do not have a sentinel missing value (int) or ``skipna=True`` has not been implemented (object, datetime64 or timedelta64). keep_attrs : bool or None, optional If True, ``attrs`` will be copied from the original object to the new one. If False, the new object will be returned without attributes. **kwargs : Any Additional keyword arguments passed on to the appropriate array function for calculating ``cumprod`` on this object's data. These could include dask-specific kwargs like ``split_every``. Returns ------- reduced : DataTree New DataTree with ``cumprod`` applied to its data and the indicated dimension(s) removed See Also -------- numpy.cumprod dask.array.cumprod Dataset.cumprod DataArray.cumprod DataTree.cumulative :ref:`agg` User guide on reduction or aggregation operations. Notes ----- Non-numeric variables will be removed prior to reducing. Note that the methods on the ``cumulative`` method are more performant (with numbagg installed) and better supported. ``cumsum`` and ``cumprod`` may be deprecated in the future. Examples -------- >>> dt = xr.DataTree( ... xr.Dataset( ... 
data_vars=dict(foo=("time", np.array([1, 2, 3, 0, 2, np.nan]))), ... coords=dict( ... time=( ... "time", ... pd.date_range("2001-01-01", freq="ME", periods=6), ... ), ... labels=("time", np.array(["a", "b", "c", "c", "b", "a"])), ... ), ... ), ... ) >>> dt <xarray.DataTree> Group: / Dimensions: (time: 6) Coordinates: * time (time) datetime64[ns] 48B 2001-01-31 2001-02-28 ... 2001-06-30 labels (time) <U1 24B 'a' 'b' 'c' 'c' 'b' 'a' Data variables: foo (time) float64 48B 1.0 2.0 3.0 0.0 2.0 nan >>> dt.cumprod() <xarray.DataTree> Group: / Dimensions: (time: 6) Dimensions without coordinates: time Data variables: foo (time) float64 48B 1.0 2.0 6.0 0.0 0.0 0.0 Use ``skipna`` to control whether NaNs are ignored. >>> dt.cumprod(skipna=False) <xarray.DataTree> Group: / Dimensions: (time: 6) Dimensions without coordinates: time Data variables: foo (time) float64 48B 1.0 2.0 6.0 0.0 0.0 nan """ return self.reduce( duck_array_ops.cumprod, dim=dim, skipna=skipna, numeric_only=True, keep_attrs=keep_attrs, **kwargs, )
DataTreeAggregations
python
altair-viz__altair
tests/utils/test_schemapi.py
{ "start": 3477, "end": 3581 }
class ____(_TestSchema): _schema = {"$ref": "#/definitions/Bar"} _rootschema = Derived._schema
Bar
python
airbytehq__airbyte
airbyte-integrations/connectors/destination-milvus/destination_milvus/config.py
{ "start": 864, "end": 1185 }
class ____(BaseModel): mode: Literal["no_auth"] = Field("no_auth", const=True) class Config(OneOfOptionConfig): title = "No auth" description = "Do not authenticate (suitable for locally running test clusters, do not use for clusters with public IP addresses)" discriminator = "mode"
NoAuth
python
tox-dev__tox
src/tox/tox_env/python/pip/pip_install.py
{ "start": 835, "end": 1803 }
class ____(Installer[Python], ABC): def __init__(self, tox_env: Python, with_list_deps: bool = True) -> None: # noqa: FBT001, FBT002 self._with_list_deps = with_list_deps super().__init__(tox_env) def _register_config(self) -> None: if self._with_list_deps: # pragma: no branch self._env.conf.add_config( keys=["list_dependencies_command"], of_type=Command, default=Command(self.freeze_cmd()), desc="command used to list installed packages", ) @abstractmethod def freeze_cmd(self) -> list[str]: raise NotImplementedError def installed(self) -> list[str]: cmd: Command = self._env.conf["list_dependencies_command"] result = self._env.execute(cmd=cmd.args, stdin=StdinSource.OFF, run_id="freeze", show=False) result.assert_success() return result.out.splitlines()
PythonInstallerListDependencies
python
walkccc__LeetCode
solutions/2551. Put Marbles in Bags/2551.py
{ "start": 0, "end": 581 }
class ____: def putMarbles(self, weights: list[int], k: int) -> int: # To distribute marbles into k bags, there will be k - 1 cuts. If there's a # cut after weights[i], then weights[i] and weights[i + 1] will be added to # the cost. Also, no matter how we cut, weights[0] and weights[n - 1] will # be counted. So, the goal is to find the max//min k - 1 weights[i] + # weights[i + 1]. # weights[i] + weights[i + 1] arr = [a + b for a, b in itertools.pairwise(weights)] return sum(heapq.nlargest(k - 1, arr)) - sum(heapq.nsmallest(k - 1, arr))
Solution
python
sphinx-doc__sphinx
sphinx/addnodes.py
{ "start": 5199, "end": 5879 }
class ____(_desc_classes_injector, nodes.Inline, nodes.TextElement): """Node for a signature fragment in inline text. This is for example used for roles like :rst:role:`cpp:expr`. This node always has the classes ``sig``, ``sig-inline``, and the name of the domain it belongs to. """ classes = ['sig', 'sig-inline'] def __init__(self, domain: str, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs, domain=domain) self['classes'].append(domain) # Nodes for high-level structure in signatures ############################################## # nodes to use within a desc_signature or desc_signature_line
desc_inline
python
SmileyChris__easy-thumbnails
easy_thumbnails/tests/test_templatetags.py
{ "start": 14630, "end": 20319 }
class ____(test.BaseTest): def setUp(self): super().setUp() self.storage = test.TemporaryStorage() # Save a test image. self.filename = self.create_image(self.storage, 'test.svg', image_format='SVG') # Required so that IOError's get wrapped as TemplateSyntaxError settings.TEMPLATE_DEBUG = True def tearDown(self): self.storage.delete_temporary_storage() super().tearDown() def render_template(self, source, template_tag_library='thumbnail'): source_image = get_thumbnailer(self.storage, self.filename) source_image.thumbnail_storage = self.storage source_image.thumbnail_preserve_extensions = True context = Context({ 'source': source_image, 'storage': self.storage, 'filename': self.filename, 'invalid_filename': 'not%s' % self.filename, 'size': (90, 100), 'invalid_size': (90, 'fish'), 'strsize': '80x90', 'invalid_strsize': ('1notasize2'), 'invalid_q': 'notanumber'}) source = '{% load ' + template_tag_library + ' %}' + source return Template(source).render(context) def testTag(self): # Set THUMBNAIL_DEBUG = True to make it easier to trace any failures settings.THUMBNAIL_DEBUG = True # Basic output = self.render_template('src="{% thumbnail source 240x240 %}"') expected = self.verify_thumbnail((240, 180), {'size': (240, 240)}) expected_url = ''.join((settings.MEDIA_URL, expected)) self.assertEqual(output, 'src="{}"'.format(expected_url)) # Size from context variable # as a tuple: output = self.render_template( 'src="{% thumbnail source size %}"') expected = self.verify_thumbnail((90, 68), {'size': (90, 100)}) expected_url = ''.join((settings.MEDIA_URL, expected)) self.assertEqual(output, 'src="{}"'.format(expected_url)) # as a string: output = self.render_template( 'src="{% thumbnail source strsize %}"') expected = self.verify_thumbnail((80, 60), {'size': (80, 90)}) expected_url = ''.join((settings.MEDIA_URL, expected)) self.assertEqual(output, 'src="{}"'.format(expected_url)) # On context output = self.render_template( 'height:{% thumbnail source 240x240 as thumb %}{{ 
thumb.height }}') self.assertEqual(output, 'height:180.0') # With options and quality output = self.render_template( 'src="{% thumbnail source 240x240 sharpen crop quality=95 %}"') # Note that the opts are sorted to ensure a consistent filename. expected = self.verify_thumbnail( (240, 240), {'size': (240, 240), 'crop': True, 'sharpen': True, 'quality': 95}) expected_url = ''.join((settings.MEDIA_URL, expected)) self.assertEqual(output, 'src="{}"'.format(expected_url)) # With option and quality on context (also using its unicode method to # display the url) output = self.render_template( '{% thumbnail source 240x240 sharpen crop quality=95 as thumb %}' 'width:{{ thumb.width }}, url:{{ thumb.url }}') self.assertEqual(output, 'width:240.0, url:{}'.format(expected_url)) # One dimensional resize output = self.render_template('src="{% thumbnail source 100x0 %}"') expected = self.verify_thumbnail((100, 75), {'size': (100, 0)}) expected_url = ''.join((settings.MEDIA_URL, expected)) self.assertEqual(output, 'src="{}"'.format(expected_url)) def verify_thumbnail(self, expected_size, options, source_filename=None, transparent=False): from easy_thumbnails.VIL import Image if source_filename is None: source_filename = self.filename self.assertTrue(isinstance(options, dict)) # Verify that the thumbnail file exists thumbnailer = get_thumbnailer(self.storage, source_filename) thumbnailer.thumbnail_preserve_extensions = True expected_filename = thumbnailer.get_thumbnail_name( options, transparent=transparent) self.assertTrue( self.storage.exists(expected_filename), "Thumbnail file %r not found" % expected_filename) # Verify the thumbnail has the expected dimensions with self.storage.open(expected_filename) as expected_file: with Image.load(expected_file.name) as image: self.assertEqual(image.size, expected_size) return expected_filename def test_named_file(self): Image = import_string('easy_thumbnails.VIL.Image') expected = '<svg width="30" height="30" preserveAspectRatio="xMinYMin 
meet" viewBox="0 0 30 30" xmlns="http://www.w3.org/2000/svg" version="1.0" fill-rule="evenodd" xmlns:xlink="http://www.w3.org/1999/xlink"><title>...</title><desc>...</desc><g id="group" transform="scale(1,-1) translate(0,-30)" clip="0 0 30 30"/></svg>' with Image.new('rgb', (30, 30)) as img: with tempfile.NamedTemporaryFile() as namedtmpfile: img.save(namedtmpfile.name, 'SVG') namedtmpfile.seek(0) xml = namedtmpfile.read().decode() self.assertHTMLEqual(xml, expected) with tempfile.NamedTemporaryFile() as namedtmpfile: path = Path(namedtmpfile.name) img.save(path, 'SVG') namedtmpfile.seek(0) xml = path.read_text() self.assertHTMLEqual(xml, expected)
ThumbnailSVGImage
python
boto__boto3
boto3/exceptions.py
{ "start": 2905, "end": 3483 }
class ____(Boto3Error): """Raised for operations that are not supported for an operand.""" def __init__(self, operation, value): msg = ( f'{operation} operation cannot be applied to value {value} of type ' f'{type(value)} directly. Must use AttributeBase object methods ' f'(i.e. Attr().eq()). to generate ConditionBase instances first.' ) Exception.__init__(self, msg) # FIXME: Backward compatibility DynanmoDBOperationNotSupportedError = DynamoDBOperationNotSupportedError
DynamoDBOperationNotSupportedError
python
fastai__fastai
fastai/losses.py
{ "start": 9040, "end": 9763 }
class ____(BaseLoss): "Same as `LabelSmoothingCrossEntropy`, but flattens input and target." y_int = True @use_kwargs_dict(keep=True, eps=0.1, reduction='mean') def __init__(self, *args, axis:int=-1, # Class axis **kwargs ): super().__init__(LabelSmoothingCrossEntropy, *args, axis=axis, **kwargs) def activation(self, out:Tensor) -> Tensor: "`LabelSmoothingCrossEntropy`'s fused activation function applied to model output" return F.softmax(out, dim=-1) def decodes(self, out:Tensor) -> Tensor: "Converts model output to target format" return out.argmax(dim=-1) # %% ../nbs/01a_losses.ipynb 30
LabelSmoothingCrossEntropyFlat
python
ray-project__ray
python/ray/serve/tests/test_handle_streaming.py
{ "start": 753, "end": 1530 }
class ____: def __call__( self, n: int, should_error: bool = False ) -> Generator[int, None, None]: if should_error: raise RuntimeError("oopsies") for i in range(n): yield i def other_method(self, n: int) -> Generator[int, None, None]: for i in range(n): yield i def call_inner_generator(self, n: int) -> Generator[int, None, None]: return self.other_method(n) def unary(self, n: int) -> int: return n @serve.deployment def sync_gen_function(n: int): for i in range(n): yield i @serve.deployment async def async_gen_function(n: int): for i in range(n): yield i @pytest.mark.parametrize("deployment", [AsyncStreamer, SyncStreamer])
SyncStreamer
python
doocs__leetcode
solution/0200-0299/0291.Word Pattern II/Solution.py
{ "start": 0, "end": 828 }
class ____: def wordPatternMatch(self, pattern: str, s: str) -> bool: def dfs(i, j): if i == m and j == n: return True if i == m or j == n or n - j < m - i: return False for k in range(j, n): t = s[j : k + 1] if d.get(pattern[i]) == t: if dfs(i + 1, k + 1): return True if pattern[i] not in d and t not in vis: d[pattern[i]] = t vis.add(t) if dfs(i + 1, k + 1): return True d.pop(pattern[i]) vis.remove(t) return False m, n = len(pattern), len(s) d = {} vis = set() return dfs(0, 0)
Solution
python
pypa__warehouse
tests/unit/test_sanity.py
{ "start": 838, "end": 2511 }
class ____: def test_valid(self): request = Request( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": ( "multipart/form-data; boundary=c397e2aa2980f1a53dee37c05b8fb45a" ), "wsgi.input": io.BytesIO( b"--------------------------c397e2aa2980f1a53dee37c05b8fb45a\r\n" b'Content-Disposition: form-data; name="person"\r\n' b"anonymous" ), } ) sanity.invalid_forms(request) def test_invalid_form(self): request = Request( { "REQUEST_METHOD": "POST", "CONTENT_TYPE": ("multipart/form-data"), "wsgi.input": io.BytesIO( b'Content-Disposition: form-data; name="person"\r\n' b"anonymous" ), } ) with pytest.raises(HTTPBadRequest, match="Invalid Form Data."): sanity.invalid_forms(request) def test_not_post(self): request = Request({"REQUEST_METHOD": "GET"}) sanity.invalid_forms(request) @pytest.mark.parametrize( ("original_location", "expected_location"), [ ("/a/path/to/nowhere", "/a/path/to/nowhere"), ("/project/☃/", "/project/%E2%98%83/"), (None, None), ], ) def test_unicode_redirects(original_location, expected_location): if original_location: resp_in = HTTPMovedPermanently(original_location) else: resp_in = HTTPOk() resp_out = sanity.unicode_redirects(resp_in) assert resp_out.location == expected_location
TestInvalidForms
python
tornadoweb__tornado
tornado/test/wsgi_test.py
{ "start": 235, "end": 1947 }
class ____: # TODO: Now that WSGIAdapter is gone, this is a pretty weak test. def get_executor(self): raise NotImplementedError() def get_app(self): executor = self.get_executor() # The barrier test in DummyExecutorTest will always wait the full # value of this timeout, so we don't want it to be too high. self.barrier = threading.Barrier(2, timeout=0.3) def make_container(app): return WSGIContainer(validator(app), executor=executor) return RuleRouter( [ ("/simple", make_container(self.simple_wsgi_app)), ("/barrier", make_container(self.barrier_wsgi_app)), ("/streaming_barrier", make_container(self.streaming_barrier_wsgi_app)), ] ) def respond_plain(self, start_response): status = "200 OK" response_headers = [("Content-Type", "text/plain")] start_response(status, response_headers) def simple_wsgi_app(self, environ, start_response): self.respond_plain(start_response) return [b"Hello world!"] def barrier_wsgi_app(self, environ, start_response): self.respond_plain(start_response) try: n = self.barrier.wait() except threading.BrokenBarrierError: return [b"broken barrier"] else: return [b"ok %d" % n] def streaming_barrier_wsgi_app(self, environ, start_response): self.respond_plain(start_response) yield b"ok " try: n = self.barrier.wait() except threading.BrokenBarrierError: yield b"broken barrier" else: yield b"%d" % n
WSGIAppMixin
python
zostera__django-bootstrap4
example/app/views.py
{ "start": 579, "end": 834 }
class ____(TemplateView): template_name = "app/home.html" def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) messages.info(self.request, "hello http://example.com") return context
HomePageView
python
doocs__leetcode
solution/2700-2799/2736.Maximum Sum Queries/Solution.py
{ "start": 418, "end": 1122 }
class ____: def maximumSumQueries( self, nums1: List[int], nums2: List[int], queries: List[List[int]] ) -> List[int]: nums = sorted(zip(nums1, nums2), key=lambda x: -x[0]) nums2.sort() n, m = len(nums1), len(queries) ans = [-1] * m j = 0 tree = BinaryIndexedTree(n) for i in sorted(range(m), key=lambda i: -queries[i][0]): x, y = queries[i] while j < n and nums[j][0] >= x: k = n - bisect_left(nums2, nums[j][1]) tree.update(k, nums[j][0] + nums[j][1]) j += 1 k = n - bisect_left(nums2, y) ans[i] = tree.query(k) return ans
Solution
python
bokeh__bokeh
tests/test_defaults.py
{ "start": 1230, "end": 2575 }
class ____: def test_defaults(self) -> None: baseline = Path(__file__).parent / "baselines" / "defaults.json5" defaults = collect_defaults() output_defaults(baseline, defaults) status, out, _ = diff_baseline(baseline) if status != 0: print(out) assert False, "baseline differs" #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- def git(*args: str) -> tuple[int, str, str]: proc = subprocess.Popen(["git", *args], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = proc.communicate() out = stdout.decode("utf-8", errors="ignore") err = stderr.decode("utf-8", errors="ignore") return (proc.returncode, out, err) def diff_baseline(baseline_path: Path, ref: str = "HEAD") -> tuple[int, str, str]: return git("diff", "--color", "--exit-code", ref, "--", str(baseline_path)) #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
TestDefaults
python
ray-project__ray
python/ray/data/tests/unit/test_datatype.py
{ "start": 10109, "end": 10866 }
class ____: """Test string representation methods.""" @pytest.mark.parametrize( "datatype,expected_repr", [ (DataType.from_arrow(pa.int64()), "DataType(arrow:int64)"), (DataType.from_arrow(pa.string()), "DataType(arrow:string)"), (DataType.from_numpy(np.dtype("float32")), "DataType(numpy:float32)"), (DataType.from_numpy(np.dtype("int64")), "DataType(numpy:int64)"), (DataType(str), "DataType(python:str)"), (DataType(int), "DataType(python:int)"), ], ) def test_repr(self, datatype, expected_repr): """Test __repr__ method for different type categories.""" assert repr(datatype) == expected_repr
TestDataTypeStringRepresentation
python
Lightning-AI__lightning
tests/tests_fabric/test_connector.py
{ "start": 2285, "end": 33944 }
class ____(Mock): def __instancecheck__(self, instance): return True @pytest.mark.parametrize( ("accelerator", "devices"), [("tpu", "auto"), ("tpu", 1), ("tpu", [1]), ("tpu", 8), ("auto", 1), ("auto", 8)] ) @RunIf(min_python="3.9") # mocking issue def test_accelerator_choice_tpu(accelerator, devices, tpu_available, monkeypatch): monkeypatch.setattr(torch, "device", DeviceMock()) connector = _Connector(accelerator=accelerator, devices=devices) assert isinstance(connector.accelerator, XLAAccelerator) if devices == "auto" or (isinstance(devices, int) and devices > 1): assert isinstance(connector.strategy, XLAStrategy) assert isinstance(connector.strategy.cluster_environment, XLAEnvironment) assert isinstance(connector.cluster_environment, XLAEnvironment) else: assert isinstance(connector.strategy, SingleDeviceXLAStrategy) @RunIf(skip_windows=True, standalone=True) def test_strategy_choice_ddp_on_cpu(): """Test that selecting DDPStrategy on CPU works.""" _test_strategy_choice_ddp_and_cpu(ddp_strategy_class=DDPStrategy) def _test_strategy_choice_ddp_and_cpu(ddp_strategy_class): connector = _Connector( strategy=ddp_strategy_class(), accelerator="cpu", devices=2, ) assert isinstance(connector.strategy, ddp_strategy_class) assert isinstance(connector.accelerator, CPUAccelerator) assert connector.strategy.num_processes == 2 assert connector.strategy.parallel_devices == [torch.device("cpu")] * 2 @mock.patch.dict( os.environ, { "SLURM_NTASKS": "2", "SLURM_JOB_NAME": "SOME_NAME", "SLURM_NODEID": "0", "LOCAL_RANK": "0", "SLURM_PROCID": "0", "SLURM_LOCALID": "0", }, ) @mock.patch("lightning.fabric.accelerators.cuda.num_cuda_devices", return_value=0) def test_custom_cluster_environment_in_slurm_environment(_): """Test that we choose the custom cluster even when SLURM or TE flags are around.""" class CustomCluster(LightningEnvironment): @property def main_address(self): return "asdf" @property def creates_processes_externally(self) -> bool: return True connector = _Connector( 
plugins=[CustomCluster()], accelerator="cpu", strategy="ddp", devices=2, ) assert isinstance(connector.accelerator, CPUAccelerator) assert isinstance(connector.strategy, DDPStrategy) assert isinstance(connector.strategy.cluster_environment, CustomCluster) # this checks that `strategy._set_world_ranks` was called by the connector assert connector.strategy.world_size == 2 @RunIf(mps=False) @mock.patch.dict( os.environ, { "SLURM_NTASKS": "2", "SLURM_NTASKS_PER_NODE": "1", "SLURM_JOB_NAME": "SOME_NAME", "SLURM_NODEID": "0", "LOCAL_RANK": "0", "SLURM_PROCID": "0", "SLURM_LOCALID": "0", }, ) @mock.patch("lightning.fabric.accelerators.cuda.num_cuda_devices", return_value=0) def test_custom_accelerator(*_): class Accel(Accelerator): def setup_device(self, device: torch.device) -> None: pass def get_device_stats(self, device: torch.device) -> dict[str, Any]: pass def teardown(self) -> None: pass @staticmethod def parse_devices(devices): return devices @staticmethod def get_parallel_devices(devices): return [torch.device("cpu")] * devices @staticmethod def auto_device_count() -> int: return 1 @staticmethod def is_available() -> bool: return True @staticmethod def name() -> str: return "custom_acc_name" class Prec(Precision): pass class TestStrategy(SingleDeviceStrategy): pass strategy = TestStrategy(device=torch.device("cpu"), accelerator=Accel(), precision=Prec()) connector = _Connector(strategy=strategy, devices=2) assert isinstance(connector.accelerator, Accel) assert isinstance(connector.strategy, TestStrategy) assert isinstance(connector.precision, Prec) assert connector.strategy is strategy class TestStrategy(DDPStrategy): pass strategy = TestStrategy(accelerator=Accel(), precision=Prec()) connector = _Connector(strategy=strategy, devices=2) assert isinstance(connector.accelerator, Accel) assert isinstance(connector.strategy, TestStrategy) assert isinstance(connector.precision, Prec) assert connector.strategy is strategy @pytest.mark.parametrize( ("env_vars", 
"expected_environment"), [ ( { "SLURM_NTASKS": "2", "SLURM_NTASKS_PER_NODE": "1", "SLURM_JOB_NAME": "SOME_NAME", "SLURM_NODEID": "0", "LOCAL_RANK": "0", "SLURM_PROCID": "0", "SLURM_LOCALID": "0", }, SLURMEnvironment, ), ( { "LSB_JOBID": "1", "LSB_DJOB_RANKFILE": "SOME_RANK_FILE", "JSM_NAMESPACE_LOCAL_RANK": "1", "JSM_NAMESPACE_SIZE": "20", "JSM_NAMESPACE_RANK": "1", }, LSFEnvironment, ), ], ) @mock.patch("lightning.fabric.plugins.environments.lsf.LSFEnvironment._read_hosts", return_value=["node0", "node1"]) @mock.patch("lightning.fabric.plugins.environments.lsf.LSFEnvironment._get_node_rank", return_value=0) def test_fallback_from_ddp_spawn_to_ddp_on_cluster(_, __, env_vars, expected_environment): with mock.patch.dict(os.environ, env_vars, clear=True): connector = _Connector(strategy="ddp_spawn", accelerator="cpu", devices=2) assert isinstance(connector.accelerator, CPUAccelerator) assert isinstance(connector.strategy, DDPStrategy) assert isinstance(connector.strategy.cluster_environment, expected_environment) @RunIf(mps=False) @mock.patch("lightning.fabric.accelerators.cuda.num_cuda_devices", return_value=2) def test_interactive_incompatible_backend_error(_, monkeypatch): monkeypatch.setattr(lightning.fabric.connector, "_IS_INTERACTIVE", True) with pytest.raises(RuntimeError, match=r"strategy='ddp'\)`.*is not compatible"): _Connector(strategy="ddp", accelerator="gpu", devices=2) with pytest.raises(RuntimeError, match=r"strategy='ddp_spawn'\)`.*is not compatible"): _Connector(strategy="ddp_spawn", accelerator="gpu", devices=2) with pytest.raises(RuntimeError, match=r"strategy='ddp'\)`.*is not compatible"): # Edge case: _Connector maps dp to ddp if accelerator != gpu _Connector(strategy="dp", accelerator="cpu") def test_precision_and_precision_plugin_raises(): with pytest.raises(ValueError, match="both `precision=16-true` and `plugins"): _Connector(precision="16-true", plugins=Precision()) @mock.patch("lightning.fabric.accelerators.cuda.num_cuda_devices", 
return_value=2) @mock.patch("lightning.fabric.accelerators.mps.MPSAccelerator.is_available", return_value=False) def test_interactive_compatible_dp_strategy_gpu(_, __, monkeypatch): monkeypatch.setattr(lightning.fabric.utilities.imports, "_IS_INTERACTIVE", True) connector = _Connector(strategy="dp", accelerator="gpu") assert connector.strategy.launcher is None @RunIf(skip_windows=True) def test_interactive_compatible_strategy_ddp_fork(monkeypatch): monkeypatch.setattr(lightning.fabric.utilities.imports, "_IS_INTERACTIVE", True) connector = _Connector(strategy="ddp_fork", accelerator="cpu") assert connector.strategy.launcher.is_interactive_compatible @RunIf(mps=True) @pytest.mark.parametrize( ("strategy", "strategy_class"), [ ("ddp", DDPStrategy), ("dp", DataParallelStrategy), pytest.param("deepspeed", DeepSpeedStrategy, marks=RunIf(deepspeed=True)), ], ) @pytest.mark.parametrize("accelerator", ["mps", "auto", "gpu", MPSAccelerator()]) def test_invalid_ddp_strategy_with_mps(accelerator, strategy, strategy_class): with pytest.raises(ValueError, match="strategies from the DDP family are not supported"): _Connector(accelerator=accelerator, strategy=strategy) with pytest.raises(ValueError, match="strategies from the DDP family are not supported"): _Connector(accelerator="mps", strategy=strategy_class()) @RunIf(mps=False) @pytest.mark.parametrize( ("strategy", "strategy_class"), [ ("ddp", DDPStrategy), ("ddp_spawn", DDPStrategy), pytest.param("deepspeed", DeepSpeedStrategy, marks=RunIf(deepspeed=True)), ], ) @pytest.mark.parametrize("devices", [1, 2]) @mock.patch("lightning.fabric.accelerators.cuda.num_cuda_devices", return_value=2) def test_strategy_choice_multi_node_gpu(_, strategy, strategy_class, devices): connector = _Connector(num_nodes=2, accelerator="gpu", strategy=strategy, devices=devices) assert isinstance(connector.strategy, strategy_class) def test_num_nodes_input_validation(): with pytest.raises(ValueError, match="`num_nodes` must be a positive integer"): 
_Connector(num_nodes=0) with pytest.raises(ValueError, match="`num_nodes` must be a positive integer"): _Connector(num_nodes=-1) @mock.patch("lightning.fabric.accelerators.cuda.num_cuda_devices", return_value=0) def test_cuda_accelerator_can_not_run_on_system(_): connector = _Connector(accelerator="cpu") assert isinstance(connector.accelerator, CPUAccelerator) with pytest.raises( RuntimeError, match="CUDAAccelerator` can not run on your system since the accelerator is not available.", ): _Connector(accelerator="cuda", devices=1) @pytest.mark.skipif(XLAAccelerator.is_available(), reason="test requires missing TPU") @mock.patch("lightning.fabric.accelerators.xla._XLA_AVAILABLE", True) @mock.patch("lightning.fabric.accelerators.xla._using_pjrt", return_value=True) def test_tpu_accelerator_can_not_run_on_system(_): with pytest.raises(RuntimeError, match="XLAAccelerator` can not run on your system"): _Connector(accelerator="tpu", devices=8) @mock.patch("lightning.fabric.accelerators.cuda.num_cuda_devices", return_value=2) @pytest.mark.parametrize("device_count", [["0"], [0, "1"], ["GPU"], [["0", "1"], [0, 1]], [False]]) def test_accelerator_invalid_type_devices(_, device_count): with pytest.raises(TypeError, match=r"must be an int, a string, a sequence of ints, but you"): _ = _Connector(accelerator="gpu", devices=device_count) @RunIf(min_cuda_gpus=1) def test_accelerator_gpu(): connector = _Connector(accelerator="gpu", devices=1) assert isinstance(connector.accelerator, CUDAAccelerator) connector = _Connector(accelerator="gpu") assert isinstance(connector.accelerator, CUDAAccelerator) connector = _Connector(accelerator="auto", devices=1) assert isinstance(connector.accelerator, CUDAAccelerator) @pytest.mark.parametrize(("devices", "strategy_class"), [(1, SingleDeviceStrategy), (5, DDPStrategy)]) def test_accelerator_cpu_with_devices(devices, strategy_class): connector = _Connector(accelerator="cpu", devices=devices) assert connector._parallel_devices == 
[torch.device("cpu")] * devices assert isinstance(connector.strategy, strategy_class) assert isinstance(connector.accelerator, CPUAccelerator) @RunIf(min_cuda_gpus=2) @pytest.mark.parametrize( ("devices", "strategy_class"), [(1, SingleDeviceStrategy), ([1], SingleDeviceStrategy), (2, DDPStrategy)] ) def test_accelerator_gpu_with_devices(devices, strategy_class): connector = _Connector(accelerator="gpu", devices=devices) assert len(connector._parallel_devices) == len(devices) if isinstance(devices, list) else devices assert isinstance(connector.strategy, strategy_class) assert isinstance(connector.accelerator, CUDAAccelerator) @RunIf(min_cuda_gpus=1) def test_accelerator_auto_with_devices_gpu(): connector = _Connector(accelerator="auto", devices=1) assert isinstance(connector.accelerator, CUDAAccelerator) assert connector._parallel_devices == [torch.device("cuda", 0)] def test_set_devices_if_none_cpu(): connector = _Connector(accelerator="cpu", devices=3) assert connector._parallel_devices == [torch.device("cpu")] * 3 @RunIf(mps=False) def test_unsupported_strategy_types_on_cpu_and_fallback(): with pytest.warns(UserWarning, match="is not supported on CPUs, hence setting `strategy='ddp"): connector = _Connector(accelerator="cpu", strategy="dp", devices=2) assert isinstance(connector.strategy, DDPStrategy) @RunIf(mps=True) @pytest.mark.parametrize("precision", ["16-mixed", "bf16-mixed"]) def test_mps_enabled_with_float16_or_bfloat16_precision(precision): connector = _Connector(accelerator="mps", precision=precision) assert connector.precision.device == "mps" def test_invalid_accelerator_choice(): with pytest.raises(ValueError, match="You selected an invalid accelerator name: `accelerator='cocofruit'`"): _Connector(accelerator="cocofruit") @pytest.mark.parametrize("invalid_strategy", ["cocofruit", object()]) def test_invalid_strategy_choice(invalid_strategy): with pytest.raises(ValueError, match="You selected an invalid strategy name:"): 
_Connector(strategy=invalid_strategy) @pytest.mark.parametrize( ("strategy", "strategy_class"), [ ("ddp_spawn", DDPStrategy), ("ddp", DDPStrategy), ], ) def test_strategy_choice_cpu_str(strategy, strategy_class): connector = _Connector(strategy=strategy, accelerator="cpu", devices=2) assert isinstance(connector.strategy, strategy_class) @RunIf(min_cuda_gpus=2) @pytest.mark.parametrize( ("strategy", "strategy_class"), [ ("ddp_spawn", DDPStrategy), ("ddp", DDPStrategy), ("dp", DataParallelStrategy), pytest.param("deepspeed", DeepSpeedStrategy, marks=RunIf(deepspeed=True)), ], ) def test_strategy_choice_gpu_str(strategy, strategy_class): connector = _Connector(strategy=strategy, accelerator="gpu", devices=2) assert isinstance(connector.strategy, strategy_class) def test_device_type_when_strategy_instance_cpu_passed(): connector = _Connector(strategy=DDPStrategy(), accelerator="cpu", devices=2) assert isinstance(connector.strategy, DDPStrategy) assert isinstance(connector.accelerator, CPUAccelerator) @RunIf(min_cuda_gpus=2) def test_device_type_when_strategy_instance_gpu_passed(): connector = _Connector(strategy=DDPStrategy(), accelerator="gpu", devices=2) assert isinstance(connector.strategy, DDPStrategy) assert isinstance(connector.accelerator, CUDAAccelerator) @pytest.mark.parametrize("precision", [1, 12, "invalid"]) def test_validate_precision_type(precision): with pytest.raises(ValueError, match=f"Precision {repr(precision)} is invalid"): _Connector(precision=precision) @pytest.mark.parametrize( ("precision", "expected_precision", "should_warn"), [ (16, "16-mixed", True), ("16", "16-mixed", True), ("16-mixed", "16-mixed", False), ("bf16", "bf16-mixed", True), ("bf16-mixed", "bf16-mixed", False), (32, "32-true", False), ("32", "32-true", False), ("32-true", "32-true", False), (64, "64-true", False), ("64", "64-true", False), ("64-true", "64-true", False), ], ) # mock cuda as available to not be limited by dtype and accelerator compatibility - this is tested 
elsewhere @mock.patch("lightning.fabric.accelerators.cuda.num_cuda_devices", return_value=1) @mock.patch("lightning.fabric.accelerators.mps.MPSAccelerator.is_available", return_value=False) def test_precision_conversion(patch1, patch2, precision, expected_precision, should_warn): warn_context = pytest.warns if should_warn else no_warning_call with warn_context( UserWarning, match=( f"{precision}` is supported for historical reasons but its usage is discouraged. " f"Please set your precision to {expected_precision} instead!" ), ): connector = _Connector(precision=precision, accelerator="cuda") assert connector._precision_input == expected_precision def test_multi_device_default_strategy(): """The default strategy when multiple devices are selected is "ddp" with the subprocess launcher.""" connector = _Connector(strategy="auto", accelerator="cpu", devices=2) assert isinstance(connector.accelerator, CPUAccelerator) assert isinstance(connector.strategy, DDPStrategy) assert connector.strategy._start_method == "popen" assert isinstance(connector.strategy.launcher, _SubprocessScriptLauncher) def test_strategy_choice_ddp_spawn_cpu(): connector = _Connector(strategy="ddp_spawn", accelerator="cpu", devices=2) assert isinstance(connector.accelerator, CPUAccelerator) assert isinstance(connector.strategy, DDPStrategy) assert isinstance(connector.strategy.cluster_environment, LightningEnvironment) assert connector.strategy._start_method == "spawn" assert connector.strategy.launcher._start_method == "spawn" @RunIf(skip_windows=True) @mock.patch("lightning.fabric.connector._IS_INTERACTIVE", True) def test_strategy_choice_ddp_fork_in_interactive(): """Test that when strategy is unspecified, the connector chooses DDP Fork in interactive environments by default.""" connector = _Connector(accelerator="cpu", devices=2) assert isinstance(connector.accelerator, CPUAccelerator) assert isinstance(connector.strategy, DDPStrategy) assert isinstance(connector.strategy.cluster_environment, 
LightningEnvironment) assert connector.strategy._start_method == "fork" assert connector.strategy.launcher._start_method == "fork" @RunIf(skip_windows=True) def test_strategy_choice_ddp_fork_cpu(): connector = _Connector(strategy="ddp_fork", accelerator="cpu", devices=2) assert isinstance(connector.accelerator, CPUAccelerator) assert isinstance(connector.strategy, DDPStrategy) assert isinstance(connector.strategy.cluster_environment, LightningEnvironment) assert connector.strategy._start_method == "fork" assert connector.strategy.launcher._start_method == "fork" @mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0,1"}) @mock.patch("lightning.fabric.accelerators.cuda.num_cuda_devices", return_value=2) @mock.patch("lightning.fabric.accelerators.mps.MPSAccelerator.is_available", return_value=False) def test_strategy_choice_ddp(*_): connector = _Connector(strategy="ddp", accelerator="gpu", devices=1) assert isinstance(connector.accelerator, CUDAAccelerator) assert isinstance(connector.strategy, DDPStrategy) assert isinstance(connector.strategy.cluster_environment, LightningEnvironment) @mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0,1"}) @mock.patch("lightning.fabric.accelerators.cuda.num_cuda_devices", return_value=2) @mock.patch("lightning.fabric.accelerators.mps.MPSAccelerator.is_available", return_value=False) def test_strategy_choice_ddp_spawn(*_): connector = _Connector(strategy="ddp_spawn", accelerator="gpu", devices=1) assert isinstance(connector.accelerator, CUDAAccelerator) assert isinstance(connector.strategy, DDPStrategy) assert isinstance(connector.strategy.cluster_environment, LightningEnvironment) @mock.patch("lightning.fabric.accelerators.cuda.num_cuda_devices", return_value=2) @pytest.mark.parametrize( ("job_name", "expected_env"), [("some_name", SLURMEnvironment), ("bash", LightningEnvironment)] ) @pytest.mark.parametrize("strategy", ["auto", "ddp", DDPStrategy]) def test_strategy_choice_ddp_slurm(_, strategy, job_name, expected_env): if 
strategy and not isinstance(strategy, str): strategy = strategy() with mock.patch.dict( os.environ, { "CUDA_VISIBLE_DEVICES": "0,1", "SLURM_NTASKS": "2", "SLURM_NTASKS_PER_NODE": "1", "SLURM_JOB_NAME": job_name, "SLURM_NODEID": "0", "SLURM_PROCID": "1", "SLURM_LOCALID": "1", }, ): connector = _Connector(strategy=strategy, accelerator="cuda", devices=2) assert isinstance(connector.accelerator, CUDAAccelerator) assert isinstance(connector.strategy, DDPStrategy) assert isinstance(connector.strategy.cluster_environment, expected_env) @mock.patch.dict( os.environ, { "CUDA_VISIBLE_DEVICES": "0,1", "WORLD_SIZE": "2", "LOCAL_WORLD_SIZE": "2", "RANK": "1", "LOCAL_RANK": "1", "GROUP_RANK": "0", "TORCHELASTIC_RUN_ID": "1", }, ) @mock.patch("lightning.fabric.accelerators.cuda.num_cuda_devices", return_value=2) @mock.patch("lightning.fabric.accelerators.mps.MPSAccelerator.is_available", return_value=False) def test_strategy_choice_ddp_torchelastic(*_): connector = _Connector(accelerator="gpu", devices=2) assert isinstance(connector.accelerator, CUDAAccelerator) assert isinstance(connector.strategy, DDPStrategy) assert isinstance(connector.strategy.cluster_environment, TorchElasticEnvironment) assert connector.strategy.cluster_environment.local_rank() == 1 assert connector.strategy.local_rank == 1 @mock.patch.dict( os.environ, { "TORCHELASTIC_RUN_ID": "1", "SLURM_NTASKS": "2", "WORLD_SIZE": "2", "RANK": "1", "LOCAL_RANK": "1", }, ) @mock.patch("lightning.fabric.accelerators.cuda.num_cuda_devices", return_value=2) @mock.patch("lightning.fabric.accelerators.mps.MPSAccelerator.is_available", return_value=False) def test_torchelastic_priority_over_slurm(*_): """Test that the TorchElastic cluster environment is chosen over SLURM when both are detected.""" assert TorchElasticEnvironment.detect() assert SLURMEnvironment.detect() connector = _Connector(strategy="ddp") assert isinstance(connector.strategy.cluster_environment, TorchElasticEnvironment) @mock.patch.dict( os.environ, { 
"CUDA_VISIBLE_DEVICES": "0", "KUBERNETES_PORT": "tcp://127.0.0.1:443", "MASTER_ADDR": "1.2.3.4", "MASTER_PORT": "500", "WORLD_SIZE": "20", "RANK": "1", }, ) @mock.patch("lightning.fabric.accelerators.cuda.num_cuda_devices", return_value=2) @mock.patch("lightning.fabric.accelerators.mps.MPSAccelerator.is_available", return_value=False) def test_strategy_choice_ddp_kubeflow(*_): connector = _Connector(accelerator="gpu", devices=2, plugins=KubeflowEnvironment()) assert isinstance(connector.accelerator, CUDAAccelerator) assert isinstance(connector.strategy, DDPStrategy) assert isinstance(connector.strategy.cluster_environment, KubeflowEnvironment) assert connector.strategy.cluster_environment.local_rank() == 0 assert connector.strategy.local_rank == 0 @mock.patch.dict( os.environ, { "KUBERNETES_PORT": "tcp://127.0.0.1:443", "MASTER_ADDR": "1.2.3.4", "MASTER_PORT": "500", "WORLD_SIZE": "20", "RANK": "1", }, ) def test_strategy_choice_ddp_cpu_kubeflow(): connector = _Connector(accelerator="cpu", devices=2, plugins=KubeflowEnvironment()) assert isinstance(connector.accelerator, CPUAccelerator) assert isinstance(connector.strategy, DDPStrategy) assert isinstance(connector.strategy.cluster_environment, KubeflowEnvironment) assert connector.strategy.cluster_environment.local_rank() == 0 assert connector.strategy.local_rank == 0 @mock.patch.dict( os.environ, { "SLURM_NTASKS": "2", "SLURM_NTASKS_PER_NODE": "1", "SLURM_JOB_NAME": "SOME_NAME", "SLURM_NODEID": "0", "LOCAL_RANK": "0", "SLURM_PROCID": "0", "SLURM_LOCALID": "0", }, ) @pytest.mark.parametrize("strategy", ["auto", "ddp", DDPStrategy()]) def test_strategy_choice_ddp_cpu_slurm(strategy): connector = _Connector(strategy=strategy, accelerator="cpu", devices=2) assert isinstance(connector.accelerator, CPUAccelerator) assert isinstance(connector.strategy, DDPStrategy) assert isinstance(connector.strategy.cluster_environment, SLURMEnvironment) assert connector.strategy.local_rank == 0 @mock.patch.dict(os.environ, {}, 
clear=True) @mock.patch("lightning.fabric.accelerators.mps.MPSAccelerator.is_available", return_value=False) def test_unsupported_tpu_choice(_, tpu_available): # if user didn't set strategy, _Connector will choose the SingleDeviceXLAStrategy or XLAStrategy with pytest.raises(ValueError, match="XLAAccelerator` can only be used with a `SingleDeviceXLAStrategy`"): _Connector(accelerator="tpu", precision="16-true", strategy="ddp") # wrong precision plugin type with pytest.raises(TypeError, match="can only work with the `XLAPrecision` plugin"): XLAStrategy(accelerator=XLAAccelerator(), precision=Precision()) # wrong strategy type strategy = DDPStrategy(accelerator=XLAAccelerator(), precision=XLAPrecision(precision="16-true")) with pytest.raises(ValueError, match="XLAAccelerator` can only be used with a `SingleDeviceXLAStrategy`"): _Connector(strategy=strategy) @RunIf(skip_windows=True) def test_connector_with_tpu_accelerator_instance(tpu_available, monkeypatch): monkeypatch.setattr(torch, "device", DeviceMock()) accelerator = XLAAccelerator() connector = _Connector(accelerator=accelerator, devices=1) assert connector.accelerator is accelerator assert isinstance(connector.strategy, SingleDeviceXLAStrategy) connector = _Connector(accelerator=accelerator) assert connector.accelerator is accelerator assert isinstance(connector.strategy, XLAStrategy) @RunIf(mps=True) def test_devices_auto_choice_mps(): connector = _Connector(accelerator="auto", devices="auto") assert isinstance(connector.accelerator, MPSAccelerator) assert isinstance(connector.strategy, SingleDeviceStrategy) assert connector.strategy.root_device == torch.device("mps", 0) assert connector._parallel_devices == [torch.device("mps", 0)] @pytest.mark.parametrize( ("parallel_devices", "accelerator"), [([torch.device("cpu")], "cuda"), ([torch.device("cuda", i) for i in range(8)], "tpu")], ) def test_parallel_devices_in_strategy_conflict_with_accelerator(parallel_devices, accelerator): with pytest.raises(ValueError, 
match=r"parallel_devices set through"): _Connector(strategy=DDPStrategy(parallel_devices=parallel_devices), accelerator=accelerator) @pytest.mark.parametrize( ("plugins", "expected"), [ ([LightningEnvironment(), SLURMEnvironment()], "ClusterEnvironment"), ([TorchCheckpointIO(), TorchCheckpointIO()], "CheckpointIO"), ( [Precision(), DoublePrecision(), LightningEnvironment(), SLURMEnvironment()], "Precision, ClusterEnvironment", ), ], ) def test_plugin_only_one_instance_for_one_type(plugins, expected): with pytest.raises(ValueError, match=f"Received multiple values for {expected}"): _Connector(plugins=plugins) @pytest.mark.parametrize("accelerator", ["cpu", "cuda", "mps", "tpu"]) @pytest.mark.parametrize("devices", ["0", 0, []]) def test_passing_zero_and_empty_list_to_devices_flag(accelerator, devices): with pytest.raises(ValueError, match="value is not a valid input using"): _Connector(accelerator=accelerator, devices=devices) @pytest.mark.parametrize( ("expected_accelerator_flag", "expected_accelerator_class"), [ pytest.param("cuda", CUDAAccelerator, marks=RunIf(min_cuda_gpus=1)), pytest.param("mps", MPSAccelerator, marks=RunIf(mps=True)), ], ) def test_gpu_accelerator_backend_choice(expected_accelerator_flag, expected_accelerator_class): connector = _Connector(accelerator="gpu") assert connector._accelerator_flag == expected_accelerator_flag assert isinstance(connector.accelerator, expected_accelerator_class) @mock.patch("lightning.fabric.accelerators.mps.MPSAccelerator.is_available", return_value=False) @mock.patch("lightning.fabric.accelerators.cuda.num_cuda_devices", return_value=1) def test_gpu_accelerator_backend_choice_cuda(*_): connector = _Connector(accelerator="gpu") assert connector._accelerator_flag == "cuda" assert isinstance(connector.accelerator, CUDAAccelerator) @mock.patch("lightning.fabric.accelerators.mps.MPSAccelerator.is_available", return_value=True) @mock.patch("lightning.fabric.accelerators.mps._get_all_available_mps_gpus", return_value=[0]) 
@mock.patch("torch.device", DeviceMock) def test_gpu_accelerator_backend_choice_mps(*_: object) -> object: connector = _Connector(accelerator="gpu") assert connector._accelerator_flag == "mps" assert isinstance(connector.accelerator, MPSAccelerator) @mock.patch("lightning.fabric.accelerators.mps.MPSAccelerator.is_available", return_value=False) @mock.patch("lightning.fabric.accelerators.cuda.CUDAAccelerator.is_available", return_value=False) def test_gpu_accelerator_no_gpu_backend_found_error(*_): with pytest.raises(RuntimeError, match="No supported gpu backend found!"): _Connector(accelerator="gpu") @pytest.mark.parametrize("strategy", _DDP_FORK_ALIASES) @mock.patch( "lightning.fabric.connector.torch.multiprocessing.get_all_start_methods", return_value=[], ) @mock.patch("lightning.fabric.accelerators.mps.MPSAccelerator.is_available", return_value=False) def test_ddp_fork_on_unsupported_platform(_, __, strategy): with pytest.raises(ValueError, match="process forking is not supported on this platform"): _Connector(strategy=strategy) @pytest.mark.parametrize( ("precision_str", "strategy_str", "expected_precision_cls"), [ ("64-true", "auto", DoublePrecision), ("32-true", "auto", Precision), ("16-true", "auto", HalfPrecision), ("bf16-true", "auto", HalfPrecision), ("16-mixed", "auto", MixedPrecision), ("bf16-mixed", "auto", MixedPrecision), pytest.param("32-true", "fsdp", FSDPPrecision, marks=RunIf(min_cuda_gpus=1)), pytest.param("16-true", "fsdp", FSDPPrecision, marks=RunIf(min_cuda_gpus=1)), pytest.param("bf16-true", "fsdp", FSDPPrecision, marks=RunIf(min_cuda_gpus=1)), pytest.param("16-mixed", "fsdp", FSDPPrecision, marks=RunIf(min_cuda_gpus=1)), pytest.param("bf16-mixed", "fsdp", FSDPPrecision, marks=RunIf(min_cuda_gpus=1)), pytest.param("32-true", "deepspeed", DeepSpeedPrecision, marks=RunIf(deepspeed=True, mps=False)), pytest.param("16-true", "deepspeed", DeepSpeedPrecision, marks=RunIf(deepspeed=True, mps=False)), pytest.param("bf16-true", "deepspeed", 
DeepSpeedPrecision, marks=RunIf(deepspeed=True, mps=False)), pytest.param("16-mixed", "deepspeed", DeepSpeedPrecision, marks=RunIf(deepspeed=True, mps=False)), pytest.param("bf16-mixed", "deepspeed", DeepSpeedPrecision, marks=RunIf(deepspeed=True, mps=False)), ], ) def test_precision_selection(precision_str, strategy_str, expected_precision_cls): connector = _Connector(precision=precision_str, strategy=strategy_str) assert isinstance(connector.precision, expected_precision_cls) def test_precision_selection_16_on_cpu_warns(): with pytest.warns( UserWarning, match=r"precision='16-mixed'\)` but AMP with fp16 is not supported on CPU. Using `precision='bf16-mixed'", ): _Connector(accelerator="cpu", precision="16-mixed")
DeviceMock
python
openai__openai-python
src/openai/types/realtime/conversation_item_create_event.py
{ "start": 280, "end": 1089 }
class ____(BaseModel): item: ConversationItem """A single item within a Realtime conversation.""" type: Literal["conversation.item.create"] """The event type, must be `conversation.item.create`.""" event_id: Optional[str] = None """Optional client-generated ID used to identify this event.""" previous_item_id: Optional[str] = None """The ID of the preceding item after which the new item will be inserted. If not set, the new item will be appended to the end of the conversation. If set to `root`, the new item will be added to the beginning of the conversation. If set to an existing ID, it allows an item to be inserted mid-conversation. If the ID cannot be found, an error will be returned and the item will not be added. """
ConversationItemCreateEvent
python
airbytehq__airbyte
airbyte-integrations/connectors/source-faker/source_faker/source.py
{ "start": 292, "end": 1367 }
class ____(AbstractSource): def check_connection(self, logger: logging.Logger, config: Mapping[str, Any]) -> Tuple[bool, Any]: if type(config["count"]) == int or type(config["count"]) == float: return True, None else: return False, "Count option is missing" def streams(self, config: Mapping[str, Any]) -> List[Stream]: count: int = config["count"] if "count" in config else DEFAULT_COUNT seed: int = config["seed"] if "seed" in config else None records_per_slice: int = config["records_per_slice"] if "records_per_slice" in config else 100 always_updated: bool = config["always_updated"] if "always_updated" in config else True parallelism: int = config["parallelism"] if "parallelism" in config else 4 return [ Products(count, seed, parallelism, records_per_slice, always_updated), Users(count, seed, parallelism, records_per_slice, always_updated), Purchases(count, seed, parallelism, records_per_slice, always_updated), ]
SourceFaker
python
ray-project__ray
python/ray/serve/_private/common.py
{ "start": 25665, "end": 25739 }
class ____(str, Enum): REPLICA = "replica" @dataclass
ServeComponentType
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/util/_collections.py
{ "start": 9527, "end": 12403 }
class ____(Generic[_T]): """Appends items to a collection ensuring uniqueness. Additional appends() of the same object are ignored. Membership is determined by identity (``is a``) not equality (``==``). """ __slots__ = "data", "_data_appender", "_unique" data: Union[Iterable[_T], Set[_T], List[_T]] _data_appender: Callable[[_T], None] _unique: Dict[int, Literal[True]] def __init__( self, data: Union[Iterable[_T], Set[_T], List[_T]], via: Optional[str] = None, ): self.data = data self._unique = {} if via: self._data_appender = getattr(data, via) elif hasattr(data, "append"): self._data_appender = cast("List[_T]", data).append elif hasattr(data, "add"): self._data_appender = cast("Set[_T]", data).add def append(self, item: _T) -> None: id_ = id(item) if id_ not in self._unique: self._data_appender(item) self._unique[id_] = True def __iter__(self) -> Iterator[_T]: return iter(self.data) def coerce_generator_arg(arg: Any) -> List[Any]: if len(arg) == 1 and isinstance(arg[0], types.GeneratorType): return list(arg[0]) else: return cast("List[Any]", arg) def to_list(x: Any, default: Optional[List[Any]] = None) -> List[Any]: if x is None: return default # type: ignore if not is_non_string_iterable(x): return [x] elif isinstance(x, list): return x else: return list(x) def has_intersection(set_: Container[Any], iterable: Iterable[Any]) -> bool: r"""return True if any items of set\_ are present in iterable. Goes through special effort to ensure __hash__ is not called on items in iterable that don't support it. 
""" return any(i in set_ for i in iterable if i.__hash__) def to_set(x): if x is None: return set() if not isinstance(x, set): return set(to_list(x)) else: return x def to_column_set(x: Any) -> Set[Any]: if x is None: return column_set() if not isinstance(x, column_set): return column_set(to_list(x)) else: return x def update_copy( d: Dict[Any, Any], _new: Optional[Dict[Any, Any]] = None, **kw: Any ) -> Dict[Any, Any]: """Copy the given dict and update with the given values.""" d = d.copy() if _new: d.update(_new) d.update(**kw) return d def flatten_iterator(x: Iterable[_T]) -> Iterator[_T]: """Given an iterator of which further sub-elements may also be iterators, flatten the sub-elements into a single iterator. """ elem: _T for elem in x: if not isinstance(elem, str) and hasattr(elem, "__iter__"): yield from flatten_iterator(elem) else: yield elem
UniqueAppender
python
scikit-image__scikit-image
benchmarks/benchmark_transform_warp.py
{ "start": 2073, "end": 2711 }
class ____: params = ( [np.float32, np.float64], [(512, 512), (2048, 2048), (48, 48, 48), (192, 192, 192)], [(512, 512), (2048, 2048), (48, 48, 48), (192, 192, 192)], ) param_names = ['dtype', 'shape_in', 'shape_out'] timeout = 180 def setup(self, dtype, shape_in, shape_out): if len(shape_in) != len(shape_out): raise NotImplementedError("shape_in, shape_out must have same dimension") self.image = np.zeros(shape_in, dtype=dtype) def time_resize_local_mean(self, dtype, shape_in, shape_out): resize_local_mean(self.image, shape_out)
ResizeLocalMeanSuite
python
pennersr__django-allauth
allauth/socialaccount/providers/oauth/views.py
{ "start": 1576, "end": 1904 }
class ____: @classmethod def adapter_view(cls, adapter): @login_not_required def view(request, *args, **kwargs): self = cls() self.request = request self.adapter = adapter(request) return self.dispatch(request, *args, **kwargs) return view
OAuthView