Columns:
  language     stringclasses, 1 value
  repo         stringclasses, 346 values
  path         stringlengths, 6 to 201
  class_span   dict
  source       stringlengths, 21 to 2.38M
  target       stringlengths, 1 to 96
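Each record pairs a Python class definition whose name has been replaced by the placeholder ____ (the source column) with the original class name (the target column), along with the repository, file path, and a span for the class within that file. The following is a minimal usage sketch, assuming records are loaded as plain Python dicts keyed by the column names above; the example values are copied from the first record below, and reading class_span as character offsets into the source file is an assumption, not something the dump states.

# Minimal sketch: restore the original class definition from one record.
# Assumes records arrive as plain dicts keyed by the column names above;
# the values here are copied from the first record in this dump.
record = {
    "language": "python",
    "repo": "FactoryBoy__factory_boy",
    "path": "tests/djapp/models.py",
    "class_span": {"start": 1465, "end": 1559},  # assumed: character offsets of the class in the file
    "source": "class ____(models.Model): foo = models.CharField(max_length=20, default='')",
    "target": "WithDefaultValue",
}

# The placeholder appears once at the head of the class statement, so a single
# replacement recovers the original definition.
restored = record["source"].replace("____", record["target"], 1)
print(restored)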
python
FactoryBoy__factory_boy
tests/djapp/models.py
{ "start": 1465, "end": 1559 }
class ____(models.Model): foo = models.CharField(max_length=20, default='')
WithDefaultValue
python
milvus-io__pymilvus
tests/test_bulk_writer_validators.py
{ "start": 4731, "end": 8087 }
class ____: def test_valid_list_float16(self): """Test valid list of floats for float16""" result = float16_vector_validator([1.0, 2.0, 3.0], 3, is_bfloat=False) assert isinstance(result, bytes) # Verify we can reconstruct the array arr = np.frombuffer(result, dtype=np.float16) np.testing.assert_array_almost_equal(arr, [1.0, 2.0, 3.0]) @pytest.mark.skipif(not hasattr(np, 'bfloat16'), reason="bfloat16 not available") def test_valid_list_bfloat16(self): """Test valid list of floats for bfloat16""" result = float16_vector_validator([1.0, 2.0, 3.0], 3, is_bfloat=True) assert isinstance(result, bytes) def test_invalid_list_length(self): """Test list with wrong dimension""" with pytest.raises(MilvusException, match="array's length must be equal to vector dimension"): float16_vector_validator([1.0, 2.0], 3, is_bfloat=False) def test_invalid_list_type(self): """Test list with non-float elements""" with pytest.raises(MilvusException, match="array's element must be float value"): float16_vector_validator([1.0, 2, 3.0], 3, is_bfloat=False) def test_valid_numpy_float16(self): """Test valid numpy array with float16""" arr = np.array([1.0, 2.0, 3.0], dtype=np.float16) result = float16_vector_validator(arr, 3, is_bfloat=False) assert isinstance(result, bytes) assert result == arr.tobytes() @pytest.mark.skipif(not hasattr(np, 'bfloat16'), reason="bfloat16 not available") def test_valid_numpy_bfloat16(self): """Test valid numpy array with bfloat16""" arr = np.array([1.0, 2.0, 3.0], dtype='bfloat16') result = float16_vector_validator(arr, 3, is_bfloat=True) assert isinstance(result, bytes) assert result == arr.tobytes() def test_invalid_numpy_dtype_float16(self): """Test numpy array with wrong dtype for float16""" arr = np.array([1.0, 2.0, 3.0], dtype=np.float32) with pytest.raises(MilvusException, match='dtype must be "float16"'): float16_vector_validator(arr, 3, is_bfloat=False) def test_invalid_numpy_dtype_bfloat16(self): """Test numpy array with wrong dtype for bfloat16""" arr = np.array([1.0, 2.0, 3.0], dtype=np.float32) with pytest.raises(MilvusException, match='dtype must be "bfloat16"'): float16_vector_validator(arr, 3, is_bfloat=True) def test_invalid_numpy_shape(self): """Test numpy array with wrong shape""" arr = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float16) with pytest.raises(MilvusException, match="shape must not be one dimension"): float16_vector_validator(arr, 4, is_bfloat=False) def test_invalid_numpy_length(self): """Test numpy array with wrong dimension""" arr = np.array([1.0, 2.0], dtype=np.float16) with pytest.raises(MilvusException, match="length must be equal to vector dimension"): float16_vector_validator(arr, 3, is_bfloat=False) def test_invalid_type(self): """Test with invalid input type""" with pytest.raises(MilvusException, match="only accept numpy.ndarray or list"): float16_vector_validator("invalid", 3, is_bfloat=False)
TestFloat16VectorValidator
python
modin-project__modin
modin/core/dataframe/base/partitioning/axis_partition.py
{ "start": 1037, "end": 8230 }
class ____( ABC, ClassLogger, modin_layer="VIRTUAL-PARTITION", log_level=LogLevel.DEBUG ): # pragma: no cover """ An abstract class that represents the parent class for any axis partition class. This class is intended to simplify the way that operations are performed. Attributes ---------- _PARTITIONS_METADATA_LEN : int The number of metadata values that the object of `partition_type` consumes. """ @property @abstractmethod def list_of_blocks(self) -> list: """Get the list of physical partition objects that compose this partition.""" pass def apply( self, func: Callable, *args: Iterable, num_splits: Optional[int] = None, other_axis_partition: Optional["BaseDataframeAxisPartition"] = None, maintain_partitioning: bool = True, lengths: Optional[Iterable] = None, manual_partition: bool = False, **kwargs: dict, ) -> Any: """ Apply a function to this axis partition along full axis. Parameters ---------- func : callable The function to apply. This will be preprocessed according to the corresponding `BaseDataframePartition` objects. *args : iterable Positional arguments to pass to `func`. num_splits : int, default: None The number of times to split the result object. other_axis_partition : BaseDataframeAxisPartition, default: None Another `BaseDataframeAxisPartition` object to be applied to func. This is for operations that are between two data sets. maintain_partitioning : bool, default: True Whether to keep the partitioning in the same orientation as it was previously or not. This is important because we may be operating on an individual axis partition and not touching the rest. In this case, we have to return the partitioning to its previous orientation (the lengths will remain the same). This is ignored between two axis partitions. lengths : iterable, default: None The list of lengths to shuffle the partition into. manual_partition : bool, default: False If True, partition the result with `lengths`. **kwargs : dict Additional keywords arguments to be passed in `func`. Returns ------- list A list of `BaseDataframePartition` objects. Notes ----- The procedures that invoke this method assume full axis knowledge. Implement this method accordingly. You must return a list of `BaseDataframePartition` objects from this method. """ pass # Child classes must have these in order to correctly subclass. partition_type: Type _PARTITIONS_METADATA_LEN = 0 def _wrap_partitions( self, partitions: list, extract_metadata: Optional[bool] = None ) -> list: """ Wrap remote partition objects with `BaseDataframePartition` class. Parameters ---------- partitions : list List of remotes partition objects to be wrapped with `BaseDataframePartition` class. extract_metadata : bool, optional Whether the partitions list contains information about partition's metadata. If `None` was passed will take the argument's value from the value of `cls._PARTITIONS_METADATA_LEN`. Returns ------- list List of wrapped remote partition objects. """ assert self.partition_type is not None if extract_metadata is None: # If `_PARTITIONS_METADATA_LEN == 0` then the execution doesn't support metadata # and thus we should never try extracting it, otherwise assuming that the common # approach of always passing the metadata is used. extract_metadata = bool(self._PARTITIONS_METADATA_LEN) if extract_metadata: # Here we recieve a 1D array of futures describing partitions and their metadata as: # [object_id{partition_idx}, metadata{partition_idx}_{metadata_idx}, ...] 
# Here's an example of such array: # [ # object_id1, metadata1_1, metadata1_2, ..., metadata1_PARTITIONS_METADATA_LEN, # object_id2, metadata2_1, ..., metadata2_PARTITIONS_METADATA_LEN, # ... # object_idN, metadataN_1, ..., metadataN_PARTITIONS_METADATA_LEN, # ] return [ self.partition_type(*init_args) for init_args in zip( # `partition_type` consumes `(object_id, *metadata)`, thus adding `+1` *[iter(partitions)] * (1 + self._PARTITIONS_METADATA_LEN) ) ] else: return [self.partition_type(object_id) for object_id in partitions] def force_materialization( self, get_ip: bool = False ) -> "BaseDataframeAxisPartition": """ Materialize axis partitions into a single partition. Parameters ---------- get_ip : bool, default: False Whether to get node ip address to a single partition or not. Returns ------- BaseDataframeAxisPartition An axis partition containing only a single materialized partition. """ materialized = self.apply( lambda x: x, num_splits=1, maintain_partitioning=False ) return type(self)(materialized, get_ip=get_ip) # type: ignore[call-arg] def unwrap( self, squeeze: bool = False, get_ip: bool = False ) -> Union[list, Tuple[list, list]]: """ Unwrap partitions from this axis partition. Parameters ---------- squeeze : bool, default: False Flag used to unwrap only one partition. get_ip : bool, default: False Whether to get node ip address to each partition or not. Returns ------- list List of partitions from this axis partition. Notes ----- If `get_ip=True`, a tuple of lists of Ray.ObjectRef/Dask.Future to node ip addresses and unwrapped partitions, respectively, is returned if Ray/Dask is used as an engine (i.e. [(Ray.ObjectRef/Dask.Future, Ray.ObjectRef/Dask.Future), ...]). """ if squeeze and len(self.list_of_blocks) == 1: if get_ip: # TODO(https://github.com/modin-project/modin/issues/5176): Stop ignoring the list_of_ips # check once we know that we're not calling list_of_ips on python axis partitions return self.list_of_ips[0], self.list_of_blocks[0] # type: ignore[attr-defined] else: return self.list_of_blocks[0] else: if get_ip: return list(zip(self.list_of_ips, self.list_of_blocks)) # type: ignore[attr-defined] else: return self.list_of_blocks
BaseDataframeAxisPartition
python
Pylons__pyramid
tests/test_path.py
{ "start": 18970, "end": 19537 }
class ____: def __init__(self, real_package_or_module, raise_exc=None): self.__dict__['raise_exc'] = raise_exc self.__dict__['__name__'] = real_package_or_module.__name__ import os self.__dict__['package_path'] = os.path.dirname( os.path.abspath(real_package_or_module.__file__) ) self.__dict__['__file__'] = real_package_or_module.__file__ def __setattr__(self, key, val): if self.raise_exc is not None: raise self.raise_exc self.__dict__[key] = val
DummyPackageOrModule
python
plotly__plotly.py
plotly/graph_objs/choropleth/unselected/_marker.py
{ "start": 233, "end": 2368 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "choropleth.unselected" _path_str = "choropleth.unselected.marker" _valid_props = {"opacity"} @property def opacity(self): """ Sets the marker opacity of unselected points, applied only when a selection exists. The 'opacity' property is a number and may be specified as: - An int or float in the interval [0, 1] Returns ------- int|float """ return self["opacity"] @opacity.setter def opacity(self, val): self["opacity"] = val @property def _prop_descriptions(self): return """\ opacity Sets the marker opacity of unselected points, applied only when a selection exists. """ def __init__(self, arg=None, opacity=None, **kwargs): """ Construct a new Marker object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.choropleth.unselected.Marker` opacity Sets the marker opacity of unselected points, applied only when a selection exists. Returns ------- Marker """ super().__init__("marker") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.choropleth.unselected.Marker constructor must be a dict or an instance of :class:`plotly.graph_objs.choropleth.unselected.Marker`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("opacity", arg, opacity) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Marker
python
weaviate__weaviate-python-client
weaviate/collections/classes/config_vectorizers.py
{ "start": 10992, "end": 11308 }
class ____(_VectorizerConfigCreate): vectorizer: Union[Vectorizers, _EnumLikeStr] = Field( default=Vectorizers.TEXT2VEC_DATABRICKS, frozen=True, exclude=True ) endpoint: str instruction: Optional[str] vectorizeClassName: bool OpenAIType = Literal["text", "code"]
_Text2VecDatabricksConfig
python
encode__django-rest-framework
tests/schemas/views.py
{ "start": 5079, "end": 5359 }
class ____(serializers.Serializer): date = serializers.DateField() datetime = serializers.DateTimeField() hstore = serializers.HStoreField() uuid_field = serializers.UUIDField(default=uuid.uuid4) class Meta: model = OpenAPIExample
ExampleSerializerModel
python
kubernetes-client__python
kubernetes/client/models/v1alpha1_storage_version_migration.py
{ "start": 383, "end": 7766 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'api_version': 'str', 'kind': 'str', 'metadata': 'V1ObjectMeta', 'spec': 'V1alpha1StorageVersionMigrationSpec', 'status': 'V1alpha1StorageVersionMigrationStatus' } attribute_map = { 'api_version': 'apiVersion', 'kind': 'kind', 'metadata': 'metadata', 'spec': 'spec', 'status': 'status' } def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501 """V1alpha1StorageVersionMigration - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._api_version = None self._kind = None self._metadata = None self._spec = None self._status = None self.discriminator = None if api_version is not None: self.api_version = api_version if kind is not None: self.kind = kind if metadata is not None: self.metadata = metadata if spec is not None: self.spec = spec if status is not None: self.status = status @property def api_version(self): """Gets the api_version of this V1alpha1StorageVersionMigration. # noqa: E501 APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :return: The api_version of this V1alpha1StorageVersionMigration. # noqa: E501 :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """Sets the api_version of this V1alpha1StorageVersionMigration. APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501 :param api_version: The api_version of this V1alpha1StorageVersionMigration. # noqa: E501 :type: str """ self._api_version = api_version @property def kind(self): """Gets the kind of this V1alpha1StorageVersionMigration. # noqa: E501 Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :return: The kind of this V1alpha1StorageVersionMigration. # noqa: E501 :rtype: str """ return self._kind @kind.setter def kind(self, kind): """Sets the kind of this V1alpha1StorageVersionMigration. Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501 :param kind: The kind of this V1alpha1StorageVersionMigration. # noqa: E501 :type: str """ self._kind = kind @property def metadata(self): """Gets the metadata of this V1alpha1StorageVersionMigration. 
# noqa: E501 :return: The metadata of this V1alpha1StorageVersionMigration. # noqa: E501 :rtype: V1ObjectMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """Sets the metadata of this V1alpha1StorageVersionMigration. :param metadata: The metadata of this V1alpha1StorageVersionMigration. # noqa: E501 :type: V1ObjectMeta """ self._metadata = metadata @property def spec(self): """Gets the spec of this V1alpha1StorageVersionMigration. # noqa: E501 :return: The spec of this V1alpha1StorageVersionMigration. # noqa: E501 :rtype: V1alpha1StorageVersionMigrationSpec """ return self._spec @spec.setter def spec(self, spec): """Sets the spec of this V1alpha1StorageVersionMigration. :param spec: The spec of this V1alpha1StorageVersionMigration. # noqa: E501 :type: V1alpha1StorageVersionMigrationSpec """ self._spec = spec @property def status(self): """Gets the status of this V1alpha1StorageVersionMigration. # noqa: E501 :return: The status of this V1alpha1StorageVersionMigration. # noqa: E501 :rtype: V1alpha1StorageVersionMigrationStatus """ return self._status @status.setter def status(self, status): """Sets the status of this V1alpha1StorageVersionMigration. :param status: The status of this V1alpha1StorageVersionMigration. # noqa: E501 :type: V1alpha1StorageVersionMigrationStatus """ self._status = status def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1alpha1StorageVersionMigration): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1alpha1StorageVersionMigration): return True return self.to_dict() != other.to_dict()
V1alpha1StorageVersionMigration
python
scikit-learn__scikit-learn
sklearn/impute/tests/test_base.py
{ "start": 826, "end": 1056 }
class ____(_BaseImputer): def fit(self, X, y=None): self._fit_indicator(X) return self def transform(self, X): return self._concatenate_indicator(X, self._transform_indicator(X))
NoPrecomputedMaskFit
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 288431, "end": 288876 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("cursor", "node", "role") cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor") node = sgqlc.types.Field("User", graphql_name="node") role = sgqlc.types.Field( sgqlc.types.non_null(EnterpriseAdministratorRole), graphql_name="role" )
EnterpriseAdministratorEdge
python
huggingface__transformers
tests/models/lfm2_vl/test_modeling_lfm2_vl.py
{ "start": 1615, "end": 5512 }
class ____(CausalLMModelTester): if is_torch_available(): config_class = Lfm2VlConfig base_model_class = Lfm2VlModel causal_lm_class = Lfm2VlForConditionalGeneration def __init__( self, parent, is_training=True, batch_size=2, scale_factor=2, num_images=2, vision_config={ "hidden_size": 32, "intermediate_size": 37, "num_hidden_layers": 2, "num_attention_heads": 2, "num_channels": 3, "num_patches": 16, "patch_size": 4, "hidden_act": "gelu_pytorch_tanh", "layer_norm_eps": 1e-6, "attention_dropout": 0.0, }, text_config={ "vocab_size": 100, "hidden_size": 32, "intermediate_size": 37, "num_hidden_layers": 2, "num_attention_heads": 4, "num_key_value_heads": 2, "max_position_embeddings": 100, "pad_token_id": 0, "bos_token_id": 1, "eos_token_id": 2, "tie_word_embeddings": True, "rope_theta": 1000000.0, "conv_bias": False, "conv_L_cache": 3, "block_multiple_of": 2, "full_attn_idxs": [0], }, image_token_id=4, downsample_factor=4, projector_hidden_size=32, ): super().__init__(parent) self.vision_config = vision_config self.text_config = text_config self.image_token_id = image_token_id self.is_training = is_training self.batch_size = batch_size self.scale_factor = scale_factor self.num_images = num_images self.downsample_factor = downsample_factor self.projector_hidden_size = projector_hidden_size self.image_seq_length = 4 def get_config(self): return Lfm2VlConfig( vision_config=self.vision_config, text_config=self.text_config, image_token_id=self.image_token_id, downsample_factor=self.downsample_factor, projector_hidden_size=self.projector_hidden_size, ) def prepare_config_and_inputs(self): # Create dummy pixel values: [num_images, num_patches, channels * patch_size^2] patch_size = self.vision_config["patch_size"] pixel_values = floats_tensor([self.num_images, 64, 3 * patch_size * patch_size]) # Spatial shapes: one (height_patches, width_patches) per image patches = int(math.sqrt(64)) spatial_shapes = torch.tensor([[patches, patches]] * self.num_images, dtype=torch.long, device=torch_device) # Pixel attention mask: mark all patches as valid (no padding) pixel_attention_mask = torch.ones((self.num_images, 64), dtype=torch.long, device=torch_device) config = self.get_config() return config, pixel_values, spatial_shapes, pixel_attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, spatial_shapes, pixel_attention_mask = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 1 # For simplicity just set the last n tokens to the image token input_ids[input_ids == self.image_token_id] = self.text_config["pad_token_id"] input_ids[:, -self.image_seq_length :] = self.image_token_id attention_mask = input_ids.ne(1).to(torch_device) inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "spatial_shapes": spatial_shapes, "pixel_attention_mask": pixel_attention_mask, } return config, inputs_dict @require_torch
Lfm2VlModelTester
python
pypa__pip
src/pip/_vendor/rich/progress.py
{ "start": 28535, "end": 28809 }
class ____(ProgressColumn): """Renders completed filesize.""" def render(self, task: "Task") -> Text: """Show data completed.""" data_size = filesize.decimal(int(task.completed)) return Text(data_size, style="progress.filesize")
FileSizeColumn
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 579145, "end": 579454 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("cursor", "node") cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor") node = sgqlc.types.Field("Release", graphql_name="node")
ReleaseEdge
python
numba__numba
numba/core/targetconfig.py
{ "start": 951, "end": 1025 }
class ____(utils.ThreadLocalStack, stack_name="flags"): pass
_FlagsStack
python
pytorch__pytorch
torch/_numpy/_dtypes.py
{ "start": 5169, "end": 10279 }
class ____: def __init__(self, arg): # a pytorch object? if isinstance(arg, torch.dtype): sctype = _torch_dtypes[arg] elif isinstance(arg, torch.Tensor): sctype = _torch_dtypes[arg.dtype] # a scalar type? elif issubclass_(arg, generic): sctype = arg # a dtype already? elif isinstance(arg, DType): sctype = arg._scalar_type # a has a right attribute? elif hasattr(arg, "dtype"): sctype = arg.dtype._scalar_type else: sctype = sctype_from_string(arg) self._scalar_type = sctype @property def name(self): return self._scalar_type.name @property def type(self): return self._scalar_type @property def kind(self): # https://numpy.org/doc/stable/reference/generated/numpy.dtype.kind.html return _torch_dtypes[self.torch_dtype].name[0] @property def typecode(self): return self._scalar_type.typecode def __eq__(self, other): if isinstance(other, DType): return self._scalar_type == other._scalar_type try: other_instance = DType(other) except TypeError: return False return self._scalar_type == other_instance._scalar_type @property def torch_dtype(self): return self._scalar_type.torch_dtype def __hash__(self): return hash(self._scalar_type.name) def __repr__(self): return f'dtype("{self.name}")' __str__ = __repr__ @property def itemsize(self): elem = self.type(1) return elem.tensor.element_size() def __getstate__(self): return self._scalar_type def __setstate__(self, value): self._scalar_type = value typecodes = { "All": "efdFDBbhil?", "AllFloat": "efdFD", "AllInteger": "Bbhil", "Integer": "bhil", "UnsignedInteger": "B", "Float": "efd", "Complex": "FD", } # ### Defaults and dtype discovery def set_default_dtype(fp_dtype="numpy", int_dtype="numpy"): """Set the (global) defaults for fp, complex, and int dtypes. The complex dtype is inferred from the float (fp) dtype. It has a width at least twice the width of the float dtype, i.e., it's complex128 for float64 and complex64 for float32. Parameters ---------- fp_dtype Allowed values are "numpy", "pytorch" or dtype_like things which can be converted into a DType instance. Default is "numpy" (i.e. float64). int_dtype Allowed values are "numpy", "pytorch" or dtype_like things which can be converted into a DType instance. Default is "numpy" (i.e. int64). Returns ------- The old default dtype state: a namedtuple with attributes ``float_dtype``, ``complex_dtypes`` and ``int_dtype``. These attributes store *pytorch* dtypes. Notes ------------ This functions has a side effect: it sets the global state with the provided dtypes. The complex dtype has bit width of at least twice the width of the float dtype, i.e. it's complex128 for float64 and complex64 for float32. 
""" if fp_dtype not in ["numpy", "pytorch"]: fp_dtype = dtype(fp_dtype).torch_dtype if int_dtype not in ["numpy", "pytorch"]: int_dtype = dtype(int_dtype).torch_dtype if fp_dtype == "numpy": float_dtype = torch.float64 elif fp_dtype == "pytorch": float_dtype = torch.float32 else: float_dtype = fp_dtype complex_dtype = { torch.float64: torch.complex128, torch.float32: torch.complex64, torch.float16: torch.complex64, }[float_dtype] if int_dtype in ["numpy", "pytorch"]: int_dtype = torch.int64 new_defaults = _dtypes_impl.DefaultDTypes( float_dtype=float_dtype, complex_dtype=complex_dtype, int_dtype=int_dtype ) # set the new global state and return the old state old_defaults = _dtypes_impl.default_dtypes _dtypes_impl._default_dtypes = new_defaults return old_defaults def issubclass_(arg, klass): try: return issubclass(arg, klass) except TypeError: return False def issubdtype(arg1, arg2): # cf https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numerictypes.py#L356-L420 # We also accept strings even if NumPy doesn't as dtypes are serialized as their # string representation in dynamo's graph def str_to_abstract(t): if isinstance(t, str) and t in _abstract_dtypes: return globals()[t] return t arg1 = str_to_abstract(arg1) arg2 = str_to_abstract(arg2) if not issubclass_(arg1, generic): arg1 = dtype(arg1).type if not issubclass_(arg2, generic): arg2 = dtype(arg2).type return issubclass(arg1, arg2) __all__ = ["dtype", "DType", "typecodes", "issubdtype", "set_default_dtype", "sctypes"] __all__ += list(_names.keys()) # noqa: PLE0605 __all__ += list(_name_aliases.keys()) # noqa: PLE0605 __all__ += _abstract_dtypes # noqa: PLE0605
DType
python
pytorch__pytorch
test/distributed/_composable/fsdp/test_fully_shard_logging.py
{ "start": 546, "end": 2209 }
class ____(LoggingTestCase): @requires_distributed() def test_fsdp_logging(self): env = dict(os.environ) env["TORCH_LOGS"] = "fsdp" env["RANK"] = "0" env["WORLD_SIZE"] = "1" env["MASTER_PORT"] = "34715" env["MASTER_ADDR"] = "localhost" _, stderr = self.run_process_no_exception( f"""\ import logging import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.fsdp import fully_shard logger = logging.getLogger("torch.distributed.fsdp.fully_shard") logger.setLevel(logging.DEBUG) device = '{device_type.type}' torch.manual_seed(0) model = nn.Sequential(*[nn.Linear(4, 4, device=device, bias=False) for _ in range(2)]) for layer in model: fully_shard(layer) fully_shard(model) x = torch.randn((4, 4), device=device) model(x).sum().backward() """, env=env, ) self.assertIn("FSDP::root_pre_forward", stderr.decode("utf-8")) self.assertIn("FSDP::pre_forward (0)", stderr.decode("utf-8")) self.assertIn("FSDP::pre_forward (1)", stderr.decode("utf-8")) self.assertIn("FSDP::post_forward (0)", stderr.decode("utf-8")) self.assertIn("FSDP::post_forward (1)", stderr.decode("utf-8")) self.assertIn("FSDP::pre_backward (0)", stderr.decode("utf-8")) self.assertIn("FSDP::pre_backward (1)", stderr.decode("utf-8")) self.assertIn("FSDP::post_backward (0)", stderr.decode("utf-8")) self.assertIn("FSDP::post_backward (1)", stderr.decode("utf-8")) self.assertIn("FSDP::root_post_backward", stderr.decode("utf-8")) if __name__ == "__main__": run_tests()
LoggingTests
python
pdm-project__pdm
src/pdm/project/lockfile/base.py
{ "start": 652, "end": 1008 }
class ____(enum.IntEnum): NONE = 0 # The lockfile can't be read by the current version of PDM. SAME = 1 # The lockfile version is the same as the current version of PDM. BACKWARD = 2 # The current version of PDM is newer than the lockfile version. FORWARD = 3 # The current version of PDM is older than the lockfile version.
Compatibility
python
html5lib__html5lib-python
html5lib/html5parser.py
{ "start": 73064, "end": 74548 }
class ____(Phase): __slots__ = ("originalPhase", "characterTokens") def __init__(self, *args, **kwargs): super(InTableTextPhase, self).__init__(*args, **kwargs) self.originalPhase = None self.characterTokens = [] def flushCharacters(self): data = "".join([item["data"] for item in self.characterTokens]) if any(item not in spaceCharacters for item in data): token = {"type": tokenTypes["Characters"], "data": data} self.parser.phases["inTable"].insertText(token) elif data: self.tree.insertText(data) self.characterTokens = [] def processComment(self, token): self.flushCharacters() self.parser.phase = self.originalPhase return token def processEOF(self): self.flushCharacters() self.parser.phase = self.originalPhase return True def processCharacters(self, token): if token["data"] == "\u0000": return self.characterTokens.append(token) def processSpaceCharacters(self, token): # pretty sure we should never reach here self.characterTokens.append(token) # assert False def processStartTag(self, token): self.flushCharacters() self.parser.phase = self.originalPhase return token def processEndTag(self, token): self.flushCharacters() self.parser.phase = self.originalPhase return token
InTableTextPhase
python
huggingface__transformers
tests/quantization/torchao_integration/test_torchao.py
{ "start": 34072, "end": 34769 }
class ____(TorchAoSerializationTest): # called only once for all test in this class @classmethod def setUpClass(cls): super().setUpClass() cls.quant_scheme = Int8WeightOnlyConfig() cls.quant_scheme_kwargs = {} cls.EXPECTED_OUTPUT = "What are we having for dinner?\n\nJessica: (smiling)" @require_torch_accelerator def test_serialization_expected_output_on_accelerator(self): """ Test if we can serialize on device (cpu) and load/infer the model on accelerator """ self.check_serialization_expected_output(torch_device, self.EXPECTED_OUTPUT) @require_torch_accelerator @require_torchao
TorchAoSerializationW8CPUTest
python
sqlalchemy__sqlalchemy
test/dialect/postgresql/test_types.py
{ "start": 187480, "end": 187559 }
class ____(_DateRangeTests, _RangeTypeRoundTrip): pass
DateRangeRoundTripTest
python
run-llama__llama_index
llama-index-core/llama_index/core/memory/memory_blocks/vector.py
{ "start": 889, "end": 7539 }
class ____(BaseMemoryBlock[str]): """ A memory block that retrieves relevant information from a vector store. This block stores conversation history in a vector store and retrieves relevant information based on the most recent messages. """ name: str = Field( default="RetrievedMessages", description="The name of the memory block." ) vector_store: BasePydanticVectorStore = Field( description="The vector store to use for retrieval." ) embed_model: BaseEmbedding = Field( default_factory=get_default_embed_model, description="The embedding model to use for encoding queries and documents.", ) similarity_top_k: int = Field( default=2, description="Number of top results to return." ) retrieval_context_window: int = Field( default=5, description="Maximum number of messages to include for context when retrieving.", ) format_template: BasePromptTemplate = Field( default=DEFAULT_RETRIEVED_TEXT_TEMPLATE, description="Template for formatting the retrieved information.", ) node_postprocessors: List[BaseNodePostprocessor] = Field( default_factory=list, description="List of node postprocessors to apply to the retrieved nodes containing messages.", ) query_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Additional keyword arguments for the vector store query.", ) @field_validator("vector_store", mode="before") def validate_vector_store(cls, v: Any) -> "BasePydanticVectorStore": if not isinstance(v, BasePydanticVectorStore): raise ValueError("vector_store must be a BasePydanticVectorStore") if not v.stores_text: raise ValueError( "vector_store must store text to be used as a retrieval memory block" ) return v @field_validator("format_template", mode="before") @classmethod def validate_format_template(cls, v: Any) -> "BasePromptTemplate": if isinstance(v, str): if "{{" in v and "}}" in v: v = RichPromptTemplate(v) else: v = PromptTemplate(v) return v def _get_text_from_messages(self, messages: List[ChatMessage]) -> str: """Get the text from the messages.""" text = "" for i, message in enumerate(messages): for block in message.blocks: if isinstance(block, TextBlock): text += block.text if len(messages) > 1 and i != len(messages) - 1: text += " " return text async def _aget( self, messages: Optional[List[ChatMessage]] = None, session_id: Optional[str] = None, **block_kwargs: Any, ) -> str: """Retrieve relevant information based on recent messages.""" if not messages or len(messages) == 0: return "" # Use the last message or a context window of messages for the query if ( self.retrieval_context_window > 1 and len(messages) >= self.retrieval_context_window ): context = messages[-self.retrieval_context_window :] else: context = messages query_text = self._get_text_from_messages(context) if not query_text: return "" # Handle filtering by session_id if session_id is not None: filter = MetadataFilter(key="session_id", value=session_id) if "filters" in self.query_kwargs and isinstance( self.query_kwargs["filters"], MetadataFilters ): # only add session_id filter if it does not exist in the filters list session_id_filter_exists = False for metadata_filter in self.query_kwargs["filters"].filters: if ( isinstance(metadata_filter, MetadataFilter) and metadata_filter.key == "session_id" ): session_id_filter_exists = True break if not session_id_filter_exists: self.query_kwargs["filters"].filters.append(filter) else: self.query_kwargs["filters"] = MetadataFilters(filters=[filter]) # Create and execute the query query_embedding = await self.embed_model.aget_query_embedding(query_text) query = 
VectorStoreQuery( query_str=query_text, query_embedding=query_embedding, similarity_top_k=self.similarity_top_k, **self.query_kwargs, ) results = await self.vector_store.aquery(query) nodes_with_scores = [ NodeWithScore(node=node, score=score) for node, score in zip(results.nodes or [], results.similarities or []) ] if not nodes_with_scores: return "" # Apply postprocessors for postprocessor in self.node_postprocessors: nodes_with_scores = await postprocessor.apostprocess_nodes( nodes_with_scores, query_str=query_text ) # Format the results retrieved_text = "\n\n".join([node.get_content() for node in nodes_with_scores]) return self.format_template.format(text=retrieved_text) async def _aput(self, messages: List[ChatMessage]) -> None: """Store messages in the vector store for future retrieval.""" if not messages: return # Format messages with role, text content, and additional info texts = [] session_id = None for message in messages: text = self._get_text_from_messages([message]) if not text: continue # special case for session_id if "session_id" in message.additional_kwargs: session_id = message.additional_kwargs.pop("session_id") if message.additional_kwargs: text += f"\nAdditional Info: ({message.additional_kwargs!s})" text = f"<message role='{message.role.value}'>{text}</message>" texts.append(text) if not texts: return # Get embeddings text_node = TextNode(text="\n".join(texts), metadata={"session_id": session_id}) text_node.embedding = await self.embed_model.aget_text_embedding(text_node.text) # Add to vector store, one node per entire message batch await self.vector_store.async_add([text_node])
VectorMemoryBlock
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 141031, "end": 141493 }
class ____(sgqlc.types.Input): """Autogenerated input type of AddStar""" __schema__ = github_schema __field_names__ = ("starrable_id", "client_mutation_id") starrable_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="starrableId") """The Starrable ID to star.""" client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation."""
AddStarInput
python
ray-project__ray
python/ray/tune/experiment/trial.py
{ "start": 3253, "end": 4222 }
class ____: """Serializable struct for holding information for a Trial. Attributes: trial_name: String name of the current trial. trial_id: trial_id of the trial trial_resources: resources used by trial. """ def __init__(self, trial: "Trial"): self._trial_name = str(trial) self._trial_id = trial.trial_id self._trial_resources = trial.placement_group_factory self._experiment_name = trial.experiment_dir_name @property def experiment_name(self): return self._experiment_name @property def trial_name(self): return self._trial_name @property def trial_id(self): return self._trial_id @property def trial_resources(self) -> PlacementGroupFactory: return self._trial_resources @trial_resources.setter def trial_resources(self, new_resources: PlacementGroupFactory): self._trial_resources = new_resources
_TrialInfo
python
Textualize__textual
docs/examples/styles/text_overflow.py
{ "start": 214, "end": 497 }
class ____(App): CSS_PATH = "text_overflow.tcss" def compose(self) -> ComposeResult: yield Static(TEXT, id="static1") yield Static(TEXT, id="static2") yield Static(TEXT, id="static3") if __name__ == "__main__": app = WrapApp() app.run()
WrapApp
python
pytorch__pytorch
torch/_numpy/testing/utils.py
{ "start": 55711, "end": 56806 }
class ____(Exception): "Ignoring this exception due to disabled feature" @contextlib.contextmanager def tempdir(*args, **kwargs): """Context manager to provide a temporary test folder. All arguments are passed as this to the underlying tempfile.mkdtemp function. """ tmpdir = mkdtemp(*args, **kwargs) try: yield tmpdir finally: shutil.rmtree(tmpdir) @contextlib.contextmanager def temppath(*args, **kwargs): """Context manager for temporary files. Context manager that returns the path to a closed temporary file. Its parameters are the same as for tempfile.mkstemp and are passed directly to that function. The underlying file is removed when the context is exited, so it should be closed at that time. Windows does not allow a temporary file to be opened if it is already open, so the underlying file must be closed after opening before it can be opened again. """ fd, path = mkstemp(*args, **kwargs) os.close(fd) try: yield path finally: os.remove(path)
IgnoreException
python
allegroai__clearml
clearml/backend_api/services/v2_23/datasets.py
{ "start": 97213, "end": 98292 }
class ____(Response): """ Response of datasets.delete_frames endpoint. :param deleted: Number of frames deleted :type deleted: int """ _service = "datasets" _action = "delete_frames" _version = "2.23" _schema = { "definitions": {}, "properties": { "deleted": { "description": "Number of frames deleted", "type": ["integer", "null"], } }, "type": "object", } def __init__(self, deleted=None, **kwargs): super(DeleteFramesResponse, self).__init__(**kwargs) self.deleted = deleted @schema_property("deleted") def deleted(self): return self._property_deleted @deleted.setter def deleted(self, value): if value is None: self._property_deleted = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "deleted", six.integer_types) self._property_deleted = value
DeleteFramesResponse
python
getsentry__sentry
src/sentry/tasks/summaries/organization_report_context_factory.py
{ "start": 757, "end": 7937 }
class ____: timestamp: float duration: int organization: Organization def __init__(self, timestamp: float, duration: int, organization: Organization): self.timestamp = timestamp self.duration = duration self.organization = organization def _append_user_project_ownership(self, ctx: OrganizationReportContext) -> None: """Find the projects associated with each user. Populates context.project_ownership which is { user_id: set<project_id> } """ with sentry_sdk.start_span(op="weekly_reports.user_project_ownership"): for project_id, user_id in OrganizationMember.objects.filter( organization_id=ctx.organization.id, teams__projectteam__project__isnull=False, teams__status=TeamStatus.ACTIVE, ).values_list("teams__projectteam__project_id", "user_id"): if user_id is not None: ctx.project_ownership.setdefault(user_id, set()).add(project_id) def _append_project_event_counts(self, ctx: OrganizationReportContext) -> None: with sentry_sdk.start_span(op="weekly_reports.project_event_counts_for_organization"): event_counts = project_event_counts_for_organization( start=ctx.start, end=ctx.end, ctx=ctx, referrer=Referrer.REPORTS_OUTCOMES.value ) for data in event_counts: project_id = data["project_id"] # Project no longer in organization, but events still exist if project_id not in ctx.projects_context_map: continue project_ctx = ctx.projects_context_map[project_id] assert isinstance( project_ctx, ProjectContext ), f"Expected a ProjectContext, received {type(project_ctx)}" total = data["total"] timestamp = int(parse_snuba_datetime(data["time"]).timestamp()) if data["category"] == DataCategory.TRANSACTION: # Transaction outcome if ( data["outcome"] == Outcome.RATE_LIMITED or data["outcome"] == Outcome.FILTERED ): project_ctx.dropped_transaction_count += total else: project_ctx.accepted_transaction_count += total project_ctx.transaction_count_by_day[timestamp] = total elif data["category"] == DataCategory.REPLAY: # Replay outcome if ( data["outcome"] == Outcome.RATE_LIMITED or data["outcome"] == Outcome.FILTERED ): project_ctx.dropped_replay_count += total else: project_ctx.accepted_replay_count += total project_ctx.replay_count_by_day[timestamp] = total else: # Error outcome if ( data["outcome"] == Outcome.RATE_LIMITED or data["outcome"] == Outcome.FILTERED ): project_ctx.dropped_error_count += total else: project_ctx.accepted_error_count += total project_ctx.error_count_by_day[timestamp] = ( project_ctx.error_count_by_day.get(timestamp, 0) + total ) def _append_organization_project_issue_substatus_summaries( self, ctx: OrganizationReportContext ) -> None: with sentry_sdk.start_span( op="weekly_reports.organization_project_issue_substatus_summaries" ): organization_project_issue_substatus_summaries(ctx) def _append_project_key_errors(self, ctx: OrganizationReportContext) -> None: with sentry_sdk.start_span(op="weekly_reports.project_passes"): organization = ctx.organization # Run project passes for project in organization.project_set.all(): key_errors = project_key_errors( ctx, project, referrer=Referrer.REPORTS_KEY_ERRORS.value ) if project.id not in ctx.projects_context_map: continue project_ctx = ctx.projects_context_map[project.id] assert isinstance( project_ctx, ProjectContext ), f"Expected a ProjectContext, received {type(project_ctx)}" if key_errors: project_ctx.key_errors_by_id = [ (e["events.group_id"], e["count()"]) for e in key_errors ] key_transactions_this_week = project_key_transactions_this_week(ctx, project) if key_transactions_this_week: project_ctx.key_transactions = [ (i["transaction_name"], 
i["count"], i["p95"]) for i in key_transactions_this_week ] query_result = project_key_transactions_last_week( ctx, project, key_transactions_this_week ) # Join this week with last week last_week_data = { i["transaction_name"]: (i["count"], i["p95"]) for i in query_result["data"] } project_ctx.key_transactions = [ (i["transaction_name"], i["count"], i["p95"]) + last_week_data.get(i["transaction_name"], (0, 0)) for i in key_transactions_this_week ] key_performance_issues = project_key_performance_issues( ctx, project, referrer=Referrer.REPORTS_KEY_PERFORMANCE_ISSUES.value ) if key_performance_issues: ctx.projects_context_map[project.id].key_performance_issues = ( key_performance_issues ) def _hydrate_key_error_groups(self, ctx: OrganizationReportContext) -> None: with sentry_sdk.start_span(op="weekly_reports.fetch_key_error_groups"): fetch_key_error_groups(ctx) def _hydrate_key_performance_issue_groups(self, ctx: OrganizationReportContext) -> None: with sentry_sdk.start_span(op="weekly_reports.fetch_key_performance_issue_groups"): fetch_key_performance_issue_groups(ctx) def create_context(self) -> OrganizationReportContext: ctx = OrganizationReportContext(self.timestamp, self.duration, self.organization) self._append_user_project_ownership(ctx) self._append_project_event_counts(ctx) self._append_organization_project_issue_substatus_summaries(ctx) self._append_project_key_errors(ctx) self._hydrate_key_error_groups(ctx) self._hydrate_key_performance_issue_groups(ctx) return ctx
OrganizationReportContextFactory
python
ray-project__ray
python/ray/data/datasource/file_based_datasource.py
{ "start": 1358, "end": 2547 }
class ____: """Configuration for file shuffling. This configuration object controls how files are shuffled while reading file-based datasets. .. note:: Even if you provided a seed, you might still observe a non-deterministic row order. This is because tasks are executed in parallel and their completion order might vary. If you need to preserve the order of rows, set `DataContext.get_current().execution_options.preserve_order`. Args: seed: An optional integer seed for the file shuffler. If provided, Ray Data shuffles files deterministically based on this seed. Example: >>> import ray >>> from ray.data import FileShuffleConfig >>> shuffle = FileShuffleConfig(seed=42) >>> ds = ray.data.read_images("s3://anonymous@ray-example-data/batoidea", shuffle=shuffle) """ # noqa: E501 seed: Optional[int] = None def __post_init__(self): """Ensure that the seed is either None or an integer.""" if self.seed is not None and not isinstance(self.seed, int): raise ValueError("Seed must be an integer or None.") @DeveloperAPI
FileShuffleConfig
python
spyder-ide__spyder
external-deps/python-lsp-server/pylsp/_utils.py
{ "start": 7386, "end": 12547 }
class ____(Formatter): command = ["black"] formatters = {"ruff": RuffFormatter(), "black": BlackFormatter()} def format_signature(signature: str, config: dict, signature_formatter: str) -> str: """Formats signature using ruff or black if either is available.""" as_func = f"def {signature.strip()}:\n pass" line_length = config.get("line_length", 88) formatter = formatters[signature_formatter] if formatter.is_installed: try: return ( formatter.format(as_func, line_length=line_length) .removeprefix("def ") .removesuffix(":\n pass") ) except subprocess.CalledProcessError as e: log.warning("Signature formatter failed %s", e) else: log.warning( "Formatter %s was requested but it does not appear to be installed", signature_formatter, ) return signature def convert_signatures_to_markdown(signatures: list[str], config: dict) -> str: signature_formatter = config.get("formatter", "black") if signature_formatter: signatures = [ format_signature( signature, signature_formatter=signature_formatter, config=config ) for signature in signatures ] return wrap_signature("\n".join(signatures)) def format_docstring( contents: str, markup_kind: str, signatures: Optional[list[str]] = None, signature_config: Optional[dict] = None, ): """Transform the provided docstring into a MarkupContent object. If `markup_kind` is 'markdown' the docstring will get converted to markdown representation using `docstring-to-markdown`; if it is `plaintext`, it will be returned as plain text. Call signatures of functions (or equivalent code summaries) provided in optional `signatures` argument will be prepended to the provided contents of the docstring if given. """ if not isinstance(contents, str): contents = "" if markup_kind == "markdown": try: value = docstring_to_markdown.convert(contents) except docstring_to_markdown.UnknownFormatError: # try to escape the Markdown syntax instead: value = escape_markdown(contents) if signatures: wrapped_signatures = convert_signatures_to_markdown( signatures, config=signature_config or {} ) value = wrapped_signatures + "\n\n" + value return {"kind": "markdown", "value": value} value = contents if signatures: value = "\n".join(signatures) + "\n\n" + value return {"kind": "plaintext", "value": escape_plain_text(value)} def clip_column(column, lines, line_number): """ Normalise the position as per the LSP that accepts character positions > line length https://microsoft.github.io/language-server-protocol/specification#position """ max_column = ( len(lines[line_number].rstrip("\r\n")) if len(lines) > line_number else 0 ) return min(column, max_column) def position_to_jedi_linecolumn(document, position): """ Convert the LSP format 'line', 'character' to Jedi's 'line', 'column' https://microsoft.github.io/language-server-protocol/specification#position """ code_position = {} if position: code_position = { "line": position["line"] + 1, "column": clip_column( position["character"], document.lines, position["line"] ), } return code_position if os.name == "nt": import ctypes kernel32 = ctypes.windll.kernel32 PROCESS_QUERY_INFROMATION = 0x1000 def is_process_alive(pid): """Check whether the process with the given pid is still alive. Running `os.kill()` on Windows always exits the process, so it can't be used to check for an alive process. see: https://docs.python.org/3/library/os.html?highlight=os%20kill#os.kill Hence ctypes is used to check for the process directly via windows API avoiding any other 3rd-party dependency. 
Args: pid (int): process ID Returns: bool: False if the process is not alive or don't have permission to check, True otherwise. """ process = kernel32.OpenProcess(PROCESS_QUERY_INFROMATION, 0, pid) if process != 0: kernel32.CloseHandle(process) return True return False else: import errno def is_process_alive(pid): """Check whether the process with the given pid is still alive. Args: pid (int): process ID Returns: bool: False if the process is not alive or don't have permission to check, True otherwise. """ if pid < 0: return False try: os.kill(pid, 0) except OSError as e: return e.errno == errno.EPERM return True def get_eol_chars(text): """Get EOL chars used in text.""" match = EOL_REGEX.search(text) if match: return match.group(0) return None
BlackFormatter
python
huggingface__transformers
src/transformers/models/audioflamingo3/modular_audioflamingo3.py
{ "start": 1407, "end": 1473 }
class ____(WhisperEncoderLayer): pass
AudioFlamingo3EncoderLayer
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/mssql/base.py
{ "start": 45531, "end": 45597 }
class ____(sqltypes.LargeBinary): __visit_name__ = "IMAGE"
IMAGE
python
realpython__materials
python-async-iterators/large_file_iterable.py
{ "start": 34, "end": 641 }
class ____: def __init__(self, path, chunk_size=1024): self.path = path self.chunk_size = chunk_size async def __aiter__(self): async with aiofiles.open(self.path, mode="rb") as file: while True: chunk = await file.read(self.chunk_size) if not chunk: break yield chunk async def main(): async for chunk in AsyncFileIterable("large-file.md"): # Process the file chunk here... await asyncio.sleep(0.2) print(chunk.decode("utf-8")) asyncio.run(main())
AsyncFileIterable
python
google__pytype
pytype/pytd/mro_test.py
{ "start": 229, "end": 1969 }
class ____(parser_test_base.ParserTest): """Test pytype.pytd.mro.""" def test_dedup(self): self.assertEqual([], mro.Dedup([])) self.assertEqual([1], mro.Dedup([1])) self.assertEqual([1, 2], mro.Dedup([1, 2])) self.assertEqual([1, 2], mro.Dedup([1, 2, 1])) self.assertEqual([1, 2], mro.Dedup([1, 1, 2, 2])) self.assertEqual([3, 2, 1], mro.Dedup([3, 2, 1, 3])) def test_mro_merge(self): self.assertEqual([], mro.MROMerge([[], []])) self.assertEqual([1], mro.MROMerge([[], [1]])) self.assertEqual([1], mro.MROMerge([[1], []])) self.assertEqual([1, 2], mro.MROMerge([[1], [2]])) self.assertEqual([1, 2], mro.MROMerge([[1, 2], [2]])) self.assertEqual([1, 2, 3, 4], mro.MROMerge([[1, 2, 3], [2, 4]])) self.assertEqual([1, 2, 3], mro.MROMerge([[1, 2], [1, 2, 3]])) self.assertEqual([1, 2], mro.MROMerge([[1, 1], [2, 2]])) self.assertEqual( [1, 2, 3, 4, 5, 6], mro.MROMerge([[1, 3, 5], [2, 3, 4], [4, 5, 6]]) ) self.assertEqual([1, 2, 3], mro.MROMerge([[1, 2, 1], [2, 3, 2]])) def test_get_bases_in_mro(self): ast = parser.parse_string( textwrap.dedent(""" from typing import Generic, TypeVar T = TypeVar("T") class Foo(Generic[T]): pass class Bar(Foo[int]): pass """), options=self.options, ) ast = ast.Visit(visitors.AdjustTypeParameters()) loader = load_pytd.Loader( config.Options.create(python_version=self.python_version) ) ast = loader.resolve_ast(ast) bases = mro.GetBasesInMRO(ast.Lookup("Bar"), lookup_ast=ast) self.assertListEqual( ["Foo", "typing.Generic", "builtins.object"], [t.name for t in bases] ) if __name__ == "__main__": unittest.main()
MroTest
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 85176, "end": 85734 }
class ____(sgqlc.types.Input): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("repository_id", "name", "oid", "client_mutation_id") repository_id = sgqlc.types.Field( sgqlc.types.non_null(ID), graphql_name="repositoryId" ) name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name") oid = sgqlc.types.Field(sgqlc.types.non_null(GitObjectID), graphql_name="oid") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
CreateRefInput
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_type_lookup.py
{ "start": 4235, "end": 7477 }
class ____(ParentUnknownType): def __init__(self, arg): pass def test_custom_type_resolution_fails_without_registering(): fails = st.from_type(UnknownType) with pytest.raises(ResolutionFailed): check_can_generate_examples(fails) def test_custom_type_resolution(): sentinel = object() with temp_registered(UnknownType, st.just(sentinel)): assert_simple_property(st.from_type(UnknownType), lambda v: v is sentinel) # Also covered by registration of child class assert_simple_property(st.from_type(ParentUnknownType), lambda v: v is sentinel) def test_custom_type_resolution_with_function(): sentinel = object() with temp_registered(UnknownType, lambda _: st.just(sentinel)): assert_simple_property(st.from_type(UnknownType), lambda v: v is sentinel) assert_simple_property(st.from_type(ParentUnknownType), lambda v: v is sentinel) def test_custom_type_resolution_with_function_non_strategy(): with temp_registered(UnknownType, lambda _: None): with pytest.raises(ResolutionFailed): check_can_generate_examples(st.from_type(UnknownType)) with pytest.raises(ResolutionFailed): check_can_generate_examples(st.from_type(ParentUnknownType)) @pytest.mark.parametrize("strategy_returned", [True, False]) def test_conditional_type_resolution_with_function(strategy_returned): sentinel = object() def resolve_strategy(thing): assert thing == UnknownType if strategy_returned: return st.just(sentinel) return NotImplemented with temp_registered(UnknownType, resolve_strategy): if strategy_returned: assert_simple_property(st.from_type(UnknownType), lambda v: v is sentinel) else: with pytest.raises(ResolutionFailed): check_can_generate_examples(st.from_type(UnknownType)) def test_errors_if_generic_resolves_empty(): with temp_registered(UnknownType, lambda _: st.nothing()): fails_1 = st.from_type(UnknownType) with pytest.raises(ResolutionFailed): check_can_generate_examples(fails_1) fails_2 = st.from_type(ParentUnknownType) with pytest.raises(ResolutionFailed): check_can_generate_examples(fails_2) @skipif_threading def test_cannot_register_empty(): # Cannot register and did not register with pytest.raises(InvalidArgument): st.register_type_strategy(UnknownType, st.nothing()) fails = st.from_type(UnknownType) with pytest.raises(ResolutionFailed): check_can_generate_examples(fails) assert UnknownType not in types._global_type_lookup def test_pulic_interface_works(): check_can_generate_examples(st.from_type(int)) fails = st.from_type("not a type or annotated function") with pytest.raises(InvalidArgument): check_can_generate_examples(fails) @pytest.mark.parametrize("infer_token", [infer, ...]) def test_given_can_infer_from_manual_annotations(infer_token): # Editing annotations before decorating is hilariously awkward, but works! def inner(a): assert isinstance(a, int) inner.__annotations__ = {"a": int} given(a=infer_token)(inner)()
UnknownType
python
getsentry__sentry
tests/sentry/issues/test_group_attributes_dataset.py
{ "start": 381, "end": 2153 }
class ____(SnubaTestCase, TestCase): def _send(self, snapshot: GroupAttributesSnapshot) -> None: produce_snapshot_to_kafka(snapshot) def test_query_dataset_returns_empty(self) -> None: json_body = { "selected_columns": ["project_id", "group_id"], "offset": 0, "limit": 100, "project": [1], "dataset": "group_attributes", "conditions": [ ["project_id", "IN", [2]], ], "consistent": False, "tenant_ids": {"referrer": "group_attributes", "organization_id": 1}, } request = json_to_snql(json_body, "group_attributes") request.validate() result = raw_snql_query(request) assert len(result["data"]) == 0 def test_insert_then_query(self) -> None: project = self.create_project() group = self.create_group(project=project) group_values = _bulk_retrieve_group_values([group.id]) snapshot = _bulk_retrieve_snapshot_values(group_values, False) self._send(snapshot[0]) json_body = { "selected_columns": ["project_id", "group_id"], "offset": 0, "limit": 100, "project": [project.id], "dataset": "group_attributes", "conditions": [ ["project_id", "IN", [project.id]], ], "consistent": False, "tenant_ids": { "referrer": "group_attributes", "organization_id": project.organization.id, }, } request = json_to_snql(json_body, "group_attributes") request.validate() result = raw_snql_query(request) assert len(result["data"]) == 1
DatasetTest
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/benchmark_test.py
{ "start": 1671, "end": 2450 }
class ____(test.Benchmark): """This benchmark (maybe) reports some stuff.""" def benchmarkReport1(self): self.report_benchmark(iters=1) def benchmarkReport2(self): self.report_benchmark( iters=2, name="custom_benchmark_name", extras={"number_key": 3, "other_key": "string"}) def benchmark_times_an_op(self): input_size = 5 with session.Session(config=benchmark.benchmark_config()) as sess: a = array_ops.placeholder(dtype=dtypes.float32, shape=(input_size)) a_plus_a = a + a return self.run_op_benchmark( sess, a_plus_a, feed_dict={a: np.arange(input_size)}, min_iters=1000, store_trace=True, name="op_benchmark")
TestReportingBenchmark
python
openai__openai-python
src/openai/resources/beta/threads/threads.py
{ "start": 96187, "end": 97651 }
class ____: def __init__(self, threads: AsyncThreads) -> None: self._threads = threads self.create = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( threads.create, # pyright: ignore[reportDeprecated], ) ) self.retrieve = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( threads.retrieve, # pyright: ignore[reportDeprecated], ) ) self.update = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( threads.update, # pyright: ignore[reportDeprecated], ) ) self.delete = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( threads.delete, # pyright: ignore[reportDeprecated], ) ) self.create_and_run = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( threads.create_and_run, # pyright: ignore[reportDeprecated], ) ) @cached_property def runs(self) -> AsyncRunsWithStreamingResponse: return AsyncRunsWithStreamingResponse(self._threads.runs) @cached_property def messages(self) -> AsyncMessagesWithStreamingResponse: return AsyncMessagesWithStreamingResponse(self._threads.messages)
AsyncThreadsWithStreamingResponse
python
pytorch__pytorch
torch/utils/data/datapipes/iter/sharding.py
{ "start": 325, "end": 423 }
class ____(IntEnum): DEFAULT = 1 DISTRIBUTED = 2 MULTIPROCESSING = 3
SHARDING_PRIORITIES
python
tensorflow__tensorflow
tensorflow/python/util/numpy_compat_test.py
{ "start": 831, "end": 2145 }
class ____(test.TestCase): def test_no_copy_new_vs_old(self): # Define old_np_asarray to replicate the old code that used .astype(dtype) # WITHOUT passing `copy=copy`. def old_np_asarray(values, dtype=None, order=None, copy=None): if np.lib.NumpyVersion(np.__version__) >= '2.0.0.dev0': if dtype is not None and np.issubdtype(dtype, np.number): return np.asarray(values, order=order, copy=copy).astype(dtype) else: return np.asarray(values, dtype=dtype, order=order, copy=copy) else: return np.asarray(values, dtype=dtype, order=order) # Test array x = np.array([1, 2, 3], dtype=np.float32) # Expect old numpy 2.x code to always copy even when copy=None y_old = old_np_asarray(x, dtype=np.float32, copy=None) if np.lib.NumpyVersion(np.__version__) >= '2.0.0.dev0': self.assertIsNot( y_old, x, msg='Old code did NOT copy, but we expect it to always copy.', ) # Expect new code to reuse the array if copy=None y_new = numpy_compat.np_asarray(x, dtype=np.float32, copy=None) self.assertIs( y_new, x, msg='New code did copy, but we expect it NOT to copy since copy=None.', ) if __name__ == '__main__': test.main()
NumpyCompatCopyBehaviorTest
python
ansible__ansible
test/units/module_utils/facts/test_ansible_collector.py
{ "start": 18523, "end": 19266 }
class ____(TestPkgMgrFacts): @patch( 'ansible.module_utils.facts.system.pkg_mgr.os.path.exists', side_effect=lambda x: x == '/run/ostree-booted') def _recollect_facts(self, distribution, version, mock_exists): self.collected_facts['ansible_distribution'] = distribution self.collected_facts['ansible_distribution_major_version'] = \ str(version) # Recollect facts self.setUp() self.assertIn('pkg_mgr', self.facts) self.assertEqual(self.facts['pkg_mgr'], 'atomic_container') def test_is_rhel_edge_ostree(self): self._recollect_facts('RedHat', 8) def test_is_fedora_ostree(self): self._recollect_facts('Fedora', 33)
TestPkgMgrOSTreeFacts
python
miyuchina__mistletoe
test/test_contrib/test_mathjax.py
{ "start": 103, "end": 1128 }
class ____(unittest.TestCase): mathjax_src = '<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-MML-AM_CHTML"></script>\n' def test_render_html(self): with MathJaxRenderer() as renderer: token = Document(['# heading 1\n', 'paragraph\n']) output = renderer.render(token) target = '<h1>heading 1</h1>\n<p>paragraph</p>\n' target += self.mathjax_src self.assertEqual(output, target) def test_render_math(self): with MathJaxRenderer() as renderer: raw = ['math displayed as a block:\n', '$$ \\sum_{i=1}^{\\infty} \\frac{1}{i^p} $$\n', 'math displayed in-line: $ 2^x $\n'] token = Document(raw) output = renderer.render(token) target = '<p>math displayed as a block:\n$$ \\sum_{i=1}^{\\infty} \\frac{1}{i^p} $$\nmath displayed in-line: \\( 2^x \\)</p>\n' target += self.mathjax_src self.assertEqual(output, target)
TestMathJaxRenderer
python
getsentry__sentry
src/sentry/types/region.py
{ "start": 3700, "end": 3809 }
class ____(Exception): """Indicate that a region's identity could not be resolved."""
RegionResolutionError
python
ray-project__ray
python/ray/tune/execution/insufficient_resources_manager.py
{ "start": 3885, "end": 5982 }
class ____: """Insufficient resources manager. Makes best effort, conservative guesses about if Tune loop is stuck due to infeasible resources. If so, outputs usability messages for users to act upon. """ def __init__(self, for_train: bool = False): # The information tracked across the life time of Tune loop. self._no_running_trials_since = -1 self._last_trial_num = -1 self._for_train = for_train def on_no_available_trials(self, all_trials): """Tracks information across the life of Tune loop and makes guesses about if Tune loop is stuck due to infeasible resources. If so, outputs certain warning messages. The logic should be conservative, non-intrusive and informative. For example, rate limiting is applied so that the message is not spammy. """ # This is approximately saying we are not making progress. if len(all_trials) == self._last_trial_num: if self._no_running_trials_since == -1: self._no_running_trials_since = time.monotonic() elif ( time.monotonic() - self._no_running_trials_since > _get_insufficient_resources_warning_threshold() ): can_fulfill_any = any( trial.status == Trial.PENDING and _can_fulfill_no_autoscaler(trial) for trial in all_trials ) if can_fulfill_any: # If one trial can be fulfilled, it will be fulfilled eventually self._no_running_trials_since = -1 return # Otherwise, can fulfill none msg = _get_insufficient_resources_warning_msg( for_train=self._for_train, trial=all_trials[0] ) logger.warning(msg) self._no_running_trials_since = time.monotonic() else: self._no_running_trials_since = -1 self._last_trial_num = len(all_trials)
_InsufficientResourcesManager
python
doocs__leetcode
solution/0500-0599/0573.Squirrel Simulation/Solution.py
{ "start": 0, "end": 488 }
class ____: def minDistance( self, height: int, width: int, tree: List[int], squirrel: List[int], nuts: List[List[int]], ) -> int: tr, tc = tree sr, sc = squirrel s = sum(abs(r - tr) + abs(c - tc) for r, c in nuts) * 2 ans = inf for r, c in nuts: a = abs(r - tr) + abs(c - tc) b = abs(r - sr) + abs(c - sc) ans = min(ans, s - a + b) return ans
Solution
python
Textualize__textual
tests/snapshot_tests/snapshot_apps/bindings_screen_overrides_show.py
{ "start": 150, "end": 329 }
class ____(Screen): BINDINGS = [ Binding("p", "app.pop_screen", "Binding shown"), ] def compose(self) -> ComposeResult: yield Footer()
ShowBindingScreen
python
django__django
tests/migrations/test_migrations_squashed_complex_multi_apps/app1/3_auto.py
{ "start": 35, "end": 202 }
class ____(migrations.Migration): dependencies = [("app1", "2_auto"), ("app2", "2_auto")] operations = [migrations.RunPython(migrations.RunPython.noop)]
Migration
python
tensorflow__tensorflow
tensorflow/python/distribute/strategy_common_test.py
{ "start": 2071, "end": 3391 }
class ____(test.TestCase, parameterized.TestCase): def testCaptureReplicaId(self, strategy): m = {} @def_function.function def f(): return distribute_lib.get_replica_context().replica_id_in_sync_group @def_function.function def g(): # Make g() a stateful function so it's traced twice. if m.get('v', None) is None: m['v'] = variables.Variable(0.) return strategy.run(f) g() def testMergeCallInitScope(self, strategy): with strategy.scope(): @def_function.function def fn(): def merge_fn(unused_strat): y = constant_op.constant(11) return y def replica_fn(): with ops.init_scope(): y = distribute_lib.get_replica_context().merge_call(merge_fn) z = y + 1 return z return strategy.run(replica_fn) result = strategy.experimental_local_results(fn()) self.assertAllClose(result, [12] * _get_num_replicas_per_client(strategy)) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_cpu_1_and_2, strategy_combinations.multi_worker_mirrored_2x2_gpu, strategy_combinations.tpu_strategy ], mode=['graph', 'eager']))
StrategyTest
python
apache__airflow
providers/google/tests/unit/google/common/links/test_storage.py
{ "start": 1549, "end": 2260 }
class ____: def test_file_details_link_name_and_key(self): assert FileDetailsLink.name == "GCS File Details" assert FileDetailsLink.key == "file_details" assert ( FileDetailsLink.format_str == "https://console.cloud.google.com/storage/browser/_details/{uri};tab=live_object?project={project_id}" ) def test_file_details_link_format(self): link = FileDetailsLink() url = link._format_link(uri="test-bucket/test-folder", project_id="test-id") expected_url = "https://console.cloud.google.com/storage/browser/_details/test-bucket/test-folder;tab=live_object?project=test-id" assert url == expected_url
TestFileDetailsLink
python
run-llama__llama_index
llama-index-core/llama_index/core/base/llms/types.py
{ "start": 15075, "end": 15733 }
class ____(BaseModel): """A representation of the content streamed from reasoning/thinking processes by LLMs""" block_type: Literal["thinking"] = "thinking" content: Optional[str] = Field( description="Content of the reasoning/thinking process, if available", default=None, ) num_tokens: Optional[int] = Field( description="Number of token used for reasoning/thinking, if available", default=None, ) additional_information: Dict[str, Any] = Field( description="Additional information related to the thinking/reasoning process, if available", default_factory=dict, )
ThinkingBlock
python
pytorch__pytorch
test/dynamo/cpython/3_13/test_operator.py
{ "start": 26022, "end": 29271 }
class ____: def copy(self, obj, proto): with support.swap_item(sys.modules, 'operator', self.module): pickled = pickle.dumps(obj, proto) with support.swap_item(sys.modules, 'operator', self.module2): return pickle.loads(pickled) def test_attrgetter(self): attrgetter = self.module.attrgetter with torch._dynamo.error_on_graph_break(False): class A: pass a = A() a.x = 'X' a.y = 'Y' a.z = 'Z' a.t = A() a.t.u = A() a.t.u.v = 'V' for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): f = attrgetter('x') f2 = self.copy(f, proto) self.assertEqual(repr(f2), repr(f)) self.assertEqual(f2(a), f(a)) # multiple gets f = attrgetter('x', 'y', 'z') f2 = self.copy(f, proto) self.assertEqual(repr(f2), repr(f)) self.assertEqual(f2(a), f(a)) # recursive gets f = attrgetter('t.u.v') f2 = self.copy(f, proto) self.assertEqual(repr(f2), repr(f)) self.assertEqual(f2(a), f(a)) def test_itemgetter(self): itemgetter = self.module.itemgetter a = 'ABCDE' for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): f = itemgetter(2) f2 = self.copy(f, proto) self.assertEqual(repr(f2), repr(f)) self.assertEqual(f2(a), f(a)) # multiple gets f = itemgetter(2, 0, 4) f2 = self.copy(f, proto) self.assertEqual(repr(f2), repr(f)) self.assertEqual(f2(a), f(a)) def test_methodcaller(self): methodcaller = self.module.methodcaller with torch._dynamo.error_on_graph_break(False): class A: def foo(self, *args, **kwds): return args[0] + args[1] def bar(self, f=42): return f def baz(*args, **kwds): return kwds['name'], kwds['self'] a = A() for proto in range(pickle.HIGHEST_PROTOCOL + 1): with self.subTest(proto=proto): f = methodcaller('bar') f2 = self.copy(f, proto) self.assertEqual(repr(f2), repr(f)) self.assertEqual(f2(a), f(a)) # positional args f = methodcaller('foo', 1, 2) f2 = self.copy(f, proto) self.assertEqual(repr(f2), repr(f)) self.assertEqual(f2(a), f(a)) # keyword args f = methodcaller('bar', f=5) f2 = self.copy(f, proto) self.assertEqual(repr(f2), repr(f)) self.assertEqual(f2(a), f(a)) f = methodcaller('baz', self='eggs', name='spam') f2 = self.copy(f, proto) # Can't test repr consistently with multiple keyword args self.assertEqual(f2(a), f(a))
OperatorPickleTestCase
python
django-import-export__django-import-export
tests/core/tests/test_resources/test_natural_foreign_key.py
{ "start": 648, "end": 2114 }
class ____(TestCase): """Tests for issue 1816.""" def setUp(self) -> None: author = Author.objects.create(name="J. R. R. Tolkien") Book.objects.create(author=author, name="The Hobbit") self.expected_dataset = tablib.Dataset(headers=["name", "author"]) row = ["The Hobbit", '["J. R. R. Tolkien"]'] self.expected_dataset.append(row) def test_resource_use_natural_keys(self): """ test with ModelResource.Meta.use_natural_foreign_keys=True Reproduces this problem """ resource = BookUsingNaturalKeys() exported_dataset = resource.export(Book.objects.all()) self.assertDatasetEqual(self.expected_dataset, exported_dataset) def test_field_use_natural_keys(self): """ test with ModelResource.field.widget.use_natural_foreign_keys=True Example of correct behaviour """ resource = BookUsingAuthorNaturalKey() exported_dataset = resource.export(Book.objects.all()) self.assertDatasetEqual(self.expected_dataset, exported_dataset) def assertDatasetEqual(self, expected_dataset, actual_dataset, message=None): """ Util for comparing datasets """ self.assertEqual(len(expected_dataset), len(actual_dataset), message) for expected_row, actual_row in zip(expected_dataset, actual_dataset): self.assertEqual(expected_row, actual_row, message)
TestNaturalKeys
python
dask__dask
dask/dataframe/io/utils.py
{ "start": 7893, "end": 7998 }
class ____(Protocol): def acquire(self) -> object: ... def release(self) -> object: ...
SupportsLock
python
sqlalchemy__sqlalchemy
test/sql/test_operators.py
{ "start": 110084, "end": 111190 }
class ____(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = "default" def setup_test(self): self.table = table( "mytable", column("myid", String), column("name", String) ) def test_regexp_match(self): assert_raises_message( exc.CompileError, "default dialect does not support regular expressions", self.table.c.myid.regexp_match("pattern").compile, dialect=default.DefaultDialect(), ) def test_not_regexp_match(self): assert_raises_message( exc.CompileError, "default dialect does not support regular expressions", (~self.table.c.myid.regexp_match("pattern")).compile, dialect=default.DefaultDialect(), ) def test_regexp_replace(self): assert_raises_message( exc.CompileError, "default dialect does not support regular expression replacements", self.table.c.myid.regexp_replace("pattern", "rep").compile, dialect=default.DefaultDialect(), )
RegexpTest
python
tensorflow__tensorflow
tensorflow/python/framework/auto_control_deps_test.py
{ "start": 1776, "end": 32523 }
class ____(test.TestCase): def setUp(self): super().setUp() self.must_run_order_insensitive_stateful_ops = ( acd.MUST_RUN_ORDER_INSENSITIVE_STATEFUL_OPS) def tearDown(self): acd.MUST_RUN_ORDER_INSENSITIVE_STATEFUL_OPS = ( self.must_run_order_insensitive_stateful_ops) super().tearDown() def testBasic(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) with acd.AutomaticControlDependencies() as c: v.assign(v + 1) v.assign(2 * v) val = v.read_value() val = c.mark_as_return(val) self.assertAllEqual(val, 4.0) def testUnorderedOpsRunInParallel(self): acd.MUST_RUN_ORDER_INSENSITIVE_STATEFUL_OPS |= frozenset(("EagerPyFunc",)) side_effects = [] def side_effect_one(x): side_effects.append(1) return x def side_effect_two(x): side_effects.append(2) return x @def_function.function def f(): script_ops.eager_py_func(side_effect_one, [1], [dtypes.int32]) script_ops.eager_py_func(side_effect_two, [1], [dtypes.int32]) return 1 side_effects = [] self.evaluate(f()) self.assertSetEqual(set(side_effects), set((1, 2))) def testIndependentOpsRunInParallel(self): v = resource_variable_ops.ResourceVariable(1) self.evaluate(variables.global_variables_initializer()) @def_function.function def f(): gen_resource_variable_ops.assign_variable_op(v.handle, 1) ops.get_default_graph().experimental_acd_manager.run_independently( gen_resource_variable_ops.assign_variable_op(v.handle, 2)) # A function with two identical ops, should cause a data race in most # conditions. var_values = set() for _ in range(10000): self.evaluate(f()) var_values.add( self.evaluate( resource_variable_ops.read_variable_op(v.handle, dtypes.int32))) # With regular control dependencies, the function should always run the # first assign first, and the value 1 should never be seen. # With run_independently, assign 1 and 2 are run in parallel. Thus, when f # is run large number of times, we see both 1 and 2 values assigned to # variable v. self.assertSetEqual(var_values, set((1, 2))) def testIndependentOpsInLoop(self): v = resource_variable_ops.ResourceVariable(0) self.evaluate(variables.global_variables_initializer()) @def_function.function def f(): for i in math_ops.range(3): ops.get_default_graph().experimental_acd_manager.run_independently( gen_resource_variable_ops.assign_variable_op(v.handle, i)) self.evaluate(f()) # TODO(mdan): Find a more robust way to test in loops. 
self.assertEqual( self.evaluate( resource_variable_ops.read_variable_op(v.handle, dtypes.int32)), 2) def testNoControlDepsBetweenVariableReads(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) with acd.AutomaticControlDependencies(): read_op1 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op read_op2 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) self.assertNotIn(read_op1, read_op2.control_inputs) self.assertNotIn(read_op2, read_op1.control_inputs) def testVariableReadThenWrite(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) with acd.AutomaticControlDependencies(): read_op1 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op read_op2 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op assign_op = gen_resource_variable_ops.assign_variable_op( v.handle, v + 1) # Writes should have control deps from "all" reads since last write # or start of the code block. self.assertIn(read_op1, assign_op.control_inputs) self.assertIn(read_op2, assign_op.control_inputs) # There should be no control deps between reads. self.assertNotIn(read_op1, read_op2.control_inputs) self.assertNotIn(read_op2, read_op1.control_inputs) def testVariableWriteThenRead(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) with acd.AutomaticControlDependencies(): assign_op = gen_resource_variable_ops.assign_variable_op( v.handle, v + 1) read_op1 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op read_op2 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op # Reads should have a control dep from the last write. self.assertIn(assign_op, read_op1.control_inputs) self.assertIn(assign_op, read_op2.control_inputs) # There should be no control deps between reads. self.assertNotIn(read_op1, read_op2.control_inputs) self.assertNotIn(read_op2, read_op1.control_inputs) def testIdentityPassThrough(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) with acd.AutomaticControlDependencies(): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) identity_handle = gen_array_ops.identity(v.handle) assign_op2 = gen_resource_variable_ops.assign_variable_op( v.handle, v + 1) read_op = gen_resource_variable_ops.read_variable_op( identity_handle, v.dtype).op # Read should have a control dep from second last write even # with Identity applied to resource. self.assertIn(assign_op2, read_op.control_inputs) def testVariableReadsInOpsWithMustRun(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) with acd.AutomaticControlDependencies() as c: read_op = gen_resource_variable_ops.read_variable_op(v.handle, v.dtype).op # Read ops get added to control outputs only if they have consumers. 
c.mark_as_return(read_op.outputs[0]) self.assertIn(read_op, c.ops_which_must_run) def testVariableMultipleReadsAndWrites(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) with acd.AutomaticControlDependencies() as c: # 2 reads -> 2 writes -> 2 reads -> 2 writes. read_op1 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op read_op2 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op assign_op1 = gen_resource_variable_ops.assign_variable_op( v.handle, v + 1) assign_op2 = gen_resource_variable_ops.assign_variable_op( v.handle, v + 1) read_op3 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op read_op4 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op assign_op3 = gen_resource_variable_ops.assign_variable_op( v.handle, v + 1) assign_op4 = gen_resource_variable_ops.assign_variable_op( v.handle, v + 1) # Read ops get added to control outputs only if they have consumers. c.mark_as_return(read_op1.outputs[0]) c.mark_as_return(read_op2.outputs[0]) c.mark_as_return(read_op3.outputs[0]) c.mark_as_return(read_op4.outputs[0]) # Verify the control edges. self.assertIn(read_op1, assign_op1.control_inputs) self.assertIn(read_op2, assign_op1.control_inputs) self.assertIn(assign_op1, assign_op2.control_inputs) self.assertIn(assign_op2, read_op3.control_inputs) self.assertIn(assign_op2, read_op4.control_inputs) self.assertIn(read_op3, assign_op3.control_inputs) self.assertIn(read_op4, assign_op3.control_inputs) self.assertIn(assign_op3, assign_op4.control_inputs) # There should be no control deps between reads. read_ops = [read_op1, read_op2, read_op3, read_op4] for src_op, tgt_op in itertools.product(read_ops, read_ops): self.assertNotIn(src_op, tgt_op.control_inputs) # Reads must be in `ops_which_must_run`. self.assertIn(read_op1, c.ops_which_must_run) self.assertIn(read_op2, c.ops_which_must_run) self.assertIn(read_op3, c.ops_which_must_run) self.assertIn(read_op4, c.ops_which_must_run) # Last write must be in `ops_which_must_run`. self.assertIn(assign_op4, c.ops_which_must_run) def testSendInOpsWithMustRun(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) with acd.AutomaticControlDependencies() as c: send_op = gen_sendrecv_ops.send(v, "x", "/", 0, "/") # Send must be in `ops_which_must_run`. self.assertIn(send_op, c.ops_which_must_run) def _testVariableReadInFunctionalOp(self, build_functional_op, op_type): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) @def_function.function def read_var_in_while(): gen_resource_variable_ops.read_variable_op( v.handle, v.dtype, name="read1") result = build_functional_op(v) gen_resource_variable_ops.read_variable_op( v.handle, v.dtype, name="read2") gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return result func_graph = read_var_in_while.get_concrete_function().graph assert len(func_graph.inputs) == 1 def get_op(op_type, sub_name): operations = [ op for op in func_graph.get_operations() if op.type == op_type and sub_name in op.name ] assert len(operations) == 1 return operations[0] read1 = get_op("ReadVariableOp", "read1") functional_op = get_op(op_type, "") read2 = get_op("ReadVariableOp", "read2") assign_op = get_op("AssignVariableOp", "") # Since the functional op only has reads, previous reads e.g. 
read1 do not\ # have a control edge to it and next future reads e.g. read2 do not have a # control edge from it. self.assertNotIn(read1, functional_op.control_inputs) self.assertNotIn(functional_op, read2.control_inputs) self.assertIn(read1, assign_op.control_inputs) self.assertIn(read2, assign_op.control_inputs) self.assertIn(functional_op, assign_op.control_inputs) def testVariableReadInWhileLoop(self): def build_functional_op(v): def body(_): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return while_loop.while_loop( lambda i: True, body, [0.0], maximum_iterations=1) self._testVariableReadInFunctionalOp(build_functional_op, "While") def testVariableReadInCondTrueBranch(self): def build_functional_op(v): def then_branch(): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) def else_branch(): return array_ops.zeros([], v.dtype) return cond.cond( constant_op.constant(True), then_branch, else_branch) self._testVariableReadInFunctionalOp(build_functional_op, "If") def testVariableReadInCondFalseBranch(self): def build_functional_op(v): def then_branch(): return array_ops.zeros([], v.dtype) def else_branch(): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return cond.cond( constant_op.constant(False), then_branch, else_branch) self._testVariableReadInFunctionalOp(build_functional_op, "If") def testVariableReadInCaseBranch0(self): def build_functional_op(v): def branch0(): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) def branch1(): return array_ops.zeros([], v.dtype) return control_flow_switch_case.switch_case( constant_op.constant(0), [branch0, branch1]) self._testVariableReadInFunctionalOp(build_functional_op, "Case") def testVariableReadInCaseBranch1(self): def build_functional_op(v): def branch0(): return array_ops.zeros([], v.dtype) def branch1(): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return control_flow_switch_case.switch_case( constant_op.constant(0), [branch0, branch1]) self._testVariableReadInFunctionalOp(build_functional_op, "Case") def testVariableReadInFunction(self): def build_functional_op(v): @def_function.function def fn_with_read(): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return fn_with_read() self._testVariableReadInFunctionalOp(build_functional_op, "StatefulPartitionedCall") def testVariableReadInNestedFunction(self): def build_functional_op(v): @def_function.function def fn_with_read(): @def_function.function def inner_fn(): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return inner_fn() return fn_with_read() self._testVariableReadInFunctionalOp(build_functional_op, "StatefulPartitionedCall") def testVariableReadInWhileInInnerFunc(self): def build_functional_op(v): @def_function.function def fn_with_read(): @def_function.function def inner_fn(): def body(_): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return while_loop.while_loop( lambda i: True, body, [0.0], maximum_iterations=1) return inner_fn() return fn_with_read() self._testVariableReadInFunctionalOp(build_functional_op, "StatefulPartitionedCall") def testVariableReadInCondInInnerFunc(self): def build_functional_op(v): @def_function.function def fn_with_read(): @def_function.function def inner_fn(): def then_branch(): return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) def else_branch(): return array_ops.zeros([], v.dtype) return cond.cond( constant_op.constant(True), then_branch, else_branch) return 
inner_fn() return fn_with_read() self._testVariableReadInFunctionalOp(build_functional_op, "StatefulPartitionedCall") def _testVariableWriteInFunctionalOp(self, build_functional_op, op_type): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) @def_function.function def write_var_in_while(): gen_resource_variable_ops.read_variable_op( v.handle, v.dtype, name="read1") result = build_functional_op(v) gen_resource_variable_ops.read_variable_op( v.handle, v.dtype, name="read2") gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return result func_graph = write_var_in_while.get_concrete_function().graph assert len(func_graph.inputs) == 1 def get_op(op_type, sub_name): operations = [ op for op in func_graph.get_operations() if op.type == op_type and sub_name in op.name ] assert len(operations) == 1 return operations[0] read1 = get_op("ReadVariableOp", "read1") functional_op = get_op(op_type, "") read2 = get_op("ReadVariableOp", "read2") assign_op = get_op("AssignVariableOp", "") # Since the While has writes, it has control edges from previous reads # e.g. `read1` and to future reads(`read2`) and writes(`assign_op`). self.assertIn(read1, functional_op.control_inputs) self.assertIn(functional_op, read2.control_inputs) self.assertIn(read2, assign_op.control_inputs) self.assertIn(functional_op, assign_op.control_inputs) def testVariableWriteInWhileLoop(self): def build_functional_op(v): def body(_): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return while_loop.while_loop( lambda i: True, body, [0.0], maximum_iterations=1) self._testVariableWriteInFunctionalOp(build_functional_op, "While") def testVariableWriteInCondTrueBranch(self): def build_functional_op(v): def then_branch(): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) def else_branch(): return array_ops.zeros([], v.dtype) return cond.cond( constant_op.constant(True), then_branch, else_branch) self._testVariableWriteInFunctionalOp(build_functional_op, "If") def testVariableWriteInCondFalseBranch(self): def build_functional_op(v): def then_branch(): return array_ops.zeros([], v.dtype) def else_branch(): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return cond.cond( constant_op.constant(False), then_branch, else_branch) self._testVariableWriteInFunctionalOp(build_functional_op, "If") def testVariableWriteInCaseBranch0(self): def build_functional_op(v): def branch0(): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) def branch1(): return array_ops.zeros([], v.dtype) return control_flow_switch_case.switch_case( constant_op.constant(0), [branch0, branch1]) self._testVariableWriteInFunctionalOp(build_functional_op, "Case") def testVariableWriteInCaseBranch1(self): def build_functional_op(v): def branch0(): return array_ops.zeros([], v.dtype) def branch1(): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return control_flow_switch_case.switch_case( constant_op.constant(0), [branch0, branch1]) self._testVariableWriteInFunctionalOp(build_functional_op, "Case") def testVariableWriteInFunction(self): def build_functional_op(v): @def_function.function def fn_with_write(): 
gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return fn_with_write() self._testVariableWriteInFunctionalOp(build_functional_op, "StatefulPartitionedCall") def testVariableWriteInNestedFunction(self): def build_functional_op(v): @def_function.function def fn_with_write(): @def_function.function def inner_fn(): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return inner_fn() return fn_with_write() self._testVariableWriteInFunctionalOp(build_functional_op, "StatefulPartitionedCall") def testVariableWriteInWhileInInnerFunc(self): def build_functional_op(v): @def_function.function def fn_with_write(): @def_function.function def inner_fn(): def body(_): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) return while_loop.while_loop( lambda i: True, body, [0.0], maximum_iterations=1) return inner_fn() return fn_with_write() self._testVariableWriteInFunctionalOp(build_functional_op, "StatefulPartitionedCall") def testVariableWriteInCondInInnerFunc(self): def build_functional_op(v): @def_function.function def fn_with_write(): @def_function.function def inner_fn(): def then_branch(): gen_resource_variable_ops.assign_variable_op(v.handle, v + 1) return gen_resource_variable_ops.read_variable_op(v.handle, v.dtype) def else_branch(): return array_ops.zeros([], v.dtype) return cond.cond( constant_op.constant(True), then_branch, else_branch) return inner_fn() return fn_with_write() self._testVariableWriteInFunctionalOp(build_functional_op, "StatefulPartitionedCall") @test_util.run_v1_only("b/120545219") def testCondMustRun(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) p = array_ops.placeholder(dtype=dtypes.bool) with acd.AutomaticControlDependencies() as c: def true_fn(): v.assign(v + 1) return 0.0 def false_fn(): v.assign(v + 4) return 1.0 cond.cond(p, true_fn, false_fn) val = v.read_value() val = c.mark_as_return(val) self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0) self.assertAllEqual(val.eval(feed_dict={p: True}), 6.0) @test_util.run_v1_only("b/120545219") def testCondMustRunSeparateRead(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) p = array_ops.placeholder(dtype=dtypes.bool) with acd.AutomaticControlDependencies() as c: def true_fn(): v.assign(v + 1) return 0.0 def false_fn(): v.assign(v + 4) return 1.0 cond.cond(p, true_fn, false_fn) one = constant_op.constant(1.0) one = c.mark_as_return(one) one.eval(feed_dict={p: False}) self.assertAllEqual(v.read_value(), 5.0) one.eval(feed_dict={p: True}) self.assertAllEqual(v.read_value(), 6.0) @test_util.run_v1_only("b/120545219") def testCondNested(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) p = array_ops.placeholder(dtype=dtypes.bool) q = array_ops.placeholder(dtype=dtypes.bool) with acd.AutomaticControlDependencies() as c: def true_fn(): v.assign(v + 1, name="true") return 1.0 def false_fn(): def inner_true_fn(): v.assign(v * 2, name="false_true") return 2.0 def inner_false_fn(): v.assign(v * 3, name="false_false") return 3.0 cond.cond(q, inner_true_fn, 
inner_false_fn) return 1.0 cond.cond(p, true_fn, false_fn) with ops.name_scope("final"): val = v.read_value() val = c.mark_as_return(val) self.assertAllEqual(val.eval(feed_dict={p: False, q: False}), 3.0) self.assertAllEqual(val.eval(feed_dict={p: False, q: True}), 6.0) self.assertAllEqual(val.eval(feed_dict={p: True, q: True}), 7.0) self.assertAllEqual(val.eval(feed_dict={p: True, q: False}), 8.0) @test_util.run_v1_only("b/120545219") def testCondOneBranch(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) p = array_ops.placeholder(dtype=dtypes.bool) with acd.AutomaticControlDependencies() as c: def true_fn(): return 0.0 def false_fn(): v.assign(v + 4) return 1.0 cond.cond(p, true_fn, false_fn) val = v.read_value() val = c.mark_as_return(val) self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0) self.assertAllEqual(val.eval(feed_dict={p: True}), 5.0) @test_util.run_v1_only("b/120545219") def testCondOneBranchUpdateBefore(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) p = array_ops.placeholder(dtype=dtypes.bool) with acd.AutomaticControlDependencies() as c: v.assign(v * 2) def true_fn(): return 0.0 def false_fn(): v.assign(v + 4) return 1.0 cond.cond(p, true_fn, false_fn) val = v.read_value() val = c.mark_as_return(val) self.assertAllEqual(val.eval(feed_dict={p: False}), 6.0) self.assertAllEqual(val.eval(feed_dict={p: True}), 12.0) @test_util.run_v1_only("b/120545219") def testCondOneBranchUpdateAfter(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) p = array_ops.placeholder(dtype=dtypes.bool) with acd.AutomaticControlDependencies() as c: def true_fn(): return 0.0 def false_fn(): v.assign(v + 4) return 1.0 cond.cond(p, true_fn, false_fn) v.assign(v * 2) val = v.read_value() val = c.mark_as_return(val) self.assertAllEqual(val.eval(feed_dict={p: False}), 10.0) self.assertAllEqual(val.eval(feed_dict={p: True}), 20.0) def testFunctionWhileLoopWithCapturedLoopVars(self): n = 3 x = constant_op.constant(list(range(n))) @def_function.function def loop(): c = lambda i, x: i < n b = lambda i, x: (i + 1, x + 1) i, out = while_loop.while_loop(c, b, (0, x)) return i, out i, out = loop() self.assertEqual(int(i), 3) self.assertAllEqual(out, [3, 4, 5]) def testDecorator(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) @acd.automatic_control_dependencies def f(): v.assign(v + 1) v.assign(2 * v) return v.read_value() self.assertAllEqual(f(), 4.0) def testOptimizerInFunction(self): def loss(v): return v**2 optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0) @def_function.function def train(): grad = backprop.implicit_grad(loss)(self.v) optimizer.apply_gradients(grad) return self.v.read_value() self.v = resource_variable_ops.ResourceVariable(1.0) value = train() self.assertEqual(value.numpy(), -1.0) def testReturningNonTensorRaisesError(self): optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0) optimizer.apply_gradients = def_function.function(optimizer.apply_gradients) v = resource_variable_ops.ResourceVariable(1.0) grad = backprop.implicit_grad(lambda v: v**2)(v) with self.assertRaisesRegex(TypeError, ".*must return zero or 
more Tensors.*"): # TODO(akshayka): We might want to allow defun-ing Python functions # that return operations (and just execute the op instead of running it). optimizer.apply_gradients(grad) # TODO(b/111663004): This should work when the outer context is graph # building. def testOptimizerNonSlotVarsInFunctionNoError(self): def loss(v): return v**2 optimizer = adam.AdamOptimizer(learning_rate=1.0) @def_function.function def train(): grad = backprop.implicit_grad(loss)(self.v) optimizer.apply_gradients(grad) return self.v.read_value() self.v = resource_variable_ops.ResourceVariable(1.0) train() def testOptimizerInFunctionWithCapturedVariable(self): v = resource_variable_ops.ResourceVariable(1.0) def loss(): return v**2 optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0) @def_function.function def train(): grad = backprop.implicit_grad(loss)() optimizer.apply_gradients(grad) train() self.assertEqual(v.numpy(), -1.0) def testRepeatedResourceInput(self): var = resource_variable_ops.ResourceVariable(1.0) @def_function.function def inner(var1, var2): return (resource_variable_ops.read_variable_op(var1, dtypes.float32) + resource_variable_ops.read_variable_op(var2, dtypes.float32)) @def_function.function def outer(): return inner(var.handle, var.handle) self.assertEqual(self.evaluate(outer()), 2.0) def testManualControlDepMonitoringAttrNotAdded(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(1.0) self.evaluate(variables.global_variables_initializer()) with acd.AutomaticControlDependencies(): read_op1 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op read_op2 = gen_resource_variable_ops.read_variable_op( v.handle, v.dtype).op assign_op = gen_resource_variable_ops.assign_variable_op( v.handle, v + 1) # Writes should have control deps automatically added from "all" reads # since last write or start of the code block. self.assertIn(read_op1, assign_op.control_inputs) self.assertIn(read_op2, assign_op.control_inputs) # But, we shouldn't add the monitoring attribute in this case. with self.assertRaises(ValueError): assign_op.get_attr("_has_manual_control_dependencies") with self.assertRaises(ValueError): read_op1.get_attr("_has_manual_control_dependencies") with self.assertRaises(ValueError): read_op2.get_attr("_has_manual_control_dependencies") if __name__ == "__main__": ops.enable_eager_execution() test.main()
AutomaticControlDependenciesTest
python
huggingface__transformers
src/transformers/models/longcat_flash/modular_longcat_flash.py
{ "start": 14657, "end": 17859 }
class ____(DeepseekV3Model): _keys_to_ignore_on_load_unexpected = [r"model\.mtp.*"] def __init__(self, config): super().__init__(config) self.layers = nn.ModuleList( [LongcatFlashDecoderLayer(config, layer_idx) for layer_idx in range(config.num_layers)] ) # Each layer above has 2 sublayers, config hack to have a correct cache (to avoid a checkpoint change) self.head_dim = config.head_dim # For CI happiness (we didn't convert so head_dim is not directly used) self.config.num_hidden_layers = 2 * config.num_layers self.norm = LongcatFlashRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = LongcatFlashRotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, cache_position: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, **kwargs: Unpack[TransformersKwargs], ): if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds: torch.Tensor = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position: torch.Tensor = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) hidden_states = inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers[: self.config.num_layers]: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.norm(hidden_states) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=None, attentions=None, )
LongcatFlashModel
python
django__django
tests/postgres_tests/test_operations.py
{ "start": 9570, "end": 15153 }
class ____(PostgreSQLTestCase): app_label = "test_allow_create_extention" @override_settings(DATABASE_ROUTERS=[NoMigrationRouter()]) def test_no_allow_migrate(self): operation = CreateExtension("tablefunc") self.assertEqual( operation.formatted_description(), "+ Creates extension tablefunc" ) project_state = ProjectState() new_state = project_state.clone() # Don't create an extension. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_forwards( self.app_label, editor, project_state, new_state ) self.assertEqual(len(captured_queries), 0) # Reversal. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_backwards( self.app_label, editor, new_state, project_state ) self.assertEqual(len(captured_queries), 0) def test_allow_migrate(self): operation = CreateExtension("tablefunc") self.assertEqual( operation.migration_name_fragment, "create_extension_tablefunc" ) project_state = ProjectState() new_state = project_state.clone() # Create an extension. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_forwards( self.app_label, editor, project_state, new_state ) self.assertEqual(len(captured_queries), 4) self.assertIn("CREATE EXTENSION IF NOT EXISTS", captured_queries[1]["sql"]) # Reversal. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_backwards( self.app_label, editor, new_state, project_state ) self.assertEqual(len(captured_queries), 2) self.assertIn("DROP EXTENSION IF EXISTS", captured_queries[1]["sql"]) @override_settings(DATABASE_ROUTERS=[MigrateWhenHinted()]) def test_allow_migrate_based_on_hints(self): operation_no_hints = CreateExtension("tablefunc") self.assertEqual(operation_no_hints.hints, {}) operation_hints = CreateExtension("tablefunc", hints={"a_hint": True}) self.assertEqual(operation_hints.hints, {"a_hint": True}) project_state = ProjectState() new_state = project_state.clone() with ( CaptureQueriesContext(connection) as captured_queries, connection.schema_editor(atomic=False) as editor, ): operation_no_hints.database_forwards( self.app_label, editor, project_state, new_state ) self.assertEqual(len(captured_queries), 0) with ( CaptureQueriesContext(connection) as captured_queries, connection.schema_editor(atomic=False) as editor, ): operation_no_hints.database_backwards( self.app_label, editor, project_state, new_state ) self.assertEqual(len(captured_queries), 0) with ( CaptureQueriesContext(connection) as captured_queries, connection.schema_editor(atomic=False) as editor, ): operation_hints.database_forwards( self.app_label, editor, project_state, new_state ) self.assertEqual(len(captured_queries), 4) self.assertIn("CREATE EXTENSION IF NOT EXISTS", captured_queries[1]["sql"]) with ( CaptureQueriesContext(connection) as captured_queries, connection.schema_editor(atomic=False) as editor, ): operation_hints.database_backwards( self.app_label, editor, project_state, new_state ) self.assertEqual(len(captured_queries), 2) self.assertIn("DROP EXTENSION IF EXISTS", captured_queries[1]["sql"]) def test_create_existing_extension(self): operation = BloomExtension() self.assertEqual(operation.migration_name_fragment, "create_extension_bloom") project_state = ProjectState() new_state = project_state.clone() # Don't create an existing extension. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_forwards( self.app_label, editor, project_state, new_state ) self.assertEqual(len(captured_queries), 3) self.assertIn("SELECT", captured_queries[0]["sql"]) def test_drop_nonexistent_extension(self): operation = CreateExtension("tablefunc") project_state = ProjectState() new_state = project_state.clone() # Don't drop a nonexistent extension. with CaptureQueriesContext(connection) as captured_queries: with connection.schema_editor(atomic=False) as editor: operation.database_backwards( self.app_label, editor, project_state, new_state ) self.assertEqual(len(captured_queries), 1) self.assertIn("SELECT", captured_queries[0]["sql"]) @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific tests.")
CreateExtensionTests
python
doocs__leetcode
solution/1500-1599/1512.Number of Good Pairs/Solution.py
{ "start": 0, "end": 205 }
class ____: def numIdenticalPairs(self, nums: List[int]) -> int: ans = 0 cnt = Counter() for x in nums: ans += cnt[x] cnt[x] += 1 return ans
Solution
python
cython__cython
Cython/Compiler/Optimize.py
{ "start": 85994, "end": 87346 }
class ____(Visitor.NodeRefCleanupMixin, Visitor.EnvTransform): visit_Node = Visitor.VisitorTransform.recurse_to_children def get_constant_value_node(self, name_node): if name_node.cf_state is None: return None if name_node.cf_state.cf_is_null: return None entry = self.current_env().lookup(name_node.name) if not entry or (not entry.cf_assignments or len(entry.cf_assignments) != 1): # not just a single assignment in all closures return None return entry.cf_assignments[0].rhs def visit_SimpleCallNode(self, node): self.visitchildren(node) if not self.current_directives.get('optimize.inline_defnode_calls'): return node function_name = node.function if not function_name.is_name: return node function = self.get_constant_value_node(function_name) if not isinstance(function, ExprNodes.PyCFunctionNode): return node inlined = ExprNodes.InlinedDefNodeCallNode( node.pos, function_name=function_name, function=function, args=node.args, generator_arg_tag=node.generator_arg_tag) if inlined.can_be_inlined(): return self.replace(node, inlined) return node
InlineDefNodeCalls
python
getsentry__sentry
src/sentry/hybridcloud/services/tombstone/impl.py
{ "start": 201, "end": 446 }
class ____(RegionTombstoneService): def record_remote_tombstone(self, *, region_name: str, tombstone: RpcTombstone) -> None: RegionTombstone.record_delete(tombstone.table_name, tombstone.identifier)
DatabaseBackedRegionTombstoneService
python
mlflow__mlflow
dev/clint/tests/rules/test_redundant_test_docstring.py
{ "start": 3192, "end": 3585 }
class ____: """Line1 Line2""" pass ''' config = Config(select={RedundantTestDocstring.name}) violations = lint_file(Path("test_multiline.py"), code, config, index_path) assert len(violations) == 0 def test_error_message_content(index_path: Path) -> None: code = '''def test_data_processing_validation(): """Test data processing.""" pass
TestCompactMultiline
python
astropy__astropy
astropy/time/tests/test_basic.py
{ "start": 54455, "end": 57080 }
class ____: """Test issues related to copying and replicating data""" def test_immutable_input(self): """Internals are never mutable.""" jds = np.array([2450000.5], dtype=np.double) t = Time(jds, format="jd", scale="tai") assert allclose_jd(t.jd, jds) jds[0] = 2458654 assert not allclose_jd(t.jd, jds) mjds = np.array([50000.0], dtype=np.double) t = Time(mjds, format="mjd", scale="tai") assert allclose_jd(t.jd, [2450000.5]) mjds[0] = 0.0 assert allclose_jd(t.jd, [2450000.5]) def test_replicate(self): """Test replicate method""" t = Time(["2000:001"], format="yday", scale="tai", location=("45d", "45d")) t_yday = t.yday t_loc_x = t.location.x.copy() t2 = t.replicate() assert t.yday == t2.yday assert t.format == t2.format assert t.scale == t2.scale assert t.location == t2.location # This is not allowed publicly, but here we hack the internal time # and location values to show that t and t2 are sharing references. t2._time.jd1 += 100.0 # Need to delete the cached yday attributes (only an issue because # of the internal _time hack). del t.cache del t2.cache assert t.yday == t2.yday assert t.yday != t_yday # prove that it changed t2_loc_x_view = t2.location.x t2_loc_x_view[()] = 0 # use 0 to avoid having to give units assert t2.location.x == t2_loc_x_view assert t.location.x == t2.location.x assert t.location.x != t_loc_x # prove that it changed def test_copy(self): """Test copy method""" t = Time("2000:001", format="yday", scale="tai", location=("45d", "45d")) t_yday = t.yday t_loc_x = t.location.x.copy() t2 = t.copy() assert t.yday == t2.yday # This is not allowed publicly, but here we hack the internal time # and location values to show that t and t2 are not sharing references. t2._time.jd1 += 100.0 # Need to delete the cached yday attributes (only an issue because # of the internal _time hack). del t.cache del t2.cache assert t.yday != t2.yday assert t.yday == t_yday # prove that it did not change t2_loc_x_view = t2.location.x t2_loc_x_view[()] = 0 # use 0 to avoid having to give units assert t2.location.x == t2_loc_x_view assert t.location.x != t2.location.x assert t.location.x == t_loc_x # prove that it changed
TestCopyReplicate
python
huggingface__transformers
tests/models/layoutxlm/test_tokenization_layoutxlm.py
{ "start": 1459, "end": 90058 }
class ____(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "FacebookAI/xlm-roberta-base"
    tokenizer_class = LayoutXLMTokenizer
    rust_tokenizer_class = LayoutXLMTokenizer
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_filter = filter_non_english
    test_seq2seq = False
    test_sentencepiece = True
    maxDiff = None

    def get_words_and_boxes(self):
        words = ["a", "weirdly", "test"]
        boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]
        return words, boxes

    def get_words_and_boxes_batch(self):
        words = [["a", "weirdly", "test"], ["hello", "my", "name", "is", "bob"]]
        boxes = [
            [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
            [[961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]],
        ]
        return words, boxes

    def get_question_words_and_boxes(self):
        question = "what's his name?"
        words = ["a", "weirdly", "test"]
        boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]
        return question, words, boxes

    def get_question_words_and_boxes_batch(self):
        questions = ["what's his name?", "how is he called?"]
        words = [["a", "weirdly", "test"], ["what", "a", "laif", "gastn"]]
        boxes = [
            [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
            [[256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69]],
        ]
        return questions, words, boxes

    def get_empty_words_and_boxes(self):
        words = ["test", "empty", ""]
        boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]
        return words, boxes

    def get_empty_words_and_boxes_batch(self):
        words = [["test", "empty", ""], ["one", "more", "empty", ""]]
        boxes = [
            [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
            [[961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57]],
        ]
        return words, boxes

    def get_empty_question_words_and_boxes(self):
        question = ""
        words = ["test", "empty", ""]
        boxes = [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]]
        return question, words, boxes

    def get_empty_question_words_and_boxes_batch(self):
        questions = ["what's his name?", ""]
        words = [["test", "empty", ""], ["one", "more", "empty", ""]]
        boxes = [
            [[423, 237, 440, 251], [427, 272, 441, 287], [419, 115, 437, 129]],
            [[961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57]],
        ]
        return questions, words, boxes

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

        # Extract vocab from SentencePiece model
        extractor = SentencePieceExtractor(SAMPLE_VOCAB)
        vocab_ids, vocab_scores, merges = extractor.extract()

        # Create tokenizer from vocab
        tokenizer = LayoutXLMTokenizer(vocab=vocab_scores)
        tokenizer.save_pretrained(cls.tmpdirname)

    def convert_batch_encode_plus_format_to_encode_plus(self, batch_encode_plus_sequences):
        """Helper method to convert batch_encode_plus output to list of encode_plus outputs"""
        # Get the batch size
        first_key = list(batch_encode_plus_sequences.keys())[0]
        batch_size = len(batch_encode_plus_sequences[first_key])

        # Convert to list of dicts
        encode_plus_sequences = []
        for i in range(batch_size):
            single_sequence = {}
            for key, value in batch_encode_plus_sequences.items():
                if key != "encodings":  # Skip the encodings attribute
                    single_sequence[key] = value[i]
            encode_plus_sequences.append(single_sequence)
        return encode_plus_sequences

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00e9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    @unittest.skip(reason="Chat template tests don't play well with table/layout models.")
    def test_chat_template_batched(self):
        pass

    def test_bos_token_with_add_bos_token_true(self):
        # LayoutXLM requires pretokenized input with boxes
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words = ["hello", "world"]
                boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
                # LayoutXLM doesn't use add_bos_token, it uses post_processor
                # Just verify it can encode without error
                encoded = tokenizer.encode(words, boxes=boxes)
                self.assertIsInstance(encoded, list)

    def test_bos_token_with_add_bos_token_false(self):
        # LayoutXLM requires pretokenized input with boxes
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words = ["hello", "world"]
                boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
                # LayoutXLM doesn't use add_bos_token, it uses post_processor
                # Just verify it can encode without error
                encoded = tokenizer.encode(words, boxes=boxes)
                self.assertIsInstance(encoded, list)

    def test_pad_token_initialization(self):
        """Test that passing pad_token when creating a tokenizer works correctly."""
        # LayoutXLM requires pretokenized input with boxes
        tokenizer = self.get_tokenizer(pad_token="[PAD]")

        # Verify the pad_token was set correctly
        self.assertEqual(tokenizer.pad_token, "[PAD]")
        self.assertIsNotNone(tokenizer.pad_token_id)

        # Test with two sequences of different lengths to trigger padding
        seq_0 = ["Test", "this", "method"]
        seq_1 = ["With", "these", "inputs", "and", "some", "extra"]
        boxes_0 = [[1, 2, 3, 4] for _ in seq_0]
        boxes_1 = [[1, 2, 3, 4] for _ in seq_1]

        # Test padding works with the custom pad_token
        output_with_padding = tokenizer(
            [seq_0, seq_1],
            boxes=[boxes_0, boxes_1],
            padding=True,
        )

        # Check padding was applied correctly
        self.assertEqual(len(output_with_padding["input_ids"][0]), len(output_with_padding["input_ids"][1]))

    def test_encode_basic_padding(self):
        """Test basic left/right padding behavior using encode() method with max_length strategy."""
        tokenizer = self.get_tokenizer(do_lower_case=False)

        # LayoutXLM requires pretokenized input with boxes
        words = ["Sequence"]
        boxes = [[1, 2, 3, 4]]
        padding_size = 10

        # check correct behaviour if no pad_token_id exists and add it eventually
        self._check_no_pad_token_padding(tokenizer, words)

        padding_idx = tokenizer.pad_token_id

        # Test right padding
        encoded_sequence = tokenizer.encode(words, boxes=boxes)
        sequence_length = len(encoded_sequence)
        padded_sequence = tokenizer.encode(
            words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length"
        )
        padded_sequence_length = len(padded_sequence)
        assert sequence_length + padding_size == padded_sequence_length
        assert encoded_sequence + [padding_idx] * padding_size == padded_sequence

        # Test left padding
        tokenizer.padding_side = "left"
        encoded_sequence = tokenizer.encode(words, boxes=boxes)
        sequence_length = len(encoded_sequence)
        padded_sequence = tokenizer.encode(
            words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length"
        )
        padded_sequence_length = len(padded_sequence)
        assert sequence_length + padding_size == padded_sequence_length
        assert [padding_idx] * padding_size + encoded_sequence == padded_sequence

    # override test in `test_tokenization_common.py` because of the required input format of the `__call__` method of
    # this tokenizer
    def test_save_sentencepiece_tokenizer(self) -> None:
        if not self.test_sentencepiece:
            self.skipTest(reason="test_sentencepiece is set to False")

        # We want to verify that we will be able to save the tokenizer even if the
original files that were used to # build the tokenizer have been deleted in the meantime. words, boxes = self.get_words_and_boxes() tokenizer_1 = self.get_tokenizer() encoding_tokenizer_1 = tokenizer_1( words, boxes=boxes, ) tmpdirname_1 = tempfile.mkdtemp() tmpdirname_2 = tempfile.mkdtemp() tokenizer_1.save_pretrained(tmpdirname_1) tokenizer_2 = self.tokenizer_class.from_pretrained(tmpdirname_1) encoding_tokenizer_2 = tokenizer_2( words, boxes=boxes, ) shutil.rmtree(tmpdirname_1) tokenizer_2.save_pretrained(tmpdirname_2) tokenizer_3 = self.tokenizer_class.from_pretrained(tmpdirname_2) encoding_tokenizer_3 = tokenizer_3( words, boxes=boxes, ) shutil.rmtree(tmpdirname_2) self.assertEqual(encoding_tokenizer_1, encoding_tokenizer_2) self.assertEqual(encoding_tokenizer_1, encoding_tokenizer_3) def test_split_special_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: special_token = "<my_new_token>" special_sentence = f"Hey this is a {special_token} token" with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.get_tokenizer( pretrained_name, extra_special_tokens=[special_token], split_special_tokens=True, **kwargs ) # For LayoutXLM, tokenize works with strings (not requiring boxes) r_tokens_output = tokenizer_r.tokenize(special_sentence) self.assertTrue(special_token not in r_tokens_output) r_tokens_output_unsplit = tokenizer_r.tokenize(special_sentence, split_special_tokens=False) self.assertTrue(special_token in r_tokens_output_unsplit) tmpdirname = tempfile.mkdtemp() tokenizer_r.save_pretrained(tmpdirname) fast_from_saved = self.tokenizer_class.from_pretrained(tmpdirname) output_tokens_reloaded_split = fast_from_saved.tokenize(special_sentence) self.assertTrue(special_token not in output_tokens_reloaded_split) output_tokens_reloaded_unsplit = fast_from_saved.tokenize(special_sentence, split_special_tokens=False) self.assertTrue(special_token in output_tokens_reloaded_unsplit) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("microsoft/layoutxlm-base") question, words, boxes = self.get_question_words_and_boxes() text = tokenizer.encode( question.split(), boxes=[tokenizer.pad_token_box for _ in range(len(question.split()))], add_special_tokens=False, ) text_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_pair == [0] + text + [2] + [2] + text_2 + [2] def test_offsets_with_special_characters(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.get_tokenizer(pretrained_name, **kwargs) words, boxes = self.get_words_and_boxes() words[1] = tokenizer_r.mask_token tokens = tokenizer_r.encode_plus( words, boxes=boxes, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, ) expected_results = [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "▁a"), ((0, 6), tokenizer_r.mask_token), ((0, 4), "▁test"), ((0, 0), tokenizer_r.sep_token), ] self.assertEqual( [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]) ) self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"]) def test_add_special_tokens(self): tokenizers: list[LayoutXLMTokenizer] = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): special_token = 
"[SPECIAL_TOKEN]" special_token_box = [1000, 1000, 1000, 1000] tokenizer.add_special_tokens({"cls_token": special_token}) encoded_special_token = tokenizer.encode( [special_token], boxes=[special_token_box], add_special_tokens=False ) self.assertEqual(len(encoded_special_token), 1) decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True) self.assertTrue(special_token not in decoded) def test_add_tokens_tokenizer(self): tokenizers: list[LayoutXLMTokenizer] = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): vocab_size = tokenizer.vocab_size all_size = len(tokenizer) self.assertNotEqual(vocab_size, 0) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) new_toks = ["aaaaa", "bbbbbb", "cccccccccdddddddd"] added_toks = tokenizer.add_tokens(new_toks) vocab_size_2 = tokenizer.vocab_size all_size_2 = len(tokenizer) self.assertNotEqual(vocab_size_2, 0) self.assertEqual(vocab_size, vocab_size_2) self.assertEqual(added_toks, len(new_toks)) self.assertEqual(all_size_2, all_size + len(new_toks)) words = "aaaaa bbbbbb low cccccccccdddddddd l".split() boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))] tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) self.assertGreaterEqual(len(tokens), 4) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[-2], tokenizer.vocab_size - 1) new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} added_toks_2 = tokenizer.add_special_tokens(new_toks_2) vocab_size_3 = tokenizer.vocab_size all_size_3 = len(tokenizer) self.assertNotEqual(vocab_size_3, 0) self.assertEqual(vocab_size, vocab_size_3) self.assertEqual(added_toks_2, len(new_toks_2)) self.assertEqual(all_size_3, all_size_2 + len(new_toks_2)) words = ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l".split() boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))] tokens = tokenizer.encode( words, boxes=boxes, add_special_tokens=False, ) self.assertGreaterEqual(len(tokens), 6) self.assertGreater(tokens[0], tokenizer.vocab_size - 1) self.assertGreater(tokens[0], tokens[1]) self.assertGreater(tokens[-2], tokenizer.vocab_size - 1) self.assertGreater(tokens[-2], tokens[-3]) self.assertEqual(tokens[0], tokenizer.eos_token_id) self.assertEqual(tokens[-2], tokenizer.pad_token_id) @require_tokenizers def test_encode_decode_with_spaces(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() new_toks = [AddedToken("[ABC]", normalized=False), AddedToken("[DEF]", normalized=False)] tokenizer.add_tokens(new_toks) input = "[ABC][DEF][ABC][DEF]" if self.space_between_special_tokens: output = "[ABC] [DEF] [ABC] [DEF]" else: output = input encoded = tokenizer.encode(input.split(), boxes=boxes, add_special_tokens=False) decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens) self.assertIn(decoded, [output, output.lower()]) @parameterized.expand([(True,), (False,)]) def test_encode_plus_with_padding(self, use_padding_as_call_kwarg: bool): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() # check correct 
behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, words) padding_size = 10 padding_idx = tokenizer.pad_token_id encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_special_tokens_mask=True) input_ids = encoded_sequence["input_ids"] special_tokens_mask = encoded_sequence["special_tokens_mask"] sequence_length = len(input_ids) # Test 'longest' and 'no_padding' don't do anything tokenizer.padding_side = "right" not_padded_sequence = tokenizer.encode_plus( words, boxes=boxes, padding=False, return_special_tokens_mask=True, ) not_padded_input_ids = not_padded_sequence["input_ids"] not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"] not_padded_sequence_length = len(not_padded_input_ids) self.assertTrue(sequence_length == not_padded_sequence_length) self.assertTrue(input_ids == not_padded_input_ids) self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask) not_padded_sequence = tokenizer.encode_plus( words, boxes=boxes, padding=False, return_special_tokens_mask=True, ) not_padded_input_ids = not_padded_sequence["input_ids"] not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"] not_padded_sequence_length = len(not_padded_input_ids) self.assertTrue(sequence_length == not_padded_sequence_length) self.assertTrue(input_ids == not_padded_input_ids) self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask) # Test right padding tokenizer_kwargs_right = { "max_length": sequence_length + padding_size, "padding": "max_length", "return_special_tokens_mask": True, } if not use_padding_as_call_kwarg: tokenizer.padding_side = "right" else: tokenizer_kwargs_right["padding_side"] = "right" right_padded_sequence = tokenizer.encode_plus(words, boxes=boxes, **tokenizer_kwargs_right) right_padded_input_ids = right_padded_sequence["input_ids"] right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"] right_padded_sequence_length = len(right_padded_input_ids) self.assertTrue(sequence_length + padding_size == right_padded_sequence_length) self.assertTrue(input_ids + [padding_idx] * padding_size == right_padded_input_ids) self.assertTrue(special_tokens_mask + [1] * padding_size == right_padded_special_tokens_mask) # Test left padding tokenizer_kwargs_left = { "max_length": sequence_length + padding_size, "padding": "max_length", "return_special_tokens_mask": True, } if not use_padding_as_call_kwarg: tokenizer.padding_side = "left" else: tokenizer_kwargs_left["padding_side"] = "left" left_padded_sequence = tokenizer.encode_plus(words, boxes=boxes, **tokenizer_kwargs_left) left_padded_input_ids = left_padded_sequence["input_ids"] left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"] left_padded_sequence_length = len(left_padded_input_ids) self.assertTrue(sequence_length + padding_size == left_padded_sequence_length) self.assertTrue([padding_idx] * padding_size + input_ids == left_padded_input_ids) self.assertTrue([1] * padding_size + special_tokens_mask == left_padded_special_tokens_mask) if "token_type_ids" in tokenizer.model_input_names: token_type_ids = encoded_sequence["token_type_ids"] left_padded_token_type_ids = left_padded_sequence["token_type_ids"] right_padded_token_type_ids = right_padded_sequence["token_type_ids"] assert token_type_ids + [0] * padding_size == right_padded_token_type_ids assert [0] * padding_size + token_type_ids == left_padded_token_type_ids if "attention_mask" in tokenizer.model_input_names: attention_mask = 
encoded_sequence["attention_mask"] right_padded_attention_mask = right_padded_sequence["attention_mask"] left_padded_attention_mask = left_padded_sequence["attention_mask"] self.assertTrue(attention_mask + [0] * padding_size == right_padded_attention_mask) self.assertTrue([0] * padding_size + attention_mask == left_padded_attention_mask) def test_internal_consistency(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() tokens = [] for word in words: tokens.extend(tokenizer.tokenize(word)) ids = tokenizer.convert_tokens_to_ids(tokens) ids_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) self.assertListEqual(ids, ids_2) tokens_2 = tokenizer.convert_ids_to_tokens(ids) self.assertNotEqual(len(tokens_2), 0) text_2 = tokenizer.decode(ids) self.assertIsInstance(text_2, str) output_text = "a weirdly test" self.assertEqual(text_2, output_text) def test_mask_output(self): tokenizers = self.get_tokenizers(fast=False, do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() if ( tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer" and "token_type_ids" in tokenizer.model_input_names ): information = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True) sequences, mask = information["input_ids"], information["token_type_ids"] self.assertEqual(len(sequences), len(mask)) def test_number_of_added_tokens(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # test 1: single sequence words, boxes = self.get_words_and_boxes() sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) attached_sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=True) # Method is implemented (e.g. not GPT-2) if len(attached_sequences) != 2: self.assertEqual( tokenizer.num_special_tokens_to_add(pair=False), len(attached_sequences) - len(sequences) ) # test 2: two sequences question, words, boxes = self.get_question_words_and_boxes() sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=False) attached_sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=True) # Method is implemented (e.g. 
not GPT-2) if len(attached_sequences) != 2: self.assertEqual( tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences) ) def test_padding(self, max_length=50): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.get_tokenizer(pretrained_name, **kwargs) tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs) self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id) pad_token_id = tokenizer_p.pad_token_id # Encode - Simple input words, boxes = self.get_words_and_boxes() input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, padding="max_length") input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, padding="max_length") self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode(words, boxes=boxes, padding="longest") input_p = tokenizer_p.encode(words, boxes=boxes, padding=True) self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id) # Encode - Pair input question, words, boxes = self.get_question_words_and_boxes() input_r = tokenizer_r.encode(question, words, boxes=boxes, max_length=max_length, padding="max_length") input_p = tokenizer_p.encode(question, words, boxes=boxes, max_length=max_length, padding="max_length") self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.encode(question, words, boxes=boxes, padding=True) input_p = tokenizer_p.encode(question, words, boxes=boxes, padding="longest") self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id) # Encode_plus - Simple input words, boxes = self.get_words_and_boxes() input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length") input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, padding="max_length") self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) input_r = tokenizer_r.encode_plus(words, boxes=boxes, padding="longest") input_p = tokenizer_p.encode_plus(words, boxes=boxes, padding=True) self.assert_padded_input_match( input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id ) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) # Encode_plus - Pair input question, words, boxes = self.get_question_words_and_boxes() input_r = tokenizer_r.encode_plus( question, words, boxes=boxes, max_length=max_length, padding="max_length" ) input_p = tokenizer_p.encode_plus( question, words, boxes=boxes, max_length=max_length, padding="max_length" ) self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) input_r = tokenizer_r.encode_plus(question, words, boxes=boxes, padding="longest") input_p = tokenizer_p.encode_plus(question, words, boxes=boxes, padding=True) self.assert_padded_input_match( input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id ) self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) # Batch_encode_plus - Simple input words, boxes = self.get_words_and_boxes_batch() input_r = tokenizer_r.batch_encode_plus( words, boxes=boxes, max_length=max_length, padding="max_length", ) input_p = 
tokenizer_p.batch_encode_plus( words, boxes=boxes, max_length=max_length, padding="max_length", ) self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.batch_encode_plus( words, boxes=boxes, max_length=max_length, padding="longest", ) input_p = tokenizer_p.batch_encode_plus( words, boxes=boxes, max_length=max_length, padding=True, ) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes, padding="longest") input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes, padding=True) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) # Batch_encode_plus - Pair input questions, words, boxes = self.get_question_words_and_boxes_batch() input_r = tokenizer_r.batch_encode_plus( list(zip(questions, words)), is_pair=True, boxes=boxes, max_length=max_length, truncation=True, padding="max_length", ) input_p = tokenizer_p.batch_encode_plus( list(zip(questions, words)), is_pair=True, boxes=boxes, max_length=max_length, truncation=True, padding="max_length", ) self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) input_r = tokenizer_r.batch_encode_plus( list(zip(questions, words)), is_pair=True, boxes=boxes, padding=True, ) input_p = tokenizer_p.batch_encode_plus( list(zip(questions, words)), is_pair=True, boxes=boxes, padding="longest", ) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) # Using pad on single examples after tokenization words, boxes = self.get_words_and_boxes() input_r = tokenizer_r.encode_plus(words, boxes=boxes) input_r = tokenizer_r.pad(input_r) input_p = tokenizer_r.encode_plus(words, boxes=boxes) input_p = tokenizer_r.pad(input_p) self.assert_padded_input_match( input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id ) # Using pad on single examples after tokenization input_r = tokenizer_r.encode_plus(words, boxes=boxes) input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length") input_p = tokenizer_r.encode_plus(words, boxes=boxes) input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length") self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) # Using pad after tokenization words, boxes = self.get_words_and_boxes_batch() input_r = tokenizer_r.batch_encode_plus( words, boxes=boxes, ) input_r = tokenizer_r.pad(input_r) input_p = tokenizer_r.batch_encode_plus( words, boxes=boxes, ) input_p = tokenizer_r.pad(input_p) self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) # Using pad after tokenization words, boxes = self.get_words_and_boxes_batch() input_r = tokenizer_r.batch_encode_plus( words, boxes=boxes, ) input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length") input_p = tokenizer_r.batch_encode_plus( words, boxes=boxes, ) input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length") self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Test not batched words, boxes = self.get_words_and_boxes() encoded_sequences_1 = tokenizer.encode_plus(words, boxes=boxes) 
encoded_sequences_2 = tokenizer(words, boxes=boxes) self.assertEqual(encoded_sequences_1, encoded_sequences_2) # Test not batched pairs question, words, boxes = self.get_question_words_and_boxes() encoded_sequences_1 = tokenizer.encode_plus(words, boxes=boxes) encoded_sequences_2 = tokenizer(words, boxes=boxes) self.assertEqual(encoded_sequences_1, encoded_sequences_2) # Test batched words, boxes = self.get_words_and_boxes_batch() encoded_sequences_1 = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes) encoded_sequences_2 = tokenizer(words, boxes=boxes) self.assertEqual(encoded_sequences_1, encoded_sequences_2) def test_batch_encode_plus_batch_sequence_length(self): # Tests that all encoded values have the correct size tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes_batch() encoded_sequences = [ tokenizer.encode_plus(words_example, boxes=boxes_example) for words_example, boxes_example in zip(words, boxes) ] encoded_sequences_batch = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes, padding=False) self.assertListEqual( encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) ) maximum_length = len( max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len) ) # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, words) encoded_sequences_padded = [ tokenizer.encode_plus( words_example, boxes=boxes_example, max_length=maximum_length, padding="max_length" ) for words_example, boxes_example in zip(words, boxes) ] encoded_sequences_batch_padded = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, padding=True ) self.assertListEqual( encoded_sequences_padded, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded), ) # check 'longest' is unsensitive to a max length encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, padding=True ) encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding="longest" ) for key in encoded_sequences_batch_padded_1: self.assertListEqual( encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key], ) # check 'no_padding' is unsensitive to a max length encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, padding=False ) encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, max_length=maximum_length + 10, padding=False ) for key in encoded_sequences_batch_padded_1: self.assertListEqual( encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key], ) @unittest.skip(reason="batch_encode_plus does not handle overflowing tokens.") def test_batch_encode_plus_overflowing_tokens(self): pass def test_batch_encode_plus_padding(self): # Test that padded sequences are equivalent between batch_encode_plus and encode_plus # Right padding tests tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes_batch() max_length = 100 # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, words) encoded_sequences = [ tokenizer.encode_plus( 
words_example, boxes=boxes_example, max_length=max_length, padding="max_length" ) for words_example, boxes_example in zip(words, boxes) ] encoded_sequences_batch = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length" ) self.assertListEqual( encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) ) # Left padding tests tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokenizer.padding_side = "left" words, boxes = self.get_words_and_boxes_batch() max_length = 100 # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, words) encoded_sequences = [ tokenizer.encode_plus( words_example, boxes=boxes_example, max_length=max_length, padding="max_length" ) for words_example, boxes_example in zip(words, boxes) ] encoded_sequences_batch = tokenizer.batch_encode_plus( words, is_pair=False, boxes=boxes, max_length=max_length, padding="max_length" ) self.assertListEqual( encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) ) def test_padding_to_multiple_of(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.pad_token is None: self.skipTest(reason="No padding token.") else: words, boxes = self.get_words_and_boxes() # empty_tokens = tokenizer([""], [[]], padding=True, pad_to_multiple_of=8) normal_tokens = tokenizer(words, boxes=boxes, padding=True, pad_to_multiple_of=8) # for key, value in empty_tokens.items(): # self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") normal_tokens = tokenizer(words, boxes=boxes, pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # Should also work with truncation normal_tokens = tokenizer(words, boxes=boxes, padding=True, truncation=True, pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # truncation to something which is not a multiple of pad_to_multiple_of raises an error self.assertRaises( ValueError, tokenizer.__call__, words, boxes=boxes, padding=True, truncation=True, max_length=12, pad_to_multiple_of=8, ) def test_tokenizer_slow_store_full_signature(self): signature = inspect.signature(self.tokenizer_class.__init__) tokenizer = self.get_tokenizer() for parameter_name, parameter in signature.parameters.items(): if parameter.default != inspect.Parameter.empty: self.assertIn(parameter_name, tokenizer.init_kwargs) def test_build_inputs_with_special_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.get_tokenizer(pretrained_name, **kwargs) # Input tokens id words, boxes = self.get_words_and_boxes() input_simple = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=False) input_pair = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=False) # Generate output _ = tokenizer_r.build_inputs_with_special_tokens(input_simple) # Generate pair output _ = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair) def 
test_special_tokens_mask_input_pairs(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) encoded_sequence_dict = tokenizer.encode_plus( words, boxes=boxes, add_special_tokens=True, return_special_tokens_mask=True, # add_prefix_space=False, ) encoded_sequence_w_special = encoded_sequence_dict["input_ids"] special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) filtered_sequence = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special) ] filtered_sequence = [x for x in filtered_sequence if x is not None] self.assertEqual(encoded_sequence, filtered_sequence) def test_special_tokens_mask(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() # Testing single inputs encoded_sequence = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) encoded_sequence_dict = tokenizer.encode_plus( words, boxes=boxes, add_special_tokens=True, return_special_tokens_mask=True ) encoded_sequence_w_special = encoded_sequence_dict["input_ids"] special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]] self.assertEqual(encoded_sequence, filtered_sequence) def test_save_and_load_tokenizer(self): # safety check on max_len default value so we are sure the test works tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): self.assertNotEqual(tokenizer.model_max_length, 42) # Now let's start the test tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc words, boxes = self.get_words_and_boxes() tmpdirname = tempfile.mkdtemp() before_tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False) before_vocab = tokenizer.get_vocab() tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(words, boxes=boxes, add_special_tokens=False) after_vocab = after_tokenizer.get_vocab() self.assertListEqual(before_tokens, after_tokens) self.assertDictEqual(before_vocab, after_vocab) shutil.rmtree(tmpdirname) @unittest.skip(reason="Not implemented") def test_right_and_left_truncation(self): pass def test_right_and_left_padding(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() sequence = "Sequence" padding_size = 10 # check correct behaviour if no pad_token_id exists and add it eventually self._check_no_pad_token_padding(tokenizer, sequence) padding_idx = tokenizer.pad_token_id # RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True tokenizer.padding_side = "right" encoded_sequence = tokenizer.encode(words, boxes=boxes) sequence_length = len(encoded_sequence) padded_sequence = 
tokenizer.encode( words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length" ) padded_sequence_length = len(padded_sequence) assert sequence_length + padding_size == padded_sequence_length assert encoded_sequence + [padding_idx] * padding_size == padded_sequence # LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True tokenizer.padding_side = "left" encoded_sequence = tokenizer.encode(words, boxes=boxes) sequence_length = len(encoded_sequence) padded_sequence = tokenizer.encode( words, boxes=boxes, max_length=sequence_length + padding_size, padding="max_length" ) padded_sequence_length = len(padded_sequence) assert sequence_length + padding_size == padded_sequence_length assert [padding_idx] * padding_size + encoded_sequence == padded_sequence # RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding' encoded_sequence = tokenizer.encode(words, boxes=boxes) sequence_length = len(encoded_sequence) tokenizer.padding_side = "right" padded_sequence_right = tokenizer.encode(words, boxes=boxes, padding=True) padded_sequence_right_length = len(padded_sequence_right) assert sequence_length == padded_sequence_right_length assert encoded_sequence == padded_sequence_right tokenizer.padding_side = "left" padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding="longest") padded_sequence_left_length = len(padded_sequence_left) assert sequence_length == padded_sequence_left_length assert encoded_sequence == padded_sequence_left tokenizer.padding_side = "right" padded_sequence_right = tokenizer.encode(words, boxes=boxes) padded_sequence_right_length = len(padded_sequence_right) assert sequence_length == padded_sequence_right_length assert encoded_sequence == padded_sequence_right tokenizer.padding_side = "left" padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding=False) padded_sequence_left_length = len(padded_sequence_left) assert sequence_length == padded_sequence_left_length assert encoded_sequence == padded_sequence_left def test_token_type_ids(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # test 1: single sequence words, boxes = self.get_words_and_boxes() output = tokenizer(words, boxes=boxes, return_token_type_ids=True) # Assert that the token type IDs have the same length as the input IDs self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"])) # Assert that the token type IDs have the same length as the attention mask self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"])) self.assertIn(0, output["token_type_ids"]) self.assertNotIn(1, output["token_type_ids"]) # test 2: two sequences (question + words) question, words, boxes = self.get_question_words_and_boxes() output = tokenizer(question, words, boxes, return_token_type_ids=True) # Assert that the token type IDs have the same length as the input IDs self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"])) # Assert that the token type IDs have the same length as the attention mask self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"])) self.assertIn(0, output["token_type_ids"]) self.assertNotIn(1, output["token_type_ids"]) def test_offsets_mapping(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = 
self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) text = ["a", "wonderful", "test"] boxes = [[1, 8, 12, 20] for _ in range(len(text))] # No pair tokens_with_offsets = tokenizer_r.encode_plus( text, boxes=boxes, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True, ) added_tokens = tokenizer_r.num_special_tokens_to_add(False) offsets = tokens_with_offsets["offset_mapping"] # Assert there is the same number of tokens and offsets self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"])) # Assert there is online added_tokens special_tokens self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens) # Pairs text = "what's his name" pair = ["a", "wonderful", "test"] boxes = [[1, 8, 12, 20] for _ in range(len(pair))] tokens_with_offsets = tokenizer_r.encode_plus( text, pair, boxes=boxes, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True, ) added_tokens = tokenizer_r.num_special_tokens_to_add(True) offsets = tokens_with_offsets["offset_mapping"] # Assert there is the same number of tokens and offsets self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"])) # Assert there is online added_tokens special_tokens self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens) @require_torch @slow def test_torch_encode_plus_sent_to_model(self): import torch from transformers import MODEL_MAPPING, TOKENIZER_MAPPING MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING) tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING: self.skipTest(f"{tokenizer.__class__} is not in the MODEL_TOKENIZER_MAPPING") config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__] config = config_class() if config.is_encoder_decoder or config.pad_token_id is None: self.skipTest(reason="Model is an encoder-decoder or has no pad token id set.") model = model_class(config) # Make sure the model contains at least the full vocabulary size in its embedding matrix is_using_common_embeddings = hasattr(model.get_input_embeddings(), "weight") assert ( (model.get_input_embeddings().weight.shape[0] >= len(tokenizer)) if is_using_common_embeddings else True ) # Build sequence words, boxes = self.get_words_and_boxes() encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_tensors="pt") batch_encoded_sequence = tokenizer.batch_encode_plus( [words, words], [boxes, boxes], return_tensors="pt" ) # This should not fail with torch.no_grad(): # saves some time model(**encoded_sequence) model(**batch_encoded_sequence) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: self.skipTest(reason="test_rust_tokenizer is set to False") if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions self.skipTest(reason="test_slow_tokenizer is set to False") def test_tokenization_python_rust_equals(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions self.skipTest(reason="test_slow_tokenizer is set to False") def test_embedded_special_tokens(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions self.skipTest(reason="test_slow_tokenizer is set to False") def 
test_compare_add_special_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False) words, boxes = self.get_words_and_boxes() # tokenize() no_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=False) with_special_tokens = tokenizer_r.tokenize(" ".join(words), add_special_tokens=True) self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add) # encode() no_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=False) with_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=True) self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add) # encode_plus() no_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=False) with_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=True) for key in no_special_tokens: self.assertEqual( len(no_special_tokens[key]), len(with_special_tokens[key]) - simple_num_special_tokens_to_add, ) # # batch_encode_plus words, boxes = self.get_words_and_boxes_batch() no_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=False) with_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=True) for key in no_special_tokens: for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]): self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add) @slow def test_layoutxlm_truncation_integration_test(self): words, boxes = self.get_words_and_boxes() tokenizer = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base", model_max_length=512) for i in range(12, 512): new_encoded_inputs = tokenizer.encode(words, boxes=boxes, max_length=i, truncation=True) # Ensure that the input IDs are less than the max length defined. self.assertLessEqual(len(new_encoded_inputs), i) tokenizer.model_max_length = 20 new_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True) dropped_encoded_inputs = tokenizer.encode(words, boxes=boxes, truncation=True) # Ensure that the input IDs are still truncated when no max_length is specified self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs) self.assertLessEqual(len(new_encoded_inputs), 20) def test_sequence_ids(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: if not tokenizer.is_fast: continue with self.subTest(f"{tokenizer.__class__.__name__}"): seq_0 = "Test this method." 
seq_1 = ["With", "these", "inputs."] boxes = [[1000, 1000, 1000, 1000] for _ in range(len(seq_1))] # We want to have sequence 0 and sequence 1 are tagged # respectively with 0 and 1 token_ids # (regardless of whether the model use token type ids) # We use this assumption in the QA pipeline among other place output = tokenizer(seq_0.split(), boxes=boxes) self.assertIn(0, output.sequence_ids()) output = tokenizer(seq_0, seq_1, boxes=boxes) self.assertIn(0, output.sequence_ids()) self.assertIn(1, output.sequence_ids()) if tokenizer.num_special_tokens_to_add(pair=True): self.assertIn(None, output.sequence_ids()) def test_special_tokens_initialization(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [AddedToken("<special>", lstrip=True)] tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, extra_special_tokens=added_tokens, **kwargs ) words = "Hey this is a <special> token".split() boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))] r_output = tokenizer_r.encode(words, boxes=boxes) special_token_id = tokenizer_r.encode( ["<special>"], boxes=[1000, 1000, 1000, 1000], add_special_tokens=False )[0] self.assertTrue(special_token_id in r_output) def test_training_new_tokenizer(self): # This feature only exists for fast tokenizers if not self.test_rust_tokenizer: self.skipTest(reason="test_rust_tokenizer is set to False") tokenizer = self.get_tokenizer() new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100) # Test we can use the new tokenizer with something not seen during training text = [["this", "is", "the"], ["how", "are", "you"]] boxes = [[[1, 2, 3, 4], [5, 6, 7, 8], [1, 3, 4, 8]], [[5, 6, 7, 8], [4, 5, 6, 7], [3, 9, 2, 7]]] inputs = new_tokenizer(text, boxes=boxes) self.assertEqual(len(inputs["input_ids"]), 2) decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True) expected_result = "this is the" if tokenizer.backend_tokenizer.normalizer is not None: expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result) self.assertEqual(expected_result, decoded_input) # We check that the parameters of the tokenizer remained the same # Check we have the same number of added_tokens for both pair and non-pair inputs. self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False)) self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True)) # Check we have the correct max_length for both pair and non-pair inputs. 
# max_len_single_sentence = model_max_length - num_special_tokens_to_add(pair=False) self.assertEqual( tokenizer.model_max_length - tokenizer.num_special_tokens_to_add(pair=False), new_tokenizer.model_max_length - new_tokenizer.num_special_tokens_to_add(pair=False), ) # max_len_sentences_pair = model_max_length - num_special_tokens_to_add(pair=True) self.assertEqual( tokenizer.model_max_length - tokenizer.num_special_tokens_to_add(pair=True), new_tokenizer.model_max_length - new_tokenizer.num_special_tokens_to_add(pair=True), ) # Assert the set of special tokens match as we didn't ask to change them self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map) def test_training_new_tokenizer_with_special_tokens_change(self): # This feature only exists for fast tokenizers if not self.test_rust_tokenizer: self.skipTest(reason="test_rust_tokenizer is set to False") tokenizer = self.get_tokenizer() # Test with a special tokens map class_signature = inspect.signature(tokenizer.__class__) if "cls_token" in class_signature.parameters: new_tokenizer = tokenizer.train_new_from_iterator( SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: "<cls>"} ) cls_id = new_tokenizer.get_vocab()["<cls>"] self.assertEqual(new_tokenizer.cls_token, "<cls>") self.assertEqual(new_tokenizer.cls_token_id, cls_id) # Create a new mapping from the special tokens defined in the original tokenizer special_tokens_list = PreTrainedTokenizerBase.SPECIAL_TOKENS_ATTRIBUTES.copy() special_tokens_map = {} for token in special_tokens_list: # Get the private one to avoid unnecessary warnings. if getattr(tokenizer, token) is not None: special_token = getattr(tokenizer, token) special_tokens_map[special_token] = f"{special_token}a" # Train new tokenizer new_tokenizer = tokenizer.train_new_from_iterator( SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map ) # Check the changes for token in special_tokens_list: # Get the private one to avoid unnecessary warnings. if getattr(tokenizer, token) is None: continue special_token = getattr(tokenizer, token) if special_token in special_tokens_map: new_special_token = getattr(new_tokenizer, token) self.assertEqual(special_tokens_map[special_token], new_special_token) new_id = new_tokenizer.get_vocab()[new_special_token] self.assertEqual(getattr(new_tokenizer, f"{token}_id"), new_id) # Check if the AddedToken / string format has been kept tokenizer_special_tokens = [ tok for value in tokenizer._special_tokens_map.values() if value for tok in (value if isinstance(value, (list, tuple)) else [value]) ] new_tokenizer_special_tokens = [ tok for value in new_tokenizer._special_tokens_map.values() if value for tok in (value if isinstance(value, (list, tuple)) else [value]) ] for special_token in tokenizer_special_tokens: if isinstance(special_token, AddedToken) and special_token.content not in special_tokens_map: # The special token must appear identically in the list of the new tokenizer. self.assertTrue( special_token in new_tokenizer_special_tokens, f"'{special_token}' should be in {new_tokenizer_special_tokens}", ) elif isinstance(special_token, AddedToken): # The special token must appear in the list of the new tokenizer as an object of type AddedToken with # the same parameters as the old AddedToken except the content that the user has requested to change. 
special_token_str = special_token.content new_special_token_str = special_tokens_map[special_token_str] find = False for candidate in new_tokenizer_special_tokens: if ( isinstance(candidate, AddedToken) and candidate.content == new_special_token_str and candidate.lstrip == special_token.lstrip and candidate.rstrip == special_token.rstrip and candidate.normalized == special_token.normalized and candidate.single_word == special_token.single_word ): find = True break self.assertTrue( find, f"'{new_special_token_str}' doesn't appear in the list " f"'{new_tokenizer_special_tokens}' as an AddedToken with the same parameters as " f"'{special_token}' in the list {tokenizer_special_tokens}", ) elif special_token not in special_tokens_map: # The special token must appear identically in the list of the new tokenizer. self.assertTrue( special_token in new_tokenizer_special_tokens, f"'{special_token}' should be in {new_tokenizer_special_tokens}", ) else: # The special token must appear in the list of the new tokenizer as an object of type string. self.assertTrue(special_tokens_map[special_token] in new_tokenizer_special_tokens) # Test we can use the new tokenizer with something not seen during training words = [["this", "is"], ["hello", "🤗"]] boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[1, 2, 3, 4], [5, 6, 7, 8]]] inputs = new_tokenizer(words, boxes=boxes) self.assertEqual(len(inputs["input_ids"]), 2) decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True) expected_result = "this is" if tokenizer.backend_tokenizer.normalizer is not None: expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result) self.assertEqual(expected_result, decoded_input) def test_prepare_for_model(self): tokenizers = self.get_tokenizers(do_lower_case=False) for tokenizer in tokenizers: # only test prepare_for_model for the slow tokenizer if tokenizer.__class__.__name__ == "LayoutXLMTokenizer": continue with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_words_and_boxes() prepared_input_dict = tokenizer.prepare_for_model(words, boxes=boxes, add_special_tokens=True) input_dict = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True) self.assertEqual(input_dict, prepared_input_dict) def test_padding_different_model_input_name(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions self.skipTest(reason="test_slow_tokenizer is set to False") def test_batch_encode_dynamic_overflowing(self): """ When calling batch_encode with multiple sequences, it can return different number of overflowing encoding for each sequence: [ Sequence 1: [Encoding 1, Encoding 2], Sequence 2: [Encoding 1], Sequence 3: [Encoding 1, Encoding 2, ... 
Encoding N] ] This needs to be padded so that it can represented as a tensor """ for tokenizer, pretrained_name, kwargs in self.tokenizers_list: tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"): returned_tensor = "pt" # Single example words, boxes = self.get_words_and_boxes() tokens = tokenizer.encode_plus( words, boxes=boxes, max_length=6, padding=True, truncation=True, return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): if key != "bbox": self.assertEqual(len(tokens[key].shape), 2) else: self.assertEqual(len(tokens[key].shape), 3) # Batch of examples # For these 2 examples, 3 training examples will be created words, boxes = self.get_words_and_boxes_batch() tokens = tokenizer.batch_encode_plus( words, boxes=boxes, max_length=6, padding=True, truncation="only_first", return_tensors=returned_tensor, return_overflowing_tokens=True, ) for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): if key != "bbox": self.assertEqual(len(tokens[key].shape), 2) self.assertEqual(tokens[key].shape[-1], 6) else: self.assertEqual(len(tokens[key].shape), 3) self.assertEqual(tokens[key].shape[-1], 4) # overwrite from test_tokenization_common to speed up test def test_save_pretrained(self): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions self.skipTest(reason="test_slow_tokenizer is set to False") @unittest.skip(reason="TO DO: overwrite this very extensive test.") def test_alignment_methods(self): pass @unittest.skip(reason="layoutxlm tokenizer requires boxes besides sequences.") def test_maximum_encoding_length_pair_input(self): pass @unittest.skip(reason="layoutxlm tokenizer requires boxes besides sequences.") def test_maximum_encoding_length_single_input(self): pass @unittest.skip(reason="layoutxlm tokenizer requires boxes besides sequences.") def test_pretokenized_inputs(self): pass @unittest.skip(reason="layoutxlm tokenizer always expects pretokenized inputs.") def test_compare_pretokenized_inputs(self): pass @unittest.skip(reason="layoutxlm fast tokenizer does not support prepare_for_model") def test_compare_prepare_for_model(self): pass @slow def test_only_label_first_subword(self): words = ["hello", "niels"] boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))] word_labels = [0, 1] # test fast tokenizer tokenizer_r = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base") encoding = tokenizer_r(words, boxes=boxes, word_labels=word_labels) self.assertListEqual(encoding.labels, [-100, 0, -100, 1, -100, -100]) tokenizer_r = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base", only_label_first_subword=False) encoding = tokenizer_r(words, boxes=boxes, word_labels=word_labels) self.assertListEqual(encoding.labels, [-100, 0, 0, 1, 1, -100]) @slow def test_layoutxlm_integration_test(self): tokenizer_r = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base") # There are 3 cases: # CASE 1: document image classification (training + inference), document image token classification (inference), # in which case only words and normalized bounding boxes are provided to the tokenizer # CASE 2: document image token classification (training), # in which case one also provides word labels to the tokenizer # CASE 3: document image visual question answering 
(inference), # in which case one also provides a question to the tokenizer # We need to test all 3 cases both on batched and non-batched inputs. # CASE 1: not batched words, boxes = self.get_words_and_boxes() expected_results = {'input_ids': [0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_r), expected_results) # CASE 1: batched words, boxes = self.get_words_and_boxes_batch() expected_results = {'input_ids': [[0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 33600, 31, 759, 9351, 83, 21895, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip encoding_r = tokenizer_r(words, boxes=boxes, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_r), expected_results) # CASE 2: not batched words, boxes = self.get_words_and_boxes() word_labels = [1, 2, 3] expected_results = {'input_ids': [0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'labels': [-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_r), expected_results) # CASE 2: batched words, boxes = self.get_words_and_boxes_batch() word_labels = [[1, 2, 3], [2, 46, 17, 22, 3]] expected_results = {'input_ids': [[0, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 33600, 31, 759, 9351, 83, 21895, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 
0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [961, 885, 992, 912], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'labels': [[-100, 1, 2, -100, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, -100, 46, 17, 22, 3, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_r), expected_results) # CASE 3: not batched question, words, boxes = self.get_question_words_and_boxes() expected_results = {'input_ids': [0, 2367, 25, 7, 1919, 9351, 32, 2, 2, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], 'bbox': [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]} # fmt: skip encoding_r = tokenizer_r(question, words, boxes, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_r), expected_results) # CASE 3: batched questions, words, boxes = self.get_question_words_and_boxes_batch() expected_results = {'input_ids': [[0, 2367, 25, 7, 1919, 9351, 32, 2, 2, 10, 179459, 538, 3034, 2, 1, 1, 1, 1, 1, 1], [0, 3642, 83, 764, 35839, 32, 2, 2, 2367, 10, 21, 3190, 53496, 19, 2, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]], 'bbox': [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [423, 237, 440, 251], [427, 272, 441, 287], [427, 272, 441, 287], [419, 115, 437, 129], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [256, 38, 330, 58], [256, 38, 330, 58], [336, 42, 353, 57], [336, 42, 353, 57], [34, 42, 66, 69], [34, 42, 66, 69], [1000, 1000, 1000, 1000], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]} # fmt: skip encoding_r = tokenizer_r(questions, words, boxes, padding="max_length", max_length=20) self.assertDictEqual(dict(encoding_r), expected_results) @unittest.skip(reason="Doesn't support returning Numpy arrays") def test_np_encode_plus_sent_to_model(self): pass @unittest.skip(reason="Doesn't use SentencePiece") def test_sentencepiece_tokenize_and_convert_tokens_to_string(self): pass @unittest.skip(reason="Doesn't use SentencePiece") def test_sentencepiece_tokenize_and_decode(self): pass @unittest.skip(reason="Chat is not supported") def test_chat_template(self): pass @unittest.skip("Chat is not supported") def 
test_chat_template_return_assistant_tokens_mask(self): pass @unittest.skip("Chat is not supported") def test_chat_template_return_assistant_tokens_mask_truncated(self): pass def test_empty_input_string(self): tokenizer_return_type = [] output_tensor_type = [] if is_torch_available(): import numpy as np import torch tokenizer_return_type.append("pt") output_tensor_type.append(torch.int64) tokenizer_return_type.append("np") output_tensor_type.append(np.int64) if is_mlx_available(): import mlx.core as mx tokenizer_return_type.append("mlx") output_tensor_type.append(mx.int32) if len(tokenizer_return_type) == 0: self.skipTest(reason="No expected framework from PT or MLX found") tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): words, boxes = self.get_empty_words_and_boxes() for return_type, target_type in zip(tokenizer_return_type, output_tensor_type): output = tokenizer(words, boxes=boxes, return_tensors=return_type) self.assertEqual(output.input_ids.dtype, target_type) question, words, boxes = self.get_empty_question_words_and_boxes() for return_type, target_type in zip(tokenizer_return_type, output_tensor_type): output = tokenizer(words, boxes=boxes, return_tensors=return_type) self.assertEqual(output.input_ids.dtype, target_type) words, boxes = self.get_empty_words_and_boxes_batch() for return_type, target_type in zip(tokenizer_return_type, output_tensor_type): output = tokenizer(words, boxes=boxes, padding=True, return_tensors=return_type) self.assertEqual(output.input_ids.dtype, target_type) question, words, boxes = self.get_empty_question_words_and_boxes_batch() for return_type, target_type in zip(tokenizer_return_type, output_tensor_type): output = tokenizer(words, boxes=boxes, padding=True, return_tensors=return_type) self.assertEqual(output.input_ids.dtype, target_type)
LayoutXLMTokenizationTest
python
django__django
django/forms/utils.py
{ "start": 3155, "end": 3451 }
class ____(RenderableMixin):
    def as_json(self, escape_html=False):
        return json.dumps(self.get_json_data(escape_html))

    def as_text(self):
        return self.render(self.template_name_text)

    def as_ul(self):
        return self.render(self.template_name_ul)
RenderableErrorMixin
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/array_ops/array_ops_test.py
{ "start": 95988, "end": 97481 }
class ____(test_util.TensorFlowTestCase, parameterized.TestCase):

  def testStopGradient(self):
    x = array_ops.zeros(3)
    y = array_ops.stop_gradient(x)
    self.assertAllEqual(x, y)

  def testStopGradientRaggedTensor(self):
    x = RaggedTensor.from_row_splits(values=[1, 2, 3], row_splits=[0, 1, 1, 3])
    y = array_ops.stop_gradient(x)
    self.assertAllEqual(x, y)

  def testStopGradientGradientTape(self):
    x = array_ops.zeros(3)
    with backprop.GradientTape() as tape:
      y = array_ops.stop_gradient(x)
    self.assertIsNone(tape.gradient(y, x))

  def testStopGradientGradientTapeRaggedTensor(self):
    x = RaggedTensor.from_row_splits(values=[1, 2, 3], row_splits=[0, 1, 1, 3])
    with backprop.GradientTape() as tape:
      y = array_ops.stop_gradient(x)
    self.assertIsNone(tape.gradient(y, x))

  @parameterized.named_parameters([
      ("TFFunction", def_function.function),
      ("PythonFunction", lambda f: f),
  ])
  def test_stop_gradient_resource_variable(self, decorator):
    x = resource_variable_ops.ResourceVariable([1.0])
    self.evaluate(x.initializer)

    @decorator
    def stop_gradient_f(x):
      return array_ops.stop_gradient(x)

    with backprop.GradientTape() as tape:
      y = stop_gradient_f(x)
    self.assertIsNone(tape.gradient(y, x))
    # stop_gradient converts ResourceVariable to Tensor
    self.assertIsInstance(y, tensor_lib.Tensor)
    self.assertAllEqual(y, x)


if __name__ == "__main__":
  test_lib.main()
StopGradientTest
python
getsentry__sentry
src/sentry/seer/fetch_issues/utils.py
{ "start": 1250, "end": 1383 }
class ____(RepoInfo):
    repo: Repository
    repo_configs: list[RepositoryProjectPathConfig]
    projects: list[Project]
RepoProjects
python
facelessuser__pymdown-extensions
pymdownx/arithmatex.py
{ "start": 11657, "end": 14746 }
class ____(Extension):
    """Add the Arithmatex extension to the Markdown class."""

    def __init__(self, *args, **kwargs):
        """Initialize."""

        self.config = {
            'tex_inline_wrap': [
                ["\\(", "\\)"],
                "Wrap inline content with the provided text ['open', 'close'] - Default: ['', '']"
            ],
            'tex_block_wrap': [
                ["\\[", "\\]"],
                "Wrap block content with the provided text ['open', 'close'] - Default: ['', '']"
            ],
            "smart_dollar": [True, "Use Arithmatex's smart dollars - Default True"],
            "block_syntax": [
                ['dollar', 'square', 'begin'],
                'Enable block syntax: "dollar" ($$...$$), "square" (\\[...\\]), and '
                '"begin" (\\begin{env}...\\end{env}). - Default: ["dollar", "square", "begin"]'
            ],
            "inline_syntax": [
                ['dollar', 'round'],
                'Enable inline syntax: "dollar" ($...$), "round" (\\(...\\)) '
                ' - Default: ["dollar", "round"]'
            ],
            'generic': [False, "Output in a generic format for non MathJax libraries - Default: False"],
            'preview': [
                True,
                "Insert a preview for scripts. - Default: False"
            ],
            'block_tag': ['div', "Specify wrapper tag - Default 'div'"],
            'inline_tag': ['span', "Specify wrapper tag - Default 'span'"]
        }
        super().__init__(*args, **kwargs)

    def extendMarkdown(self, md):
        """Extend the inline and block processor objects."""

        md.registerExtension(self)
        util.escape_chars(md, ['$'])

        config = self.getConfigs()

        # Inline patterns
        allowed_inline = set(config.get('inline_syntax', ['dollar', 'round']))
        smart_dollar = config.get('smart_dollar', True)
        inline_patterns = []
        if 'dollar' in allowed_inline:
            inline_patterns.append(RE_SMART_DOLLAR_INLINE if smart_dollar else RE_DOLLAR_INLINE)
        if 'round' in allowed_inline:
            inline_patterns.append(RE_BRACKET_INLINE)
        if inline_patterns:
            inline = InlineArithmatexPattern('(?:%s)' % '|'.join(inline_patterns), config)
            md.inlinePatterns.register(inline, 'arithmatex-inline', 189.9)

        # Block patterns
        allowed_block = set(config.get('block_syntax', ['dollar', 'square', 'begin']))
        block_pattern = []
        if 'dollar' in allowed_block:
            block_pattern.append(RE_DOLLAR_BLOCK)
        if 'square' in allowed_block:
            block_pattern.append(RE_BRACKET_BLOCK)
        if 'begin' in allowed_block:
            block_pattern.append(RE_TEX_BLOCK)
        if block_pattern:
            block = BlockArithmatexProcessor(r'(?s)^(?:%s)[ ]*$' % '|'.join(block_pattern), config, md)
            md.parser.blockprocessors.register(block, "arithmatex-block", 79.9)


def makeExtension(*args, **kwargs):
    """Return extension."""

    return ArithmatexExtension(*args, **kwargs)
ArithmatexExtension
python
allegroai__clearml
clearml/backend_api/services/v2_13/tasks.py
{ "start": 174422, "end": 179220 }
class ____(Request): """ Delete tasks :param ids: Entities to move :type ids: Sequence[str] :param move_to_trash: Move task to trash instead of deleting it. For internal use only, tasks in the trash are not visible from the API and cannot be restored! :type move_to_trash: bool :param force: If not true, call fails if the task status is 'in_progress' :type force: bool :param return_file_urls: If set to 'true' then return the urls of the files that were uploaded by the tasks. Default value is 'false' :type return_file_urls: bool :param delete_output_models: If set to 'true' then delete output models of the tasks that are not referenced by other tasks. Default value is 'true' :type delete_output_models: bool """ _service = "tasks" _action = "delete_many" _version = "2.13" _schema = { "definitions": {}, "properties": { "delete_output_models": { "description": "If set to 'true' then delete output models of the tasks that are not referenced by other tasks. Default value is 'true'", "type": "boolean", }, "force": { "default": False, "description": "If not true, call fails if the task status is 'in_progress'", "type": "boolean", }, "ids": { "description": "Entities to move", "items": {"type": "string"}, "type": "array", }, "move_to_trash": { "default": False, "description": "Move task to trash instead of deleting it. For internal use only, tasks in the trash are not visible from the API and cannot be restored!", "type": "boolean", }, "return_file_urls": { "description": "If set to 'true' then return the urls of the files that were uploaded by the tasks. Default value is 'false'", "type": "boolean", }, }, "required": ["ids"], "type": "object", } def __init__( self, ids: List[str], move_to_trash: Optional[bool] = False, force: Optional[bool] = False, return_file_urls: Optional[bool] = None, delete_output_models: Optional[bool] = None, **kwargs: Any ) -> None: super(DeleteManyRequest, self).__init__(**kwargs) self.ids = ids self.move_to_trash = move_to_trash self.force = force self.return_file_urls = return_file_urls self.delete_output_models = delete_output_models @schema_property("ids") def ids(self) -> List[str]: return self._property_ids @ids.setter def ids(self, value: List[str]) -> None: if value is None: self._property_ids = None return self.assert_isinstance(value, "ids", (list, tuple)) self.assert_isinstance(value, "ids", six.string_types, is_array=True) self._property_ids = value @schema_property("move_to_trash") def move_to_trash(self) -> Optional[bool]: return self._property_move_to_trash @move_to_trash.setter def move_to_trash(self, value: Optional[bool]) -> None: if value is None: self._property_move_to_trash = None return self.assert_isinstance(value, "move_to_trash", (bool,)) self._property_move_to_trash = value @schema_property("force") def force(self) -> Optional[bool]: return self._property_force @force.setter def force(self, value: Optional[bool]) -> None: if value is None: self._property_force = None return self.assert_isinstance(value, "force", (bool,)) self._property_force = value @schema_property("return_file_urls") def return_file_urls(self) -> Optional[bool]: return self._property_return_file_urls @return_file_urls.setter def return_file_urls(self, value: Optional[bool]) -> None: if value is None: self._property_return_file_urls = None return self.assert_isinstance(value, "return_file_urls", (bool,)) self._property_return_file_urls = value @schema_property("delete_output_models") def delete_output_models(self) -> Optional[bool]: return self._property_delete_output_models 
@delete_output_models.setter def delete_output_models(self, value: Optional[bool]) -> None: if value is None: self._property_delete_output_models = None return self.assert_isinstance(value, "delete_output_models", (bool,)) self._property_delete_output_models = value
DeleteManyRequest
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeParams5.py
{ "start": 619, "end": 725 }
class ____[**P: int]: ...


# This should generate an error because the expression isn't
# a valid type.
ClassG
python
ansible__ansible
test/lib/ansible_test/_internal/util.py
{ "start": 19839, "end": 20347 }
class ____(WrappedThread):
    """Thread to write data to stdin of a subprocess."""

    def __init__(self, handle: t.IO[bytes], data: bytes, name: str) -> None:
        super().__init__(self._run, f'{self.__class__.__name__}: {name}')

        self.handle = handle
        self.data = data

    def _run(self) -> None:
        """Workload to run on a thread."""
        try:
            self.handle.write(self.data)
            self.handle.flush()
        finally:
            self.handle.close()
WriterThread
python
readthedocs__readthedocs.org
readthedocs/rtd_tests/tests/test_resolver.py
{ "start": 26489, "end": 26627 }
class ____(ResolverAltSetUp, SmartResolverPathTests):
    pass


@override_settings(PUBLIC_DOMAIN="readthedocs.org")
SmartResolverPathTestsAlt
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/callbackProtocol6.py
{ "start": 880, "end": 1191 }
class ____(Protocol):
    def __call__(self, *, path: str) -> str: ...


def func3_1(*, path: str = "") -> str: ...


def func3_2(*, path: str) -> str: ...


val3_1: Callback3 = func3_1

# This should generate an error.
val3_2: Callback3 = func3_2

val4_1: Callback4 = func3_1
val4_2: Callback4 = func3_2
Callback4
python
Netflix__metaflow
metaflow/plugins/aws/step_functions/step_functions.py
{ "start": 47506, "end": 48314 }
class ____(object):
    def __init__(self, name):
        self.name = name
        tree = lambda: defaultdict(tree)
        self.payload = tree()

    def mode(self, mode):
        self.payload["ProcessorConfig"] = {"Mode": mode}
        if mode == "DISTRIBUTED":
            self.payload["ProcessorConfig"]["ExecutionType"] = "STANDARD"
        return self

    def start_at(self, start_at):
        self.payload["StartAt"] = start_at
        return self

    def add_state(self, state):
        self.payload["States"][state.name] = state.payload
        return self

    def timeout_seconds(self, timeout_seconds):
        self.payload["TimeoutSeconds"] = timeout_seconds
        return self

    def to_json(self, pretty=False):
        return json.dumps(self.payload, indent=4 if pretty else None)
Workflow
python
dagster-io__dagster
python_modules/libraries/dagster-dbt/dagster_dbt/errors.py
{ "start": 83, "end": 183 }
class ____(Failure, ABC):
    """The base exception of the ``dagster-dbt`` library."""
DagsterDbtError
python
readthedocs__readthedocs.org
readthedocs/organizations/views/private.py
{ "start": 2775, "end": 3332 }
class ____(FilterContextMixin, PrivateViewMixin, OrganizationView, ListView):
    template_name = "organizations/organization_list.html"
    admin_only = False

    filterset_class = OrganizationListFilterSet

    def get_queryset(self):
        return Organization.objects.for_user(user=self.request.user)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["filter"] = self.get_filterset()
        context["organization_list"] = self.get_filtered_queryset()
        return context
ListOrganization
python
PrefectHQ__prefect
src/integrations/prefect-dbt/tests/core/test_runner.py
{ "start": 5865, "end": 7498 }
class ____: """Test manifest loading functionality.""" def test_manifest_loading_success(self, tmp_path: Path): """Test successful manifest loading from file.""" # Create a mock manifest file manifest_data = {"nodes": {}, "metadata": {"adapter_type": "snowflake"}} manifest_path = tmp_path / "target" / "manifest.json" manifest_path.parent.mkdir(parents=True) with open(manifest_path, "w") as f: json.dump(manifest_data, f) runner = PrefectDbtRunner() runner._project_dir = tmp_path runner._target_path = Path("target") with patch("prefect_dbt.core.runner.Manifest.from_dict") as mock_from_dict: mock_manifest = Mock(spec=Manifest) mock_from_dict.return_value = mock_manifest result = runner.manifest assert result == mock_manifest mock_from_dict.assert_called_once_with(manifest_data) def test_manifest_loading_file_not_found(self, tmp_path: Path): """Test that missing manifest file raises appropriate error.""" runner = PrefectDbtRunner() runner._project_dir = tmp_path runner._target_path = Path("target") with pytest.raises(ValueError, match="Manifest file not found"): _ = runner.manifest def test_manifest_loading_with_preloaded_manifest(self, mock_manifest): """Test that preloaded manifest is used without file access.""" runner = PrefectDbtRunner(manifest=mock_manifest) result = runner.manifest assert result == mock_manifest
TestPrefectDbtRunnerManifestLoading
python
pikepdf__pikepdf
tests/test_object.py
{ "start": 24166, "end": 25444 }
class ____:
    def test_cyclic_toc(self, cyclic_toc):
        assert cyclic_toc.get_object(5, 0) != cyclic_toc.get_object(9, 0)
        assert cyclic_toc.get_object(5, 0) == cyclic_toc.get_object(5, 0)
        assert cyclic_toc.get_object(9, 0) == cyclic_toc.get_object(9, 0)

    def test_loop(self):
        pdf = pikepdf.new()
        d1 = pdf.make_indirect(Dictionary())
        d2 = pdf.make_indirect(Dictionary())
        d1['/x'] = d2
        d2['/x'] = d1
        assert d1 == d1
        assert d1['/x'] == d2
        assert d1 == d2
        d2['/y'] = d1
        assert d1 != d2

    def test_loop3(self):
        pdf = pikepdf.new()
        a = pdf.make_indirect(Dictionary())
        b = pdf.make_indirect(Dictionary())
        c = pdf.make_indirect(Dictionary())
        a['/b'] = b
        b['/c'] = c
        c['/a'] = a
        assert a == c['/a']

    def test_crossed_tree(self):
        pdf = pikepdf.new()
        a = pdf.make_indirect(Dictionary())
        a1 = pdf.make_indirect(Dictionary())
        a2 = pdf.make_indirect(Dictionary())
        a['/a1'] = a1
        a['/a2'] = a2
        a1['/parent'] = a
        a1['/sibling'] = a2
        a2['/parent'] = a
        a2['/sibling'] = a1
        assert a == a1['/parent']
        assert a1 == a2
TestCyclicEquality
python
kamyu104__LeetCode-Solutions
Python/number-of-corner-rectangles.py
{ "start": 105, "end": 592 }
class ____(object):
    def countCornerRectangles(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int
        """
        rows = [[c for c, val in enumerate(row) if val] for row in grid]
        result = 0
        for i in xrange(len(rows)):
            lookup = set(rows[i])
            for j in xrange(i):
                count = sum(1 for c in rows[j] if c in lookup)
                result += count*(count-1)/2
        return result
Solution
python
django__django
django/contrib/sites/models.py
{ "start": 659, "end": 2645 }
class ____(models.Manager): use_in_migrations = True def _get_site_by_id(self, site_id): if site_id not in SITE_CACHE: site = self.get(pk=site_id) SITE_CACHE[site_id] = site return SITE_CACHE[site_id] def _get_site_by_request(self, request): host = request.get_host() try: # First attempt to look up the site by host with or without port. if host not in SITE_CACHE: SITE_CACHE[host] = self.get(domain__iexact=host) return SITE_CACHE[host] except Site.DoesNotExist: # Fallback to looking up site after stripping port from the host. domain, port = split_domain_port(host) if domain not in SITE_CACHE: SITE_CACHE[domain] = self.get(domain__iexact=domain) return SITE_CACHE[domain] def get_current(self, request=None): """ Return the current Site based on the SITE_ID in the project's settings. If SITE_ID isn't defined, return the site with domain matching request.get_host(). The ``Site`` object is cached the first time it's retrieved from the database. """ from django.conf import settings if getattr(settings, "SITE_ID", ""): site_id = settings.SITE_ID return self._get_site_by_id(site_id) elif request: return self._get_site_by_request(request) raise ImproperlyConfigured( 'You\'re using the Django "sites framework" without having ' "set the SITE_ID setting. Create a site in your database and " "set the SITE_ID setting or pass a request to " "Site.objects.get_current() to fix this error." ) def clear_cache(self): """Clear the ``Site`` object cache.""" global SITE_CACHE SITE_CACHE = {} def get_by_natural_key(self, domain): return self.get(domain=domain)
SiteManager
python
xlwings__xlwings
tests/test_book.py
{ "start": 204, "end": 1697 }
class ____(TestBase):
    def test_indexing(self):
        self.assertEqual(self.app1.books[0], self.app1.books(1))

    def test_len(self):
        count = self.app1.books.count
        self.app1.books.add()
        self.assertEqual(len(self.app1.books), count + 1)

    def test_count(self):
        self.assertEqual(len(self.app1.books), self.app1.books.count)

    def test_add(self):
        current_count = self.app1.books.count
        self.app1.books.add()
        self.assertEqual(len(self.app1.books), current_count + 1)

    def test_open(self):
        fullname = os.path.join(this_dir, "test book.xlsx")
        wb = self.app1.books.open(fullname)
        self.assertEqual(self.app1.books.active, wb)
        wb2 = self.app1.books.open(fullname)  # Should not reopen
        self.assertEqual(wb, wb2)

    @unittest.skipIf(pathlib is None, "pathlib unavailable")
    def test_open_pathlib(self):
        fullname = pathlib.Path(this_dir) / "test book.xlsx"
        wb = self.app1.books.open(fullname)
        self.assertEqual(self.app1.books.active, wb)
        wb2 = self.app1.books.open(fullname)  # Should not reopen
        self.assertEqual(wb, wb2)

    def test_open_bad_name(self):
        fullname = os.path.join(this_dir, "no book.xlsx")
        with self.assertRaises(FileNotFoundError):
            self.app1.books.open(fullname)

    def test_iter(self):
        for ix, wb in enumerate(self.app1.books):
            self.assertEqual(self.app1.books[ix], wb)
TestBooks
python
kamyu104__LeetCode-Solutions
Python/final-array-state-after-k-multiplication-operations-ii.py
{ "start": 1502, "end": 3102 }
class ____(object): def getFinalState(self, nums, k, multiplier): """ :type nums: List[int] :type k: int :type multiplier: int :rtype: List[int] """ MOD = 10**9+7 EPS = 1e-15 def binary_search_right(left, right, check): while left <= right: mid = left+(right-left)//2 if not check(mid): right = mid-1 else: left = mid+1 return right def count(x, target): return int(target-x+EPS) def check(target): result = 0 for x, i in vals: c = count(x, target) if c <= 0: break result += c return result <= k if multiplier == 1: return nums vals = sorted((log(x)/log(multiplier), i) for i, x in enumerate(nums)) target = binary_search_right(1, int(vals[-1][0])+1, check) for idx, (x, i) in enumerate(vals): c = count(x, target) if c <= 0: break k -= c nums[i] *= pow(multiplier, c) q, r = divmod(k, len(nums)) m = pow(multiplier, q, MOD) result = [0]*len(nums) for idx, (x, i) in enumerate(sorted((x, i) for i, x in enumerate(nums))): result[i] = x*m*(multiplier if idx < r else 1)%MOD return result # Time: O(min(nlogr, k) * logn + nlogn) = O(nlogn * logr) # Space: O(n) import heapq # heap, sort, fast exponentiation
Solution2
python
PyCQA__pyflakes
pyflakes/checker.py
{ "start": 7873, "end": 9187 }
class ____(Definition):
    """
    A binding created by an import statement.

    @ivar fullName: The complete name given to the import statement,
        possibly including multiple dotted components.
    @type fullName: C{str}
    """

    def __init__(self, name, source, full_name=None):
        self.fullName = full_name or name
        self.redefined = []
        super().__init__(name, source)

    def redefines(self, other):
        if isinstance(other, SubmoduleImportation):
            # See note in SubmoduleImportation about RedefinedWhileUnused
            return self.fullName == other.fullName
        return isinstance(other, Definition) and self.name == other.name

    def _has_alias(self):
        """Return whether importation needs an as clause."""
        return not self.fullName.split('.')[-1] == self.name

    @property
    def source_statement(self):
        """Generate a source statement equivalent to the import."""
        if self._has_alias():
            return f'import {self.fullName} as {self.name}'
        else:
            return 'import %s' % self.fullName

    def __str__(self):
        """Return import full name with alias."""
        if self._has_alias():
            return self.fullName + ' as ' + self.name
        else:
            return self.fullName
Importation
python
PrefectHQ__prefect
tests/_internal/pydantic/test_validated_func.py
{ "start": 1638, "end": 2758 }
class ____:
    """Test that types are validated correctly."""

    def test_type_coercion(self):
        def add(a: int, b: int):
            return a + b

        vf = ValidatedFunction(add)
        # Pydantic should coerce string to int
        result = vf.validate_call_args(("5", "10"), {})
        assert result == {"a": 5, "b": 10}

    def test_type_validation_error(self):
        def add(a: int, b: int):
            return a + b

        vf = ValidatedFunction(add)
        with pytest.raises(ValidationError) as exc_info:
            vf.validate_call_args(("not a number",), {"b": 10})

        assert "a" in str(exc_info.value)

    def test_pydantic_model_validation(self):
        class Person(BaseModel):
            name: str
            age: int

        def process_person(person: Person):
            return person

        vf = ValidatedFunction(process_person)
        result = vf.validate_call_args(({"name": "Alice", "age": 30},), {})
        assert isinstance(result["person"], Person)
        assert result["person"].name == "Alice"
        assert result["person"].age == 30
TestTypeValidation
python
django__django
tests/test_utils/tests.py
{ "start": 88282, "end": 88404 }
class ____(TestContextDecorator):
    def enable(self):
        pass

    def disable(self):
        pass
DoNothingDecorator
python
spack__spack
lib/spack/spack/vendor/macholib/SymbolTable.py
{ "start": 396, "end": 3151 }
class ____(object): def __init__(self, macho, header=None, openfile=None): if openfile is None: openfile = open if header is None: header = macho.headers[0] self.macho_header = header with openfile(macho.filename, "rb") as fh: self.symtab = header.getSymbolTableCommand() self.dysymtab = header.getDynamicSymbolTableCommand() if self.symtab is not None: self.nlists = self.readSymbolTable(fh) if self.dysymtab is not None: self.readDynamicSymbolTable(fh) def readSymbolTable(self, fh): cmd = self.symtab fh.seek(self.macho_header.offset + cmd.stroff) strtab = fh.read(cmd.strsize) fh.seek(self.macho_header.offset + cmd.symoff) nlists = [] if self.macho_header.MH_MAGIC in [MH_MAGIC_64, MH_CIGAM_64]: cls = nlist_64 else: cls = nlist for _i in range(cmd.nsyms): cmd = cls.from_fileobj(fh, _endian_=self.macho_header.endian) if cmd.n_un == 0: nlists.append((cmd, "")) else: nlists.append( ( cmd, strtab[cmd.n_un : strtab.find(b"\x00", cmd.n_un)], # noqa: E203 ) ) return nlists def readDynamicSymbolTable(self, fh): cmd = self.dysymtab nlists = self.nlists self.localsyms = nlists[ cmd.ilocalsym : cmd.ilocalsym + cmd.nlocalsym # noqa: E203 ] self.extdefsyms = nlists[ cmd.iextdefsym : cmd.iextdefsym + cmd.nextdefsym # noqa: E203 ] self.undefsyms = nlists[ cmd.iundefsym : cmd.iundefsym + cmd.nundefsym # noqa: E203 ] if cmd.tocoff == 0: self.toc = None else: self.toc = self.readtoc(fh, cmd.tocoff, cmd.ntoc) def readtoc(self, fh, off, n): fh.seek(self.macho_header.offset + off) return [dylib_table_of_contents.from_fileobj(fh) for i in range(n)] def readmodtab(self, fh, off, n): fh.seek(self.macho_header.offset + off) return [dylib_module.from_fileobj(fh) for i in range(n)] def readsym(self, fh, off, n): fh.seek(self.macho_header.offset + off) refs = [] for _i in range(n): ref = dylib_reference.from_fileobj(fh) isym, flags = divmod(ref.isym_flags, 256) refs.append((self.nlists[isym], flags)) return refs def readrel(self, fh, off, n): fh.seek(self.macho_header.offset + off) return [relocation_info.from_fileobj(fh) for i in range(n)]
SymbolTable
python
scipy__scipy
benchmarks/benchmarks/signal_filtering.py
{ "start": 3053, "end": 3495 }
class ____(Benchmark):
    param_names = ['whole', 'nyquist', 'worN']
    params = [
        [False, True],
        [False, True],
        [64, 65, 128, 129, 256, 257, 258, 512, 513, 65536, 65537, 65538],
    ]

    def setup(self, whole, nyquist, worN):
        self.y = np.zeros(worN)
        self.y[worN//2] = 1.0

    def time_freqz(self, whole, nyquist, worN):
        freqz(self.y, whole=whole, include_nyquist=nyquist, worN=worN)
FreqzRfft
python
pyca__cryptography
src/cryptography/x509/extensions.py
{ "start": 2873, "end": 3190 }
class ____(metaclass=abc.ABCMeta):
    oid: typing.ClassVar[ObjectIdentifier]

    def public_bytes(self) -> bytes:
        """
        Serializes the extension type to DER.
        """
        raise NotImplementedError(
            f"public_bytes is not implemented for extension type {self!r}"
        )
ExtensionType
python
getsentry__sentry
tests/sentry/api/endpoints/test_organization_releases.py
{ "start": 34575, "end": 49591 }
class ____(APITestCase): endpoint = "sentry-api-0-organization-releases-stats" def setUp(self) -> None: self.project1 = self.create_project(teams=[self.team], organization=self.organization) self.project2 = self.create_project(teams=[self.team], organization=self.organization) self.project3 = self.create_project(teams=[self.team], organization=self.organization) self.login_as(user=self.user) def test_simple(self) -> None: release1 = Release.objects.create( organization_id=self.organization.id, version="1", date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC), ) release1.add_project(self.project1) release2 = Release.objects.create( organization_id=self.organization.id, version="2", date_added=datetime(2013, 8, 12, 3, 8, 24, 880386, tzinfo=UTC), date_released=datetime(2013, 8, 15, 3, 8, 24, 880386, tzinfo=UTC), ) release2.add_project(self.project2) release3 = Release.objects.create( organization_id=self.organization.id, version="3", date_added=datetime(2013, 8, 14, 3, 8, 24, 880386, tzinfo=UTC), ) release3.add_project(self.project3) url = reverse( "sentry-api-0-organization-releases-stats", kwargs={"organization_id_or_slug": self.organization.slug}, ) response = self.client.get(url, format="json") assert response.status_code == 200, response.content assert len(response.data) == 3 assert response.data[0]["version"] == release3.version assert response.data[0]["date"] == release3.date_added assert response.data[1]["version"] == release1.version assert response.data[1]["date"] == release1.date_added assert response.data[2]["version"] == release2.version assert response.data[2]["date"] == release2.date_added def test_release_list_order_by_date_added(self) -> None: """ Test that ensures that by relying on the default date sorting, releases will only be sorted according to `Release.date_added`, and `Release.date_released` should have no effect whatsoever on that order """ user = self.create_user(is_staff=False, is_superuser=False) org = self.organization org.flags.allow_joinleave = False org.save() team = self.create_team(organization=org) project = self.create_project(teams=[team], organization=org) self.create_member(teams=[team], user=user, organization=org) self.login_as(user=user) release6 = Release.objects.create( organization_id=org.id, version="6", date_added=datetime(2013, 8, 10, 3, 8, 24, 880386, tzinfo=UTC), date_released=datetime(2013, 8, 20, 3, 8, 24, 880386, tzinfo=UTC), ) release6.add_project(project) release7 = Release.objects.create( organization_id=org.id, version="7", date_added=datetime(2013, 8, 12, 3, 8, 24, 880386, tzinfo=UTC), date_released=datetime(2013, 8, 18, 3, 8, 24, 880386, tzinfo=UTC), ) release7.add_project(project) release8 = Release.objects.create( organization_id=org.id, version="8", date_added=datetime(2013, 8, 14, 3, 8, 24, 880386, tzinfo=UTC), date_released=datetime(2013, 8, 16, 3, 8, 24, 880386, tzinfo=UTC), ) release8.add_project(project) url = reverse( "sentry-api-0-organization-releases-stats", kwargs={"organization_id_or_slug": self.organization.slug}, ) response = self.client.get(url, format="json") assert response.status_code == 200, response.content assert len(response.data) == 3 assert response.data[0]["version"] == release8.version assert response.data[1]["version"] == release7.version assert response.data[2]["version"] == release6.version def test_with_adoption_stages(self) -> None: user = self.create_user(is_staff=False, is_superuser=False) org = self.organization org.save() team1 = self.create_team(organization=org) project1 = 
self.create_project(teams=[team1], organization=org) self.create_member(teams=[team1], user=user, organization=org) self.login_as(user=user) release1 = Release.objects.create( organization_id=org.id, version="1", date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC), ) release1.add_project(project1) url = reverse( "sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": org.slug} ) response = self.client.get(f"{url}?adoptionStages=1", format="json") assert response.status_code == 200, response.content assert len(response.data) == 1 assert "adoptionStages" in response.data[0] def test_semver_filter(self) -> None: self.login_as(user=self.user) release_1 = self.create_release(version="test@1.2.4") release_2 = self.create_release(version="test@1.2.3") release_3 = self.create_release(version="test2@1.2.5") self.create_release(version="some.release") response = self.get_success_response(self.organization.slug, query=f"{SEMVER_ALIAS}:>1.2.3") assert [r["version"] for r in response.data] == [release_3.version, release_1.version] response = self.get_success_response( self.organization.slug, query=f"{SEMVER_ALIAS}:>=1.2.3" ) assert [r["version"] for r in response.data] == [ release_3.version, release_2.version, release_1.version, ] response = self.get_success_response(self.organization.slug, query=f"{SEMVER_ALIAS}:1.2.*") assert [r["version"] for r in response.data] == [ release_3.version, release_2.version, release_1.version, ] response = self.get_success_response(self.organization.slug, query=f"{SEMVER_ALIAS}:2.2.1") assert [r["version"] for r in response.data] == [] response = self.get_success_response( self.organization.slug, query=f"{SEMVER_PACKAGE_ALIAS}:test2" ) assert [r["version"] for r in response.data] == [release_3.version] response = self.get_success_response( self.organization.slug, query=f"{SEMVER_PACKAGE_ALIAS}:test" ) assert [r["version"] for r in response.data] == [release_2.version, release_1.version] def test_finalized_filter(self) -> None: self.login_as(user=self.user) release_1 = self.create_release( version="test@1.2.3", date_released=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC) ) release_2 = self.create_release(version="test@1.2.4", date_released=None) release_3 = self.create_release(version="test2@1.2.5", date_released=None) release_4 = self.create_release(version="test2@1.2.6") url = reverse( "sentry-api-0-organization-releases", kwargs={"organization_id_or_slug": self.organization.slug}, ) response = self.client.get(url + f"?query={FINALIZED_KEY}:true", format="json") assert response.status_code == 200, response.content assert len(response.data) == 1 assert [r["version"] for r in response.data] == [release_1.version] response = self.client.get(url + f"?query={FINALIZED_KEY}:false", format="json") assert [r["version"] for r in response.data] == [ release_4.version, release_3.version, release_2.version, ] # if anything besides "true" or "false" is parsed, return all releases response = self.client.get(url + f"?query={FINALIZED_KEY}:wrong_value", format="json") assert [r["version"] for r in response.data] == [ release_4.version, release_3.version, release_2.version, release_1.version, ] def test_release_stage_filter(self) -> None: self.login_as(user=self.user) response = self.get_success_response( self.organization.slug, query=f"{RELEASE_STAGE_ALIAS}:adopted", environment=self.environment.name, ) assert [r["version"] for r in response.data] == [] replaced_release = self.create_release(version="replaced_release") adopted_release = 
self.create_release(version="adopted_release") not_adopted_release = self.create_release(version="not_adopted_release") ReleaseProjectEnvironment.objects.create( project_id=self.project.id, release_id=adopted_release.id, environment_id=self.environment.id, adopted=timezone.now(), ) ReleaseProjectEnvironment.objects.create( project_id=self.project.id, release_id=replaced_release.id, environment_id=self.environment.id, adopted=timezone.now(), unadopted=timezone.now(), ) ReleaseProjectEnvironment.objects.create( project_id=self.project.id, release_id=not_adopted_release.id, environment_id=self.environment.id, ) response = self.get_success_response( self.organization.slug, query=f"{RELEASE_STAGE_ALIAS}:{ReleaseStages.ADOPTED.value}", environment=self.environment.name, ) assert [r["version"] for r in response.data] == [adopted_release.version] response = self.get_success_response( self.organization.slug, query=f"{RELEASE_STAGE_ALIAS}:{ReleaseStages.LOW_ADOPTION.value}", environment=self.environment.name, ) assert [r["version"] for r in response.data] == [not_adopted_release.version] response = self.get_success_response( self.organization.slug, query=f"{RELEASE_STAGE_ALIAS}:{ReleaseStages.REPLACED.value}", environment=self.environment.name, ) assert [r["version"] for r in response.data] == [replaced_release.version] response = self.get_success_response( self.organization.slug, query=f"{RELEASE_STAGE_ALIAS}:[{ReleaseStages.ADOPTED.value},{ReleaseStages.REPLACED.value}]", environment=self.environment.name, ) assert [r["version"] for r in response.data] == [ adopted_release.version, replaced_release.version, ] response = self.get_success_response( self.organization.slug, query=f"{RELEASE_STAGE_ALIAS}:[{ReleaseStages.LOW_ADOPTION.value}]", environment=self.environment.name, ) assert [r["version"] for r in response.data] == [not_adopted_release.version] response = self.get_response( self.organization.slug, query=f"{RELEASE_STAGE_ALIAS}:invalid_stage", environment=self.environment.name, ) assert response.status_code == 400 response = self.get_response( self.organization.slug, query=f"{RELEASE_STAGE_ALIAS}:{ReleaseStages.ADOPTED.value}", # No environment ) assert response.status_code == 400 def test_multi_project_release_gets_filtered(self) -> None: multi_project_release = self.create_release(version="multi_project_release") single_project_release = self.create_release(version="single_project_release") project2 = self.create_project(teams=[self.team], organization=self.organization) # One project not adopted ReleaseProjectEnvironment.objects.create( project_id=self.project.id, release_id=multi_project_release.id, environment_id=self.environment.id, ) # One project adopted ReleaseProjectEnvironment.objects.create( project_id=project2.id, release_id=multi_project_release.id, environment_id=self.environment.id, adopted=timezone.now(), ) ReleaseProjectEnvironment.objects.create( project_id=self.project.id, release_id=single_project_release.id, environment_id=self.environment.id, adopted=timezone.now(), ) # Filtering to self.environment.name and self.project with release.stage:adopted should NOT return multi_project_release. 
response = self.get_success_response( self.organization.slug, project=self.project.id, environment=self.environment.name, query=f"{RELEASE_STAGE_ALIAS}:adopted", ) assert [r["version"] for r in response.data] == [single_project_release.version] response = self.get_success_response( self.organization.slug, environment=self.environment.name, query=f"{RELEASE_STAGE_ALIAS}:adopted", ) assert [r["version"] for r in response.data] == [ single_project_release.version, multi_project_release.version, ] def test_query_filter(self) -> None: self.login_as(user=self.user) release = self.create_release( self.project, version="foobar", date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC), ) self.create_release( self.project, version="sdfsdfsdf", date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC), ) response = self.get_success_response(self.organization.slug, query="oob") assert [r["version"] for r in response.data] == [release.version] response = self.get_success_response(self.organization.slug, query="baz") assert [r["version"] for r in response.data] == [] response = self.get_success_response(self.organization.slug, query="release:*oob*") assert [r["version"] for r in response.data] == [release.version] response = self.get_success_response(self.organization.slug, query="release:foob*") assert [r["version"] for r in response.data] == [release.version] response = self.get_success_response(self.organization.slug, query="release:*bar") assert [r["version"] for r in response.data] == [release.version] response = self.get_success_response(self.organization.slug, query="release:foobar") assert [r["version"] for r in response.data] == [release.version] response = self.get_success_response(self.organization.slug, query="release:*baz*") assert [r["version"] for r in response.data] == []
OrganizationReleasesStatsTest
python
celery__celery
t/unit/tasks/test_canvas.py
{ "start": 2691, "end": 2877 }
class ____(chord):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.subtask_type = "chord_subclass"


@Signature.register_type()
chord_subclass
python
django__django
django/contrib/admin/widgets.py
{ "start": 3839, "end": 4542 }
class ____(forms.ClearableFileInput):
    template_name = "admin/widgets/clearable_file_input.html"


def url_params_from_lookup_dict(lookups):
    """
    Convert the type of lookups specified in a ForeignKey limit_choices_to
    attribute to a dictionary of query parameters
    """
    params = {}
    if lookups and hasattr(lookups, "items"):
        for k, v in lookups.items():
            if callable(v):
                v = v()
            if isinstance(v, (tuple, list)):
                v = ",".join(str(x) for x in v)
            elif isinstance(v, bool):
                v = ("0", "1")[v]
            else:
                v = str(v)
            params[k] = v
    return params
AdminFileWidget
python
django-guardian__django-guardian
guardian/testapp/tests/test_shortcuts.py
{ "start": 2033, "end": 7390 }
class ____(ObjectPermissionTestCase): """ Tests permission assigning for user/group and object. """ def test_not_model(self): self.assertRaises( NotUserNorGroup, assign_perm, perm="change_object", user_or_group="Not a Model", obj=self.ctype ) def test_global_wrong_perm(self): self.assertRaises( ValueError, assign_perm, perm="change_site", # for global permissions must provide app_label user_or_group=self.user, ) def test_user_assign_perm(self): assign_perm("add_contenttype", self.user, self.ctype) assign_perm("change_contenttype", self.group, self.ctype) assign_perm(self.get_permission("delete_contenttype"), self.user, self.ctype) self.assertTrue(self.user.has_perm("add_contenttype", self.ctype)) self.assertTrue(self.user.has_perm("change_contenttype", self.ctype)) self.assertTrue(self.user.has_perm("delete_contenttype", self.ctype)) def test_group_assign_perm(self): assign_perm("add_contenttype", self.group, self.ctype) assign_perm("change_contenttype", self.group, self.ctype) assign_perm(self.get_permission("delete_contenttype"), self.group, self.ctype) check = ObjectPermissionChecker(self.group) self.assertTrue(check.has_perm("add_contenttype", self.ctype)) self.assertTrue(check.has_perm("change_contenttype", self.ctype)) self.assertTrue(check.has_perm("delete_contenttype", self.ctype)) def test_user_assign_perm_queryset(self): assign_perm("add_contenttype", self.user, self.ctype_qset) assign_perm("change_contenttype", self.group, self.ctype_qset) assign_perm(self.get_permission("delete_contenttype"), self.user, self.ctype_qset) for obj in self.ctype_qset: self.assertTrue(self.user.has_perm("add_contenttype", obj)) self.assertTrue(self.user.has_perm("change_contenttype", obj)) self.assertTrue(self.user.has_perm("delete_contenttype", obj)) def test_group_assign_perm_queryset(self): assign_perm("add_contenttype", self.group, self.ctype_qset) assign_perm("change_contenttype", self.group, self.ctype_qset) assign_perm(self.get_permission("delete_contenttype"), self.group, self.ctype_qset) check = ObjectPermissionChecker(self.group) for obj in self.ctype_qset: self.assertTrue(check.has_perm("add_contenttype", obj)) self.assertTrue(check.has_perm("change_contenttype", obj)) self.assertTrue(check.has_perm("delete_contenttype", obj)) def test_user_assign_perm_global(self): perm = assign_perm("contenttypes.change_contenttype", self.user) assign_perm(self.get_permission("delete_contenttype"), self.group) self.assertTrue(self.user.has_perm("contenttypes.change_contenttype")) self.assertTrue(self.user.has_perm("contenttypes.delete_contenttype")) self.assertTrue(isinstance(perm, Permission)) def test_group_assign_perm_global(self): perm = assign_perm("contenttypes.change_contenttype", self.group) self.assertTrue(self.user.has_perm("contenttypes.change_contenttype")) self.assertTrue(isinstance(perm, Permission)) def test_assign_perm_with_dots(self): Permission.objects.create( codename="contenttype.reorder", content_type=ContentType.objects.get_for_model(self.ctype) ) assign_perm("contenttypes.contenttype.reorder", self.user, self.ctype) self.assertTrue(self.user.has_perm("contenttypes.contenttype.reorder", self.ctype)) def test_deprecation_warning(self): with warnings.catch_warnings(record=True) as warns: warnings.simplefilter("always") assign("contenttypes.change_contenttype", self.group) self.assertEqual(len(warns), 1) self.assertTrue(isinstance(warns[0].message, DeprecationWarning)) def test_user_assign_perm_list(self): """ Test that one is able to assign permissions for a list of objects to a user 
""" assign_perm("add_contenttype", self.user, self.ctype_list) assign_perm("change_contenttype", self.group, self.ctype_list) assign_perm(self.get_permission("delete_contenttype"), self.user, self.ctype_list) for obj in self.ctype_list: self.assertTrue(self.user.has_perm("add_contenttype", obj)) self.assertTrue(self.user.has_perm("change_contenttype", obj)) self.assertTrue(self.user.has_perm("delete_contenttype", obj)) def test_group_assign_perm_list(self): """ Test that one is able to assign permissions for a list of objects to a group """ assign_perm("add_contenttype", self.group, self.ctype_list) assign_perm("change_contenttype", self.group, self.ctype_list) assign_perm(self.get_permission("delete_contenttype"), self.group, self.ctype_list) check = ObjectPermissionChecker(self.group) for obj in self.ctype_list: self.assertTrue(check.has_perm("add_contenttype", obj)) self.assertTrue(check.has_perm("change_contenttype", obj)) self.assertTrue(check.has_perm("delete_contenttype", obj))
AssignPermTest
python
huggingface__transformers
src/transformers/models/glm46v/modular_glm46v.py
{ "start": 5618, "end": 8530 }
class ____(Glm4vVideoProcessor): def sample_frames( self, metadata: VideoMetadata, fps: Optional[Union[int, float]] = None, **kwargs, ): if metadata is None or getattr(metadata, "fps", None) is None: raise ValueError( "Asked to sample frames per second but no video metadata was provided which is required when sampling in Glm46V. " "Please pass in `VideoMetadata` object or set `do_sample_frames=False`" ) total_frames = metadata.total_num_frames max_frame_idx = total_frames - 1 duration = metadata.duration or round(max_frame_idx / metadata.fps) + 1 DYNAMIC_FPS_THRES = {30: 3, 300: 1, 2400: 0.5} MAX_FRAME_COUNT_DYNAMIC = 640 MAX_DURATION = 2400 effective_duration = min(duration, MAX_DURATION) if effective_duration <= 30: target_fps = DYNAMIC_FPS_THRES[30] elif effective_duration <= 300: target_fps = DYNAMIC_FPS_THRES[300] else: target_fps = DYNAMIC_FPS_THRES[2400] extract_t = int(effective_duration * target_fps * self.temporal_patch_size) extract_t = min(extract_t, MAX_FRAME_COUNT_DYNAMIC) duration_per_frame = 1 / metadata.fps timestamps = [i * duration_per_frame for i in range(total_frames)] max_second = int(duration) if total_frames < extract_t: frame_indices = np.linspace(0, total_frames - 1, extract_t, dtype=int).tolist() else: frame_indices = [] current_second = 0 inv_fps = 1 / (self.temporal_patch_size * target_fps) for frame_index in range(total_frames): if timestamps[frame_index] >= current_second: current_second += inv_fps frame_indices.append(frame_index) if current_second >= max_second: break if len(frame_indices) < extract_t: if len(frame_indices) == 0: start, end = 0, max(total_frames - 1, 0) else: start, end = frame_indices[0], frame_indices[-1] frame_indices = np.linspace(start, end, extract_t, dtype=int).tolist() elif len(frame_indices) > extract_t: frame_indices = np.linspace(0, total_frames - 1, extract_t, dtype=int).tolist() seen, uniq = set(), [] for idx in frame_indices: if idx not in seen: seen.add(idx) uniq.append(idx) if len(uniq) & 1: uniq.append(uniq[-1]) return np.array(uniq) __all__ = [ "Glm46VConfig", "Glm46VModel", "Glm46VPreTrainedModel", "Glm46VForConditionalGeneration", "Glm46VProcessor", "Glm46VImageProcessor", "Glm46VImageProcessorFast", "Glm46VVideoProcessor", ]
Glm46VVideoProcessor
python
Pylons__pyramid
tests/test_traversal.py
{ "start": 117, "end": 1426 }
class ____(unittest.TestCase): def _callFUT(self, path): from pyramid.traversal import traversal_path return traversal_path(path) def test_utf8(self): la = b'La Pe\xc3\xb1a' encoded = quote(la) decoded = text_(la, 'utf-8') path = '/'.join([encoded, encoded]) result = self._callFUT(path) self.assertEqual(result, (decoded, decoded)) def test_utf16(self): from pyramid.exceptions import URLDecodeError la = text_(b'La Pe\xc3\xb1a', 'utf-8').encode('utf-16') encoded = quote(la) path = '/'.join([encoded, encoded]) self.assertRaises(URLDecodeError, self._callFUT, path) def test_unicode_highorder_chars(self): path = text_('/%E6%B5%81%E8%A1%8C%E8%B6%8B%E5%8A%BF') self.assertEqual( self._callFUT(path), (text_('\u6d41\u884c\u8d8b\u52bf', 'unicode_escape'),), ) def test_element_urllquoted(self): self.assertEqual( self._callFUT('/foo/space%20thing/bar'), (text_('foo'), text_('space thing'), text_('bar')), ) def test_unicode_undecodeable_to_ascii(self): path = text_(b'/La Pe\xc3\xb1a', 'utf-8') self.assertRaises(UnicodeEncodeError, self._callFUT, path)
TraversalPathTests
python
numpy__numpy
numpy/linalg/tests/test_linalg.py
{ "start": 58272, "end": 58337 }
class ____(_TestNorm, _TestNormSingleBase):
    pass
TestNormSingle
python
scipy__scipy
scipy/io/arff/_arffread.py
{ "start": 2574, "end": 4655 }
class ____(Attribute): type_name = 'nominal' def __init__(self, name, values): super().__init__(name) self.values = values self.range = values self.dtype = (np.bytes_, max(len(i) for i in values)) @staticmethod def _get_nom_val(atrv): """Given a string containing a nominal type, returns a tuple of the possible values. A nominal type is defined as something framed between braces ({}). Parameters ---------- atrv : str Nominal type definition Returns ------- poss_vals : tuple possible values Examples -------- >>> from scipy.io.arff._arffread import NominalAttribute >>> NominalAttribute._get_nom_val("{floup, bouga, fl, ratata}") ('floup', 'bouga', 'fl', 'ratata') """ m = r_nominal.match(atrv) if m: attrs, _ = split_data_line(m.group(1)) return tuple(attrs) else: raise ValueError("This does not look like a nominal string") @classmethod def parse_attribute(cls, name, attr_string): """ Parse the attribute line if it knows how. Returns the parsed attribute, or None. For nominal attributes, the attribute string would be like '{<attr_1>, <attr2>, <attr_3>}'. """ if attr_string[0] == '{': values = cls._get_nom_val(attr_string) return cls(name, values) else: return None def parse_data(self, data_str): """ Parse a value of this type. """ if data_str in self.values: return data_str elif data_str == '?': return data_str else: raise ValueError(f"{str(data_str)} value not in {str(self.values)}") def __str__(self): msg = self.name + ",{" for i in range(len(self.values)-1): msg += self.values[i] + "," msg += self.values[-1] msg += "}" return msg
NominalAttribute
python
tiangolo__fastapi
scripts/people.py
{ "start": 1494, "end": 1590 }
class ____(BaseModel):
    createdAt: datetime
    author: Union[Author, None] = None
CommentsNode
python
bokeh__bokeh
src/bokeh/colors/color.py
{ "start": 1625, "end": 5241 }
class ____(Serializable, metaclass=ABCMeta): ''' A base class for representing color objects. ''' def __repr__(self) -> str: return self.to_css() def to_serializable(self, serializer: Serializer) -> AnyRep: return self.to_css() @staticmethod def clamp(value: float, maximum: float | None = None) -> float: ''' Clamp numeric values to be non-negative, an optionally, less than a given maximum. Args: value (float) : A number to clamp. maximum (float, optional) : A max bound to to clamp to. If None, there is no upper bound, and values are only clamped to be non-negative. (default: None) Returns: float ''' value = max(value, 0) if maximum is not None: return min(value, maximum) else: return value @abstractmethod def copy(self) -> Self: ''' Copy this color. *Subclasses must implement this method.* ''' raise NotImplementedError def darken(self, amount: float) -> Self: ''' Darken (reduce the luminance) of this color. *Subclasses must implement this method.* Args: amount (float) : Amount to reduce the luminance by (clamped above zero) Returns: Color ''' return self.lighten(-amount) @classmethod @abstractmethod def from_hsl(cls, value: HSL) -> Self: ''' Create a new color by converting from an HSL color. *Subclasses must implement this method.* Args: value (HSL) : A color to convert from HSL Returns: Color ''' raise NotImplementedError @classmethod @abstractmethod def from_rgb(cls, value: RGB) -> Self: ''' Create a new color by converting from an RGB color. *Subclasses must implement this method.* Args: value (:class:`~bokeh.colors.RGB`) : A color to convert from RGB Returns: Color ''' raise NotImplementedError def lighten(self, amount: float) -> Self: ''' Lighten (increase the luminance) of this color. *Subclasses must implement this method.* Args: amount (float) : Amount to increase the luminance by (clamped above zero) Returns: Color ''' rgb = self.to_rgb() h, l, s = colorsys.rgb_to_hls(float(rgb.r)/255, float(rgb.g)/255, float(rgb.b)/255) new_l = self.clamp(l + amount, 1) r, g, b = colorsys.hls_to_rgb(h, new_l, s) rgb.r = round(r * 255) rgb.g = round(g * 255) rgb.b = round(b * 255) return self.from_rgb(rgb) @abstractmethod def to_css(self) -> str: ''' Return a CSS representation of this color. *Subclasses must implement this method.* Returns: str ''' raise NotImplementedError @abstractmethod def to_hsl(self) -> HSL: ''' Create a new HSL color by converting from this color. *Subclasses must implement this method.* Returns: HSL ''' raise NotImplementedError @abstractmethod def to_rgb(self) -> RGB: ''' Create a new HSL color by converting from this color. *Subclasses must implement this method.* Returns: :class:`~bokeh.colors.RGB` ''' raise NotImplementedError
Color
python
astropy__astropy
astropy/utils/exceptions.py
{ "start": 506, "end": 708 }
class ____(Warning):
    """
    The base warning class from which all Astropy warnings should inherit.
    Any warning inheriting from this class is handled by the Astropy logger.
    """
AstropyWarning
python
encode__django-rest-framework
tests/test_generics.py
{ "start": 11843, "end": 13237 }
class ____(TestCase): """ Test cases for a RetrieveUpdateDestroyAPIView that does NOT use the queryset/model mechanism but instead overrides get_object() """ def setUp(self): """ Create 3 BasicModel instances. """ items = ['foo', 'bar', 'baz'] for item in items: BasicModel(text=item).save() self.objects = BasicModel.objects self.data = [ {'id': obj.id, 'text': obj.text} for obj in self.objects.all() ] class OverriddenGetObjectView(generics.RetrieveUpdateDestroyAPIView): """ Example detail view for override of get_object(). """ serializer_class = BasicSerializer def get_object(self): pk = int(self.kwargs['pk']) return get_object_or_404(BasicModel.objects.all(), id=pk) self.view = OverriddenGetObjectView.as_view() def test_overridden_get_object_view(self): """ GET requests to RetrieveUpdateDestroyAPIView should return a single object. """ request = factory.get('/1') with self.assertNumQueries(1): response = self.view(request, pk=1).render() assert response.status_code == status.HTTP_200_OK assert response.data == self.data[0] # Regression test for #285
TestOverriddenGetObject
python
milvus-io__pymilvus
tests/test_decorators.py
{ "start": 5518, "end": 6013 }
class ____(MilvusException):
    def __init__(self, code=ErrorCode.FORCE_DENY, message="force deny"):
        super(MilvusException, self).__init__(message)
        self._code = code
        self._message = message
        self._compatible_code = common_pb2.ForceDeny

    @property
    def code(self):
        return self._code

    @property
    def message(self):
        return self._message

    @property
    def compatible_code(self):
        return self._compatible_code
MockForceDenyError