codebook_size = codebook_size if codebook_size is not None else {"height": 112, "width": 112}
codebook_size = get_size_dict(codebook_size, param_name="codebook_size")
codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else {"height": 112, "width": 112}
codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size")
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else FLAVA_IMAGE_MEAN
self.image_std = image_std if image_std is not None else FLAVA_IMAGE_STD
self.return_image_mask = return_image_mask
self.input_size_patches = input_size_patches
self.total_mask_patches = total_mask_patches
self.mask_group_min_patches = mask_group_min_patches
self.mask_group_max_patches = mask_group_max_patches
self.mask_group_min_aspect_ratio = mask_group_min_aspect_ratio
self.mask_group_max_aspect_ratio = mask_group_max_aspect_ratio
self.return_codebook_pixels = return_codebook_pixels
self.codebook_do_resize = codebook_do_resize
self.codebook_size = codebook_size
self.codebook_resample = codebook_resample
self.codebook_do_center_crop = codebook_do_center_crop
self.codebook_crop_size = codebook_crop_size
self.codebook_do_rescale = codebook_do_rescale
self.codebook_rescale_factor = codebook_rescale_factor
self.codebook_do_map_pixels = codebook_do_map_pixels
self.codebook_do_normalize = codebook_do_normalize
self.codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else FLAVA_CODEBOOK_MEAN
self.codebook_image_std = codebook_image_std if codebook_image_std is not None else FLAVA_CODEBOOK_STD
@classmethod
def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
"""
Overrides the `from_dict` method from the base class to make sure `codebook_size` and `codebook_crop_size`
are updated if the image processor is created through `from_dict` with kwargs, e.g.
`FlavaImageProcessor.from_pretrained(checkpoint, codebook_size=600)`.
"""
image_processor_dict = image_processor_dict.copy()
if "codebook_size" in kwargs:
image_processor_dict["codebook_size"] = kwargs.pop("codebook_size")
if "codebook_crop_size" in kwargs:
image_processor_dict["codebook_crop_size"] = kwargs.pop("codebook_crop_size")
return super().from_dict(image_processor_dict, **kwargs)
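# A minimal usage sketch of the override above (hedged; the size value is illustrative): passing
# `codebook_size` as a kwarg to `from_pretrained`, which routes through `from_dict`, updates the stored
# size instead of being dropped. The int is normalized to a dict by `get_size_dict`.
# >>> from transformers import FlavaImageProcessor
# >>> processor = FlavaImageProcessor.from_pretrained("facebook/flava-full", codebook_size=192)
# >>> processor.codebook_size
# {'height': 192, 'width': 192}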
@lru_cache()
def masking_generator(
self,
input_size_patches,
total_mask_patches,
mask_group_min_patches,
mask_group_max_patches,
mask_group_min_aspect_ratio,
mask_group_max_aspect_ratio,
) -> FlavaMaskingGenerator:
return FlavaMaskingGenerator(
input_size=input_size_patches,
total_mask_patches=total_mask_patches,
mask_group_min_patches=mask_group_min_patches,
mask_group_max_patches=mask_group_max_patches,
mask_group_min_aspect_ratio=mask_group_min_aspect_ratio,
mask_group_max_aspect_ratio=mask_group_max_aspect_ratio,
)
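# Note: `masking_generator` is wrapped in `lru_cache`, so repeated `preprocess` calls with the same masking
# hyperparameters reuse a single `FlavaMaskingGenerator` instance rather than rebuilding it per batch.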
# Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
def resize(
self,
image: np.ndarray,
size: Dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`Dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format. | 3,137 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/image_processing_flava.py |
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format. | 3,137 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/image_processing_flava.py |
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
output_size = (size["height"], size["width"])
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
def map_pixels(self, image: np.ndarray) -> np.ndarray:
return (1 - 2 * LOGIT_LAPLACE_EPS) * image + LOGIT_LAPLACE_EPS
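# `map_pixels` applies the logit-Laplace input mapping used by DALL-E-style image tokenizers, squeezing
# values from [0, 1] into [eps, 1 - eps]. Assuming LOGIT_LAPLACE_EPS = 0.1 (the value used in this file):
# >>> import numpy as np
# >>> FlavaImageProcessor().map_pixels(np.array([0.0, 0.5, 1.0]))
# array([0.1, 0.5, 0.9])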
def _preprocess_image(
self,
image: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[Dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[Dict[str, int]] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, List[float]]] = None,
image_std: Optional[Union[float, List[float]]] = None,
do_map_pixels: Optional[bool] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""Preprocesses a single image.""" | 3,137 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/image_processing_flava.py |
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_resize=do_resize,
size=size,
resample=resample,
)
# All transformations expect numpy arrays.
image = to_numpy_array(image)
if do_rescale and is_scaled_image(image):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(image)
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
if do_map_pixels:
image = self.map_pixels(image)
if data_format is not None:
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
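# The single-image pipeline above runs, in order: resize -> center crop -> rescale -> normalize ->
# map_pixels -> channel reordering. A rough value trace for a typical uint8 input (illustrative numbers):
# HxWx3 in [0, 255] -> resized/cropped -> rescale(1/255) gives [0, 1] -> normalize computes (x - mean) / std.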
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[Dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[Dict[str, int]] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, List[float]]] = None,
image_std: Optional[Union[float, List[float]]] = None,
# Mask related params
return_image_mask: Optional[bool] = None,
input_size_patches: Optional[int] = None,
total_mask_patches: Optional[int] = None,
mask_group_min_patches: Optional[int] = None,
mask_group_max_patches: Optional[int] = None,
mask_group_min_aspect_ratio: Optional[float] = None,
mask_group_max_aspect_ratio: Optional[float] = None,
# Codebook related params
return_codebook_pixels: Optional[bool] = None,
codebook_do_resize: Optional[bool] = None,
codebook_size: Optional[Dict[str, int]] = None,
codebook_resample: Optional[int] = None,
codebook_do_center_crop: Optional[bool] = None,
codebook_crop_size: Optional[Dict[str, int]] = None,
codebook_do_rescale: Optional[bool] = None,
codebook_rescale_factor: Optional[float] = None,
codebook_do_map_pixels: Optional[bool] = None,
codebook_do_normalize: Optional[bool] = None,
codebook_image_mean: Optional[Iterable[float]] = None,
codebook_image_std: Optional[Iterable[float]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> BatchFeature:
"""
Preprocess an image or batch of images. | 3,137 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/image_processing_flava.py |
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`Dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values to the range [0, 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
return_image_mask (`bool`, *optional*, defaults to `self.return_image_mask`):
Whether to return the image mask.
input_size_patches (`int`, *optional*, defaults to `self.input_size_patches`):
Number of patches in the image in the height and width direction.
total_mask_patches (`int`, *optional*, defaults to `self.total_mask_patches`):
Total number of patches that should be masked in the image.
mask_group_min_patches (`int`, *optional*, defaults to `self.mask_group_min_patches`):
Minimum number of patches that should be masked.
mask_group_max_patches (`int`, *optional*, defaults to `self.mask_group_max_patches`):
Maximum number of patches that should be masked.
mask_group_min_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_min_aspect_ratio`):
Minimum aspect ratio of the mask window.
mask_group_max_aspect_ratio (`float`, *optional*, defaults to `self.mask_group_max_aspect_ratio`):
Maximum aspect ratio of the mask window.
return_codebook_pixels (`bool`, *optional*, defaults to `self.return_codebook_pixels`):
Whether to return the codebook pixels.
codebook_do_resize (`bool`, *optional*, defaults to `self.codebook_do_resize`):
Whether to resize the codebook pixels.
codebook_size (`Dict[str, int]`, *optional*, defaults to `self.codebook_size`):
Size of the codebook pixels.
codebook_resample (`int`, *optional*, defaults to `self.codebook_resample`):
Resampling filter to use if resizing the codebook pixels. This can be one of the enum
`PILImageResampling`. Only has an effect if `codebook_do_resize` is set to `True`.
codebook_do_center_crop (`bool`, *optional*, defaults to `self.codebook_do_center_crop`):
Whether to center crop the codebook pixels.
codebook_crop_size (`Dict[str, int]`, *optional*, defaults to `self.codebook_crop_size`):
Size of the center crop of the codebook pixels. Only has an effect if `codebook_do_center_crop` is set
to `True`.
codebook_do_rescale (`bool`, *optional*, defaults to `self.codebook_do_rescale`):
Whether to rescale the codebook pixel values to the range [0, 1].
codebook_rescale_factor (`float`, *optional*, defaults to `self.codebook_rescale_factor`):
Rescale factor to rescale the codebook pixels by if `codebook_do_rescale` is set to `True`.
codebook_do_map_pixels (`bool`, *optional*, defaults to `self.codebook_do_map_pixels`):
Whether to map the codebook pixel values using `map_pixels`.
codebook_do_normalize (`bool`, *optional*, defaults to `self.codebook_do_normalize`):
Whether to normalize the codebook pixels.
codebook_image_mean (`float` or `List[float]`, *optional*, defaults to `self.codebook_image_mean`):
Codebook pixels mean to normalize the codebook pixels by if `codebook_do_normalize` is set to `True`.
codebook_image_std (`float` or `List[float]`, *optional*, defaults to `self.codebook_image_std`):
Codebook pixels standard deviation to normalize the codebook pixels by if `codebook_do_normalize` is
set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. | 3,137 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/image_processing_flava.py |
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size)
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name="crop_size")
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
return_image_mask = return_image_mask if return_image_mask is not None else self.return_image_mask
input_size_patches = input_size_patches if input_size_patches is not None else self.input_size_patches
total_mask_patches = total_mask_patches if total_mask_patches is not None else self.total_mask_patches
mask_group_min_patches = (
mask_group_min_patches if mask_group_min_patches is not None else self.mask_group_min_patches
)
mask_group_max_patches = (
mask_group_max_patches if mask_group_max_patches is not None else self.mask_group_max_patches
)
mask_group_min_aspect_ratio = (
mask_group_min_aspect_ratio
if mask_group_min_aspect_ratio is not None
else self.mask_group_min_aspect_ratio
)
mask_group_max_aspect_ratio = (
mask_group_max_aspect_ratio
if mask_group_max_aspect_ratio is not None
else self.mask_group_max_aspect_ratio
)
return_codebook_pixels = (
return_codebook_pixels if return_codebook_pixels is not None else self.return_codebook_pixels
)
codebook_do_resize = codebook_do_resize if codebook_do_resize is not None else self.codebook_do_resize
codebook_size = codebook_size if codebook_size is not None else self.codebook_size
codebook_size = get_size_dict(codebook_size, param_name="codebook_size")
codebook_resample = codebook_resample if codebook_resample is not None else self.codebook_resample
codebook_do_rescale = codebook_do_rescale if codebook_do_rescale is not None else self.codebook_do_rescale
codebook_rescale_factor = (
codebook_rescale_factor if codebook_rescale_factor is not None else self.codebook_rescale_factor
)
codebook_do_center_crop = (
codebook_do_center_crop if codebook_do_center_crop is not None else self.codebook_do_center_crop
)
codebook_crop_size = codebook_crop_size if codebook_crop_size is not None else self.codebook_crop_size
codebook_crop_size = get_size_dict(codebook_crop_size, param_name="codebook_crop_size")
codebook_do_map_pixels = (
codebook_do_map_pixels if codebook_do_map_pixels is not None else self.codebook_do_map_pixels
)
codebook_do_normalize = (
codebook_do_normalize if codebook_do_normalize is not None else self.codebook_do_normalize
)
codebook_image_mean = codebook_image_mean if codebook_image_mean is not None else self.codebook_image_mean
codebook_image_std = codebook_image_std if codebook_image_std is not None else self.codebook_image_std
images = make_list_of_images(images)
if not valid_images(images):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray."
)
processed_images = [
self._preprocess_image(
image=img,
do_resize=do_resize,
size=size,
resample=resample,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_map_pixels=False,
data_format=data_format,
input_data_format=input_data_format,
)
for img in images
]
data = {"pixel_values": processed_images} | 3,137 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/image_processing_flava.py |
if return_codebook_pixels:
codebook_images = [
self._preprocess_image(
image=img,
do_resize=codebook_do_resize,
size=codebook_size,
resample=codebook_resample,
do_center_crop=codebook_do_center_crop,
crop_size=codebook_crop_size,
do_rescale=codebook_do_rescale,
rescale_factor=codebook_rescale_factor,
do_normalize=codebook_do_normalize,
image_mean=codebook_image_mean,
image_std=codebook_image_std,
do_map_pixels=codebook_do_map_pixels,
data_format=data_format,
input_data_format=input_data_format,
)
for img in images
]
data["codebook_pixel_values"] = codebook_images | 3,137 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/image_processing_flava.py |
if return_image_mask:
mask_generator = self.masking_generator(
input_size_patches=input_size_patches,
total_mask_patches=total_mask_patches,
mask_group_min_patches=mask_group_min_patches,
mask_group_max_patches=mask_group_max_patches,
mask_group_min_aspect_ratio=mask_group_min_aspect_ratio,
mask_group_max_aspect_ratio=mask_group_max_aspect_ratio,
)
masks = [mask_generator() for _ in images]
data["bool_masked_pos"] = masks
return BatchFeature(data=data, tensor_type=return_tensors)
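# A hedged usage sketch of `preprocess` (the `image` variable is assumed to be a PIL image you already
# loaded; calling the processor invokes `preprocess` via `__call__`):
# >>> from transformers import FlavaImageProcessor
# >>> image_processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")
# >>> outputs = image_processor(image, return_image_mask=True, return_codebook_pixels=True, return_tensors="pt")
# >>> sorted(outputs.keys())
# ['bool_masked_pos', 'codebook_pixel_values', 'pixel_values']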
class FlavaProcessor(ProcessorMixin):
r"""
Constructs a FLAVA processor which wraps a FLAVA image processor and a FLAVA tokenizer into a single processor.
[`FlavaProcessor`] offers all the functionalities of [`FlavaImageProcessor`] and [`BertTokenizerFast`]. See
[`~FlavaProcessor.__call__`] and [`~FlavaProcessor.decode`] for more information.
Args:
image_processor ([`FlavaImageProcessor`], *optional*): The image processor is a required input.
tokenizer ([`BertTokenizerFast`], *optional*): The tokenizer is a required input.
"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = "FlavaImageProcessor"
tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.",
FutureWarning,
)
feature_extractor = kwargs.pop("feature_extractor")
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor
def __call__(
self,
images: Optional[ImageInput] = None,
text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_image_mask: Optional[bool] = None,
return_codebook_pixels: Optional[bool] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
):
""" | 3,138 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/processing_flava.py |
This method uses the [`FlavaImageProcessor.__call__`] method to prepare image(s) for the model, and
[`BertTokenizerFast.__call__`] to prepare text for the model.
Please refer to the docstring of the above two methods for more information.
"""
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none.") | 3,138 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/processing_flava.py |
if text is not None:
encoding = self.tokenizer(
text=text,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
return_tensors=return_tensors,
**kwargs,
)
if images is not None:
image_features = self.image_processor(
images,
return_image_mask=return_image_mask, | 3,138 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/flava/processing_flava.py |
return_codebook_pixels=return_codebook_pixels,
return_tensors=return_tensors,
**kwargs,
)
if text is not None and images is not None:
encoding.update(image_features)
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
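# A hedged end-to-end sketch of `__call__`: text goes through the tokenizer, images through the image
# processor, and the two outputs are merged into one encoding (loading of `image` is assumed).
# >>> from transformers import FlavaProcessor
# >>> processor = FlavaProcessor.from_pretrained("facebook/flava-full")
# >>> inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# >>> sorted(inputs.keys())
# ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']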
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
"""
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
"""
return self.tokenizer.decode(*args, **kwargs)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def feature_extractor_class(self):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
FutureWarning,
)
return self.image_processor_class
@property
def feature_extractor(self):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
FutureWarning,
)
return self.image_processor
class AlbertConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`AlbertModel`] or a [`TFAlbertModel`]. It is used
to instantiate an ALBERT model according to the specified arguments, defining the model architecture. Instantiating
a configuration with the defaults will yield a similar configuration to that of the ALBERT
[albert/albert-xxlarge-v2](https://huggingface.co/albert/albert-xxlarge-v2) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30000):
Vocabulary size of the ALBERT model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`AlbertModel`] or [`TFAlbertModel`].
embedding_size (`int`, *optional*, defaults to 128):
Dimensionality of vocabulary embeddings.
hidden_size (`int`, *optional*, defaults to 4096):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_hidden_groups (`int`, *optional*, defaults to 1):
Number of groups for the hidden layers, parameters in the same group are shared.
num_attention_heads (`int`, *optional*, defaults to 64):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 16384):
The dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
inner_group_num (`int`, *optional*, defaults to 1):
The number of inner repetitions of attention and FFN layers.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu_new"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
(e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`AlbertModel`] or [`TFAlbertModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for attached classifiers.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 2):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 3):
End of stream token id.
Examples:
```python
>>> from transformers import AlbertConfig, AlbertModel
>>> # Initializing an ALBERT-xxlarge style configuration
>>> albert_xxlarge_configuration = AlbertConfig()
>>> # Initializing an ALBERT-base style configuration
>>> albert_base_configuration = AlbertConfig(
... hidden_size=768,
... num_attention_heads=12,
... intermediate_size=3072,
... )
>>> # Initializing a model (with random weights) from the ALBERT-xxlarge style configuration
>>> model = AlbertModel(albert_xxlarge_configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "albert" | 3,139 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/configuration_albert.py |
def __init__(
self,
vocab_size=30000,
embedding_size=128,
hidden_size=4096,
num_hidden_layers=12,
num_hidden_groups=1,
num_attention_heads=64,
intermediate_size=16384,
inner_group_num=1,
hidden_act="gelu_new",
hidden_dropout_prob=0,
attention_probs_dropout_prob=0,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
classifier_dropout_prob=0.1,
position_embedding_type="absolute",
pad_token_id=0,
bos_token_id=2,
eos_token_id=3,
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_hidden_groups = num_hidden_groups
self.num_attention_heads = num_attention_heads
self.inner_group_num = inner_group_num
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.classifier_dropout_prob = classifier_dropout_prob
self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
]
)
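# For the default (non multiple-choice) task, `inputs` above resolves to dynamic batch/sequence axes, i.e.:
# {"input_ids": {0: "batch", 1: "sequence"},
#  "attention_mask": {0: "batch", 1: "sequence"},
#  "token_type_ids": {0: "batch", 1: "sequence"}}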
class AlbertEmbeddings(nn.Module):
"""
Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config: AlbertConfig):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer(
"token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
)
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
past_key_values_length: int = 0,
) -> torch.Tensor:
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
# When token_type_ids is None, use the buffer registered in the constructor (all zeros), which matches what
# the auto-generated ids would be. Using the registered buffer helps users trace the model without passing
# token_type_ids, and solves issue #5664.
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
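# Shape sketch for the forward pass above (illustrative): input_ids (batch, seq) -> word, position, and
# token-type embeddings are summed elementwise -> LayerNorm and dropout keep the shape
# (batch, seq, embedding_size). With the defaults, embedding_size=128, so a (2, 16) input yields (2, 16, 128).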
class AlbertAttention(nn.Module):
def __init__(self, config: AlbertConfig):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads}"
)
self.num_attention_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.attention_head_size = config.hidden_size // config.num_attention_heads
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.attention_dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.output_dropout = nn.Dropout(config.hidden_dropout_prob)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pruned_heads = set()
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
# Copied from transformers.models.bert.modeling_bert.BertSelfAttention.transpose_for_scores
def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def prune_heads(self, heads: List[int]) -> None:
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.num_attention_heads, self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.query = prune_linear_layer(self.query, index)
self.key = prune_linear_layer(self.key, index)
self.value = prune_linear_layer(self.value, index)
self.dense = prune_linear_layer(self.dense, index, dim=1)
# Update hyper params and store pruned heads
self.num_attention_heads = self.num_attention_heads - len(heads)
self.all_head_size = self.attention_head_size * self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
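# Worked example for the bookkeeping above (assuming the default 64 heads and hidden_size=4096, so
# attention_head_size=64): pruning heads [0, 1] leaves num_attention_heads = 62 and shrinks all_head_size
# to 62 * 64 = 3968, so the query/key/value projections and `dense` are re-created with smaller weights.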
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: bool = False,
) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in the model's forward() function)
attention_scores = attention_scores + attention_mask
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.attention_dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.transpose(2, 1).flatten(2)
projected_context_layer = self.dense(context_layer)
projected_context_layer_dropout = self.output_dropout(projected_context_layer)
layernormed_context_layer = self.LayerNorm(hidden_states + projected_context_layer_dropout)
return (layernormed_context_layer, attention_probs) if output_attentions else (layernormed_context_layer,)
class AlbertSdpaAttention(AlbertAttention):
def __init__(self, config):
super().__init__(config)
self.dropout_prob = config.attention_probs_dropout_prob
self.require_contiguous_qkv = not is_torch_greater_or_equal_than_2_2
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: bool = False,
) -> Union[Tuple[torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
if self.position_embedding_type != "absolute" or output_attentions or head_mask is not None:
logger.warning(
"AlbertSdpaAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not support "
"non-absolute `position_embedding_type` or `output_attentions=True` or `head_mask`. Falling back to "
"the eager attention implementation, but specifying the eager implementation will be required from "
"Transformers version v5.0.0 onwards. This warning can be removed using the argument "
'`attn_implementation="eager"` when loading the model.'
) | 3,143 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
return super().forward(hidden_states, attention_mask, head_mask, output_attentions)
batch_size, seq_len, _ = hidden_states.size()
query_layer = self.transpose_for_scores(self.query(hidden_states))
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
# SDPA with memory-efficient backend is broken in torch==2.1.2 when using non-contiguous inputs and a custom
# attn_mask, so we need to call `.contiguous()` here. This was fixed in torch==2.2.0.
# Reference: https://github.com/pytorch/pytorch/issues/112577
if self.require_contiguous_qkv and query_layer.device.type == "cuda" and attention_mask is not None:
query_layer = query_layer.contiguous()
key_layer = key_layer.contiguous()
value_layer = value_layer.contiguous()
attention_output = torch.nn.functional.scaled_dot_product_attention(
query=query_layer,
key=key_layer,
value=value_layer,
attn_mask=attention_mask,
dropout_p=self.dropout_prob if self.training else 0.0,
is_causal=False,
)
attention_output = attention_output.transpose(1, 2)
attention_output = attention_output.reshape(batch_size, seq_len, self.all_head_size)
projected_context_layer = self.dense(attention_output)
projected_context_layer_dropout = self.output_dropout(projected_context_layer)
layernormed_context_layer = self.LayerNorm(hidden_states + projected_context_layer_dropout)
return (layernormed_context_layer,)
class AlbertLayer(nn.Module):
def __init__(self, config: AlbertConfig):
super().__init__()
self.config = config
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.full_layer_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attention = ALBERT_ATTENTION_CLASSES[config._attn_implementation](config)
self.ffn = nn.Linear(config.hidden_size, config.intermediate_size)
self.ffn_output = nn.Linear(config.intermediate_size, config.hidden_size)
self.activation = ACT2FN[config.hidden_act]
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: bool = False,
output_hidden_states: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
attention_output = self.attention(hidden_states, attention_mask, head_mask, output_attentions)
ffn_output = apply_chunking_to_forward(
self.ff_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output[0],
)
hidden_states = self.full_layer_layer_norm(ffn_output + attention_output[0])
return (hidden_states,) + attention_output[1:] # add attentions if we output them
def ff_chunk(self, attention_output: torch.Tensor) -> torch.Tensor:
ffn_output = self.ffn(attention_output)
ffn_output = self.activation(ffn_output)
ffn_output = self.ffn_output(ffn_output)
return ffn_output
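# `apply_chunking_to_forward` in `forward` above splits `attention_output` along the sequence dimension
# (`seq_len_dim = 1`) into chunks of `chunk_size_feed_forward`, runs `ff_chunk` on each, and concatenates
# the results. The output is identical to running `ff_chunk` on the full tensor; only peak memory changes.
# Rough equivalence sketch (illustrative, chunk size 2 on a length-4 sequence):
# torch.cat([ff_chunk(x[:, 0:2]), ff_chunk(x[:, 2:4])], dim=1) == ff_chunk(x)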
class AlbertLayerGroup(nn.Module):
def __init__(self, config: AlbertConfig):
super().__init__()
self.albert_layers = nn.ModuleList([AlbertLayer(config) for _ in range(config.inner_group_num)])
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: bool = False,
output_hidden_states: bool = False,
) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
layer_hidden_states = ()
layer_attentions = ()
for layer_index, albert_layer in enumerate(self.albert_layers):
layer_output = albert_layer(hidden_states, attention_mask, head_mask[layer_index], output_attentions)
hidden_states = layer_output[0]
if output_attentions:
layer_attentions = layer_attentions + (layer_output[1],)
if output_hidden_states:
layer_hidden_states = layer_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if output_hidden_states:
outputs = outputs + (layer_hidden_states,)
if output_attentions:
outputs = outputs + (layer_attentions,)
return outputs # last-layer hidden state, (layer hidden states), (layer attentions)
class AlbertTransformer(nn.Module):
def __init__(self, config: AlbertConfig):
super().__init__()
self.config = config
self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
self.albert_layer_groups = nn.ModuleList([AlbertLayerGroup(config) for _ in range(config.num_hidden_groups)])
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
) -> Union[BaseModelOutput, Tuple]:
hidden_states = self.embedding_hidden_mapping_in(hidden_states)
all_hidden_states = (hidden_states,) if output_hidden_states else None
all_attentions = () if output_attentions else None
head_mask = [None] * self.config.num_hidden_layers if head_mask is None else head_mask
for i in range(self.config.num_hidden_layers):
# Number of layers in a hidden group
layers_per_group = int(self.config.num_hidden_layers / self.config.num_hidden_groups)
# Index of the hidden group
group_idx = int(i / (self.config.num_hidden_layers / self.config.num_hidden_groups))
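# Worked example of the mapping above: with num_hidden_layers=12 and num_hidden_groups=1 (the defaults),
# layers_per_group=12 and every i maps to group_idx=0, i.e. all twelve iterations reuse the same shared
# parameters; with num_hidden_groups=4, layers 0-2 use group 0, layers 3-5 group 1, and so on.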
layer_group_output = self.albert_layer_groups[group_idx](
hidden_states,
attention_mask,
head_mask[group_idx * layers_per_group : (group_idx + 1) * layers_per_group],
output_attentions,
output_hidden_states,
)
hidden_states = layer_group_output[0]
if output_attentions:
all_attentions = all_attentions + layer_group_output[-1]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class AlbertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = AlbertConfig
load_tf_weights = load_tf_weights_in_albert
base_model_prefix = "albert"
_supports_sdpa = True
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
class AlbertForPreTrainingOutput(ModelOutput):
"""
Output type of [`AlbertForPreTraining`].
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
sop_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of | 3,148 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: torch.FloatTensor = None
sop_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None | 3,148 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
class AlbertModel(AlbertPreTrainedModel):
config_class = AlbertConfig
base_model_prefix = "albert"
def __init__(self, config: AlbertConfig, add_pooling_layer: bool = True):
super().__init__(config)
self.config = config
self.embeddings = AlbertEmbeddings(config)
self.encoder = AlbertTransformer(config)
if add_pooling_layer:
self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
self.pooler_activation = nn.Tanh()
else:
self.pooler = None
self.pooler_activation = None
self.attn_implementation = config._attn_implementation
self.position_embedding_type = config.position_embedding_type
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Embedding:
return self.embeddings.word_embeddings
def set_input_embeddings(self, value: nn.Embedding) -> None:
self.embeddings.word_embeddings = value | 3,149 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. ALBERT
has a different architecture in that its layers are shared across groups, which in turn contain inner groups.
If an ALBERT model has 12 hidden layers and 2 hidden groups, with two inner groups each, there are a total of
4 different layers.
These layers are flattened: the indices [0,1] correspond to the two inner groups of the first hidden group,
while [2,3] correspond to the two inner groups of the second hidden group. | 3,149 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
Any layer with an index other than [0,1,2,3] will result in an error. See base class PreTrainedModel for more
information about head pruning
"""
for layer, heads in heads_to_prune.items():
group_idx = int(layer / self.config.inner_group_num)
inner_group_idx = int(layer - group_idx * self.config.inner_group_num)
self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads) | 3,149 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
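# A worked sketch of the flattened-index arithmetic in _prune_heads above,
# assuming inner_group_num = 2, so the unique layer indices are 0..3.
inner_group_num = 2
for layer in range(4):
    group_idx = layer // inner_group_num
    inner_group_idx = layer - group_idx * inner_group_num
    print(layer, "->", (group_idx, inner_group_idx))
# prints: 0 -> (0, 0), 1 -> (0, 1), 2 -> (1, 0), 3 -> (1, 1)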
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPooling,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[BaseModelOutputWithPooling, Tuple]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = ( | 3,149 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict | 3,149 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device | 3,149 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
embedding_output = self.embeddings(
input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
use_sdpa_attention_mask = (
self.attn_implementation == "sdpa"
and self.position_embedding_type == "absolute"
and head_mask is None
and not output_attentions
) | 3,149 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
if use_sdpa_attention_mask:
extended_attention_mask = _prepare_4d_attention_mask_for_sdpa(
attention_mask, embedding_output.dtype, tgt_len=seq_length
)
else:
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs = self.encoder(
embedding_output,
extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0] | 3,149 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
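# A minimal sketch of the eager-path additive mask built above: padding slots
# (0 in attention_mask) become a large negative bias; real tokens get 0.
import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])                  # last position is padding
extended = attention_mask[:, None, None, :].to(torch.float32)  # (batch, 1, 1, seq_len)
extended = (1.0 - extended) * torch.finfo(torch.float32).min
print(extended)  # zeros for real tokens, about -3.4e38 for the padded slot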
pooled_output = self.pooler_activation(self.pooler(sequence_output[:, 0])) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
) | 3,149 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
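# A hedged end-to-end usage sketch of AlbertModel with a pretrained checkpoint,
# showing the two outputs assembled in the forward pass above.
import torch
from transformers import AutoTokenizer, AlbertModel

tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
model = AlbertModel.from_pretrained("albert/albert-base-v2")
inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (batch, seq_len, hidden_size)
print(outputs.pooler_output.shape)      # (batch, hidden_size): tanh(pooler([CLS]))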
class AlbertForPreTraining(AlbertPreTrainedModel):
_tied_weights_keys = ["predictions.decoder.bias", "predictions.decoder.weight"]
def __init__(self, config: AlbertConfig):
super().__init__(config)
self.albert = AlbertModel(config)
self.predictions = AlbertMLMHead(config)
self.sop_classifier = AlbertSOPHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self) -> nn.Linear:
return self.predictions.decoder
def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
self.predictions.decoder = new_embeddings
def get_input_embeddings(self) -> nn.Embedding:
return self.albert.embeddings.word_embeddings | 3,150 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=AlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
sentence_order_label: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[AlbertForPreTrainingOutput, Tuple]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): | 3,150 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
sentence_order_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sentence order prediction (classification) loss. Input should be a sequence pair
(see `input_ids` docstring). Indices should be in `[0, 1]`: `0` indicates original order (sequence A, then
sequence B), `1` indicates switched order (sequence B, then sequence A). | 3,150 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, AlbertForPreTraining
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
>>> model = AlbertForPreTraining.from_pretrained("albert/albert-base-v2")
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
>>> # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_logits = outputs.prediction_logits
>>> sop_logits = outputs.sop_logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict | 3,150 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
outputs = self.albert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores = self.predictions(sequence_output)
sop_scores = self.sop_classifier(pooled_output)
total_loss = None
if labels is not None and sentence_order_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1))
total_loss = masked_lm_loss + sentence_order_loss | 3,150 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
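# A minimal sketch (random tensors, toy sizes) of the combined pretraining loss
# computed above: token-level MLM loss plus pairwise sentence-order loss.
import torch
from torch.nn import CrossEntropyLoss

batch, seq_len, vocab_size = 2, 8, 100
prediction_scores = torch.randn(batch, seq_len, vocab_size)
sop_scores = torch.randn(batch, 2)
labels = torch.randint(0, vocab_size, (batch, seq_len))
labels[:, 0] = -100                          # -100 positions are ignored by the loss
sentence_order_label = torch.tensor([0, 1])  # 0 = original order, 1 = swapped

loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, vocab_size), labels.view(-1))
sentence_order_loss = loss_fct(sop_scores.view(-1, 2), sentence_order_label.view(-1))
total_loss = masked_lm_loss + sentence_order_loss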
if not return_dict:
output = (prediction_scores, sop_scores) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return AlbertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
sop_logits=sop_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
) | 3,150 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
class AlbertMLMHead(nn.Module):
def __init__(self, config: AlbertConfig):
super().__init__()
self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.dense = nn.Linear(config.hidden_size, config.embedding_size)
self.decoder = nn.Linear(config.embedding_size, config.vocab_size)
self.activation = ACT2FN[config.hidden_act]
self.decoder.bias = self.bias
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
hidden_states = self.decoder(hidden_states)
prediction_scores = hidden_states
return prediction_scores | 3,151 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
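# A shape-flow sketch of AlbertMLMHead.forward above, using a hypothetical toy
# config so both projections are visible: hidden 32 -> embedding 16 -> vocab 100.
import torch
from transformers import AlbertConfig

config = AlbertConfig(vocab_size=100, embedding_size=16, hidden_size=32)
head = AlbertMLMHead(config)    # the class defined above
hidden = torch.randn(2, 8, 32)  # (batch, seq_len, hidden_size)
scores = head(hidden)
print(scores.shape)             # torch.Size([2, 8, 100])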
def _tie_weights(self) -> None:
# For accelerate compatibility and to not break backward compatibility
if self.decoder.bias.device.type == "meta":
self.decoder.bias = self.bias
else:
# To tie those two weights if they get disconnected (on TPU or when the bias is resized)
self.bias = self.decoder.bias | 3,151 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
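# A hedged sanity check of the tying above: after __init__ (and after
# _tie_weights re-links them), the two bias references are the same Parameter.
from transformers import AlbertForMaskedLM

model = AlbertForMaskedLM.from_pretrained("albert/albert-base-v2")
assert model.predictions.decoder.bias is model.predictions.bias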
class AlbertSOPHead(nn.Module):
def __init__(self, config: AlbertConfig):
super().__init__()
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, pooled_output: torch.Tensor) -> torch.Tensor:
dropout_pooled_output = self.dropout(pooled_output)
logits = self.classifier(dropout_pooled_output)
return logits | 3,152 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
class AlbertForMaskedLM(AlbertPreTrainedModel, GenerationMixin):
_tied_weights_keys = ["predictions.decoder.bias", "predictions.decoder.weight"]
def __init__(self, config):
super().__init__(config)
self.albert = AlbertModel(config, add_pooling_layer=False)
self.predictions = AlbertMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self) -> nn.Linear:
return self.predictions.decoder
def set_output_embeddings(self, new_embeddings: nn.Linear) -> None:
self.predictions.decoder = new_embeddings
self.predictions.bias = new_embeddings.bias
def get_input_embeddings(self) -> nn.Embedding:
return self.albert.embeddings.word_embeddings | 3,153 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[MaskedLMOutput, Tuple]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., | 3,153 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. | 3,153 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
Returns:
Example:
```python
>>> import torch
>>> from transformers import AutoTokenizer, AlbertForMaskedLM
>>> tokenizer = AutoTokenizer.from_pretrained("albert/albert-base-v2")
>>> model = AlbertForMaskedLM.from_pretrained("albert/albert-base-v2")
>>> # add mask_token
>>> inputs = tokenizer("The capital of [MASK] is Paris.", return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> # retrieve index of [MASK]
>>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
>>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1)
>>> tokenizer.decode(predicted_token_id)
'france'
``` | 3,153 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/albert/modeling_albert.py |
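# A hedged continuation of the docstring example above (same tokenizer, model,
# and inputs), showing how a labels tensor yields the masked-LM loss; the exact
# loss value depends on the checkpoint, so it is not asserted here.
labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"]
# compute the loss only at the [MASK] position; everything else is ignored (-100)
labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100)
outputs = model(**inputs, labels=labels)
print(round(outputs.loss.item(), 2))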