# Excerpts from src/transformers/models/udop/modeling_udop.py
class UdopCellEmbeddings(nn.Module):
def __init__(self, max_2d_position_embeddings=501, hidden_size=1024):
super(UdopCellEmbeddings, self).__init__()
self.max_2d_position_embeddings = max_2d_position_embeddings
self.x_position_embeddings = nn.Embedding(max_2d_position_embeddings, hidden_size)
self.y_position_embeddings = nn.Embedding(max_2d_position_embeddings, hidden_size)
def forward(self, bbox):
bbox = torch.clip(bbox, 0.0, 1.0)
bbox = (bbox * (self.max_2d_position_embeddings - 1)).long()
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
embeddings = (
left_position_embeddings
+ upper_position_embeddings
+ right_position_embeddings
+ lower_position_embeddings
)
return embeddings
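The cell embedding above clips normalized box coordinates to [0, 1], discretizes them into integer positions, and sums one embedding per coordinate. A minimal usage sketch (assuming `torch` is imported as in the rest of this file; shapes are illustrative only):

```python
import torch

batch_size, seq_len, hidden_size = 2, 5, 1024
cell_embed = UdopCellEmbeddings(max_2d_position_embeddings=501, hidden_size=hidden_size)

# Normalized (x0, y0, x1, y1) boxes in the [0, 1] range, one per token.
bbox = torch.rand(batch_size, seq_len, 4)

embeddings = cell_embed(bbox)
print(embeddings.shape)  # torch.Size([2, 5, 1024])
```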
class RelativePositionBiasBase(nn.Module, ABC):
"""
Base class of relative biases.
Args:
num_heads (`int`):
Number of attention heads in the model; an embedding of size `num_heads` is created and added to the attention score of each token pair.
relative_attention_num_buckets (`int`, *optional*, defaults to 32):
The pair-token metric (distance in the sequence, distance in pixels, etc.) is bucketed; this parameter defines the number of such buckets.
bidirectional (`bool`, *optional*, defaults to `True`):
Whether the distance should be bidirectional for a pair of tokens. If `False`, then distance(tok1, tok2) == distance(tok2, tok1).
scaling_factor (`int`, *optional*, defaults to 1):
Factor used to scale the relative distance.
max_distance (`int`, *optional*, defaults to 128):
All distances above this value end up in the same bucket.
augmentation (`bool`, *optional*, defaults to `False`):
Whether to multiply relative distances by a random scalar.
expand (`bool`, *optional*, defaults to `False`):
Whether to expand an existing pretrained model with subsequent additions of prefix_bucket.
"""
def __init__(
self,
num_heads=None,
relative_attention_num_buckets=32,
bidirectional=True,
scaling_factor=1,
max_distance=128,
level="tokens",
augmentation=False,
prefix_bucket=False,
expand=False,
):
super(RelativePositionBiasBase, self).__init__()
self.prefix_bucket = prefix_bucket
self.augmentation = augmentation
self.level = level
self.max_distance = max_distance
self.scaling_factor = scaling_factor
self.bidirectional = bidirectional
self.num_heads = num_heads
self.expand = expand
self.relative_attention_num_buckets = relative_attention_num_buckets
extra_head = 2 if prefix_bucket and not self.expand else 0
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets + extra_head, self.num_heads)
@abstractmethod
def prepare_input(
self,
attention_mask: Optional[Tensor] = None,
bbox: Optional[Dict[str, Any]] = None,
) -> Tensor:
pass
def get_bucket(self, attention_mask: Optional[Tensor] = None, bbox: Optional[Dict[str, Any]] = None) -> Tensor:
relative_position = self.prepare_input(attention_mask, bbox)
rp_bucket: Tensor = get_relative_position_bucket(
relative_position,
bidirectional=self.bidirectional,
num_buckets=self.relative_attention_num_buckets,
max_distance=self.max_distance,
)
return rp_bucket
def get_relative_position(self, positions):
context_position = positions[:, :, None]
memory_position = positions[:, None, :]
relative_position = memory_position - context_position
if self.augmentation and self.training:
relative_position *= random.uniform(*AUGMENTATION_RANGE)
relative_position *= self.scaling_factor
return relative_position.to(torch.long)
def forward(self, attention_mask: Optional[Tensor] = None, bbox: Optional[Dict[str, Any]] = None) -> Tensor:
# re-using pretrained model with subsequent addition of prefix_bucket
if self.expand and self.prefix_bucket:
new_bias = nn.Embedding(self.relative_attention_num_buckets + 2, self.num_heads)
new_bias.weight.data[: self.relative_attention_num_buckets] = self.relative_attention_bias.weight.data
new_bias.weight.data[self.relative_attention_num_buckets :] = 0.1
self.relative_attention_bias = new_bias
self.expand = False
rp_bucket = self.get_bucket(attention_mask, bbox)
if self.prefix_bucket:
if rp_bucket.size(0) == 1 and attention_mask.size(0) > 1:
rp_bucket = rp_bucket.repeat(attention_mask.size(0), 1, 1)
# based on assumption that prefix bboxes are negative
is_prefix = bbox[:, :, 1] < 0
num_prefix = is_prefix.sum(-1)
for idx, num_prefix_row in enumerate(num_prefix.cpu().numpy()):
rp_bucket[idx, :num_prefix_row, num_prefix_row:] = self.relative_attention_num_buckets
rp_bucket[idx, num_prefix_row:, :num_prefix_row] = self.relative_attention_num_buckets + 1
values: Tensor = self.relative_attention_bias(rp_bucket)
if values.dim() != 4:
raise ValueError("Wrong dimension of values tensor")
values = values.permute([0, 3, 1, 2])
return values
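To make the computation above concrete, this is a sketch of the relative-position matrix that `get_relative_position` builds before bucketing (illustrative values, with scaling and augmentation left at their defaults):

```python
import torch

positions = torch.arange(3)[None, :]                      # shape (1, 3)
relative = positions[:, None, :] - positions[:, :, None]  # memory_position - context_position
# relative[0] ==
# tensor([[ 0,  1,  2],
#         [-1,  0,  1],
#         [-2, -1,  0]])
```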
class RelativePositionBias1D(RelativePositionBiasBase):
def __init__(self, scaling_factor=1, max_distance=128, **kwargs):
"""
Reimplementation of the T5 relative position bias. The distance between two tokens is their distance in the sequence.
Parameters are the same as in the base class.
"""
super().__init__(scaling_factor=scaling_factor, max_distance=max_distance, **kwargs)
def prepare_input(self, attention_mask: Optional[Tensor] = None, bbox: Optional[Dict[str, Any]] = None) -> Tensor:
if self.scaling_factor != 1:
raise ValueError("No need to scale 1d features")
relative_position = self.get_relative_position(
torch.arange(attention_mask.size(1), dtype=torch.long, device=attention_mask.device)[None, :]
)
return relative_position
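A usage sketch for the 1D bias (it assumes the module-level helper `get_relative_position_bucket` referenced by the base class is available, as it is elsewhere in this file; `num_heads` is illustrative):

```python
import torch

bias_1d = RelativePositionBias1D(num_heads=16)
attention_mask = torch.ones(1, 10, dtype=torch.long)
position_bias = bias_1d(attention_mask=attention_mask)
print(position_bias.shape)  # torch.Size([1, 16, 10, 10]) -> (batch, heads, seq, seq)
```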
class RelativePositionBiasHorizontal(RelativePositionBiasBase):
def __init__(self, scaling_factor=100, max_distance=100, **kwargs):
"""
Represents the horizontal distance between two tokens in the bucket embeddings. Parameters are the same as in the
base class.
"""
super().__init__(scaling_factor=scaling_factor, max_distance=max_distance, **kwargs)
def prepare_input(self, attention_mask: Optional[Tensor] = None, bbox: Optional[Dict[str, Any]] = None) -> Tensor:
if not self.scaling_factor > 1.0:
raise ValueError("Need to scale the values of bboxes, as they are in the small (0, 1) range")
if bbox is None:
raise ValueError("Bbox is required for horizontal relative position bias")
# get the x position of the center of each bbox
horizontal_position: Tensor = bbox[:, :, [0, 2]].mean(dim=-1)
return self.get_relative_position(horizontal_position)
class RelativePositionBiasVertical(RelativePositionBiasBase):
def __init__(self, scaling_factor=100, max_distance=100, **kwargs):
"""
Represents the vertical distance between two tokens in the bucket embeddings. Parameters are the same as in the
base class.
"""
super().__init__(scaling_factor=scaling_factor, max_distance=max_distance, **kwargs)
def prepare_input(self, attention_mask: Optional[Tensor] = None, bbox: Optional[Dict[str, Any]] = None) -> Tensor:
if not self.scaling_factor > 1.0:
raise ValueError("Need to scale the values of bboxes, as there are in small (0,1) range")
if bbox is None:
raise ValueError("Bbox is required for vertical relative position bias")
# get y positions of middle of bbox
vertical_position: Tensor = bbox[:, :, [1, 3]].mean(dim=-1)
return self.get_relative_position(vertical_position)
class RelativePositionBiasAggregated(nn.Module):
def __init__(self, modules: Sequence[RelativePositionBiasBase]):
"""
Class which sums up various computed biases.
Args:
modules (Sequence[RelativePositionBiasBase]):
List of relative bias modules.
"""
super().__init__()
self.biases = nn.ModuleList(modules)
def forward(
self, attention_mask: Optional[Tensor] = None, bbox: Optional[Dict[str, Any]] = None
) -> Union[float, Tensor]:
output = 0.0
for bias in self.biases: # type: ignore
output = bias(attention_mask, bbox) + output
return output
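A sketch of how the three bias types above are typically combined; the horizontal and vertical biases read the x/y centers of normalized boxes, so a `bbox` tensor is required (values are illustrative):

```python
import torch

num_heads = 16
aggregated = RelativePositionBiasAggregated(
    [
        RelativePositionBias1D(num_heads=num_heads),
        RelativePositionBiasHorizontal(num_heads=num_heads),
        RelativePositionBiasVertical(num_heads=num_heads),
    ]
)
attention_mask = torch.ones(1, 10, dtype=torch.long)
bbox = torch.rand(1, 10, 4)  # normalized (x0, y0, x1, y1) boxes
position_bias = aggregated(attention_mask=attention_mask, bbox=bbox)
print(position_bias.shape)  # torch.Size([1, 16, 10, 10])
```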
class UdopStack(UdopPreTrainedModel):
"""
This class is based on `T5Stack`, but modified to take into account the image modality as well as 2D position
embeddings.
"""
def __init__(self, config, embed_tokens=None, embed_patches=None):
super().__init__(config)
self.embed_tokens = embed_tokens
self.embed_patches = embed_patches
self.is_decoder = config.is_decoder
self._max_length = config.max_length
self.num_layers = config.num_layers
self.block = nn.ModuleList(
[UdopBlock(config, has_relative_attention_bias=bool(i == 0), layer_idx=i) for i in range(self.num_layers)]
)
self.final_layer_norm = UdopLayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
if not self.is_decoder:
self.cell_2d_embedding = UdopCellEmbeddings(config.max_2d_position_embeddings, config.hidden_size)
# get weights from encoder position bias
self.relative_bias = self._get_relative_bias(config)
def _tie_weights(self):
for bias in self.relative_bias.biases:
if isinstance(bias, RelativePositionBias1D):
self._tie_or_clone_weights(
bias.relative_attention_bias, self.block[0].layer[0].SelfAttention.relative_attention_bias
)
@staticmethod
def _get_relative_bias(config: UdopConfig) -> RelativePositionBiasAggregated:
relative_bias_list = create_relative_bias(config)
return RelativePositionBiasAggregated(relative_bias_list)
def get_input_embeddings(self):
return self.embed_tokens
def get_output_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
bbox=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
pixel_values=None,
visual_bbox=None,
image_embeddings=None,
position_bias=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
cache_position=None,
):
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# input embeddings processing
if input_ids is not None and inputs_embeds is not None:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You cannot specify both {err_msg_prefix}inputs and {err_msg_prefix}inputs_embeds at the same time"
)
elif input_ids is not None and torch.numel(input_ids) > 0:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is None and input_ids is not None and torch.numel(input_ids) == 0:
input_ids = torch.full((4, 1024), self.config.pad_token_id, device=input_ids.device, dtype=input_ids.dtype)
attention_mask = torch.zeros((4, 1024), device=input_ids.device, dtype=input_ids.dtype)
bbox = torch.zeros((4, 1024, 4), device=input_ids.device, dtype=input_ids.dtype)
input_shape = input_ids.size()
position_bias = torch.zeros_like(self.get_extended_attention_mask(attention_mask, input_shape))
# encoder_attention_mask = attention_mask
logger.warning("Empty batch")
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(f"You have to specify either {err_msg_prefix}inputs or {err_msg_prefix}inputs_embeds") | 3,067 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/modeling_udop.py |
if inputs_embeds is None:
if self.embed_tokens is None:
raise ValueError("You have to intialize the model with valid token embeddings")
inputs_embeds = self.embed_tokens(input_ids)
if pixel_values is not None:
image_embeddings = self.embed_patches(pixel_values)
if image_embeddings is not None:
# combine visual and OCR text embeddings
num_patches = self.config.image_size // self.config.patch_size
inputs_embeds, bbox, attention_mask = combine_image_text_embeddings(
image_embeddings,
inputs_embeds,
bbox,
visual_bbox,
attention_mask,
num_patches,
0,
self.config.image_size,
self.config.patch_size,
)
input_shape = inputs_embeds.size()[:-1]
if not self.is_decoder and bbox is not None:
inputs_embeds += self.cell_2d_embedding(bbox)
batch_size, seq_length = input_shape
if use_cache is True:
assert self.is_decoder, "`use_cache` can only be set to `True` if {} is used as a decoder".format(self)
# initialize past_key_values
return_legacy_cache = False
return_self_attention_cache = False
if self.is_decoder and (use_cache or past_key_values is not None):
if isinstance(past_key_values, Cache) and not isinstance(past_key_values, EncoderDecoderCache):
return_self_attention_cache = True
past_key_values = EncoderDecoderCache(past_key_values, DynamicCache())
elif not isinstance(past_key_values, EncoderDecoderCache):
return_legacy_cache = True
logger.warning_once(
"Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.48.0. "
"You should pass an instance of `EncoderDecoderCache` instead, e.g. "
"`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`."
)
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
elif past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache())
elif not self.is_decoder:
# do not pass cache object down the line for encoder stack
# it messes indexing later in decoder-stack because cache object is modified in-place
past_key_values = None
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(
past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
)
if attention_mask is None and not is_torchdynamo_compiling():
# required mask seq length can be calculated via length of past cache
mask_seq_length = past_key_values_length + seq_length
attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
if self.config.is_decoder:
causal_mask = self._update_causal_mask(
attention_mask,
inputs_embeds,
cache_position,
past_key_values.self_attention_cache if past_key_values is not None else None,
output_attentions,
)
else:
causal_mask = attention_mask[:, None, None, :]
causal_mask = causal_mask.to(dtype=inputs_embeds.dtype)
causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min
if self.is_decoder and encoder_attention_mask is not None:
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.num_layers)
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
all_cross_attentions = () if (output_attentions and self.is_decoder) else None
if self.is_decoder: # modified lines
position_bias = None
else:
position_bias = self.relative_bias(attention_mask=attention_mask, bbox=bbox)
position_bias = position_bias + causal_mask
encoder_decoder_position_bias = None
hidden_states = inputs_embeds
hidden_states = self.dropout(hidden_states)
for i, layer_module in enumerate(self.block):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states,
attention_mask=causal_mask,
position_bias=position_bias,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
encoder_decoder_position_bias=encoder_decoder_position_bias,
layer_head_mask=head_mask[i],
past_key_value=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
cache_position=cache_position,
)
# layer_outputs is a tuple with:
# hidden-states, key-value-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
if use_cache is False: # MP fixes
layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]
hidden_states, next_decoder_cache = layer_outputs[:2]
# We share the position biases between the layers - the first layer stores them
# layer_outputs = hidden-states, key-value-states (self-attention weights),
# (self-attention position bias), (cross-attention weights), (cross-attention position bias)
position_bias = layer_outputs[2]
if self.is_decoder and encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[2],) # We keep only self-attention weights for now
if self.is_decoder:
all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if return_self_attention_cache:
next_cache = past_key_values.self_attention_cache
if return_legacy_cache:
next_cache = past_key_values.to_legacy_cache()
if not return_dict:
return tuple(
v
for v in [
hidden_states,
attention_mask,
next_cache,
all_hidden_states,
all_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithAttentionMask(
last_hidden_state=hidden_states,
attention_mask=attention_mask,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask
def _update_causal_mask(
self,
attention_mask: torch.Tensor,
input_tensor: torch.Tensor,
cache_position: torch.Tensor,
past_key_values: Cache,
output_attentions: bool,
):
if self.config._attn_implementation == "flash_attention_2":
if attention_mask is not None and (attention_mask == 0.0).any():
return attention_mask
return None
# For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
# order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
# to infer the attention mask.
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_static_cache = isinstance(past_key_values, StaticCache)
# When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
if AttentionMaskConverter._ignore_causal_mask_sdpa(
attention_mask,
inputs_embeds=input_tensor,
past_key_values_length=past_seen_tokens,
is_training=self.training,
):
return None
dtype, device = input_tensor.dtype, input_tensor.device
sequence_length = input_tensor.shape[1]
if using_static_cache:
target_length = past_key_values.get_max_cache_shape()
else:
target_length = (
attention_mask.shape[-1]
if isinstance(attention_mask, torch.Tensor)
else past_seen_tokens + sequence_length + 1
)
# In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
attention_mask,
sequence_length=sequence_length,
target_length=target_length,
dtype=dtype,
device=device,
cache_position=cache_position,
batch_size=input_tensor.shape[0],
)
if (
self.config._attn_implementation == "sdpa"
and attention_mask is not None
and attention_mask.device.type == "cuda"
and not output_attentions
):
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
# Details: https://github.com/pytorch/pytorch/issues/110213
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
# Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel._prepare_4d_causal_attention_mask_with_cache_position
def _prepare_4d_causal_attention_mask_with_cache_position(
attention_mask: torch.Tensor,
sequence_length: int,
target_length: int,
dtype: torch.dtype,
device: torch.device,
cache_position: torch.Tensor,
batch_size: int,
**kwargs,
):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
device (`torch.device`):
The device to place the 4D attention mask on.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
""" | 3,067 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/modeling_udop.py |
if attention_mask is not None and attention_mask.dim() == 4:
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full(
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
padding_mask, min_dtype
)
return causal_mask
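For intuition, a small sketch of what the static helper above produces for a short left-to-right sequence with one padded position (CPU and float32 are chosen only for illustration):

```python
import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])  # last position is padding
causal_4d = UdopStack._prepare_4d_causal_attention_mask_with_cache_position(
    attention_mask,
    sequence_length=4,
    target_length=4,
    dtype=torch.float32,
    device=torch.device("cpu"),
    cache_position=torch.arange(4),
    batch_size=1,
)
# Shape (1, 1, 4, 4): 0.0 where attention is allowed, the dtype minimum for future and padded positions.
print(causal_4d.shape)
```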
class UdopModel(UdopPreTrainedModel):
_tied_weights_keys = [
"encoder.embed_tokens.weight",
"decoder.embed_tokens.weight",
"encoder.embed_patches.proj.weight",
"encoder.embed_patches.proj.bias",
"encoder.relative_bias.biases.0.relative_attention_bias.weight",
"decoder.relative_bias.biases.0.relative_attention_bias.weight",
]
def __init__(self, config):
super(UdopModel, self).__init__(config)
# text and image embeddings
self.shared = nn.Embedding(config.vocab_size, config.d_model)
self.patch_embed = UdopPatchEmbeddings(config)
encoder_config = deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = UdopStack(encoder_config, self.shared, self.patch_embed)
decoder_config = deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = UdopStack(decoder_config, self.shared)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(UDOP_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Tensor = None,
attention_mask: Tensor = None,
bbox: Dict[str, Any] = None,
pixel_values: Optional[Tensor] = None,
visual_bbox: Dict[str, Any] = None,
decoder_input_ids: Optional[Tensor] = None,
decoder_attention_mask: Optional[Tensor] = None,
inputs_embeds: Optional[Tensor] = None,
encoder_outputs: Optional[Tensor] = None,
past_key_values: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
decoder_inputs_embeds: Optional[Tensor] = None,
decoder_head_mask: Optional[Tensor] = None,
cross_attn_head_mask: Optional[Tensor] = None,
use_cache=True,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Tuple[Tensor, ...]:
r"""
Returns:
Example:
```python
>>> from transformers import AutoProcessor, AutoModel
>>> from datasets import load_dataset
>>> import torch
>>> # load model and processor
>>> # in this case, we already have performed OCR ourselves
>>> # so we initialize the processor with `apply_ocr=False`
>>> processor = AutoProcessor.from_pretrained("microsoft/udop-large", apply_ocr=False)
>>> model = AutoModel.from_pretrained("microsoft/udop-large")
>>> # load an example image, along with the words and coordinates
>>> # which were extracted using an OCR engine
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train", trust_remote_code=True)
>>> example = dataset[0]
>>> image = example["image"]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> inputs = processor(image, words, boxes=boxes, return_tensors="pt") | 3,068 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/modeling_udop.py |
>>> decoder_input_ids = torch.tensor([[model.config.decoder_start_token_id]])
>>> # forward pass
>>> outputs = model(**inputs, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
[1, 1, 1024]
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
bbox=bbox,
pixel_values=pixel_values,
visual_bbox=visual_bbox,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
encoder_attention_mask = encoder_outputs.attention_mask if return_dict else encoder_outputs[1]
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
if not return_dict:
# we filter out the attention mask
decoder_outputs = tuple(value for idx, value in enumerate(decoder_outputs) if idx != 1)
encoder_outputs = tuple(value for idx, value in enumerate(encoder_outputs) if idx != 1)
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
class UdopForConditionalGeneration(UdopPreTrainedModel, GenerationMixin):
_tied_weights_keys = [
"encoder.embed_tokens.weight",
"decoder.embed_tokens.weight",
"encoder.embed_patches.proj.weight",
"encoder.embed_patches.proj.bias",
"encoder.relative_bias.biases.0.relative_attention_bias.weight",
"decoder.relative_bias.biases.0.relative_attention_bias.weight",
"lm_head.weight",
]
def __init__(self, config):
super(UdopForConditionalGeneration, self).__init__(config)
# text and image embeddings
self.shared = nn.Embedding(config.vocab_size, config.d_model)
self.patch_embed = UdopPatchEmbeddings(config)
encoder_config = deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = UdopStack(encoder_config, self.shared, self.patch_embed)
decoder_config = deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = UdopStack(decoder_config, self.shared)
# The weights of the language modeling head are shared with those of the encoder and decoder
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def get_output_embeddings(self):
return self.lm_head
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(UDOP_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Tensor = None,
attention_mask: Tensor = None,
bbox: Dict[str, Any] = None,
pixel_values: Optional[Tensor] = None,
visual_bbox: Dict[str, Any] = None,
decoder_input_ids: Optional[Tensor] = None,
decoder_attention_mask: Optional[Tensor] = None,
inputs_embeds: Optional[Tensor] = None,
encoder_outputs: Optional[Tensor] = None,
past_key_values: Optional[Tensor] = None,
head_mask: Optional[Tensor] = None,
decoder_inputs_embeds: Optional[Tensor] = None,
decoder_head_mask: Optional[Tensor] = None,
cross_attn_head_mask: Optional[Tensor] = None,
use_cache=True,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Tensor] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Tuple[Tensor, ...]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size -
1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
config.vocab_size]`. | 3,069 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/modeling_udop.py |
Returns:
Examples:
```python
>>> from transformers import AutoProcessor, UdopForConditionalGeneration
>>> from datasets import load_dataset
>>> # load model and processor
>>> # in this case, we already have performed OCR ourselves
>>> # so we initialize the processor with `apply_ocr=False`
>>> processor = AutoProcessor.from_pretrained("microsoft/udop-large", apply_ocr=False)
>>> model = UdopForConditionalGeneration.from_pretrained("microsoft/udop-large")
>>> # load an example image, along with the words and coordinates
>>> # which were extracted using an OCR engine
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train", trust_remote_code=True)
>>> example = dataset[0]
>>> image = example["image"]
>>> words = example["tokens"]
>>> boxes = example["bboxes"] | 3,069 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/modeling_udop.py |
>>> # one can use the various task prefixes (prompts) used during pre-training
>>> # e.g. the task prefix for DocVQA is "Question answering. "
>>> question = "Question answering. What is the date on the form?"
>>> encoding = processor(image, question, text_pair=words, boxes=boxes, return_tensors="pt")
>>> # autoregressive generation
>>> predicted_ids = model.generate(**encoding)
>>> print(processor.batch_decode(predicted_ids, skip_special_tokens=True)[0])
9/30/92
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if decoder_input_ids is None and labels is not None:
decoder_input_ids = self._shift_right(labels)
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
bbox=bbox,
visual_bbox=visual_bbox,
pixel_values=pixel_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
encoder_attention_mask = encoder_outputs.attention_mask if return_dict else encoder_outputs[1]
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
sequence_output = decoder_outputs[0]
if self.config.tie_word_embeddings:
# Rescale output before projecting on vocab
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
sequence_output = sequence_output * (self.config.d_model**-0.5)
lm_logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
if not return_dict:
output = (lm_logits,) + decoder_outputs[2:] + (encoder_outputs[0],) + encoder_outputs[2:]
return ((loss,) + output) if loss is not None else output
return Seq2SeqLMOutput(
loss=loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
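Complementing the generation example in the docstring above, a minimal training-style sketch of the `labels` path (when `labels` are given and `decoder_input_ids` are not, the labels are shifted right to build the decoder inputs). The single-token `labels` tensor below is only a placeholder; in practice it holds the tokenized target ids:

```python
import torch
from datasets import load_dataset
from transformers import AutoProcessor, UdopForConditionalGeneration

processor = AutoProcessor.from_pretrained("microsoft/udop-large", apply_ocr=False)
model = UdopForConditionalGeneration.from_pretrained("microsoft/udop-large")

dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train", trust_remote_code=True)
example = dataset[0]
question = "Question answering. What is the date on the form?"
encoding = processor(
    example["image"], question, text_pair=example["tokens"], boxes=example["bboxes"], return_tensors="pt"
)

labels = torch.tensor([[processor.tokenizer.eos_token_id]])  # placeholder target ids
outputs = model(**encoding, labels=labels)
print(outputs.loss)  # language-modeling loss over the labels
```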
# Copied from transformers.models.t5.modeling_t5.T5ForConditionalGeneration._reorder_cache
def _reorder_cache(self, past_key_values, beam_idx):
# if decoder past is not included in output
# speedy decoding is disabled and no need to reorder
if past_key_values is None:
logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
return past_key_values
reordered_decoder_past = ()
for layer_past_states in past_key_values:
# get the correct batch idx from layer past batch dim
# batch dim of `past` is at 2nd position
reordered_layer_past_states = ()
for layer_past_state in layer_past_states:
# need to set correct `past` for each of the four key / value states
reordered_layer_past_states = reordered_layer_past_states + (
layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),
)
if reordered_layer_past_states[0].shape != layer_past_states[0].shape:
raise ValueError(
f"reordered_layer_past_states[0] shape {reordered_layer_past_states[0].shape} and layer_past_states[0] shape {layer_past_states[0].shape} mismatched"
)
if len(reordered_layer_past_states) != len(layer_past_states):
raise ValueError(
f"length of reordered_layer_past_states {len(reordered_layer_past_states)} and length of layer_past_states {len(layer_past_states)} mismatched"
)
reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
return reordered_decoder_past
class UdopEncoderModel(UdopPreTrainedModel):
_tied_weights_keys = [
"encoder.embed_tokens.weight",
"encoder.embed_patches.proj.weight",
"encoder.embed_patches.proj.bias",
"encoder.relative_bias.biases.0.relative_attention_bias.weight",
]
def __init__(self, config: UdopConfig):
super().__init__(config)
# text and image embeddings
self.shared = nn.Embedding(config.vocab_size, config.d_model)
self.patch_embed = UdopPatchEmbeddings(config)
encoder_config = deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = UdopStack(encoder_config, self.shared, self.patch_embed)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
def get_encoder(self):
return self.encoder
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads)
@add_start_docstrings_to_model_forward(UDOP_ENCODER_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutputWithAttentionMask, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Tensor = None,
bbox: Dict[str, Any] = None,
attention_mask: Tensor = None,
pixel_values: Optional[Tensor] = None,
visual_bbox: Dict[str, Any] = None,
head_mask: Optional[Tensor] = None,
inputs_embeds: Optional[Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithAttentionMask]:
r"""
Returns:
Example:
```python
>>> from transformers import AutoProcessor, UdopEncoderModel
>>> from huggingface_hub import hf_hub_download
>>> from datasets import load_dataset | 3,070 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/modeling_udop.py |
>>> # load model and processor
>>> # in this case, we already have performed OCR ourselves
>>> # so we initialize the processor with `apply_ocr=False`
>>> processor = AutoProcessor.from_pretrained("microsoft/udop-large", apply_ocr=False)
>>> model = UdopEncoderModel.from_pretrained("microsoft/udop-large")
>>> # load an example image, along with the words and coordinates
>>> # which were extracted using an OCR engine
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train", trust_remote_code=True)
>>> example = dataset[0]
>>> image = example["image"]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> encoding = processor(image, words, boxes=boxes, return_tensors="pt") | 3,070 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/modeling_udop.py |
>>> outputs = model(**encoding)
>>> last_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_outputs = self.encoder(
input_ids=input_ids,
bbox=bbox,
visual_bbox=visual_bbox,
pixel_values=pixel_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
return encoder_outputs


# The excerpts below are from src/transformers/models/udop/tokenization_udop.py
class UdopTokenizer(PreTrainedTokenizer):
"""
Adapted from [`LayoutXLMTokenizer`] and [`T5Tokenizer`]. Based on
[SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
The bounding box to use for the special [SEP] token.
pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (`int`, *optional*, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (`bool`, *optional*, defaults to `True`):
Whether or not to only label the first subword, in case word labels are provided.
additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
Additional special tokens used by the tokenizer.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm. | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
legacy (`bool`, *optional*, defaults to `True`):
Whether or not the `legacy` behaviour of the tokenizer should be used. Legacy is before the merge of #24622
which includes fixes to properly handle tokens that appear after special tokens. A simple example:
- `legacy=True`:
```python
>>> from transformers import T5Tokenizer
>>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=True)
>>> tokenizer.encode("Hello <extra_id_0>.")
[8774, 32099, 3, 5, 1]
```
- `legacy=False`:
```python
>>> from transformers import T5Tokenizer | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
>>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=False)
>>> tokenizer.encode("Hello <extra_id_0>.") # the extra space `[3]` is no longer here
[8774, 32099, 5, 1]
```
Checkout the pull request and the issue [here](https://github.com/huggingface/transformers/pull/24565) for
more details.
add_prefix_space (`bool`, *optional*, defaults to `True`):
Whether or not to add an initial space to the input. This allows treating the leading word just like any
other word.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"] | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
def __init__(
self,
vocab_file,
eos_token="</s>",
unk_token="<unk>",
sep_token="</s>",
pad_token="<pad>",
sep_token_box=[1000, 1000, 1000, 1000],
pad_token_box=[0, 0, 0, 0],
pad_token_label=-100,
only_label_first_subword=True,
additional_special_tokens=None,
sp_model_kwargs: Optional[Dict[str, Any]] = None,
legacy=True,
add_prefix_space=True,
**kwargs,
) -> None:
eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token
pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
self.legacy = legacy
self.add_prefix_space = add_prefix_space
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(vocab_file)
# additional properties
self.sep_token_box = sep_token_box
self.pad_token_box = pad_token_box
self.pad_token_label = pad_token_label
self.only_label_first_subword = only_label_first_subword
super().__init__(
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
sep_token_box=sep_token_box,
pad_token_box=pad_token_box,
pad_token_label=pad_token_label,
only_label_first_subword=only_label_first_subword,
additional_special_tokens=additional_special_tokens,
sp_model_kwargs=self.sp_model_kwargs,
legacy=legacy,
add_prefix_space=add_prefix_space,
**kwargs,
)
@property
def vocab_size(self):
return len(self.sp_model)
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_vocab
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_special_tokens_mask
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
# normal case: some special tokens
if token_ids_1 is None:
return ([0] * len(token_ids_0)) + [1]
return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_sentinel_tokens
def get_sentinel_tokens(self):
return list(
set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)), self.additional_special_tokens))
)
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_sentinel_token_ids
def get_sentinel_token_ids(self):
return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._add_eos_if_not_present
def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
"""Do not add eos again if user already added it."""
if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
" eos tokens being added."
)
return token_ids
else:
return token_ids + [self.eos_token_id]
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.create_token_type_ids_from_sequences
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
"""
eos = [self.eos_token_id]
if token_ids_1 is None:
return len(token_ids_0 + eos) * [0]
return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.build_inputs_with_special_tokens
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A sequence has the following format:
- single sequence: `X </s>`
- pair of sequences: `A </s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs. | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
token_ids_0 = self._add_eos_if_not_present(token_ids_0)
if token_ids_1 is None:
return token_ids_0
else:
token_ids_1 = self._add_eos_if_not_present(token_ids_1)
return token_ids_0 + token_ids_1
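    # Illustrative sketch (assuming `eos_token_id == 1`):
    #     tokenizer.build_inputs_with_special_tokens([10, 20])            # -> [10, 20, 1]            i.e. `X </s>`
    #     tokenizer.build_inputs_with_special_tokens([10, 20], [30, 40])  # -> [10, 20, 1, 30, 40, 1] i.e. `A </s> B </s>`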
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.__getstate__
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d):
self.__dict__.update(d)
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file) | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize
def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
"""
Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
first token is special.
"""
if self.legacy or len(text) == 0:
return super().tokenize(text, **kwargs)
text = text.replace(SPIECE_UNDERLINE, " ")
if self.add_prefix_space:
text = SPIECE_UNDERLINE + text
tokens = super().tokenize(text, **kwargs)
if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
tokens = tokens[1:]
return tokens
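    # Illustrative sketch (exact pieces depend on the SentencePiece vocabulary, so the output below is only indicative):
    #     tokenizer.tokenize("Hello world")   # e.g. ['▁Hello', '▁world'] when `add_prefix_space=True` and `legacy=False`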
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize
def _tokenize(self, text, **kwargs):
"""
Returns a tokenized string. | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
`['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
`unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
`self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
"""
if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")):
return self.sp_model.encode(text, out_type=str)
# 1. Encode string + prefix ex: "<unk> Hey"
tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
# 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.sp_model.piece_to_id(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.sp_model.IdToPiece(index)
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.convert_tokens_to_string
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
# since we manually add the prefix space, we have to remove it when decoding
        if len(tokens) > 0 and tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space:
tokens[0] = tokens[0][1:] | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
current_sub_tokens = []
out_string = ""
prev_is_special = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(current_sub_tokens) + token
prev_is_special = True
current_sub_tokens = []
else:
current_sub_tokens.append(token)
prev_is_special = False
out_string += self.sp_model.decode(current_sub_tokens)
return out_string.strip() | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
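    # Illustrative sketch (round-trip of the tokenize example above; spacing around special tokens follows the loop above):
    #     tokenizer.convert_tokens_to_string(['▁Hello', '▁world'])          # -> "Hello world"
    #     tokenizer.convert_tokens_to_string(['▁Hello', '</s>', '▁again'])  # special tokens bypass SentencePiece decoding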
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.save_vocabulary
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, "wb") as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,) | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
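    # Illustrative sketch (assumption: `VOCAB_FILES_NAMES["vocab_file"]` names the SentencePiece model, e.g. "spiece.model"):
    #     tokenizer.save_vocabulary("./exported")   # -> ("./exported/spiece.model",)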
@add_end_docstrings(UDOP_ENCODE_KWARGS_DOCSTRING)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
boxes: Union[List[List[int]], List[List[List[int]]]] = None,
word_labels: Optional[Union[List[int], List[List[int]]]] = None,
text_target: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
text_pair_target: Optional[
Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]
] = None,
**kwargs,
) -> BatchEncoding:
if text is None and text_target is None:
raise ValueError("You need to specify either `text` or `text_target`.")
if text is not None:
# The context manager will send the inputs as normal texts and not text_target, but we shouldn't change the | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
# input mode in this case.
if not self._in_target_context_manager:
self._switch_to_input_mode()
encodings = self.call_boxes(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, **kwargs)
if text_target is not None:
self._switch_to_target_mode()
target_encodings = self._call_one(text=text_target, text_pair=text_pair_target, **kwargs)
# Leave back tokenizer in input mode
self._switch_to_input_mode() | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
if text_target is None:
return encodings
elif text is None:
return target_encodings
else:
encodings["labels"] = target_encodings["input_ids"]
return encodings | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
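    # Illustrative sketch of the public API (hypothetical word/box values; boxes are word-level, one box per word):
    #     words = ["hello", "world"]
    #     boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
    #     encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
    #     # the encoding typically contains `input_ids`, `attention_mask` and `bbox`
    #     # for seq2seq training, a target text can be passed as well; its ids end up under `labels`:
    #     encoding = tokenizer(words, boxes=boxes, text_target="a target sentence", return_tensors="pt")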
def call_boxes(
self,
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
boxes: Union[List[List[int]], List[List[List[int]]]] = None,
word_labels: Optional[Union[List[int], List[List[int]]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False, | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
sequences with word-level normalized bounding boxes and optional labels. | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
(words of a single example or questions of a batch of examples) or a list of list of strings (batch of
words).
text_pair (`List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
(pretokenized string).
boxes (`List[List[int]]`, `List[List[List[int]]]`):
Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
word_labels (`List[int]`, `List[List[int]]`, *optional*):
Word-level integer labels (for token classification tasks such as FUNSD, CORD).
""" | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
# Input type checking for clearer error
def _is_valid_text_input(t):
if isinstance(t, str):
# Strings are fine
return True
elif isinstance(t, (list, tuple)):
# List are fine as long as they are...
if len(t) == 0:
# ... empty
return True
elif isinstance(t[0], str):
# ... list of strings
return True
elif isinstance(t[0], (list, tuple)):
# ... list with an empty list or with a list of strings
return len(t[0]) == 0 or isinstance(t[0][0], str)
else:
return False
else:
return False | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
if text_pair is not None:
# in case text + text_pair are provided, text = questions, text_pair = words
if not _is_valid_text_input(text):
raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
if not isinstance(text_pair, (list, tuple)):
raise ValueError(
"words must of type `List[str]` (single pretokenized example), "
"or `List[List[str]]` (batch of pretokenized examples)."
)
else:
# in case only text is provided => must be words
if not isinstance(text, (list, tuple)):
raise ValueError(
"Words must of type `List[str]` (single pretokenized example), "
"or `List[List[str]]` (batch of pretokenized examples)."
) | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
if text_pair is not None:
is_batched = isinstance(text, (list, tuple))
else:
is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
words = text if text_pair is None else text_pair
if boxes is None:
raise ValueError("You must provide corresponding bounding boxes")
if is_batched:
if len(words) != len(boxes):
raise ValueError("You must provide words and boxes for an equal amount of examples")
for words_example, boxes_example in zip(words, boxes):
if len(words_example) != len(boxes_example):
raise ValueError("You must provide as many words as there are bounding boxes")
else:
if len(words) != len(boxes):
raise ValueError("You must provide as many words as there are bounding boxes") | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
if is_batched:
if text_pair is not None and len(text) != len(text_pair):
raise ValueError(
f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
f" {len(text_pair)}."
)
batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
is_pair = bool(text_pair is not None)
return self.batch_encode_plus_boxes(
batch_text_or_text_pairs=batch_text_or_text_pairs,
is_pair=is_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors, | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
else:
return self.encode_plus_boxes(
text=text,
text_pair=text_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_tensors=return_tensors, | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
) | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
def batch_encode_plus_boxes(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
],
        is_pair: Optional[bool] = None,
boxes: Optional[List[List[List[int]]]] = None,
word_labels: Optional[List[List[int]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False, | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs,
) -> BatchEncoding:
"""
Tokenize and prepare for the model a list of sequences or a list of pairs of sequences. | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |
Args:
batch_text_or_text_pairs (`List[str]`, `List[Tuple[str, str]]`, `List[List[str]]`, `List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also `List[List[int]]`, `List[Tuple[List[int], List[int]]]`):
Batch of sequences or pair of sequences to be encoded. This can be a list of
string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see
details in `encode_plus`).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
) | 3,071 | /Users/nielsrogge/Documents/python_projecten/transformers/src/transformers/models/udop/tokenization_udop.py |