Column schema of the dataset preview (dtype and observed range):

| column | dtype | range |
|---|---|---|
| id | int64 | 0 to 328k |
| repository_name | string | lengths 7 to 58 |
| file_path | string | lengths 9 to 302 |
| class_name | string | lengths 5 to 256 |
| human_written_code | string | lengths 16 to 2.16M |
| class_skeleton | string (nullable) | lengths 18 to 1.49M |
| total_program_units | int64 | 1 to 1.76k |
| total_doc_str | int64 | 0 to 771 |
| AvgCountLine | float64 | 0 to 7.89k |
| AvgCountLineBlank | float64 | 0 to 297 |
| AvgCountLineCode | float64 | 0 to 7.89k |
| AvgCountLineComment | float64 | 0 to 7.89k |
| AvgCyclomatic | float64 | 0 to 130 |
| CommentToCodeRatio | float64 | 0 to 168 |
| CountClassBase | float64 | 0 to 40 |
| CountClassCoupled | float64 | 0 to 583 |
| CountClassCoupledModified | float64 | 0 to 575 |
| CountClassDerived | float64 | 0 to 5.35k |
| CountDeclInstanceMethod | float64 | 0 to 529 |
| CountDeclInstanceVariable | float64 | 0 to 296 |
| CountDeclMethod | float64 | 0 to 599 |
| CountDeclMethodAll | float64 | 0 to 1.12k |
| CountLine | float64 | 1 to 40.4k |
| CountLineBlank | float64 | 0 to 8.16k |
| CountLineCode | float64 | 1 to 25.7k |
| CountLineCodeDecl | float64 | 1 to 8.15k |
| CountLineCodeExe | float64 | 0 to 24.2k |
| CountLineComment | float64 | 0 to 16.5k |
| CountStmt | float64 | 1 to 9.71k |
| CountStmtDecl | float64 | 1 to 8.15k |
| CountStmtExe | float64 | 0 to 9.69k |
| MaxCyclomatic | float64 | 0 to 759 |
| MaxInheritanceTree | float64 | 0 to 16 |
| MaxNesting | float64 | 0 to 34 |
| SumCyclomatic | float64 | 0 to 2.9k |
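For orientation, a minimal sketch of how a preview with this schema could be loaded and filtered with the `datasets` library; the dataset identifier below is a placeholder, not the actual repository id of this dataset:

```python
# Minimal sketch; "your-org/class-level-code-metrics" is a hypothetical dataset id.
from datasets import load_dataset

ds = load_dataset("your-org/class-level-code-metrics", split="train")

# Inspect one record: class source, its skeleton, and a few metrics.
row = ds[0]
print(row["class_name"])
print(row["CountLineCode"], row["AvgCyclomatic"], row["CommentToCodeRatio"])

# Example filter: keep classes with at least one docstring and moderate size.
small_documented = ds.filter(
    lambda r: r["total_doc_str"] > 0 and r["CountLine"] <= 200
)
print(len(small_documented))
```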
id: 1,000
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
class_name: transformers.models.blip_2.modeling_blip_2.Blip2TextEmbeddings

human_written_code:

````python
from typing import Any, Callable, Optional, Union
import torch
from torch import nn
class Blip2TextEmbeddings(nn.Module):
"""Construct the embeddings from word and position embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
def forward(self, input_ids: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, query_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor:
if input_ids is not None:
seq_length = input_ids.size()[1]
else:
seq_length = 0
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if input_ids is not None:
input_ids = input_ids.to(self.word_embeddings.weight.device)
embeddings = self.word_embeddings(input_ids)
if self.position_embedding_type == 'absolute':
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
if query_embeds is not None:
if query_embeds.dtype != embeddings.dtype:
query_embeds = query_embeds.to(embeddings.dtype)
embeddings = torch.cat((query_embeds, embeddings), dim=1)
else:
embeddings = query_embeds
return embeddings
````

class_skeleton:

````python
class Blip2TextEmbeddings(nn.Module):
'''Construct the embeddings from word and position embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, query_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor:
pass
````

metrics:
total_program_units: 3, total_doc_str: 1
AvgCountLine: 19, AvgCountLineBlank: 3, AvgCountLineCode: 16, AvgCountLineComment: 1, AvgCyclomatic: 4, CommentToCodeRatio: 0.06
CountClassBase: 1, CountClassCoupled: 2, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 3, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 41, CountLineBlank: 7, CountLineCode: 32, CountLineCodeDecl: 14, CountLineCodeExe: 24, CountLineComment: 2
CountStmt: 23, CountStmtDecl: 9, CountStmtExe: 20
MaxCyclomatic: 6, MaxInheritanceTree: 1, MaxNesting: 2, SumCyclomatic: 7
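Across the preview rows, CommentToCodeRatio appears to equal CountLineComment divided by CountLineCode (for this record, 2 / 32 ≈ 0.06). A quick sanity-check sketch, assuming those two columns are indeed the inputs to the ratio:

```python
# Sanity-check sketch: assumes CommentToCodeRatio = CountLineComment / CountLineCode,
# which matches the rows shown here (2 / 32 ≈ 0.06, 20 / 50 = 0.40, 56 / 41 ≈ 1.37).
def comment_to_code_ratio(count_line_comment: int, count_line_code: int) -> float:
    if count_line_code == 0:
        return 0.0  # guard against division by zero for empty units
    return count_line_comment / count_line_code

print(round(comment_to_code_ratio(2, 32), 2))   # 0.06
print(round(comment_to_code_ratio(20, 50), 2))  # 0.4
print(round(comment_to_code_ratio(56, 41), 2))  # 1.37
```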
id: 1,001
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
class_name: transformers.models.blip_2.modeling_blip_2.Blip2TextModelWithProjection

human_written_code:

````python
from ...processing_utils import Unpack
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
import torch
from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig
from torch import nn
from typing import Any, Callable, Optional, Union
@auto_docstring
class Blip2TextModelWithProjection(Blip2PreTrainedModel):
supports_gradient_checkpointing = False
_keep_in_fp32_modules = ['query_tokens', 'qformer']
_supports_flash_attn = False
def __init__(self, config: Blip2Config):
super().__init__(config)
self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
self.embeddings = Blip2TextEmbeddings(config.qformer_config)
self.qformer = Blip2QFormerModel(config.qformer_config)
self.text_projection = nn.Linear(config.qformer_config.hidden_size, config.image_text_hidden_size)
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, Blip2TextModelOutput]:
"""
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, Blip2TextModelWithProjection
>>> device = "cuda" if torch.cuda.is_available() else "cpu"
>>> model = Blip2TextModelWithProjection.from_pretrained(
... "Salesforce/blip2-itm-vit-g", dtype=torch.float16
... )
>>> model.to(device) # doctest: +IGNORE_RESULT
>>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-itm-vit-g")
>>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], return_tensors="pt").to(device)
>>> outputs = model(**inputs)
>>> text_embeds = outputs.text_embeds
>>> print(text_embeds.shape)
torch.Size([2, 7, 256])
```"""
query_embeds = self.embeddings(input_ids=input_ids, position_ids=position_ids)
text_outputs = self.qformer(query_embeds=query_embeds, query_length=0, attention_mask=attention_mask, **kwargs)
pooled_output = text_outputs[0]
pooled_output = pooled_output.to(dtype=self.text_projection.weight.dtype)
text_embeds = self.text_projection(pooled_output)
text_embeds = nn.functional.normalize(text_embeds, dim=-1)
return Blip2TextModelOutput(text_embeds=text_embeds, last_hidden_state=text_outputs.last_hidden_state, hidden_states=text_outputs.hidden_states, attentions=text_outputs.attentions)
````

class_skeleton:

````python
@auto_docstring
class Blip2TextModelWithProjection(Blip2PreTrainedModel):
def __init__(self, config: Blip2Config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, Blip2TextModelOutput]:
'''
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, Blip2TextModelWithProjection
>>> device = "cuda" if torch.cuda.is_available() else "cpu"
>>> model = Blip2TextModelWithProjection.from_pretrained(
... "Salesforce/blip2-itm-vit-g", dtype=torch.float16
... )
>>> model.to(device) # doctest: +IGNORE_RESULT
>>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-itm-vit-g")
>>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], return_tensors="pt").to(device)
>>> outputs = model(**inputs)
>>> text_embeds = outputs.text_embeds
>>> print(text_embeds.shape)
torch.Size([2, 7, 256])
```'''
pass
````

metrics:
total_program_units: 8, total_doc_str: 1
AvgCountLine: 21, AvgCountLineBlank: 4, AvgCountLineCode: 11, AvgCountLineComment: 5, AvgCyclomatic: 2, CommentToCodeRatio: 0.4
CountClassBase: 1, CountClassCoupled: 8, CountClassCoupledModified: 4, CountClassDerived: 0
CountDeclInstanceMethod: 4, CountDeclInstanceVariable: 4, CountDeclMethod: 4, CountDeclMethodAll: 5
CountLine: 91, CountLineBlank: 21, CountLineCode: 50, CountLineCodeDecl: 25, CountLineCodeExe: 35, CountLineComment: 20
CountStmt: 25, CountStmtDecl: 16, CountStmtExe: 20
MaxCyclomatic: 4, MaxInheritanceTree: 2, MaxNesting: 1, SumCyclomatic: 7
id: 1,002
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
class_name: transformers.models.blip_2.modeling_blip_2.Blip2VisionEmbeddings

human_written_code:

````python
from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig
import torch
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
from torch import nn
class Blip2VisionEmbeddings(nn.Module):
def __init__(self, config: Blip2VisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim))
self.patch_embedding = nn.Conv2d(in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embedding.shape[1] - 1
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embedding
class_pos_embed = self.position_embedding[:, :1]
patch_pos_embed = self.position_embedding[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
if interpolate_pos_encoding:
position_embedding = self.interpolate_pos_encoding(embeddings, height, width)
else:
position_embedding = self.position_embedding
embeddings = embeddings + position_embedding[:, :embeddings.size(1), :].to(target_dtype)
return embeddings
````

class_skeleton:

````python
class Blip2VisionEmbeddings(nn.Module):
def __init__(self, config: Blip2VisionConfig):
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
'''
pass
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
pass
````

metrics:
total_program_units: 4, total_doc_str: 1
AvgCountLine: 23, AvgCountLineBlank: 5, AvgCountLineCode: 16, AvgCountLineComment: 3, AvgCyclomatic: 2, CommentToCodeRatio: 0.19
CountClassBase: 1, CountClassCoupled: 5, CountClassCoupledModified: 1, CountClassDerived: 0
CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 9, CountDeclMethod: 3, CountDeclMethodAll: 13
CountLine: 72, CountLineBlank: 16, CountLineCode: 48, CountLineCodeDecl: 27, CountLineCodeExe: 44, CountLineComment: 9
CountStmt: 40, CountStmtDecl: 27, CountStmtExe: 36
MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 5
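The patch-grid arithmetic in Blip2VisionEmbeddings is easy to check by hand; a small sketch, assuming illustrative values image_size=224 and patch_size=14 (example numbers, not values read from this record):

```python
# Illustrative numbers only: image_size=224 and patch_size=14 are assumptions for the example.
image_size, patch_size = 224, 14

num_patches = (image_size // patch_size) ** 2   # 16 * 16 = 256 patch tokens
num_positions = num_patches + 1                 # +1 for the class token -> 257 learned positions

# In interpolate_pos_encoding, the class position is split off first, so the square grid
# is rebuilt from the remaining 256 patch positions:
grid_side = int((num_positions - 1) ** 0.5)     # sqrt(256) = 16, the original grid side
print(num_patches, num_positions, grid_side)    # 256 257 16
```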
id: 1,003
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
class_name: transformers.models.blip_2.modeling_blip_2.Blip2VisionModel

human_written_code:

````python
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
from typing import Any, Callable, Optional, Union
from ...utils.generic import OutputRecorder, check_model_inputs
from torch import nn
from ...processing_utils import Unpack
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithPast, Seq2SeqLMOutput
from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig
import torch
@auto_docstring
class Blip2VisionModel(Blip2PreTrainedModel):
main_input_name = 'pixel_values'
config: Blip2VisionConfig
_can_record_outputs = {'hidden_states': Blip2EncoderLayer, 'attentions': Blip2Attention}
def __init__(self, config: Blip2VisionConfig):
super().__init__(config)
self.config = config
embed_dim = config.hidden_size
self.embeddings = Blip2VisionEmbeddings(config)
self.encoder = Blip2Encoder(config)
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.post_init()
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutputWithPooling]:
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
encoder_outputs: BaseModelOutput = self.encoder(inputs_embeds=hidden_states, **kwargs)
last_hidden_state = encoder_outputs.last_hidden_state
last_hidden_state = self.post_layernorm(last_hidden_state)
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output)
def get_input_embeddings(self):
return self.embeddings
````

class_skeleton:

````python
@auto_docstring
class Blip2VisionModel(Blip2PreTrainedModel):
def __init__(self, config: Blip2VisionConfig):
pass
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutputWithPooling]:
pass
def get_input_embeddings(self):
pass
````

metrics:
total_program_units: 7, total_doc_str: 0
AvgCountLine: 19, AvgCountLineBlank: 3, AvgCountLineCode: 15, AvgCountLineComment: 1, AvgCyclomatic: 3, CommentToCodeRatio: 0.06
CountClassBase: 1, CountClassCoupled: 7, CountClassCoupledModified: 4, CountClassDerived: 0
CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 4, CountDeclMethod: 3, CountDeclMethodAll: 4
CountLine: 65, CountLineBlank: 13, CountLineCode: 49, CountLineCodeDecl: 23, CountLineCodeExe: 36, CountLineComment: 3
CountStmt: 28, CountStmtDecl: 15, CountStmtExe: 24
MaxCyclomatic: 6, MaxInheritanceTree: 2, MaxNesting: 1, SumCyclomatic: 8
id: 1,004
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/modeling_blip_2.py
class_name: transformers.models.blip_2.modeling_blip_2.Blip2VisionModelWithProjection

human_written_code:

````python
from torch import nn
from ...processing_utils import Unpack
from .configuration_blip_2 import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig
import torch
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, filter_out_non_signature_kwargs, logging, torch_int
from typing import Any, Callable, Optional, Union
@auto_docstring
class Blip2VisionModelWithProjection(Blip2PreTrainedModel):
main_input_name = 'pixel_values'
_keep_in_fp32_modules = ['query_tokens', 'qformer']
_supports_flash_attn = False
def __init__(self, config: Blip2Config):
super().__init__(config)
self.vision_model = Blip2VisionModel._from_config(config.vision_config)
self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
self.qformer = Blip2QFormerModel._from_config(config.qformer_config)
self.vision_projection = nn.Linear(config.qformer_config.hidden_size, config.image_text_hidden_size)
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.vision_model.embeddings.patch_embedding
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, Blip2VisionModelOutput]:
"""
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, Blip2VisionModelWithProjection
>>> from transformers.image_utils import load_image
>>> device = "cuda" if torch.cuda.is_available() else "cpu"
>>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-itm-vit-g")
>>> model = Blip2VisionModelWithProjection.from_pretrained(
... "Salesforce/blip2-itm-vit-g", dtype=torch.float16
... )
>>> model.to(device) # doctest: +IGNORE_RESULT
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
>>> with torch.inference_mode():
... outputs = model(**inputs)
>>> image_embeds = outputs.image_embeds
>>> print(image_embeds.shape)
torch.Size([1, 32, 256])
```"""
vision_outputs = self.vision_model(pixel_values=pixel_values, **kwargs)
pooled_output = vision_outputs[0]
image_attention_mask = torch.ones(pooled_output.size()[:-1], dtype=torch.long, device=pooled_output.device)
query_tokens = self.query_tokens.expand(pooled_output.shape[0], -1, -1)
query_outputs = self.qformer(query_embeds=query_tokens, encoder_hidden_states=pooled_output, encoder_attention_mask=image_attention_mask, **kwargs)
embeds = query_outputs[0]
embeds = embeds.to(dtype=self.vision_projection.weight.dtype)
image_embeds = self.vision_projection(embeds)
image_embeds = nn.functional.normalize(image_embeds, dim=-1)
return Blip2VisionModelOutput(image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions)
````

class_skeleton:

````python
@auto_docstring
class Blip2VisionModelWithProjection(Blip2PreTrainedModel):
def __init__(self, config: Blip2Config):
pass
def get_input_embeddings(self) -> nn.Module:
pass
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, Blip2VisionModelOutput]:
'''
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, Blip2VisionModelWithProjection
>>> from transformers.image_utils import load_image
>>> device = "cuda" if torch.cuda.is_available() else "cpu"
>>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-itm-vit-g")
>>> model = Blip2VisionModelWithProjection.from_pretrained(
... "Salesforce/blip2-itm-vit-g", dtype=torch.float16
... )
>>> model.to(device) # doctest: +IGNORE_RESULT
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
>>> with torch.inference_mode():
... outputs = model(**inputs)
>>> image_embeds = outputs.image_embeds
>>> print(image_embeds.shape)
torch.Size([1, 32, 256])
```'''
pass
````

metrics:
total_program_units: 7, total_doc_str: 1
AvgCountLine: 30, AvgCountLineBlank: 6, AvgCountLineCode: 16, AvgCountLineComment: 8, AvgCyclomatic: 3, CommentToCodeRatio: 0.45
CountClassBase: 1, CountClassCoupled: 7, CountClassCoupledModified: 4, CountClassDerived: 0
CountDeclInstanceMethod: 3, CountDeclInstanceVariable: 4, CountDeclMethod: 3, CountDeclMethodAll: 4
CountLine: 99, CountLineBlank: 22, CountLineCode: 53, CountLineCodeDecl: 25, CountLineCodeExe: 41, CountLineComment: 24
CountStmt: 28, CountStmtDecl: 18, CountStmtExe: 24
MaxCyclomatic: 7, MaxInheritanceTree: 2, MaxNesting: 1, SumCyclomatic: 9
id: 1,005
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/processing_blip_2.py
class_name: transformers.models.blip_2.processing_blip_2.Blip2Processor

human_written_code:

````python
from ...image_utils import ImageInput
from ...image_processing_utils import BatchFeature
from typing import Optional, Union
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import AddedToken, BatchEncoding, PreTokenizedInput, TextInput
class Blip2Processor(ProcessorMixin):
"""
Constructs a BLIP-2 processor which wraps a BLIP image processor and an OPT/T5 tokenizer into a single processor.
[`BlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`AutoTokenizer`]. See the docstring
of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information.
Args:
image_processor (`BlipImageProcessor`):
An instance of [`BlipImageProcessor`]. The image processor is a required input.
tokenizer (`AutoTokenizer`):
An instance of ['PreTrainedTokenizer`]. The tokenizer is a required input.
num_query_tokens (`int`, *optional*):
Number of tokens used by the Qformer as queries, should be same as in model's config.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = ('BlipImageProcessor', 'BlipImageProcessorFast')
tokenizer_class = 'AutoTokenizer'
def __init__(self, image_processor, tokenizer, num_query_tokens=None, **kwargs):
tokenizer.return_token_type_ids = False
self.current_processor = image_processor
if not hasattr(tokenizer, 'image_token'):
self.image_token = AddedToken('<image>', normalized=False, special=True)
tokenizer.add_tokens([self.image_token], special_tokens=True)
else:
self.image_token = tokenizer.image_token
self.num_query_tokens = num_query_tokens
super().__init__(image_processor, tokenizer)
def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[str, list[str], TextInput, PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[Blip2ProcessorKwargs]) -> BatchEncoding:
"""
This method uses [`BlipImageProcessor.__call__`] method to prepare image(s) for the model, and
[`BertTokenizerFast.__call__`] to prepare text for the model.
Please refer to the docstring of the above two methods for more information.
Args:
images (`ImageInput`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
"""
if images is None and text is None:
raise ValueError('You have to specify either images or text.')
output_kwargs = self._merge_kwargs(Blip2ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None)
max_length = output_kwargs['text_kwargs'].pop('max_length', None)
if max_length is not None:
output_kwargs['text_kwargs']['max_length'] = max_length - self.num_query_tokens
encoding = BatchFeature(tensor_type=return_tensors)
if text is not None:
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and (not isinstance(text[0], str)):
raise ValueError('Invalid input text. Please provide a string, or a list of strings')
text_encoding = self.tokenizer(text, **output_kwargs['text_kwargs'])
if images is not None and self.num_query_tokens is not None:
image_tokens = self.image_token.content * self.num_query_tokens
output_kwargs['text_kwargs']['add_special_tokens'] = False
output_kwargs['text_kwargs']['padding'] = False
output_kwargs['text_kwargs']['truncation'] = False
image_text_encoding = self.tokenizer(image_tokens, **output_kwargs['text_kwargs'])
for k in text_encoding:
text_encoding[k] = [image_text_encoding[k] + sample for sample in text_encoding[k]]
encoding.update(text_encoding)
if images is not None:
image_encoding = self.image_processor(images, **output_kwargs['images_kwargs'])
encoding.update(image_encoding)
encoding = BatchFeature(encoding, tensor_type=return_tensors)
return encoding
````

class_skeleton:

````python
class Blip2Processor(ProcessorMixin):
'''
Constructs a BLIP-2 processor which wraps a BLIP image processor and an OPT/T5 tokenizer into a single processor.
[`BlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`AutoTokenizer`]. See the docstring
of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information.
Args:
image_processor (`BlipImageProcessor`):
An instance of [`BlipImageProcessor`]. The image processor is a required input.
tokenizer (`AutoTokenizer`):
An instance of ['PreTrainedTokenizer`]. The tokenizer is a required input.
num_query_tokens (`int`, *optional*):
Number of tokens used by the Qformer as queries, should be same as in model's config.
'''
def __init__(self, image_processor, tokenizer, num_query_tokens=None, **kwargs):
pass
def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[str, list[str], TextInput, PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[Blip2ProcessorKwargs]) -> BatchEncoding:
'''
This method uses [`BlipImageProcessor.__call__`] method to prepare image(s) for the model, and
[`BertTokenizerFast.__call__`] to prepare text for the model.
Please refer to the docstring of the above two methods for more information.
Args:
images (`ImageInput`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
'''
pass
````

metrics:
total_program_units: 3, total_doc_str: 2
AvgCountLine: 22, AvgCountLineBlank: 1, AvgCountLineCode: 14, AvgCountLineComment: 7, AvgCyclomatic: 3, CommentToCodeRatio: 0.64
CountClassBase: 1, CountClassCoupled: 9, CountClassCoupledModified: 3, CountClassDerived: 0
CountDeclInstanceMethod: 5, CountDeclInstanceVariable: 3, CountDeclMethod: 5, CountDeclMethodAll: 22
CountLine: 138, CountLineBlank: 15, CountLineCode: 75, CountLineCodeDecl: 32, CountLineCodeExe: 61, CountLineComment: 48
CountStmt: 50, CountStmtDecl: 24, CountStmtExe: 44
MaxCyclomatic: 9, MaxInheritanceTree: 2, MaxNesting: 3, SumCyclomatic: 14
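The Blip2Processor.__call__ logic above prepends num_query_tokens copies of the image token to every tokenized text when images are passed. A brief usage sketch, reusing the Salesforce/blip2-itm-vit-g checkpoint that appears in the model examples earlier in this preview; the image path is a placeholder:

```python
# Usage sketch; "cat.jpg" is a placeholder path, and the checkpoint id is taken from the
# doctest examples shown in the Blip2*WithProjection records above.
from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("Salesforce/blip2-itm-vit-g")
image = Image.open("cat.jpg")

# With both images and text, the processor prepends `num_query_tokens` image-token ids
# to each text sequence before returning the batch.
inputs = processor(images=image, text=["a photo of a cat"], return_tensors="pt")
print(inputs.keys())             # e.g. input_ids, attention_mask, pixel_values
print(inputs["input_ids"].shape)
```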
id: 1,006
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/blip_2/processing_blip_2.py
class_name: transformers.models.blip_2.processing_blip_2.Blip2ProcessorKwargs

human_written_code:

````python
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
class Blip2ProcessorKwargs(ProcessingKwargs, total=False):
_defaults = {'text_kwargs': {'add_special_tokens': True, 'padding': False, 'stride': 0, 'return_overflowing_tokens': False, 'return_special_tokens_mask': False, 'return_offsets_mapping': False, 'return_token_type_ids': False, 'return_length': False, 'verbose': True}, 'images_kwargs': {}}
````

class_skeleton:

````python
class Blip2ProcessorKwargs(ProcessingKwargs, total=False):
pass
````

metrics:
total_program_units: 1, total_doc_str: 0
AvgCountLine: 0, AvgCountLineBlank: 0, AvgCountLineCode: 0, AvgCountLineComment: 0, AvgCyclomatic: 0, CommentToCodeRatio: 0
CountClassBase: 2, CountClassCoupled: 0, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 0, CountDeclInstanceVariable: 0, CountDeclMethod: 0, CountDeclMethodAll: 0
CountLine: 15, CountLineBlank: 0, CountLineCode: 15, CountLineCodeDecl: 2, CountLineCodeExe: 14, CountLineComment: 0
CountStmt: 2, CountStmtDecl: 2, CountStmtExe: 1
MaxCyclomatic: 0, MaxInheritanceTree: 3, MaxNesting: 0, SumCyclomatic: 0
id: 1,007
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/bloom/configuration_bloom.py
class_name: transformers.models.bloom.configuration_bloom.BloomConfig

human_written_code:

````python
from ...configuration_utils import PretrainedConfig
class BloomConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`BloomModel`]. It is used to instantiate a Bloom
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to the Bloom architecture
[bigscience/bloom](https://huggingface.co/bigscience/bloom).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 250880):
Vocabulary size of the Bloom model. Defines the maximum number of different tokens that can be represented
by the `inputs_ids` passed when calling [`BloomModel`]. Check [this
discussion](https://huggingface.co/bigscience/bloom/discussions/120#633d28389addb8530b406c2a) on how the
`vocab_size` has been defined.
hidden_size (`int`, *optional*, defaults to 64):
Dimensionality of the embeddings and hidden states.
n_layer (`int`, *optional*, defaults to 2):
Number of hidden layers in the Transformer encoder.
n_head (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
The epsilon to use in the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
apply_residual_connection_post_layernorm (`bool`, *optional*, defaults to `False`):
If enabled, use the layer norm of the hidden states as the residual in the transformer blocks
hidden_dropout (`float`, *optional*, defaults to 0.1):
Dropout rate of the dropout function on the bias dropout.
attention_dropout (`float`, *optional*, defaults to 0.1):
Dropout rate applied to the attention probs
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
pretraining_tp (`int`, *optional*, defaults to `1`):
Experimental feature. Tensor parallelism rank used during pretraining with Megatron. Please refer to [this
document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
issue](https://github.com/pytorch/pytorch/issues/76232). Note also that this is enabled only when
`slow_but_exact=True`.
slow_but_exact (`bool`, *optional*, defaults to `False`):
Experimental feature. Whether to use slow but exact implementation of the attention mechanism. While
merging the TP rank tensors, due to slicing operations the results may be slightly different between the
model trained on Megatron and our model. Please refer to [this
issue](https://github.com/pytorch/pytorch/issues/76232). A solution to obtain more accurate results is to
enable this feature. Enabling this will hurt the computational time of the inference. Will be probably
resolved in the future once the main model has been fine-tuned with TP_rank=1.
Example:
```python
>>> from transformers import BloomConfig, BloomModel
>>> # Initializing a Bloom configuration
>>> configuration = BloomConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = BloomModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'bloom'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'num_hidden_layers': 'n_layer', 'num_attention_heads': 'n_head'}
def __init__(self, vocab_size=250880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1e-05, initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2, apply_residual_connection_post_layernorm=False, hidden_dropout=0.0, attention_dropout=0.0, pretraining_tp=1, slow_but_exact=False, **kwargs):
self.vocab_size = vocab_size
n_embed = kwargs.pop('n_embed', None)
self.hidden_size = hidden_size if n_embed is None else n_embed
self.n_layer = n_layer
self.n_head = n_head
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
self.pretraining_tp = pretraining_tp
self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.slow_but_exact = slow_but_exact
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
````

class_skeleton:

````python
class BloomConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`BloomModel`]. It is used to instantiate a Bloom
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to the Bloom architecture
[bigscience/bloom](https://huggingface.co/bigscience/bloom).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 250880):
Vocabulary size of the Bloom model. Defines the maximum number of different tokens that can be represented
by the `inputs_ids` passed when calling [`BloomModel`]. Check [this
discussion](https://huggingface.co/bigscience/bloom/discussions/120#633d28389addb8530b406c2a) on how the
`vocab_size` has been defined.
hidden_size (`int`, *optional*, defaults to 64):
Dimensionality of the embeddings and hidden states.
n_layer (`int`, *optional*, defaults to 2):
Number of hidden layers in the Transformer encoder.
n_head (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
The epsilon to use in the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
apply_residual_connection_post_layernorm (`bool`, *optional*, defaults to `False`):
If enabled, use the layer norm of the hidden states as the residual in the transformer blocks
hidden_dropout (`float`, *optional*, defaults to 0.1):
Dropout rate of the dropout function on the bias dropout.
attention_dropout (`float`, *optional*, defaults to 0.1):
Dropout rate applied to the attention probs
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
pretraining_tp (`int`, *optional*, defaults to `1`):
Experimental feature. Tensor parallelism rank used during pretraining with Megatron. Please refer to [this
document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
issue](https://github.com/pytorch/pytorch/issues/76232). Note also that this is enabled only when
`slow_but_exact=True`.
slow_but_exact (`bool`, *optional*, defaults to `False`):
Experimental feature. Whether to use slow but exact implementation of the attention mechanism. While
merging the TP rank tensors, due to slicing operations the results may be slightly different between the
model trained on Megatron and our model. Please refer to [this
issue](https://github.com/pytorch/pytorch/issues/76232). A solution to obtain more accurate results is to
enable this feature. Enabling this will hurt the computational time of the inference. Will be probably
resolved in the future once the main model has been fine-tuned with TP_rank=1.
Example:
```python
>>> from transformers import BloomConfig, BloomModel
>>> # Initializing a Bloom configuration
>>> configuration = BloomConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = BloomModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=250880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1e-05, initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2, apply_residual_connection_post_layernorm=False, hidden_dropout=0.0, attention_dropout=0.0, pretraining_tp=1, slow_but_exact=False, **kwargs):
pass
````

metrics:
total_program_units: 2, total_doc_str: 1
AvgCountLine: 37, AvgCountLineBlank: 2, AvgCountLineCode: 34, AvgCountLineComment: 2, AvgCyclomatic: 2, CommentToCodeRatio: 1.37
CountClassBase: 1, CountClassCoupled: 1, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 1, CountDeclInstanceVariable: 14, CountDeclMethod: 1, CountDeclMethodAll: 1
CountLine: 108, CountLineBlank: 12, CountLineCode: 41, CountLineCodeDecl: 37, CountLineCodeExe: 22, CountLineComment: 56
CountStmt: 21, CountStmtDecl: 20, CountStmtExe: 19
MaxCyclomatic: 2, MaxInheritanceTree: 1, MaxNesting: 0, SumCyclomatic: 2
id: 1,008
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/bloom/configuration_bloom.py
class_name: transformers.models.bloom.configuration_bloom.BloomOnnxConfig

human_written_code:

````python
from collections.abc import Mapping
from ...onnx import OnnxConfigWithPast, PatchingSpec
from typing import TYPE_CHECKING, Any, Optional
from ...configuration_utils import PretrainedConfig
from collections import OrderedDict
from ...utils import is_torch_available, logging
from packaging import version
class BloomOnnxConfig(OnnxConfigWithPast):
torch_onnx_minimum_version = version.parse('1.12')
def __init__(self, config: PretrainedConfig, task: str='default', patching_specs: Optional[list[PatchingSpec]]=None, use_past: bool=False):
super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
if not getattr(self._config, 'pad_token_id', None):
self._config.pad_token_id = 0
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
if self.use_past:
self.fill_with_past_key_values_(common_inputs, direction='inputs', inverted_values_shape=True)
common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def num_layers(self) -> int:
return self._config.n_layer
@property
def num_attention_heads(self) -> int:
return self._config.n_head
@property
def atol_for_validation(self) -> float:
return 0.001
def generate_dummy_inputs(self, tokenizer: 'PreTrainedTokenizer', batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair)
ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
batch, seqlen = common_inputs['input_ids'].shape
past_key_values_length = seqlen + 2
head_dim = self._config.hidden_size // self.num_attention_heads
past_key_shape = (batch * self.num_attention_heads, head_dim, past_key_values_length)
past_value_shape = (batch * self.num_attention_heads, past_key_values_length, head_dim)
ordered_inputs['past_key_values'] = [(torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)]
ordered_inputs['attention_mask'] = common_inputs['attention_mask']
if self.use_past:
mask_dtype = ordered_inputs['attention_mask'].dtype
ordered_inputs['attention_mask'] = torch.cat([ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
return ordered_inputs
@property
def default_onnx_opset(self) -> int:
return 13
````

class_skeleton:

````python
class BloomOnnxConfig(OnnxConfigWithPast):
def __init__(self, config: PretrainedConfig, task: str='default', patching_specs: Optional[list[PatchingSpec]]=None, use_past: bool=False):
pass
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def num_layers(self) -> int:
pass
@property
def num_attention_heads(self) -> int:
pass
@property
def atol_for_validation(self) -> float:
pass
def generate_dummy_inputs(self, tokenizer: 'PreTrainedTokenizer', batch_size: int=-1, seq_length: int=-1, is_pair: bool=False) -> Mapping[str, Any]:
pass
@property
def default_onnx_opset(self) -> int:
pass
````

metrics:
total_program_units: 13, total_doc_str: 0
AvgCountLine: 11, AvgCountLineBlank: 1, AvgCountLineCode: 9, AvgCountLineComment: 1, AvgCyclomatic: 2, CommentToCodeRatio: 0.07
CountClassBase: 1, CountClassCoupled: 9, CountClassCoupledModified: 0, CountClassDerived: 0
CountDeclInstanceMethod: 7, CountDeclInstanceVariable: 0, CountDeclMethod: 7, CountDeclMethodAll: 7
CountLine: 91, CountLineBlank: 13, CountLineCode: 73, CountLineCodeDecl: 38, CountLineCodeExe: 46, CountLineComment: 5
CountStmt: 39, CountStmtDecl: 19, CountStmtExe: 30
MaxCyclomatic: 4, MaxInheritanceTree: 1, MaxNesting: 2, SumCyclomatic: 12
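generate_dummy_inputs above builds per-layer past key/value tensors of shape (batch * num_heads, head_dim, past_len) and (batch * num_heads, past_len, head_dim). A small sketch of that shape arithmetic, plugging in the BloomConfig defaults from the previous record (hidden_size=64, n_head=8, n_layer=2) with an assumed batch of 2 and sequence length of 3:

```python
import torch

# Shape sketch using the BloomConfig defaults shown above (hidden_size=64, n_head=8, n_layer=2);
# batch=2 and seqlen=3 are assumed example values.
batch, seqlen = 2, 3
hidden_size, num_heads, num_layers = 64, 8, 2

head_dim = hidden_size // num_heads          # 8
past_key_values_length = seqlen + 2          # mirrors the "+ 2" used in generate_dummy_inputs

past_key_shape = (batch * num_heads, head_dim, past_key_values_length)    # (16, 8, 5)
past_value_shape = (batch * num_heads, past_key_values_length, head_dim)  # (16, 5, 8)

past_key_values = [
    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(num_layers)
]
print(len(past_key_values), past_key_values[0][0].shape, past_key_values[0][1].shape)
```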
id: 1,009
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/bloom/modeling_bloom.py
class_name: transformers.models.bloom.modeling_bloom.BloomAttention

human_written_code:

````python
from typing import Optional, Union
import math
from torch import nn
from ...cache_utils import Cache, DynamicCache, StaticCache
from torch.nn import functional as F
from .configuration_bloom import BloomConfig
import torch
class BloomAttention(nn.Module):
def __init__(self, config: BloomConfig, layer_idx: Optional[int]=None):
super().__init__()
self.pretraining_tp = config.pretraining_tp
self.slow_but_exact = config.slow_but_exact
self.hidden_size = config.hidden_size
self.num_heads = config.n_head
self.head_dim = self.hidden_size // self.num_heads
self.split_size = self.hidden_size
self.hidden_dropout = config.hidden_dropout
if self.head_dim * self.num_heads != self.hidden_size:
raise ValueError(f'`hidden_size` must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`: {self.num_heads}).')
self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim)
self.beta = 1.0
self.layer_idx = layer_idx
if layer_idx is None:
logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True)
self.dense = nn.Linear(self.hidden_size, self.hidden_size)
self.attention_dropout = nn.Dropout(config.attention_dropout)
def _reshape(self, fused_qkv: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Split the last dimension into (num_heads, head_dim) and reshapes to (bs, heads, len, dim) shape
without making any copies, results share same memory storage as `fused_qkv`
Args:
fused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim]
Returns:
query: [batch_size, num_heads, seq_length, head_dim]
key: [batch_size, num_heads, seq_length, head_dim]
value: [batch_size, num_heads, seq_length, head_dim]
"""
batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)
query_layer = fused_qkv[..., 0, :].transpose(1, 2)
key_layer = fused_qkv[..., 1, :].transpose(1, 2)
value_layer = fused_qkv[..., 2, :].transpose(1, 2)
return (query_layer, key_layer, value_layer)
def _merge_heads(self, x: torch.Tensor) -> torch.Tensor:
"""
Merge heads together over the last dimension
Args:
x (`torch.tensor`): [batch_size * num_heads, seq_length, head_dim]
Returns:
torch.tensor: [batch_size, seq_length, num_heads * head_dim]
"""
batch_size_and_num_heads, seq_length, _ = x.shape
batch_size = batch_size_and_num_heads // self.num_heads
x = x.view(batch_size, self.num_heads, seq_length, self.head_dim)
x = x.permute(0, 2, 1, 3)
return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim)
def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Cache]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.LongTensor]=None):
batch_size, q_length, _ = hidden_states.shape
fused_qkv = self.query_key_value(hidden_states)
query_layer, key_layer, value_layer = self._reshape(fused_qkv)
if layer_past is not None:
cache_kwargs = {'cache_position': cache_position}
key_layer, value_layer = layer_past.update(key_layer, value_layer, self.layer_idx, cache_kwargs)
query_layer = query_layer.reshape(batch_size * self.num_heads, -1, self.head_dim)
key_layer = key_layer.reshape(batch_size * self.num_heads, -1, self.head_dim).transpose(-1, -2)
value_layer = value_layer.reshape(batch_size * self.num_heads, -1, self.head_dim)
attention_scores = alibi.baddbmm(batch1=query_layer, batch2=key_layer, beta=self.beta, alpha=self.inv_norm_factor)
attn_weights = attention_scores.view(batch_size, self.num_heads, q_length, -1)
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, :key_layer.shape[-1]]
attn_weights = attn_weights + causal_mask
attention_probs = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_layer.dtype)
attention_probs = self.attention_dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
attention_probs_reshaped = attention_probs.view(batch_size * self.num_heads, q_length, -1)
context_layer = torch.bmm(attention_probs_reshaped, value_layer)
context_layer = self._merge_heads(context_layer)
if self.pretraining_tp > 1 and self.slow_but_exact:
slices = self.hidden_size / self.pretraining_tp
output_tensor = torch.zeros_like(context_layer)
for i in range(self.pretraining_tp):
output_tensor = output_tensor + F.linear(context_layer[:, :, int(i * slices):int((i + 1) * slices)], self.dense.weight[:, int(i * slices):int((i + 1) * slices)])
else:
output_tensor = self.dense(context_layer)
output_tensor = dropout_add(output_tensor, residual, self.hidden_dropout, self.training)
return (output_tensor, attention_probs)
````

class_skeleton:

````python
class BloomAttention(nn.Module):
def __init__(self, config: BloomConfig, layer_idx: Optional[int]=None):
pass
def _reshape(self, fused_qkv: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Split the last dimension into (num_heads, head_dim) and reshapes to (bs, heads, len, dim) shape
without making any copies, results share same memory storage as `fused_qkv`
Args:
fused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim]
Returns:
query: [batch_size, num_heads, seq_length, head_dim]
key: [batch_size, num_heads, seq_length, head_dim]
value: [batch_size, num_heads, seq_length, head_dim]
'''
pass
def _merge_heads(self, x: torch.Tensor) -> torch.Tensor:
'''
Merge heads together over the last dimension
Args:
x (`torch.tensor`): [batch_size * num_heads, seq_length, head_dim]
Returns:
torch.tensor: [batch_size, seq_length, num_heads * head_dim]
'''
pass
def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Cache]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.LongTensor]=None):
pass
````

metrics:
total_program_units: 5, total_doc_str: 2
AvgCountLine: 38, AvgCountLineBlank: 7, AvgCountLineCode: 23, AvgCountLineComment: 9, AvgCyclomatic: 3, CommentToCodeRatio: 0.39
CountClassBase: 1, CountClassCoupled: 8, CountClassCoupledModified: 2, CountClassDerived: 0
CountDeclInstanceMethod: 4, CountDeclInstanceVariable: 13, CountDeclMethod: 4, CountDeclMethodAll: 14
CountLine: 156, CountLineBlank: 29, CountLineCode: 93, CountLineCodeDecl: 49, CountLineCodeExe: 77, CountLineComment: 36
CountStmt: 66, CountStmtDecl: 38, CountStmtExe: 61
MaxCyclomatic: 7, MaxInheritanceTree: 1, MaxNesting: 2, SumCyclomatic: 12
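BloomAttention._reshape above views the fused QKV projection [batch, seq, 3 * hidden] as [batch, seq, heads, 3, head_dim] and slices out Q, K, and V without copying. A shape-only sketch with assumed toy sizes (batch=2, seq=4, heads=8, head_dim=8, so hidden=64):

```python
import torch

# Shape-only sketch of the _reshape logic; batch=2, seq=4, num_heads=8, head_dim=8
# are assumed toy sizes, not values from this record.
batch, seq, num_heads, head_dim = 2, 4, 8, 8
fused_qkv = torch.randn(batch, seq, 3 * num_heads * head_dim)  # [2, 4, 192]

fused_qkv = fused_qkv.view(batch, seq, num_heads, 3, head_dim)
query = fused_qkv[..., 0, :].transpose(1, 2)                   # [2, 8, 4, 8]
key = fused_qkv[..., 1, :].transpose(1, 2)                     # [2, 8, 4, 8]
value = fused_qkv[..., 2, :].transpose(1, 2)                   # [2, 8, 4, 8]
print(query.shape, key.shape, value.shape)
```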
id: 1,010
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/bloom/modeling_bloom.py
class_name: transformers.models.bloom.modeling_bloom.BloomBlock

human_written_code:

````python
import torch
from ...modeling_layers import GradientCheckpointingLayer
from .configuration_bloom import BloomConfig
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from ...cache_utils import Cache, DynamicCache, StaticCache
from typing import Optional, Union
class BloomBlock(GradientCheckpointingLayer):
def __init__(self, config: BloomConfig, layer_idx: Optional[int]=None):
super().__init__()
hidden_size = config.hidden_size
self.input_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.num_heads = config.n_head
self.self_attention = BloomAttention(config, layer_idx)
self.post_attention_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.mlp = BloomMLP(config)
self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm
self.hidden_dropout = config.hidden_dropout
def forward(self, hidden_states: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Cache]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.LongTensor]=None):
layernorm_output = self.input_layernorm(hidden_states)
if self.apply_residual_connection_post_layernorm:
residual = layernorm_output
else:
residual = hidden_states
attention_output, attn_weights = self.self_attention(layernorm_output, residual, layer_past=layer_past, attention_mask=attention_mask, alibi=alibi, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position)
layernorm_output = self.post_attention_layernorm(attention_output)
if self.apply_residual_connection_post_layernorm:
residual = layernorm_output
else:
residual = attention_output
output = self.mlp(layernorm_output, residual)
return (output, attn_weights)
````

class_skeleton:

````python
class BloomBlock(GradientCheckpointingLayer):
def __init__(self, config: BloomConfig, layer_idx: Optional[int]=None):
pass
def forward(self, hidden_states: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Cache]=None, head_mask: Optional[torch.Tensor]=None, use_cache: bool=False, output_attentions: bool=False, cache_position: Optional[torch.LongTensor]=None):
pass
````

metrics:
total_program_units: 3, total_doc_str: 0
AvgCountLine: 35, AvgCountLineBlank: 7, AvgCountLineCode: 25, AvgCountLineComment: 4, AvgCyclomatic: 3, CommentToCodeRatio: 0.14
CountClassBase: 1, CountClassCoupled: 8, CountClassCoupledModified: 4, CountClassDerived: 0
CountDeclInstanceMethod: 2, CountDeclInstanceVariable: 7, CountDeclMethod: 2, CountDeclMethodAll: 12
CountLine: 71, CountLineBlank: 14, CountLineCode: 51, CountLineCodeDecl: 27, CountLineCodeExe: 38, CountLineComment: 7
CountStmt: 28, CountStmtDecl: 17, CountStmtExe: 25
MaxCyclomatic: 4, MaxInheritanceTree: 1, MaxNesting: 1, SumCyclomatic: 5
id: 1,011
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/bloom/modeling_bloom.py
class_name: transformers.models.bloom.modeling_bloom.BloomForCausalLM

human_written_code:

````python
from typing import Optional, Union
import warnings
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput
from torch import nn
from ...generation import GenerationMixin
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from .configuration_bloom import BloomConfig
import torch
from ...cache_utils import Cache, DynamicCache, StaticCache
@auto_docstring(custom_intro='\n The Bloom Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ')
class BloomForCausalLM(BloomPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config: BloomConfig):
super().__init__(config)
self.transformer = BloomModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
def set_output_embeddings(self, new_embeddings: torch.Tensor):
self.lm_head = new_embeddings
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, use_cache=True, **kwargs):
if past_key_values is not None:
if inputs_embeds is not None and input_ids.shape[1] == 0:
inputs_embeds = inputs_embeds[:, -cache_position.shape[0]:]
elif inputs_embeds is not None or cache_position[-1] >= input_ids.shape[1]:
input_ids = input_ids[:, -cache_position.shape[0]:]
elif input_ids.shape[1] != cache_position.shape[0]:
input_ids = input_ids[:, cache_position]
if inputs_embeds is not None and len(cache_position) == inputs_embeds.shape[1]:
model_inputs = {'inputs_embeds': inputs_embeds, 'input_ids': None}
else:
model_inputs = {'input_ids': input_ids.clone(memory_format=torch.contiguous_format), 'inputs_embeds': None}
if isinstance(past_key_values, StaticCache) and attention_mask is not None:
target_length = past_key_values.get_max_cache_shape()
batch_size, seq_length = attention_mask.shape
diff = target_length - seq_length
new_attn_mask = torch.zeros(batch_size, diff, device=attention_mask.device, dtype=attention_mask.dtype)
attention_mask = torch.cat([attention_mask, new_attn_mask], dim=-1)
model_inputs.update({'cache_position': cache_position, 'past_key_values': past_key_values, 'use_cache': use_cache, 'attention_mask': attention_mask})
for key, value in kwargs.items():
if key not in model_inputs:
model_inputs[key] = value
return model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, tuple[tuple[torch.Tensor, torch.Tensor], ...]]]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **deprecated_arguments) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
"""
num_items_in_batch = deprecated_arguments.pop('num_items_in_batch', None)
if deprecated_arguments.pop('position_ids', False) is not False:
warnings.warn('`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore passing `position_ids`.', FutureWarning)
if len(deprecated_arguments) > 0:
raise ValueError(f'Got unexpected arguments: {deprecated_arguments}')
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
labels = labels.to(lm_logits.device)
loss = self.loss_function(lm_logits, labels, vocab_size=self.config.vocab_size, num_items_in_batch=num_items_in_batch)
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
````

class_skeleton:

````python
@auto_docstring(custom_intro='\n The Bloom Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ')
class BloomForCausalLM(BloomPreTrainedModel, GenerationMixin):
def __init__(self, config: BloomConfig):
pass
def set_output_embeddings(self, new_embeddings: torch.Tensor):
pass
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, use_cache=True, **kwargs):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, tuple[tuple[torch.Tensor, torch.Tensor], ...]]]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **deprecated_arguments) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
'''
pass
````

metrics:
total_program_units: 7, total_doc_str: 1
AvgCountLine: 28, AvgCountLineBlank: 2, AvgCountLineCode: 21, AvgCountLineComment: 6, AvgCyclomatic: 3, CommentToCodeRatio: 0.27
CountClassBase: 2, CountClassCoupled: 11, CountClassCoupledModified: 5, CountClassDerived: 0
CountDeclInstanceMethod: 6, CountDeclInstanceVariable: 2, CountDeclMethod: 6, CountDeclMethodAll: 8
CountLine: 182, CountLineBlank: 19, CountLineCode: 131, CountLineCodeDecl: 50, CountLineCodeExe: 93, CountLineComment: 36
CountStmt: 50, CountStmtDecl: 23, CountStmtExe: 43
MaxCyclomatic: 7, MaxInheritanceTree: 2, MaxNesting: 2, SumCyclomatic: 18
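BloomForCausalLM above pairs BloomModel with a tied language-modeling head and implements prepare_inputs_for_generation for cached decoding. A short generation sketch; the bigscience/bloom-560m checkpoint id is an assumption for illustration, since this preview only references bigscience/bloom in the BloomConfig docstring:

```python
import torch
from transformers import AutoTokenizer, BloomForCausalLM

# Generation sketch; "bigscience/bloom-560m" is an assumed small checkpoint id.
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
model = BloomForCausalLM.from_pretrained("bigscience/bloom-560m")

inputs = tokenizer("The BLOOM model was trained on", return_tensors="pt")
with torch.inference_mode():
    output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```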
id: 1,012
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/bloom/modeling_bloom.py
class_name: transformers.models.bloom.modeling_bloom.BloomForQuestionAnswering

human_written_code:

````python
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
import torch
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput
from typing import Optional, Union
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from torch import nn
@auto_docstring
class BloomForQuestionAnswering(BloomPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = BloomModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return (total_loss,) + output if total_loss is not None else output
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
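A small illustrative decode, not part of the class above: turning start/end logits of the shape produced by `qa_outputs` into a predicted answer span with a plain argmax. Random tensors stand in for real model outputs.

```python
# Illustrative sketch: greedy span extraction from start/end logits like those returned above.
import torch

batch_size, seq_len = 2, 16
start_logits = torch.randn(batch_size, seq_len)  # stand-ins for model outputs
end_logits = torch.randn(batch_size, seq_len)

start_idx = start_logits.argmax(dim=-1)
end_idx = end_logits.argmax(dim=-1)
# Keep the span well-formed (end >= start), in the same spirit as the clamping above.
end_idx = torch.maximum(end_idx, start_idx)
print(list(zip(start_idx.tolist(), end_idx.tolist())))  # (start, end) token indices per example
```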
|
@auto_docstring
class BloomForQuestionAnswering(BloomPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
'''
pass
| 5 | 1 | 39 | 4 | 29 | 7 | 4 | 0.22 | 1 | 4 | 2 | 0 | 2 | 2 | 2 | 4 | 81 | 9 | 59 | 28 | 43 | 13 | 31 | 15 | 28 | 7 | 2 | 2 | 8 |
| 1,013 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/bloom/modeling_bloom.py | transformers.models.bloom.modeling_bloom.BloomForSequenceClassification |
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
import torch
import warnings
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from typing import Optional, Union
from ...cache_utils import Cache, DynamicCache, StaticCache
from torch import nn
from .configuration_bloom import BloomConfig
@auto_docstring(custom_intro='\n The Bloom Model transformer with a sequence classification head on top (linear layer).\n\n [`BloomForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-1) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ')
class BloomForSequenceClassification(BloomPreTrainedModel):
def __init__(self, config: BloomConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = BloomModel(config)
self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, tuple[tuple[torch.Tensor, torch.Tensor], ...]]]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **deprecated_arguments) -> Union[tuple[torch.Tensor], SequenceClassifierOutputWithPast]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
if deprecated_arguments.pop('position_ids', False) is not False:
warnings.warn('`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore passing `position_ids`.', FutureWarning)
if len(deprecated_arguments) > 0:
raise ValueError(f'Got unexpected arguments: {deprecated_arguments}')
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size = input_ids.shape[0]
else:
batch_size = inputs_embeds.shape[0]
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
if self.config.pad_token_id is None:
last_non_pad_token = -1
elif input_ids is not None:
non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
else:
last_non_pad_token = -1
logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`')
pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
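The last-non-padding-token pooling in the forward above can be seen in isolation with a toy batch; `pad_token_id=3` and all tensors below are made up for illustration.

```python
# Standalone sketch of the "last non-padding token" selection used above.
import torch

pad_token_id = 3
input_ids = torch.tensor([[5, 8, 9, 3, 3],
                          [7, 3, 3, 3, 3]])
logits = torch.randn(2, 5, 4)  # (batch, seq, num_labels)

non_pad_mask = (input_ids != pad_token_id).int()
token_indices = torch.arange(input_ids.shape[-1])
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)  # tensor([2, 0])

pooled_logits = logits[torch.arange(input_ids.shape[0]), last_non_pad_token]
print(pooled_logits.shape)  # torch.Size([2, 4])
```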
| null | 5 | 1 | 56 | 5 | 46 | 5 | 10 | 0.09 | 1 | 10 | 4 | 0 | 2 | 3 | 2 | 4 | 119 | 11 | 99 | 31 | 77 | 9 | 49 | 17 | 46 | 18 | 2 | 3 | 19 |
| 1,014 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/bloom/modeling_bloom.py | transformers.models.bloom.modeling_bloom.BloomForTokenClassification |
import warnings
from .configuration_bloom import BloomConfig
from ...cache_utils import Cache, DynamicCache, StaticCache
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
import torch
from torch import nn
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
@auto_docstring
class BloomForTokenClassification(BloomPreTrainedModel):
def __init__(self, config: BloomConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = BloomModel(config)
if hasattr(config, 'classifier_dropout') and config.classifier_dropout is not None:
classifier_dropout = config.classifier_dropout
elif hasattr(config, 'hidden_dropout') and config.hidden_dropout is not None:
classifier_dropout = config.hidden_dropout
else:
classifier_dropout = 0.1
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, tuple[tuple[torch.Tensor, torch.Tensor], ...]]]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **deprecated_arguments) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
if deprecated_arguments.pop('position_ids', False) is not False:
warnings.warn('`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore passing `position_ids`.', FutureWarning)
if len(deprecated_arguments) > 0:
raise ValueError(f'Got unexpected arguments: {deprecated_arguments}')
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = transformer_outputs[0]
hidden_states = self.dropout(hidden_states)
logits = self.classifier(hidden_states)
loss = None
if labels is not None:
labels = labels.to(logits.device)
batch_size, seq_length = labels.shape
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length))
if not return_dict:
output = (logits,) + transformer_outputs[2:]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
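A toy sketch of the loss computation above: token-classification logits are flattened to `(batch * seq, num_labels)` before `CrossEntropyLoss`. All shapes are illustrative.

```python
# Toy sketch of the flattening used above for the token-classification loss.
import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_length, num_labels = 2, 5, 3
logits = torch.randn(batch_size, seq_length, num_labels)
labels = torch.randint(0, num_labels, (batch_size, seq_length))

loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(batch_size * seq_length, num_labels),
                labels.view(batch_size * seq_length))
print(loss)
```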
|
@auto_docstring
class BloomForTokenClassification(BloomPreTrainedModel):
def __init__(self, config: BloomConfig):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, tuple[tuple[torch.Tensor, torch.Tensor], ...]]]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **deprecated_arguments) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5 | 1 | 42 | 4 | 34 | 5 | 5 | 0.12 | 1 | 9 | 4 | 0 | 2 | 4 | 2 | 4 | 92 | 9 | 74 | 29 | 52 | 9 | 32 | 15 | 29 | 7 | 2 | 1 | 10 |
| 1,015 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/bloom/modeling_bloom.py | transformers.models.bloom.modeling_bloom.BloomGelu |
from torch import nn
import torch
class BloomGelu(nn.Module):
"""
BloomBiasGelu wrapper that uses the simple function in inference mode, to keep the model
torchscriptable, and the autograd function in training mode, to get accurate gradients. Partly
copied from Megatron-DeepSpeed code and adapted for our needs.
See here why autograd functions are not torchscriptable: https://github.com/pytorch/pytorch/issues/22329
"""
def __init__(self):
super().__init__()
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.training:
return GeLUFunction.apply(x)
else:
return bloom_gelu_forward(x)
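`bloom_gelu_forward` is not included in this row; the sketch below writes out the usual tanh-approximation GELU it corresponds to and compares it against PyTorch's built-in approximation. Treat it as a reimplementation of the formula, not the library's exact code.

```python
# Sketch of a tanh-approximation GELU of the kind bloom_gelu_forward implements.
import torch

def gelu_tanh_approx(x: torch.Tensor) -> torch.Tensor:
    # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
    return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1.0 + 0.044715 * x * x)))

x = torch.linspace(-3, 3, 7)
print(gelu_tanh_approx(x))
print(torch.nn.functional.gelu(x, approximate="tanh"))  # should match closely
```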
|
class BloomGelu(nn.Module):
'''
BloomBiasGelu wrapper that uses the simple function in inference mode, to keep the model
torchscriptable, and the autograd function in training mode, to get accurate gradients. Partly
copied from Megatron-DeepSpeed code and adapted for our needs.
See here why autograd functions are not torchscriptable: https://github.com/pytorch/pytorch/issues/22329
'''
def __init__(self):
pass
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
| 3 | 1 | 4 | 0 | 4 | 0 | 2 | 0.75 | 1 | 3 | 1 | 0 | 2 | 0 | 2 | 12 | 17 | 3 | 8 | 3 | 5 | 6 | 7 | 3 | 4 | 2 | 1 | 1 | 3 |
| 1,016 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/bloom/modeling_bloom.py | transformers.models.bloom.modeling_bloom.BloomMLP |
from torch import nn
from .configuration_bloom import BloomConfig
from torch.nn import functional as F
import torch
class BloomMLP(nn.Module):
def __init__(self, config: BloomConfig):
super().__init__()
hidden_size = config.hidden_size
self.pretraining_tp = config.pretraining_tp
self.slow_but_exact = config.slow_but_exact
self.dense_h_to_4h = nn.Linear(hidden_size, 4 * hidden_size)
self.gelu_impl = BloomGelu()
self.dense_4h_to_h = nn.Linear(4 * hidden_size, hidden_size)
self.hidden_dropout = config.hidden_dropout
def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
hidden_states = self.gelu_impl(self.dense_h_to_4h(hidden_states))
if self.pretraining_tp > 1 and self.slow_but_exact:
intermediate_output = torch.zeros_like(residual)
slices = self.dense_4h_to_h.weight.shape[-1] / self.pretraining_tp
for i in range(self.pretraining_tp):
intermediate_output = intermediate_output + F.linear(hidden_states[:, :, int(i * slices):int((i + 1) * slices)], self.dense_4h_to_h.weight[:, int(i * slices):int((i + 1) * slices)])
else:
intermediate_output = self.dense_4h_to_h(hidden_states)
output = dropout_add(intermediate_output, residual, self.hidden_dropout, self.training)
return output
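A quick numerical check of the `slow_but_exact` branch above: applying `F.linear` to column slices of the weight and summing the partial results matches a single full projection. Bias is omitted here for clarity, and all sizes are made up.

```python
# Numerical check: summed sliced projections equal one full projection.
import torch
from torch.nn import functional as F

torch.manual_seed(0)
pretraining_tp = 4
hidden, four_h = 8, 32
x = torch.randn(2, 5, four_h)
weight = torch.randn(hidden, four_h)

full = F.linear(x, weight)

slices = four_h / pretraining_tp
partial = torch.zeros(2, 5, hidden)
for i in range(pretraining_tp):
    partial = partial + F.linear(
        x[:, :, int(i * slices): int((i + 1) * slices)],
        weight[:, int(i * slices): int((i + 1) * slices)],
    )

print(torch.allclose(full, partial, atol=1e-5))  # True
```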
|
class BloomMLP(nn.Module):
def __init__(self, config: BloomConfig):
pass
def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 14 | 2 | 12 | 0 | 2 | 0 | 1 | 6 | 2 | 0 | 2 | 6 | 2 | 12 | 29 | 5 | 24 | 14 | 21 | 0 | 20 | 14 | 17 | 3 | 1 | 2 | 4 |
| 1,017 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/bloom/modeling_bloom.py | transformers.models.bloom.modeling_bloom.BloomModel |
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
import warnings
from ...cache_utils import Cache, DynamicCache, StaticCache
from torch import nn
from .configuration_bloom import BloomConfig
import torch
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput
from typing import Optional, Union
from ...modeling_attn_mask_utils import AttentionMaskConverter
@auto_docstring
class BloomModel(BloomPreTrainedModel):
def __init__(self, config: BloomConfig):
super().__init__(config)
self.embed_dim = config.hidden_size
self.num_heads = config.n_head
self.word_embeddings = nn.Embedding(config.vocab_size, self.embed_dim)
self.word_embeddings_layernorm = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
self.h = nn.ModuleList([BloomBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)])
self.ln_f = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
self.gradient_checkpointing = False
self.post_init()
def build_alibi_tensor(self, attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor:
return build_alibi_tensor(attention_mask, num_heads, dtype)
def get_input_embeddings(self):
return self.word_embeddings
def set_input_embeddings(self, new_embeddings: torch.Tensor):
self.word_embeddings = new_embeddings
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, tuple[tuple[torch.Tensor, torch.Tensor], ...]]]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **deprecated_arguments) -> Union[tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
"""
if deprecated_arguments.pop('position_ids', False) is not False:
warnings.warn('`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore passing `position_ids`.', FutureWarning)
if len(deprecated_arguments) > 0:
raise ValueError(f'Got unexpected arguments: {deprecated_arguments}')
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
batch_size, seq_length, _ = inputs_embeds.shape
past_length = past_key_values.get_seq_length() if past_key_values is not None else 0
seq_length_with_past = seq_length + past_length
if cache_position is None:
cache_position = torch.arange(past_length, past_length + seq_length, device=inputs_embeds.device)
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
hidden_states = self.word_embeddings_layernorm(inputs_embeds)
all_self_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
else:
attention_mask = attention_mask.to(hidden_states.device)
alibi = self.build_alibi_tensor(attention_mask, self.num_heads, dtype=hidden_states.dtype)
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions)
for i, block in enumerate(self.h):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = block(hidden_states, layer_past=past_key_values, attention_mask=causal_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, alibi=alibi, cache_position=cache_position)
hidden_states = outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[1],)
hidden_states = self.ln_f(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions)
def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False):
if self.config._attn_implementation == 'flash_attention_2':
if attention_mask is not None and (attention_mask == 0.0).any():
return attention_mask
return None
if self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask)
return attention_mask
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False
if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache) and (not output_attentions):
if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):
return None
dtype = input_tensor.dtype
sequence_length = input_tensor.shape[1]
if using_compilable_cache:
target_length = past_key_values.get_max_cache_shape()
else:
target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0])
if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions):
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone()
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)
return causal_mask
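The static helper above can be called directly to see how a 2D padding mask becomes the 4D additive mask used by attention. The call below matches the signature shown in this row and assumes `BloomModel` is importable from `transformers`.

```python
# Shape check for the static mask helper defined above: a 2D padding mask of
# shape (batch, key_len) becomes a 4D additive mask (batch, 1, query_len, key_len).
import torch
from transformers import BloomModel

attention_mask = torch.tensor([[1, 1, 1, 0]])  # one padded position
cache_position = torch.arange(4)

causal_mask = BloomModel._prepare_4d_causal_attention_mask_with_cache_position(
    attention_mask,
    sequence_length=4,
    target_length=4,
    dtype=torch.float32,
    cache_position=cache_position,
    batch_size=1,
)
print(causal_mask.shape)        # torch.Size([1, 1, 4, 4])
print((causal_mask == 0).sum()) # number of attendable positions
```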
|
@auto_docstring
class BloomModel(BloomPreTrainedModel):
def __init__(self, config: BloomConfig):
pass
def build_alibi_tensor(self, attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor:
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, new_embeddings: torch.Tensor):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, tuple[tuple[torch.Tensor, torch.Tensor], ...]]]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, **deprecated_arguments) -> Union[tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
'''
pass
def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False):
pass
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):
'''
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
'''
pass
| 11 | 2 | 41 | 5 | 30 | 6 | 6 | 0.21 | 1 | 16 | 7 | 0 | 6 | 7 | 7 | 9 | 301 | 39 | 218 | 70 | 174 | 46 | 111 | 39 | 103 | 27 | 2 | 2 | 44 |
| 1,018 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/bloom/modeling_bloom.py | transformers.models.bloom.modeling_bloom.BloomPreTrainedModel |
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from ...modeling_utils import PreTrainedModel
from .configuration_bloom import BloomConfig
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from torch import nn
@auto_docstring
class BloomPreTrainedModel(PreTrainedModel):
config: BloomConfig
base_model_prefix = 'transformer'
supports_gradient_checkpointing = True
_no_split_modules = ['BloomBlock']
_skip_keys_device_placement = 'past_key_values'
_can_compile_fullgraph = True
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def _init_weights(self, module: nn.Module):
"""Initialize the weights."""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
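A small sketch of the effect of `_init_weights`: after a model is constructed (which calls `post_init()`), Linear biases are zero and weights are roughly `N(0, initializer_range)`. The tiny config values are illustrative only.

```python
# Sketch: _init_weights is applied recursively during model construction.
import torch
from transformers import BloomConfig, BloomModel

config = BloomConfig(vocab_size=64, hidden_size=16, n_layer=1, n_head=2)
model = BloomModel(config)

mlp = model.h[0].mlp
print(torch.all(mlp.dense_4h_to_h.bias == 0))  # Linear biases zeroed
print(mlp.dense_4h_to_h.weight.std())          # roughly config.initializer_range
```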
|
@auto_docstring
class BloomPreTrainedModel(PreTrainedModel):
def __init__(self, *inputs, **kwargs):
pass
def _init_weights(self, module: nn.Module):
'''Initialize the weights.'''
pass
| 4 | 1 | 9 | 0 | 7 | 2 | 4 | 0.13 | 1 | 1 | 0 | 5 | 2 | 0 | 2 | 2 | 28 | 2 | 23 | 11 | 20 | 3 | 21 | 11 | 18 | 6 | 1 | 2 | 7 |
| 1,019 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/bloom/modeling_bloom.py | transformers.models.bloom.modeling_bloom.GeLUFunction |
import torch
class GeLUFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input: torch.Tensor) -> torch.Tensor:
ctx.save_for_backward(input)
return bloom_gelu_forward(input)
@staticmethod
def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
input = ctx.saved_tensors
tmp = bloom_gelu_back(grad_output, input)
return tmp
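A self-contained sketch of the `torch.autograd.Function` pattern used by `GeLUFunction`: a toy `Square` function stands in for the Bloom-specific `bloom_gelu_*` helpers (which this row omits), showing the `.apply` call pattern and a gradient check.

```python
# Sketch: custom autograd Function exercised via .apply and verified with gradcheck.
import torch

class Square(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input: torch.Tensor) -> torch.Tensor:
        ctx.save_for_backward(input)
        return input * input

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
        (input,) = ctx.saved_tensors  # saved_tensors is always a tuple
        return grad_output * 2 * input

x = torch.randn(5, dtype=torch.double, requires_grad=True)
y = Square.apply(x)          # same call pattern as GeLUFunction.apply in BloomGelu
y.sum().backward()
print(torch.autograd.gradcheck(Square.apply, (x,)))  # True
```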
|
class GeLUFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, input: torch.Tensor) -> torch.Tensor:
pass
@staticmethod
def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
pass
| 5 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 2 | 32 | 11 | 1 | 10 | 6 | 5 | 0 | 8 | 4 | 5 | 1 | 5 | 0 | 2 |
| 1,020 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/bloom/tokenization_bloom_fast.py | transformers.models.bloom.tokenization_bloom_fast.BloomTokenizerFast |
from ...tokenization_utils_base import BatchEncoding
import pickle
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from typing import Optional
class BloomTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" Bloom tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import BloomTokenizerFast
>>> tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom")
>>> tokenizer("Hello world")["input_ids"]
[59414, 8876]
>>> tokenizer(" Hello world")["input_ids"]
[86153, 8876]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
</Tip>
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
unk_token (`str`, *optional*, defaults to `<|endoftext|>`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `<|endoftext|>`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `<|endoftext|>`):
The end of sequence token.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows the leading word to be treated just like any
other word. (The Bloom tokenizer detects the beginning of words by the preceding space.)
trim_offsets (`bool`, *optional*, defaults to `True`):
Whether or not the post-processing step should trim offsets to avoid including whitespaces.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = None
def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token='<unk>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs):
super().__init__(vocab_file=vocab_file, merges_file=merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
pre_tok_state = pickle.dumps(self.backend_tokenizer.pre_tokenizer)
decoder_state = pickle.dumps(self.backend_tokenizer.decoder)
if add_prefix_space:
pre_tok_state = pre_tok_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true')
decoder_state = decoder_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true')
self.backend_tokenizer.pre_tokenizer = pickle.loads(pre_tok_state)
self.backend_tokenizer.decoder = pickle.loads(decoder_state)
self.add_prefix_space = add_prefix_space
def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
is_split_into_words = kwargs.get('is_split_into_words', False)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.')
return super()._batch_encode_plus(*args, **kwargs)
def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
is_split_into_words = kwargs.get('is_split_into_words', False)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.')
return super()._encode_plus(*args, **kwargs)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
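A usage sketch for the pretokenized-input guard above: with the default `add_prefix_space=False`, `is_split_into_words=True` raises, while loading the tokenizer with `add_prefix_space=True` works. It assumes the `bigscience/bloom` tokenizer files are reachable.

```python
# Sketch: pretokenized input requires add_prefix_space=True, as enforced above.
from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom", add_prefix_space=True)
encoding = tokenizer(["Hello", "world"], is_split_into_words=True)
print(encoding["input_ids"])
```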
|
class BloomTokenizerFast(PreTrainedTokenizerFast):
'''
Construct a "fast" Bloom tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import BloomTokenizerFast
>>> tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom")
>>> tokenizer("Hello world")["input_ids"]
[59414, 8876]
>>> tokenizer(" Hello world")["input_ids"]
[86153, 8876]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
</Tip>
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
unk_token (`str`, *optional*, defaults to `<|endoftext|>`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `<|endoftext|>`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `<|endoftext|>`):
The end of sequence token.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows the leading word to be treated just like any
other word. (The Bloom tokenizer detects the beginning of words by the preceding space.)
trim_offsets (`bool`, *optional*, defaults to `True`):
Whether or not the post-processing step should trim offsets to avoid including whitespaces.
'''
def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token='<unk>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs):
pass
def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
pass
def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 5 | 1 | 15 | 1 | 13 | 1 | 2 | 0.79 | 1 | 5 | 1 | 0 | 4 | 1 | 4 | 92 | 120 | 20 | 56 | 26 | 39 | 44 | 27 | 14 | 22 | 2 | 3 | 1 | 7 |
| 1,021 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/configuration_bridgetower.py | transformers.models.bridgetower.configuration_bridgetower.BridgeTowerConfig |
from ...configuration_utils import PretrainedConfig
class BridgeTowerConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`BridgeTowerModel`]. It is used to instantiate a
BridgeTower model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the bridgetower-base
[BridgeTower/bridgetower-base](https://huggingface.co/BridgeTower/bridgetower-base/) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
share_cross_modal_transformer_layers (`bool`, *optional*, defaults to `True`):
Whether cross modal transformer layers are shared.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
initializer_factor (`float`, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
share_link_tower_layers (`bool`, *optional*, defaults to `False`):
Whether the bridge/link tower layers are shared.
link_tower_type (`str`, *optional*, defaults to `"add"`):
Type of the bridge/link layer.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie input and output embeddings.
init_layernorm_from_vision_encoder (`bool`, *optional*, defaults to `False`):
Whether to init LayerNorm from the vision encoder.
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`BridgeTowerTextConfig`].
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`BridgeTowerVisionConfig`].
Example:
```python
>>> from transformers import BridgeTowerModel, BridgeTowerConfig
>>> # Initializing a BridgeTower BridgeTower/bridgetower-base style configuration
>>> configuration = BridgeTowerConfig()
>>> # Initializing a model from the BridgeTower/bridgetower-base style configuration
>>> model = BridgeTowerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'bridgetower'
sub_configs = {'text_config': BridgeTowerTextConfig, 'vision_config': BridgeTowerVisionConfig}
def __init__(self, share_cross_modal_transformer_layers=True, hidden_act='gelu', hidden_size=768, initializer_factor=1, layer_norm_eps=1e-05, share_link_tower_layers=False, link_tower_type='add', num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs):
_ = kwargs.pop('text_config_dict', None)
_ = kwargs.pop('vision_config_dict', None)
super().__init__(**kwargs)
self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
self.hidden_act = hidden_act
self.hidden_size = hidden_size
self.initializer_factor = initializer_factor
self.layer_norm_eps = layer_norm_eps
self.share_link_tower_layers = share_link_tower_layers
self.link_tower_type = link_tower_type
self.num_attention_heads = num_attention_heads
self.num_hidden_layers = num_hidden_layers
self.tie_word_embeddings = tie_word_embeddings
self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
if text_config is None:
text_config = {}
logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.')
if vision_config is None:
vision_config = {}
logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.')
self.text_config = BridgeTowerTextConfig(**text_config)
self.vision_config = BridgeTowerVisionConfig(**vision_config)
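A short sketch of the nested-config handling in the `__init__` above: plain dicts passed as `text_config` / `vision_config` are wrapped into the dedicated config classes. The overridden values are arbitrary.

```python
# Sketch: nested dicts become BridgeTowerTextConfig / BridgeTowerVisionConfig instances.
from transformers import BridgeTowerConfig

config = BridgeTowerConfig(
    text_config={"num_hidden_layers": 6},
    vision_config={"image_size": 384},
)
print(type(config.text_config).__name__)     # BridgeTowerTextConfig
print(config.text_config.num_hidden_layers)  # 6
print(config.vision_config.image_size)       # 384
```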
|
class BridgeTowerConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`BridgeTowerModel`]. It is used to instantiate a
BridgeTower model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the bridgetower-base
[BridgeTower/bridgetower-base](https://huggingface.co/BridgeTower/bridgetower-base/) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
share_cross_modal_transformer_layers (`bool`, *optional*, defaults to `True`):
Whether cross modal transformer layers are shared.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
initializer_factor (`float`, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
share_link_tower_layers (`bool`, *optional*, defaults to `False`):
Whether the bridge/link tower layers are shared.
link_tower_type (`str`, *optional*, defaults to `"add"`):
Type of the bridge/link layer.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie input and output embeddings.
init_layernorm_from_vision_encoder (`bool`, *optional*, defaults to `False`):
Whether to init LayerNorm from the vision encoder.
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`BridgeTowerTextConfig`].
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`BridgeTowerVisionConfig`].
Example:
```python
>>> from transformers import BridgeTowerModel, BridgeTowerConfig
>>> # Initializing a BridgeTower BridgeTower/bridgetower-base style configuration
>>> configuration = BridgeTowerConfig()
>>> # Initializing a model from the BridgeTower/bridgetower-base style configuration
>>> model = BridgeTowerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, share_cross_modal_transformer_layers=True, hidden_act='gelu', hidden_size=768, initializer_factor=1, layer_norm_eps=1e-05, share_link_tower_layers=False, link_tower_type='add', num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs):
pass
| 2 | 1 | 27 | 3 | 22 | 3 | 2 | 1.06 | 1 | 3 | 2 | 0 | 1 | 13 | 2 | 2 | 112 | 15 | 47 | 38 | 25 | 50 | 28 | 19 | 25 | 3 | 1 | 1 | 4 |
| 1,022 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/configuration_bridgetower.py | transformers.models.bridgetower.configuration_bridgetower.BridgeTowerTextConfig |
from ...configuration_utils import PretrainedConfig
class BridgeTowerTextConfig(PretrainedConfig):
"""
This is the configuration class to store the text configuration of a [`BridgeTowerModel`]. The default values here
are copied from RoBERTa. Instantiating a configuration with the defaults will yield a similar configuration to that
of the bridgetower-base [BridgeTower/bridgetower-base](https://huggingface.co/BridgeTower/bridgetower-base/)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the text part of the model. Defines the number of different tokens that can be
represented by the `inputs_ids` passed when calling [`BridgeTowerModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 514):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids`.
initializer_factor (`float`, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
is_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
Example:
```python
>>> from transformers import BridgeTowerTextConfig
>>> # Initializing a BridgeTower BridgeTower/bridgetower-base style configuration for the text model
>>> configuration = BridgeTowerTextConfig()
>>> # Accessing the configuration
>>> configuration
```"""
model_type = 'bridgetower_text_model'
base_config_key = 'text_config'
def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, initializer_factor=1, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, **kwargs):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.initializer_factor = initializer_factor
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
|
class BridgeTowerTextConfig(PretrainedConfig):
'''
This is the configuration class to store the text configuration of a [`BridgeTowerModel`]. The default values here
are copied from RoBERTa. Instantiating a configuration with the defaults will yield a similar configuration to that
of the bridgetower-base [BridgeTower/bridgetower-base](https://huggingface.co/BridgeTower/bridgetower-base/)
architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the text part of the model. Defines the number of different tokens that can be
represented by the `inputs_ids` passed when calling [`BridgeTowerModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 514):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids`.
initializer_factor (`float`, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
is_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
Example:
```python
>>> from transformers import BridgeTowerTextConfig
>>> # Initializing a BridgeTower BridgeTower/bridgetower-base style configuration for the text model
>>> configuration = BridgeTowerTextConfig()
>>> # Accessing the configuration
>>> configuration
```'''
def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, initializer_factor=1, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, **kwargs):
pass
| 2
| 1
| 40
| 1
| 39
| 0
| 1
| 1.31
| 1
| 1
| 0
| 0
| 1
| 17
| 1
| 1
| 106
| 9
| 42
| 41
| 20
| 55
| 22
| 21
| 20
| 1
| 1
| 0
| 1
|
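A minimal usage sketch for the text configuration above; it assumes `BridgeTowerTextConfig` is importable from the top-level `transformers` package (as its own docstring example suggests) and that attribute names follow the `__init__` assignments shown in this record.
```python
from transformers import BridgeTowerTextConfig

# Defaults mirror the signature above (vocab_size=50265, hidden_size=768, ...).
config = BridgeTowerTextConfig(hidden_size=512, num_attention_heads=8)
print(config.hidden_size, config.num_attention_heads, config.vocab_size)
# 512 8 50265
```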
1,023
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/configuration_bridgetower.py
|
transformers.models.bridgetower.configuration_bridgetower.BridgeTowerVisionConfig
|
from ...configuration_utils import PretrainedConfig
class BridgeTowerVisionConfig(PretrainedConfig):
"""
This is the configuration class to store the vision configuration of a [`BridgeTowerModel`]. Instantiating a
configuration with the defaults will yield a similar configuration to that of the bridgetower-base
[BridgeTower/bridgetower-base](https://huggingface.co/BridgeTower/bridgetower-base/) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in visual encoder model.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
image_size (`int`, *optional*, defaults to 288):
The size (resolution) of each image.
initializer_factor (`float`, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
stop_gradient (`bool`, *optional*, defaults to `False`):
Whether to stop gradient for training.
share_layernorm (`bool`, *optional*, defaults to `True`):
Whether LayerNorm layers are shared.
remove_last_layer (`bool`, *optional*, defaults to `False`):
Whether to remove the last layer from the vision encoder.
Example:
```python
>>> from transformers import BridgeTowerVisionConfig
>>> # Initializing a BridgeTower BridgeTower/bridgetower-base style configuration for the vision model
>>> configuration = BridgeTowerVisionConfig()
>>> # Accessing the configuration
>>> configuration
```"""
model_type = 'bridgetower_vision_model'
base_config_key = 'vision_config'
def __init__(self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288, initializer_factor=1, layer_norm_eps=1e-05, stop_gradient=False, share_layernorm=True, remove_last_layer=False, **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.initializer_factor = initializer_factor
self.layer_norm_eps = layer_norm_eps
self.stop_gradient = stop_gradient
self.share_layernorm = share_layernorm
self.remove_last_layer = remove_last_layer
|
class BridgeTowerVisionConfig(PretrainedConfig):
'''
This is the configuration class to store the vision configuration of a [`BridgeTowerModel`]. Instantiating a
configuration with the defaults will yield a similar configuration to that of the bridgetower-base
[BridgeTower/bridgetower-base](https://huggingface.co/BridgeTower/bridgetower-base/) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in visual encoder model.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
image_size (`int`, *optional*, defaults to 288):
The size (resolution) of each image.
initializer_factor (`float`, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
stop_gradient (`bool`, *optional*, defaults to `False`):
Whether to stop gradient for training.
share_layernorm (`bool`, *optional*, defaults to `True`):
Whether LayerNorm layers are shared.
remove_last_layer (`bool`, *optional*, defaults to `False`):
Whether to remove the last layer from the vision encoder.
Example:
```python
>>> from transformers import BridgeTowerVisionConfig
>>> # Initializing a BridgeTower BridgeTower/bridgetower-base style configuration for the vision model
>>> configuration = BridgeTowerVisionConfig()
>>> # Accessing the configuration
>>> configuration
```'''
def __init__(self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288, initializer_factor=1, layer_norm_eps=1e-05, stop_gradient=False, share_layernorm=True, remove_last_layer=False, **kwargs):
pass
| 2
| 1
| 25
| 0
| 25
| 0
| 1
| 1.21
| 1
| 1
| 0
| 0
| 1
| 10
| 1
| 1
| 71
| 9
| 28
| 27
| 13
| 34
| 15
| 14
| 13
| 1
| 1
| 0
| 1
|
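As a quick sanity check of the vision configuration above, a hedged sketch (assuming the class is exported from `transformers`, per its docstring example, and using the dict round-tripping that `PretrainedConfig` provides):
```python
from transformers import BridgeTowerVisionConfig

config = BridgeTowerVisionConfig(image_size=384, patch_size=16)
# Round-trip through a plain dict; model_type is carried along.
restored = BridgeTowerVisionConfig.from_dict(config.to_dict())
print(restored.image_size, restored.num_hidden_layers, restored.model_type)
# 384 12 bridgetower_vision_model
```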
1,024
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/image_processing_bridgetower.py
|
transformers.models.bridgetower.image_processing_bridgetower.BridgeTowerImageProcessor
|
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
from collections.abc import Iterable
from typing import Any, Optional, Union
import numpy as np
from ...image_transforms import PaddingMode, center_crop, pad, resize, to_channel_dimension_format
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
class BridgeTowerImageProcessor(BaseImageProcessor):
"""
Constructs a BridgeTower image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict[str, int]`, *optional*, defaults to `{'shortest_edge': 288}`):
Resize the shorter side of the input to `size["shortest_edge"]`. The longer side will be limited to under
`int((1333 / 800) * size["shortest_edge"])` while preserving the aspect ratio. Only has an effect if
`do_resize` is set to `True`. Can be overridden by the `size` parameter in the `preprocess` method.
size_divisor (`int`, *optional*, defaults to 32):
The size by which to make sure both the height and width can be divided. Only has an effect if `do_resize`
is set to `True`. Can be overridden by the `size_divisor` parameter in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
overridden by the `resample` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
overridden by the `rescale_factor` parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image. Can be overridden by the `do_center_crop` parameter in the `preprocess`
method.
crop_size (`dict[str, int]`, *optional*):
Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
Can be overridden by the `crop_size` parameter in the `preprocess` method. If unset, defaults to `size`.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image to the `(max_height, max_width)` of the images in the batch. Can be overridden by
the `do_pad` parameter in the `preprocess` method.
"""
model_input_names = ['pixel_values', 'pixel_mask']
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, size_divisor: int=32, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_pad: bool=True, **kwargs) -> None:
if 'pad_and_return_pixel_mask' in kwargs:
do_pad = kwargs.pop('pad_and_return_pixel_mask')
super().__init__(**kwargs)
size = size if size is not None else {'shortest_edge': 288}
size = get_size_dict(size, default_to_square=False)
self.do_resize = do_resize
self.size = size
self.size_divisor = size_divisor
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.do_pad = do_pad
self.do_center_crop = do_center_crop
self.crop_size = crop_size
def resize(self, image: np.ndarray, size: dict[str, int], size_divisor: int=32, resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image.
Resizes the shorter side of the image to `size["shortest_edge"]` while preserving the aspect ratio. If the
longer side is larger than the max size `(int(`size["shortest_edge"]` * 1333 / 800))`, the longer side is then
resized to the max size while preserving the aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Controls the size of the output image. Should be of the form `{"shortest_edge": int}`.
size_divisor (`int`, *optional*, defaults to 32):
The image is resized to a size that is a multiple of this value.
resample (`PILImageResampling` filter, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size = get_size_dict(size, default_to_square=False)
if 'shortest_edge' not in size:
raise ValueError(f'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}')
shorter = size['shortest_edge']
longer = int(1333 / 800 * shorter)
output_size = get_resize_output_image_size(image, shorter=shorter, longer=longer, size_divisor=size_divisor, input_data_format=input_data_format)
return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
def center_crop(self, image: np.ndarray, size: dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `crop_size` along
any edge, the image is padded with 0's and then center cropped.
Args:
image (`np.ndarray`):
Image to center crop.
size (`dict[str, int]`):
Size of the output image in the form `{"height": h, "width": w}`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred from the input
image.
"""
output_size = size['shortest_edge']
return center_crop(image, size=(output_size, output_size), data_format=data_format, input_data_format=input_data_format, **kwargs)
def _pad_image(self, image: np.ndarray, output_size: tuple[int, int], constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""
Pad an image with zeros to the given size.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
output_height, output_width = output_size
pad_bottom = output_height - input_height
pad_right = output_width - input_width
padding = ((0, pad_bottom), (0, pad_right))
padded_image = pad(image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format)
return padded_image
def pad(self, images: list[np.ndarray], constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
"""
Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
in the batch and optionally returns their corresponding pixel mask.
Args:
image (`np.ndarray`):
Image to pad.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
pad_size = get_max_height_width(images, input_data_format=input_data_format)
padded_images = [self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format) for image in images]
data = {'pixel_values': padded_images}
if return_pixel_mask:
masks = [make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format) for image in images]
data['pixel_mask'] = masks
return BatchFeature(data=data, tensor_type=return_tensors)
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Controls the size of the image after `resize`. The shortest edge of the image is resized to
`size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image
is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest
edge equal to `int(size["shortest_edge"] * (1333 / 800))`.
size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
The image is resized to a size that is a multiple of this value.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to normalize the image by if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the image to the (max_height, max_width) in the batch. If `True`, a pixel mask is also
created and returned.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the
image is padded with 0's and then center cropped.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the image after center crop. If one edge of the image is smaller than `crop_size`, it will be
padded with zeros and then cropped.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_pad = do_pad if do_pad is not None else self.do_pad
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size if self.crop_size is not None else self.size
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if do_resize:
images = [self.resize(image=image, size=size, size_divisor=size_divisor, resample=resample, input_data_format=input_data_format) for image in images]
if do_center_crop:
images = [self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images]
if do_rescale:
images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
if do_normalize:
images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images]
images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]
if do_pad:
encoded_outputs = self.pad(images, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=data_format)
else:
encoded_outputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
return encoded_outputs
|
class BridgeTowerImageProcessor(BaseImageProcessor):
'''
Constructs a BridgeTower image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict[str, int]`, *optional*, defaults to `{'shortest_edge': 288}`):
Resize the shorter side of the input to `size["shortest_edge"]`. The longer side will be limited to under
`int((1333 / 800) * size["shortest_edge"])` while preserving the aspect ratio. Only has an effect if
`do_resize` is set to `True`. Can be overridden by the `size` parameter in the `preprocess` method.
size_divisor (`int`, *optional*, defaults to 32):
The size by which to make sure both the height and width can be divided. Only has an effect if `do_resize`
is set to `True`. Can be overridden by the `size_divisor` parameter in the `preprocess` method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
overridden by the `resample` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
overridden by the `rescale_factor` parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image. Can be overridden by the `do_center_crop` parameter in the `preprocess`
method.
crop_size (`dict[str, int]`, *optional*):
Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
Can be overridden by the `crop_size` parameter in the `preprocess` method. If unset, defaults to `size`.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image to the `(max_height, max_width)` of the images in the batch. Can be overridden by
the `do_pad` parameter in the `preprocess` method.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, size_divisor: int=32, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_center_crop: bool=True, crop_size: Optional[dict[str, int]]=None, do_pad: bool=True, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], size_divisor: int=32, resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image.
Resizes the shorter side of the image to `size["shortest_edge"]` while preserving the aspect ratio. If the
longer side is larger than the max size `(int(`size["shortest_edge"]` * 1333 / 800))`, the longer side is then
resized to the max size while preserving the aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Controls the size of the output image. Should be of the form `{"shortest_edge": int}`.
size_divisor (`int`, *optional*, defaults to 32):
The image is resized to a size that is a multiple of this value.
resample (`PILImageResampling` filter, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
def center_crop(self, image: np.ndarray, size: dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `crop_size` along
any edge, the image is padded with 0's and then center cropped.
Args:
image (`np.ndarray`):
Image to center crop.
size (`dict[str, int]`):
Size of the output image in the form `{"height": h, "width": w}`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred from the input
image.
'''
pass
def _pad_image(self, image: np.ndarray, output_size: tuple[int, int], constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''
Pad an image with zeros to the given size.
'''
pass
def pad(self, images: list[np.ndarray], constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
'''
Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width
in the batch and optionally returns their corresponding pixel mask.
Args:
image (`np.ndarray`):
Image to pad.
constant_values (`float` or `Iterable[float]`, *optional*):
The value to use for the padding if `mode` is `"constant"`.
return_pixel_mask (`bool`, *optional*, defaults to `True`):
Whether to return a pixel mask.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, size_divisor: Optional[int]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_pad: Optional[bool]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Controls the size of the image after `resize`. The shortest edge of the image is resized to
`size["shortest_edge"]` whilst preserving the aspect ratio. If the longest edge of this resized image
is > `int(size["shortest_edge"] * (1333 / 800))`, then the image is resized again to make the longest
edge equal to `int(size["shortest_edge"] * (1333 / 800))`.
size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
The image is resized to a size that is a multiple of this value.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to normalize the image by if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the image to the (max_height, max_width) in the batch. If `True`, a pixel mask is also
created and returned.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the
image is padded with 0's and then center cropped.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the image after center crop. If one edge of the image is smaller than `crop_size`, it will be
padded with zeros and then cropped.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
| 8
| 6
| 60
| 4
| 37
| 20
| 6
| 0.73
| 1
| 9
| 3
| 0
| 6
| 12
| 6
| 26
| 417
| 30
| 224
| 100
| 152
| 163
| 82
| 35
| 75
| 22
| 3
| 1
| 33
|
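A short, hedged sketch of driving the image processor above end to end. It assumes NumPy and Pillow are installed; the printed shapes follow from the defaults in this record (shortest edge 288, center crop to the same size, padding to the batch maximum with a pixel mask), so they are illustrative rather than guaranteed for other settings.
```python
import numpy as np
from transformers import BridgeTowerImageProcessor

processor = BridgeTowerImageProcessor()
# Two images of different sizes; do_pad=True pads both to the batch maximum and returns a pixel mask.
images = [
    np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8),
    np.random.randint(0, 256, (360, 500, 3), dtype=np.uint8),
]
batch = processor(images=images, return_tensors="np")
print(batch["pixel_values"].shape, batch["pixel_mask"].shape)
# e.g. (2, 3, 288, 288) (2, 288, 288) after resize + center crop + padding
```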
1,025
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerAttention
|
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from ...cache_utils import Cache, EncoderDecoderCache
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
import torch
from torch import nn
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging, torch_int
class BridgeTowerAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None, is_cross_attention=False):
super().__init__()
self.is_cross_attention = is_cross_attention
attention_class = BridgeTowerCrossAttention if is_cross_attention else BridgeTowerSelfAttention
self.self = attention_class(config, position_embedding_type=position_embedding_type, is_causal=is_causal, layer_idx=layer_idx)
self.output = BridgeTowerSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
attention_mask = attention_mask if not self.is_cross_attention else encoder_attention_mask
attention_output, attn_weights = self.self(hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, head_mask=head_mask, past_key_value=past_key_value, cache_position=cache_position, **kwargs)
attention_output = self.output(attention_output, hidden_states)
return (attention_output, attn_weights)
|
class BridgeTowerAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None, is_cross_attention=False):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
| 4
| 0
| 15
| 1
| 14
| 1
| 1
| 0.07
| 1
| 5
| 1
| 0
| 3
| 3
| 3
| 13
| 49
| 4
| 43
| 20
| 30
| 3
| 22
| 11
| 18
| 2
| 1
| 1
| 4
|
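The `prune_heads` method above leans on two public helpers from `transformers.pytorch_utils`; below is a small standalone sketch of what they do to a query projection. The sizes (4 heads of 16, hidden size 64) are illustrative and not tied to any checkpoint.
```python
import torch
from torch import nn
from transformers.pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer

num_heads, head_size = 4, 16                 # hidden size 64
query = nn.Linear(64, num_heads * head_size)
# Drop head 1: compute the indices of the surviving rows, then build a smaller Linear keeping only them.
heads, index = find_pruneable_heads_and_indices([1], num_heads, head_size, set())
query = prune_linear_layer(query, index)
print(query.weight.shape)                    # torch.Size([48, 64]) -> 3 heads remain
```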
1,026
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerBertCrossLayer
|
from ...processing_utils import Unpack
from torch import nn
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging, torch_int
class BridgeTowerBertCrossLayer(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BridgeTowerAttention(config, is_causal=True, layer_idx=layer_idx)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
self.crossattention = BridgeTowerAttention(config, position_embedding_type='absolute', is_causal=False, layer_idx=layer_idx, is_cross_attention=True)
self.intermediate = BridgeTowerIntermediate(config)
self.output = BridgeTowerOutput(config)
def forward(self, hidden_states, encoder_hidden_states, attention_mask=None, head_mask=None, encoder_attention_mask=None, past_key_value=None, **kwargs: Unpack[TransformersKwargs]):
self_attention_output, self_attn_weights = self.attention(hidden_states, attention_mask=attention_mask, head_mask=None, past_key_value=None, **kwargs)
attention_output = self_attention_output
cross_attention_output, cross_attn_weights = self.crossattention(attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_value, **kwargs)
attention_output = cross_attention_output
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
return (layer_output, self_attn_weights, cross_attn_weights)
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
|
class BridgeTowerBertCrossLayer(nn.Module):
def __init__(self, config, layer_idx=None):
pass
def forward(self, hidden_states, encoder_hidden_states, attention_mask=None, head_mask=None, encoder_attention_mask=None, past_key_value=None, **kwargs: Unpack[TransformersKwargs]):
pass
def feed_forward_chunk(self, attention_output):
pass
| 4
| 0
| 19
| 1
| 16
| 1
| 1
| 0.08
| 1
| 4
| 3
| 0
| 3
| 8
| 3
| 13
| 60
| 6
| 50
| 28
| 37
| 4
| 25
| 19
| 21
| 1
| 1
| 0
| 3
|
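`BridgeTowerBertCrossLayer` runs its feed-forward through `apply_chunking_to_forward`; a tiny sketch of that utility's contract, using a stand-in function instead of the real intermediate/output sublayers: chunking over the sequence dimension yields the same result as one full pass.
```python
import torch
from transformers.pytorch_utils import apply_chunking_to_forward

def feed_forward_chunk(x):
    return x * 2.0  # stand-in for the intermediate + output sublayers above

hidden = torch.randn(2, 8, 4)
# chunk_size=2 over dim=1 (the sequence dimension, i.e. self.seq_len_dim above)
chunked = apply_chunking_to_forward(feed_forward_chunk, 2, 1, hidden)
print(torch.equal(chunked, feed_forward_chunk(hidden)))  # True
```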
1,027
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerContrastiveHead
|
from torch import nn
class BridgeTowerContrastiveHead(nn.Module):
def __init__(self, hidden_size, embed_size):
super().__init__()
self.fc = nn.Linear(hidden_size, embed_size)
def forward(self, x):
x = self.fc(x)
return x
|
class BridgeTowerContrastiveHead(nn.Module):
def __init__(self, hidden_size, embed_size):
pass
def forward(self, x):
pass
| 3
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 1
| 2
| 12
| 8
| 1
| 7
| 4
| 4
| 0
| 7
| 4
| 4
| 1
| 1
| 0
| 2
|
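The head above is a single linear projection; downstream, in `BridgeTowerForContrastiveLearning`, its output is L2-normalized. A minimal standalone sketch with illustrative sizes (768 -> 512):
```python
import torch
from torch import nn

fc = nn.Linear(768, 512)                  # hidden_size -> contrastive embed size (illustrative values)
pooled = torch.randn(2, 768)
embeds = nn.functional.normalize(fc(pooled), dim=-1, p=2)
print(embeds.shape, embeds.norm(dim=-1))  # torch.Size([2, 512]), norms ~1.0
```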
1,028
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerContrastiveOutput
|
from dataclasses import dataclass
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging, torch_int
import torch
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, ModelOutput, SequenceClassifierOutput
from typing import Callable, Optional, Union
@dataclass
@auto_docstring(custom_intro="\n Output type of ['BridgeTowerForContrastiveLearning']\n ")
class BridgeTowerContrastiveOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Image-text contrastive loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
text_embeds (`torch.FloatTensor`, *optional*, returned when model is initialized with `with_projection=True`):
The text embeddings obtained by applying the projection layer to the pooler_output.
image_embeds (`torch.FloatTensor`, *optional*, returned when model is initialized with `with_projection=True`):
The image embeddings obtained by applying the projection layer to the pooler_output.
cross_embeds (`torch.FloatTensor`, *optional*, returned when model is initialized with `with_projection=True`):
The text-image cross-modal embeddings obtained by applying the projection layer to the pooler_output.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
text_embeds: Optional[tuple[torch.FloatTensor]] = None
image_embeds: Optional[tuple[torch.FloatTensor]] = None
cross_embeds: Optional[tuple[torch.FloatTensor]] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro="\n Output type of ['BridgeTowerForContrastiveLearning']\n ")
class BridgeTowerContrastiveOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Image-text contrastive loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
text_embeds (`torch.FloatTensor`, *optional*, returned when model is initialized with `with_projection=True`):
The text embeddings obtained by applying the projection layer to the pooler_output.
image_embeds (`torch.FloatTensor`, *optional*, returned when model is initialized with `with_projection=True`):
The image embeddings obtained by applying the projection layer to the pooler_output.
cross_embeds (`torch.FloatTensor`, *optional*, returned when model is initialized with `with_projection=True`):
The text-image cross-modal embeddings obtained by applying the projection layer to the pooler_output.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 2.63
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 31
| 2
| 8
| 8
| 7
| 21
| 8
| 8
| 7
| 0
| 1
| 0
| 0
|
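A hedged sketch of how the output dataclass above behaves; it assumes the class can be imported from the module path given in this record and relies on standard `ModelOutput` semantics (fields left as `None` are dropped from the tuple view).
```python
import torch
from transformers.models.bridgetower.modeling_bridgetower import BridgeTowerContrastiveOutput

out = BridgeTowerContrastiveOutput(logits=torch.zeros(2, 3, 512))
print(out.loss)             # None: loss is only populated when return_loss=True
print(out.logits.shape)     # torch.Size([2, 3, 512])
print(len(out.to_tuple()))  # 1 -> fields that stayed None are excluded
```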
1,029
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerForContrastiveLearning
|
import torch
from torch import nn
from typing import Callable, Optional, Union
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging, torch_int
@auto_docstring(custom_intro='\n BridgeTower Model with an image-text contrastive head on top computing image-text contrastive loss.\n ')
class BridgeTowerForContrastiveLearning(BridgeTowerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bridgetower = BridgeTowerModel(config)
self.itc_text_head = BridgeTowerContrastiveHead(config.hidden_size, config.contrastive_hidden_size)
self.itc_image_head = BridgeTowerContrastiveHead(config.hidden_size, config.contrastive_hidden_size)
self.itc_cross_modal_head = BridgeTowerContrastiveHead(config.hidden_size * 2, config.contrastive_hidden_size)
self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, image_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=True, return_dict: Optional[bool]=None, return_loss: Optional[bool]=None) -> Union[BridgeTowerContrastiveOutput, tuple[torch.FloatTensor]]:
"""
image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
Examples:
```python
>>> from transformers import BridgeTowerProcessor, BridgeTowerForContrastiveLearning
>>> import requests
>>> from PIL import Image
>>> import torch
>>> image_urls = [
... "https://farm4.staticflickr.com/3395/3428278415_81c3e27f15_z.jpg",
... "http://images.cocodataset.org/val2017/000000039769.jpg",
... ]
>>> texts = ["two dogs in a car", "two cats sleeping on a couch"]
>>> images = [Image.open(requests.get(url, stream=True).raw) for url in image_urls]
>>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc")
>>> model = BridgeTowerForContrastiveLearning.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc")
>>> inputs = processor(images, texts, padding=True, return_tensors="pt")
>>> loss = model(**inputs, return_loss=True).loss
>>> inputs = processor(images, texts[::-1], padding=True, return_tensors="pt")
>>> loss_swapped = model(**inputs, return_loss=True).loss
>>> print("Loss", round(loss.item(), 4))
Loss 0.0019
>>> print("Loss with swapped images", round(loss_swapped.item(), 4))
Loss with swapped images 2.126
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bridgetower(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, pixel_values=pixel_values, pixel_mask=pixel_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, image_embeds=image_embeds, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict)
pooler_output = outputs.pooler_output if return_dict else outputs[2]
hidden_states_txt, hidden_states_img, hidden_states_cross_modal = outputs.hidden_states if return_dict else outputs[3]
text_embeds = hidden_states_txt[-1]
image_embeds = hidden_states_img[-1]
image_embeds_with_ln = self.bridgetower.vision_model.visual.forward_post(image_embeds)
image_token_type_embeddings = self.bridgetower.token_type_embeddings(torch.full((1,), 1, dtype=torch.long, device=self.bridgetower.token_type_embeddings.weight.device)).expand_as(image_embeds_with_ln)
image_embeds = self.bridgetower.cross_modal_image_transform(image_embeds_with_ln) + image_token_type_embeddings
text_embeds = nn.functional.normalize(self.itc_text_head(text_embeds[:, 0, :]), dim=-1, p=2)
image_embeds = nn.functional.normalize(self.itc_image_head(image_embeds[:, 0, :]), dim=-1, p=2).to(device=text_embeds.device)
cross_embeds = nn.functional.normalize(self.itc_cross_modal_head(pooler_output), dim=-1, p=2).to(device=text_embeds.device)
logits = torch.stack([text_embeds, image_embeds, cross_embeds], dim=-2)
logit_scale = self.logit_scale.exp().to(device=text_embeds.device)
logits_text_to_image = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
logits_text_to_cross = torch.matmul(text_embeds, cross_embeds.t()) * logit_scale
logits_image_to_cross = torch.matmul(image_embeds, cross_embeds.t()) * logit_scale
itc_loss = None
if return_loss:
labels = torch.arange(len(logits), device=logits.device)
text_to_image_loss = nn.functional.cross_entropy(logits_text_to_image, labels)
text_to_cross_loss = nn.functional.cross_entropy(logits_text_to_cross, labels)
image_to_cross_loss = nn.functional.cross_entropy(logits_image_to_cross, labels)
itc_loss = (text_to_image_loss + text_to_cross_loss + image_to_cross_loss) / 3.0
if not return_dict:
output = (logits, text_embeds, image_embeds, cross_embeds) + outputs[3:]
return (itc_loss,) + output if itc_loss is not None else output
return BridgeTowerContrastiveOutput(loss=itc_loss, logits=logits, text_embeds=text_embeds, image_embeds=image_embeds, cross_embeds=cross_embeds, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n BridgeTower Model with an image-text contrastive head on top computing image-text contrastive loss.\n ')
class BridgeTowerForContrastiveLearning(BridgeTowerPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, image_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=True, return_dict: Optional[bool]=None, return_loss: Optional[bool]=None) -> Union[BridgeTowerContrastiveOutput, tuple[torch.FloatTensor]]:
'''
image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
Examples:
```python
>>> from transformers import BridgeTowerProcessor, BridgeTowerForContrastiveLearning
>>> import requests
>>> from PIL import Image
>>> import torch
>>> image_urls = [
... "https://farm4.staticflickr.com/3395/3428278415_81c3e27f15_z.jpg",
... "http://images.cocodataset.org/val2017/000000039769.jpg",
... ]
>>> texts = ["two dogs in a car", "two cats sleeping on a couch"]
>>> images = [Image.open(requests.get(url, stream=True).raw) for url in image_urls]
>>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc")
>>> model = BridgeTowerForContrastiveLearning.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc")
>>> inputs = processor(images, texts, padding=True, return_tensors="pt")
>>> loss = model(**inputs, return_loss=True).loss
>>> inputs = processor(images, texts[::-1], padding=True, return_tensors="pt")
>>> loss_swapped = model(**inputs, return_loss=True).loss
>>> print("Loss", round(loss.item(), 4))
Loss 0.0019
>>> print("Loss with swapped images", round(loss_swapped.item(), 4))
Loss with swapped images 2.126
```'''
pass
| 5
| 1
| 66
| 12
| 40
| 15
| 4
| 0.35
| 1
| 5
| 3
| 0
| 2
| 5
| 2
| 3
| 135
| 24
| 82
| 41
| 63
| 29
| 38
| 26
| 35
| 7
| 2
| 1
| 8
|
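The loss computed in `forward` above is plain cross-entropy against diagonal labels over scaled similarity logits. A small hedged sketch of the text-to-image term in isolation; the batch size and the 2.6592 initial logit scale are illustrative assumptions, not values taken from a checkpoint.
```python
import torch
from torch import nn

text_embeds = nn.functional.normalize(torch.randn(4, 512), dim=-1, p=2)
image_embeds = nn.functional.normalize(torch.randn(4, 512), dim=-1, p=2)
logit_scale = torch.tensor(2.6592).exp()                 # illustrative logit_scale_init_value
logits_text_to_image = text_embeds @ image_embeds.t() * logit_scale
labels = torch.arange(logits_text_to_image.size(0))      # matching pairs sit on the diagonal
loss = nn.functional.cross_entropy(logits_text_to_image, labels)
print(loss.item())
```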
1,030
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerForImageAndTextRetrieval
|
from torch.nn import CrossEntropyLoss
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging, torch_int
import torch
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, ModelOutput, SequenceClassifierOutput
from typing import Callable, Optional, Union
@auto_docstring(custom_intro='\n BridgeTower Model transformer with a classifier head on top (a linear layer on top of the final hidden state of the\n [CLS] token) for image-to-text matching.\n ')
class BridgeTowerForImageAndTextRetrieval(BridgeTowerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bridgetower = BridgeTowerModel(config)
self.itm_score = BridgeTowerITMHead(config.hidden_size * 2)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, image_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.LongTensor]=None) -> Union[SequenceClassifierOutput, tuple[torch.FloatTensor]]:
"""
image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
labels (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*):
Labels for computing the image-text matching loss. 0 means the pairs don't match and 1 means they match.
The pairs with 0 will be skipped for calculation.
Examples:
```python
>>> from transformers import BridgeTowerProcessor, BridgeTowerForImageAndTextRetrieval
>>> import requests
>>> from PIL import Image
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> texts = ["An image of two cats chilling on a couch", "A football player scoring a goal"]
>>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
>>> model = BridgeTowerForImageAndTextRetrieval.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
>>> # forward pass
>>> scores = dict()
>>> for text in texts:
... # prepare inputs
... encoding = processor(image, text, return_tensors="pt")
... outputs = model(**encoding)
... scores[text] = outputs.logits[0, 1].item()
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bridgetower(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, pixel_values=pixel_values, pixel_mask=pixel_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, image_embeds=image_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooler_output = outputs.pooler_output if return_dict else outputs[2]
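# The ITM head scores the concatenated text/image pooled [CLS] features (hidden_size * 2) as no-match (0) vs. match (1).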
logits = self.itm_score(pooler_output)
itm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(logits.device)
itm_loss = loss_fct(logits, labels)
if not return_dict:
output = tuple(logits)
return (itm_loss,) + output if itm_loss is not None else output
return SequenceClassifierOutput(loss=itm_loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n BridgeTower Model transformer with a classifier head on top (a linear layer on top of the final hidden state of the\n [CLS] token) for image-to-text matching.\n ')
class BridgeTowerForImageAndTextRetrieval(BridgeTowerPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, image_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.LongTensor]=None) -> Union[SequenceClassifierOutput, tuple[torch.FloatTensor]]:
'''
image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
labels (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*):
Labels for computing the image-text matching loss. 0 means the pairs don't match and 1 means they match.
The pairs with 0 will be skipped for calculation.
Examples:
```python
>>> from transformers import BridgeTowerProcessor, BridgeTowerForImageAndTextRetrieval
>>> import requests
>>> from PIL import Image
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> texts = ["An image of two cats chilling on a couch", "A football player scoring a goal"]
>>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
>>> model = BridgeTowerForImageAndTextRetrieval.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
>>> # forward pass
>>> scores = dict()
>>> for text in texts:
... # prepare inputs
... encoding = processor(image, text, return_tensors="pt")
... outputs = model(**encoding)
... scores[text] = outputs.logits[0, 1].item()
```'''
pass
| 5
| 1
| 45
| 8
| 25
| 12
| 4
| 0.45
| 1
| 6
| 3
| 0
| 2
| 2
| 2
| 3
| 93
| 16
| 53
| 26
| 34
| 24
| 20
| 11
| 17
| 6
| 2
| 1
| 7
|
1,031
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerForMaskedLM
|
from torch.nn import CrossEntropyLoss
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging, torch_int
import torch
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, ModelOutput, SequenceClassifierOutput
from typing import Callable, Optional, Union
@auto_docstring(custom_intro='\n BridgeTower Model with a language modeling head on top as done during pretraining.\n ')
class BridgeTowerForMaskedLM(BridgeTowerPreTrainedModel):
_tied_weights_keys = ['mlm_score.decoder.weight']
def __init__(self, config):
super().__init__(config)
self.bridgetower = BridgeTowerModel(config)
self.mlm_score = BridgeTowerMLMHead(config)
self.post_init()
def get_output_embeddings(self):
return self.mlm_score.decoder
def set_output_embeddings(self, new_embeddings):
self.mlm_score.decoder = new_embeddings
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, image_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.LongTensor]=None) -> Union[MaskedLMOutput, tuple[torch.FloatTensor]]:
"""
image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Examples:
```python
>>> from transformers import BridgeTowerProcessor, BridgeTowerForMaskedLM
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000360943.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
>>> text = "a <mask> looking out of the window"
>>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
>>> model = BridgeTowerForMaskedLM.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
>>> # prepare inputs
>>> encoding = processor(image, text, return_tensors="pt")
>>> # forward pass
>>> outputs = model(**encoding)
>>> results = processor.decode(outputs.logits.argmax(dim=-1).squeeze(0).tolist())
>>> print(results)
.a cat looking out of the window.
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bridgetower(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, pixel_values=pixel_values, pixel_mask=pixel_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, image_embeds=image_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
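# The MLM head scores only the text stream (text_features) of the cross-modal encoder output.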
mlm_logits = self.mlm_score(outputs.text_features if return_dict else outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(mlm_logits.device)
masked_lm_loss = loss_fct(mlm_logits.view(-1, self.config.text_config.vocab_size), labels.view(-1))
if not return_dict:
output = tuple(mlm_logits)
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return MaskedLMOutput(loss=masked_lm_loss, logits=mlm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n BridgeTower Model with a language modeling head on top as done during pretraining.\n ')
class BridgeTowerForMaskedLM(BridgeTowerPreTrainedModel):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, image_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.LongTensor]=None) -> Union[MaskedLMOutput, tuple[torch.FloatTensor]]:
'''
image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Examples:
```python
>>> from transformers import BridgeTowerProcessor, BridgeTowerForMaskedLM
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000360943.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
>>> text = "a <mask> looking out of the window"
>>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
>>> model = BridgeTowerForMaskedLM.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
>>> # prepare inputs
>>> encoding = processor(image, text, return_tensors="pt")
>>> # forward pass
>>> outputs = model(**encoding)
>>> results = processor.decode(outputs.logits.argmax(dim=-1).squeeze(0).tolist())
>>> print(results)
.a cat looking out of the window.
```'''
pass
| 7
| 1
| 23
| 4
| 13
| 7
| 2
| 0.46
| 1
| 6
| 3
| 0
| 4
| 2
| 4
| 5
| 100
| 18
| 57
| 28
| 36
| 26
| 24
| 13
| 19
| 6
| 2
| 1
| 9
|
1,032
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerITMHead
|
from torch import nn
class BridgeTowerITMHead(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.fc = nn.Linear(hidden_size, 2)
def forward(self, x):
itm_score = self.fc(x)
return itm_score
|
class BridgeTowerITMHead(nn.Module):
def __init__(self, hidden_size):
pass
def forward(self, x):
pass
| 3
| 0
| 3
| 0
| 3
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 1
| 2
| 12
| 8
| 1
| 7
| 5
| 4
| 0
| 7
| 5
| 4
| 1
| 1
| 0
| 2
|
1,033
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerIntermediate
|
import torch
from torch import nn
from ...activations import ACT2FN, QuickGELUActivation
class BridgeTowerIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class BridgeTowerIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 2
| 0
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 12
| 5
| 9
| 0
| 11
| 5
| 8
| 2
| 1
| 1
| 3
|
1,034
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerLinkTower
|
import torch
from torch import nn
class BridgeTowerLinkTower(nn.Module):
def __init__(self, config):
super().__init__()
self.link_tower_type = config.link_tower_type
self.hidden_size = config.hidden_size
if config.link_tower_type in ['add', 'scaled_add', 'interpolate']:
if config.link_tower_type == 'scaled_add':
self.scaled_factor = nn.Parameter(torch.tensor(1.0))
elif config.link_tower_type == 'interpolate':
self.beta = nn.Parameter(torch.tensor(0.5))
self.LayerNorm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps)
else:
raise NotImplementedError(f'link_tower_type {config.link_tower_type} is not implemented')
def forward(self, hidden_states, cross_modal_hidden_states, attention_mask):
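# Fuse the uni-modal and cross-modal hidden states with the configured strategy (add, scaled_add, or interpolate), then apply LayerNorm.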
if self.link_tower_type == 'add':
return self.LayerNorm(hidden_states + cross_modal_hidden_states)
elif self.link_tower_type == 'scaled_add':
return self.LayerNorm(hidden_states * self.scaled_factor + cross_modal_hidden_states)
elif self.link_tower_type == 'interpolate':
return self.LayerNorm(hidden_states * (1 - self.beta) + cross_modal_hidden_states * self.beta)
else:
raise NotImplementedError(f'link_tower_type {self.link_tower_type} is not implemented')
|
class BridgeTowerLinkTower(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, cross_modal_hidden_states, attention_mask):
pass
| 3
| 0
| 11
| 0
| 11
| 0
| 4
| 0
| 1
| 2
| 0
| 0
| 2
| 5
| 2
| 12
| 23
| 1
| 22
| 8
| 19
| 0
| 17
| 8
| 14
| 4
| 1
| 2
| 8
|
1,035
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerMLMHead
|
import torch
from torch import nn
class BridgeTowerMLMHead(nn.Module):
def __init__(self, config, weight=None):
super().__init__()
self.config = config
self.transform = BridgeTowerPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.text_config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.text_config.vocab_size))
if weight is not None:
self.decoder.weight = weight
def forward(self, x):
mlm_score = self.transform(x)
mlm_score = self.decoder(mlm_score) + self.bias
return mlm_score
|
class BridgeTowerMLMHead(nn.Module):
def __init__(self, config, weight=None):
pass
def forward(self, x):
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 2
| 0
| 1
| 2
| 1
| 0
| 2
| 4
| 2
| 12
| 14
| 1
| 13
| 8
| 10
| 0
| 13
| 8
| 10
| 2
| 1
| 1
| 3
|
1,036
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerModel
|
import torch
from torch import nn
from typing import Callable, Optional, Union
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging, torch_int
@auto_docstring(custom_intro='\n The bare BridgeTower Model transformer outputting BridgeTowerModelOutput object without any specific head on top.\n ')
class BridgeTowerModel(BridgeTowerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
vision_config = config.vision_config
text_config = config.text_config
if config.share_cross_modal_transformer_layers:
self.cross_modal_text_transform = nn.Linear(text_config.hidden_size, config.hidden_size)
self.cross_modal_image_transform = nn.Linear(vision_config.hidden_size, config.hidden_size)
else:
self.cross_modal_text_transform = nn.ModuleList([nn.Linear(text_config.hidden_size, config.hidden_size) for _ in range(config.num_hidden_layers)])
self.cross_modal_image_transform = nn.ModuleList([nn.Linear(vision_config.hidden_size, config.hidden_size) for _ in range(config.num_hidden_layers)])
self.token_type_embeddings = nn.Embedding(2, config.hidden_size)
self.vision_model = BridgeTowerVisionModel(vision_config)
self.text_model = BridgeTowerTextModel(text_config)
if not vision_config.share_layernorm and config.init_layernorm_from_vision_encoder:
for ln in self.vision_model.visual.cross_modal_ln_separate:
ln.weight.data = self.vision_model.visual.ln_post.weight.data
ln.bias.data = self.vision_model.visual.ln_post.bias.data
self.cross_modal_image_layers = nn.ModuleList([BridgeTowerBertCrossLayer(text_config) for _ in range(config.num_hidden_layers)])
self.cross_modal_text_layers = nn.ModuleList([BridgeTowerBertCrossLayer(text_config) for _ in range(config.num_hidden_layers)])
self.cross_modal_image_pooler = BridgeTowerPooler(config)
self.cross_modal_text_pooler = BridgeTowerPooler(config)
self.cross_modal_text_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.cross_modal_image_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if config.share_link_tower_layers:
self.cross_modal_text_link_tower = BridgeTowerLinkTower(config)
self.cross_modal_image_link_tower = BridgeTowerLinkTower(config)
else:
self.cross_modal_text_link_tower = nn.ModuleList([BridgeTowerLinkTower(config) for _ in range(config.num_hidden_layers - 1)])
self.cross_modal_image_link_tower = nn.ModuleList([BridgeTowerLinkTower(config) for _ in range(config.num_hidden_layers - 1)])
self.post_init()
def get_input_embeddings(self):
return self.text_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.text_model.set_input_embeddings(value)
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, image_embeds: Optional[torch.FloatTensor]=None, image_token_type_idx: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False) -> Union[tuple[torch.Tensor], BridgeTowerModelOutput]:
"""
image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
image_token_type_idx (`int`, *optional*):
- The token type ids for images.
output_hidden_states (`bool`, *optional*):
If set to `True`, hidden states are returned as a list containing the hidden states of text, image, and
cross-modal components respectively. i.e. `(hidden_states_text, hidden_states_image,
hidden_states_cross_modal)` where each element is a list of the hidden states of the corresponding
modality. `hidden_states_txt/img` are a list of tensors corresponding to unimodal hidden states and
`hidden_states_cross_modal` is a list of tuples containing `cross_modal_text_hidden_states` and
`cross_modal_image_hidden_states` of each bridge layer.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels are currently not supported.
Examples:
```python
>>> from transformers import BridgeTowerProcessor, BridgeTowerModel
>>> from PIL import Image
>>> import requests
>>> # prepare image and text
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = "hello world"
>>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
>>> model = BridgeTowerModel.from_pretrained("BridgeTower/bridgetower-base")
>>> inputs = processor(image, text, return_tensors="pt")
>>> outputs = model(**inputs)
>>> outputs.keys()
odict_keys(['text_features', 'image_features', 'pooler_output'])
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
all_hidden_states_text = () if output_hidden_states else None
all_hidden_states_image = () if output_hidden_states else None
all_hidden_states_cross = () if output_hidden_states else None
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if inputs_embeds is not None and input_ids is None:
raise NotImplementedError('BridgeTowerModel does not use `inputs_embeds`. Make sure to pass in `input_ids` instead.')
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
image_token_type_idx = image_token_type_idx if image_token_type_idx else 1
input_shape = input_ids.size()
text_embeds = self.text_model.embeddings(input_ids=input_ids)
if output_hidden_states:
all_hidden_states_text += (text_embeds,)
if attention_mask is None:
attention_mask = torch.ones(input_shape, dtype=torch.long, device=input_ids.device)
extend_text_masks = self.text_model.get_extended_attention_mask(attention_mask, input_shape).to(input_ids.device)
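# Layers before `split_index` run uni-modally; the remaining text/vision layers are interleaved with the cross-modal bridge layers below.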
split_index = len(self.text_model.encoder.layer) - self.config.num_hidden_layers + 1
for layer in self.text_model.encoder.layer[:split_index]:
text_embeds = layer(text_embeds, extend_text_masks)[0]
if output_hidden_states:
all_hidden_states_text += (text_embeds,)
if image_embeds is None:
image_embeds = self.vision_model.visual.forward_pre(pixel_values.type(self.vision_model.dtype), interpolate_pos_encoding=interpolate_pos_encoding)
else:
image_embeds = image_embeds.permute(1, 0, 2)
if output_hidden_states:
all_hidden_states_image += (image_embeds,)
for block in self.vision_model.visual.transformer.resblocks[:split_index]:
image_embeds = block(image_embeds)
if output_hidden_states:
all_hidden_states_image += (image_embeds,)
image_embeds_with_ln = self.vision_model.visual.forward_post(image_embeds.type(self.vision_model.dtype))
cross_modal_text = self.cross_modal_text_transform(text_embeds)
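# Add modality token-type embeddings: index 0 for the text stream, `image_token_type_idx` (default 1) for the image stream.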
text_token_type_embeddings = self.token_type_embeddings(torch.zeros(1, dtype=torch.long, device=input_ids.device)).expand_as(cross_modal_text)
cross_modal_text = self.cross_modal_text_layernorm(cross_modal_text + text_token_type_embeddings)
image_embeds_with_ln = self.cross_modal_image_transform(image_embeds_with_ln)
image_token_type_embeddings = self.token_type_embeddings(torch.full((1,), image_token_type_idx, dtype=torch.long, device=input_ids.device)).expand_as(image_embeds_with_ln)
image_embeds_with_ln = image_embeds_with_ln + image_token_type_embeddings
cross_modal_image = self.cross_modal_image_layernorm(image_embeds_with_ln)
pixel_mask = torch.ones((cross_modal_image.size(0), cross_modal_image.size(1)), dtype=torch.long, device=input_ids.device)
extend_image_masks = self.text_model.get_extended_attention_mask(pixel_mask, pixel_mask.size()).to(input_ids.device)
layer_outputs_text = self.cross_modal_text_layers[0](cross_modal_text, cross_modal_image, attention_mask=extend_text_masks, encoder_attention_mask=extend_image_masks, output_attentions=output_attentions)
cross_text_features = layer_outputs_text[0]
layer_outputs_image = self.cross_modal_image_layers[0](cross_modal_image, cross_modal_text, attention_mask=extend_image_masks, encoder_attention_mask=extend_text_masks, output_attentions=output_attentions)
cross_image_features = layer_outputs_image[0]
if output_hidden_states:
all_hidden_states_cross += ((cross_text_features, cross_image_features),)
if output_attentions:
all_self_attentions += ((layer_outputs_text[1], layer_outputs_image[1]),)
link_layer_index = 0
for i in range(split_index, len(self.text_model.encoder.layer)):
text_embeds = self.text_model.encoder.layer[i](text_embeds, extend_text_masks)[0]
image_embeds = self.vision_model.visual.transformer.resblocks[i](image_embeds).type(self.vision_model.dtype)
image_embeds_with_ln = self.cross_modal_image_transform(self.vision_model.visual.forward_post(image_embeds)) + image_token_type_embeddings
text_link_tower = self.cross_modal_text_link_tower[link_layer_index]
image_link_tower = self.cross_modal_image_link_tower[link_layer_index]
cross_text_features_ = text_link_tower(self.cross_modal_text_transform(text_embeds) + text_token_type_embeddings, cross_text_features, extend_text_masks)
cross_image_features_ = image_link_tower(image_embeds_with_ln, cross_image_features, extend_image_masks)
layer_outputs_text = self.cross_modal_text_layers[link_layer_index + 1](cross_text_features_, cross_image_features_, attention_mask=extend_text_masks, encoder_attention_mask=extend_image_masks, output_attentions=output_attentions)
cross_text_features = layer_outputs_text[0]
layer_outputs_image = self.cross_modal_image_layers[link_layer_index + 1](cross_image_features_, cross_text_features_, attention_mask=extend_image_masks, encoder_attention_mask=extend_text_masks, output_attentions=output_attentions)
cross_image_features = layer_outputs_image[0]
link_layer_index += 1
if output_hidden_states:
all_hidden_states_text += (text_embeds,)
all_hidden_states_image += (image_embeds,)
all_hidden_states_cross += ((cross_text_features, cross_image_features),)
if output_attentions:
all_self_attentions += ((layer_outputs_text[1], layer_outputs_image[1]),)
text_features, image_features = (cross_text_features, cross_image_features)
cls_features = self.get_cls_features(text_features, image_features)
if output_hidden_states:
all_hidden_states = (all_hidden_states_text, all_hidden_states_image, all_hidden_states_cross)
if not return_dict:
return tuple((v for v in [text_features, image_features, cls_features, all_hidden_states, all_self_attentions] if v is not None))
return BridgeTowerModelOutput(text_features=text_features, image_features=image_features, pooler_output=cls_features, hidden_states=all_hidden_states, attentions=all_self_attentions)
def get_cls_features(self, text_features, image_features):
cls_features_text = self.cross_modal_text_pooler(text_features)
cls_features_image = self.cross_modal_image_pooler(image_features)
return torch.cat([cls_features_text, cls_features_image], dim=-1)
|
@auto_docstring(custom_intro='\n The bare BridgeTower Model transformer outputting BridgeTowerModelOutput object without any specific head on top.\n ')
class BridgeTowerModel(BridgeTowerPreTrainedModel):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, image_embeds: Optional[torch.FloatTensor]=None, image_token_type_idx: Optional[int]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False) -> Union[tuple[torch.Tensor], BridgeTowerModelOutput]:
'''
image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
image_token_type_idx (`int`, *optional*):
- The token type ids for images.
output_hidden_states (`bool`, *optional*):
If set to `True`, hidden states are returned as a list containing the hidden states of text, image, and
cross-modal components respectively. i.e. `(hidden_states_text, hidden_states_image,
hidden_states_cross_modal)` where each element is a list of the hidden states of the corresponding
modality. `hidden_states_txt/img` are a list of tensors corresponding to unimodal hidden states and
`hidden_states_cross_modal` is a list of tuples containing `cross_modal_text_hidden_states` and
`cross_modal_image_hidden_states` of each bridge layer.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels are currently not supported.
Examples:
```python
>>> from transformers import BridgeTowerProcessor, BridgeTowerModel
>>> from PIL import Image
>>> import requests
>>> # prepare image and text
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = "hello world"
>>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
>>> model = BridgeTowerModel.from_pretrained("BridgeTower/bridgetower-base")
>>> inputs = processor(image, text, return_tensors="pt")
>>> outputs = model(**inputs)
>>> outputs.keys()
odict_keys(['text_features', 'image_features', 'pooler_output'])
```'''
pass
def get_cls_features(self, text_features, image_features):
pass
| 8
| 1
| 59
| 10
| 41
| 8
| 7
| 0.19
| 1
| 13
| 6
| 0
| 5
| 14
| 5
| 6
| 301
| 52
| 210
| 72
| 186
| 39
| 116
| 54
| 110
| 26
| 2
| 2
| 34
|
1,037
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerModelOutput
|
from dataclasses import dataclass
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging, torch_int
import torch
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, ModelOutput, SequenceClassifierOutput
from typing import Callable, Optional, Union
@dataclass
@auto_docstring(custom_intro='\n Output type of [`BridgeTowerModel`].\n ')
class BridgeTowerModelOutput(ModelOutput):
"""
text_features (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, hidden_size)`):
Sequence of hidden-states at the text output of the last layer of the model.
image_features (`torch.FloatTensor` of shape `(batch_size, image_sequence_length, hidden_size)`):
Sequence of hidden-states at the image output of the last layer of the model.
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size x 2)`):
Concatenation of last layer hidden-state of the first token of the text and image sequence (classification
token), respectively, after further processing through layers used for auxiliary pretraining tasks.
"""
text_features: Optional[torch.FloatTensor] = None
image_features: Optional[torch.FloatTensor] = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Output type of [`BridgeTowerModel`].\n ')
class BridgeTowerModelOutput(ModelOutput):
'''
text_features (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, hidden_size)`):
Sequence of hidden-states at the text output of the last layer of the model.
image_features (`torch.FloatTensor` of shape `(batch_size, image_sequence_length, hidden_size)`):
Sequence of hidden-states at the image output of the last layer of the model.
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size x 2)`):
Concatenation of last layer hidden-state of the first token of the text and image sequence (classification
token), respectively, after further processing through layers used for auxiliary pretraining tasks.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.33
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 29
| 3
| 6
| 6
| 5
| 20
| 6
| 6
| 5
| 0
| 1
| 0
| 0
|
1,038
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerOutput
|
import torch
from torch import nn
class BridgeTowerOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class BridgeTowerOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 11
| 6
| 8
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
1,039
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerPooler
|
import torch
from torch import nn
class BridgeTowerPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
class BridgeTowerPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 5
| 1
| 1
| 0.2
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 10
| 7
| 7
| 2
| 10
| 7
| 7
| 1
| 1
| 0
| 2
|
1,040
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerPreTrainedModel
|
from torch import nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_bridgetower import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging, torch_int
@auto_docstring
class BridgeTowerPreTrainedModel(PreTrainedModel):
config: BridgeTowerConfig
base_model_prefix = 'bridgetower'
supports_gradient_checkpointing = False
_no_split_modules = ['BridgeTowerSelfAttention', 'BridgeTowerResidualAttention']
_skip_keys_device_placement = 'past_key_values'
def _init_weights(self, module: nn.Module):
std = self.config.initializer_factor
if isinstance(module, BridgeTowerVisionTransformer):
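# Depth-scaled standard deviations for the vision transformer's attention and MLP projections.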
proj_std = self.config.hidden_size ** (-0.5) * (2 * self.config.num_hidden_layers) ** (-0.5)
attn_std = self.config.hidden_size ** (-0.5)
fc_std = (2 * self.config.hidden_size) ** (-0.5)
for block in module.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std * std)
block.attn.in_proj_bias.data.zero_()
nn.init.normal_(block.attn.out_proj.weight, std=proj_std * std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std * std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std * std)
nn.init.normal_(module.embeddings.class_embedding, std=attn_std * std)
nn.init.normal_(module.embeddings.position_embedding.weight, std=attn_std * std)
elif isinstance(module, (nn.Linear, nn.Conv2d, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.05 * std)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, BridgeTowerForContrastiveLearning):
module.logit_scale.data.fill_(self.config.logit_scale_init_value)
if isinstance(module, (nn.Linear, BridgeTowerMLMHead)) and module.bias is not None:
module.bias.data.zero_()
|
@auto_docstring
class BridgeTowerPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
pass
| 3
| 0
| 25
| 2
| 23
| 0
| 6
| 0.14
| 1
| 1
| 1
| 6
| 1
| 0
| 1
| 1
| 37
| 4
| 29
| 11
| 27
| 4
| 23
| 11
| 21
| 6
| 1
| 2
| 6
|
1,041
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerPredictionHeadTransform
|
from torch import nn
from ...activations import ACT2FN, QuickGELUActivation
class BridgeTowerPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
|
class BridgeTowerPredictionHeadTransform(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 2
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 15
| 1
| 14
| 6
| 11
| 0
| 13
| 6
| 10
| 2
| 1
| 1
| 3
|
1,042
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerResidualAttention
|
from collections import OrderedDict
from torch import nn
import torch
from typing import Callable, Optional, Union
from ...activations import ACT2FN, QuickGELUActivation
class BridgeTowerResidualAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.attn = nn.MultiheadAttention(config.hidden_size, config.hidden_size // 64)
self.ln_1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.mlp = nn.ModuleDict(OrderedDict([('c_fc', nn.Linear(config.hidden_size, config.hidden_size * 4)), ('gelu', QuickGELUActivation()), ('c_proj', nn.Linear(config.hidden_size * 4, config.hidden_size))]))
self.ln_2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attn_mask = None
def attention(self, hidden_state: torch.Tensor, attention_mask: torch.Tensor):
if attention_mask is not None:
attention_mask = attention_mask.to(dtype=torch.bool, device=hidden_state.device)
self.attn_mask = self.attn_mask.to(dtype=hidden_state.dtype, device=hidden_state.device) if self.attn_mask is not None else None
return self.attn(hidden_state, hidden_state, hidden_state, need_weights=False, attn_mask=self.attn_mask, key_padding_mask=attention_mask)[0]
def forward(self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor]=None):
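# Pre-LayerNorm residual block: attention over ln_1(x) is added to x, then the MLP over ln_2 of that sum is added back.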
residual_state = hidden_state + self.attention(self.ln_1(hidden_state), attention_mask)
hidden_state = self.ln_2(residual_state)
for layer in self.mlp.values():
hidden_state = layer(hidden_state)
hidden_state = residual_state + hidden_state
return hidden_state
|
class BridgeTowerResidualAttention(nn.Module):
def __init__(self, config):
pass
def attention(self, hidden_state: torch.Tensor, attention_mask: torch.Tensor):
pass
def forward(self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor]=None):
pass
| 4
| 0
| 13
| 0
| 13
| 0
| 2
| 0
| 1
| 5
| 1
| 0
| 3
| 5
| 3
| 13
| 42
| 3
| 39
| 11
| 35
| 0
| 20
| 11
| 16
| 3
| 1
| 1
| 6
|
1,043
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerSelfAttention
|
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging, torch_int
from torch import nn
import torch
from ...cache_utils import Cache, EncoderDecoderCache
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
class BridgeTowerSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.scaling = self.attention_head_size ** (-0.5)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute')
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
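# Project hidden states to per-head query/key/value tensors of shape (batch, num_heads, seq_len, head_dim).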
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
query_layer = self.query(hidden_states).view(*hidden_shape).transpose(1, 2)
key_layer = self.key(hidden_states).view(*hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*hidden_shape).transpose(1, 2)
if past_key_value is not None:
current_past_key_value = past_key_value
if isinstance(past_key_value, EncoderDecoderCache):
current_past_key_value = past_key_value.self_attention_cache
key_layer, value_layer = current_past_key_value.update(key_layer, value_layer, self.layer_idx, {'cache_position': cache_position})
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
if self.position_embedding_type != 'absolute':
raise ValueError(f'You are using {self.config._attn_implementation} as attention type. However, non-absolute positional embeddings can not work with them. Please load the model with `attn_implementation="eager"`.')
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_layer, key_layer, value_layer, attention_mask, dropout=0.0 if not self.training else self.dropout.p, scaling=self.scaling, head_mask=head_mask, use_cache=past_key_value is not None, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
return (attn_output, attn_weights)
|
class BridgeTowerSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
| 3
| 0
| 43
| 7
| 31
| 6
| 6
| 0.19
| 1
| 5
| 0
| 0
| 3
| 11
| 3
| 13
| 132
| 22
| 93
| 44
| 80
| 18
| 72
| 35
| 68
| 13
| 1
| 2
| 17
|
1,044
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerSelfOutput
|
import torch
from torch import nn
class BridgeTowerSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class BridgeTowerSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 11
| 6
| 8
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
1,045
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerTextEmbeddings
|
import torch
from torch import nn
from typing import Callable, Optional, Union
class BridgeTowerTextEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False)
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx)
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
if position_ids is None:
if input_ids is not None:
position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, self.padding_idx)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
if token_type_ids is None:
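# Fall back to the registered all-zeros token_type_ids buffer, expanded to the current batch shape.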
if hasattr(self, 'token_type_ids'):
buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1)
buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids)
token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length)
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == 'absolute':
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
@staticmethod
def create_position_ids_from_inputs_embeds(inputs_embeds, padding_idx):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)
return position_ids.unsqueeze(0).expand(input_shape)
@staticmethod
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids: torch.Tensor
Returns: torch.Tensor
"""
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
|
class BridgeTowerTextEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
pass
@staticmethod
def create_position_ids_from_inputs_embeds(inputs_embeds, padding_idx):
'''
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
'''
pass
@staticmethod
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
'''
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids: torch.Tensor
Returns: torch.Tensor
'''
pass
| 7
| 3
| 26
| 3
| 18
| 5
| 3
| 0.32
| 1
| 1
| 0
| 0
| 3
| 7
| 3
| 13
| 87
| 13
| 56
| 23
| 50
| 18
| 43
| 21
| 39
| 8
| 1
| 2
| 10
|
1,046
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerTextEncoder
|
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging, torch_int
from torch import nn
import torch
from ...cache_utils import Cache, EncoderDecoderCache
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, ModelOutput, SequenceClassifierOutput
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
class BridgeTowerTextEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([BridgeTowerTextLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
for i, layer_module in enumerate(self.layer):
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_values, cache_position=cache_position, **kwargs)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)
|
class BridgeTowerTextEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
pass
| 3
| 0
| 45
| 4
| 41
| 0
| 9
| 0
| 1
| 8
| 2
| 0
| 2
| 3
| 2
| 12
| 91
| 8
| 83
| 26
| 68
| 0
| 35
| 14
| 32
| 17
| 1
| 3
| 18
|
1,047
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerTextLayer
|
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging, torch_int
import torch
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...cache_utils import Cache, EncoderDecoderCache
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
class BridgeTowerTextLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BridgeTowerAttention(config, is_causal=config.is_decoder, layer_idx=layer_idx)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f'{self} should be used as a decoder model if cross attention is added')
self.crossattention = BridgeTowerAttention(config, position_embedding_type='absolute', is_causal=False, layer_idx=layer_idx, is_cross_attention=True)
self.intermediate = BridgeTowerIntermediate(config)
self.output = BridgeTowerOutput(config)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
outputs = ()
self_attention_output, self_attn_weights = self.attention(hidden_states, attention_mask, head_mask, past_key_value=past_key_value, cache_position=cache_position, **kwargs)
attention_output = self_attention_output
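# When configured as a decoder with cross-attention, additionally attend over the encoder hidden states.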
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, 'crossattention'):
raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`')
cross_attention_output, cross_attn_weights = self.crossattention(self_attention_output, None, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value=past_key_value, **kwargs)
attention_output = cross_attention_output
outputs = (cross_attn_weights,)
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
return outputs + (layer_output, self_attn_weights)
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
|
class BridgeTowerTextLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
def feed_forward_chunk(self, attention_output):
pass
| 4
| 0
| 27
| 2
| 23
| 2
| 4
| 0.1
| 1
| 7
| 3
| 0
| 3
| 8
| 3
| 13
| 84
| 9
| 70
| 32
| 57
| 7
| 41
| 23
| 37
| 7
| 1
| 2
| 11
|
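The BridgeTowerTextLayer record above routes its feed-forward sublayer through `apply_chunking_to_forward`, which splits the sequence dimension into chunks to lower peak activation memory. Below is a minimal, self-contained sketch of that chunking idea in plain PyTorch (it is not the transformers utility itself; the layer sizes and chunk size are illustrative assumptions).

```python
import torch
from torch import nn

def apply_chunking(forward_fn, chunk_size, seq_len_dim, tensor):
    # Split the input along the sequence dimension, run the feed-forward on
    # each chunk, and concatenate the results. The output matches a single
    # full-tensor call, but peak memory is lower.
    if chunk_size == 0:
        return forward_fn(tensor)
    chunks = tensor.split(chunk_size, dim=seq_len_dim)
    return torch.cat([forward_fn(c) for c in chunks], dim=seq_len_dim)

# Illustrative sizes (not taken from the record above).
hidden, intermediate = 8, 32
ffn = nn.Sequential(nn.Linear(hidden, intermediate), nn.GELU(), nn.Linear(intermediate, hidden))

x = torch.randn(2, 16, hidden)                   # (batch, seq_len, hidden)
full = ffn(x)
chunked = apply_chunking(ffn, chunk_size=4, seq_len_dim=1, tensor=x)
print(torch.allclose(full, chunked, atol=1e-6))  # True
```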
1,048
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerTextModel
|
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging, torch_int
from ...masking_utils import create_causal_mask
from ...utils.generic import can_return_tuple
import torch
from .configuration_bridgetower import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig
from ...cache_utils import Cache, EncoderDecoderCache
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, ModelOutput, SequenceClassifierOutput
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
@auto_docstring(custom_intro='\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in *Attention is\n all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz\n Kaiser and Illia Polosukhin.\n\n To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set\n to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and\n `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.\n\n .. _*Attention is all you need*: https://huggingface.co/papers/1706.03762\n ')
class BridgeTowerTextModel(BridgeTowerPreTrainedModel):
config: BridgeTowerTextConfig
def __init__(self, config, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.gradient_checkpointing = False
self.embeddings = BridgeTowerTextEmbeddings(config)
self.encoder = BridgeTowerTextEncoder(config)
self.pooler = BridgeTowerPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Union[list[torch.FloatTensor], Cache]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
return_legacy_cache = False
if use_cache and (not isinstance(past_key_values, Cache)):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
return_legacy_cache = True
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if input_ids is not None:
device = input_ids.device
input_shape = input_ids.shape
else:
device = inputs_embeds.device
input_shape = inputs_embeds.shape[:-1]
seq_length = input_shape[1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=device)
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length)
attention_mask, encoder_attention_mask = self._create_attention_masks(input_shape=input_shape, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, embedding_output=embedding_output, encoder_hidden_states=encoder_hidden_states, cache_position=cache_position, past_key_values=past_key_values)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs = self.encoder(embedding_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, position_ids=position_ids, **kwargs)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if return_legacy_cache:
encoder_outputs.past_key_values = encoder_outputs.past_key_values.to_legacy_cache()
return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions)
def _create_attention_masks(self, input_shape, attention_mask, encoder_attention_mask, embedding_output, encoder_hidden_states, cache_position, past_key_values):
if attention_mask is not None and attention_mask.dim() == 2:
if self.config.is_decoder:
attention_mask = create_causal_mask(config=self.config, input_embeds=embedding_output, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values)
else:
attention_mask = self._update_full_mask(attention_mask, embedding_output)
elif attention_mask is not None and attention_mask.dim() == 3:
if 'flash' in self.config._attn_implementation or self.config._attn_implementation == 'flex_attention':
raise ValueError(f'Passing attention mask with a 3D/4D shape does not work with type {self.config._attn_implementation} - please use either `sdpa` or `eager` instead.')
attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
if encoder_attention_mask is not None:
if encoder_attention_mask.dim() == 2:
encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, embedding_output.shape[:2], embedding_output)
else:
if 'flash' in self.config._attn_implementation or self.config._attn_implementation == 'flex_attention':
raise ValueError(f'Passing attention mask with a 3D/4D shape does not work with type {self.config._attn_implementation} - please use either `sdpa` or `eager` instead.')
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
return (attention_mask, encoder_attention_mask)
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
if attention_mask is not None:
if 'flash' in self.config._attn_implementation:
attention_mask = attention_mask if 0 in attention_mask else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
elif self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
else:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
return attention_mask
def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
if encoder_hidden_states is not None and encoder_attention_mask is not None:
if 'flash' in self.config._attn_implementation:
encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
elif self.config._attn_implementation == 'sdpa':
encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
elif self.config._attn_implementation == 'flex_attention':
if isinstance(encoder_attention_mask, torch.Tensor):
encoder_attention_mask = make_flex_block_causal_mask(encoder_attention_mask, query_length=input_shape[-1], is_causal=False)
else:
encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
return encoder_attention_mask
|
null
| 12
| 2
| 30
| 3
| 20
| 7
| 5
| 0.43
| 1
| 8
| 4
| 0
| 5
| 4
| 5
| 6
| 174
| 27
| 103
| 41
| 82
| 44
| 56
| 26
| 50
| 18
| 2
| 2
| 24
|
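In the BridgeTowerTextModel record above, `_update_full_mask` turns a 2D padding mask into the 4D additive mask that eager/SDPA attention expects. The hand-rolled sketch below reproduces that expansion under the eager convention (1 -> 0, 0 -> large negative); shapes and dtype are assumptions for illustration, and `_prepare_4d_attention_mask` in transformers performs the equivalent work.

```python
import torch

def expand_padding_mask(mask_2d: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
    # mask_2d: (batch, src_len) with 1 for real tokens, 0 for padding.
    # Returns an additive mask of shape (batch, 1, 1, src_len) that broadcasts
    # onto attention scores of shape (batch, heads, tgt_len, src_len).
    expanded = mask_2d[:, None, None, :].to(dtype)
    return (1.0 - expanded) * torch.finfo(dtype).min

mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
additive = expand_padding_mask(mask, torch.float32)
print(additive.shape)     # torch.Size([2, 1, 1, 4])
print(additive[0, 0, 0])  # zeros for real tokens, a huge negative value for padding
```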
1,049
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerTransformer
|
import torch
from torch import nn
from typing import Callable, Optional, Union
class BridgeTowerTransformer(nn.Module):
def __init__(self, config):
super().__init__()
self.hidden_size = config.hidden_size
self.num_hidden_layers = config.num_hidden_layers
if config.remove_last_layer:
self.resblocks = nn.ModuleList([BridgeTowerResidualAttention(config) for _ in range(self.num_hidden_layers - 1)])
else:
self.resblocks = nn.ModuleList([BridgeTowerResidualAttention(config) for _ in range(self.num_hidden_layers)])
self.stop_gradient = config.stop_gradient
def forward(self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor]=None):
hidden_states = []
for block in self.resblocks:
hidden_state = block(hidden_state, attention_mask)
if self.stop_gradient:
hidden_states.append(hidden_state.detach())
else:
hidden_states.append(hidden_state)
return hidden_states
|
class BridgeTowerTransformer(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor]=None):
pass
| 3
| 0
| 11
| 0
| 11
| 0
| 3
| 0
| 1
| 4
| 1
| 0
| 2
| 4
| 2
| 12
| 24
| 1
| 23
| 10
| 20
| 0
| 17
| 9
| 14
| 3
| 1
| 2
| 5
|
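The BridgeTowerTransformer record above optionally detaches each layer's output (`stop_gradient`) before collecting it, so consumers of the per-layer states do not backpropagate into the tower. A toy demonstration of what `.detach()` changes; the tensors and operations are made up purely for illustration.

```python
import torch

x = torch.randn(3, requires_grad=True)
y = x * 2

kept = y               # gradients flow back to x through this tensor
stopped = y.detach()   # gradient flow is cut here

kept.sum().backward()
print(x.grad)          # tensor([2., 2., 2.])

x.grad = None
# Anything computed from the detached tensor carries no graph back to x.
loss = (stopped * 3).sum()
print(loss.requires_grad)  # False
```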
1,050
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerVisionEmbeddings
|
import torch
from torch import nn
from .configuration_bridgetower import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging, torch_int
class BridgeTowerVisionEmbeddings(nn.Module):
def __init__(self, config: BridgeTowerVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
self.register_buffer('position_ids', torch.arange(self.num_positions).expand((1, -1)), persistent=False)
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows interpolating the pre-trained position encodings, so the model can be used on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
position_embedding = self.position_embedding.weight.unsqueeze(0)
num_positions = position_embedding.shape[1] - 1
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embedding(self.position_ids)
class_pos_embed = position_embedding[:, :1]
patch_pos_embed = position_embedding[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size}).")
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embedding(self.position_ids)
return embeddings
|
class BridgeTowerVisionEmbeddings(nn.Module):
def __init__(self, config: BridgeTowerVisionConfig):
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method allows interpolating the pre-trained position encodings, so the model can be used on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
'''
pass
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
pass
| 4
| 1
| 26
| 5
| 19
| 3
| 2
| 0.16
| 1
| 5
| 1
| 0
| 3
| 9
| 3
| 13
| 81
| 16
| 57
| 27
| 53
| 9
| 43
| 27
| 39
| 3
| 1
| 1
| 6
|
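The `interpolate_pos_encoding` method in the BridgeTowerVisionEmbeddings record above resizes the learned patch-position grid with bicubic interpolation, so a checkpoint trained at one resolution can run at another. The standalone sketch below applies the same resize to a dummy embedding table; grid sizes and the embedding dimension are illustrative assumptions.

```python
import torch
from torch import nn

dim = 16
old_grid, new_grid = 7, 9                            # 7x7 patches -> 9x9 patches
pos = torch.randn(1, old_grid * old_grid + 1, dim)   # [CLS] + patch positions

cls_pos, patch_pos = pos[:, :1], pos[:, 1:]
# (1, N, dim) -> (1, dim, old_grid, old_grid) so interpolate sees a 2D grid
patch_pos = patch_pos.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
patch_pos = nn.functional.interpolate(
    patch_pos, size=(new_grid, new_grid), mode="bicubic", align_corners=False
)
patch_pos = patch_pos.permute(0, 2, 3, 1).reshape(1, -1, dim)

resized = torch.cat((cls_pos, patch_pos), dim=1)
print(resized.shape)  # torch.Size([1, 82, 16]): 1 CLS + 9*9 patch positions
```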
1,051
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerVisionModel
|
from .configuration_bridgetower import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig
class BridgeTowerVisionModel(BridgeTowerPreTrainedModel):
config: BridgeTowerVisionConfig
def __init__(self, config):
super().__init__(config)
self.visual = BridgeTowerVisionTransformer(config)
@property
def dtype(self):
return self.visual.embeddings.patch_embedding.weight.dtype
def forward(self, image, image_mask=None, interpolate_pos_encoding=False):
return self.visual(image.type(self.dtype), image_mask, interpolate_pos_encoding)
|
class BridgeTowerVisionModel(BridgeTowerPreTrainedModel):
def __init__(self, config):
pass
@property
def dtype(self):
pass
def forward(self, image, image_mask=None, interpolate_pos_encoding=False):
pass
| 5
| 0
| 2
| 0
| 2
| 0
| 1
| 0
| 1
| 2
| 1
| 0
| 3
| 1
| 3
| 4
| 13
| 3
| 10
| 7
| 5
| 0
| 9
| 6
| 5
| 1
| 2
| 0
| 3
|
1,052
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/modeling_bridgetower.py
|
transformers.models.bridgetower.modeling_bridgetower.BridgeTowerVisionTransformer
|
import torch
from torch import nn
class BridgeTowerVisionTransformer(nn.Module):
def __init__(self, config):
super().__init__()
self.embeddings = BridgeTowerVisionEmbeddings(config)
self.ln_pre = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.transformer = BridgeTowerTransformer(config)
self.ln_post = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.share_layernorm = config.share_layernorm
if not config.share_layernorm:
self.ln_separate = nn.ModuleList([nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) for _ in range(config.num_hidden_layers)])
def forward(self, pixel_values: torch.Tensor, attention_mask, interpolate_pos_encoding: bool=False):
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding)
hidden_states = self.ln_pre(hidden_states)
hidden_states = hidden_states.permute(1, 0, 2)
hidden_states = self.transformer(hidden_states, attention_mask)
hidden_states = torch.stack(hidden_states, dim=0)
hidden_states = hidden_states.permute(0, 2, 1, 3)
if self.share_layernorm:
hidden_states = self.ln_post(hidden_states)
else:
hidden_states_stack = []
for hidden_states, ln in zip(hidden_states, self.ln_separate):
hidden_states = ln(hidden_states)
hidden_states_stack.append(hidden_states)
hidden_states = torch.stack(hidden_states_stack, dim=0)
return hidden_states
def forward_pre(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False):
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
hidden_states = self.ln_pre(hidden_states)
hidden_states = hidden_states.permute(1, 0, 2)
return hidden_states
def forward_post(self, hidden_state: torch.Tensor):
visual_output_post = hidden_state.permute(1, 0, 2)
visual_output_post = self.ln_post(visual_output_post)
return visual_output_post
|
class BridgeTowerVisionTransformer(nn.Module):
def __init__(self, config):
pass
def forward(self, pixel_values: torch.Tensor, attention_mask, interpolate_pos_encoding: bool=False):
pass
def forward_pre(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False):
pass
def forward_post(self, hidden_state: torch.Tensor):
pass
| 5
| 0
| 13
| 1
| 11
| 1
| 2
| 0.11
| 1
| 7
| 2
| 0
| 4
| 6
| 4
| 14
| 56
| 5
| 46
| 26
| 32
| 5
| 34
| 16
| 29
| 3
| 1
| 2
| 7
|
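The BridgeTowerVisionTransformer record above moves activations into a (seq_len, batch, dim) layout before its residual blocks and stacks the per-layer outputs into a single tensor. A small shape walk-through with dummy tensors; the sizes and the identity "layers" are assumptions used only to show the bookkeeping.

```python
import torch

batch, seq_len, dim, num_layers = 2, 5, 8, 3
hidden = torch.randn(batch, seq_len, dim)

# (batch, seq, dim) -> (seq, batch, dim), the layout used inside the blocks
hidden = hidden.permute(1, 0, 2)

# Pretend each "layer" is the identity and collect its output, as the forward does.
per_layer = [hidden for _ in range(num_layers)]

stacked = torch.stack(per_layer, dim=0)   # (num_layers, seq, batch, dim)
stacked = stacked.permute(0, 2, 1, 3)     # (num_layers, batch, seq, dim)
print(stacked.shape)                      # torch.Size([3, 2, 5, 8])
```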
1,053
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/processing_bridgetower.py
|
transformers.models.bridgetower.processing_bridgetower.BridgeTowerProcessor
|
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin
class BridgeTowerProcessor(ProcessorMixin):
"""
Constructs a BridgeTower processor which wraps a Roberta tokenizer and BridgeTower image processor into a single
processor.
[`BridgeTowerProcessor`] offers all the functionalities of [`BridgeTowerImageProcessor`] and
[`RobertaTokenizerFast`]. See the docstring of [`~BridgeTowerProcessor.__call__`] and
[`~BridgeTowerProcessor.decode`] for more information.
Args:
image_processor (`BridgeTowerImageProcessor`):
An instance of [`BridgeTowerImageProcessor`]. The image processor is a required input.
tokenizer (`RobertaTokenizerFast`):
An instance of [`RobertaTokenizerFast`]. The tokenizer is a required input.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'BridgeTowerImageProcessor'
tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')
valid_processor_kwargs = BridgeTowerProcessorKwargs
def __init__(self, image_processor, tokenizer):
super().__init__(image_processor, tokenizer)
|
class BridgeTowerProcessor(ProcessorMixin):
'''
Constructs a BridgeTower processor which wraps a Roberta tokenizer and BridgeTower image processor into a single
processor.
[`BridgeTowerProcessor`] offers all the functionalities of [`BridgeTowerImageProcessor`] and
[`RobertaTokenizerFast`]. See the docstring of [`~BridgeTowerProcessor.__call__`] and
[`~BridgeTowerProcessor.decode`] for more information.
Args:
image_processor (`BridgeTowerImageProcessor`):
An instance of [`BridgeTowerImageProcessor`]. The image processor is a required input.
tokenizer (`RobertaTokenizerFast`):
An instance of [`RobertaTokenizerFast`]. The tokenizer is a required input.
'''
def __init__(self, image_processor, tokenizer):
pass
| 2
| 1
| 9
| 0
| 5
| 3
| 1
| 0.81
| 1
| 5
| 2
| 0
| 5
| 0
| 5
| 22
| 68
| 10
| 32
| 22
| 18
| 26
| 20
| 14
| 14
| 1
| 2
| 0
| 5
|
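The BridgeTowerProcessor record above simply pairs a Roberta tokenizer with the BridgeTower image processor so text and images can be prepared in one call. Below is a hedged usage sketch; the checkpoint name, the example image URL, and the exact returned keys are assumptions based on the usual ProcessorMixin pattern, not taken from this record.

```python
import requests
from PIL import Image
from transformers import BridgeTowerProcessor

# Assumed checkpoint name; substitute whichever BridgeTower checkpoint you use.
processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# One call tokenizes the text and preprocesses the image.
inputs = processor(images=image, text="two cats sleeping on a couch", return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_ids', 'pixel_values', ...]
```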
1,054
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bridgetower/processing_bridgetower.py
|
transformers.models.bridgetower.processing_bridgetower.BridgeTowerProcessorKwargs
|
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin
class BridgeTowerProcessorKwargs(ProcessingKwargs, total=False):
images_kwargs: BridgeTowerImagesKwargs
_defaults = {'text_kwargs': {'add_special_tokens': True, 'padding': False, 'stride': 0, 'return_overflowing_tokens': False, 'return_special_tokens_mask': False, 'return_offsets_mapping': False, 'return_length': False, 'verbose': True}, 'images_kwargs': {'do_normalize': True, 'do_center_crop': True}}
|
class BridgeTowerProcessorKwargs(ProcessingKwargs, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 17
| 0
| 17
| 2
| 16
| 0
| 2
| 2
| 1
| 0
| 3
| 0
| 0
|
1,055
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/configuration_bros.py
|
transformers.models.bros.configuration_bros.BrosConfig
|
from ...configuration_utils import PretrainedConfig
class BrosConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`BrosModel`] or a [`TFBrosModel`]. It is used to
instantiate a Bros model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Bros
[jinho8345/bros-base-uncased](https://huggingface.co/jinho8345/bros-base-uncased) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the Bros model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`BrosModel`] or [`TFBrosModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`BrosModel`] or [`TFBrosModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
The index of the padding token in the token vocabulary.
dim_bbox (`int`, *optional*, defaults to 8):
The dimension of the bounding box coordinates. (x0, y0, x1, y0, x1, y1, x0, y1)
bbox_scale (`float`, *optional*, defaults to 100.0):
The scale factor of the bounding box coordinates.
n_relations (`int`, *optional*, defaults to 1):
The number of relations for SpadeEE(entity extraction), SpadeEL(entity linking) head.
classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the classifier head.
Examples:
```python
>>> from transformers import BrosConfig, BrosModel
>>> # Initializing a BROS jinho8345/bros-base-uncased style configuration
>>> configuration = BrosConfig()
>>> # Initializing a model from the jinho8345/bros-base-uncased style configuration
>>> model = BrosModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'bros'
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, dim_bbox=8, bbox_scale=100.0, n_relations=1, classifier_dropout_prob=0.1, **kwargs):
super().__init__(vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, **kwargs)
self.dim_bbox = dim_bbox
self.bbox_scale = bbox_scale
self.n_relations = n_relations
self.dim_bbox_sinusoid_emb_2d = self.hidden_size // 4
self.dim_bbox_sinusoid_emb_1d = self.dim_bbox_sinusoid_emb_2d // self.dim_bbox
self.dim_bbox_projection = self.hidden_size // self.num_attention_heads
self.classifier_dropout_prob = classifier_dropout_prob
|
class BrosConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`BrosModel`] or a [`TFBrosModel`]. It is used to
instantiate a Bros model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the Bros
[jinho8345/bros-base-uncased](https://huggingface.co/jinho8345/bros-base-uncased) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the Bros model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`BrosModel`] or [`TFBrosModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`BrosModel`] or [`TFBrosModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
The index of the padding token in the token vocabulary.
dim_bbox (`int`, *optional*, defaults to 8):
The dimension of the bounding box coordinates. (x0, y0, x1, y0, x1, y1, x0, y1)
bbox_scale (`float`, *optional*, defaults to 100.0):
The scale factor of the bounding box coordinates.
n_relations (`int`, *optional*, defaults to 1):
The number of relations for SpadeEE(entity extraction), SpadeEL(entity linking) head.
classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the classifier head.
Examples:
```python
>>> from transformers import BrosConfig, BrosModel
>>> # Initializing a BROS jinho8345/bros-base-uncased style configuration
>>> configuration = BrosConfig()
>>> # Initializing a model from the jinho8345/bros-base-uncased style configuration
>>> model = BrosModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, dim_bbox=8, bbox_scale=100.0, n_relations=1, classifier_dropout_prob=0.1, **kwargs):
pass
| 2
| 1
| 45
| 1
| 44
| 0
| 1
| 1.2
| 1
| 1
| 0
| 0
| 1
| 7
| 1
| 1
| 112
| 11
| 46
| 30
| 24
| 55
| 11
| 10
| 9
| 1
| 1
| 0
| 1
|
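The BrosConfig record above derives its bounding-box embedding sizes from `hidden_size`, `dim_bbox`, and `num_attention_heads` rather than taking them as arguments. A quick check of that arithmetic with the default values:

```python
from transformers import BrosConfig

config = BrosConfig()  # defaults: hidden_size=768, dim_bbox=8, num_attention_heads=12

# dim_bbox_sinusoid_emb_2d = hidden_size // 4            = 192
# dim_bbox_sinusoid_emb_1d = 192 // dim_bbox              = 24
# dim_bbox_projection      = hidden_size // num_attention_heads = 64
print(config.dim_bbox_sinusoid_emb_2d, config.dim_bbox_sinusoid_emb_1d, config.dim_bbox_projection)
# 192 24 64
```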
1,056
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosAttention
|
import torch
from typing import Optional, Union
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from torch import nn
class BrosAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = BrosSelfAttention(config)
self.output = BrosSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, bbox_pos_emb: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states=hidden_states, bbox_pos_emb=bbox_pos_emb, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
|
class BrosAttention(nn.Module):
def __init__(self, config):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, bbox_pos_emb: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
| 4
| 0
| 16
| 1
| 15
| 1
| 1
| 0.07
| 1
| 6
| 2
| 0
| 3
| 3
| 3
| 13
| 52
| 4
| 46
| 21
| 32
| 3
| 22
| 11
| 18
| 2
| 1
| 1
| 4
|
1,057
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosBboxEmbeddings
|
from torch import nn
import torch
class BrosBboxEmbeddings(nn.Module):
def __init__(self, config):
super().__init__()
self.bbox_sinusoid_emb = BrosPositionalEmbedding2D(config)
self.bbox_projection = nn.Linear(config.dim_bbox_sinusoid_emb_2d, config.dim_bbox_projection, bias=False)
def forward(self, bbox: torch.Tensor):
bbox_t = bbox.transpose(0, 1)
bbox_pos = bbox_t[None, :, :, :] - bbox_t[:, None, :, :]
bbox_pos_emb = self.bbox_sinusoid_emb(bbox_pos)
bbox_pos_emb = self.bbox_projection(bbox_pos_emb)
return bbox_pos_emb
|
class BrosBboxEmbeddings(nn.Module):
def __init__(self, config):
pass
def forward(self, bbox: torch.Tensor):
pass
| 3
| 0
| 6
| 1
| 5
| 0
| 1
| 0
| 1
| 3
| 1
| 0
| 2
| 2
| 2
| 12
| 13
| 2
| 11
| 8
| 8
| 0
| 11
| 8
| 8
| 1
| 1
| 0
| 2
|
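The BrosBboxEmbeddings record above builds relative positions by broadcasting `bbox_t[None, :, :, :] - bbox_t[:, None, :, :]`, which yields every pairwise difference between boxes in a sequence. A minimal sketch of that broadcasting trick on a toy tensor; the shapes are illustrative only.

```python
import torch

seq_len, batch, dim_bbox = 3, 1, 8
bbox = torch.arange(seq_len * batch * dim_bbox, dtype=torch.float32).reshape(batch, seq_len, dim_bbox)

bbox_t = bbox.transpose(0, 1)                     # (seq_len, batch, dim_bbox)
pairwise = bbox_t[None, :, :, :] - bbox_t[:, None, :, :]
print(pairwise.shape)                             # torch.Size([3, 3, 1, 8])

# pairwise[i, j] is box_j minus box_i for every pair of sequence positions.
print(torch.equal(pairwise[0, 2], bbox_t[2] - bbox_t[0]))  # True
```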
1,058
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosEncoder
|
import torch
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, TokenClassifierOutput
from typing import Optional, Union
from torch import nn
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging
class BrosEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([BrosLayer(config) for _ in range(config.num_hidden_layers)])
@can_return_tuple
def forward(self, hidden_states: torch.Tensor, bbox_pos_emb: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple[torch.Tensor], BaseModelOutputWithCrossAttentions]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states=hidden_states, bbox_pos_emb=bbox_pos_emb, attention_mask=attention_mask, head_mask=layer_head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return BaseModelOutputWithCrossAttentions(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)
|
class BrosEncoder(nn.Module):
def __init__(self, config):
pass
@can_return_tuple
def forward(self, hidden_states: torch.Tensor, bbox_pos_emb: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=True) -> Union[tuple[torch.Tensor], BaseModelOutputWithCrossAttentions]:
pass
| 4
| 0
| 45
| 3
| 42
| 0
| 9
| 0
| 1
| 8
| 2
| 0
| 2
| 2
| 2
| 12
| 91
| 7
| 84
| 26
| 68
| 0
| 33
| 13
| 30
| 16
| 1
| 3
| 17
|
1,059
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosForTokenClassification
|
from typing import Optional, Union
from torch.nn import CrossEntropyLoss
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging
import torch
from torch import nn
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, TokenClassifierOutput
@auto_docstring
class BrosForTokenClassification(BrosPreTrainedModel):
_keys_to_ignore_on_load_unexpected = ['pooler']
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bros = BrosModel(config)
classifier_dropout = config.classifier_dropout if hasattr(config, 'classifier_dropout') else config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, bbox_first_token_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
"""
bbox (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values
(x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the
bounding box.
bbox_first_token_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to indicate the first token of each bounding box. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
Examples:
```python
>>> import torch
>>> from transformers import BrosProcessor, BrosForTokenClassification
>>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
>>> model = BrosForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
>>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
>>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
>>> encoding["bbox"] = bbox
>>> outputs = model(**encoding)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bros(input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
if bbox_first_token_mask is not None:
bbox_first_token_mask = bbox_first_token_mask.view(-1)
loss = loss_fct(logits.view(-1, self.num_labels)[bbox_first_token_mask], labels.view(-1)[bbox_first_token_mask])
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class BrosForTokenClassification(BrosPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, bbox_first_token_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
'''
bbox (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values
(x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the
bounding box.
bbox_first_token_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to indicate the first token of each bounding box. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
Examples:
```python
>>> import torch
>>> from transformers import BrosProcessor, BrosForTokenClassification
>>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
>>> model = BrosForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
>>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
>>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
>>> encoding["bbox"] = bbox
>>> outputs = model(**encoding)
```'''
pass
| 6
| 1
| 45
| 8
| 30
| 7
| 4
| 0.2
| 1
| 5
| 2
| 0
| 2
| 4
| 2
| 3
| 95
| 18
| 64
| 30
| 45
| 13
| 27
| 15
| 24
| 6
| 2
| 2
| 8
|
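The BrosForTokenClassification record above restricts its cross-entropy loss to the first token of each bounding box via a boolean mask. The snippet below reproduces that masking on dummy logits and labels; the sizes, labels, and mask are made up for illustration.

```python
import torch
from torch.nn import CrossEntropyLoss

batch, seq_len, num_labels = 2, 4, 5
logits = torch.randn(batch, seq_len, num_labels)
labels = torch.randint(0, num_labels, (batch, seq_len))
# True only where a token starts a new bounding box.
first_token_mask = torch.tensor([[True, False, True, False],
                                 [True, True, False, False]])

loss_fct = CrossEntropyLoss()
mask = first_token_mask.view(-1)
loss = loss_fct(logits.view(-1, num_labels)[mask], labels.view(-1)[mask])
print(loss.item())  # scalar loss computed over the 4 selected tokens only
```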
1,060
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosIntermediate
|
from ...activations import ACT2FN
from torch import nn
import torch
class BrosIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class BrosIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 2
| 0
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 12
| 5
| 9
| 0
| 11
| 5
| 8
| 2
| 1
| 1
| 3
|
1,061
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosLayer
|
import torch
from ...modeling_layers import GradientCheckpointingLayer
from typing import Optional, Union
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
class BrosLayer(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BrosAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise Exception(f'{self} should be used as a decoder model if cross attention is added')
self.crossattention = BrosAttention(config)
self.intermediate = BrosIntermediate(config)
self.output = BrosOutput(config)
def forward(self, hidden_states: torch.Tensor, bbox_pos_emb: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
self_attention_outputs = self.attention(hidden_states, bbox_pos_emb=bbox_pos_emb, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions)
attention_output = self_attention_outputs[0]
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
else:
outputs = self_attention_outputs[1:]
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, 'crossattention'):
raise Exception(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`')
cross_attention_outputs = self.crossattention(attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1]
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
outputs = (layer_output,) + outputs
if self.is_decoder:
outputs = outputs + (None,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
|
class BrosLayer(GradientCheckpointingLayer):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, bbox_pos_emb: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor]:
pass
def feed_forward_chunk(self, attention_output):
pass
| 4
| 0
| 28
| 2
| 24
| 2
| 4
| 0.09
| 1
| 7
| 3
| 0
| 3
| 8
| 3
| 13
| 88
| 9
| 74
| 33
| 60
| 7
| 41
| 23
| 37
| 7
| 1
| 2
| 11
|
1,062
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosModel
|
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, TokenClassifierOutput
import torch
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging
@auto_docstring
class BrosModel(BrosPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = BrosTextEmbeddings(config)
self.bbox_embeddings = BrosBboxEmbeddings(config)
self.encoder = BrosEncoder(config)
self.pooler = BrosPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
"""
bbox (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values
(x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the
bounding box.
Examples:
```python
>>> import torch
>>> from transformers import BrosProcessor, BrosModel
>>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
>>> model = BrosModel.from_pretrained("jinho8345/bros-base-uncased")
>>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
>>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
>>> encoding["bbox"] = bbox
>>> outputs = model(**encoding)
>>> last_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
if bbox is None:
raise ValueError('You have to specify bbox')
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
if hasattr(self.embeddings, 'token_type_ids'):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
if bbox.shape[-1] == 4:
bbox = bbox[:, :, [0, 1, 2, 1, 2, 3, 0, 3]]
scaled_bbox = bbox * self.config.bbox_scale
bbox_position_embeddings = self.bbox_embeddings(scaled_bbox)
encoder_outputs = self.encoder(embedding_output, bbox_pos_emb=bbox_position_embeddings, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions)
|
@auto_docstring
class BrosModel(BrosPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
'''
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
'''
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
'''
bbox (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values
(x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the
bounding box.
Examples:
```python
>>> import torch
>>> from transformers import BrosProcessor, BrosModel
>>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
>>> model = BrosModel.from_pretrained("jinho8345/bros-base-uncased")
>>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
>>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
>>> encoding["bbox"] = bbox
>>> outputs = model(**encoding)
>>> last_hidden_states = outputs.last_hidden_state
```'''
pass
| 9
| 3
| 32
| 5
| 22
| 6
| 5
| 0.26
| 1
| 9
| 5
| 0
| 5
| 5
| 5
| 6
| 169
| 28
| 112
| 45
| 88
| 29
| 61
| 28
| 55
| 20
| 2
| 2
| 26
|
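The BrosModel record above accepts boxes as `(x0, y0, x1, y1)` and, when the last dimension is 4, expands them to four corner points with `bbox[:, :, [0, 1, 2, 1, 2, 3, 0, 3]]` before scaling. A tiny check of what that indexing produces:

```python
import torch

# One box: x0=0.1, y0=0.2, x1=0.7, y1=0.9  (shape: batch=1, seq=1, 4)
bbox = torch.tensor([[[0.1, 0.2, 0.7, 0.9]]])

corners = bbox[:, :, [0, 1, 2, 1, 2, 3, 0, 3]]
print(corners)
# tensor([[[0.1000, 0.2000, 0.7000, 0.2000, 0.7000, 0.9000, 0.1000, 0.9000]]])
# i.e. (x0, y0), (x1, y0), (x1, y1), (x0, y1): top-left, top-right,
# bottom-right, bottom-left -- the 8-value layout the bbox embeddings expect.
```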
1,063
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosOutput
|
from torch import nn
import torch
class BrosOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class BrosOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 11
| 6
| 8
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
1,064
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosPooler
|
import torch
from torch import nn
class BrosPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
class BrosPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 5
| 1
| 1
| 0.2
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 10
| 7
| 7
| 2
| 10
| 7
| 7
| 1
| 1
| 0
| 2
|
1,065
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosPositionalEmbedding1D
|
import torch
from torch import nn
class BrosPositionalEmbedding1D(nn.Module):
def __init__(self, config):
super().__init__()
self.dim_bbox_sinusoid_emb_1d = config.dim_bbox_sinusoid_emb_1d
inv_freq = 1 / 10000 ** (torch.arange(0.0, self.dim_bbox_sinusoid_emb_1d, 2.0) / self.dim_bbox_sinusoid_emb_1d)
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq: torch.Tensor) -> torch.Tensor:
seq_size = pos_seq.size()
b1, b2, b3 = seq_size
sinusoid_inp = pos_seq.view(b1, b2, b3, 1) * self.inv_freq.view(1, 1, 1, self.dim_bbox_sinusoid_emb_1d // 2)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
return pos_emb
|
class BrosPositionalEmbedding1D(nn.Module):
def __init__(self, config):
pass
def forward(self, pos_seq: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 8
| 1
| 7
| 0
| 1
| 0.07
| 1
| 2
| 0
| 0
| 2
| 1
| 2
| 12
| 19
| 4
| 14
| 9
| 11
| 1
| 12
| 9
| 9
| 1
| 1
| 0
| 2
|
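The `BrosPositionalEmbedding1D` record above (1,065) is the standard sinusoidal encoding applied to one bounding-box coordinate at a time. As a reading aid (a summary, not part of the extracted record), with `d = dim_bbox_sinusoid_emb_1d` its `forward` computes:

```latex
\omega_k = 10000^{-2k/d}, \qquad
\mathrm{emb}(p) = \bigl[\,\sin(p\,\omega_0),\dots,\sin(p\,\omega_{d/2-1}),\;
\cos(p\,\omega_0),\dots,\cos(p\,\omega_{d/2-1})\,\bigr]
```

The sine and cosine halves are concatenated as blocks rather than interleaved, matching the `torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)` call.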
1,066
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosPositionalEmbedding2D
|
from torch import nn
import torch
class BrosPositionalEmbedding2D(nn.Module):
def __init__(self, config):
super().__init__()
self.dim_bbox = config.dim_bbox
self.x_pos_emb = BrosPositionalEmbedding1D(config)
self.y_pos_emb = BrosPositionalEmbedding1D(config)
def forward(self, bbox: torch.Tensor) -> torch.Tensor:
stack = []
for i in range(self.dim_bbox):
if i % 2 == 0:
stack.append(self.x_pos_emb(bbox[..., i]))
else:
stack.append(self.y_pos_emb(bbox[..., i]))
bbox_pos_emb = torch.cat(stack, dim=-1)
return bbox_pos_emb
|
class BrosPositionalEmbedding2D(nn.Module):
def __init__(self, config):
pass
def forward(self, bbox: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 8
| 1
| 7
| 0
| 2
| 0
| 1
| 4
| 1
| 0
| 2
| 3
| 2
| 12
| 17
| 2
| 15
| 9
| 12
| 0
| 14
| 9
| 11
| 3
| 1
| 2
| 4
|
1,067
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosPreTrainedModel
|
from torch import nn
from ...modeling_utils import PreTrainedModel
from .configuration_bros import BrosConfig
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging
@auto_docstring
class BrosPreTrainedModel(PreTrainedModel):
config: BrosConfig
base_model_prefix = 'bros'
def _init_weights(self, module: nn.Module):
"""Initialize the weights"""
std = self.config.initializer_range
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, BrosRelationExtractor):
nn.init.normal_(module.dummy_node, std=std)
|
@auto_docstring
class BrosPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
'''Initialize the weights'''
pass
| 3
| 1
| 15
| 0
| 12
| 3
| 6
| 0.47
| 1
| 0
| 0
| 4
| 1
| 0
| 1
| 1
| 24
| 2
| 15
| 4
| 13
| 7
| 13
| 4
| 11
| 6
| 1
| 2
| 6
|
1,068
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosRelationExtractor
|
from torch import nn
import torch
class BrosRelationExtractor(nn.Module):
def __init__(self, config):
super().__init__()
self.n_relations = config.n_relations
self.backbone_hidden_size = config.hidden_size
self.head_hidden_size = config.hidden_size
self.classifier_dropout_prob = config.classifier_dropout_prob
self.drop = nn.Dropout(self.classifier_dropout_prob)
self.query = nn.Linear(self.backbone_hidden_size, self.n_relations * self.head_hidden_size)
self.key = nn.Linear(self.backbone_hidden_size, self.n_relations * self.head_hidden_size)
self.dummy_node = nn.Parameter(torch.zeros(1, self.backbone_hidden_size))
def forward(self, query_layer: torch.Tensor, key_layer: torch.Tensor):
query_layer = self.query(self.drop(query_layer))
dummy_vec = self.dummy_node.unsqueeze(0).repeat(1, key_layer.size(1), 1)
key_layer = torch.cat([key_layer, dummy_vec], axis=0)
key_layer = self.key(self.drop(key_layer))
query_layer = query_layer.view(query_layer.size(0), query_layer.size(1), self.n_relations, self.head_hidden_size)
key_layer = key_layer.view(key_layer.size(0), key_layer.size(1), self.n_relations, self.head_hidden_size)
relation_score = torch.matmul(query_layer.permute(2, 1, 0, 3), key_layer.permute(2, 1, 3, 0))
return relation_score
|
class BrosRelationExtractor(nn.Module):
def __init__(self, config):
pass
def forward(self, query_layer: torch.Tensor, key_layer: torch.Tensor):
pass
| 3
| 0
| 15
| 4
| 12
| 1
| 1
| 0.04
| 1
| 2
| 0
| 0
| 2
| 8
| 2
| 12
| 32
| 8
| 24
| 13
| 21
| 1
| 20
| 13
| 17
| 1
| 1
| 0
| 2
|
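Record 1,068 (`BrosRelationExtractor`) scores every (query token, key token) pair once per relation head. A compact way to read its `forward` (a sketch of the shapes, not an excerpt from the record): with inputs of shape `(seq, batch, hidden)`, a learned dummy node is appended to the keys, and

```latex
\mathrm{score}_{r,b,i,j} = \mathbf{q}^{(r)}_{b,i} \cdot \mathbf{k}^{(r)}_{b,j},
\qquad j \in \{0, \dots, L_k\}
```

where $r$ indexes the `n_relations` heads and the extra column $j = L_k$ comes from the dummy node, which the Spade heads further down can use as a "no link / no successor" target.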
1,069
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosSelfAttention
|
import math
from torch import nn
import torch
from typing import Optional, Union
class BrosSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def forward(self, hidden_states: torch.Tensor, bbox_pos_emb: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[torch.Tensor]=False) -> tuple[torch.Tensor]:
hidden_shape = (hidden_states.shape[0], -1, self.num_attention_heads, self.attention_head_size)
query_layer = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention:
key_layer = self.key(encoder_hidden_states).view(hidden_shape).transpose(1, 2)
value_layer = self.value(encoder_hidden_states).view(hidden_shape).transpose(1, 2)
attention_mask = encoder_attention_mask
else:
key_layer = self.key(hidden_states).view(hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(hidden_shape).transpose(1, 2)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype)
if self.position_embedding_type == 'relative_key':
relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == 'relative_key_query':
relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
batch_size, n_head, seq_length, d_head = query_layer.shape
bbox_pos_emb = bbox_pos_emb.view(seq_length, seq_length, batch_size, d_head)
bbox_pos_emb = bbox_pos_emb.permute([2, 0, 1, 3])
bbox_pos_scores = torch.einsum('bnid,bijd->bnij', (query_layer, bbox_pos_emb))
attention_scores = attention_scores + bbox_pos_scores
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (None,)
return outputs
|
class BrosSelfAttention(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, bbox_pos_emb: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[torch.Tensor]=False) -> tuple[torch.Tensor]:
pass
| 3
| 0
| 44
| 7
| 31
| 6
| 5
| 0.2
| 1
| 4
| 0
| 0
| 3
| 11
| 3
| 13
| 136
| 24
| 94
| 46
| 80
| 19
| 74
| 36
| 70
| 12
| 1
| 2
| 16
|
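The distinguishing step in `BrosSelfAttention` (record 1,069) is the pairwise bounding-box term added to the content scores before scaling. Ignoring the optional relative-key terms and the masks, the score computed by the `forward` above is (summary only, not part of the record):

```latex
A_{b,n,i,j} = \frac{\mathbf{q}_{b,n,i}\cdot\mathbf{k}_{b,n,j}
\;+\; \mathbf{q}_{b,n,i}\cdot\mathbf{p}_{b,i,j}}{\sqrt{d_{\mathrm{head}}}}
```

Here $\mathbf{p}_{b,i,j}$ is `bbox_pos_emb` reshaped to `(batch, seq, seq, d_head)`; note that both the content and the bounding-box terms are divided by $\sqrt{d_{\mathrm{head}}}$ only after being summed, exactly as in the code.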
1,070
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosSelfOutput
|
import torch
from torch import nn
class BrosSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class BrosSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 11
| 6
| 8
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
1,071
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosSpadeEEForTokenClassification
|
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging
import torch
from torch import nn
from typing import Optional, Union
from torch.nn import CrossEntropyLoss
@auto_docstring(custom_intro='\n Bros Model with a token classification head on top (initial_token_layers and subsequent_token_layer on top of the\n hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. The initial_token_classifier is used to\n predict the first token of each entity, and the subsequent_token_classifier is used to predict the subsequent\n tokens within an entity. Compared to BrosForTokenClassification, this model is more robust to serialization errors\n since it predicts next token from one token.\n ')
class BrosSpadeEEForTokenClassification(BrosPreTrainedModel):
_keys_to_ignore_on_load_unexpected = ['pooler']
def __init__(self, config):
super().__init__(config)
self.config = config
self.num_labels = config.num_labels
self.n_relations = config.n_relations
self.backbone_hidden_size = config.hidden_size
self.bros = BrosModel(config)
classifier_dropout = config.classifier_dropout if hasattr(config, 'classifier_dropout') else config.hidden_dropout_prob
self.initial_token_classifier = nn.Sequential(nn.Dropout(classifier_dropout), nn.Linear(config.hidden_size, config.hidden_size), nn.Dropout(classifier_dropout), nn.Linear(config.hidden_size, config.num_labels))
self.subsequent_token_classifier = BrosRelationExtractor(config)
self.init_weights()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, bbox_first_token_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, initial_token_labels: Optional[torch.Tensor]=None, subsequent_token_labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BrosSpadeOutput]:
"""
        bbox (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values
(x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the
bounding box.
bbox_first_token_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to indicate the first token of each bounding box. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
initial_token_labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for the initial token classification.
subsequent_token_labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for the subsequent token classification.
Examples:
```python
>>> import torch
>>> from transformers import BrosProcessor, BrosSpadeEEForTokenClassification
>>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
>>> model = BrosSpadeEEForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
>>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
>>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
>>> encoding["bbox"] = bbox
>>> outputs = model(**encoding)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bros(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
last_hidden_states = outputs[0]
last_hidden_states = last_hidden_states.transpose(0, 1).contiguous()
initial_token_logits = self.initial_token_classifier(last_hidden_states).transpose(0, 1).contiguous()
subsequent_token_logits = self.subsequent_token_classifier(last_hidden_states, last_hidden_states).squeeze(0)
inv_attention_mask = 1 - attention_mask
batch_size, max_seq_length = inv_attention_mask.shape
device = inv_attention_mask.device
invalid_token_mask = torch.cat([inv_attention_mask, torch.zeros([batch_size, 1]).to(device)], axis=1).bool()
subsequent_token_logits = subsequent_token_logits.masked_fill(invalid_token_mask[:, None, :], torch.finfo(subsequent_token_logits.dtype).min)
self_token_mask = torch.eye(max_seq_length, max_seq_length + 1).to(device=device, dtype=torch.bool)
subsequent_token_logits = subsequent_token_logits.masked_fill(self_token_mask[None, :, :], torch.finfo(subsequent_token_logits.dtype).min)
subsequent_token_mask = attention_mask.view(-1).bool()
loss = None
if initial_token_labels is not None and subsequent_token_labels is not None:
loss_fct = CrossEntropyLoss()
initial_token_labels = initial_token_labels.view(-1)
if bbox_first_token_mask is not None:
bbox_first_token_mask = bbox_first_token_mask.view(-1)
initial_token_loss = loss_fct(initial_token_logits.view(-1, self.num_labels)[bbox_first_token_mask], initial_token_labels[bbox_first_token_mask])
else:
initial_token_loss = loss_fct(initial_token_logits.view(-1, self.num_labels), initial_token_labels)
subsequent_token_labels = subsequent_token_labels.view(-1)
subsequent_token_loss = loss_fct(subsequent_token_logits.view(-1, max_seq_length + 1)[subsequent_token_mask], subsequent_token_labels[subsequent_token_mask])
loss = initial_token_loss + subsequent_token_loss
return BrosSpadeOutput(loss=loss, initial_token_logits=initial_token_logits, subsequent_token_logits=subsequent_token_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n Bros Model with a token classification head on top (initial_token_layers and subsequent_token_layer on top of the\n hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. The initial_token_classifier is used to\n predict the first token of each entity, and the subsequent_token_classifier is used to predict the subsequent\n tokens within an entity. Compared to BrosForTokenClassification, this model is more robust to serialization errors\n since it predicts next token from one token.\n ')
class BrosSpadeEEForTokenClassification(BrosPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, bbox_first_token_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, initial_token_labels: Optional[torch.Tensor]=None, subsequent_token_labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BrosSpadeOutput]:
'''
        bbox (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values
(x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the
bounding box.
bbox_first_token_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to indicate the first token of each bounding box. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
initial_token_labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for the initial token classification.
subsequent_token_labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for the subsequent token classification.
Examples:
```python
>>> import torch
>>> from transformers import BrosProcessor, BrosSpadeEEForTokenClassification
>>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
>>> model = BrosSpadeEEForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
>>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
>>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
>>> encoding["bbox"] = bbox
>>> outputs = model(**encoding)
```'''
pass
| 6
| 1
| 64
| 10
| 46
| 9
| 4
| 0.18
| 1
| 6
| 3
| 0
| 2
| 7
| 2
| 3
| 134
| 22
| 95
| 43
| 75
| 17
| 43
| 27
| 40
| 6
| 2
| 2
| 8
|
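The least obvious part of record 1,071 is how the `(batch, seq, seq + 1)` subsequent-token logits are masked before the cross-entropy loss. The snippet below is an illustrative sketch of just that masking pattern with made-up shapes; the names mirror the `forward` above, but it is not an excerpt from the record.

```python
import torch

batch_size, max_seq_length = 2, 4
logits = torch.randn(batch_size, max_seq_length, max_seq_length + 1)
attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 1, 1]])

# Padded key positions are disabled; the extra last column (the "no successor"
# slot produced by the relation extractor's dummy node) is always kept.
inv_attention_mask = 1 - attention_mask
invalid_token_mask = torch.cat(
    [inv_attention_mask, torch.zeros(batch_size, 1, dtype=inv_attention_mask.dtype)], dim=1
).bool()
logits = logits.masked_fill(invalid_token_mask[:, None, :], torch.finfo(logits.dtype).min)

# A token is never its own successor, so the diagonal is masked out as well.
self_token_mask = torch.eye(max_seq_length, max_seq_length + 1, dtype=torch.bool)
logits = logits.masked_fill(self_token_mask[None, :, :], torch.finfo(logits.dtype).min)
```

After this masking, the loss is an ordinary `CrossEntropyLoss` over `max_seq_length + 1` classes, restricted to non-padded query positions.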
1,072
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosSpadeELForTokenClassification
|
from ...modeling_outputs import BaseModelOutputWithCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, TokenClassifierOutput
import torch
from torch.nn import CrossEntropyLoss
from typing import Optional, Union
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging
@auto_docstring(custom_intro='\n Bros Model with a token classification head on top (a entity_linker layer on top of the hidden-states output) e.g.\n for Entity-Linking. The entity_linker is used to predict intra-entity links (one entity to another entity).\n ')
class BrosSpadeELForTokenClassification(BrosPreTrainedModel):
_keys_to_ignore_on_load_unexpected = ['pooler']
def __init__(self, config):
super().__init__(config)
self.config = config
self.num_labels = config.num_labels
self.n_relations = config.n_relations
self.backbone_hidden_size = config.hidden_size
self.bros = BrosModel(config)
config.classifier_dropout if hasattr(config, 'classifier_dropout') else config.hidden_dropout_prob
self.entity_linker = BrosRelationExtractor(config)
self.init_weights()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, bbox_first_token_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
"""
        bbox (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values
(x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the
bounding box.
bbox_first_token_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to indicate the first token of each bounding box. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
Examples:
```python
>>> import torch
>>> from transformers import BrosProcessor, BrosSpadeELForTokenClassification
>>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
>>> model = BrosSpadeELForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
>>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
>>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
>>> encoding["bbox"] = bbox
>>> outputs = model(**encoding)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bros(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)
last_hidden_states = outputs[0]
last_hidden_states = last_hidden_states.transpose(0, 1).contiguous()
logits = self.entity_linker(last_hidden_states, last_hidden_states).squeeze(0)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
batch_size, max_seq_length = attention_mask.shape
device = attention_mask.device
self_token_mask = torch.eye(max_seq_length, max_seq_length + 1).to(device=device, dtype=torch.bool)
mask = bbox_first_token_mask.view(-1)
bbox_first_token_mask = torch.cat([~bbox_first_token_mask, torch.zeros([batch_size, 1], dtype=torch.bool, device=device)], axis=1)
logits = logits.masked_fill(bbox_first_token_mask[:, None, :], torch.finfo(logits.dtype).min)
logits = logits.masked_fill(self_token_mask[None, :, :], torch.finfo(logits.dtype).min)
loss = loss_fct(logits.view(-1, max_seq_length + 1)[mask], labels.view(-1)[mask])
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n Bros Model with a token classification head on top (a entity_linker layer on top of the hidden-states output) e.g.\n for Entity-Linking. The entity_linker is used to predict intra-entity links (one entity to another entity).\n ')
class BrosSpadeELForTokenClassification(BrosPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, bbox_first_token_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
'''
        bbox (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):
Bounding box coordinates for each token in the input sequence. Each bounding box is a list of four values
(x1, y1, x2, y2), where (x1, y1) is the top left corner, and (x2, y2) is the bottom right corner of the
bounding box.
bbox_first_token_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to indicate the first token of each bounding box. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
Examples:
```python
>>> import torch
>>> from transformers import BrosProcessor, BrosSpadeELForTokenClassification
>>> processor = BrosProcessor.from_pretrained("jinho8345/bros-base-uncased")
>>> model = BrosSpadeELForTokenClassification.from_pretrained("jinho8345/bros-base-uncased")
>>> encoding = processor("Hello, my dog is cute", add_special_tokens=False, return_tensors="pt")
>>> bbox = torch.tensor([[[0, 0, 1, 1]]]).repeat(1, encoding["input_ids"].shape[-1], 1)
>>> encoding["bbox"] = bbox
>>> outputs = model(**encoding)
```'''
pass
| 6
| 1
| 50
| 10
| 34
| 7
| 4
| 0.18
| 1
| 6
| 3
| 0
| 2
| 6
| 2
| 3
| 105
| 21
| 71
| 35
| 52
| 13
| 33
| 20
| 30
| 5
| 2
| 1
| 7
|
1,073
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/modeling_bros.py
|
transformers.models.bros.modeling_bros.BrosTextEmbeddings
|
from torch import nn
import torch
from typing import Optional, Union
class BrosTextEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)))
self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), persistent=False)
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None) -> torch.Tensor:
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
if hasattr(self, 'token_type_ids'):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == 'absolute':
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
|
class BrosTextEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
| 3
| 1
| 30
| 4
| 25
| 2
| 4
| 0.08
| 1
| 3
| 0
| 0
| 2
| 6
| 2
| 12
| 64
| 9
| 51
| 23
| 41
| 4
| 34
| 16
| 31
| 7
| 1
| 2
| 8
|
1,074
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/bros/processing_bros.py
|
transformers.models.bros.processing_bros.BrosProcessor
|
from ...processing_utils import ProcessingKwargs, ProcessorMixin
class BrosProcessor(ProcessorMixin):
"""
Constructs a Bros processor which wraps a BERT tokenizer.
[`BrosProcessor`] offers all the functionalities of [`BertTokenizerFast`]. See the docstring of
[`~BrosProcessor.__call__`] and [`~BrosProcessor.decode`] for more information.
Args:
tokenizer (`BertTokenizerFast`, *optional*):
            An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
"""
attributes = ['tokenizer']
tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
valid_processor_kwargs = BrosProcessorKwargs
def __init__(self, tokenizer=None, **kwargs):
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(tokenizer)
|
class BrosProcessor(ProcessorMixin):
'''
Constructs a Bros processor which wraps a BERT tokenizer.
[`BrosProcessor`] offers all the functionalities of [`BertTokenizerFast`]. See the docstring of
[`~BrosProcessor.__call__`] and [`~BrosProcessor.decode`] for more information.
Args:
tokenizer (`BertTokenizerFast`, *optional*):
            An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
'''
def __init__(self, tokenizer=None, **kwargs):
pass
| 2
| 1
| 13
| 1
| 10
| 2
| 1
| 0.38
| 1
| 9
| 2
| 0
| 5
| 0
| 5
| 22
| 84
| 11
| 53
| 29
| 28
| 20
| 17
| 10
| 11
| 2
| 2
| 1
| 6
|
1,075
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/byt5/tokenization_byt5.py
|
transformers.models.byt5.tokenization_byt5.ByT5Tokenizer
|
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
import warnings
from typing import Optional
class ByT5Tokenizer(PreTrainedTokenizer):
"""
Construct a ByT5 tokenizer. ByT5 simply uses raw bytes utf-8 encoding.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
extra_ids (`int`, *optional*, defaults to 125):
Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are
accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are
indexed from the end of the vocabulary up to beginning ("<extra_id_0>" is the last token in the vocabulary
like in ByT5 preprocessing see
[here](https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117)).
additional_special_tokens (`list[str]`, *optional*):
Additional special tokens used by the tokenizer.
"""
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, eos_token='</s>', unk_token='<unk>', pad_token='<pad>', extra_ids=125, additional_special_tokens=None, **kwargs) -> None:
if extra_ids > 0 and additional_special_tokens is None:
additional_special_tokens = [f'<extra_id_{i}>' for i in range(extra_ids)]
elif extra_ids > 0 and additional_special_tokens is not None and (len(additional_special_tokens) > 0):
extra_tokens = len(set(filter(lambda x: bool('extra_id' in str(x)), additional_special_tokens)))
if extra_tokens != extra_ids:
raise ValueError(f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are provided to ByT5Tokenizer. In this case the additional_special_tokens must include the extra_ids tokens')
pad_token = AddedToken(pad_token, lstrip=True, rstrip=True) if isinstance(pad_token, str) else pad_token
eos_token = AddedToken(eos_token, lstrip=True, rstrip=True) if isinstance(eos_token, str) else eos_token
unk_token = AddedToken(unk_token, lstrip=True, rstrip=True) if isinstance(unk_token, str) else unk_token
self._added_tokens_decoder = {0: pad_token, 1: eos_token, 2: unk_token}
self.offset = len(self._added_tokens_decoder)
self._utf_vocab_size = 2 ** 8
super().__init__(eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=0, additional_special_tokens=additional_special_tokens, **kwargs)
@property
def vocab_size(self):
return self._utf_vocab_size
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.offset)}
vocab.update(self.added_tokens_encoder)
return vocab
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is None:
return [0] * len(token_ids_0) + [1]
return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]
def _add_eos_if_not_present(self, token_ids: list[int]) -> list[int]:
"""Do not add eos again if user already added it."""
if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated eos tokens being added.')
return token_ids
else:
return token_ids + [self.eos_token_id]
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. ByT5 does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
eos = [self.eos_token_id]
if token_ids_1 is None:
return len(token_ids_0 + eos) * [0]
return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A sequence has the following format:
- single sequence: `X </s>`
- pair of sequences: `A </s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
token_ids_0 = self._add_eos_if_not_present(token_ids_0)
if token_ids_1 is None:
return token_ids_0
else:
token_ids_1 = self._add_eos_if_not_present(token_ids_1)
return token_ids_0 + token_ids_1
def _tokenize(self, text: str) -> list[str]:
"""Take as input a string and return a list of strings (tokens) for words/sub-words"""
tokens = [chr(i) for i in text.encode('utf-8')]
return tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
if len(token) != 1:
token_id = None
else:
token_id = ord(token) + self.offset
return token_id
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
token = chr(index - self.offset)
return token
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
bstring = b''
for token in tokens:
if token in self.added_tokens_decoder:
tok_string = self.added_tokens_decoder[token].encode('utf-8')
elif token in self.added_tokens_encoder:
tok_string = token.encode('utf-8')
else:
tok_string = bytes([ord(token)])
bstring += tok_string
string = bstring.decode('utf-8', errors='ignore')
return string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
return ()
|
class ByT5Tokenizer(PreTrainedTokenizer):
'''
Construct a ByT5 tokenizer. ByT5 simply uses raw bytes utf-8 encoding.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
extra_ids (`int`, *optional*, defaults to 125):
Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are
accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are
indexed from the end of the vocabulary up to beginning ("<extra_id_0>" is the last token in the vocabulary
like in ByT5 preprocessing see
[here](https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117)).
additional_special_tokens (`list[str]`, *optional*):
Additional special tokens used by the tokenizer.
'''
def __init__(self, eos_token='</s>', unk_token='<unk>', pad_token='<pad>', extra_ids=125, additional_special_tokens=None, **kwargs) -> None:
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def _add_eos_if_not_present(self, token_ids: list[int]) -> list[int]:
'''Do not add eos again if user already added it.'''
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Create a mask from the two sequences passed to be used in a sequence-pair classification task. ByT5 does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
'''
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A sequence has the following format:
- single sequence: `X </s>`
- pair of sequences: `A </s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def _tokenize(self, text: str) -> list[str]:
'''Take as input a string and return a list of strings (tokens) for words/sub-words'''
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) in an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) in a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (string) in a single string.'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 14
| 9
| 13
| 1
| 8
| 4
| 2
| 0.73
| 1
| 9
| 0
| 0
| 12
| 3
| 12
| 101
| 207
| 31
| 103
| 42
| 75
| 75
| 66
| 27
| 53
| 7
| 3
| 2
| 27
|
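Record 1,075 (`ByT5Tokenizer`) needs no vocabulary file: tokens are raw UTF-8 bytes, and ids 0-2 are reserved for `<pad>`, `</s>` and `<unk>`. A minimal sketch of the mapping performed by `_tokenize` and `_convert_token_to_id` (illustrative only, not an excerpt from the record):

```python
text = "hé"
offset = 3  # len(self._added_tokens_decoder): <pad>, </s>, <unk>
tokens = [chr(b) for b in text.encode("utf-8")]  # one "token" per UTF-8 byte
ids = [ord(t) + offset for t in tokens]
print(tokens)  # ['h', 'Ã', '©'] -- 'é' occupies two bytes
print(ids)     # [107, 198, 172]
```

Going back the other way, `_convert_id_to_token` simply does `chr(index - offset)`, and `convert_tokens_to_string` re-decodes the collected bytes with `errors="ignore"`.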
1,076
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/configuration_camembert.py
|
transformers.models.camembert.configuration_camembert.CamembertConfig
|
from ...configuration_utils import PretrainedConfig
class CamembertConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`CamembertModel`] or a [`TFCamembertModel`]. It is
used to instantiate a Camembert model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the Camembert
[almanach/camembert-base](https://huggingface.co/almanach/camembert-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`CamembertModel`] or [`TFCamembertModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`CamembertModel`] or [`TFCamembertModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
is_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
Example:
```python
>>> from transformers import CamembertConfig, CamembertModel
>>> # Initializing a Camembert almanach/camembert-base style configuration
>>> configuration = CamembertConfig()
>>> # Initializing a model (with random weights) from the almanach/camembert-base style configuration
>>> model = CamembertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'camembert'
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
|
class CamembertConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`CamembertModel`] or a [`TFCamembertModel`]. It is
used to instantiate a Camembert model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the Camembert
[almanach/camembert-base](https://huggingface.co/almanach/camembert-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`CamembertModel`] or [`TFCamembertModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`CamembertModel`] or [`TFCamembertModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
is_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
Example:
```python
>>> from transformers import CamembertConfig, CamembertModel
>>> # Initializing a Camembert almanach/camembert-base style configuration
>>> configuration = CamembertConfig()
>>> # Initializing a model (with random weights) from the almanach/camembert-base style configuration
>>> model = CamembertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):
pass
| 2
| 1
| 39
| 1
| 38
| 0
| 1
| 1.45
| 1
| 1
| 0
| 0
| 1
| 15
| 1
| 1
| 109
| 11
| 40
| 39
| 17
| 58
| 19
| 18
| 17
| 1
| 1
| 0
| 1
|
1,077
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/configuration_camembert.py
|
transformers.models.camembert.configuration_camembert.CamembertOnnxConfig
|
from collections import OrderedDict
from ...onnx import OnnxConfig
from collections.abc import Mapping
class CamembertOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == 'multiple-choice':
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])
|
class CamembertOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
| 3
| 0
| 11
| 0
| 11
| 0
| 2
| 0
| 1
| 3
| 0
| 0
| 1
| 0
| 1
| 1
| 13
| 0
| 13
| 4
| 10
| 0
| 6
| 3
| 4
| 2
| 1
| 1
| 2
|
1,078
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertAttention
|
from typing import Callable, Optional, Union
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...cache_utils import Cache, EncoderDecoderCache
import torch
from ...processing_utils import Unpack
import torch.nn as nn
class CamembertAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None, is_cross_attention=False):
super().__init__()
self.is_cross_attention = is_cross_attention
attention_class = CamembertCrossAttention if is_cross_attention else CamembertSelfAttention
self.self = attention_class(config, position_embedding_type=position_embedding_type, is_causal=is_causal, layer_idx=layer_idx)
self.output = CamembertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
attention_mask = attention_mask if not self.is_cross_attention else encoder_attention_mask
attention_output, attn_weights = self.self(hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, head_mask=head_mask, past_key_value=past_key_value, cache_position=cache_position, **kwargs)
attention_output = self.output(attention_output, hidden_states)
return (attention_output, attn_weights)
|
class CamembertAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None, is_cross_attention=False):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
| 4
| 0
| 15
| 1
| 14
| 1
| 1
| 0.07
| 1
| 5
| 1
| 0
| 3
| 3
| 3
| 13
| 49
| 4
| 43
| 20
| 30
| 3
| 22
| 11
| 18
| 2
| 1
| 1
| 4
|
1,079
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertClassificationHead
|
import torch
import torch.nn as nn
class CamembertClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :]
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
|
class CamembertClassificationHead(nn.Module):
'''Head for sentence-level classification tasks.'''
def __init__(self, config):
pass
def forward(self, features, **kwargs):
pass
| 3
| 1
| 8
| 0
| 8
| 1
| 2
| 0.12
| 1
| 1
| 0
| 0
| 2
| 3
| 2
| 12
| 20
| 2
| 17
| 8
| 14
| 2
| 15
| 8
| 12
| 2
| 1
| 0
| 3
|
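Hedged sketch for the classification-head row above: the head only touches four config attributes in `__init__`, so a small stand-in config (values assumed) is enough to show the `(batch, num_labels)` logits pooled from the first token.
import torch
from types import SimpleNamespace
from transformers.models.camembert.modeling_camembert import CamembertClassificationHead

# Stand-in config carrying only the attributes the head reads (assumed sizes).
cfg = SimpleNamespace(hidden_size=8, num_labels=3, classifier_dropout=None, hidden_dropout_prob=0.1)
head = CamembertClassificationHead(cfg)

features = torch.randn(2, 5, 8)   # (batch, seq_len, hidden_size)
logits = head(features)           # pools features[:, 0, :], then dense -> tanh -> out_proj
print(logits.shape)               # torch.Size([2, 3])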
1,080
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertEmbeddings
|
import torch
import torch.nn as nn
from typing import Callable, Optional, Union
class CamembertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.register_buffer('token_type_ids', torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False)
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx)
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
if position_ids is None:
if input_ids is not None:
position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds, self.padding_idx)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
if token_type_ids is None:
if hasattr(self, 'token_type_ids'):
buffered_token_type_ids = self.token_type_ids.expand(position_ids.shape[0], -1)
buffered_token_type_ids = torch.gather(buffered_token_type_ids, dim=1, index=position_ids)
token_type_ids = buffered_token_type_ids.expand(batch_size, seq_length)
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == 'absolute':
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
@staticmethod
def create_position_ids_from_inputs_embeds(inputs_embeds, padding_idx):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(padding_idx + 1, sequence_length + padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)
return position_ids.unsqueeze(0).expand(input_shape)
@staticmethod
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids: torch.Tensor
Returns: torch.Tensor
"""
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
|
class CamembertEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
pass
@staticmethod
def create_position_ids_from_inputs_embeds(inputs_embeds, padding_idx):
'''
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
'''
pass
@staticmethod
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
'''
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids: torch.Tensor
Returns: torch.Tensor
'''
pass
| 7
| 3
| 26
| 3
| 18
| 5
| 3
| 0.32
| 1
| 1
| 0
| 0
| 3
| 7
| 3
| 13
| 87
| 13
| 56
| 23
| 50
| 18
| 43
| 21
| 39
| 8
| 1
| 2
| 10
|
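Illustrative worked example for the `create_position_ids_from_input_ids` helper in the row above, assuming `padding_idx=1` (the RoBERTa-style pad id CamemBERT configs default to): real tokens count up from `padding_idx + 1`, padding positions stay at `padding_idx`.
import torch
from transformers.models.camembert.modeling_camembert import CamembertEmbeddings

input_ids = torch.tensor([[5, 42, 87, 1, 1]])   # last two tokens are padding (id 1)
positions = CamembertEmbeddings.create_position_ids_from_input_ids(input_ids, padding_idx=1)
print(positions)   # tensor([[2, 3, 4, 1, 1]])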
1,081
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertEncoder
|
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
import torch.nn as nn
from ...processing_utils import Unpack
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...cache_utils import Cache, EncoderDecoderCache
from typing import Callable, Optional, Union
import torch
class CamembertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([CamembertLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
for i, layer_module in enumerate(self.layer):
layer_head_mask = head_mask[i] if head_mask is not None else None
hidden_states = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_values, cache_position=cache_position, **kwargs)
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values if use_cache else None)
|
class CamembertEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
pass
| 3
| 0
| 45
| 4
| 41
| 0
| 9
| 0
| 1
| 8
| 2
| 0
| 2
| 3
| 2
| 12
| 91
| 8
| 83
| 26
| 68
| 0
| 35
| 14
| 32
| 17
| 1
| 3
| 18
|
1,082
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertForCausalLM
|
import torch
import torch.nn as nn
from ...generation import GenerationMixin
from ...processing_utils import Unpack
from ...utils.generic import can_return_tuple, check_model_inputs
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
@auto_docstring(custom_intro='\n Camembert Model with a `language modeling` head on top for CLM fine-tuning.\n ')
class CamembertForCausalLM(CamembertPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.decoder.weight', 'lm_head.decoder.bias']
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning('If you want to use `CamembertLMHeadModel` as a standalone, add `is_decoder=True.`')
self.lm_head = CamembertLMHead(config)
self.roberta = CamembertModel(config, add_pooling_layer=False)
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
"""
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Example:
```python
>>> from transformers import AutoTokenizer, CamembertForCausalLM, AutoConfig
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("almanach/camembert-base")
>>> config = AutoConfig.from_pretrained("almanach/camembert-base")
>>> config.is_decoder = True
>>> model = CamembertForCausalLM.from_pretrained("almanach/camembert-base", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```"""
if labels is not None:
use_cache = False
outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, return_dict=True, **kwargs)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
lm_loss = None
if labels is not None:
labels = labels.to(prediction_scores.device)
lm_loss = self.loss_function(prediction_scores, labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
|
@auto_docstring(custom_intro='\n Camembert Model with a `language modeling` head on top for CLM fine-tuning.\n ')
class CamembertForCausalLM(CamembertPreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, past_key_values: Optional[tuple[tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
'''
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Example:
```python
>>> from transformers import AutoTokenizer, CamembertForCausalLM, AutoConfig
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("almanach/camembert-base")
>>> config = AutoConfig.from_pretrained("almanach/camembert-base")
>>> config.is_decoder = True
>>> model = CamembertForCausalLM.from_pretrained("almanach/camembert-base", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```'''
pass
| 8
| 1
| 26
| 3
| 15
| 7
| 2
| 0.45
| 2
| 7
| 3
| 0
| 5
| 2
| 5
| 6
| 138
| 22
| 80
| 34
| 55
| 36
| 33
| 16
| 27
| 6
| 2
| 1
| 12
|
1,083
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertForMaskedLM
|
from ...utils.generic import can_return_tuple, check_model_inputs
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch.nn as nn
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
import torch
from typing import Callable, Optional, Union
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...processing_utils import Unpack
@auto_docstring
class CamembertForMaskedLM(CamembertPreTrainedModel):
_tied_weights_keys = ['lm_head.decoder.weight', 'lm_head.decoder.bias']
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning('If you want to use `CamembertForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.')
self.lm_head = CamembertLMHead(config)
self.roberta = CamembertModel(config, add_pooling_layer=False)
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
"""
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, return_dict=True, **kwargs)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
labels = labels.to(prediction_scores.device)
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class CamembertForMaskedLM(CamembertPreTrainedModel):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], MaskedLMOutput]:
'''
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
'''
pass
| 8
| 1
| 19
| 2
| 15
| 3
| 2
| 0.14
| 1
| 6
| 3
| 0
| 4
| 2
| 4
| 5
| 91
| 11
| 70
| 29
| 42
| 10
| 27
| 14
| 22
| 5
| 2
| 1
| 9
|
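Hedged usage sketch for the masked-LM row above, reusing the "almanach/camembert-base" checkpoint name already cited in the causal-LM docstring earlier in this file; the mask string assumes the tokenizer's default `<mask>` token.
import torch
from transformers import AutoTokenizer
from transformers.models.camembert.modeling_camembert import CamembertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("almanach/camembert-base")
model = CamembertForMaskedLM.from_pretrained("almanach/camembert-base")

inputs = tokenizer("Le camembert est <mask> !", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Decode the highest-scoring token at the masked position.
mask_index = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
print(tokenizer.decode(logits[0, mask_index].argmax(dim=-1)))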
1,084
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertForMultipleChoice
|
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch.nn as nn
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...utils.generic import can_return_tuple, check_model_inputs
from ...processing_utils import Unpack
import torch
from typing import Callable, Optional, Union
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
@auto_docstring
class CamembertForMultipleChoice(CamembertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.roberta = CamembertModel(config, add_pooling_layer=False)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
"""
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
outputs = self.roberta(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, return_dict=True, **kwargs)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
labels = labels.to(reshaped_logits.device)
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class CamembertForMultipleChoice(CamembertPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
'''
pass
| 6
| 1
| 38
| 4
| 30
| 4
| 6
| 0.12
| 1
| 5
| 2
| 0
| 2
| 3
| 2
| 3
| 85
| 9
| 68
| 34
| 45
| 8
| 29
| 19
| 26
| 11
| 2
| 1
| 12
|
1,085
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertForQuestionAnswering
|
from ...utils.generic import can_return_tuple, check_model_inputs
import torch
from typing import Callable, Optional, Union
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch.nn as nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...processing_utils import Unpack
@auto_docstring
class CamembertForQuestionAnswering(CamembertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.roberta = CamembertModel(config, add_pooling_layer=False)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
"""
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
"""
outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class CamembertForQuestionAnswering(CamembertPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
'''
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
'''
pass
| 6
| 1
| 41
| 5
| 30
| 7
| 4
| 0.19
| 1
| 5
| 2
| 0
| 2
| 3
| 2
| 3
| 92
| 10
| 69
| 30
| 45
| 13
| 32
| 16
| 29
| 7
| 2
| 2
| 8
|
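Hedged shape-level sketch for the QA row above: a deliberately tiny, randomly initialized config (all sizes are assumptions) is enough to see the per-token start/end logits and the span loss computed when both positions are given.
import torch
from transformers import CamembertConfig, CamembertForQuestionAnswering

cfg = CamembertConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                      num_attention_heads=2, intermediate_size=64)
model = CamembertForQuestionAnswering(cfg)

input_ids = torch.randint(3, 100, (1, 12))
out = model(input_ids=input_ids,
            start_positions=torch.tensor([3]),
            end_positions=torch.tensor([5]))
print(out.start_logits.shape, out.end_logits.shape, out.loss)   # (1, 12), (1, 12), scalar loss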
1,086
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertForSequenceClassification
|
from ...processing_utils import Unpack
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch
import torch.nn as nn
from typing import Callable, Optional, Union
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils.generic import can_return_tuple, check_model_inputs
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
@auto_docstring(custom_intro='\n Camembert Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class CamembertForSequenceClassification(CamembertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.classifier = CamembertClassificationHead(config)
self.roberta = CamembertModel(config, add_pooling_layer=False)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
"""
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
labels = labels.to(logits.device)
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n Camembert Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class CamembertForSequenceClassification(CamembertPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
'''
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 6
| 1
| 40
| 4
| 33
| 4
| 7
| 0.11
| 1
| 7
| 3
| 0
| 2
| 4
| 2
| 3
| 90
| 8
| 74
| 26
| 51
| 8
| 34
| 13
| 31
| 12
| 2
| 3
| 13
|
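Hedged sketch of the loss dispatch in the sequence-classification row above (tiny assumed config, random weights): integer labels with `num_labels=3` make `problem_type` resolve to single-label classification, i.e. cross-entropy.
import torch
from transformers import CamembertConfig, CamembertForSequenceClassification

cfg = CamembertConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                      num_attention_heads=2, intermediate_size=64, num_labels=3)
model = CamembertForSequenceClassification(cfg)

input_ids = torch.randint(3, 100, (2, 10))
out = model(input_ids=input_ids, labels=torch.tensor([0, 2]))   # long labels -> cross-entropy
print(cfg.problem_type, out.logits.shape)   # 'single_label_classification', torch.Size([2, 3])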
1,087
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertForTokenClassification
|
from ...processing_utils import Unpack
import torch.nn as nn
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch
from typing import Callable, Optional, Union
from ...utils.generic import can_return_tuple, check_model_inputs
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
@auto_docstring
class CamembertForTokenClassification(CamembertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.roberta = CamembertModel(config, add_pooling_layer=False)
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
"""
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=True, **kwargs)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
labels = labels.to(logits.device)
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class CamembertForTokenClassification(CamembertPreTrainedModel):
def __init__(self, config):
pass
@can_return_tuple
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
'''
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
'''
pass
| 6
| 1
| 33
| 4
| 26
| 3
| 4
| 0.1
| 1
| 5
| 2
| 0
| 2
| 4
| 2
| 3
| 76
| 9
| 61
| 27
| 38
| 6
| 24
| 14
| 21
| 5
| 2
| 1
| 7
|
1,088
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertIntermediate
|
import torch
from ...activations import ACT2FN, gelu
import torch.nn as nn
class CamembertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class CamembertIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 2
| 0
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 12
| 5
| 9
| 0
| 11
| 5
| 8
| 2
| 1
| 1
| 3
|
1,089
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertLMHead
|
from ...activations import ACT2FN, gelu
import torch
import torch.nn as nn
class CamembertLMHead(nn.Module):
"""Camembert Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
x = self.decoder(x)
return x
def _tie_weights(self):
if self.decoder.bias.device.type == 'meta':
self.decoder.bias = self.bias
else:
self.bias = self.decoder.bias
|
class CamembertLMHead(nn.Module):
'''Camembert Head for masked language modeling.'''
def __init__(self, config):
pass
def forward(self, features, **kwargs):
pass
def _tie_weights(self):
pass
| 4
| 1
| 8
| 1
| 6
| 1
| 1
| 0.21
| 1
| 1
| 0
| 0
| 3
| 4
| 3
| 13
| 29
| 6
| 19
| 9
| 15
| 4
| 18
| 9
| 14
| 2
| 1
| 1
| 4
|
1,090
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertLayer
|
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
import torch.nn as nn
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...modeling_layers import GradientCheckpointingLayer
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from ...cache_utils import Cache, EncoderDecoderCache
import torch
class CamembertLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = CamembertAttention(config, is_causal=config.is_decoder, layer_idx=layer_idx)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f'{self} should be used as a decoder model if cross attention is added')
self.crossattention = CamembertAttention(config, position_embedding_type='absolute', is_causal=False, layer_idx=layer_idx, is_cross_attention=True)
self.intermediate = CamembertIntermediate(config)
self.output = CamembertOutput(config)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
self_attention_output, _ = self.attention(hidden_states, attention_mask, head_mask, past_key_value=past_key_value, cache_position=cache_position, **kwargs)
attention_output = self_attention_output
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, 'crossattention'):
raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`')
cross_attention_output, _ = self.crossattention(self_attention_output, None, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value=past_key_value, **kwargs)
attention_output = cross_attention_output
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
return layer_output
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
|
class CamembertLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
def feed_forward_chunk(self, attention_output):
pass
| 4
| 0
| 27
| 2
| 23
| 2
| 4
| 0.1
| 1
| 7
| 3
| 0
| 3
| 8
| 3
| 13
| 84
| 9
| 70
| 32
| 57
| 7
| 41
| 23
| 37
| 7
| 1
| 2
| 11
|
1,091
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertModel
|
from ...masking_utils import create_causal_mask
from ...cache_utils import Cache, EncoderDecoderCache
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
import torch.nn as nn
from ...utils.generic import can_return_tuple, check_model_inputs
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
import torch
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
@auto_docstring(custom_intro='\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in [Attention is\n all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set\n to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and\n `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.\n ')
class CamembertModel(CamembertPreTrainedModel):
_no_split_modules = ['CamembertEmbeddings', 'CamembertLayer']
def __init__(self, config, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.gradient_checkpointing = False
self.embeddings = CamembertEmbeddings(config)
self.encoder = CamembertEncoder(config)
self.pooler = CamembertPooler(config) if add_pooling_layer else None
self.position_embedding_type = config.position_embedding_type
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@check_model_inputs
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Union[list[torch.FloatTensor], Cache]]=None, use_cache: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
return_legacy_cache = False
if use_cache and (not isinstance(past_key_values, Cache)):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
return_legacy_cache = True
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if input_ids is not None:
device = input_ids.device
input_shape = input_ids.shape
else:
device = inputs_embeds.device
input_shape = inputs_embeds.shape[:-1]
seq_length = input_shape[1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=device)
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length)
attention_mask, encoder_attention_mask = self._create_attention_masks(input_shape=input_shape, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, embedding_output=embedding_output, encoder_hidden_states=encoder_hidden_states, cache_position=cache_position, past_key_values=past_key_values)
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs = self.encoder(embedding_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_ids=position_ids, **kwargs)
sequence_output = encoder_outputs.last_hidden_state
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if return_legacy_cache:
encoder_outputs.past_key_values = encoder_outputs.past_key_values.to_legacy_cache()
return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values)
def _create_attention_masks(self, input_shape, attention_mask, encoder_attention_mask, embedding_output, encoder_hidden_states, cache_position, past_key_values):
if attention_mask is not None and attention_mask.dim() == 2:
if self.config.is_decoder:
attention_mask = create_causal_mask(config=self.config, input_embeds=embedding_output, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values)
else:
attention_mask = self._update_full_mask(attention_mask, embedding_output)
elif attention_mask is not None and attention_mask.dim() == 3:
if 'flash' in self.config._attn_implementation or self.config._attn_implementation == 'flex_attention':
raise ValueError(f'Passing attention mask with a 3D/4D shape does not work with type {self.config._attn_implementation} - please use either `sdpa` or `eager` instead.')
attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
if encoder_attention_mask is not None:
if encoder_attention_mask.dim() == 2:
encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, embedding_output.shape[:2], embedding_output)
else:
if 'flash' in self.config._attn_implementation or self.config._attn_implementation == 'flex_attention':
raise ValueError(f'Passing attention mask with a 3D/4D shape does not work with type {self.config._attn_implementation} - please use either `sdpa` or `eager` instead.')
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
return (attention_mask, encoder_attention_mask)
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
if attention_mask is not None:
if 'flash' in self.config._attn_implementation:
attention_mask = attention_mask if 0 in attention_mask else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
elif self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
else:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
return attention_mask
def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
if encoder_hidden_states is not None and encoder_attention_mask is not None:
if 'flash' in self.config._attn_implementation:
encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
elif self.config._attn_implementation == 'sdpa':
encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
elif self.config._attn_implementation == 'flex_attention':
if isinstance(encoder_attention_mask, torch.Tensor):
encoder_attention_mask = make_flex_block_causal_mask(encoder_attention_mask, query_length=input_shape[-1], is_causal=False)
else:
encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
return encoder_attention_mask
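For readers unfamiliar with the additive-mask convention used by `_update_full_mask`, the following is a minimal, torch-only sketch of what the 2D-to-4D expansion amounts to on the eager path; the helper name `expand_padding_mask` and the toy sizes are illustrative, not part of the library.

```python
import torch

def expand_padding_mask(mask_2d, dtype, tgt_len=None):
    # mask_2d: [batch, src_len] with 1 for real tokens and 0 for padding.
    batch, src_len = mask_2d.shape
    tgt_len = tgt_len if tgt_len is not None else src_len
    # Broadcast to [batch, 1, tgt_len, src_len], then turn padded positions
    # into a large negative bias that is added to the attention scores.
    expanded = mask_2d[:, None, None, :].to(dtype).expand(batch, 1, tgt_len, src_len)
    return (1.0 - expanded) * torch.finfo(dtype).min

mask = torch.tensor([[1, 1, 1, 0]])            # one padded position
bias = expand_padding_mask(mask, torch.float32)
print(bias.shape)                              # torch.Size([1, 1, 4, 4])
print(bias[0, 0, 0])                           # last entry is ~ -3.4e38, i.e. masked out
```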
| null | 12
| 2
| 37
| 4
| 25
| 8
| 5
| 0.37
| 1
| 8
| 4
| 0
| 5
| 6
| 5
| 6
| 216
| 31
| 135
| 45
| 108
| 50
| 65
| 29
| 59
| 21
| 2
| 2
| 27
|
1,092
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertOutput
|
import torch.nn as nn
import torch
class CamembertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
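A quick shape check for the block above, assuming the `CamembertOutput` class defined here is in scope; the `SimpleNamespace` stands in for the real `CamembertConfig` and the batch/sequence sizes are arbitrary.

```python
import torch
from types import SimpleNamespace

config = SimpleNamespace(intermediate_size=3072, hidden_size=768,
                         layer_norm_eps=1e-12, hidden_dropout_prob=0.1)
block = CamembertOutput(config)

intermediate = torch.randn(2, 16, config.intermediate_size)  # FFN expansion output
residual = torch.randn(2, 16, config.hidden_size)            # attention-block output
out = block(intermediate, residual)
print(out.shape)  # torch.Size([2, 16, 768]) -- projected back, then LayerNorm around the residual
```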
|
class CamembertOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 11
| 6
| 8
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
1,093
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertPooler
|
import torch
import torch.nn as nn
class CamembertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
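A minimal sketch of what the pooler consumes and returns, again with a stand-in config; only the first position (the `<s>` token) contributes to the pooled vector.

```python
import torch
from types import SimpleNamespace

config = SimpleNamespace(hidden_size=768)
pooler = CamembertPooler(config)

hidden_states = torch.randn(4, 128, 768)  # [batch, seq_len, hidden]
pooled = pooler(hidden_states)
print(pooled.shape)                       # torch.Size([4, 768]) -- built from position 0 only
```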
|
class CamembertPooler(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 5
| 1
| 1
| 0.2
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 10
| 7
| 7
| 2
| 10
| 7
| 7
| 1
| 1
| 0
| 2
|
1,094
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertPreTrainedModel
|
import torch.nn as nn
from .configuration_camembert import CamembertConfig
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
@auto_docstring
class CamembertPreTrainedModel(PreTrainedModel):
config_class = CamembertConfig
base_model_prefix = 'roberta'
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': CamembertLayer, 'attentions': CamembertSelfAttention, 'cross_attentions': CamembertCrossAttention}
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, CamembertLMHead):
module.bias.data.zero_()
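Applied to standalone modules, the rules above reduce to the following sketch; the `std` value is a stand-in for `config.initializer_range`, whose usual default is 0.02.

```python
import torch.nn as nn

std = 0.02  # stand-in for config.initializer_range
linear = nn.Linear(768, 768)
linear.weight.data.normal_(mean=0.0, std=std)
linear.bias.data.zero_()

norm = nn.LayerNorm(768)
norm.bias.data.zero_()
norm.weight.data.fill_(1.0)

print(norm.weight.data.mean().item())       # 1.0
print(linear.bias.data.abs().sum().item())  # 0.0
```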
|
@auto_docstring
class CamembertPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3
| 1
| 15
| 0
| 12
| 3
| 6
| 0.47
| 1
| 0
| 0
| 7
| 1
| 0
| 1
| 1
| 27
| 2
| 17
| 6
| 15
| 8
| 15
| 6
| 13
| 6
| 1
| 2
| 6
|
1,095
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertSelfAttention
|
import torch.nn as nn
from typing import Callable, Optional, Union
import torch
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import TransformersKwargs, auto_docstring, is_torch_flex_attn_available, logging
from ...processing_utils import Unpack
from ...cache_utils import Cache, EncoderDecoderCache
class CamembertSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads ({config.num_attention_heads})')
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.scaling = self.attention_head_size ** (-0.5)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(config, 'position_embedding_type', 'absolute')
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
query_layer = self.query(hidden_states).view(*hidden_shape).transpose(1, 2)
key_layer = self.key(hidden_states).view(*hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*hidden_shape).transpose(1, 2)
if past_key_value is not None:
current_past_key_value = past_key_value
if isinstance(past_key_value, EncoderDecoderCache):
current_past_key_value = past_key_value.self_attention_cache
key_layer, value_layer = current_past_key_value.update(key_layer, value_layer, self.layer_idx, {'cache_position': cache_position})
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
if self.position_embedding_type != 'absolute':
raise ValueError(f'You are using {self.config._attn_implementation} as attention type. However, non-absolute positional embeddings can not work with them. Please load the model with `attn_implementation="eager"`.')
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_layer, key_layer, value_layer, attention_mask, dropout=0.0 if not self.training else self.dropout.p, scaling=self.scaling, head_mask=head_mask, use_cache=past_key_value is not None, **kwargs)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
return (attn_output, attn_weights)
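A shape walk-through of the head split performed in `forward`, assuming `CamembertSelfAttention` as defined above is in scope; the config object is a stand-in and the batch/sequence sizes are arbitrary.

```python
import torch
from types import SimpleNamespace

config = SimpleNamespace(hidden_size=768, num_attention_heads=12,
                         attention_probs_dropout_prob=0.1,
                         is_decoder=False, _attn_implementation="eager")
attn = CamembertSelfAttention(config, layer_idx=0)

hidden = torch.randn(2, 10, 768)                      # [batch, seq_len, hidden]
hidden_shape = (2, 10, -1, attn.attention_head_size)
q = attn.query(hidden).view(*hidden_shape).transpose(1, 2)
print(q.shape)       # torch.Size([2, 12, 10, 64]) -- [batch, heads, seq_len, head_dim]
print(attn.scaling)  # 0.125 == 64 ** -0.5, the factor applied to the attention scores
```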
|
class CamembertSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, is_causal=False, layer_idx=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_value: Optional[Cache]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> tuple[torch.Tensor]:
pass
| 3
| 0
| 43
| 7
| 31
| 6
| 6
| 0.19
| 1
| 5
| 0
| 1
| 3
| 11
| 3
| 13
| 132
| 22
| 93
| 44
| 80
| 18
| 72
| 35
| 68
| 13
| 1
| 2
| 17
|
1,096
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/modeling_camembert.py
|
transformers.models.camembert.modeling_camembert.CamembertSelfOutput
|
import torch
import torch.nn as nn
class CamembertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class CamembertSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 11
| 6
| 8
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
1,097
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/tokenization_camembert.py
|
transformers.models.camembert.tokenization_camembert.CamembertTokenizer
|
from ...utils.import_utils import requires
import os
from typing import Any, Optional
from shutil import copyfile
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
import sentencepiece as spm
@requires(backends=('sentencepiece',))
class CamembertTokenizer(PreTrainedTokenizer):
"""
Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Construct a CamemBERT tokenizer. Based on
[SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (`list[str]`, *optional*, defaults to `['<s>NOTUSED', '</s>NOTUSED', '<unk>NOTUSED']`):
Additional special tokens used by the tokenizer.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
using the forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED', '<unk>NOTUSED'], sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False, special=True) if isinstance(mask_token, str) else mask_token
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(vocab_file))
self.vocab_file = vocab_file
self._added_tokens_decoder = {0: AddedToken('<s>NOTUSED', special=True), 1: AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token, 2: AddedToken('</s>NOTUSED', special=True), 3: AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token, 4: AddedToken('<unk>NOTUSED', special=True)}
self.fairseq_offset = 4
if 'added_tokens_decoder' in kwargs:
kwargs['added_tokens_decoder'].update(self._added_tokens_decoder)
super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
@property
def vocab_size(self):
return len(self.sp_model)
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.fairseq_offset)}
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text: str) -> list[str]:
return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
if self.sp_model.PieceToId(token) == 0:
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
current_sub_tokens = []
out_string = ''
prev_is_special = False
for token in tokens:
if token in self.all_special_tokens:
if not prev_is_special:
out_string += ' '
out_string += self.sp_model.decode(current_sub_tokens) + token
prev_is_special = True
current_sub_tokens = []
else:
current_sub_tokens.append(token)
prev_is_special = False
out_string += self.sp_model.decode(current_sub_tokens)
return out_string.strip()
def __getstate__(self):
state = self.__dict__.copy()
state['sp_model'] = None
return state
def __setstate__(self, d):
self.__dict__ = d
if not hasattr(self, 'sp_model_kwargs'):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, 'wb') as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A CamemBERT sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is None:
return [1] + [0] * len(token_ids_0) + [1]
return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. CamemBERT, like
RoBERTa, does not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
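A hedged usage sketch of the special-token layouts documented above; the checkpoint name is an assumption (any CamemBERT checkpoint with a SentencePiece vocab should behave the same), and loading it requires network access plus the `sentencepiece` package.

```python
from transformers import CamembertTokenizer

# Checkpoint name is an assumption; any CamemBERT SentencePiece checkpoint should work.
tok = CamembertTokenizer.from_pretrained("almanach/camembert-base")

ids = tok.convert_tokens_to_ids(tok.tokenize("J'aime le camembert"))
single = tok.build_inputs_with_special_tokens(ids)
pair = tok.build_inputs_with_special_tokens([10, 11], [12, 13])

print(tok.convert_ids_to_tokens(single)[0], tok.convert_ids_to_tokens(single)[-1])  # <s> </s>
print(pair)  # [cls, 10, 11, sep, sep, 12, 13, sep] -- the '<s> A </s></s> B </s>' layout
```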
|
@requires(backends=('sentencepiece',))
class CamembertTokenizer(PreTrainedTokenizer):
'''
Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Construct a CamemBERT tokenizer. Based on
[SentencePiece](https://github.com/google/sentencepiece).
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (`list[str]`, *optional*, defaults to `['<s>NOTUSED', '</s>NOTUSED', '<unk>NOTUSED']`):
Additional special tokens used by the tokenizer.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from all hypotheses (lattice)
using the forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
Attributes:
sp_model (`SentencePieceProcessor`):
The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
'''
def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED', '<unk>NOTUSED'], sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
pass
@property
def vocab_size(self):
pass
def get_vocab(self):
pass
def _tokenize(self, text: str) -> list[str]:
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) to an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) to a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (string) to a single string.'''
pass
def __getstate__(self):
pass
def __setstate__(self, d):
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A CamemBERT sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def get_special_tokens_mask(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None, already_has_special_tokens: bool=False) -> list[int]:
'''
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
'''
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Create a mask from the two sequences passed to be used in a sequence-pair classification task. CamemBERT, like
RoBERTa, does not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
'''
pass
| 16
| 7
| 15
| 2
| 10
| 4
| 2
| 0.84
| 1
| 6
| 0
| 0
| 13
| 6
| 13
| 102
| 284
| 48
| 129
| 55
| 95
| 108
| 82
| 34
| 68
| 6
| 3
| 3
| 31
|
1,098
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/camembert/tokenization_camembert_fast.py
|
transformers.models.camembert.tokenization_camembert_fast.CamembertTokenizerFast
|
from shutil import copyfile
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from typing import Optional
import os
class CamembertTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" CamemBERT tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
[`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
[BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (`list[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
Additional special tokens used by the tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = CamembertTokenizer
def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED', '<unk>NOTUSED'], **kwargs):
mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
self.vocab_file = vocab_file
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A CamemBERT sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. CamemBERT, like
RoBERTa, does not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
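As a worked instance of the `create_token_type_ids_from_sequences` contract above, the returned mask is all zeros and only the list lengths matter (placeholder ids, no vocabulary needed):

```python
ids_a, ids_b = [10, 11, 12], [20, 21]            # placeholder token ids
single_mask = [0] * (len(ids_a) + 2)             # <s> A </s>
pair_mask = [0] * (len(ids_a) + len(ids_b) + 4)  # <s> A </s></s> B </s>
print(single_mask)  # [0, 0, 0, 0, 0]
print(pair_mask)    # [0, 0, 0, 0, 0, 0, 0, 0, 0]
```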
|
class CamembertTokenizerFast(PreTrainedTokenizerFast):
'''
Construct a "fast" CamemBERT tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
[`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
[BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
bos_token (`str`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (`list[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
Additional special tokens used by the tokenizer.
'''
def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED', '<unk>NOTUSED'], **kwargs):
pass
def build_inputs_with_special_tokens(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A CamemBERT sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
'''
pass
def create_token_type_ids_from_sequences(self, token_ids_0: list[int], token_ids_1: Optional[list[int]]=None) -> list[int]:
'''
Create a mask from the two sequences passed to be used in a sequence-pair classification task. CamemBERT, like
RoBERTa, does not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`list[int]`):
List of IDs.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of zeros.
'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 5
| 3
| 20
| 2
| 12
| 5
| 3
| 0.97
| 1
| 5
| 0
| 0
| 5
| 1
| 5
| 93
| 159
| 27
| 67
| 33
| 43
| 65
| 32
| 15
| 26
| 5
| 3
| 1
| 13
|
1,099
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/canine/configuration_canine.py
|
transformers.models.canine.configuration_canine.CanineConfig
|
from ...configuration_utils import PretrainedConfig
class CanineConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`CanineModel`]. It is used to instantiate a
CANINE model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the CANINE
[google/canine-s](https://huggingface.co/google/canine-s) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the deep Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoders.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoders.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoders, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 16384):
The maximum sequence length that this model might ever be used with.
type_vocab_size (`int`, *optional*, defaults to 16):
The vocabulary size of the `token_type_ids` passed when calling [`CanineModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 57344):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 57345):
End of stream token id.
downsampling_rate (`int`, *optional*, defaults to 4):
The rate at which to downsample the original character sequence length before applying the deep Transformer
encoder.
upsampling_kernel_size (`int`, *optional*, defaults to 4):
The kernel size (i.e. the number of characters in each window) of the convolutional projection layer when
projecting back from `hidden_size`*2 to `hidden_size`.
num_hash_functions (`int`, *optional*, defaults to 8):
The number of hash functions to use. Each hash function has its own embedding matrix.
num_hash_buckets (`int`, *optional*, defaults to 16384):
The number of hash buckets to use.
local_transformer_stride (`int`, *optional*, defaults to 128):
The stride of the local attention of the first shallow Transformer encoder. Defaults to 128 for good
TPU/XLA memory alignment.
Example:
```python
>>> from transformers import CanineConfig, CanineModel
>>> # Initializing a CANINE google/canine-s style configuration
>>> configuration = CanineConfig()
>>> # Initializing a model (with random weights) from the google/canine-s style configuration
>>> model = CanineModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'canine'
def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=57344, eos_token_id=57345, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, **kwargs):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.type_vocab_size = type_vocab_size
self.layer_norm_eps = layer_norm_eps
self.downsampling_rate = downsampling_rate
self.upsampling_kernel_size = upsampling_kernel_size
self.num_hash_functions = num_hash_functions
self.num_hash_buckets = num_hash_buckets
self.local_transformer_stride = local_transformer_stride
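A back-of-the-envelope check of how `downsampling_rate` shrinks the sequence seen by the deep encoder; the 2048-character input length is an arbitrary example and `CanineConfig` is imported from the installed `transformers` package.

```python
from transformers import CanineConfig

config = CanineConfig()
chars = 2048                                # arbitrary character-level input length
print(chars // config.downsampling_rate)    # 512 positions enter the deep Transformer stack
print(config.max_position_embeddings)       # 16384 characters supported at the input
```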
|
class CanineConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`CanineModel`]. It is used to instantiate a
CANINE model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the CANINE
[google/canine-s](https://huggingface.co/google/canine-s) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the deep Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoders.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoders.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoders, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 16384):
The maximum sequence length that this model might ever be used with.
type_vocab_size (`int`, *optional*, defaults to 16):
The vocabulary size of the `token_type_ids` passed when calling [`CanineModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 57344):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 57345):
End of stream token id.
downsampling_rate (`int`, *optional*, defaults to 4):
The rate at which to downsample the original character sequence length before applying the deep Transformer
encoder.
upsampling_kernel_size (`int`, *optional*, defaults to 4):
The kernel size (i.e. the number of characters in each window) of the convolutional projection layer when
projecting back from `hidden_size`*2 to `hidden_size`.
num_hash_functions (`int`, *optional*, defaults to 8):
The number of hash functions to use. Each hash function has its own embedding matrix.
num_hash_buckets (`int`, *optional*, defaults to 16384):
The number of hash buckets to use.
local_transformer_stride (`int`, *optional*, defaults to 128):
The stride of the local attention of the first shallow Transformer encoder. Defaults to 128 for good
TPU/XLA memory alignment.
Example:
```python
>>> from transformers import CanineConfig, CanineModel
>>> # Initializing a CANINE google/canine-s style configuration
>>> configuration = CanineConfig()
>>> # Initializing a model (with random weights) from the google/canine-s style configuration
>>> model = CanineModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=57344, eos_token_id=57345, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, **kwargs):
pass
| 2
| 1
| 43
| 2
| 40
| 2
| 1
| 1.48
| 1
| 1
| 0
| 0
| 1
| 16
| 1
| 1
| 115
| 12
| 42
| 41
| 18
| 62
| 20
| 19
| 18
| 1
| 1
| 0
| 1
|