Schema (per column: dtype and value range; for string columns the range is over string length; ⌀ marks a nullable column):

| column | dtype | min | max |
|---|---|---|---|
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length) ⌀ | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
id: 3,100
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/modeling_idefics3.py
class_name: transformers.models.idefics3.modeling_idefics3.Idefics3VisionEmbeddings
human_written_code:
import torch
from torch import nn
from .configuration_idefics3 import Idefics3Config, Idefics3VisionConfig
class Idefics3VisionEmbeddings(nn.Module):
"""
This is a modified version of `siglip.modeling_siglip.SiglipVisionEmbeddings` to enable images of variable
resolution.
The modifications are adapted from [Patch n' Pack: NaViT, a Vision Transformer for any Aspect Ratio and Resolution](https://huggingface.co/papers/2307.06304)
which allows treating images in their native aspect ratio and without the need to resize them to the same
fixed size. In particular, we start from the original pre-trained SigLIP model
(which uses fixed-size square images) and adapt it by training on images of variable resolutions.
"""
def __init__(self, config: Idefics3VisionConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, padding='valid')
self.num_patches_per_side = self.image_size // self.patch_size
self.num_patches = self.num_patches_per_side ** 2
self.num_positions = self.num_patches
self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor) -> torch.Tensor:
batch_size, _, max_im_h, max_im_w = pixel_values.shape
patch_embeds = self.patch_embedding(pixel_values)
embeddings = patch_embeds.flatten(2).transpose(1, 2)
max_nb_patches_h, max_nb_patches_w = (max_im_h // self.patch_size, max_im_w // self.patch_size)
boundaries = torch.arange(1 / self.num_patches_per_side, 1.0, 1 / self.num_patches_per_side, device=pixel_values.device)
position_ids = torch.full(size=(batch_size, max_nb_patches_h * max_nb_patches_w), fill_value=0, device=pixel_values.device)
for batch_idx, p_attn_mask in enumerate(patch_attention_mask):
nb_patches_h = p_attn_mask[:, 0].sum()
nb_patches_w = p_attn_mask[0].sum()
h_indices = torch.arange(nb_patches_h, device=position_ids.device, dtype=pixel_values.dtype)
w_indices = torch.arange(nb_patches_w, device=position_ids.device, dtype=pixel_values.dtype)
fractional_coords_h = h_indices / nb_patches_h * (1 - 1e-06)
fractional_coords_w = w_indices / nb_patches_w * (1 - 1e-06)
bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries, right=True)
bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries, right=True)
pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side + bucket_coords_w).flatten()
position_ids[batch_idx][p_attn_mask.view(-1)] = pos_ids
embeddings = embeddings + self.position_embedding(position_ids)
return embeddings
class_skeleton:
class Idefics3VisionEmbeddings(nn.Module):
'''
This is a modified version of `siglip.modeling_siglip.SiglipVisionEmbeddings` to enable images of variable
resolution.
The modifications are adapted from [Patch n' Pack: NaViT, a Vision Transformer for any Aspect Ratio and Resolution](https://huggingface.co/papers/2307.06304)
which allows treating images in their native aspect ratio and without the need to resize them to the same
fixed size. In particular, we start from the original pre-trained SigLIP model
(which uses fixed-size square images) and adapt it by training on images of variable resolutions.
'''
def __init__(self, config: Idefics3VisionConfig):
pass
def forward(self, pixel_values: torch.FloatTensor, patch_attention_mask: torch.BoolTensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=1, AvgCountLine=22, AvgCountLineBlank=5, AvgCountLineCode=18, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0.22, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=8, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=56, CountLineBlank=12, CountLineCode=36, CountLineCodeDecl=25, CountLineCodeExe=33, CountLineComment=8, CountStmt=30, CountStmtDecl=25, CountStmtExe=27, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3
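A note on the bucketized position ids in the forward above: each valid patch gets a fractional (row, col) coordinate in [0, 1), which `torch.bucketize` snaps onto the fixed pretrained grid. A minimal standalone sketch, assuming a hypothetical 4x4 position grid (the model derives the grid size from `image_size // patch_size`):

```python
# Sketch of the bucketized position-id lookup used by Idefics3VisionEmbeddings,
# with an assumed 4x4 position grid for readability.
import torch

num_patches_per_side = 4
boundaries = torch.arange(1 / num_patches_per_side, 1.0, 1 / num_patches_per_side)

# An image that only fills 2 patch rows and 3 patch columns of the padded canvas:
nb_patches_h, nb_patches_w = 2, 3
frac_h = torch.arange(nb_patches_h) / nb_patches_h * (1 - 1e-6)
frac_w = torch.arange(nb_patches_w) / nb_patches_w * (1 - 1e-6)
bucket_h = torch.bucketize(frac_h, boundaries, right=True)  # tensor([0, 1])
bucket_w = torch.bucketize(frac_w, boundaries, right=True)  # tensor([0, 1, 2])
pos_ids = (bucket_h[:, None] * num_patches_per_side + bucket_w).flatten()
print(pos_ids)  # tensor([0, 1, 2, 4, 5, 6])
```

Only patches whose attention-mask entry is `True` receive computed ids, so every image reuses the same `num_patches_per_side ** 2` learned position embeddings regardless of its resolution.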
id: 3,101
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/modeling_idefics3.py
class_name: transformers.models.idefics3.modeling_idefics3.Idefics3VisionMLP
human_written_code:
from torch import nn
import torch
from ...activations import ACT2FN
class Idefics3VisionMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
class_skeleton:
class Idefics3VisionMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=0, AvgCountLineCode=6, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=13, CountLineBlank=1, CountLineCode=12, CountLineCodeDecl=7, CountLineCodeExe=9, CountLineComment=0, CountStmt=12, CountStmtDecl=7, CountStmtExe=9, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
id: 3,102
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/modeling_idefics3.py
class_name: transformers.models.idefics3.modeling_idefics3.Idefics3VisionTransformer
human_written_code:
from .configuration_idefics3 import Idefics3Config, Idefics3VisionConfig
from torch import nn
from ...modeling_outputs import BaseModelOutput, ModelOutput
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ...utils.generic import check_model_inputs
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
import torch
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
@auto_docstring(custom_intro='\n The Idefics3 Vision Transformer Model outputting raw image embedding.\n ')
class Idefics3VisionTransformer(Idefics3PreTrainedModel):
config: Idefics3VisionConfig
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_can_record_outputs = {'hidden_states': Idefics3EncoderLayer, 'attentions': Idefics3VisionAttention}
def __init__(self, config: Idefics3VisionConfig):
super().__init__(config)
embed_dim = config.hidden_size
self.embeddings = Idefics3VisionEmbeddings(config)
self.encoder = Idefics3Encoder(config)
self.patch_size = config.patch_size
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, value):
self.embeddings = value
@check_model_inputs
def forward(self, pixel_values, patch_attention_mask: Optional[torch.BoolTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutput]:
batch_size = pixel_values.size(0)
if patch_attention_mask is None:
patch_size = self.patch_size
patch_attention_mask = torch.ones((batch_size, pixel_values.size(2) // patch_size, pixel_values.size(3) // patch_size))
patch_attention_mask = patch_attention_mask.to(dtype=torch.bool, device=pixel_values.device)
hidden_states = self.embeddings(pixel_values=pixel_values, patch_attention_mask=patch_attention_mask)
patch_attention_mask = patch_attention_mask.view(batch_size, -1)
if self.config._attn_implementation != 'flash_attention_2':
patch_attention_mask = _prepare_4d_attention_mask(patch_attention_mask, hidden_states.dtype)
elif not torch.any(~patch_attention_mask):
patch_attention_mask = None
encoder_outputs: BaseModelOutput = self.encoder(inputs_embeds=hidden_states, attention_mask=patch_attention_mask)
last_hidden_state = encoder_outputs.last_hidden_state
last_hidden_state = self.post_layernorm(last_hidden_state)
return BaseModelOutput(last_hidden_state=last_hidden_state)
class_skeleton:
@auto_docstring(custom_intro='\n The Idefics3 Vision Transformer Model outputting raw image embedding.\n ')
class Idefics3VisionTransformer(Idefics3PreTrainedModel):
def __init__(self, config: Idefics3VisionConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
@check_model_inputs
def forward(self, pixel_values, patch_attention_mask: Optional[torch.BoolTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutput]:
pass
metrics: total_program_units=7, total_doc_str=0, AvgCountLine=17, AvgCountLineBlank=2, AvgCountLineCode=15, AvgCountLineComment=1, AvgCyclomatic=3, CommentToCodeRatio=0.08, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=5, CountDeclMethod=4, CountDeclMethodAll=5, CountLine=78, CountLineBlank=12, CountLineCode=61, CountLineCodeDecl=25, CountLineCodeExe=49, CountLineComment=5, CountStmt=35, CountStmtDecl=18, CountStmtExe=30, MaxCyclomatic=8, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=11
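When `patch_attention_mask` is `None`, the forward above fabricates an all-ones boolean mask over the patch grid before flattening it for the encoder. A shape-only sketch, assuming `patch_size=14` and a 224x224 input (both values hypothetical here):

```python
import torch

batch_size, patch_size = 2, 14
pixel_values = torch.randn(batch_size, 3, 224, 224)
# One boolean per patch: (batch, H // patch, W // patch), later flattened
# to (batch, num_patches) before it reaches the encoder.
patch_attention_mask = torch.ones(
    (batch_size, pixel_values.size(2) // patch_size, pixel_values.size(3) // patch_size),
    dtype=torch.bool,
)
print(patch_attention_mask.shape)                       # torch.Size([2, 16, 16])
print(patch_attention_mask.view(batch_size, -1).shape)  # torch.Size([2, 256])
```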
id: 3,103
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/processing_idefics3.py
class_name: transformers.models.idefics3.processing_idefics3.Idefics3ImagesKwargs
human_written_code:
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from typing import TYPE_CHECKING, Optional, Union
class Idefics3ImagesKwargs(ImagesKwargs, total=False):
return_row_col_info: Optional[bool]
max_image_size: Optional[dict[str, int]]
class_skeleton:
class Idefics3ImagesKwargs(ImagesKwargs, total=False):
pass
metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=2, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=3, CountLineBlank=0, CountLineCode=3, CountLineCodeDecl=1, CountLineCodeExe=2, CountLineComment=0, CountStmt=3, CountStmtDecl=1, CountStmtExe=2, MaxCyclomatic=0, MaxInheritanceTree=2, MaxNesting=0, SumCyclomatic=0
id: 3,104
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/processing_idefics3.py
class_name: transformers.models.idefics3.processing_idefics3.Idefics3Processor
human_written_code:
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...image_utils import ImageInput, is_valid_image, load_image
from itertools import accumulate
import re
from typing import TYPE_CHECKING, Optional, Union
from ...tokenization_utils_base import AddedToken, BatchEncoding, TextInput
from ...feature_extraction_utils import BatchFeature
import numpy as np
class Idefics3Processor(ProcessorMixin):
"""
Constructs an Idefics3 processor which wraps a Llama tokenizer and Idefics3 image processor into a single processor.
[`Idefics3Processor`] offers all the functionalities of [`Idefics3ImageProcessor`] and [`Idefics3TokenizerFast`]. See
the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information.
Args:
image_processor (`Idefics3ImageProcessor`):
An instance of [`Idefics3ImageProcessor`]. The image processor is a required input.
tokenizer (`PreTrainedTokenizerBase`, *optional*):
An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
image_seq_len (`int`, *optional*, defaults to 169):
The length of the image sequence i.e. the number of <image> tokens per image in the input.
This parameter is used to build the string from the input prompt and image tokens and should match the
value the model used. It is computed as: image_seq_len = int(((image_size // patch_size) ** 2) / (scale_factor**2))
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'Idefics3ImageProcessor'
tokenizer_class = 'AutoTokenizer'
def __init__(self, image_processor, tokenizer=None, image_seq_len: int=169, chat_template: Optional[str]=None, **kwargs):
self.fake_image_token = AddedToken('<fake_token_around_image>', normalized=False, special=True).content
self.image_token = AddedToken('<image>', normalized=False, special=True).content
self.end_of_utterance_token = AddedToken('<end_of_utterance>', normalized=False, special=True).content
self.global_image_tag = '<global-img>'
self.image_seq_len = image_seq_len
self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
self.fake_image_token_id = tokenizer.convert_tokens_to_ids(self.fake_image_token)
self.global_image_token_id = tokenizer.convert_tokens_to_ids(self.global_image_tag)
self.row_col_ids = [tokenizer.convert_tokens_to_ids(f'<row_{i + 1}_col_{j + 1}>') for i in range(6) for j in range(6)]
self._regex_to_remove_extra_special_tokens = re.compile('(\\n?<global-img>\\n?|<row_\\d+_col_\\d+>\\n?)+')
tokens_to_add = {'additional_special_tokens': [self.fake_image_token, self.image_token, self.end_of_utterance_token]}
tokenizer.add_special_tokens(tokens_to_add)
self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
super().__init__(image_processor, tokenizer, chat_template=chat_template, **kwargs)
def _extract_images_from_prompts(self, prompts):
prompt_images = []
for prompt in prompts:
images = []
for elem in prompt:
if is_valid_image(elem):
images.append(elem)
elif is_url(elem):
images.append(load_image(elem))
prompt_images.append(images)
return prompt_images
def __call__(self, images: Union[ImageInput, list[ImageInput], list[list[ImageInput]]]=None, text: Union[TextInput, 'PreTokenizedInput', list[TextInput], list['PreTokenizedInput']]=None, audio=None, videos=None, image_seq_len: Optional[int]=None, **kwargs: Unpack[Idefics3ProcessorKwargs]) -> BatchEncoding:
"""
Processes the input prompts and returns a BatchEncoding.
Example:
```python
>>> import requests
>>> from transformers import Idefics3Processor
>>> from transformers.image_utils import load_image
>>> processor = Idefics3Processor.from_pretrained("HuggingFaceM4/Idefics3-8B-Llama3")
>>> processor.image_processor.do_image_splitting = False # Force as False to simplify the example
>>> url1 = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
>>> url2 = "https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg"
>>> image1, image2 = load_image(url1), load_image(url2)
>>> images = [[image1], [image2]]
>>> text = [
... "<image>In this image, we see",
... "bla bla bla<image>",
... ]
>>> outputs = processor(images=images, text=text, return_tensors="pt", padding=True)
>>> input_ids = outputs.input_ids
>>> input_tokens = processor.tokenizer.batch_decode(input_ids)
>>> print(input_tokens)
['<|begin_of_text|><fake_token_around_image><global-img>((<image>)*169)<fake_token_around_image> In this image, we see', '<|reserved_special_token_0|><|reserved_special_token_0|><|reserved_special_token_0|><|begin_of_text|>bla bla bla<fake_token_around_image><global-img>((<image>)*169)<fake_token_around_image>']
```
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`, *optional*):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. If it is of type `list[ImageInput]`, it is assumed to be for a single prompt, i.e. of batch size 1.
text (`Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]`, *optional*):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
Wherever an image token, `<image>`, is encountered, it is expanded to
`<fake_token_around_image>` + `<row_x_col_y>` + `<image>` * `image_seq_len` + `<fake_token_around_image>`.
image_seq_len (`int`, *optional*):
The length of the image sequence. If not provided, the default value of self.image_seq_len is used.
image_seq_len should be equal to int(((image_size // patch_size) ** 2) / (scale_factor**2))
return_tensors (`Union[str, TensorType]`, *optional*):
If set, will return tensors of a particular framework. See [`PreTrainedTokenizerFast.__call__`] for more
information.
"""
if text is None and images is None:
raise ValueError('You must provide either `text` or `images`.')
output_kwargs = self._merge_kwargs(Idefics3ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
image_seq_len = image_seq_len if image_seq_len is not None else self.image_seq_len
return_mm_token_type_ids = output_kwargs['text_kwargs'].pop('return_mm_token_type_ids', False)
return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None)
n_images_in_text = []
n_images_in_images = []
inputs = {}
if text is not None:
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and (not isinstance(text[0], str)):
raise ValueError('Invalid input text. Please provide a string, or a list of strings')
n_images_in_text = [sample.count(self.image_token) for sample in text]
if images is not None:
if is_image_or_image_url(images):
images = [[images]]
elif isinstance(images, (list, tuple)) and is_image_or_image_url(images[0]):
if text is not None:
if sum(n_images_in_text) != len(images):
raise ValueError(f'The total number of {self.image_token} tokens in the prompts should be the same as the number of images passed. Found {sum(n_images_in_text)} {self.image_token} tokens and {len(images)} images.')
cumsum_images_in_text = [0] + list(accumulate(n_images_in_text))
images = [images[cumsum_images_in_text[i]:cumsum_images_in_text[i + 1]] for i in range(len(n_images_in_text))]
else:
images = [images]
elif not isinstance(images, (list, tuple)) and (not isinstance(images[0], (list, tuple))) and (not is_image_or_image_url(images[0][0])):
raise ValueError('Invalid input images. Please provide a single image or a list of images or a list of list of images.')
n_images_in_images = [len(sample) for sample in images]
images = [[load_image(im) if is_url(im) else im for im in sample] for sample in images]
image_inputs = self.image_processor(images, **output_kwargs['images_kwargs'])
inputs.update(image_inputs)
if text is not None:
if n_images_in_images != n_images_in_text:
raise ValueError(f'The number of images in the text {n_images_in_text} and images {n_images_in_images} should be the same.')
image_rows = inputs.pop('rows', [[0] * len(text)])
image_cols = inputs.pop('cols', [[0] * len(text)])
fake_image_token = self.fake_image_token
image_token = self.image_token
global_img_token = self.global_image_tag
prompt_strings = []
batch_image_seq_lengths = []
for sample, sample_rows, sample_cols in zip(text, image_rows, image_cols):
image_prompt_strings = []
image_seq_lengths = []
for n_rows, n_cols in zip(sample_rows, sample_cols):
image_prompt_string = get_image_prompt_string(n_rows, n_cols, image_seq_len, image_token=image_token, fake_token_around_image=fake_image_token, global_img_token=global_img_token)
row_length = (self.image_seq_len + 2) * n_cols + 1
image_seq_lengths.append(self.image_seq_len + 3 + row_length * n_rows)
image_prompt_strings.append(image_prompt_string)
batch_image_seq_lengths.append(image_seq_lengths)
split_sample = sample.split(image_token)
if len(split_sample) == 0:
raise ValueError('The image token should be present in the text.')
sample = split_sample[0]
for i, image_prompt_string in enumerate(image_prompt_strings):
sample += image_prompt_string + split_sample[i + 1]
prompt_strings.append(sample)
text_inputs = self.tokenizer(prompt_strings, **output_kwargs['text_kwargs'])
self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=['image'])
inputs.update(text_inputs)
elif text is not None:
if any(n_images_in_text):
raise ValueError(f'Found {sum(n_images_in_text)} {self.image_token} tokens in the text but no images were passed.')
text_inputs = self.tokenizer(text=text, **output_kwargs['text_kwargs'])
inputs.update(text_inputs)
if return_mm_token_type_ids:
array_ids = np.array(inputs['input_ids'])
mm_token_type_ids = np.zeros_like(array_ids)
for i, seq_lengths in enumerate(batch_image_seq_lengths):
image_start_positions = np.where(array_ids[i] == self.fake_image_token_id)[0]
j = 0
for seq_len in seq_lengths:
if j >= len(image_start_positions):
break
start = image_start_positions[j]
end = start + seq_len
mm_token_type_ids[i, start:end] = 1
j = np.searchsorted(image_start_positions, end)
inputs['mm_token_type_ids'] = mm_token_type_ids.tolist()
return BatchFeature(data=inputs, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = Idefics3ProcessorKwargs._defaults.get('images_kwargs', {})
images_kwargs.update(kwargs)
num_image_row_cols = [self.image_processor.get_number_of_image_patches(*image_size, images_kwargs) for image_size in image_sizes]
base_image_length = self.image_seq_len + 3
col_length = self.image_seq_len + 2
num_image_tokens = []
num_image_patches = []
for num_patches, num_rows, num_cols in num_image_row_cols:
row_length = col_length * num_cols + 1
num_image_tokens.append(base_image_length + row_length * num_rows)
num_image_patches.append(num_patches)
vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})
return MultiModalData(**vision_data)
class_skeleton:
class Idefics3Processor(ProcessorMixin):
'''
Constructs an Idefics3 processor which wraps a Llama tokenizer and Idefics3 image processor into a single processor.
[`Idefics3Processor`] offers all the functionalities of [`Idefics3ImageProcessor`] and [`Idefics3TokenizerFast`]. See
the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information.
Args:
image_processor (`Idefics3ImageProcessor`):
An instance of [`Idefics3ImageProcessor`]. The image processor is a required input.
tokenizer (`PreTrainedTokenizerBase`, *optional*):
An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
image_seq_len (`int`, *optional*, defaults to 169):
The length of the image sequence i.e. the number of <image> tokens per image in the input.
This parameter is used to build the string from the input prompt and image tokens and should match the
value the model used. It is computed as: image_seq_len = int(((image_size // patch_size) ** 2) / (scale_factor**2))
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
'''
def __init__(self, image_processor, tokenizer=None, image_seq_len: int=169, chat_template: Optional[str]=None, **kwargs):
pass
def _extract_images_from_prompts(self, prompts):
pass
def __call__(self, images: Union[ImageInput, list[ImageInput], list[list[ImageInput]]]=None, text: Union[TextInput, 'PreTokenizedInput', list[TextInput], list['PreTokenizedInput']]=None, audio=None, videos=None, image_seq_len: Optional[int]=None, **kwargs: Unpack[Idefics3ProcessorKwargs]) -> BatchEncoding:
'''
Processes the input prompts and returns a BatchEncoding.
Example:
```python
>>> import requests
>>> from transformers import Idefics3Processor
>>> from transformers.image_utils import load_image
>>> processor = Idefics3Processor.from_pretrained("HuggingFaceM4/Idefics3-8B-Llama3")
>>> processor.image_processor.do_image_splitting = False # Force as False to simplify the example
>>> url1 = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
>>> url2 = "https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg"
>>> image1, image2 = load_image(url1), load_image(url2)
>>> images = [[image1], [image2]]
>>> text = [
... "<image>In this image, we see",
... "bla bla bla<image>",
... ]
>>> outputs = processor(images=images, text=text, return_tensors="pt", padding=True)
>>> input_ids = outputs.input_ids
>>> input_tokens = processor.tokenizer.batch_decode(input_ids)
>>> print(input_tokens)
['<|begin_of_text|><fake_token_around_image><global-img>((<image>)*169)<fake_token_around_image> In this image, we see', '<|reserved_special_token_0|><|reserved_special_token_0|><|reserved_special_token_0|><|begin_of_text|>bla bla bla<fake_token_around_image><global-img>((<image>)*169)<fake_token_around_image>']
```
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`, *optional*):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. If it is of type `list[ImageInput]`, it is assumed to be for a single prompt, i.e. of batch size 1.
text (`Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]`, *optional*):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
Wherever an image token, `<image>`, is encountered, it is expanded to
`<fake_token_around_image>` + `<row_x_col_y>` + `<image>` * `image_seq_len` + `<fake_token_around_image>`.
image_seq_len (`int`, *optional*):
The length of the image sequence. If not provided, the default value of self.image_seq_len is used.
image_seq_len should be equal to int(((image_size // patch_size) ** 2) / (scale_factor**2))
return_tensors (`Union[str, TensorType]`, *optional*):
If set, will return tensors of a particular framework. See [`PreTrainedTokenizerFast.__call__`] for more
information.
'''
pass
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
'''
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
'''
pass
metrics: total_program_units=5, total_doc_str=3, AvgCountLine=36, AvgCountLineBlank=5, AvgCountLineCode=23, AvgCountLineComment=9, AvgCyclomatic=5, CommentToCodeRatio=0.5, CountClassBase=1, CountClassCoupled=12, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=6, CountDeclInstanceVariable=6, CountDeclMethod=6, CountDeclMethodAll=23, CountLine=247, CountLineBlank=36, CountLineCode=141, CountLineCodeDecl=52, CountLineCodeExe=125, CountLineComment=71, CountStmt=93, CountStmtDecl=43, CountStmtExe=86, MaxCyclomatic=21, MaxInheritanceTree=2, MaxNesting=4, SumCyclomatic=32
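Two pieces of arithmetic in this processor are worth making concrete. The sketch below evaluates the `image_seq_len` formula from the class docstring and the per-image placeholder length computed in `__call__`; the `image_size`/`patch_size`/`scale_factor` values are assumptions chosen to reproduce the documented default of 169.

```python
# Worked example of the two token-count formulas in Idefics3Processor.
image_size, patch_size, scale_factor = 364, 14, 2  # assumed values; yield the default 169
image_seq_len = int(((image_size // patch_size) ** 2) / (scale_factor**2))
print(image_seq_len)  # 169

# Per-image placeholder length for a hypothetical 2x2 split image,
# mirroring the row_length arithmetic in __call__ above:
n_rows = n_cols = 2
row_length = (image_seq_len + 2) * n_cols + 1
print(image_seq_len + 3 + row_length * n_rows)  # 858
```

With no splitting (`n_rows = n_cols = 0`) the count reduces to `image_seq_len + 3`, i.e. the global image wrapped by the fake token, the `<global-img>` tag, and a closing fake token.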
id: 3,105
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/idefics3/processing_idefics3.py
class_name: transformers.models.idefics3.processing_idefics3.Idefics3ProcessorKwargs
human_written_code:
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
class Idefics3ProcessorKwargs(ProcessingKwargs, total=False):
images_kwargs: Idefics3ImagesKwargs
_defaults = {'text_kwargs': {'add_special_tokens': True, 'padding': False, 'is_split_into_words': False, 'return_mm_token_type_ids': False}, 'images_kwargs': {'return_row_col_info': True}}
class_skeleton:
class Idefics3ProcessorKwargs(ProcessingKwargs, total=False):
pass
metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=2, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=13, CountLineBlank=1, CountLineCode=12, CountLineCodeDecl=2, CountLineCodeExe=11, CountLineComment=0, CountStmt=3, CountStmtDecl=2, CountStmtExe=2, MaxCyclomatic=0, MaxInheritanceTree=3, MaxNesting=0, SumCyclomatic=0
id: 3,106
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/configuration_ijepa.py
class_name: transformers.models.ijepa.configuration_ijepa.IJepaConfig
human_written_code:
from ...configuration_utils import PretrainedConfig
class IJepaConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`IJepaModel`]. It is used to instantiate an IJEPA
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the I-JEPA
[facebook/ijepa_vith14_1k](https://huggingface.co/facebook/ijepa_vith14_1k) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
pooler_output_size (`int`, *optional*):
Dimensionality of the pooler layer. If None, defaults to `hidden_size`.
pooler_act (`str`, *optional*, defaults to `"tanh"`):
The activation function to be used by the pooler.
Example:
```python
>>> from transformers import IJepaConfig, IJepaModel
>>> # Initializing an IJEPA ijepa-base-patch16-224 style configuration
>>> configuration = IJepaConfig()
>>> # Initializing a model (with random weights) from the ijepa-base-patch16-224 style configuration
>>> model = IJepaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'ijepa'
def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, pooler_output_size=None, pooler_act='tanh', **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.pooler_output_size = pooler_output_size if pooler_output_size else hidden_size
self.pooler_act = pooler_act
class_skeleton:
class IJepaConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`IJepaModel`]. It is used to instantiate an IJEPA
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the I-JEPA
[facebook/ijepa_vith14_1k](https://huggingface.co/facebook/ijepa_vith14_1k) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
pooler_output_size (`int`, *optional*):
Dimensionality of the pooler layer. If None, defaults to `hidden_size`.
pooler_act (`str`, *optional*, defaults to `"tanh"`):
The activation function to be used by the pooler.
Example:
```python
>>> from transformers import IJepaConfig, IJepaModel
>>> # Initializing an IJEPA ijepa-base-patch16-224 style configuration
>>> configuration = IJepaConfig()
>>> # Initializing a model (with random weights) from the ijepa-base-patch16-224 style configuration
>>> model = IJepaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, pooler_output_size=None, pooler_act='tanh', **kwargs):
pass
metrics: total_program_units=2, total_doc_str=1, AvgCountLine=32, AvgCountLineBlank=1, AvgCountLineCode=31, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=1.36, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=13, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=89, CountLineBlank=11, CountLineCode=33, CountLineCodeDecl=32, CountLineCodeExe=15, CountLineComment=45, CountStmt=17, CountStmtDecl=16, CountStmtExe=15, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=1
id: 3,107
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/modeling_ijepa.py
class_name: transformers.models.ijepa.modeling_ijepa.IJepaAttention
human_written_code:
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
import torch.nn as nn
import torch
from .configuration_ijepa import IJepaConfig
from typing import Callable, Optional, Union
class IJepaAttention(nn.Module):
def __init__(self, config: IJepaConfig):
super().__init__()
self.attention = IJepaSelfAttention(config)
self.output = IJepaSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads: set[int]):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads)
self.attention.query = prune_linear_layer(self.attention.query, index)
self.attention.key = prune_linear_layer(self.attention.key, index)
self.attention.value = prune_linear_layer(self.attention.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
self_attn_output, _ = self.attention(hidden_states, head_mask)
output = self.output(self_attn_output, hidden_states)
return output
class_skeleton:
class IJepaAttention(nn.Module):
def __init__(self, config: IJepaConfig):
pass
def prune_heads(self, heads: set[int]):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
metrics: total_program_units=4, total_doc_str=0, AvgCountLine=11, AvgCountLineBlank=1, AvgCountLineCode=9, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.1, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=3, CountClassDerived=1, CountDeclInstanceMethod=3, CountDeclInstanceVariable=3, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=37, CountLineBlank=6, CountLineCode=29, CountLineCodeDecl=16, CountLineCodeExe=20, CountLineComment=3, CountStmt=22, CountStmtDecl=11, CountStmtExe=18, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4
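The effect of `prune_heads` above is easiest to see on the projection sizes: removing heads shrinks `all_head_size`, and with it the out_features of the query/key/value layers and the in_features of the output projection. A small arithmetic sketch with assumed dimensions:

```python
# Sketch of head pruning's effect on projection shapes, assuming
# hidden_size=768 split over 12 heads (head size 64).
num_heads, head_size = 12, 64
all_head_size = num_heads * head_size           # 768
pruned = {0, 3}                                 # hypothetical heads to prune
new_num_heads = num_heads - len(pruned)         # 10
new_all_head_size = new_num_heads * head_size   # 640
# query/key/value become Linear(768, 640); output.dense becomes Linear(640, 768).
print(new_num_heads, new_all_head_size)
```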
id: 3,108
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/modeling_ijepa.py
class_name: transformers.models.ijepa.modeling_ijepa.IJepaEmbeddings
human_written_code:
import torch
from ...utils import TransformersKwargs, auto_docstring, torch_int
from .configuration_ijepa import IJepaConfig
import torch.nn as nn
from typing import Callable, Optional, Union
class IJepaEmbeddings(nn.Module):
"""
Construct the position and patch embeddings. Optionally, also the mask token.
"""
def __init__(self, config: IJepaConfig, use_mask_token: bool=False) -> None:
super().__init__()
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None
self.patch_embeddings = IJepaPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.randn(1, num_patches, config.hidden_size))
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.patch_size = config.patch_size
self.config = config
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows interpolating the pre-trained position encodings so the model can be used on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1]
num_positions = self.position_embeddings.shape[1]
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embeddings
patch_pos_embed = self.position_embeddings
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return patch_pos_embed
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
if bool_masked_pos is not None:
seq_length = embeddings.shape[1]
mask_tokens = self.mask_token.expand(batch_size, seq_length, -1)
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
class_skeleton:
class IJepaEmbeddings(nn.Module):
'''
Construct the position and patch embeddings. Optionally, also the mask token.
'''
def __init__(self, config: IJepaConfig, use_mask_token: bool=False) -> None:
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method allows interpolating the pre-trained position encodings so the model can be used on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
'''
pass
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> torch.Tensor:
pass
metrics: total_program_units=4, total_doc_str=2, AvgCountLine=24, AvgCountLineBlank=5, AvgCountLineCode=16, AvgCountLineComment=3, AvgCyclomatic=2, CommentToCodeRatio=0.27, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=6, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=79, CountLineBlank=17, CountLineCode=49, CountLineCodeDecl=28, CountLineCodeExe=40, CountLineComment=13, CountStmt=38, CountStmtDecl=23, CountStmtExe=34, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=7
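A runnable sketch of the resize path in `interpolate_pos_encoding` above, assuming 196 pretrained positions (a 14x14 grid from 224px images with patch 16) and a 320x320 input; `reshape` is used in place of `view` to keep the sketch stride-agnostic:

```python
import torch
from torch import nn

dim, num_positions = 768, 196
pos = torch.randn(1, num_positions, dim)                     # pretrained position embeddings
side = int(num_positions ** 0.5)                             # 14
grid = pos.reshape(1, side, side, dim).permute(0, 3, 1, 2)   # (1, 768, 14, 14)
new_h = new_w = 320 // 16                                    # 20 patches per side
grid = nn.functional.interpolate(grid, size=(new_h, new_w), mode="bicubic", align_corners=False)
pos_new = grid.permute(0, 2, 3, 1).reshape(1, -1, dim)
print(pos_new.shape)                                         # torch.Size([1, 400, 768])
```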
id: 3,109
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/modeling_ijepa.py
class_name: transformers.models.ijepa.modeling_ijepa.IJepaEncoder
human_written_code:
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
import torch.nn as nn
import torch
from typing import Callable, Optional, Union
from .configuration_ijepa import IJepaConfig
class IJepaEncoder(nn.Module):
def __init__(self, config: IJepaConfig):
super().__init__()
self.config = config
self.layer = nn.ModuleList([IJepaLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> BaseModelOutput:
for i, layer_module in enumerate(self.layer):
layer_head_mask = head_mask[i] if head_mask is not None else None
hidden_states = layer_module(hidden_states, layer_head_mask)
return BaseModelOutput(last_hidden_state=hidden_states)
class_skeleton:
class IJepaEncoder(nn.Module):
def __init__(self, config: IJepaConfig):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> BaseModelOutput:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=24, AvgCountLineBlank=4, AvgCountLineCode=20, AvgCountLineComment=0, AvgCyclomatic=6, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=9, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=49, CountLineBlank=8, CountLineCode=41, CountLineCodeDecl=18, CountLineCodeExe=31, CountLineComment=0, CountStmt=24, CountStmtDecl=11, CountStmtExe=21, MaxCyclomatic=10, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=11
id: 3,110
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/modeling_ijepa.py
class_name: transformers.models.ijepa.modeling_ijepa.IJepaForImageClassification
human_written_code:
from typing import Callable, Optional, Union
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, torch_int
from ...utils.generic import can_return_tuple, check_model_inputs
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
import torch
from .configuration_ijepa import IJepaConfig
import torch.nn as nn
@auto_docstring(custom_intro="\n IJepa Model transformer with an image classification head on top (a linear layer on top of the final hidden states)\n e.g. for ImageNet.\n\n <Tip>\n\n Note that it's possible to fine-tune IJepa on higher resolution images than the ones it has been trained on, by\n setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained\n position embeddings to the higher resolution.\n\n </Tip>\n ")
class IJepaForImageClassification(IJepaPreTrainedModel):
def __init__(self, config: IJepaConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.ijepa = IJepaModel(config, add_pooling_layer=False)
self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
self.post_init()
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, interpolate_pos_encoding: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> ImageClassifierOutput:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs: BaseModelOutputWithPooling = self.ijepa(pixel_values, head_mask=head_mask, interpolate_pos_encoding=interpolate_pos_encoding, **kwargs)
sequence_output = outputs.last_hidden_state
logits = self.classifier(sequence_output.mean(dim=1))
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config, **kwargs)
return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
class_skeleton:
@auto_docstring(custom_intro="\n IJepa Model transformer with an image classification head on top (a linear layer on top of the final hidden states)\n e.g. for ImageNet.\n\n <Tip>\n\n Note that it's possible to fine-tune IJepa on higher resolution images than the ones it has been trained on, by\n setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained\n position embeddings to the higher resolution.\n\n </Tip>\n ")
class IJepaForImageClassification(IJepaPreTrainedModel):
def __init__(self, config: IJepaConfig):
pass
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, interpolate_pos_encoding: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> ImageClassifierOutput:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
metrics: total_program_units=6, total_doc_str=1, AvgCountLine=39, AvgCountLineBlank=5, AvgCountLineCode=29, AvgCountLineComment=5, AvgCyclomatic=7, CommentToCodeRatio=0.14, CountClassBase=1, CountClassCoupled=8, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=86, CountLineBlank=11, CountLineCode=66, CountLineCodeDecl=22, CountLineCodeExe=47, CountLineComment=9, CountStmt=33, CountStmtDecl=12, CountStmtExe=30, MaxCyclomatic=12, MaxInheritanceTree=2, MaxNesting=3, SumCyclomatic=14
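Note that the classification head above averages the final hidden states over the patch dimension before the linear layer, rather than reading a CLS token. A toy sketch of that pooling with assumed sizes:

```python
import torch
from torch import nn

hidden = torch.randn(2, 196, 768)   # (batch, num_patches, hidden_size)
classifier = nn.Linear(768, 1000)   # hypothetical 1000-label head
logits = classifier(hidden.mean(dim=1))
print(logits.shape)                 # torch.Size([2, 1000])
```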
id: 3,111
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/modeling_ijepa.py
class_name: transformers.models.ijepa.modeling_ijepa.IJepaIntermediate
human_written_code:
import torch.nn as nn
from .configuration_ijepa import IJepaConfig
from ...activations import ACT2FN
import torch
class IJepaIntermediate(nn.Module):
def __init__(self, config: IJepaConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class_skeleton:
class IJepaIntermediate(nn.Module):
def __init__(self, config: IJepaConfig):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=1, AvgCountLineCode=6, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=14, CountLineBlank=2, CountLineCode=12, CountLineCodeDecl=5, CountLineCodeExe=9, CountLineComment=0, CountStmt=11, CountStmtDecl=5, CountStmtExe=8, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3
id: 3,112
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/modeling_ijepa.py
class_name: transformers.models.ijepa.modeling_ijepa.IJepaLayer
human_written_code:
from .configuration_ijepa import IJepaConfig
import torch
import torch.nn as nn
from ...modeling_layers import GradientCheckpointingLayer
from typing import Callable, Optional, Union
class IJepaLayer(GradientCheckpointingLayer):
"""This corresponds to the Block class in the timm implementation."""
def __init__(self, config: IJepaConfig):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = IJepaAttention(config)
self.intermediate = IJepaIntermediate(config)
self.output = IJepaOutput(config)
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
hidden_states_norm = self.layernorm_before(hidden_states)
attention_output = self.attention(hidden_states_norm, head_mask)
hidden_states = attention_output + hidden_states
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
layer_output = self.output(layer_output, hidden_states)
return layer_output
class_skeleton:
class IJepaLayer(GradientCheckpointingLayer):
'''This corresponds to the Block class in the timm implementation.'''
def __init__(self, config: IJepaConfig):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=1, AvgCountLine=18, AvgCountLineBlank=3, AvgCountLineCode=14, AvgCountLineComment=3, AvgCyclomatic=1, CommentToCodeRatio=0.21, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=3, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=7, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=40, CountLineBlank=7, CountLineCode=29, CountLineCodeDecl=19, CountLineCodeExe=21, CountLineComment=6, CountStmt=20, CountStmtDecl=14, CountStmtExe=17, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
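The layer above is a standard pre-norm block: LayerNorm is applied before attention and before the MLP, with a residual add after each. A self-contained sketch of the same dataflow, using a linear layer as a stand-in for attention:

```python
import torch
from torch import nn

dim = 8
ln1, ln2 = nn.LayerNorm(dim), nn.LayerNorm(dim)
attn = nn.Linear(dim, dim)   # stand-in for IJepaAttention in this sketch
mlp = nn.Sequential(nn.Linear(dim, 4 * dim), nn.GELU(), nn.Linear(4 * dim, dim))

x = torch.randn(2, 5, dim)
x = attn(ln1(x)) + x         # attention + first residual
x = mlp(ln2(x)) + x          # intermediate/output + second residual
```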
id: 3,113
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/modeling_ijepa.py
class_name: transformers.models.ijepa.modeling_ijepa.IJepaModel
human_written_code:
from ...utils import TransformersKwargs, auto_docstring, torch_int
from .configuration_ijepa import IJepaConfig
import torch
from ...processing_utils import Unpack
from typing import Callable, Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
import torch.nn as nn
from ...utils.generic import can_return_tuple, check_model_inputs
@auto_docstring
class IJepaModel(IJepaPreTrainedModel):
def __init__(self, config: IJepaConfig, add_pooling_layer: bool=False, use_mask_token: bool=False):
"""
add_pooling_layer (bool, *optional*, defaults to `False`):
Whether to add a pooling layer
use_mask_token (`bool`, *optional*, defaults to `False`):
Whether to use a mask token for masked image modeling.
"""
super().__init__(config)
self.config = config
self.embeddings = IJepaEmbeddings(config, use_mask_token=use_mask_token)
self.encoder = IJepaEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pooler = IJepaPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self) -> IJepaPatchEmbeddings:
return self.embeddings.patch_embeddings
def _prune_heads(self, heads_to_prune: dict[int, list[int]]):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, interpolate_pos_encoding: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPooling:
"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
"""
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype
if pixel_values.dtype != expected_dtype:
pixel_values = pixel_values.to(expected_dtype)
embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding)
encoder_outputs: BaseModelOutput = self.encoder(embedding_output, head_mask=head_mask)
sequence_output = encoder_outputs.last_hidden_state
sequence_output = self.layernorm(sequence_output)
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output)
class_skeleton:
@auto_docstring
class IJepaModel(IJepaPreTrainedModel):
def __init__(self, config: IJepaConfig, add_pooling_layer: bool=False, use_mask_token: bool=False):
'''
add_pooling_layer (bool, *optional*, defaults to `False`):
Whether to add a pooling layer
use_mask_token (`bool`, *optional*, defaults to `False`):
Whether to use a mask token for masked image modeling.
'''
pass
def get_input_embeddings(self) -> IJepaPatchEmbeddings:
pass
def _prune_heads(self, heads_to_prune: dict[int, list[int]]):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
class PreTrainedModel.
'''
pass
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, interpolate_pos_encoding: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> BaseModelOutputWithPooling:
'''
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
'''
pass
metrics: total_program_units=8, total_doc_str=3, AvgCountLine=20, AvgCountLineBlank=2, AvgCountLineCode=14, AvgCountLineComment=4, AvgCyclomatic=4, CommentToCodeRatio=0.23, CountClassBase=1, CountClassCoupled=11, CountClassCoupledModified=6, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=5, CountDeclMethod=4, CountDeclMethodAll=5, CountLine=92, CountLineBlank=12, CountLineCode=65, CountLineCodeDecl=27, CountLineCodeExe=43, CountLineComment=15, CountStmt=33, CountStmtDecl=17, CountStmtExe=28, MaxCyclomatic=9, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=14
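A shape sketch for the `bool_masked_pos` argument documented in the forward above, assuming a 224px input with patch 16 (14x14 = 196 patches); `True` marks patches whose embeddings are swapped for the mask token:

```python
import torch

batch_size, num_patches = 2, 196
bool_masked_pos = torch.zeros(batch_size, num_patches, dtype=torch.bool)
bool_masked_pos[:, :98] = True   # hypothetically mask the first half of the patches
```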
id: 3,114
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/modeling_ijepa.py
class_name: transformers.models.ijepa.modeling_ijepa.IJepaOutput
human_written_code:
import torch
import torch.nn as nn
from .configuration_ijepa import IJepaConfig
class IJepaOutput(nn.Module):
def __init__(self, config: IJepaConfig):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states
class_skeleton:
class IJepaOutput(nn.Module):
def __init__(self, config: IJepaConfig):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
metrics: total_program_units=3, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=1, AvgCountLineCode=5, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=2, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=13, CountLineBlank=3, CountLineCode=10, CountLineCodeDecl=5, CountLineCodeExe=7, CountLineComment=0, CountStmt=10, CountStmtDecl=5, CountStmtExe=7, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
id: 3,115
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/modeling_ijepa.py
class_name: transformers.models.ijepa.modeling_ijepa.IJepaPatchEmbeddings
human_written_code:
from .configuration_ijepa import IJepaConfig
import torch
import torch.nn as nn
import collections.abc
class IJepaPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config: IJepaConfig):
super().__init__()
image_size, patch_size = (config.image_size, config.patch_size)
num_channels, hidden_size = (config.num_channels, config.hidden_size)
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = image_size[1] // patch_size[1] * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
batch_size, num_channels, height, width = pixel_values.shape
if num_channels != self.num_channels:
raise ValueError(f'Make sure that the channel dimension of the pixel values match with the one set in the configuration. Expected {self.num_channels} but got {num_channels}.')
if not interpolate_pos_encoding:
if height != self.image_size[0] or width != self.image_size[1]:
raise ValueError(f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]}).")
embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
return embeddings
class_skeleton:
class IJepaPatchEmbeddings(nn.Module):
'''
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
'''
def __init__(self, config: IJepaConfig):
pass
def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
pass
| 3
| 1
| 15
| 1
| 14
| 0
| 4
| 0.18
| 1
| 5
| 0
| 0
| 2
| 5
| 2
| 12
| 37
| 4
| 28
| 13
| 25
| 5
| 22
| 13
| 19
| 4
| 1
| 2
| 7
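IJepaPatchEmbeddings is a strided Conv2d that turns an image into a token sequence: with kernel and stride both equal to the patch size, the convolution emits one embedding per non-overlapping patch. A standalone sketch under assumed sizes (224x224 RGB input, 16x16 patches, hidden size 768):
```python
import torch
import torch.nn as nn

# Assumed sizes: (224 // 16) ** 2 = 196 patch tokens come out.
patch_size, hidden_size = 16, 768
projection = nn.Conv2d(3, hidden_size, kernel_size=patch_size, stride=patch_size)

pixel_values = torch.randn(1, 3, 224, 224)
embeddings = projection(pixel_values).flatten(2).transpose(1, 2)
print(embeddings.shape)  # torch.Size([1, 196, 768])
```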
|
3,116
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/modeling_ijepa.py
|
transformers.models.ijepa.modeling_ijepa.IJepaPooler
|
from .configuration_ijepa import IJepaConfig
import torch.nn as nn
import torch
from ...activations import ACT2FN
class IJepaPooler(nn.Module):
def __init__(self, config: IJepaConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.pooler_output_size)
self.activation = ACT2FN[config.pooler_act]
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
|
class IJepaPooler(nn.Module):
def __init__(self, config: IJepaConfig):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 5
| 1
| 1
| 0.2
| 1
| 2
| 1
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 10
| 7
| 7
| 2
| 10
| 7
| 7
| 1
| 1
| 0
| 2
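IJepaPooler pools by taking the hidden state of the first token and passing it through a linear layer plus activation. A sketch with Tanh standing in for `ACT2FN[config.pooler_act]` (the real activation depends on the config; sizes are assumed):
```python
import torch
import torch.nn as nn

dense = nn.Linear(768, 768)  # hidden_size -> pooler_output_size, sizes assumed
activation = nn.Tanh()       # stand-in for ACT2FN[config.pooler_act]

hidden_states = torch.randn(2, 197, 768)
pooled = activation(dense(hidden_states[:, 0]))  # keep only the first token
print(pooled.shape)  # torch.Size([2, 768])
```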
|
3,117
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/modeling_ijepa.py
|
transformers.models.ijepa.modeling_ijepa.IJepaPreTrainedModel
|
from typing import Callable, Optional, Union
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_ijepa import IJepaConfig
import torch
import torch.nn as nn
from ...utils import TransformersKwargs, auto_docstring, torch_int
@auto_docstring
class IJepaPreTrainedModel(PreTrainedModel):
config: IJepaConfig
base_model_prefix = 'ijepa'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
_no_split_modules = ['IJepaEmbeddings', 'IJepaLayer']
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {'hidden_states': IJepaLayer, 'attentions': IJepaSelfAttention}
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data = nn.init.trunc_normal_(module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.weight.dtype)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, IJepaEmbeddings):
module.position_embeddings.data = nn.init.trunc_normal_(module.position_embeddings.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.position_embeddings.dtype)
if module.mask_token is not None:
module.mask_token.data.zero_()
|
@auto_docstring
class IJepaPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
'''Initialize the weights'''
pass
| 3
| 1
| 19
| 0
| 16
| 3
| 5
| 0.3
| 1
| 1
| 1
| 2
| 1
| 0
| 1
| 1
| 32
| 2
| 23
| 8
| 21
| 7
| 15
| 8
| 13
| 5
| 1
| 2
| 5
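`_init_weights` above upcasts each parameter to float32 before the truncated-normal draw and casts back, which keeps the initialization numerically stable for half-precision modules. A sketch of that idiom (std=0.02 is an assumed `initializer_range`):
```python
import torch
import torch.nn as nn

# Draw in float32, cast back to the parameter dtype, as _init_weights does
# for Linear/Conv2d modules.
module = nn.Linear(768, 768).to(torch.float16)
module.weight.data = nn.init.trunc_normal_(
    module.weight.data.to(torch.float32), mean=0.0, std=0.02
).to(module.weight.dtype)
if module.bias is not None:
    module.bias.data.zero_()
print(module.weight.dtype)  # torch.float16
```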
|
3,118
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/modeling_ijepa.py
|
transformers.models.ijepa.modeling_ijepa.IJepaSelfAttention
|
import torch.nn as nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from .configuration_ijepa import IJepaConfig
from typing import Callable, Optional, Union
import torch
class IJepaSelfAttention(nn.Module):
def __init__(self, config: IJepaConfig):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError(f'The hidden size {config.hidden_size} is not a multiple of the number of attention heads {config.num_attention_heads}.')
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.dropout_prob = config.attention_probs_dropout_prob
self.scaling = self.attention_head_size ** (-0.5)
self.is_causal = False
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor]:
batch_size = hidden_states.shape[0]
new_shape = (batch_size, -1, self.num_attention_heads, self.attention_head_size)
key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
context_layer, attention_probs = attention_interface(self, query_layer, key_layer, value_layer, head_mask, is_causal=self.is_causal, scaling=self.scaling, dropout=0.0 if not self.training else self.dropout_prob)
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.reshape(new_context_layer_shape)
return (context_layer, attention_probs)
|
class IJepaSelfAttention(nn.Module):
def __init__(self, config: IJepaConfig):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor]:
pass
| 3
| 0
| 18
| 4
| 12
| 2
| 2
| 0.13
| 1
| 6
| 1
| 1
| 3
| 7
| 3
| 13
| 58
| 15
| 38
| 23
| 32
| 5
| 33
| 21
| 29
| 3
| 1
| 1
| 6
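`forward` above projects Q/K/V, reshapes to `(batch, heads, seq, head_dim)`, and dispatches to an attention kernel. A standalone sketch of the split-heads / attend / merge-heads cycle using PyTorch's built-in `scaled_dot_product_attention` (requires torch >= 2.0; sizes are illustrative, and the shared tensor stands in for the three learned projections):
```python
import torch
import torch.nn.functional as F

batch, seq, heads, head_dim = 2, 197, 12, 64  # illustrative sizes

def split_heads(x):
    # (batch, seq, heads * head_dim) -> (batch, heads, seq, head_dim)
    return x.view(batch, -1, heads, head_dim).transpose(1, 2)

hidden = torch.randn(batch, seq, heads * head_dim)
q, k, v = split_heads(hidden), split_heads(hidden), split_heads(hidden)
ctx = F.scaled_dot_product_attention(q, k, v)  # default scale = head_dim ** -0.5
ctx = ctx.transpose(1, 2).reshape(batch, seq, heads * head_dim)  # merge heads
print(ctx.shape)  # torch.Size([2, 197, 768])
```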
|
3,119
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/modeling_ijepa.py
|
transformers.models.ijepa.modeling_ijepa.IJepaSelfOutput
|
import torch
import torch.nn as nn
from .configuration_ijepa import IJepaConfig
class IJepaSelfOutput(nn.Module):
"""
The residual connection is defined in IJepaLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config: IJepaConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
class IJepaSelfOutput(nn.Module):
'''
The residual connection is defined in IJepaLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
'''
def __init__(self, config: IJepaConfig):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 5
| 1
| 4
| 0
| 1
| 0.44
| 1
| 3
| 1
| 0
| 2
| 2
| 2
| 12
| 16
| 3
| 9
| 5
| 6
| 4
| 9
| 5
| 6
| 1
| 1
| 0
| 2
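As the docstring notes, IJepaSelfOutput deliberately omits the residual addition (its `input_tensor` argument goes unused); in the pre-LayerNorm layout the enclosing layer adds the residual. A sketch of that division of labor (sizes assumed):
```python
import torch
import torch.nn as nn

dense, dropout = nn.Linear(768, 768), nn.Dropout(0.1)  # sizes assumed

attn_output = torch.randn(2, 197, 768)
residual = torch.randn(2, 197, 768)
hidden = dropout(dense(attn_output))  # all that SelfOutput itself computes
hidden = hidden + residual            # the enclosing layer adds the residual
print(hidden.shape)  # torch.Size([2, 197, 768])
```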
|
3,120
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/modular_ijepa.py
|
transformers.models.ijepa.modular_ijepa.IJepaEmbeddings
|
from transformers.models.ijepa.configuration_ijepa import IJepaConfig
from ...utils import TransformersKwargs, auto_docstring, torch_int
from typing import Optional, Union
import torch.nn as nn
import torch
from ..vit.modeling_vit import ViTEmbeddings, ViTForImageClassification, ViTModel, ViTPreTrainedModel
class IJepaEmbeddings(ViTEmbeddings):
def __init__(self, config: IJepaConfig, use_mask_token: bool=False) -> None:
super().__init__(config, use_mask_token)
del self.cls_token
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.randn(1, num_patches, config.hidden_size))
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method interpolates the pre-trained position encodings so that the model can be used on higher-resolution
images. It is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1]
num_positions = self.position_embeddings.shape[1]
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embeddings
patch_pos_embed = self.position_embeddings
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return patch_pos_embed
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
if bool_masked_pos is not None:
seq_length = embeddings.shape[1]
mask_tokens = self.mask_token.expand(batch_size, seq_length, -1)
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
|
class IJepaEmbeddings(ViTEmbeddings):
def __init__(self, config: IJepaConfig, use_mask_token: bool=False) -> None:
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method interpolates the pre-trained position encodings so that the model can be used on higher-resolution
images. It is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
'''
pass
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor]=None, interpolate_pos_encoding: bool=False) -> torch.Tensor:
pass
| 4
| 1
| 23
| 5
| 15
| 4
| 2
| 0.24
| 1
| 5
| 0
| 0
| 3
| 1
| 3
| 16
| 72
| 16
| 45
| 23
| 36
| 11
| 34
| 18
| 30
| 3
| 2
| 1
| 6
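`interpolate_pos_encoding` resizes the pre-trained position grid with bicubic interpolation: reshape the flat embeddings to a 2D grid, interpolate, and flatten back. A standalone sketch resizing an assumed 14x14 grid to 16x16 (`reshape` is used in place of `view` for safety; the arithmetic is the same):
```python
import torch
import torch.nn as nn

dim = 768                            # assumed hidden size
pos = torch.randn(1, 14 * 14, dim)   # pre-trained 14x14 position grid, flattened
grid = pos.reshape(1, 14, 14, dim).permute(0, 3, 1, 2)      # to NCHW
grid = nn.functional.interpolate(grid, size=(16, 16), mode="bicubic", align_corners=False)
pos_resized = grid.permute(0, 2, 3, 1).reshape(1, -1, dim)  # back to flat
print(pos_resized.shape)  # torch.Size([1, 256, 768])
```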
|
3,121
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/modular_ijepa.py
|
transformers.models.ijepa.modular_ijepa.IJepaForImageClassification
|
from ...processing_utils import Unpack
from transformers.models.ijepa.configuration_ijepa import IJepaConfig
from ...modeling_outputs import BaseModelOutputWithPooling, ImageClassifierOutput
from ...utils import TransformersKwargs, auto_docstring, torch_int
import torch
from ..vit.modeling_vit import ViTEmbeddings, ViTForImageClassification, ViTModel, ViTPreTrainedModel
import torch.nn as nn
from typing import Optional, Union
@auto_docstring(custom_intro="\n IJepa Model transformer with an image classification head on top (a linear layer on top of the final hidden states)\n e.g. for ImageNet.\n\n <Tip>\n\n Note that it's possible to fine-tune IJepa on higher resolution images than the ones it has been trained on, by\n setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained\n position embeddings to the higher resolution.\n\n </Tip>\n ")
class IJepaForImageClassification(IJepaPreTrainedModel, ViTForImageClassification):
def __init__(self, config: IJepaConfig):
super().__init__(config)
self.ijepa = IJepaModel(config, add_pooling_layer=False)
self.post_init()
def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, interpolate_pos_encoding: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> ImageClassifierOutput:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs: BaseModelOutputWithPooling = self.ijepa(pixel_values, head_mask=head_mask, interpolate_pos_encoding=interpolate_pos_encoding, **kwargs)
sequence_output = outputs.last_hidden_state
logits = self.classifier(sequence_output.mean(dim=1))
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config, **kwargs)
return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro="\n IJepa Model transformer with an image classification head on top (a linear layer on top of the final hidden states)\n e.g. for ImageNet.\n\n <Tip>\n\n Note that it's possible to fine-tune IJepa on higher resolution images than the ones it has been trained on, by\n setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained\n position embeddings to the higher resolution.\n\n </Tip>\n ")
class IJepaForImageClassification(IJepaPreTrainedModel, ViTForImageClassification):
def __init__(self, config: IJepaConfig):
pass
def forward(self, pixel_values: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, interpolate_pos_encoding: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> ImageClassifierOutput:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 4
| 1
| 35
| 4
| 28
| 4
| 7
| 0.12
| 2
| 8
| 2
| 0
| 2
| 1
| 2
| 135
| 72
| 8
| 57
| 19
| 45
| 7
| 31
| 10
| 28
| 12
| 3
| 3
| 13
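Unlike ViT, the `forward` above classifies the mean of all patch tokens rather than a CLS token, since IJepa has none. A sketch of that head (1000 labels and the other sizes are assumptions):
```python
import torch
import torch.nn as nn

classifier = nn.Linear(768, 1000)  # hidden size and label count are assumed
sequence_output = torch.randn(2, 196, 768)        # last_hidden_state
logits = classifier(sequence_output.mean(dim=1))  # average over patch tokens
print(logits.shape)  # torch.Size([2, 1000])
```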
|
3,122
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/modular_ijepa.py
|
transformers.models.ijepa.modular_ijepa.IJepaModel
|
from transformers.models.ijepa.configuration_ijepa import IJepaConfig
from ..vit.modeling_vit import ViTEmbeddings, ViTForImageClassification, ViTModel, ViTPreTrainedModel
class IJepaModel(IJepaPreTrainedModel, ViTModel):
def __init__(self, config: IJepaConfig, add_pooling_layer: bool=False, use_mask_token: bool=False):
"""
add_pooling_layer (`bool`, *optional*, defaults to `False`):
Whether to add a pooling layer.
use_mask_token (`bool`, *optional*, defaults to `False`):
Whether to use a mask token for masked image modeling.
"""
super().__init__(config)
self.config = config
self.embeddings = IJepaEmbeddings(config, use_mask_token=use_mask_token)
|
class IJepaModel(IJepaPreTrainedModel, ViTModel):
def __init__(self, config: IJepaConfig, add_pooling_layer: bool=False, use_mask_token: bool=False):
'''
add_pooling_layer (`bool`, *optional*, defaults to `False`):
Whether to add a pooling layer.
use_mask_token (`bool`, *optional*, defaults to `False`):
Whether to use a mask token for masked image modeling.
'''
pass
| 2
| 1
| 4
| 0
| 4
| 0
| 1
| 0
| 2
| 4
| 1
| 0
| 1
| 2
| 1
| 136
| 5
| 0
| 5
| 4
| 3
| 0
| 5
| 4
| 3
| 1
| 3
| 0
| 1
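IJepaModel illustrates the modular pattern used throughout this file: call the parent constructor, then overwrite a single submodule (here, the embeddings). A toy sketch of the same override mechanics with stand-in classes:
```python
import torch.nn as nn

# Stand-in classes, purely to illustrate the override mechanics.
class ParentModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.embeddings = nn.Embedding(10, 4)  # parent's default embeddings

class ChildModel(ParentModel):
    def __init__(self):
        super().__init__()
        self.embeddings = nn.Linear(3, 4)  # swapped in, as IJepaModel does

print(type(ChildModel().embeddings).__name__)  # Linear
```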
|
3,123
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/ijepa/modular_ijepa.py
|
transformers.models.ijepa.modular_ijepa.IJepaPreTrainedModel
|
from ..vit.modeling_vit import ViTEmbeddings, ViTForImageClassification, ViTModel, ViTPreTrainedModel
from ...utils import TransformersKwargs, auto_docstring, torch_int
from typing import Optional, Union
import torch.nn as nn
import torch
@auto_docstring
class IJepaPreTrainedModel(ViTPreTrainedModel):
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data = nn.init.trunc_normal_(module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.weight.dtype)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, IJepaEmbeddings):
module.position_embeddings.data = nn.init.trunc_normal_(module.position_embeddings.data.to(torch.float32), mean=0.0, std=self.config.initializer_range).to(module.position_embeddings.dtype)
if module.mask_token is not None:
module.mask_token.data.zero_()
|
@auto_docstring
class IJepaPreTrainedModel(ViTPreTrainedModel):
def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
'''Initialize the weights'''
pass
| 3
| 1
| 19
| 0
| 16
| 3
| 5
| 0.3
| 1
| 1
| 1
| 2
| 1
| 0
| 1
| 130
| 32
| 2
| 23
| 8
| 21
| 7
| 15
| 8
| 13
| 5
| 2
| 2
| 5
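This modular variant shares `_init_weights` with row 3,117 above; the Linear/Conv2d branch was sketched there, and the LayerNorm branch reduces to the following (768 is an illustrative size):
```python
import torch.nn as nn

ln = nn.LayerNorm(768)     # illustrative size
ln.bias.data.zero_()       # bias starts at 0
ln.weight.data.fill_(1.0)  # scale starts at 1 (identity normalization)
```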
|
3,124
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/imagegpt/configuration_imagegpt.py
|
transformers.models.imagegpt.configuration_imagegpt.ImageGPTConfig
|
from ...configuration_utils import PretrainedConfig
class ImageGPTConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of an [`ImageGPTModel`] or a [`TFImageGPTModel`]. It is
used to instantiate a GPT-2 model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the ImageGPT
[openai/imagegpt-small](https://huggingface.co/openai/imagegpt-small) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 513):
Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`ImageGPTModel`] or [`TFImageGPTModel`].
n_positions (`int`, *optional*, defaults to 32*32):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
n_embd (`int`, *optional*, defaults to 512):
Dimensionality of the embeddings and hidden states.
n_layer (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
n_head (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
n_inner (`int`, *optional*, defaults to None):
Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
activation_function (`str`, *optional*, defaults to `"quick_gelu"`):
Activation function (can be one of the activation functions defined in src/transformers/activations.py).
Defaults to "quick_gelu".
resid_pdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
embd_pdrop (`int`, *optional*, defaults to 0.1):
The dropout ratio for the embeddings.
attn_pdrop (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
The epsilon to use in the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
scale_attn_weights (`bool`, *optional*, defaults to `True`):
Scale attention weights by dividing by sqrt(hidden_size).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
dot-product/softmax to float() when training with mixed precision.
Example:
```python
>>> from transformers import ImageGPTConfig, ImageGPTModel
>>> # Initializing an ImageGPT configuration
>>> configuration = ImageGPTConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = ImageGPTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'imagegpt'
keys_to_ignore_at_inference = ['past_key_values']
attribute_map = {'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'}
def __init__(self, vocab_size=512 + 1, n_positions=32 * 32, n_embd=512, n_layer=24, n_head=8, n_inner=None, activation_function='quick_gelu', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, scale_attn_weights=True, use_cache=True, tie_word_embeddings=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.scale_attn_weights = scale_attn_weights
self.use_cache = use_cache
self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
self.reorder_and_upcast_attn = reorder_and_upcast_attn
self.tie_word_embeddings = tie_word_embeddings
super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
|
class ImageGPTConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of an [`ImageGPTModel`] or a [`TFImageGPTModel`]. It is
used to instantiate a GPT-2 model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the ImageGPT
[openai/imagegpt-small](https://huggingface.co/openai/imagegpt-small) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 513):
Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`ImageGPTModel`] or [`TFImageGPTModel`].
n_positions (`int`, *optional*, defaults to 32*32):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
n_embd (`int`, *optional*, defaults to 512):
Dimensionality of the embeddings and hidden states.
n_layer (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
n_head (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
n_inner (`int`, *optional*, defaults to None):
Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
activation_function (`str`, *optional*, defaults to `"quick_gelu"`):
Activation function (can be one of the activation functions defined in src/transformers/activations.py).
Defaults to "quick_gelu".
resid_pdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
embd_pdrop (`int`, *optional*, defaults to 0.1):
The dropout ratio for the embeddings.
attn_pdrop (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
The epsilon to use in the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
scale_attn_weights (`bool`, *optional*, defaults to `True`):
Scale attention weights by dividing by sqrt(hidden_size).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
dot-product/softmax to float() when training with mixed precision.
Example:
```python
>>> from transformers import ImageGPTConfig, ImageGPTModel
>>> # Initializing an ImageGPT configuration
>>> configuration = ImageGPTConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = ImageGPTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=512 + 1, n_positions=32 * 32, n_embd=512, n_layer=24, n_head=8, n_inner=None, activation_function='quick_gelu', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, scale_attn_weights=True, use_cache=True, tie_word_embeddings=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
pass
| 2
| 1
| 40
| 1
| 39
| 1
| 1
| 1.15
| 1
| 1
| 0
| 0
| 1
| 17
| 1
| 1
| 113
| 11
| 48
| 42
| 26
| 55
| 23
| 22
| 21
| 1
| 1
| 0
| 1
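The defaults encode ImageGPT's pixel-as-token setup: `vocab_size` is `512 + 1` (512 color clusters plus, presumably, one reserved start-of-sequence id), `n_positions` is `32 * 32` because a 32x32 image flattens to 1024 tokens, and `n_inner=None` falls back to 4x `n_embd`. A few lines verifying that arithmetic:
```python
# Reproducing the defaults' arithmetic from the signature above.
vocab_size = 512 + 1    # 512 clusters + 1 reserved id
n_positions = 32 * 32   # a 32x32 image flattens to 1024 tokens
n_embd, n_inner = 512, None
inner_dim = n_inner if n_inner is not None else 4 * n_embd
print(vocab_size, n_positions, inner_dim)  # 513 1024 2048
```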
|
3,125
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/imagegpt/configuration_imagegpt.py
|
transformers.models.imagegpt.configuration_imagegpt.ImageGPTOnnxConfig
|
from collections import OrderedDict
from ...onnx import OnnxConfig
from typing import TYPE_CHECKING, Any
from collections.abc import Mapping
class ImageGPTOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'})])
def generate_dummy_inputs(self, preprocessor: 'FeatureExtractionMixin', batch_size: int=1, seq_length: int=-1, is_pair: bool=False, num_channels: int=3, image_width: int=32, image_height: int=32) -> Mapping[str, Any]:
"""
Generate inputs to provide to the ONNX exporter.
Args:
preprocessor ([`PreTrainedTokenizerBase`] or [`FeatureExtractionMixin`]):
The preprocessor associated with this model configuration.
batch_size (`int`, *optional*, defaults to 1):
The batch size to export the model for.
seq_length (`int`, *optional*, defaults to -1):
The sequence length to export the model for (-1 means dynamic axis).
is_pair (`bool`, *optional*, defaults to `False`):
Indicate if the input is a pair (sentence 1, sentence 2).
num_channels (`int`, *optional*, defaults to 3):
The number of channels of the generated images.
image_width (`int`, *optional*, defaults to 32):
The width of the generated images.
image_height (`int`, *optional*, defaults to 32):
The height of the generated images.
Returns:
Mapping[str, Tensor] holding the kwargs to provide to the model's forward function
"""
input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
inputs = dict(preprocessor(images=input_image, return_tensors='pt'))
return inputs
|
class ImageGPTOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
def generate_dummy_inputs(self, preprocessor: 'FeatureExtractionMixin', batch_size: int=1, seq_length: int=-1, is_pair: bool=False, num_channels: int=3, image_width: int=32, image_height: int=32) -> Mapping[str, Any]:
'''
Generate inputs to provide to the ONNX exporter.
Args:
preprocessor ([`PreTrainedTokenizerBase`] or [`FeatureExtractionMixin`]):
The preprocessor associated with this model configuration.
batch_size (`int`, *optional*, defaults to 1):
The batch size to export the model for.
seq_length (`int`, *optional*, defaults to -1):
The sequence length to export the model for (-1 means dynamic axis).
is_pair (`bool`, *optional*, defaults to `False`):
Indicate if the input is a pair (sentence 1, sentence 2).
num_channels (`int`, *optional*, defaults to 3):
The number of channels of the generated images.
image_width (`int`, *optional*, defaults to 32):
The width of the generated images.
image_height (`int`, *optional*, defaults to 32):
The height of the generated images.
Returns:
Mapping[str, Tensor] holding the kwargs to provide to the model's forward function
'''
pass
| 4
| 1
| 24
| 2
| 10
| 12
| 1
| 1.09
| 1
| 6
| 0
| 0
| 2
| 0
| 2
| 2
| 51
| 5
| 22
| 16
| 8
| 24
| 7
| 5
| 4
| 1
| 1
| 0
| 2
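The `inputs` property pins the ONNX dynamic axes: axis 0 of `input_ids` is the batch and axis 1 the flattened pixel sequence. The mapping itself is just:
```python
from collections import OrderedDict

# The mapping `inputs` returns: axis 0 of input_ids is the batch,
# axis 1 the flattened pixel sequence.
onnx_inputs = OrderedDict([("input_ids", {0: "batch", 1: "sequence"})])
print(onnx_inputs)  # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'})])
```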
|
3,126
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/imagegpt/feature_extraction_imagegpt.py
|
transformers.models.imagegpt.feature_extraction_imagegpt.ImageGPTFeatureExtractor
|
from .image_processing_imagegpt import ImageGPTImageProcessor
from ...utils.import_utils import requires
import warnings
@requires(backends=('vision',))
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
def __init__(self, *args, **kwargs) -> None:
warnings.warn('The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use ImageGPTImageProcessor instead.', FutureWarning)
super().__init__(*args, **kwargs)
|
@requires(backends=('vision',))
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
def __init__(self, *args, **kwargs) -> None:
pass
| 3
| 0
| 7
| 0
| 7
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 1
| 0
| 1
| 25
| 8
| 0
| 8
| 2
| 6
| 0
| 4
| 2
| 2
| 1
| 4
| 0
| 1
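This is the standard deprecation shim: the legacy class subclasses its replacement and only adds a FutureWarning. A toy sketch of the pattern with stand-in names:
```python
import warnings

# Stand-in classes; only the shim pattern itself is being illustrated.
class NewProcessor:
    def __init__(self, *args, **kwargs):
        pass

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn("Use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # emits the FutureWarning, then behaves like NewProcessor
```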
|
3,127
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/imagegpt/image_processing_imagegpt.py
|
transformers.models.imagegpt.image_processing_imagegpt.ImageGPTImageProcessor
|
import numpy as np
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
from ...utils.import_utils import requires
from ...image_utils import ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments
from ...image_transforms import rescale, resize, to_channel_dimension_format
from typing import Optional, Union
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
@requires(backends=('vision',))
class ImageGPTImageProcessor(BaseImageProcessor):
"""
Constructs an ImageGPT image processor. This image processor can be used to resize images to a smaller resolution
(such as 32x32 or 64x64), normalize them and finally color quantize them to obtain sequences of "pixel values"
(color clusters).
Args:
clusters (`np.ndarray` or `list[list[int]]`, *optional*):
The color clusters to use, of shape `(n_clusters, 3)` when color quantizing. Can be overridden by `clusters`
in `preprocess`.
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's dimensions to `(size["height"], size["width"])`. Can be overridden by
`do_resize` in `preprocess`.
size (`dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
Size of the image after resizing. Can be overridden by `size` in `preprocess`.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image pixel value to between [-1, 1]. Can be overridden by `do_normalize` in
`preprocess`.
do_color_quantize (`bool`, *optional*, defaults to `True`):
Whether to color quantize the image. Can be overridden by `do_color_quantize` in `preprocess`.
"""
model_input_names = ['pixel_values']
def __init__(self, clusters: Optional[Union[list[list[int]], np.ndarray]]=None, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_normalize: bool=True, do_color_quantize: bool=True, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'height': 256, 'width': 256}
size = get_size_dict(size)
self.clusters = np.array(clusters) if clusters is not None else None
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_normalize = do_normalize
self.do_color_quantize = do_color_quantize
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if 'height' not in size or 'width' not in size:
raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
output_size = (size['height'], size['width'])
return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
"""
Normalizes an image's pixel values to between [-1, 1].
Args:
image (`np.ndarray`):
Image to normalize.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
image = rescale(image=image, scale=1 / 127.5, data_format=data_format, input_data_format=input_data_format)
image = image - 1
return image
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_normalize: Optional[bool]=None, do_color_quantize: Optional[bool]=None, clusters: Optional[Union[list[list[int]], np.ndarray]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[Union[str, ChannelDimension]]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_normalize=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
do_color_quantize (`bool`, *optional*, defaults to `self.do_color_quantize`):
Whether to color quantize the image.
clusters (`np.ndarray` or `list[list[int]]`, *optional*, defaults to `self.clusters`):
Clusters used to quantize the image of shape `(n_clusters, 3)`. Only has an effect if
`do_color_quantize` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
Only has an effect if `do_color_quantize` is set to `False`.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size)
resample = resample if resample is not None else self.resample
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
clusters = clusters if clusters is not None else self.clusters
clusters = np.array(clusters) if clusters is not None else None
images = make_list_of_images(images)
if not valid_images(images):
raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
validate_preprocess_arguments(do_resize=do_resize, size=size, resample=resample)
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.')
images = [to_numpy_array(image) for image in images]
if do_normalize and is_scaled_image(images[0]):
logger.warning_once('It looks like you are trying to rescale already rescaled images. If you wish to do this, make sure to set `do_normalize` to `False` and that pixel values are between [-1, 1].')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
if do_resize:
images = [self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format) for image in images]
if do_normalize:
images = [self.normalize(image=image, input_data_format=input_data_format) for image in images]
if do_color_quantize:
images = [to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format) for image in images]
images = np.array(images)
images = color_quantize(images, clusters).reshape(images.shape[:-1])
batch_size = images.shape[0]
images = images.reshape(batch_size, -1)
images = list(images)
data = {'input_ids': images}
else:
images = [to_channel_dimension_format(image, data_format, input_data_format) for image in images]
data = {'pixel_values': images}
return BatchFeature(data=data, tensor_type=return_tensors)
def to_dict(self):
output = super().to_dict()
if output.get('clusters') is not None and isinstance(output['clusters'], np.ndarray):
output['clusters'] = output['clusters'].tolist()
missing_keys = ['image_mean', 'image_std', 'rescale_factor', 'do_rescale']
for key in missing_keys:
if key in output:
output[key] = None
return output
|
@requires(backends=('vision',))
class ImageGPTImageProcessor(BaseImageProcessor):
'''
Constructs an ImageGPT image processor. This image processor can be used to resize images to a smaller resolution
(such as 32x32 or 64x64), normalize them and finally color quantize them to obtain sequences of "pixel values"
(color clusters).
Args:
clusters (`np.ndarray` or `list[list[int]]`, *optional*):
The color clusters to use, of shape `(n_clusters, 3)` when color quantizing. Can be overridden by `clusters`
in `preprocess`.
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's dimensions to `(size["height"], size["width"])`. Can be overridden by
`do_resize` in `preprocess`.
size (`dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
Size of the image after resizing. Can be overridden by `size` in `preprocess`.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image pixel value to between [-1, 1]. Can be overridden by `do_normalize` in
`preprocess`.
do_color_quantize (`bool`, *optional*, defaults to `True`):
Whether to color quantize the image. Can be overridden by `do_color_quantize` in `preprocess`.
'''
def __init__(self, clusters: Optional[Union[list[list[int]], np.ndarray]]=None, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_normalize: bool=True, do_color_quantize: bool=True, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
'''
pass
def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
'''
Normalizes an image's pixel values to between [-1, 1].
Args:
image (`np.ndarray`):
Image to normalize.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_normalize: Optional[bool]=None, do_color_quantize: Optional[bool]=None, clusters: Optional[Union[list[list[int]], np.ndarray]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[Union[str, ChannelDimension]]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
'''
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_normalize=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
do_color_quantize (`bool`, *optional*, defaults to `self.do_color_quantize`):
Whether to color quantize the image.
clusters (`np.ndarray` or `list[list[int]]`, *optional*, defaults to `self.clusters`):
Clusters used to quantize the image of shape `(n_clusters, 3)`. Only has an effect if
`do_color_quantize` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
Only has an effect if `do_color_quantize` is set to `False`.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def to_dict(self):
pass
| 8
| 4
| 53
| 4
| 28
| 20
| 5
| 0.9
| 1
| 8
| 2
| 1
| 4
| 6
| 4
| 24
| 241
| 23
| 115
| 50
| 75
| 103
| 56
| 15
| 51
| 14
| 3
| 1
| 20
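When `do_color_quantize` is true, `preprocess` maps every pixel to the id of its nearest color cluster via the module-level `color_quantize` helper (not captured in this row). A standalone sketch of nearest-cluster quantization producing the same kind of `input_ids` (random palette and images, purely illustrative):
```python
import numpy as np

clusters = np.random.rand(512, 3)       # (n_clusters, 3) palette, random here
images = np.random.rand(2, 32, 32, 3)   # normalized images, channels last
flat = images.reshape(-1, 3)
dist = ((flat[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
input_ids = dist.argmin(axis=1).reshape(2, 32 * 32)  # one token id per pixel
print(input_ids.shape)  # (2, 1024)
```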
|
3,128
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/imagegpt/modeling_imagegpt.py
|
transformers.models.imagegpt.modeling_imagegpt.ImageGPTAttention
|
import torch
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from torch import nn
from ...utils import auto_docstring, logging, torch_float
from typing import Any, Optional, Union
class ImageGPTAttention(nn.Module):
def __init__(self, config, is_cross_attention: Optional[bool]=False, layer_idx: Optional[int]=None):
super().__init__()
max_positions = config.max_position_embeddings
self.register_buffer('bias', torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(1, 1, max_positions, max_positions), persistent=False)
self.register_buffer('masked_bias', torch.tensor(-10000.0), persistent=False)
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
self.split_size = self.embed_dim
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(f'`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
self.scale_attn_weights = config.scale_attn_weights
self.is_cross_attention = is_cross_attention
self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
self.layer_idx = layer_idx
self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
if self.is_cross_attention:
self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
else:
self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
index_attn = torch.cat([index, index + self.split_size, index + 2 * self.split_size])
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
self.split_size = self.split_size // self.num_heads * (self.num_heads - len(heads))
self.num_heads = self.num_heads - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, query, key, value, attention_mask=None, head_mask=None):
attn_weights = torch.matmul(query, key.transpose(-1, -2))
if self.scale_attn_weights:
attn_weights = attn_weights / torch_float(value.size(-1) ** 0.5)
if self.scale_attn_by_inverse_layer_idx:
attn_weights = attn_weights / float(self.layer_idx + 1)
if not self.is_cross_attention:
query_length, key_length = (query.size(-2), key.size(-2))
causal_mask = self.bias[:, :, key_length - query_length:key_length, :key_length]
mask_value = torch.finfo(attn_weights.dtype).min
mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype, device=attn_weights.device)
attn_weights = torch.where(causal_mask, attn_weights, mask_value)
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.Softmax(dim=-1)(attn_weights)
attn_weights = attn_weights.type(value.dtype)
attn_weights = self.attn_dropout(attn_weights)
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = torch.matmul(attn_weights, value)
return (attn_output, attn_weights)
def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
bsz, num_heads, q_seq_len, dk = query.size()
_, _, k_seq_len, _ = key.size()
attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)
scale_factor = 1.0
if self.scale_attn_weights:
scale_factor /= float(value.size(-1)) ** 0.5
if self.scale_attn_by_inverse_layer_idx:
scale_factor /= float(self.layer_idx + 1)
with torch.autocast(query.device.type, enabled=False):
q, k = (query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len))
attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
if not self.is_cross_attention:
query_length, key_length = (query.size(-2), key.size(-2))
causal_mask = self.bias[:, :, key_length - query_length:key_length, :key_length]
mask_value = torch.finfo(attn_weights.dtype).min
mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype, device=attn_weights.device)
attn_weights = torch.where(causal_mask, attn_weights, mask_value)
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.Softmax(dim=-1)(attn_weights)
if attn_weights.dtype != torch.float32:
raise RuntimeError('Error with upcasting, attn_weights does not have dtype torch.float32')
attn_weights = attn_weights.type(value.dtype)
attn_weights = self.attn_dropout(attn_weights)
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = torch.matmul(attn_weights, value)
return (attn_output, attn_weights)
def _split_heads(self, tensor, num_heads, attn_head_size):
"""
Splits hidden_size dim into attn_head_size and num_heads
"""
new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
tensor = tensor.view(*new_shape)
return tensor.permute(0, 2, 1, 3)
def _merge_heads(self, tensor, num_heads, attn_head_size):
"""
Merges attn_head_size dim and num_attn_heads dim into hidden_size
"""
tensor = tensor.permute(0, 2, 1, 3).contiguous()
new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
return tensor.view(new_shape)
def forward(self, hidden_states: torch.Tensor, layer_past: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple:
is_cross_attention = encoder_hidden_states is not None
bsz, seq_len, _ = hidden_states.shape
if layer_past is not None:
if isinstance(layer_past, EncoderDecoderCache):
is_updated = layer_past.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = layer_past.cross_attention_cache
else:
curr_past_key_value = layer_past.self_attention_cache
else:
curr_past_key_value = layer_past
current_states = encoder_hidden_states if is_cross_attention else hidden_states
if is_cross_attention:
if not hasattr(self, 'q_attn'):
raise ValueError('If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `ImageGPTAttention(..., is_cross_attention=True)`.')
if layer_past is not None and is_updated:
query = self.q_attn(hidden_states)
key = curr_past_key_value.layers[self.layer_idx].keys
value = curr_past_key_value.layers[self.layer_idx].values
else:
query = self.q_attn(hidden_states)
key, value = self.c_attn(current_states).split(self.split_size, dim=2)
key = key.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
value = value.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
else:
query, key, value = self.c_attn(current_states).split(self.split_size, dim=2)
key = key.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
value = value.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
if layer_past is not None:
cache_position = cache_position if not is_cross_attention else None
key, value = curr_past_key_value.update(key, value, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention:
layer_past.is_updated[self.layer_idx] = True
query = query.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
if self.reorder_and_upcast_attn:
attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
else:
attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
attn_output = self.c_proj(attn_output)
attn_output = self.resid_dropout(attn_output)
return (attn_output, attn_weights)
|
class ImageGPTAttention(nn.Module):
def __init__(self, config, is_cross_attention: Optional[bool]=False, layer_idx: Optional[int]=None):
pass
def prune_heads(self, heads):
pass
def _attn(self, query, key, value, attention_mask=None, head_mask=None):
pass
def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
pass
def _split_heads(self, tensor, num_heads, attn_head_size):
'''
Splits hidden_size dim into attn_head_size and num_heads
'''
pass
def _merge_heads(self, tensor, num_heads, attn_head_size):
'''
Merges attn_head_size dim and num_attn_heads dim into hidden_size
'''
pass
def forward(self, hidden_states: torch.Tensor, layer_past: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple:
pass
| 8 | 2 | 30 | 5 | 21 | 4 | 4 | 0.19 | 1 | 11 | 1 | 0 | 7 | 15 | 7 | 17 | 217 | 43 | 148 | 58 | 130 | 28 | 122 | 48 | 114 | 7 | 1 | 2 | 27 |
| 3,129 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/imagegpt/modeling_imagegpt.py | transformers.models.imagegpt.modeling_imagegpt.ImageGPTBlock |
import torch
from typing import Any, Optional, Union
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...modeling_layers import GradientCheckpointingLayer
class ImageGPTBlock(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
hidden_size = config.hidden_size
inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
self.ln_1 = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.attn = ImageGPTAttention(config, layer_idx=layer_idx)
self.ln_2 = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon)
if config.add_cross_attention:
self.crossattention = ImageGPTAttention(config, is_cross_attention=True, layer_idx=layer_idx)
self.ln_cross_attn = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.mlp = ImageGPTMLP(inner_dim, config)
def forward(self, hidden_states: torch.Tensor, layer_past: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple:
residual = hidden_states
hidden_states = self.ln_1(hidden_states)
attn_outputs = self.attn(hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position)
attn_output = attn_outputs[0]
outputs = attn_outputs[1:]
hidden_states = attn_output + residual
if encoder_hidden_states is not None:
if not hasattr(self, 'crossattention'):
raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`')
residual = hidden_states
hidden_states = self.ln_cross_attn(hidden_states)
cross_attn_outputs = self.crossattention(hidden_states, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, cache_position=cache_position)
attn_output = cross_attn_outputs[0]
hidden_states = residual + attn_output
outputs = outputs + cross_attn_outputs[1:]
residual = hidden_states
hidden_states = self.ln_2(hidden_states)
feed_forward_hidden_states = self.mlp(hidden_states)
hidden_states = residual + feed_forward_hidden_states
return (hidden_states,) + outputs
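The block follows the GPT-2 pre-norm residual layout; a minimal sketch with identity stand-ins for the attention and MLP sub-modules (shapes and the plain `nn.LayerNorm` are hypothetical, the real block uses `ImageGPTLayerNorm`):

```python
import torch
from torch import nn

attn = nn.Identity()   # stand-in for the attention sub-module
mlp = nn.Identity()    # stand-in for the feed-forward sub-module
ln_1 = nn.LayerNorm(16)
ln_2 = nn.LayerNorm(16)

x = torch.randn(1, 4, 16)
x = x + attn(ln_1(x))  # normalize first, transform, then add the residual
x = x + mlp(ln_2(x))   # same pattern for the feed-forward sub-block
```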
|
class ImageGPTBlock(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
pass
def forward(self, hidden_states: torch.Tensor, layer_past: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=False, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple:
pass
| 3 | 0 | 36 | 4 | 30 | 4 | 4 | 0.11 | 1 | 8 | 3 | 0 | 2 | 6 | 2 | 12 | 73 | 8 | 61 | 27 | 48 | 7 | 34 | 17 | 31 | 4 | 1 | 2 | 7 |
| 3,130 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/imagegpt/modeling_imagegpt.py | transformers.models.imagegpt.modeling_imagegpt.ImageGPTForCausalImageModeling |
from torch.nn import CrossEntropyLoss
from typing import Any, Optional, Union
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, SequenceClassifierOutputWithPast
from ...utils import auto_docstring, logging, torch_float
import torch
from torch import nn
from .configuration_imagegpt import ImageGPTConfig
from ...generation import GenerationMixin
@auto_docstring(custom_intro='\n The ImageGPT Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ')
class ImageGPTForCausalImageModeling(ImageGPTPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config: ImageGPTConfig):
super().__init__(config)
self.transformer = ImageGPTModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size - 1, bias=False)
self.model_parallel = False
self.device_map = None
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Any) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoImageProcessor`]. See [`ImageGPTImageProcessor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
Examples:
```python
>>> from transformers import AutoImageProcessor, ImageGPTForCausalImageModeling
>>> import torch
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
>>> model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small")
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> model.to(device) # doctest: +IGNORE_RESULT
        >>> # unconditional generation of 4 images
>>> batch_size = 4
>>> context = torch.full((batch_size, 1), model.config.vocab_size - 1) # initialize with SOS token
>>> context = context.to(device)
>>> output = model.generate(
... input_ids=context, max_length=model.config.n_positions + 1, temperature=1.0, do_sample=True, top_k=40
... )
>>> clusters = image_processor.clusters
>>> height = image_processor.size["height"]
>>> width = image_processor.size["width"]
>>> samples = output[:, 1:].detach().cpu().numpy()
>>> samples_img = [
... np.reshape(np.rint(127.5 * (clusters[s] + 1.0)), [height, width, 3]).astype(np.uint8) for s in samples
... ] # convert color cluster tokens back to pixels
>>> f, axes = plt.subplots(1, batch_size, dpi=300)
>>> for img, ax in zip(samples_img, axes): # doctest: +IGNORE_RESULT
... ax.axis("off")
... ax.imshow(img)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions)
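A short sketch (toy logits, hypothetical vocabulary size) of the one-position shift applied before the loss above, where position t's logits are scored against position t+1's label:

```python
import torch
import torch.nn.functional as F

logits = torch.randn(1, 5, 10)            # (batch, seq, vocab)
labels = torch.randint(0, 10, (1, 5))     # pretend labels = input_ids

shift_logits = logits[..., :-1, :].contiguous()  # drop the last position
shift_labels = labels[..., 1:].contiguous()      # drop the first position
loss = F.cross_entropy(shift_logits.view(-1, 10), shift_labels.view(-1))
```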
|
@auto_docstring(custom_intro='\n The ImageGPT Model transformer with a language modeling head on top (linear layer with weights tied to the input\n embeddings).\n ')
class ImageGPTForCausalImageModeling(ImageGPTPreTrainedModel, GenerationMixin):
def __init__(self, config: ImageGPTConfig):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Any) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoImageProcessor`]. See [`ImageGPTImageProcessor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
Examples:
```python
>>> from transformers import AutoImageProcessor, ImageGPTForCausalImageModeling
>>> import torch
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
>>> model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small")
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> model.to(device) # doctest: +IGNORE_RESULT
        >>> # unconditional generation of 4 images
>>> batch_size = 4
>>> context = torch.full((batch_size, 1), model.config.vocab_size - 1) # initialize with SOS token
>>> context = context.to(device)
>>> output = model.generate(
... input_ids=context, max_length=model.config.n_positions + 1, temperature=1.0, do_sample=True, top_k=40
... )
>>> clusters = image_processor.clusters
>>> height = image_processor.size["height"]
>>> width = image_processor.size["width"]
>>> samples = output[:, 1:].detach().cpu().numpy()
>>> samples_img = [
... np.reshape(np.rint(127.5 * (clusters[s] + 1.0)), [height, width, 3]).astype(np.uint8) for s in samples
... ] # convert color cluster tokens back to pixels
>>> f, axes = plt.subplots(1, batch_size, dpi=300)
>>> for img, ax in zip(samples_img, axes): # doctest: +IGNORE_RESULT
... ax.axis("off")
... ax.imshow(img)
```'''
pass
| 5 | 1 | 29 | 4 | 16 | 9 | 2 | 0.51 | 2 | 10 | 3 | 0 | 4 | 4 | 5 | 7 | 153 | 23 | 86 | 40 | 58 | 44 | 35 | 19 | 29 | 7 | 2 | 2 | 11 |
| 3,131 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/imagegpt/modeling_imagegpt.py | transformers.models.imagegpt.modeling_imagegpt.ImageGPTForImageClassification |
from typing import Any, Optional, Union
from ...utils import auto_docstring, logging, torch_float
from torch import nn
import torch
from .configuration_imagegpt import ImageGPTConfig
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, SequenceClassifierOutputWithPast
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
@auto_docstring(custom_intro='\n The ImageGPT Model transformer with an image classification head on top (linear layer).\n [`ImageGPTForImageClassification`] average-pools the hidden states in order to do the classification.\n ')
class ImageGPTForImageClassification(ImageGPTPreTrainedModel):
def __init__(self, config: ImageGPTConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = ImageGPTModel(config)
self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs: Any) -> Union[tuple, SequenceClassifierOutputWithPast]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoImageProcessor`]. See [`ImageGPTImageProcessor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, ImageGPTForImageClassification
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
>>> model = ImageGPTForImageClassification.from_pretrained("openai/imagegpt-small")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = transformer_outputs[0]
pooled_hidden_states = hidden_states.mean(dim=1)
logits = self.score(pooled_hidden_states)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + transformer_outputs[1:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutputWithPast(loss=loss, logits=logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
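A minimal sketch of the average-pool classification head used above (hidden size and label count are hypothetical):

```python
import torch
from torch import nn

hidden_states = torch.randn(2, 7, 16)      # (batch, seq, hidden)
score = nn.Linear(16, 3, bias=False)       # hypothetical 3-way classifier
pooled = hidden_states.mean(dim=1)         # average over the sequence dimension
logits = score(pooled)                     # (batch, num_labels)
```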
|
@auto_docstring(custom_intro='\n The ImageGPT Model transformer with an image classification head on top (linear layer).\n [`ImageGPTForImageClassification`] average-pools the hidden states in order to do the classification.\n ')
class ImageGPTForImageClassification(ImageGPTPreTrainedModel):
def __init__(self, config: ImageGPTConfig):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs: Any) -> Union[tuple, SequenceClassifierOutputWithPast]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoImageProcessor`]. See [`ImageGPTImageProcessor.__call__`] for details.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, ImageGPTForImageClassification
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
>>> model = ImageGPTForImageClassification.from_pretrained("openai/imagegpt-small")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
```'''
pass
| 5 | 1 | 59 | 8 | 40 | 11 | 8 | 0.27 | 1 | 10 | 3 | 0 | 2 | 3 | 2 | 4 | 121 | 16 | 83 | 29 | 63 | 22 | 38 | 13 | 35 | 14 | 2 | 3 | 15 |
| 3,132 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/imagegpt/modeling_imagegpt.py | transformers.models.imagegpt.modeling_imagegpt.ImageGPTLayerNorm |
from torch import nn
import torch
class ImageGPTLayerNorm(nn.Module):
def __init__(self, hidden_size: tuple[int], eps: float=1e-05):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.Tensor(hidden_size))
def forward(self, tensor: torch.Tensor) -> torch.Tensor:
tensor = tensor / torch.sqrt(torch.mean(torch.square(tensor), axis=-1, keepdim=True) + self.eps)
tensor = tensor * self.weight
return tensor
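Note that this layer norm is RMS-style: it divides by the root-mean-square of the features without subtracting the mean, and it has no bias, unlike `torch.nn.LayerNorm`. A functional sketch:

```python
import torch

x = torch.randn(2, 5, 16)
eps = 1e-5
weight = torch.ones(16)  # stand-in for the learned nn.Parameter

rms = torch.sqrt(torch.mean(torch.square(x), dim=-1, keepdim=True) + eps)
out = x / rms * weight   # same normalization as ImageGPTLayerNorm.forward above
```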
|
class ImageGPTLayerNorm(nn.Module):
def __init__(self, hidden_size: tuple[int], eps: float=1e-05):
pass
def forward(self, tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 5 | 1 | 1 | 0.09 | 1 | 5 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 11 | 5 | 8 | 1 | 7 | 5 | 4 | 1 | 1 | 0 | 2 |
| 3,133 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/imagegpt/modeling_imagegpt.py | transformers.models.imagegpt.modeling_imagegpt.ImageGPTMLP |
from torch import nn
from ...activations import ACT2FN
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
import torch
class ImageGPTMLP(nn.Module):
def __init__(self, intermediate_size, config):
super().__init__()
embed_dim = config.hidden_size
self.c_fc = Conv1D(intermediate_size, embed_dim)
self.c_proj = Conv1D(embed_dim, intermediate_size)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
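`Conv1D` here is the GPT-2-style layer imported from `transformers.pytorch_utils`, which despite the name is a plain linear projection with transposed weight storage; a minimal equivalence sketch:

```python
import torch

nx, nf = 16, 64                 # input and output feature sizes
x = torch.randn(2, 5, nx)
weight = torch.randn(nx, nf)    # Conv1D(nf, nx) stores its weight as (nx, nf)
bias = torch.zeros(nf)

out = x @ weight + bias         # same math as nn.Linear(nx, nf) with weight.T
assert out.shape == (2, 5, nf)
```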
|
class ImageGPTMLP(nn.Module):
def __init__(self, intermediate_size, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 7 | 0 | 7 | 0 | 1 | 0 | 1 | 3 | 1 | 0 | 2 | 4 | 2 | 12 | 15 | 1 | 14 | 8 | 11 | 0 | 14 | 8 | 11 | 1 | 1 | 0 | 2 |
| 3,134 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/imagegpt/modeling_imagegpt.py | transformers.models.imagegpt.modeling_imagegpt.ImageGPTModel |
import torch
from typing import Any, Optional, Union
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from .configuration_imagegpt import ImageGPTConfig
from ...utils import auto_docstring, logging, torch_float
from torch import nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, SequenceClassifierOutputWithPast
@auto_docstring
class ImageGPTModel(ImageGPTPreTrainedModel):
def __init__(self, config: ImageGPTConfig):
super().__init__(config)
self.embed_dim = config.hidden_size
self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([ImageGPTBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)])
self.ln_f = ImageGPTLayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
self.model_parallel = False
self.device_map = None
self.gradient_checkpointing = False
self.post_init()
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Any) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoImageProcessor`]. See [`ImageGPTImageProcessor.__call__`] for details.
Examples:
```python
>>> from transformers import AutoImageProcessor, ImageGPTModel
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
>>> model = ImageGPTModel.from_pretrained("openai/imagegpt-small")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
device = input_ids.device if input_ids is not None else inputs_embeds.device
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if use_cache and isinstance(past_key_values, tuple):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
        past_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if position_ids is None:
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0)
if attention_mask is not None:
if batch_size <= 0:
raise ValueError('batch_size has to be defined and > 0')
attention_mask = attention_mask.view(batch_size, -1)
attention_mask = attention_mask[:, None, None, :]
attention_mask = attention_mask.to(dtype=self.dtype)
attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
if self.config.add_cross_attention and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_attention_mask = None
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
hidden_states = inputs_embeds + position_embeds.to(inputs_embeds.device)
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
all_hidden_states = () if output_hidden_states else None
for i, block in enumerate(self.h):
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
if attention_mask is not None:
attention_mask = attention_mask.to(hidden_states.device)
if isinstance(head_mask, torch.Tensor):
head_mask = head_mask.to(hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = block(hidden_states, past_key_values, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (outputs[2],)
if self.model_parallel:
for k, v in self.device_map.items():
if i == v[-1] and 'cuda:' + str(k) != self.last_device:
hidden_states = hidden_states.to('cuda:' + str(k + 1))
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(*output_shape)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)
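A small sketch of the additive attention-mask trick in the forward above: attended positions map to 0.0 and padded positions to the dtype minimum, so the mask can simply be added to the attention scores before the softmax:

```python
import torch

mask = torch.tensor([[1, 1, 1, 0]])            # (batch, seq); 0 marks padding
dtype = torch.float32

mask = mask[:, None, None, :].to(dtype)        # (batch, 1, 1, seq), broadcastable
additive = (1.0 - mask) * torch.finfo(dtype).min
# additive == [[[[0., 0., 0., -3.4028e+38]]]]
```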
|
@auto_docstring
class ImageGPTModel(ImageGPTPreTrainedModel):
def __init__(self, config: ImageGPTConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, new_embeddings):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Any) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoImageProcessor`]. See [`ImageGPTImageProcessor.__call__`] for details.
Examples:
```python
>>> from transformers import AutoImageProcessor, ImageGPTModel
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
>>> model = ImageGPTModel.from_pretrained("openai/imagegpt-small")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```'''
pass
| 8 | 2 | 51 | 7 | 34 | 9 | 10 | 0.27 | 1 | 15 | 4 | 0 | 5 | 9 | 5 | 7 | 261 | 40 | 175 | 50 | 151 | 47 | 113 | 33 | 107 | 43 | 2 | 4 | 48 |
| 3,135 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/imagegpt/modeling_imagegpt.py | transformers.models.imagegpt.modeling_imagegpt.ImageGPTPreTrainedModel |
import math
from .configuration_imagegpt import ImageGPTConfig
from ...modeling_utils import PreTrainedModel
from torch import nn
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
from ...utils import auto_docstring, logging, torch_float
@auto_docstring
class ImageGPTPreTrainedModel(PreTrainedModel):
config: ImageGPTConfig
base_model_prefix = 'transformer'
main_input_name = 'input_ids'
supports_gradient_checkpointing = True
_no_split_modules = ['ImageGPTBlock']
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, (nn.Linear, Conv1D)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, ImageGPTLayerNorm):
module.weight.data.fill_(1.0)
for name, p in module.named_parameters():
if 'c_proj' in name and 'weight' in name:
p.data.normal_(mean=0.0, std=self.config.initializer_range / math.sqrt(2 * self.config.n_layer))
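The special `c_proj` scaling follows the GPT-2 scheme of dividing the initializer std by sqrt(2 * n_layer), which keeps the residual-stream variance roughly constant with depth; a worked example with hypothetical config values:

```python
import math

initializer_range, n_layer = 0.02, 24   # hypothetical config values
c_proj_std = initializer_range / math.sqrt(2 * n_layer)
print(round(c_proj_std, 5))             # 0.00289 for a 24-layer model
```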
|
@auto_docstring
class ImageGPTPreTrainedModel(PreTrainedModel):
def __init__(self, *inputs, **kwargs):
pass
def _init_weights(self, module):
'''Initialize the weights.'''
pass
| 4 | 1 | 14 | 1 | 8 | 5 | 5 | 0.61 | 1 | 3 | 2 | 3 | 2 | 0 | 2 | 2 | 41 | 4 | 23 | 10 | 20 | 14 | 21 | 10 | 18 | 8 | 1 | 2 | 9 |
| 3,136 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/informer/configuration_informer.py | transformers.models.informer.configuration_informer.InformerConfig |
from ...configuration_utils import PretrainedConfig
from typing import Optional, Union
class InformerConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of an [`InformerModel`]. It is used to instantiate an
Informer model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Informer
[huggingface/informer-tourism-monthly](https://huggingface.co/huggingface/informer-tourism-monthly) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
prediction_length (`int`):
The prediction length for the decoder. In other words, the prediction horizon of the model. This value is
typically dictated by the dataset and we recommend to set it appropriately.
context_length (`int`, *optional*, defaults to `prediction_length`):
The context length for the encoder. If `None`, the context length will be the same as the
`prediction_length`.
distribution_output (`string`, *optional*, defaults to `"student_t"`):
The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial".
loss (`string`, *optional*, defaults to `"nll"`):
The loss function for the model corresponding to the `distribution_output` head. For parametric
distributions it is the negative log likelihood (nll) - which currently is the only supported one.
input_size (`int`, *optional*, defaults to 1):
The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
multivariate targets.
        scaling (`string` or `bool`, *optional*, defaults to `"mean"`):
Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
scaler is set to "mean".
lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
The lags of the input time series as covariates often dictated by the frequency of the data. Default is
`[1, 2, 3, 4, 5, 6, 7]` but we recommend to change it based on the dataset appropriately.
num_time_features (`int`, *optional*, defaults to 0):
The number of time features in the input time series.
num_dynamic_real_features (`int`, *optional*, defaults to 0):
The number of dynamic real valued features.
num_static_categorical_features (`int`, *optional*, defaults to 0):
The number of static categorical features.
num_static_real_features (`int`, *optional*, defaults to 0):
The number of static real valued features.
cardinality (`list[int]`, *optional*):
The cardinality (number of different values) for each of the static categorical features. Should be a list
of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
`num_static_categorical_features` is > 0.
embedding_dimension (`list[int]`, *optional*):
The dimension of the embedding for each of the static categorical features. Should be a list of integers,
having the same length as `num_static_categorical_features`. Cannot be `None` if
`num_static_categorical_features` is > 0.
d_model (`int`, *optional*, defaults to 64):
Dimensionality of the transformer layers.
encoder_layers (`int`, *optional*, defaults to 2):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 2):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 2):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 2):
Number of attention heads for each attention layer in the Transformer decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 32):
Dimension of the "intermediate" (often named feed-forward) layer in encoder.
decoder_ffn_dim (`int`, *optional*, defaults to 32):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and
`"relu"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the encoder, and decoder.
encoder_layerdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention and fully connected layers for each encoder layer.
decoder_layerdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention and fully connected layers for each decoder layer.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability used between the two layers of the feed-forward networks.
num_parallel_samples (`int`, *optional*, defaults to 100):
The number of samples to generate in parallel for each time step of inference.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated normal weight initialization distribution.
use_cache (`bool`, *optional*, defaults to `True`):
Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
attention_type (`str`, *optional*, defaults to "prob"):
Attention used in encoder. This can be set to "prob" (Informer's ProbAttention) or "full" (vanilla
transformer's canonical self-attention).
sampling_factor (`int`, *optional*, defaults to 5):
            ProbSparse sampling factor (only takes effect when `attention_type`="prob"). It is used to control the
reduced query matrix (Q_reduce) input length.
distil (`bool`, *optional*, defaults to `True`):
Whether to use distilling in encoder.
Example:
```python
>>> from transformers import InformerConfig, InformerModel
>>> # Initializing an Informer configuration with 12 time steps for prediction
>>> configuration = InformerConfig(prediction_length=12)
>>> # Randomly initializing a model (with random weights) from the configuration
>>> model = InformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'informer'
attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', 'initializer_range': 'init_std'}
def __init__(self, prediction_length: Optional[int]=None, context_length: Optional[int]=None, distribution_output: str='student_t', loss: str='nll', input_size: int=1, lags_sequence: Optional[list[int]]=None, scaling: Optional[Union[str, bool]]='mean', num_dynamic_real_features: int=0, num_static_real_features: int=0, num_static_categorical_features: int=0, num_time_features: int=0, cardinality: Optional[list[int]]=None, embedding_dimension: Optional[list[int]]=None, d_model: int=64, encoder_ffn_dim: int=32, decoder_ffn_dim: int=32, encoder_attention_heads: int=2, decoder_attention_heads: int=2, encoder_layers: int=2, decoder_layers: int=2, is_encoder_decoder: bool=True, activation_function: str='gelu', dropout: float=0.05, encoder_layerdrop: float=0.1, decoder_layerdrop: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, num_parallel_samples: int=100, init_std: float=0.02, use_cache=True, attention_type: str='prob', sampling_factor: int=5, distil: bool=True, **kwargs):
self.prediction_length = prediction_length
self.context_length = context_length or prediction_length
self.distribution_output = distribution_output
self.loss = loss
self.input_size = input_size
self.num_time_features = num_time_features
self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
self.scaling = scaling
self.num_dynamic_real_features = num_dynamic_real_features
self.num_static_real_features = num_static_real_features
self.num_static_categorical_features = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(cardinality) != num_static_categorical_features:
raise ValueError('The cardinality should be a list of the same length as `num_static_categorical_features`')
self.cardinality = cardinality
else:
self.cardinality = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(embedding_dimension) != num_static_categorical_features:
raise ValueError('The embedding dimension should be a list of the same length as `num_static_categorical_features`')
self.embedding_dimension = embedding_dimension
else:
self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
self.num_parallel_samples = num_parallel_samples
self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
self.d_model = d_model
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.encoder_ffn_dim = encoder_ffn_dim
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_layers = encoder_layers
self.decoder_layers = decoder_layers
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.activation_function = activation_function
self.init_std = init_std
self.use_cache = use_cache
self.attention_type = attention_type
self.sampling_factor = sampling_factor
self.distil = distil
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
def _number_of_features(self) -> int:
return sum(self.embedding_dimension) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2
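A worked example (hypothetical values) of how `feature_size` and `_number_of_features` combine:

```python
input_size = 1
lags_sequence = [1, 2, 3, 4, 5, 6, 7]
num_time_features = 2
num_dynamic_real_features = 0
num_static_real_features = 0
embedding_dimension = [2]   # one categorical feature, cardinality 3: min(50, (3 + 1) // 2)

number_of_features = (
    sum(embedding_dimension)
    + num_dynamic_real_features
    + num_time_features
    + num_static_real_features
    + input_size * 2        # the model also feeds two scaling statistics per input dim
)
feature_size = input_size * len(lags_sequence) + number_of_features
assert feature_size == 13
```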
|
class InformerConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of an [`InformerModel`]. It is used to instantiate an
Informer model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Informer
[huggingface/informer-tourism-monthly](https://huggingface.co/huggingface/informer-tourism-monthly) architecture.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
prediction_length (`int`):
The prediction length for the decoder. In other words, the prediction horizon of the model. This value is
typically dictated by the dataset and we recommend to set it appropriately.
context_length (`int`, *optional*, defaults to `prediction_length`):
The context length for the encoder. If `None`, the context length will be the same as the
`prediction_length`.
distribution_output (`string`, *optional*, defaults to `"student_t"`):
The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial".
loss (`string`, *optional*, defaults to `"nll"`):
The loss function for the model corresponding to the `distribution_output` head. For parametric
distributions it is the negative log likelihood (nll) - which currently is the only supported one.
input_size (`int`, *optional*, defaults to 1):
The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
multivariate targets.
        scaling (`string` or `bool`, *optional*, defaults to `"mean"`):
Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
scaler is set to "mean".
lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
The lags of the input time series as covariates often dictated by the frequency of the data. Default is
`[1, 2, 3, 4, 5, 6, 7]` but we recommend to change it based on the dataset appropriately.
num_time_features (`int`, *optional*, defaults to 0):
The number of time features in the input time series.
num_dynamic_real_features (`int`, *optional*, defaults to 0):
The number of dynamic real valued features.
num_static_categorical_features (`int`, *optional*, defaults to 0):
The number of static categorical features.
num_static_real_features (`int`, *optional*, defaults to 0):
The number of static real valued features.
cardinality (`list[int]`, *optional*):
The cardinality (number of different values) for each of the static categorical features. Should be a list
of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
`num_static_categorical_features` is > 0.
embedding_dimension (`list[int]`, *optional*):
The dimension of the embedding for each of the static categorical features. Should be a list of integers,
having the same length as `num_static_categorical_features`. Cannot be `None` if
`num_static_categorical_features` is > 0.
d_model (`int`, *optional*, defaults to 64):
Dimensionality of the transformer layers.
encoder_layers (`int`, *optional*, defaults to 2):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 2):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 2):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 2):
Number of attention heads for each attention layer in the Transformer decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 32):
Dimension of the "intermediate" (often named feed-forward) layer in encoder.
decoder_ffn_dim (`int`, *optional*, defaults to 32):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and
`"relu"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the encoder, and decoder.
encoder_layerdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention and fully connected layers for each encoder layer.
decoder_layerdrop (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention and fully connected layers for each decoder layer.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability used between the two layers of the feed-forward networks.
num_parallel_samples (`int`, *optional*, defaults to 100):
The number of samples to generate in parallel for each time step of inference.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated normal weight initialization distribution.
use_cache (`bool`, *optional*, defaults to `True`):
Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
attention_type (`str`, *optional*, defaults to "prob"):
Attention used in encoder. This can be set to "prob" (Informer's ProbAttention) or "full" (vanilla
transformer's canonical self-attention).
sampling_factor (`int`, *optional*, defaults to 5):
            ProbSparse sampling factor (only takes effect when `attention_type`="prob"). It is used to control the
reduced query matrix (Q_reduce) input length.
distil (`bool`, *optional*, defaults to `True`):
Whether to use distilling in encoder.
Example:
```python
>>> from transformers import InformerConfig, InformerModel
>>> # Initializing an Informer configuration with 12 time steps for prediction
>>> configuration = InformerConfig(prediction_length=12)
>>> # Randomly initializing a model (with random weights) from the configuration
>>> model = InformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, prediction_length: Optional[int]=None, context_length: Optional[int]=None, distribution_output: str='student_t', loss: str='nll', input_size: int=1, lags_sequence: Optional[list[int]]=None, scaling: Optional[Union[str, bool]]='mean', num_dynamic_real_features: int=0, num_static_real_features: int=0, num_static_categorical_features: int=0, num_time_features: int=0, cardinality: Optional[list[int]]=None, embedding_dimension: Optional[list[int]]=None, d_model: int=64, encoder_ffn_dim: int=32, decoder_ffn_dim: int=32, encoder_attention_heads: int=2, decoder_attention_heads: int=2, encoder_layers: int=2, decoder_layers: int=2, is_encoder_decoder: bool=True, activation_function: str='gelu', dropout: float=0.05, encoder_layerdrop: float=0.1, decoder_layerdrop: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, num_parallel_samples: int=100, init_std: float=0.02, use_cache=True, attention_type: str='prob', sampling_factor: int=5, distil: bool=True, **kwargs):
pass
@property
def _number_of_features(self) -> int:
pass
| 4 | 1 | 54 | 5 | 47 | 4 | 4 | 1.01 | 1 | 6 | 0 | 0 | 2 | 33 | 2 | 2 | 221 | 19 | 101 | 75 | 61 | 102 | 48 | 38 | 45 | 6 | 1 | 2 | 7 |
| 3,137 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/informer/modeling_informer.py | transformers.models.informer.modeling_informer.InformerAttention |
from .configuration_informer import InformerConfig
from ...processing_utils import Unpack
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Callable, Optional, Union
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from torch import nn
import torch
class InformerAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[InformerConfig]=None, layer_idx: Optional[int]=None):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.is_decoder = is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(f'Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.')
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
is_cross_attention = key_value_states is not None
bsz, tgt_len = hidden_states.shape[:-1]
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
q_input_shape = (bsz, tgt_len, -1, self.head_dim)
kv_input_shape = (bsz, src_len, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
key_states = curr_past_key_value.layers[self.layer_idx].keys
value_states = curr_past_key_value.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(*kv_input_shape).transpose(1, 2)
value_states = value_states.view(*kv_input_shape).transpose(1, 2)
if past_key_values is not None:
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights)
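For reference, a minimal sketch of the math an eager attention path computes (illustrative only, not the library's `eager_attention_forward` implementation):

```python
import torch

bsz, heads, tgt, src, head_dim = 1, 2, 4, 6, 8
q = torch.randn(bsz, heads, tgt, head_dim)
k = torch.randn(bsz, heads, src, head_dim)
v = torch.randn(bsz, heads, src, head_dim)

scores = q @ k.transpose(-1, -2) * head_dim ** -0.5  # scaled dot product
weights = scores.softmax(dim=-1)                     # attention probabilities
out = weights @ v                                    # (bsz, heads, tgt, head_dim)
assert out.shape == (bsz, heads, tgt, head_dim)
```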
|
class InformerAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[InformerConfig]=None, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 4 | 2 | 50 | 7 | 35 | 8 | 5 | 0.24 | 1 | 7 | 1 | 0 | 3 | 12 | 3 | 13 | 156 | 23 | 107 | 44 | 86 | 26 | 68 | 27 | 64 | 12 | 1 | 2 | 15 |
| 3,138 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/informer/modeling_informer.py | transformers.models.informer.modeling_informer.InformerConvLayer |
from torch import nn
from ...modeling_layers import GradientCheckpointingLayer
class InformerConvLayer(GradientCheckpointingLayer):
def __init__(self, c_in):
super().__init__()
self.downConv = nn.Conv1d(in_channels=c_in, out_channels=c_in, kernel_size=3, padding=1, padding_mode='circular')
self.norm = nn.BatchNorm1d(c_in)
self.activation = nn.ELU()
self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
x = self.downConv(x.permute(0, 2, 1))
x = self.norm(x)
x = self.activation(x)
x = self.maxPool(x)
x = x.transpose(1, 2)
return x
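Usage sketch for the shape contract of `InformerConvLayer`: the circular `Conv1d` preserves the sequence length, and the stride-2 `MaxPool1d` then maps `L` to `floor((L - 1) / 2) + 1`, i.e. roughly halves it. The import path below is an assumption (this is a private module).
```python
import torch
from transformers.models.informer.modeling_informer import InformerConvLayer  # private API, path assumed

layer = InformerConvLayer(c_in=32).eval()   # eval() to use BatchNorm running stats
x = torch.randn(4, 96, 32)                  # (batch, seq_len, d_model)
with torch.no_grad():
    y = layer(x)
print(y.shape)                              # torch.Size([4, 48, 32]) -- 96 -> floor(95 / 2) + 1 = 48
```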
|
class InformerConvLayer(GradientCheckpointingLayer):
def __init__(self, c_in):
pass
def forward(self, x):
pass
| 3
| 0
| 10
| 0
| 10
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 4
| 2
| 12
| 21
| 1
| 20
| 7
| 17
| 0
| 14
| 7
| 11
| 1
| 1
| 0
| 2
|
3,139
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/informer/modeling_informer.py
|
transformers.models.informer.modeling_informer.InformerDecoder
|
from typing import Callable, Optional, Union
from torch import nn
import torch
from .configuration_informer import InformerConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, SampleTSPredictionOutput, Seq2SeqTSModelOutput, Seq2SeqTSPredictionOutput
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
class InformerDecoder(InformerPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a
[`InformerDecoderLayer`]
Args:
config: InformerConfig
"""
def __init__(self, config: InformerConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
if config.prediction_length is None:
raise ValueError('The `prediction_length` config needs to be specified.')
self.value_embedding = InformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
self.embed_positions = InformerSinusoidalPositionalEmbedding(config.context_length + config.prediction_length, config.d_model)
self.layers = nn.ModuleList([InformerDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
self.post_init()
def forward(self, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
"""
Args:
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
cross-attention on hidden heads. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
input_shape = inputs_embeds.size()[:-1]
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if use_cache and isinstance(past_key_values, tuple):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + input_shape[1], device=inputs_embeds.device)
attention_mask = self._update_causal_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length)
encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds)
hidden_states = self.value_embedding(inputs_embeds)
embed_pos = self.embed_positions(inputs_embeds.size(), past_key_values_length=self.config.context_length)
hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']):
if attn_mask is not None:
if attn_mask.size()[0] != len(self.layers):
                    raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {attn_mask.size()[0]}.')
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)
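The layer loop above applies LayerDrop: during training, each decoder layer is skipped independently with probability `self.layerdrop`. A minimal sketch of just that sampling rule (the layer count and drop rate are illustrative):
```python
import torch

layerdrop, num_layers = 0.2, 6
kept = []
for idx in range(num_layers):
    if torch.rand([]) < layerdrop:   # same test as `dropout_probability < self.layerdrop`
        continue                     # this layer is skipped for the current forward pass
    kept.append(idx)
print(kept)                          # e.g. [0, 2, 3, 4, 5]; each layer survives with p = 0.8
```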
|
class InformerDecoder(InformerPreTrainedModel):
'''
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a
[`InformerDecoderLayer`]
Args:
config: InformerConfig
'''
def __init__(self, config: InformerConfig):
pass
def forward(self, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
'''
Args:
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
cross-attention on hidden heads. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
'''
pass
| 3
| 2
| 103
| 14
| 61
| 28
| 18
| 0.5
| 1
| 13
| 5
| 0
| 2
| 7
| 2
| 3
| 215
| 31
| 123
| 37
| 107
| 61
| 63
| 24
| 60
| 33
| 2
| 3
| 35
|
3,140
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/informer/modeling_informer.py
|
transformers.models.informer.modeling_informer.InformerDecoderLayer
|
from .configuration_informer import InformerConfig
from ...utils.deprecation import deprecate_kwarg
from ...modeling_layers import GradientCheckpointingLayer
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Callable, Optional, Union
from ...activations import ACT2FN
from torch import nn
import torch
class InformerDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: InformerConfig, layer_idx: Optional[int]=None):
super().__init__()
self.embed_dim = config.d_model
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = InformerAttention(self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config, layer_idx=layer_idx)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
if config.attention_type == 'prob':
self.self_attn = InformerProbSparseAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, sampling_factor=config.sampling_factor, is_decoder=True, layer_idx=layer_idx)
else:
self.self_attn = InformerAttention(embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, config=config, layer_idx=layer_idx)
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size `(decoder_attention_heads,)`.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
residual = hidden_states
hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
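Each of the three sublayers above (self-attention, cross-attention, feed-forward) follows the same post-norm residual pattern: sublayer, dropout, residual add, LayerNorm. A standalone sketch of one such block, with hypothetical sizes:
```python
import torch
from torch import nn

embed_dim, ffn_dim = 32, 64
fc1, fc2 = nn.Linear(embed_dim, ffn_dim), nn.Linear(ffn_dim, embed_dim)
final_layer_norm = nn.LayerNorm(embed_dim)

hidden_states = torch.randn(2, 10, embed_dim)
residual = hidden_states
hidden_states = fc2(torch.relu(fc1(hidden_states)))                          # sublayer
hidden_states = nn.functional.dropout(hidden_states, p=0.1, training=True)   # dropout
hidden_states = final_layer_norm(residual + hidden_states)                   # add & norm
print(hidden_states.shape)                                                   # torch.Size([2, 10, 32])
```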
|
class InformerDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: InformerConfig, layer_idx: Optional[int]=None):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size `(decoder_attention_heads,)`.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
'''
pass
| 4
| 1
| 61
| 6
| 43
| 13
| 4
| 0.29
| 1
| 6
| 3
| 0
| 2
| 11
| 2
| 12
| 124
| 12
| 87
| 32
| 73
| 25
| 46
| 21
| 43
| 6
| 1
| 1
| 8
|
3,141
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/informer/modeling_informer.py
|
transformers.models.informer.modeling_informer.InformerEncoder
|
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
from .configuration_informer import InformerConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, SampleTSPredictionOutput, Seq2SeqTSModelOutput, Seq2SeqTSPredictionOutput
from typing import Callable, Optional, Union
from torch import nn
import torch
class InformerEncoder(InformerPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`InformerEncoderLayer`].
Args:
config: InformerConfig
"""
def __init__(self, config: InformerConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
if config.prediction_length is None:
raise ValueError('The `prediction_length` config needs to be specified.')
self.value_embedding = InformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
self.embed_positions = InformerSinusoidalPositionalEmbedding(config.context_length + config.prediction_length, config.d_model)
self.layers = nn.ModuleList([InformerEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
if config.distil:
self.conv_layers = nn.ModuleList([InformerConvLayer(config.d_model) for _ in range(config.encoder_layers - 1)])
self.conv_layers.append(None)
else:
self.conv_layers = [None] * config.encoder_layers
self.post_init()
def forward(self, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
"""
Args:
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = self.value_embedding(inputs_embeds)
embed_pos = self.embed_positions(inputs_embeds.size())
hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
if attention_mask is not None:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
if head_mask is not None:
if head_mask.size()[0] != len(self.layers):
raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')
for idx, (encoder_layer, conv_layer) in enumerate(zip(self.layers, self.conv_layers)):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions)
if conv_layer is not None:
output = conv_layer(layer_outputs[0])
layer_outputs = (output,) + layer_outputs[1:]
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
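With `config.distil` enabled, every encoder layer except the last is followed by an `InformerConvLayer`, so the sequence shrinks geometrically across layers. A sketch of the resulting lengths (pure arithmetic, no model needed):
```python
def pooled_length(length: int) -> int:
    # MaxPool1d(kernel_size=3, stride=2, padding=1): L -> floor((L - 1) / 2) + 1
    return (length - 1) // 2 + 1

seq_len, encoder_layers = 96, 4
lengths = [seq_len]
for _ in range(encoder_layers - 1):   # conv after every layer but the final one
    lengths.append(pooled_length(lengths[-1]))
print(lengths)                        # [96, 48, 24, 12]
```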
|
class InformerEncoder(InformerPreTrainedModel):
'''
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`InformerEncoderLayer`].
Args:
config: InformerConfig
'''
def __init__(self, config: InformerConfig):
pass
def forward(self, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
'''
Args:
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
pass
| 3
| 2
| 70
| 10
| 46
| 15
| 13
| 0.39
| 1
| 14
| 6
| 0
| 2
| 8
| 2
| 3
| 149
| 22
| 92
| 28
| 81
| 36
| 59
| 20
| 56
| 22
| 2
| 4
| 25
|
3,142
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/informer/modeling_informer.py
|
transformers.models.informer.modeling_informer.InformerEncoderLayer
|
from typing import Callable, Optional, Union
from ...activations import ACT2FN
from torch import nn
import torch
from .configuration_informer import InformerConfig
from ...modeling_layers import GradientCheckpointingLayer
class InformerEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: InformerConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
if config.attention_type == 'prob':
self.self_attn = InformerProbSparseAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, sampling_factor=config.sampling_factor)
else:
self.self_attn = InformerAttention(embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, config=config)
def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
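The final `if` above guards against float16 overflow: inf/NaN activations are clamped to just inside the fp16 range. A minimal reproduction of that guard:
```python
import torch

hidden_states = torch.tensor([1.0, float("inf"), -float("inf")], dtype=torch.float16)
if hidden_states.dtype == torch.float16 and (
    torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
    clamp_value = torch.finfo(hidden_states.dtype).max - 1000   # 65504 - 1000
    hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
print(hidden_states)   # the infs become finite values near +/-64504
```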
|
class InformerEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: InformerConfig):
pass
def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
'''
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
'''
pass
| 3
| 1
| 36
| 3
| 28
| 6
| 3
| 0.19
| 1
| 5
| 3
| 0
| 2
| 9
| 2
| 12
| 74
| 6
| 57
| 22
| 48
| 11
| 34
| 16
| 31
| 3
| 1
| 1
| 5
|
3,143
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/informer/modeling_informer.py
|
transformers.models.informer.modeling_informer.InformerFeatureEmbedder
|
from torch import nn
import torch
class InformerFeatureEmbedder(nn.Module):
"""
Embed a sequence of categorical features.
Args:
cardinalities (`list[int]`):
List of cardinalities of the categorical features.
embedding_dims (`list[int]`):
List of embedding dimensions of the categorical features.
"""
def __init__(self, cardinalities: list[int], embedding_dims: list[int]) -> None:
super().__init__()
self.num_features = len(cardinalities)
self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)])
def forward(self, features: torch.Tensor) -> torch.Tensor:
if self.num_features > 1:
cat_feature_slices = torch.chunk(features, self.num_features, dim=-1)
else:
cat_feature_slices = [features]
return torch.cat([embed(cat_feature_slice.squeeze(-1)) for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices)], dim=-1)
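Usage sketch for `InformerFeatureEmbedder`: two categorical features with cardinalities 5 and 10 are embedded into 3 and 4 dimensions, and the per-feature embeddings are concatenated on the last axis. Assumes the class is in scope (it lives in the same private module as above).
```python
import torch

embedder = InformerFeatureEmbedder(cardinalities=[5, 10], embedding_dims=[3, 4])
features = torch.randint(0, 5, (2, 7, 2))   # (batch, seq_len, num_features), ids < each cardinality
out = embedder(features)
print(out.shape)                            # torch.Size([2, 7, 7]) -- 3 + 4 concatenated dims
```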
|
class InformerFeatureEmbedder(nn.Module):
'''
Embed a sequence of categorical features.
Args:
cardinalities (`list[int]`):
List of cardinalities of the categorical features.
embedding_dims (`list[int]`):
List of embedding dimensions of the categorical features.
'''
def __init__(self, cardinalities: list[int], embedding_dims: list[int]) -> None:
pass
def forward(self, features: torch.Tensor) -> torch.Tensor:
pass
| 3
| 1
| 10
| 1
| 8
| 1
| 2
| 0.59
| 1
| 4
| 0
| 0
| 2
| 2
| 2
| 12
| 32
| 5
| 17
| 6
| 14
| 10
| 10
| 6
| 7
| 2
| 1
| 1
| 3
|
3,144
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/informer/modeling_informer.py
|
transformers.models.informer.modeling_informer.InformerForPrediction
|
from typing import Callable, Optional, Union
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
import torch
from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput
from .configuration_informer import InformerConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, SampleTSPredictionOutput, Seq2SeqTSModelOutput, Seq2SeqTSPredictionOutput
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
@auto_docstring
class InformerForPrediction(InformerPreTrainedModel):
def __init__(self, config: InformerConfig):
super().__init__(config)
self.model = InformerModel(config)
if config.distribution_output == 'student_t':
self.distribution_output = StudentTOutput(dim=config.input_size)
elif config.distribution_output == 'normal':
self.distribution_output = NormalOutput(dim=config.input_size)
elif config.distribution_output == 'negative_binomial':
self.distribution_output = NegativeBinomialOutput(dim=config.input_size)
else:
raise ValueError(f'Unknown distribution output {config.distribution_output}')
self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.d_model)
self.target_shape = self.distribution_output.event_shape
if config.loss == 'nll':
self.loss = nll
else:
raise ValueError(f'Unknown loss function {config.loss}')
self.post_init()
def output_params(self, dec_output):
return self.parameter_projection(dec_output)
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
@torch.jit.ignore
def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution:
sliced_params = params
if trailing_n is not None:
sliced_params = [p[:, -trailing_n:] for p in params]
return self.distribution_output.distribution(sliced_params, loc=loc, scale=scale)
@auto_docstring
def forward(self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None, future_observed_mask: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Seq2SeqTSModelOutput, tuple]:
"""
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size of
this tensor must be larger than the `context_length` of the model, since the model will use the larger size
to construct lag features, i.e. additional values from the past which are added in order to serve as "extra
context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no
`lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of
the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
`static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step. Holiday features are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
            must be known at prediction time.
            The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
`[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to the
values of the time series.
Static categorical features are features which have the same value for all time steps (static over time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):
Future values of the time series, that serve as labels for the model. The `future_values` is what the
Transformer needs during training to learn to output, given the `past_values`.
The sequence length here is equal to `prediction_length`.
See the demo notebook and code snippets for details.
Optionally, during training any missing values need to be replaced with zeros and indicated via the
`future_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to `future_values`.
These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as
Fourier features). These could also be so-called "age" features, which basically help the model know "at
which point in life" a time-series is. Age features have small values for distant past time steps and
increase monotonically the more we approach the current time step. Holiday features are also a good example
of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
            must be known at prediction time.
            The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
future_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `future_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
This mask is used to filter out missing values for the final loss calculation.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import InformerForPrediction
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> model = InformerForPrediction.from_pretrained(
... "huggingface/informer-tourism-monthly"
... )
>>> # during training, one provides both past and future values
>>> # as well as possible additional features
>>> outputs = model(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_values=batch["future_values"],
... future_time_features=batch["future_time_features"],
... )
>>> loss = outputs.loss
>>> loss.backward()
>>> # during inference, one only provides past values
>>> # as well as possible additional features
>>> # the model autoregressively generates future values
>>> outputs = model.generate(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_time_features=batch["future_time_features"],
... )
>>> mean_prediction = outputs.sequences.mean(dim=1)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if future_values is not None:
use_cache = False
outputs = self.model(past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions, use_cache=use_cache, return_dict=return_dict, cache_position=cache_position)
prediction_loss = None
params = None
if future_values is not None:
params = self.output_params(outputs[0])
distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2])
loss = self.loss(distribution, future_values)
if future_observed_mask is None:
future_observed_mask = torch.ones_like(future_values)
if len(self.target_shape) == 0:
loss_weights = future_observed_mask
else:
loss_weights, _ = future_observed_mask.min(dim=-1, keepdim=False)
prediction_loss = weighted_average(loss, weights=loss_weights)
if not return_dict:
outputs = (params,) + outputs[1:] if params is not None else outputs[1:]
return (prediction_loss,) + outputs if prediction_loss is not None else outputs
return Seq2SeqTSPredictionOutput(loss=prediction_loss, params=params, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, loc=outputs.loc, scale=outputs.scale, static_features=outputs.static_features)
@torch.no_grad()
def generate(self, past_values: torch.Tensor, past_time_features: torch.Tensor, future_time_features: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> SampleTSPredictionOutput:
"""
Greedily generate sequences of sample predictions from a model with a probability distribution head.
Parameters:
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size
of this tensor must be larger than the `context_length` of the model, since the model will use the
larger size to construct lag features, i.e. additional values from the past which are added in order to
serve as "extra context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if
no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length
of the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features,
such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number
of variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things
like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features).
These could also be so-called "age" features, which basically help the model know "at which point in
life" a time-series is. Age features have small values for distant past time steps and increase
monotonically the more we approach the current time step. Holiday features are also a good example of
time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
where the position encodings are learned from scratch internally as parameters of the model, the Time
Series Transformer requires to provide additional time features. The Time Series Transformer only
learns additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
                features must be known at prediction time.
                The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to sampled
predictions. These could be things like "month of year", "day of the month", etc. encoded as vectors
(for instance as Fourier features). These could also be so-called "age" features, which basically help
the model know "at which point in life" a time-series is. Age features have small values for distant
past time steps and increase monotonically the more we approach the current time step. Holiday features
are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
where the position encodings are learned from scratch internally as parameters of the model, the Time
Series Transformer requires to provide additional time features. The Time Series Transformer only
learns additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
                features must be known at prediction time.
                The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to
the values of the time series.
Static categorical features are features which have the same value for all time steps (static over
time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers.
Return:
[`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for
multivariate predictions.
"""
outputs = self(static_categorical_features=static_categorical_features, static_real_features=static_real_features, past_time_features=past_time_features, past_values=past_values, past_observed_mask=past_observed_mask, future_time_features=future_time_features, future_values=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, use_cache=True)
decoder = self.model.get_decoder()
enc_last_hidden = outputs.encoder_last_hidden_state
loc = outputs.loc
scale = outputs.scale
static_feat = outputs.static_features
num_parallel_samples = self.config.num_parallel_samples
repeated_loc = loc.repeat_interleave(repeats=num_parallel_samples, dim=0)
repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0)
repeated_past_values = (past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) - repeated_loc) / repeated_scale
expanded_static_feat = static_feat.unsqueeze(1).expand(-1, future_time_features.shape[1], -1)
features = torch.cat((expanded_static_feat, future_time_features), dim=-1)
repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0)
repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0)
future_samples = []
for k in range(self.config.prediction_length):
lagged_sequence = self.model.get_lagged_subsequences(sequence=repeated_past_values, subsequences_length=1 + k, shift=1)
lags_shape = lagged_sequence.shape
reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
decoder_input = torch.cat((reshaped_lagged_sequence, repeated_features[:, :k + 1]), dim=-1)
dec_output = decoder(inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden)
dec_last_hidden = dec_output.last_hidden_state
params = self.parameter_projection(dec_last_hidden[:, -1:])
distr = self.output_distribution(params, loc=repeated_loc, scale=repeated_scale)
next_sample = distr.sample()
repeated_past_values = torch.cat((repeated_past_values, (next_sample - repeated_loc) / repeated_scale), dim=1)
future_samples.append(next_sample)
concat_future_samples = torch.cat(future_samples, dim=1)
return SampleTSPredictionOutput(sequences=concat_future_samples.reshape((-1, num_parallel_samples, self.config.prediction_length) + self.target_shape))
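`generate` scores all sample paths in one batched pass by repeating every per-series tensor `num_parallel_samples` times along the batch axis with `repeat_interleave`. A sketch of just that expansion, with illustrative sizes:
```python
import torch

batch_size, num_parallel_samples, context = 2, 3, 5
past_values = torch.randn(batch_size, context)
repeated = past_values.repeat_interleave(repeats=num_parallel_samples, dim=0)
print(repeated.shape)   # torch.Size([6, 5]): rows 0-2 replicate series 0, rows 3-5 series 1
```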
|
@auto_docstring
class InformerForPrediction(InformerPreTrainedModel):
def __init__(self, config: InformerConfig):
pass
def output_params(self, dec_output):
pass
def get_encoder(self):
pass
def get_decoder(self):
pass
@torch.jit.ignore
def output_distribution(self, params, loc=None, scale=None, trailing_n=None) -> torch.distributions.Distribution:
pass
@auto_docstring
def forward(self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None, future_observed_mask: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Seq2SeqTSModelOutput, tuple]:
'''
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size of
this tensor must be larger than the `context_length` of the model, since the model will use the larger size
to construct lag features, i.e. additional values from the past which are added in order to serve as "extra
context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no
`lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of
the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
`static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step. Holiday features are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires you to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
`[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to the
values of the time series.
Static categorical features are features which have the same value for all time steps (static over time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):
Future values of the time series, that serve as labels for the model. The `future_values` is what the
Transformer needs during training to learn to output, given the `past_values`.
The sequence length here is equal to `prediction_length`.
See the demo notebook and code snippets for details.
Optionally, during training any missing values need to be replaced with zeros and indicated via the
`future_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to `future_values`.
These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as
Fourier features). These could also be so-called "age" features, which basically help the model know "at
which point in life" a time-series is. Age features have small values for distant past time steps and
increase monotonically the more we approach the current time step. Holiday features are also a good example
of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires you to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
future_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `future_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
This mask is used to filter out missing values for the final loss calculation.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import InformerForPrediction
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> model = InformerForPrediction.from_pretrained(
... "huggingface/informer-tourism-monthly"
... )
>>> # during training, one provides both past and future values
>>> # as well as possible additional features
>>> outputs = model(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_values=batch["future_values"],
... future_time_features=batch["future_time_features"],
... )
>>> loss = outputs.loss
>>> loss.backward()
>>> # during inference, one only provides past values
>>> # as well as possible additional features
>>> # the model autoregressively generates future values
>>> outputs = model.generate(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_time_features=batch["future_time_features"],
... )
>>> mean_prediction = outputs.sequences.mean(dim=1)
```'''
pass
@torch.no_grad()
def generate(self, past_values: torch.Tensor, past_time_features: torch.Tensor, future_time_features: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> SampleTSPredictionOutput:
'''
Greedily generate sequences of sample predictions from a model with a probability distribution head.
Parameters:
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size
of this tensor must be larger than the `context_length` of the model, since the model will use the
larger size to construct lag features, i.e. additional values from the past which are added in order to
serve as "extra context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if
no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length
of the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features,
such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number
of variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things
like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features).
These could also be so-called "age" features, which basically help the model know "at which point in
life" a time-series is. Age features have small values for distant past time steps and increase
monotonically the more we approach the current time step. Holiday features are also a good example of
time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
where the position encodings are learned from scratch internally as parameters of the model, the Time
Series Transformer requires you to provide additional time features. The Time Series Transformer only
learns additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
features must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to sampled
predictions. These could be things like "month of year", "day of the month", etc. encoded as vectors
(for instance as Fourier features). These could also be so-called "age" features, which basically help
the model know "at which point in life" a time-series is. Age features have small values for distant
past time steps and increase monotonically the more we approach the current time step. Holiday features
are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
where the position encodings are learned from scratch internally as parameters of the model, the Time
Series Transformer requires you to provide additional time features. The Time Series Transformer only
learns additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
features must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to
the values of the time series.
Static categorical features are features which have the same value for all time steps (static over
time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers.
Return:
[`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of
samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for
multivariate predictions.
'''
pass
| 12
| 2
| 47
| 7
| 24
| 16
| 3
| 0.66
| 1
| 14
| 8
| 0
| 7
| 5
| 7
| 8
| 341
| 58
| 171
| 80
| 129
| 113
| 77
| 47
| 69
| 9
| 2
| 2
| 21
|
3,145
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/informer/modeling_informer.py
|
transformers.models.informer.modeling_informer.InformerMeanScaler
|
from torch import nn
from .configuration_informer import InformerConfig
import torch
class InformerMeanScaler(nn.Module):
"""
Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data
accordingly.
"""
def __init__(self, config: InformerConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1
self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True
self.minimum_scale = config.minimum_scale if hasattr(config, 'minimum_scale') else 1e-10
self.default_scale = config.default_scale if hasattr(config, 'default_scale') else None
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input over which the scale is computed
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Boolean mask; the scale is computed only over the observed entries.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True)
num_observed = observed_indicator.sum(self.dim, keepdim=True)
scale = ts_sum / torch.clamp(num_observed, min=1)
if self.default_scale is None:
batch_sum = ts_sum.sum(dim=0)
batch_observations = torch.clamp(num_observed.sum(0), min=1)
default_scale = torch.squeeze(batch_sum / batch_observations)
else:
default_scale = self.default_scale * torch.ones_like(scale)
scale = torch.where(num_observed > 0, scale, default_scale)
scale = torch.clamp(scale, min=self.minimum_scale)
scaled_data = data / scale
if not self.keepdim:
scale = scale.squeeze(dim=self.dim)
return (scaled_data, torch.zeros_like(scale), scale)
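A worked toy example (not part of the file) of the mean-scaling arithmetic above, with one missing time step masked out:

import torch

data = torch.tensor([[[1.0], [-2.0], [0.0], [4.0]]])     # (batch=1, seq=4, channels=1)
observed = torch.tensor([[[1.0], [1.0], [0.0], [1.0]]])  # third step is missing
ts_sum = (data * observed).abs().sum(1, keepdim=True)    # |1| + |-2| + |4| = 7
num_observed = observed.sum(1, keepdim=True)             # 3 observed steps
scale = ts_sum / torch.clamp(num_observed, min=1)        # 7 / 3 ≈ 2.333
scaled = data / scale                                    # what the encoder consumes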
|
class InformerMeanScaler(nn.Module):
'''
Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data
accordingly.
'''
def __init__(self, config: InformerConfig):
pass
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input over which the scale is computed
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Boolean mask; the scale is computed only over the observed entries.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
'''
pass
| 3
| 2
| 23
| 3
| 12
| 8
| 4
| 0.76
| 1
| 3
| 1
| 0
| 2
| 4
| 2
| 12
| 52
| 8
| 25
| 16
| 20
| 19
| 22
| 14
| 19
| 5
| 1
| 1
| 8
|
3,146
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/informer/modeling_informer.py
|
transformers.models.informer.modeling_informer.InformerModel
|
from .configuration_informer import InformerConfig
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, SampleTSPredictionOutput, Seq2SeqTSModelOutput, Seq2SeqTSPredictionOutput
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Callable, Optional, Union
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
import torch
@auto_docstring
class InformerModel(InformerPreTrainedModel):
def __init__(self, config: InformerConfig):
super().__init__(config)
if config.scaling == 'mean' or config.scaling is True:
self.scaler = InformerMeanScaler(config)
elif config.scaling == 'std':
self.scaler = InformerStdScaler(config)
else:
self.scaler = InformerNOPScaler(config)
if config.num_static_categorical_features > 0:
self.embedder = InformerFeatureEmbedder(cardinalities=config.cardinality, embedding_dims=config.embedding_dimension)
self.encoder = InformerEncoder(config)
self.decoder = InformerDecoder(config)
self.post_init()
@property
def _past_length(self) -> int:
return self.config.context_length + max(self.config.lags_sequence)
def get_lagged_subsequences(self, sequence: torch.Tensor, subsequences_length: int, shift: int=0) -> torch.Tensor:
"""
Returns lagged subsequences of a given sequence. Returns a tensor of shape (N, S, C, I),
where S = subsequences_length and I = len(indices), containing lagged subsequences. Specifically, lagged[i,
j, :, k] = sequence[i, -indices[k]-S+j, :].
Args:
sequence: Tensor
The sequence from which lagged subsequences should be extracted. Shape: (N, T, C).
subsequences_length : int
Length of the subsequences to be extracted.
shift: int
Shift the lags by this amount back.
"""
sequence_length = sequence.shape[1]
indices = [lag - shift for lag in self.config.lags_sequence]
if max(indices) + subsequences_length > sequence_length:
raise ValueError(f'lags cannot go further than history length, found lag {max(indices)} while history length is only {sequence_length}')
lagged_values = []
for lag_index in indices:
begin_index = -lag_index - subsequences_length
end_index = -lag_index if lag_index > 0 else None
lagged_values.append(sequence[:, begin_index:end_index, ...])
return torch.stack(lagged_values, dim=-1)
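# Illustrative trace (comments only, not original code): with lags [1, 2], shift=0
# and subsequences_length=3 on sequence = torch.arange(10.).reshape(1, 10, 1):
#   lag 1 -> sequence[:, -4:-1] -> [6., 7., 8.]
#   lag 2 -> sequence[:, -5:-2] -> [5., 6., 7.]
# torch.stack(..., dim=-1) then yields shape (N=1, S=3, C=1, I=2).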
def create_network_inputs(self, past_values: torch.Tensor, past_time_features: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, past_observed_mask: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None):
time_feat = torch.cat((past_time_features[:, self._past_length - self.config.context_length:, ...], future_time_features), dim=1) if future_values is not None else past_time_features[:, self._past_length - self.config.context_length:, ...]
if past_observed_mask is None:
past_observed_mask = torch.ones_like(past_values)
context = past_values[:, -self.config.context_length:]
observed_context = past_observed_mask[:, -self.config.context_length:]
_, loc, scale = self.scaler(context, observed_context)
inputs = (torch.cat((past_values, future_values), dim=1) - loc) / scale if future_values is not None else (past_values - loc) / scale
if loc.ndim == 3:
squeezed_loc = loc.squeeze(1)
squeezed_scale = scale.squeeze(1)
else:
squeezed_loc = loc
squeezed_scale = scale
log_abs_loc = squeezed_loc.abs().log1p()
log_scale = squeezed_scale.log()
static_feat = torch.cat((log_abs_loc, log_scale), dim=1)
if static_real_features is not None:
static_feat = torch.cat((static_real_features, static_feat), dim=1)
if static_categorical_features is not None:
embedded_cat = self.embedder(static_categorical_features)
static_feat = torch.cat((embedded_cat, static_feat), dim=1)
expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_feat.shape[1], -1)
features = torch.cat((expanded_static_feat, time_feat), dim=-1)
subsequences_length = self.config.context_length + self.config.prediction_length if future_values is not None else self.config.context_length
lagged_sequence = self.get_lagged_subsequences(sequence=inputs, subsequences_length=subsequences_length)
lags_shape = lagged_sequence.shape
reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)
if reshaped_lagged_sequence.shape[1] != time_feat.shape[1]:
raise ValueError(f'input length {reshaped_lagged_sequence.shape[1]} and time feature length {time_feat.shape[1]} do not match')
transformer_inputs = torch.cat((reshaped_lagged_sequence, features), dim=-1)
return (transformer_inputs, loc, scale, static_feat)
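# Worked toy trace (comments only, not original code): for scaling='mean' with
# loc = 2.0 and scale = 4.0 in some channel, the static features contributed here
# are log1p(|2.0|) ≈ 1.0986 and log(4.0) ≈ 1.3863; static_real_features and the
# embedded static_categorical_features are prepended, and the result is broadcast
# over time and concatenated with time_feat to build the per-step inputs.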
def get_encoder(self):
return self.encoder
@auto_docstring
def forward(self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Seq2SeqTSModelOutput, tuple]:
"""
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size of
this tensor must be larger than the `context_length` of the model, since the model will use the larger size
to construct lag features, i.e. additional values from the past which are added in order to serve as "extra
context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no
`lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of
the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
`static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step. Holiday features are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires you to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
`[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to the
values of the time series.
Static categorical features are features which have the same value for all time steps (static over time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):
Future values of the time series, that serve as labels for the model. The `future_values` is what the
Transformer needs during training to learn to output, given the `past_values`.
The sequence length here is equal to `prediction_length`.
See the demo notebook and code snippets for details.
Optionally, during training any missing values need to be replaced with zeros and indicated via the
`future_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to `future_values`.
These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as
Fourier features). These could also be so-called "age" features, which basically help the model know "at
which point in life" a time-series is. Age features have small values for distant past time steps and
increase monotonically the more we approach the current time step. Holiday features are also a good example
of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires you to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import InformerModel
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> model = InformerModel.from_pretrained("huggingface/informer-tourism-monthly")
>>> # during training, one provides both past and future values
>>> # as well as possible additional features
>>> outputs = model(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_values=batch["future_values"],
... future_time_features=batch["future_time_features"],
... )
>>> last_hidden_state = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_inputs, loc, scale, static_feat = self.create_network_inputs(past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features)
if encoder_outputs is None:
enc_input = transformer_inputs[:, :self.config.context_length, ...]
encoder_outputs = self.encoder(inputs_embeds=enc_input, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):
encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)
if self.config.context_length >= transformer_inputs.shape[1]:
bsz, _, dim = transformer_inputs.shape
dec_input = torch.zeros(size=(bsz, 1, dim), device=transformer_inputs.device, dtype=transformer_inputs.dtype)
else:
dec_input = transformer_inputs[:, self.config.context_length:, ...]
decoder_outputs = self.decoder(inputs_embeds=dec_input, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
if not return_dict:
return decoder_outputs + encoder_outputs + (loc, scale, static_feat)
return Seq2SeqTSModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, loc=loc, scale=scale, static_features=static_feat)
|
@auto_docstring
class InformerModel(InformerPreTrainedModel):
def __init__(self, config: InformerConfig):
pass
@property
def _past_length(self) -> int:
pass
def get_lagged_subsequences(self, sequence: torch.Tensor, subsequences_length: int, shift: int=0) -> torch.Tensor:
'''
Returns lagged subsequences of a given sequence. Returns a tensor of shape (N, S, C, I),
where S = subsequences_length and I = len(indices), containing lagged subsequences. Specifically, lagged[i,
j, :, k] = sequence[i, -indices[k]-S+j, :].
Args:
sequence: Tensor
The sequence from which lagged subsequences should be extracted. Shape: (N, T, C).
subsequences_length : int
Length of the subsequences to be extracted.
shift: int
Shift the lags by this amount back.
'''
pass
def create_network_inputs(self, past_values: torch.Tensor, past_time_features: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, past_observed_mask: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None):
pass
def get_encoder(self):
pass
@auto_docstring
def forward(self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[list[torch.FloatTensor]]=None, past_key_values: Optional[Cache]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Seq2SeqTSModelOutput, tuple]:
'''
past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):
Past values of the time series, that serve as context in order to predict the future. The sequence size of
this tensor must be larger than the `context_length` of the model, since the model will use the larger size
to construct lag features, i.e. additional values from the past which are added in order to serve as "extra
context".
The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no
`lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest
look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of
the past.
The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as
`static_categorical_features`, `static_real_features`, `past_time_features` and lags).
Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
Required time features, which the model internally will add to `past_values`. These could be things like
"month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
could also be so-called "age" features, which basically help the model know "at which point in life" a
time-series is. Age features have small values for distant past time steps and increase monotonically the
more we approach the current time step. Holiday features are also a good example of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires you to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in
`[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
Optional static categorical features for which the model will learn an embedding, which it will add to the
values of the time series.
Static categorical features are features which have the same value for all time steps (static over time).
A typical example of a static categorical feature is a time series ID.
static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
Optional static real features which the model will add to the values of the time series.
Static real features are features which have the same value for all time steps (static over time).
A typical example of a static real feature is promotion information.
future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):
Future values of the time series, that serve as labels for the model. The `future_values` is what the
Transformer needs during training to learn to output, given the `past_values`.
The sequence length here is equal to `prediction_length`.
See the demo notebook and code snippets for details.
Optionally, during training any missing values need to be replaced with zeros and indicated via the
`future_observed_mask`.
For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of
variates in the time series per time step.
future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
Required time features for the prediction window, which the model internally will add to `future_values`.
These could be things like "month of year", "day of the month", etc. encoded as vectors (for instance as
Fourier features). These could also be so-called "age" features, which basically help the model know "at
which point in life" a time-series is. Age features have small values for distant past time steps and
increase monotonically the more we approach the current time step. Holiday features are also a good example
of time features.
These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where
the position encodings are learned from scratch internally as parameters of the model, the Time Series
Transformer requires you to provide additional time features. The Time Series Transformer only learns
additional embeddings for `static_categorical_features`.
Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features
must be known at prediction time.
The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
`last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
Examples:
```python
>>> from huggingface_hub import hf_hub_download
>>> import torch
>>> from transformers import InformerModel
>>> file = hf_hub_download(
... repo_id="hf-internal-testing/tourism-monthly-batch", filename="train-batch.pt", repo_type="dataset"
... )
>>> batch = torch.load(file)
>>> model = InformerModel.from_pretrained("huggingface/informer-tourism-monthly")
>>> # during training, one provides both past and future values
>>> # as well as possible additional features
>>> outputs = model(
... past_values=batch["past_values"],
... past_time_features=batch["past_time_features"],
... past_observed_mask=batch["past_observed_mask"],
... static_categorical_features=batch["static_categorical_features"],
... static_real_features=batch["static_real_features"],
... future_values=batch["future_values"],
... future_time_features=batch["future_time_features"],
... )
>>> last_hidden_state = outputs.last_hidden_state
```'''
pass
| 10
| 2
| 35
| 4
| 24
| 7
| 4
| 0.26
| 1
| 14
| 9
| 0
| 7
| 4
| 7
| 8
| 255
| 34
| 175
| 70
| 134
| 46
| 70
| 38
| 62
| 10
| 2
| 1
| 31
|
3,147
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/informer/modeling_informer.py
|
transformers.models.informer.modeling_informer.InformerNOPScaler
|
from typing import Callable, Optional, Union
from .configuration_informer import InformerConfig
from torch import nn
import torch
class InformerNOPScaler(nn.Module):
"""
Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data.
"""
def __init__(self, config: InformerConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1
self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True
def forward(self, data: torch.Tensor, observed_indicator: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for which the (no-op) loc and scale are computed
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim)
return (data, loc, scale)
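A one-line sanity check (toy data, not from the file): the no-op scaler returns a loc of zeros and a scale of ones, so the data passes through unchanged:

import torch

data = torch.randn(2, 5, 3)
scale = torch.ones_like(data).mean(dim=1, keepdim=True)  # all ones, shape (2, 1, 3)
loc = torch.zeros_like(data).mean(dim=1, keepdim=True)   # all zeros, shape (2, 1, 3)
assert torch.equal((data - loc) / scale, data)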
|
class InformerNOPScaler(nn.Module):
'''
Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data.
'''
def __init__(self, config: InformerConfig):
pass
def forward(self, data: torch.Tensor, observed_indicator: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for which the (no-op) loc and scale are computed
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
'''
pass
| 3
| 2
| 10
| 0
| 5
| 5
| 2
| 1.09
| 1
| 3
| 1
| 0
| 2
| 2
| 2
| 12
| 25
| 2
| 11
| 9
| 6
| 12
| 9
| 7
| 6
| 3
| 1
| 0
| 4
|
3,148
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/informer/modeling_informer.py
|
transformers.models.informer.modeling_informer.InformerPreTrainedModel
|
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
from .configuration_informer import InformerConfig
from typing import Callable, Optional, Union
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import auto_docstring, is_torch_flex_attn_available, logging
from torch import nn
import torch
@auto_docstring
class InformerPreTrainedModel(PreTrainedModel):
config: InformerConfig
base_model_prefix = 'model'
main_input_name = 'past_values'
supports_gradient_checkpointing = True
def _init_weights(self, module: nn.Module):
super()._init_weights(module)
if isinstance(module, InformerSinusoidalPositionalEmbedding):
module._init_weight()
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
if attention_mask is not None:
if 'flash' in self.config._attn_implementation:
attention_mask = attention_mask if 0 in attention_mask else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, inputs_embeds.dtype)
elif self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask, is_causal=False)
else:
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
return attention_mask
def _update_causal_mask(self, attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor, past_key_values_length: int):
if self.config._attn_implementation == 'flash_attention_2':
attention_mask = attention_mask if attention_mask is not None and 0 in attention_mask else None
elif self.config._attn_implementation == 'sdpa':
attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(attention_mask, input_shape, inputs_embeds, past_key_values_length)
elif self.config._attn_implementation == 'flex_attention':
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask)
elif attention_mask is None:
attention_mask = make_flex_block_causal_mask(torch.ones(size=input_shape, device=inputs_embeds.device))
else:
attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length)
return attention_mask
def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
if encoder_hidden_states is not None and encoder_attention_mask is not None:
if self.config._attn_implementation == 'flash_attention_2':
encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
elif self.config._attn_implementation == 'sdpa':
encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
elif self.config._attn_implementation == 'flex_attention':
if isinstance(encoder_attention_mask, torch.Tensor):
encoder_attention_mask = make_flex_block_causal_mask(encoder_attention_mask, query_length=input_shape[-1], is_causal=False)
else:
encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
return encoder_attention_mask
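A hedged sketch (toy sizes) of the 4D additive causal mask the eager path ultimately builds; the real helpers also handle padding masks and past key/value length:

import torch

tgt_len, dtype = 4, torch.float32
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min)
mask = torch.triu(mask, diagonal=1)   # future positions get a large negative bias
mask_4d = mask[None, None, :, :]      # broadcast to (batch, num_heads, tgt, src)
print(mask_4d[0, 0])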
|
@auto_docstring
class InformerPreTrainedModel(PreTrainedModel):
def _init_weights(self, module: nn.Module):
pass
def _update_full_mask(self, attention_mask: Union[torch.Tensor, None], inputs_embeds: torch.Tensor):
pass
def _update_causal_mask(self, attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor, past_key_values_length: int):
pass
def _update_cross_attn_mask(self, encoder_hidden_states: Union[torch.Tensor, None], encoder_attention_mask: Union[torch.Tensor, None], input_shape: torch.Size, inputs_embeds: torch.Tensor):
pass
| 6
| 0
| 10
| 0
| 10
| 0
| 5
| 0
| 1
| 1
| 1
| 4
| 1
| 0
| 1
| 1
| 16
| 1
| 15
| 7
| 13
| 0
| 14
| 7
| 12
| 5
| 1
| 2
| 5
|
3,149
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/informer/modeling_informer.py
|
transformers.models.informer.modeling_informer.InformerProbSparseAttention
|
import numpy as np
from ...utils.deprecation import deprecate_kwarg
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Callable, Optional, Union
from torch import nn
import torch
class InformerProbSparseAttention(nn.Module):
"""Probabilistic Attention mechanism to select the "active"
queries rather than the "lazy" queries and provides a sparse Transformer thus mitigating the quadratic compute and
memory requirements of vanilla attention"""
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, sampling_factor: int=5, bias: bool=True, layer_idx: Optional[int]=None):
super().__init__()
self.factor = sampling_factor
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if self.head_dim * num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
self.scaling = self.head_dim ** (-0.5)
self.is_decoder = is_decoder
self.layer_idx = layer_idx
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
kv_input_shape = (bsz, src_len, -1, self.head_dim)
query_states = self.q_proj(hidden_states) * self.scaling
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
key_states = curr_past_key_value.layers[self.layer_idx].keys
value_states = curr_past_key_value.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(*kv_input_shape).transpose(1, 2)
value_states = value_states.view(*kv_input_shape).transpose(1, 2)
if past_key_values is not None:
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position})
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.reshape(*proj_shape)
value_states = value_states.reshape(*proj_shape)
key_states_time_length = key_states.size(1)
log_key_states_time_length = np.ceil(np.log1p(key_states_time_length)).astype('int').item()
query_states_time_length = query_states.size(1)
log_query_states_time_length = np.ceil(np.log1p(query_states_time_length)).astype('int').item()
u_part = min(self.factor * query_states_time_length * log_key_states_time_length, key_states_time_length)
u = min(self.factor * log_query_states_time_length, query_states_time_length)
if key_states_time_length > 0:
index_sample = torch.randint(0, key_states_time_length, (u_part,))
k_sample = key_states[:, index_sample, :]
else:
k_sample = key_states
queries_keys_sample = torch.bmm(query_states, k_sample.transpose(1, 2))
if u > 0:
sparsity_measurement = queries_keys_sample.max(dim=-1)[0] - torch.div(queries_keys_sample.sum(dim=-1), key_states_time_length)
top_u_sparsity_measurement = sparsity_measurement.topk(u, sorted=False)[1]
dim_for_slice = torch.arange(query_states.size(0)).unsqueeze(-1)
q_reduce = query_states[dim_for_slice, top_u_sparsity_measurement]
else:
q_reduce = query_states
top_u_sparsity_measurement = None
attn_weights = torch.bmm(q_reduce, key_states.transpose(1, 2))
src_len = key_states.size(1)
if attn_weights.size() != (bsz * self.num_heads, u, src_len):
raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, u, src_len)}, but is {attn_weights.size()}')
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}')
prob_mask = attention_mask.expand(bsz, self.num_heads, tgt_len, src_len).reshape(bsz * self.num_heads, tgt_len, src_len)
if top_u_sparsity_measurement is not None:
dim_for_slice = torch.arange(prob_mask.size(0)).unsqueeze(-1)
prob_mask = prob_mask[dim_for_slice, top_u_sparsity_measurement, :]
attn_weights = attn_weights.view(bsz, self.num_heads, u, src_len) + prob_mask.view(bsz, self.num_heads, u, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, u, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(f'Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}')
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, u, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, u, src_len)
if output_attentions:
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, u, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, u, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if self.is_decoder:
context = value_states.cumsum(dim=-2, dtype=torch.float32).to(value_states.dtype)
else:
v_mean_dim_time = value_states.mean(dim=-2)
context = v_mean_dim_time.unsqueeze(dim=1).expand(bsz * self.num_heads, query_states_time_length, v_mean_dim_time.size(-1)).clone()
if top_u_sparsity_measurement is not None:
dim_for_slice = torch.arange(context.size(0)).unsqueeze(-1)
context[dim_for_slice, top_u_sparsity_measurement, :] = attn_output
attn_output = context
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(f'`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}')
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return (attn_output, attn_weights_reshaped)
|
class InformerProbSparseAttention(nn.Module):
'''Probabilistic attention mechanism that selects the "active"
queries rather than the "lazy" queries, providing a sparse Transformer that mitigates the quadratic compute and
memory requirements of vanilla attention.'''
def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, sampling_factor: int=5, bias: bool=True, layer_idx: Optional[int]=None):
pass
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
pass
@deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58')
def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| total_program_units: 5 | total_doc_str: 2 | AvgCountLine: 70 | AvgCountLineBlank: 10 | AvgCountLineCode: 49 | AvgCountLineComment: 13 | AvgCyclomatic: 7 | CommentToCodeRatio: 0.28 | CountClassBase: 1 | CountClassCoupled: 6 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 3 | CountDeclInstanceVariable: 11 | CountDeclMethod: 3 | CountDeclMethodAll: 13 | CountLine: 216 | CountLineBlank: 33 | CountLineCode: 148 | CountLineCodeDecl: 58 | CountLineCodeExe: 128 | CountLineComment: 42 | CountStmt: 97 | CountStmtDecl: 42 | CountStmtExe: 93 | MaxCyclomatic: 17 | MaxInheritanceTree: 1 | MaxNesting: 2 | SumCyclomatic: 20
|
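The `forward` above derives its sampling budgets from logarithms of the sequence lengths: `u_part` keys are randomly sampled, and only the `u` queries with the highest sparsity measurement attend over the full key set. A minimal sketch of just that bookkeeping, with made-up lengths and a stand-in score tensor (not the module's actual API):

```python
import numpy as np
import torch

factor = 5                   # corresponds to `sampling_factor` above
key_len, query_len = 96, 96  # illustrative sequence lengths

# Log-scaled budgets, as in the forward pass above.
log_key_len = int(np.ceil(np.log1p(key_len)))
log_query_len = int(np.ceil(np.log1p(query_len)))
u_part = min(factor * query_len * log_key_len, key_len)  # keys sampled
u = min(factor * log_query_len, query_len)               # "active" queries kept

# Sparsity measurement on the sampled scores: max minus (sum / full key length).
scores = torch.randn(2, query_len, u_part)               # (bsz*heads, L_Q, u_part)
sparsity = scores.max(dim=-1).values - scores.sum(dim=-1) / key_len
top_queries = sparsity.topk(u, sorted=False).indices     # indices of active queries
print(u_part, u, top_queries.shape)                      # 96 25 torch.Size([2, 25])
```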
3,150
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/informer/modeling_informer.py
|
transformers.models.informer.modeling_informer.InformerSinusoidalPositionalEmbedding
|
from typing import Callable, Optional, Union
from torch import nn
import numpy as np
import torch
class InformerSinusoidalPositionalEmbedding(nn.Embedding):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None) -> None:
super().__init__(num_positions, embedding_dim)
def _init_weight(self):
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
n_pos, dim = self.weight.shape
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
out = torch.empty(n_pos, dim, dtype=self.weight.dtype, requires_grad=False)
sentinel = dim // 2 if dim % 2 == 0 else dim // 2 + 1
out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
self.weight = nn.Parameter(out, requires_grad=False)
@torch.no_grad()
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None) -> torch.Tensor:
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
if position_ids is None:
bsz, seq_len = input_ids_shape[:2]
position_ids = torch.arange(past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device)
return super().forward(position_ids)
|
class InformerSinusoidalPositionalEmbedding(nn.Embedding):
'''This module produces sinusoidal positional embeddings of any length.'''
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int]=None) -> None:
pass
def _init_weight(self):
'''
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
'''
pass
@torch.no_grad()
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int=0, position_ids: Optional[torch.Tensor]=None) -> torch.Tensor:
'''`input_ids_shape` is expected to be [bsz x seqlen].'''
pass
| total_program_units: 5 | total_doc_str: 3 | AvgCountLine: 8 | AvgCountLineBlank: 0 | AvgCountLineCode: 7 | AvgCountLineComment: 2 | AvgCyclomatic: 1 | CommentToCodeRatio: 0.3 | CountClassBase: 1 | CountClassCoupled: 4 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 1 | CountDeclMethod: 3 | CountDeclMethodAll: 3 | CountLine: 32 | CountLineBlank: 3 | CountLineCode: 23 | CountLineCodeDecl: 12 | CountLineCodeExe: 17 | CountLineComment: 7 | CountStmt: 17 | CountStmtDecl: 10 | CountStmtExe: 13 | MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 4
|
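`_init_weight` fills the table with non-interleaved sinusoids: sine features occupy the first half of each embedding vector and cosine features the second half. A standalone reconstruction of that table for a small, even `dim` (odd dims use `dim // 2 + 1` as the split point):

```python
import numpy as np
import torch

n_pos, dim = 6, 8  # small, even-dimensional example
position_enc = np.array(
    [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
out = torch.empty(n_pos, dim)
sentinel = dim // 2
out[:, :sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))  # sin half
out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))  # cos half
print(out[0])  # position 0: sin(0) = 0 in the first half, cos(0) = 1 in the second
```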
3,151
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/informer/modeling_informer.py
|
transformers.models.informer.modeling_informer.InformerStdScaler
|
from torch import nn
from .configuration_informer import InformerConfig
import torch
class InformerStdScaler(nn.Module):
"""
Standardizes features by computing the mean and standard deviation along the first dimension, then normalizes the
data by subtracting the mean and dividing by the standard deviation.
"""
def __init__(self, config: InformerConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, 'scaling_dim') else 1
self.keepdim = config.keepdim if hasattr(config, 'keepdim') else True
self.minimum_scale = config.minimum_scale if hasattr(config, 'minimum_scale') else 1e-05
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim)
denominator = denominator.clamp_min(1.0)
loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator
variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator
scale = torch.sqrt(variance + self.minimum_scale)
return ((data - loc) / scale, loc, scale)
|
class InformerStdScaler(nn.Module):
'''
Standardizes features by computing the mean and standard deviation along the first dimension, then normalizes the
data by subtracting the mean and dividing by the standard deviation.
'''
def __init__(self, config: InformerConfig):
pass
def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
'''
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
'''
pass
| total_program_units: 3 | total_doc_str: 2 | AvgCountLine: 13 | AvgCountLineBlank: 1 | AvgCountLineCode: 7 | AvgCountLineComment: 6 | AvgCyclomatic: 3 | CommentToCodeRatio: 1 | CountClassBase: 1 | CountClassCoupled: 3 | CountClassCoupledModified: 1 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 3 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 33 | CountLineBlank: 3 | CountLineCode: 15 | CountLineCodeDecl: 12 | CountLineCodeExe: 10 | CountLineComment: 15 | CountStmt: 13 | CountStmtDecl: 10 | CountStmtExe: 10 | MaxCyclomatic: 4 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 5
|
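The scaler's statistics are computed only over observed entries: the sum of `observed_indicator` acts as the denominator, clamped to at least one to avoid division by zero. A small numeric check of that arithmetic with made-up values:

```python
import torch

data = torch.tensor([[[1.0], [2.0], [3.0], [100.0]]])    # (batch=1, seq=4, channels=1)
observed = torch.tensor([[[1.0], [1.0], [1.0], [0.0]]])  # the outlier is unobserved

denom = observed.sum(dim=1, keepdim=True).clamp_min(1.0)
loc = (data * observed).sum(dim=1, keepdim=True) / denom            # masked mean
var = (((data - loc) * observed) ** 2).sum(dim=1, keepdim=True) / denom
scale = torch.sqrt(var + 1e-05)                                     # plus minimum_scale
print(loc.item(), scale.item())  # 2.0 and ~0.8165; the 100.0 is ignored
```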
3,152
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/informer/modeling_informer.py
|
transformers.models.informer.modeling_informer.InformerValueEmbedding
|
from torch import nn
class InformerValueEmbedding(nn.Module):
def __init__(self, feature_size, d_model):
super().__init__()
self.value_projection = nn.Linear(in_features=feature_size, out_features=d_model, bias=False)
def forward(self, x):
return self.value_projection(x)
|
class InformerValueEmbedding(nn.Module):
def __init__(self, feature_size, d_model):
pass
def forward(self, x):
pass
| total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 3 | AvgCountLineBlank: 0 | AvgCountLineCode: 3 | AvgCountLineComment: 0 | AvgCyclomatic: 1 | CommentToCodeRatio: 0 | CountClassBase: 1 | CountClassCoupled: 1 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 1 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 7 | CountLineBlank: 1 | CountLineCode: 6 | CountLineCodeDecl: 4 | CountLineCodeExe: 3 | CountLineComment: 0 | CountStmt: 6 | CountStmtDecl: 4 | CountStmtExe: 3 | MaxCyclomatic: 1 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 2
|
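The value embedding is nothing more than a bias-free linear map from the raw feature dimension to the model width; a shape-level sketch with illustrative sizes:

```python
import torch
from torch import nn

feature_size, d_model = 7, 32
value_projection = nn.Linear(feature_size, d_model, bias=False)

x = torch.randn(4, 48, feature_size)   # (batch, time, features)
print(value_projection(x).shape)       # torch.Size([4, 48, 32])
```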
3,153
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/configuration_instructblip.py
|
transformers.models.instructblip.configuration_instructblip.InstructBlipConfig
|
from ..auto import CONFIG_MAPPING, AutoConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...configuration_utils import PretrainedConfig
class InstructBlipConfig(PretrainedConfig):
"""
[`InstructBlipConfig`] is the configuration class to store the configuration of a
[`InstructBlipForConditionalGeneration`]. It is used to instantiate an InstructBLIP model according to the specified
arguments, defining the vision model, Q-Former model and language model configs. Instantiating a configuration with
the defaults will yield a similar configuration to that of the InstructBLIP
[Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`InstructBlipVisionConfig`].
qformer_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`InstructBlipQFormerConfig`].
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize any [`PretrainedConfig`].
num_query_tokens (`int`, *optional*, defaults to 32):
The number of query tokens passed through the Transformer.
image_token_index (`int`, *optional*):
Token index of special image token.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import (
... InstructBlipVisionConfig,
... InstructBlipQFormerConfig,
... OPTConfig,
... InstructBlipConfig,
... InstructBlipForConditionalGeneration,
... )
>>> # Initializing a InstructBlipConfig with Salesforce/instruct-blip-flan-t5 style configuration
>>> configuration = InstructBlipConfig()
>>> # Initializing a InstructBlipForConditionalGeneration (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
>>> model = InstructBlipForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a InstructBlipConfig from a InstructBlipVisionConfig, InstructBlipQFormerConfig and any PretrainedConfig
>>> # Initializing InstructBLIP vision, InstructBLIP Q-Former and language model configurations
>>> vision_config = InstructBlipVisionConfig()
>>> qformer_config = InstructBlipQFormerConfig()
>>> text_config = OPTConfig()
>>> config = InstructBlipConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
```"""
model_type = 'instructblip'
attribute_map = {'image_token_id': 'image_token_index'}
sub_configs = {'text_config': AutoConfig, 'qformer_config': InstructBlipQFormerConfig, 'vision_config': InstructBlipVisionConfig}
def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, image_token_index=None, **kwargs):
super().__init__(**kwargs)
if vision_config is None:
vision_config = {}
logger.info('vision_config is None. Initializing the InstructBlipVisionConfig with default values.')
if qformer_config is None:
qformer_config = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')
if text_config is None:
text_config = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
self.vision_config = InstructBlipVisionConfig(**vision_config)
self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
text_model_type = text_config.get('model_type', 'opt')
self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
self.num_query_tokens = num_query_tokens
self.image_token_index = image_token_index
self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
self.initializer_factor = 1.0
self.initializer_range = 0.02
@classmethod
def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVisionConfig, qformer_config: InstructBlipQFormerConfig, text_config: PretrainedConfig, **kwargs):
"""
Instantiate a [`InstructBlipConfig`] (or a derived class) from a InstructBLIP vision model, Q-Former and
language model configurations.
Returns:
[`InstructBlipConfig`]: An instance of a configuration object
"""
return cls(vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs)
|
class InstructBlipConfig(PretrainedConfig):
'''
[`InstructBlipConfig`] is the configuration class to store the configuration of a
[`InstructBlipForConditionalGeneration`]. It is used to instantiate an InstructBLIP model according to the specified
arguments, defining the vision model, Q-Former model and language model configs. Instantiating a configuration with
the defaults will yield a similar configuration to that of the InstructBLIP
[Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`InstructBlipVisionConfig`].
qformer_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`InstructBlipQFormerConfig`].
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize any [`PretrainedConfig`].
num_query_tokens (`int`, *optional*, defaults to 32):
The number of query tokens passed through the Transformer.
image_token_index (`int`, *optional*):
Token index of special image token.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import (
... InstructBlipVisionConfig,
... InstructBlipQFormerConfig,
... OPTConfig,
... InstructBlipConfig,
... InstructBlipForConditionalGeneration,
... )
>>> # Initializing a InstructBlipConfig with Salesforce/instruct-blip-flan-t5 style configuration
>>> configuration = InstructBlipConfig()
>>> # Initializing a InstructBlipForConditionalGeneration (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
>>> model = InstructBlipForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a InstructBlipConfig from a InstructBlipVisionConfig, InstructBlipQFormerConfig and any PretrainedConfig
>>> # Initializing InstructBLIP vision, InstructBLIP Q-Former and language model configurations
>>> vision_config = InstructBlipVisionConfig()
>>> qformer_config = InstructBlipQFormerConfig()
>>> text_config = OPTConfig()
>>> config = InstructBlipConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
```'''
def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, image_token_index=None, **kwargs):
pass
@classmethod
def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVisionConfig, qformer_config: InstructBlipQFormerConfig, text_config: PretrainedConfig, **kwargs):
'''
Instantiate a [`InstructBlipConfig`] (or a derived class) from a InstructBLIP vision model, Q-Former and
language model configurations.
Returns:
[`InstructBlipConfig`]: An instance of a configuration object
'''
pass
| total_program_units: 4 | total_doc_str: 2 | AvgCountLine: 28 | AvgCountLineBlank: 4 | AvgCountLineCode: 21 | AvgCountLineComment: 3 | AvgCyclomatic: 3 | CommentToCodeRatio: 0.98 | CountClassBase: 1 | CountClassCoupled: 3 | CountClassCoupledModified: 2 | CountClassDerived: 0 | CountDeclInstanceMethod: 1 | CountDeclInstanceVariable: 8 | CountDeclMethod: 2 | CountDeclMethodAll: 2 | CountLine: 120 | CountLineBlank: 21 | CountLineCode: 50 | CountLineCodeDecl: 29 | CountLineCodeExe: 32 | CountLineComment: 49 | CountStmt: 26 | CountStmtDecl: 14 | CountStmtExe: 23 | MaxCyclomatic: 5 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 6
|
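Besides passing plain dicts to `__init__`, the composite config can be assembled from already-instantiated sub-configs through the classmethod defined above. A sketch expanding on the docstring example, which also checks the cross-attention wiring performed in `__init__` (imports assume the classes are available from `transformers`):

```python
from transformers import (
    InstructBlipConfig,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    OPTConfig,
)

vision_config = InstructBlipVisionConfig()
qformer_config = InstructBlipQFormerConfig()
text_config = OPTConfig()

config = InstructBlipConfig.from_vision_qformer_text_configs(
    vision_config, qformer_config, text_config
)
# __init__ wires the Q-Former's cross-attention width to the vision hidden size.
assert config.qformer_config.encoder_hidden_size == vision_config.hidden_size
```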
3,154
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/configuration_instructblip.py
|
transformers.models.instructblip.configuration_instructblip.InstructBlipQFormerConfig
|
from ...configuration_utils import PretrainedConfig
class InstructBlipQFormerConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`InstructBlipQFormerModel`]. It is used to
instantiate an InstructBLIP Querying Transformer (Q-Former) model according to the specified arguments, defining the
model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
the InstructBLIP [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5)
architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs.
Read the documentation from [`PretrainedConfig`] for more information.
Note that [`InstructBlipQFormerModel`] is very similar to [`BertLMHeadModel`] with interleaved cross-attention.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the Q-Former model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling the model.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Token id used for padding sequences.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
cross_attention_frequency (`int`, *optional*, defaults to 2):
The frequency of adding cross-attention to the Transformer layers.
encoder_hidden_size (`int`, *optional*, defaults to 1408):
The hidden size of the hidden states for cross-attention.
Examples:
```python
>>> from transformers import InstructBlipQFormerConfig, InstructBlipQFormerModel
>>> # Initializing a InstructBLIP Salesforce/instruct-blip-flan-t5 style configuration
>>> configuration = InstructBlipQFormerConfig()
>>> # Initializing a model (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
>>> model = InstructBlipQFormerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'instructblip_qformer'
base_config_key = 'qformer_config'
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.cross_attention_frequency = cross_attention_frequency
self.encoder_hidden_size = encoder_hidden_size
|
class InstructBlipQFormerConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`InstructBlipQFormerModel`]. It is used to
instantiate an InstructBLIP Querying Transformer (Q-Former) model according to the specified arguments, defining the
model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
the InstructBLIP [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5)
architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs.
Read the documentation from [`PretrainedConfig`] for more information.
Note that [`InstructBlipQFormerModel`] is very similar to [`BertLMHeadModel`] with interleaved cross-attention.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the Q-Former model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling the model.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Token id used for padding sequences.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
cross_attention_frequency (`int`, *optional*, defaults to 2):
The frequency of adding cross-attention to the Transformer layers.
encoder_hidden_size (`int`, *optional*, defaults to 1408):
The hidden size of the hidden states for cross-attention.
Examples:
```python
>>> from transformers import InstructBlipQFormerConfig, InstructBlipQFormerModel
>>> # Initializing a InstructBLIP Salesforce/instruct-blip-flan-t5 style configuration
>>> configuration = InstructBlipQFormerConfig()
>>> # Initializing a model (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
>>> model = InstructBlipQFormerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
pass
| total_program_units: 2 | total_doc_str: 1 | AvgCountLine: 35 | AvgCountLineBlank: 1 | AvgCountLineCode: 34 | AvgCountLineComment: 0 | AvgCyclomatic: 1 | CommentToCodeRatio: 1.51 | CountClassBase: 1 | CountClassCoupled: 1 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 1 | CountDeclInstanceVariable: 14 | CountDeclMethod: 1 | CountDeclMethodAll: 1 | CountLine: 102 | CountLineBlank: 9 | CountLineCode: 37 | CountLineCodeDecl: 36 | CountLineCodeExe: 17 | CountLineComment: 56 | CountStmt: 19 | CountStmtDecl: 18 | CountStmtExe: 17 | MaxCyclomatic: 1 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 1
|
3,155
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/configuration_instructblip.py
|
transformers.models.instructblip.configuration_instructblip.InstructBlipVisionConfig
|
from ...configuration_utils import PretrainedConfig
class InstructBlipVisionConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`InstructBlipVisionModel`]. It is used to
instantiate an InstructBLIP vision encoder according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the InstructBLIP
[Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1408):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 6144):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 39):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 1e-10):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries and values in the self-attention layers.
Example:
```python
>>> from transformers import InstructBlipVisionConfig, InstructBlipVisionModel
>>> # Initializing a InstructBlipVisionConfig with Salesforce/instruct-blip-flan-t5 style configuration
>>> configuration = InstructBlipVisionConfig()
>>> # Initializing a InstructBlipVisionModel (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
>>> model = InstructBlipVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'instructblip_vision_model'
base_config_key = 'vision_config'
def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act='gelu', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.patch_size = patch_size
self.image_size = image_size
self.initializer_range = initializer_range
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.qkv_bias = qkv_bias
|
class InstructBlipVisionConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`InstructBlipVisionModel`]. It is used to
instantiate an InstructBLIP vision encoder according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the InstructBLIP
[Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1408):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 6144):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 39):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 1e-10):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries and values in the self-attention layers.
Example:
```python
>>> from transformers import InstructBlipVisionConfig, InstructBlipVisionModel
>>> # Initializing a InstructBlipVisionConfig with Salesforce/instruct-blip-flan-t5 style configuration
>>> configuration = InstructBlipVisionConfig()
>>> # Initializing a InstructBlipVisionModel (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
>>> model = InstructBlipVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act='gelu', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
pass
| total_program_units: 2 | total_doc_str: 1 | AvgCountLine: 28 | AvgCountLineBlank: 1 | AvgCountLineCode: 27 | AvgCountLineComment: 0 | AvgCyclomatic: 1 | CommentToCodeRatio: 1.4 | CountClassBase: 1 | CountClassCoupled: 1 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 1 | CountDeclInstanceVariable: 11 | CountDeclMethod: 1 | CountDeclMethodAll: 1 | CountLine: 82 | CountLineBlank: 10 | CountLineCode: 30 | CountLineCodeDecl: 29 | CountLineCodeExe: 14 | CountLineComment: 42 | CountStmt: 16 | CountStmtDecl: 15 | CountStmtExe: 14 | MaxCyclomatic: 1 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 1
|
3,156
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipAttention
|
import torch
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from typing import Any, Callable, Optional, Union
from torch import nn
class InstructBlipAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
self.scale = self.head_dim ** (-0.5)
self.is_causal = False
self.attention_dropout = config.attention_dropout
self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False)
if config.qkv_bias:
q_bias = nn.Parameter(torch.zeros(self.embed_dim))
v_bias = nn.Parameter(torch.zeros(self.embed_dim))
else:
q_bias = None
v_bias = None
if q_bias is not None:
qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
self.qkv.bias = nn.Parameter(qkv_bias)
self.projection = nn.Linear(self.embed_dim, self.embed_dim)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, tgt_len, embed_dim = hidden_states.size()
mixed_qkv = self.qkv(hidden_states)
mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(2, 0, 3, 1, 4)
query_states, key_states, value_states = (mixed_qkv[0], mixed_qkv[1], mixed_qkv[2])
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask=None, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scale, **kwargs)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.projection(attn_output)
return (attn_output, attn_weights)
|
class InstructBlipAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config):
pass
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| total_program_units: 4 | total_doc_str: 2 | AvgCountLine: 25 | AvgCountLineBlank: 6 | AvgCountLineCode: 17 | AvgCountLineComment: 2 | AvgCyclomatic: 3 | CommentToCodeRatio: 0.16 | CountClassBase: 1 | CountClassCoupled: 5 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 3 | CountDeclInstanceVariable: 8 | CountDeclMethod: 3 | CountDeclMethodAll: 13 | CountLine: 79 | CountLineBlank: 20 | CountLineCode: 51 | CountLineCodeDecl: 29 | CountLineCodeExe: 42 | CountLineComment: 8 | CountStmt: 40 | CountStmtDecl: 24 | CountStmtExe: 36 | MaxCyclomatic: 4 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 8
|
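A subtlety in `__init__` above is that the fused `qkv` projection is created without a bias, and when `qkv_bias` is set a bias vector is assembled as `[q_bias, zeros, v_bias]`, so the key slice starts at zero. A simplified standalone sketch of that construction (dimensions illustrative, plain tensors instead of separate parameters):

```python
import torch
from torch import nn

embed_dim = 8
qkv = nn.Linear(embed_dim, 3 * embed_dim, bias=False)

# Simplified mirror of the construction above: query and value slices get
# biases while the key slice is initialized to zero; the concatenated vector
# ends up as a single bias parameter on the fused projection.
q_bias = torch.zeros(embed_dim)
v_bias = torch.zeros(embed_dim)
qkv.bias = nn.Parameter(torch.cat((q_bias, torch.zeros_like(v_bias), v_bias)))

out = qkv(torch.randn(2, 4, embed_dim))
print(out.shape)  # torch.Size([2, 4, 24])
```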
3,157
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipEncoder
|
import torch
from typing import Any, Callable, Optional, Union
from ...processing_utils import Unpack
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions
from .configuration_instructblip import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig
from torch import nn
class InstructBlipEncoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
[`InstructBlipEncoderLayer`].
Args:
config (`InstructBlipConfig`):
The corresponding vision configuration for the `InstructBlipEncoder`.
"""
def __init__(self, config: InstructBlipConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([InstructBlipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@auto_docstring
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutput]:
hidden_states = inputs_embeds
for encoder_layer in self.layers:
hidden_states = encoder_layer(hidden_states, attention_mask=attention_mask, **kwargs)
return BaseModelOutput(last_hidden_state=hidden_states)
|
class InstructBlipEncoder(nn.Module):
'''
Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
[`InstructBlipEncoderLayer`].
Args:
config (`InstructBlipConfig`):
The corresponding vision configuration for the `InstructBlipEncoder`.
'''
def __init__(self, config: InstructBlipConfig):
pass
@auto_docstring
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutput]:
pass
| total_program_units: 4 | total_doc_str: 1 | AvgCountLine: 37 | AvgCountLineBlank: 4 | AvgCountLineCode: 24 | AvgCountLineComment: 9 | AvgCyclomatic: 7 | CommentToCodeRatio: 0.52 | CountClassBase: 1 | CountClassCoupled: 9 | CountClassCoupledModified: 3 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 3 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 84 | CountLineBlank: 11 | CountLineCode: 48 | CountLineCodeDecl: 18 | CountLineCodeExe: 38 | CountLineComment: 25 | CountStmt: 27 | CountStmtDecl: 11 | CountStmtExe: 24 | MaxCyclomatic: 12 | MaxInheritanceTree: 1 | MaxNesting: 2 | SumCyclomatic: 13
|
3,158
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipEncoderLayer
|
from torch import nn
from ...processing_utils import Unpack
import torch
from .configuration_instructblip import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig
from ...modeling_layers import GradientCheckpointingLayer
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
class InstructBlipEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: InstructBlipConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = InstructBlipAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = InstructBlipMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
@auto_docstring
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, head_mask=attention_mask, **kwargs)
hidden_states = hidden_states + residual
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = hidden_states + residual
return hidden_states
|
class InstructBlipEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: InstructBlipConfig):
pass
@auto_docstring
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
pass
| total_program_units: 4 | total_doc_str: 0 | AvgCountLine: 22 | AvgCountLineBlank: 3 | AvgCountLineCode: 15 | AvgCountLineComment: 5 | AvgCyclomatic: 2 | CommentToCodeRatio: 0.33 | CountClassBase: 1 | CountClassCoupled: 6 | CountClassCoupledModified: 3 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 5 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 46 | CountLineBlank: 6 | CountLineCode: 30 | CountLineCodeDecl: 16 | CountLineCodeExe: 22 | CountLineComment: 10 | CountStmt: 21 | CountStmtDecl: 11 | CountStmtExe: 18 | MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 1 | SumCyclomatic: 3
|
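The encoder layer above is a standard pre-LayerNorm residual block: normalize, apply the sub-layer, add the input back, twice per layer. A minimal stand-in showing just that control flow (the linear layers are placeholders, not the real attention/MLP modules):

```python
import torch
from torch import nn

class PreNormBlock(nn.Module):
    """Pre-LN residual wiring; linear layers stand in for attention and MLP."""

    def __init__(self, dim):
        super().__init__()
        self.layer_norm1 = nn.LayerNorm(dim)
        self.layer_norm2 = nn.LayerNorm(dim)
        self.self_attn = nn.Linear(dim, dim)  # placeholder for self-attention
        self.mlp = nn.Linear(dim, dim)        # placeholder for the MLP

    def forward(self, hidden_states):
        hidden_states = hidden_states + self.self_attn(self.layer_norm1(hidden_states))
        hidden_states = hidden_states + self.mlp(self.layer_norm2(hidden_states))
        return hidden_states

print(PreNormBlock(16)(torch.randn(2, 5, 16)).shape)  # torch.Size([2, 5, 16])
```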
3,159
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipForConditionalGeneration
|
from ..auto import AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM
import torch
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from torch import nn
from typing import Any, Callable, Optional, Union
from ...generation import GenerationMixin
from ...processing_utils import Unpack
from .configuration_instructblip import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig
@auto_docstring(custom_intro='\n InstructBLIP Model for generating text given an image and an optional text prompt. The model consists of a vision\n encoder, Querying Transformer (Q-Former) and a language model.\n\n One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue\n the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.\n ')
class InstructBlipForConditionalGeneration(InstructBlipPreTrainedModel, GenerationMixin):
config: InstructBlipConfig
main_input_name = 'pixel_values'
_can_compile_fullgraph = True
_keep_in_fp32_modules = ['query_tokens']
def __init__(self, config: InstructBlipConfig):
super().__init__(config)
self.vision_model = InstructBlipVisionModel._from_config(config.vision_config)
self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
self.qformer = InstructBlipQFormerModel._from_config(config.qformer_config)
self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size)
if config.use_decoder_only_language_model:
language_model = AutoModelForCausalLM.from_config(config.text_config)
else:
language_model = AutoModelForSeq2SeqLM.from_config(config.text_config)
if language_model._no_split_modules is not None:
self._no_split_modules.extend(language_model._no_split_modules)
if language_model._keep_in_fp32_modules is not None:
self._keep_in_fp32_modules.extend(language_model._keep_in_fp32_modules)
self.language_model = language_model
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def set_output_embeddings(self, new_embeddings):
self.language_model.set_output_embeddings(new_embeddings)
def get_output_embeddings(self) -> nn.Module:
return self.language_model.get_output_embeddings()
def get_encoder(self):
return self.language_model.get_encoder()
def get_decoder(self):
return self.language_model.get_decoder()
def _tie_weights(self):
if not self.config.use_decoder_only_language_model:
self.language_model.encoder.embed_tokens = self.language_model.shared
self.language_model.decoder.embed_tokens = self.language_model.shared
def _preprocess_accelerate(self):
"""
Some pre-processing hacks to make the model `accelerate` compatible. Check
https://github.com/huggingface/transformers/pull/21707 for more details.
"""
hf_device_map = self.hf_device_map
if len(hf_device_map) > 1 and 'language_model' not in hf_device_map and (torch.cuda.device_count() > 1):
logger.warning('The `language_model` is not in the `hf_device_map` dictionary and you are running your script in a multi-GPU environment. This may lead to unexpected behavior when using `accelerate`. Please pass a `device_map` that contains `language_model` to remove this warning. Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for more details on creating a `device_map` for large models.')
if hasattr(self.language_model, '_hf_hook'):
self.language_model._hf_hook.io_same_device = True
def get_image_features(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.LongTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=False):
"""
Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
"""
vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)
image_embeds = vision_outputs[0]
image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
if qformer_attention_mask is None:
qformer_attention_mask = torch.ones_like(qformer_input_ids)
qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
query_outputs = self.qformer(input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True)
query_output = query_outputs[0][:, :query_tokens.size(1), :]
language_model_inputs = self.language_projection(query_output)
if return_dict:
return (language_model_inputs, vision_outputs, query_outputs)
return language_model_inputs
def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device))
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
return special_image_mask
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.FloatTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, input_ids: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, InstructBlipForConditionalGenerationModelOutput]:
"""
qformer_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of input sequence tokens in the vocabulary of the Q-Former. Input tokens can optionally be provided
to serve as text prompt, which the Q-Former model will encode.
Indices can be obtained using [`InstructBlipProcessor`]. See [`InstructBlipProcessor.__call__`] for
details.
[What are input IDs?](../glossary#input-ids)
qformer_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
Only relevant in case an encoder-decoder language model (like T5) is used.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size -
1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
config.vocab_size]`
Examples:
```python
>>> from transformers import InstructBlipProcessor, InstructBlipForConditionalGeneration
>>> import torch
>>> from PIL import Image
>>> import requests
>>> model = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b")
>>> processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
>>> device = "cuda" if torch.cuda.is_available() else "cpu"
>>> model.to(device) # doctest: +IGNORE_RESULT
>>> url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
>>> prompt = "What is unusual about this image?"
>>> inputs = processor(images=image, text=prompt, return_tensors="pt").to(device)
>>> outputs = model.generate(
... **inputs,
... do_sample=False,
... num_beams=5,
... max_length=256,
... min_length=1,
... top_p=0.9,
... repetition_penalty=1.5,
... length_penalty=1.0,
... temperature=1,
... )
>>> generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
>>> print(generated_text)
The unusual aspect of this image is that a man is ironing clothes on the back of a yellow SUV, which is parked in the middle of a busy city street. This is an unconventional approach to ironing clothes, as it requires the man to balance himself and his ironing equipment on top of the vehicle while navigating through traffic. Additionally, the presence of taxis and other vehicles in the scene further emphasizes the unusual nature of this situation.
```"""
language_model_inputs, vision_outputs, query_outputs = self.get_image_features(pixel_values, qformer_input_ids=qformer_input_ids, qformer_attention_mask=qformer_attention_mask, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs)
if self.config.use_decoder_only_language_model:
outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, **kwargs)
logits = outputs[0]
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs)
else:
kwargs['return_dict'] = True
outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=labels, **kwargs)
loss = outputs.loss
logits = outputs.logits
return InstructBlipForConditionalGenerationModelOutput(loss=loss, logits=logits, vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs)
@torch.no_grad()
def generate(self, pixel_values: torch.FloatTensor, qformer_input_ids: Optional[torch.LongTensor]=None, qformer_attention_mask: Optional[torch.LongTensor]=None, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False, **generate_kwargs) -> torch.LongTensor:
"""
Overrides `generate` function to be able to use the model as a conditional generator.
Args:
pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):
Input images to be processed.
qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
The sequence used as a prompt to be fed to the Q-Former module.
qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
Mask to avoid performing attention on padding token indices.
input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
The sequence used as a prompt for the generation.
attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
Mask to avoid performing attention on padding token indices.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Embedded representation of the inputs. Should be float, not int tokens.
interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
Whether to interpolate the positional encoding of the image embeddings.
Returns:
captions (list): A list of strings of length batch_size * num_captions.
"""
if hasattr(self, 'hf_device_map'):
self._preprocess_accelerate()
batch_size = pixel_values.shape[0]
language_model_inputs, vision_outputs, query_outputs = self.get_image_features(pixel_values, qformer_input_ids=qformer_input_ids, qformer_attention_mask=qformer_attention_mask, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)
if inputs_embeds is None:
if input_ids is None:
image_tokens = [self.config.image_token_index] * self.config.num_query_tokens
start_tokens = image_tokens + [self.config.text_config.bos_token_id]
input_ids = torch.tensor([start_tokens], dtype=torch.long, device=pixel_values.device)
input_ids = input_ids.repeat(batch_size, 1)
inputs_embeds = self.get_input_embeddings()(input_ids)
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs)
inputs = {'inputs_embeds': inputs_embeds, 'attention_mask': attention_mask}
if not self.language_model.config.is_encoder_decoder:
inputs['input_ids'] = input_ids
outputs = self.language_model.generate(**inputs, **generate_kwargs)
return outputs
| class_skeleton: null
| total_program_units: 18 | total_doc_str: 5 | AvgCountLine: 30 | AvgCountLineBlank: 4 | AvgCountLineCode: 19 | AvgCountLineComment: 7 | AvgCyclomatic: 3 | CommentToCodeRatio: 0.34 | CountClassBase: 2 | CountClassCoupled: 8 | CountClassCoupledModified: 6 | CountClassDerived: 0 | CountDeclInstanceMethod: 11 | CountDeclInstanceVariable: 5 | CountDeclMethod: 11 | CountDeclMethodAll: 12 | CountLine: 349 | CountLineBlank: 53 | CountLineCode: 222 | CountLineCodeDecl: 78 | CountLineCodeExe: 182 | CountLineComment: 75 | CountStmt: 119 | CountStmtDecl: 53 | CountStmtExe: 107 | MaxCyclomatic: 12 | MaxInheritanceTree: 2 | MaxNesting: 2 | SumCyclomatic: 36
|
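Both `forward` and `generate` above splice the projected Q-Former outputs into the language model's input embeddings at the image-token positions using `masked_scatter`. A toy illustration of that splice with made-up token ids and sizes:

```python
import torch

image_token_id = 99
input_ids = torch.tensor([[99, 99, 5, 6, 7]])  # two image placeholders, then text
inputs_embeds = torch.zeros(1, 5, 4)           # stand-in token embeddings
image_features = torch.ones(2, 4)              # one projected row per placeholder

mask = (input_ids == image_token_id).unsqueeze(-1).expand_as(inputs_embeds)
inputs_embeds = inputs_embeds.masked_scatter(mask, image_features)
print(inputs_embeds[:, :, 0])  # tensor([[1., 1., 0., 0., 0.]]) -> images written in place
```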
3,160
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipForConditionalGenerationModelOutput
|
from typing import Any, Callable, Optional, Union
from dataclasses import dataclass
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
import torch
@dataclass
@auto_docstring(custom_intro='\n Class defining the outputs of [`InstructBlipForConditionalGeneration`].\n ')
class InstructBlipForConditionalGenerationModelOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss from the language model.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head of the language model.
vision_outputs (`BaseModelOutputWithPooling`):
Outputs of the vision encoder.
qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):
Outputs of the Q-Former (Querying Transformer).
language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
Outputs of the language model.
"""
loss: Optional[tuple[torch.FloatTensor]] = None
logits: Optional[tuple[torch.FloatTensor]] = None
vision_outputs: Optional[torch.FloatTensor] = None
qformer_outputs: Optional[tuple[torch.FloatTensor]] = None
language_model_outputs: Optional[tuple[torch.FloatTensor]] = None
def to_tuple(self) -> tuple[Any]:
return tuple((self[k] if k not in ['vision_outputs', 'qformer_outputs', 'language_model_outputs'] else getattr(self, k).to_tuple() for k in self.keys()))
|
@dataclass
@auto_docstring(custom_intro='\n Class defining the outputs of [`InstructBlipForConditionalGeneration`].\n ')
class InstructBlipForConditionalGenerationModelOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss from the language model.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head of the language model.
vision_outputs (`BaseModelOutputWithPooling`):
Outputs of the vision encoder.
qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):
Outputs of the Q-Former (Querying Transformer).
language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
Outputs of the language model.
'''
def to_tuple(self) -> tuple[Any]:
pass
| total_program_units: 4 | total_doc_str: 1 | AvgCountLine: 7 | AvgCountLineBlank: 0 | AvgCountLineCode: 7 | AvgCountLineComment: 0 | AvgCyclomatic: 2 | CommentToCodeRatio: 1.08 | CountClassBase: 1 | CountClassCoupled: 2 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 1 | CountDeclInstanceVariable: 0 | CountDeclMethod: 1 | CountDeclMethodAll: 1 | CountLine: 30 | CountLineBlank: 3 | CountLineCode: 13 | CountLineCodeDecl: 7 | CountLineCodeExe: 11 | CountLineComment: 14 | CountStmt: 8 | CountStmtDecl: 7 | CountStmtExe: 6 | MaxCyclomatic: 2 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 2
|
3,161
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipMLP
|
from torch import nn
from ...activations import ACT2FN
import torch
class InstructBlipMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
|
class InstructBlipMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| total_program_units: 3 | total_doc_str: 0 | AvgCountLine: 6 | AvgCountLineBlank: 0 | AvgCountLineCode: 6 | AvgCountLineComment: 0 | AvgCyclomatic: 1 | CommentToCodeRatio: 0 | CountClassBase: 1 | CountClassCoupled: 2 | CountClassCoupledModified: 0 | CountClassDerived: 0 | CountDeclInstanceMethod: 2 | CountDeclInstanceVariable: 4 | CountDeclMethod: 2 | CountDeclMethodAll: 12 | CountLine: 13 | CountLineBlank: 1 | CountLineCode: 12 | CountLineCodeDecl: 7 | CountLineCodeExe: 9 | CountLineComment: 0 | CountStmt: 12 | CountStmtDecl: 7 | CountStmtExe: 9 | MaxCyclomatic: 1 | MaxInheritanceTree: 1 | MaxNesting: 0 | SumCyclomatic: 2
|
3,162
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipPreTrainedModel
|
from torch import nn
from .configuration_instructblip import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
@auto_docstring
class InstructBlipPreTrainedModel(PreTrainedModel):
config: InstructBlipConfig
base_model_prefix = 'blip'
supports_gradient_checkpointing = True
_supports_attention_backend = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_no_split_modules = ['InstructBlipQFormerEmbeddings', 'InstructBlipAttention', 'InstructBlipQFormerMultiHeadAttention', 'InstructBlipQFormerSelfOutput']
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_range
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=factor)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=factor)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, InstructBlipVisionEmbeddings):
nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor)
nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor)
elif isinstance(module, (InstructBlipForConditionalGeneration, InstructBlipModel)):
module.query_tokens.data.zero_()
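The `nn.Linear` branch above is the common case; here is a standalone sketch of the same initialization pattern (the `factor` value is illustrative, standing in for `config.initializer_range`):
import torch.nn as nn
layer = nn.Linear(4, 4)
factor = 0.02  # stands in for config.initializer_range
layer.weight.data.normal_(mean=0.0, std=factor)  # redraw weights from N(0, factor)
if layer.bias is not None:
    layer.bias.data.zero_()                      # biases start at zero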
|
@auto_docstring
class InstructBlipPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3
| 1
| 19
| 2
| 16
| 1
| 7
| 0.22
| 1
| 2
| 2
| 3
| 1
| 0
| 1
| 1
| 38
| 5
| 27
| 8
| 25
| 6
| 20
| 8
| 18
| 7
| 1
| 2
| 7
|
3,163
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipQFormerAttention
|
from ...processing_utils import Unpack
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from typing import Any, Callable, Optional, Union
import torch
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from torch import nn
class InstructBlipQFormerAttention(nn.Module):
def __init__(self, config, is_cross_attention=False):
super().__init__()
self.attention = InstructBlipQFormerMultiHeadAttention(config, is_cross_attention)
self.output = InstructBlipQFormerSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads)
self.attention.query = prune_linear_layer(self.attention.query, index)
self.attention.key = prune_linear_layer(self.attention.key, index)
self.attention.value = prune_linear_layer(self.attention.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
attn_output, _ = self.attention(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, **kwargs)
attention_output = self.output(attn_output, hidden_states)
return attention_output
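A hedged sketch of head pruning, using a deliberately tiny `InstructBlipQFormerConfig`: dropping one of two heads halves `all_head_size` and shrinks the query/key/value projections accordingly.
from transformers import InstructBlipQFormerConfig
from transformers.models.instructblip.modeling_instructblip import InstructBlipQFormerAttention
cfg = InstructBlipQFormerConfig(hidden_size=8, num_attention_heads=2)
attn = InstructBlipQFormerAttention(cfg)
attn.prune_heads({0})                               # remove head 0
assert attn.attention.num_attention_heads == 1
assert attn.attention.query.out_features == 4       # all_head_size shrank from 8 to 4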
|
class InstructBlipQFormerAttention(nn.Module):
def __init__(self, config, is_cross_attention=False):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
pass
| 4
| 0
| 15
| 1
| 13
| 1
| 1
| 0.07
| 1
| 6
| 2
| 0
| 3
| 3
| 3
| 13
| 47
| 4
| 41
| 20
| 28
| 3
| 22
| 11
| 18
| 2
| 1
| 1
| 4
|
3,164
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipQFormerEmbeddings
|
import torch
from torch import nn
class InstructBlipQFormerEmbeddings(nn.Module):
"""Construct the embeddings from word and position embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
self.config = config
def forward(self, input_ids=None, position_ids=None, query_embeds=None, past_key_values_length=0):
if input_ids is not None:
seq_length = input_ids.size()[1]
else:
seq_length = 0
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length].clone()
if input_ids is not None:
embeddings = self.word_embeddings(input_ids)
if self.position_embedding_type == 'absolute':
position_embeddings = self.position_embeddings(position_ids.to(embeddings.device))
embeddings = embeddings + position_embeddings
if query_embeds is not None:
embeddings = torch.cat((query_embeds, embeddings), dim=1)
else:
embeddings = query_embeds
embeddings = embeddings.to(self.layernorm.weight.dtype)
embeddings = self.layernorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
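A hedged sketch of the forward pass: learned query embeddings are prepended to the token embeddings along the sequence axis. Config values are illustrative only.
import torch
from transformers import InstructBlipQFormerConfig
from transformers.models.instructblip.modeling_instructblip import InstructBlipQFormerEmbeddings
cfg = InstructBlipQFormerConfig(vocab_size=100, hidden_size=8,
                                max_position_embeddings=16, hidden_dropout_prob=0.0)
emb = InstructBlipQFormerEmbeddings(cfg)
queries = torch.zeros(2, 4, 8)                  # (batch, num_query_tokens, hidden)
ids = torch.tensor([[5, 6, 7], [8, 9, 0]])      # (batch, seq_len)
out = emb(input_ids=ids, query_embeds=queries)
print(out.shape)                                # torch.Size([2, 7, 8]): 4 queries + 3 tokens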
|
class InstructBlipQFormerEmbeddings(nn.Module):
'''Construct the embeddings from word and position embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids=None, position_ids=None, query_embeds=None, past_key_values_length=0):
pass
| 3
| 1
| 23
| 4
| 19
| 1
| 4
| 0.05
| 1
| 1
| 0
| 0
| 2
| 6
| 2
| 12
| 49
| 9
| 38
| 18
| 29
| 2
| 28
| 12
| 25
| 6
| 1
| 2
| 7
|
3,165
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipQFormerEncoder
|
from torch import nn
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from ...processing_utils import Unpack
class InstructBlipQFormerEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([InstructBlipQFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, query_length=0, **kwargs: Unpack[TransformersKwargs]):
for i in range(self.config.num_hidden_layers):
layer_module = self.layer[i]
layer_head_mask = head_mask[i] if head_mask is not None else None
hidden_states = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, query_length=query_length, **kwargs)
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states)
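A hedged sketch wiring a deliberately tiny encoder; with no `encoder_hidden_states`, the cross-attention layers are never exercised and the stack reduces to self-attention plus feed-forward. The small config values are illustrative only.
import torch
from transformers import InstructBlipQFormerConfig
from transformers.models.instructblip.modeling_instructblip import InstructBlipQFormerEncoder
cfg = InstructBlipQFormerConfig(hidden_size=8, num_hidden_layers=2,
                                num_attention_heads=2, intermediate_size=16,
                                encoder_hidden_size=8)
enc = InstructBlipQFormerEncoder(cfg).eval()
out = enc(torch.randn(1, 5, 8))                 # no encoder_hidden_states -> self-attention only
print(out.last_hidden_state.shape)              # torch.Size([1, 5, 8])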
|
class InstructBlipQFormerEncoder(nn.Module):
def __init__(self, config):
pass
@can_return_tuple
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, query_length=0, **kwargs: Unpack[TransformersKwargs]):
pass
| 4
| 0
| 46
| 4
| 42
| 0
| 9
| 0
| 1
| 5
| 2
| 0
| 2
| 3
| 2
| 12
| 93
| 8
| 85
| 28
| 69
| 0
| 35
| 15
| 32
| 16
| 1
| 3
| 17
|
3,166
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipQFormerIntermediate
|
import torch
from torch import nn
from ...activations import ACT2FN
class InstructBlipQFormerIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class InstructBlipQFormerIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 2
| 0
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 12
| 5
| 9
| 0
| 11
| 5
| 8
| 2
| 1
| 1
| 3
|
3,167
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipQFormerLayer
|
from ...modeling_layers import GradientCheckpointingLayer
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
import torch
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from ...processing_utils import Unpack
class InstructBlipQFormerLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = InstructBlipQFormerAttention(config)
self.layer_idx = layer_idx
if layer_idx % config.cross_attention_frequency == 0:
self.crossattention = InstructBlipQFormerAttention(config, is_cross_attention=True)
self.has_cross_attention = True
else:
self.has_cross_attention = False
self.intermediate = InstructBlipQFormerIntermediate(config)
self.output = InstructBlipQFormerOutput(config)
self.intermediate_query = InstructBlipQFormerIntermediate(config)
self.output_query = InstructBlipQFormerOutput(config)
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, query_length=0, **kwargs: Unpack[TransformersKwargs]):
attention_output = self.attention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, **kwargs)
if query_length > 0:
query_attention_output = attention_output[:, :query_length, :]
if self.has_cross_attention:
if encoder_hidden_states is None:
raise ValueError('encoder_hidden_states must be given for cross-attention layers')
query_attention_output = self.crossattention(query_attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, **kwargs)
layer_output = apply_chunking_to_forward(self.feed_forward_chunk_query, self.chunk_size_feed_forward, self.seq_len_dim, query_attention_output)
if attention_output.shape[1] > query_length:
layer_output_text = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output[:, query_length:, :]).to(layer_output.device)
layer_output = torch.cat([layer_output, layer_output_text], dim=1)
else:
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
return layer_output
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
def feed_forward_chunk_query(self, attention_output):
intermediate_output = self.intermediate_query(attention_output)
layer_output = self.output_query(intermediate_output, attention_output)
return layer_output
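The feed-forward calls above go through `apply_chunking_to_forward`, which splits the chosen dimension into chunks, applies the function per chunk, and concatenates the results; with `chunk_size_feed_forward == 0` it reduces to a plain call. A small standalone illustration:
import torch
from transformers.pytorch_utils import apply_chunking_to_forward
def ff(x):                                   # any pointwise feed-forward
    return x * 2
x = torch.randn(2, 6, 4)
y = apply_chunking_to_forward(ff, 3, 1, x)   # chunk_size=3 along dim 1 (sequence)
assert torch.equal(y, ff(x))                 # chunking does not change the result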
|
class InstructBlipQFormerLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, query_length=0, **kwargs: Unpack[TransformersKwargs]):
pass
def feed_forward_chunk(self, attention_output):
pass
def feed_forward_chunk_query(self, attention_output):
pass
| 5
| 0
| 24
| 3
| 21
| 1
| 3
| 0.02
| 1
| 5
| 3
| 0
| 4
| 10
| 4
| 14
| 101
| 14
| 85
| 38
| 70
| 2
| 45
| 28
| 40
| 6
| 1
| 3
| 10
|
3,168
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipQFormerModel
|
from ...utils.generic import OutputRecorder, check_model_inputs
from typing import Any, Callable, Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
import torch
from .configuration_instructblip import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig
from ...processing_utils import Unpack
class InstructBlipQFormerModel(InstructBlipPreTrainedModel):
"""
Querying Transformer (Q-Former), used in InstructBLIP. Slightly modified from BLIP-2 as it also takes the
instruction as input.
"""
_supports_attention_backend = False
_supports_flash_attn = False
_supports_sdpa = False
_supports_flex_attn = False
_can_record_outputs = {'hidden_states': InstructBlipQFormerLayer, 'attentions': [OutputRecorder(InstructBlipQFormerMultiHeadAttention, index=1, layer_name='.attention')], 'cross_attentions': [OutputRecorder(InstructBlipQFormerMultiHeadAttention, index=1, layer_name='.crossattention')]}
def __init__(self, config: InstructBlipQFormerConfig):
super().__init__(config)
self.config = config
self.embeddings = InstructBlipQFormerEmbeddings(config)
self.encoder = InstructBlipQFormerEncoder(config)
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the base
class `PreTrainedModel`.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def get_extended_attention_mask(self, attention_mask: torch.Tensor, input_shape: tuple[int], device: torch.device, has_query: bool=False) -> torch.Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (`tuple[int]`):
The shape of the input to the model.
device: (`torch.device`):
The device of the input to the model.
Returns:
`torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
"""
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(f'Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})')
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
@check_model_inputs
@auto_docstring
def forward(self, input_ids: torch.LongTensor, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, query_embeds: Optional[torch.Tensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]:
"""
query_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Hidden states to be used in the attention computation. If cross-attention,
will be used for the query (i.e., key and value will use the encoder_hidden_states).
"""
if input_ids is None and query_embeds is None:
raise ValueError('You have to specify query_embeds when input_ids is None')
query_length = query_embeds.shape[1] if query_embeds is not None else 0
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, query_embeds=query_embeds)
input_shape = embedding_output.size()[:-1]
batch_size, seq_length = input_shape
device = embedding_output.device
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length), device=device)
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
if encoder_hidden_states is not None:
if isinstance(encoder_hidden_states, list):
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
else:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if isinstance(encoder_attention_mask, list):
encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
elif encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs: BaseModelOutput = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, query_length=query_length, **kwargs)
sequence_output = encoder_outputs.last_hidden_state
pooled_output = sequence_output[:, 0, :]
return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output)
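A hedged illustration of the mask expansion in `get_extended_attention_mask`: a 2D padding mask becomes a broadcastable 4D additive mask with 0.0 for kept tokens and -10000.0 for masked ones.
import torch
mask = torch.tensor([[1, 1, 0]])                      # (batch, seq): last token is padding
extended = mask[:, None, None, :].to(torch.float32)   # (batch, 1, 1, seq)
extended = (1.0 - extended) * -10000.0
print(extended)                                       # tensor([[[[-0., -0., -10000.]]]])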
|
class InstructBlipQFormerModel(InstructBlipPreTrainedModel):
'''
Querying Transformer (Q-Former), used in InstructBLIP. Slightly modified from BLIP-2 as it also takes the
instruction as input.
'''
def __init__(self, config: InstructBlipQFormerConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the base
class `PreTrainedModel`.
'''
pass
def get_extended_attention_mask(self, attention_mask: torch.Tensor, input_shape: tuple[int], device: torch.device, has_query: bool=False) -> torch.Tensor:
'''
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (`tuple[int]`):
The shape of the input to the model.
device: (`torch.device`):
The device of the input to the model.
Returns:
`torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
'''
pass
@check_model_inputs
@auto_docstring
def forward(self, input_ids: torch.LongTensor, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, query_embeds: Optional[torch.Tensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]:
'''
query_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Hidden states to be used in the attention computation. If cross-attention,
will be used for the query (i.e., key and value will use the encoder_hidden_states).
'''
pass
| 9
| 4
| 31
| 3
| 18
| 9
| 4
| 0.53
| 1
| 10
| 4
| 0
| 6
| 3
| 6
| 7
| 194
| 25
| 111
| 45
| 84
| 59
| 55
| 25
| 48
| 13
| 2
| 2
| 21
|
3,169
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipQFormerMultiHeadAttention
|
import torch
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from torch import nn
import math
from ...processing_utils import Unpack
class InstructBlipQFormerMultiHeadAttention(nn.Module):
def __init__(self, config, is_cross_attention=False):
super().__init__()
self.config = config
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError('The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
if is_cross_attention:
self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
else:
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.save_attention = False
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def save_attention_map(self, attention_map):
self.attention_map = attention_map
def get_attention_map(self):
return self.attention_map
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, **kwargs: Unpack[TransformersKwargs]):
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
mixed_query_layer = self.query(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype)
if self.position_embedding_type == 'relative_key':
relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == 'relative_key_query':
relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_scores_dtype = attention_scores.dtype
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores).to(attention_scores_dtype)
if is_cross_attention and self.save_attention:
self.save_attention_map(attention_probs)
attention_probs.register_hook(self.save_attn_gradients)
attention_probs_dropped = self.dropout(attention_probs)
if head_mask is not None:
attention_probs_dropped = attention_probs_dropped * head_mask
context_layer = torch.matmul(attention_probs_dropped, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return (context_layer, attention_probs)
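A standalone sketch of what `transpose_for_scores` does: `(batch, seq, hidden)` is reshaped to `(batch, heads, seq, head_size)` so the score matmul runs per head.
import torch
batch, seq, heads, head_size = 2, 5, 4, 3
x = torch.randn(batch, seq, heads * head_size)
x = x.view(batch, seq, heads, head_size).permute(0, 2, 1, 3)
print(x.shape)  # torch.Size([2, 4, 5, 3])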
|
class InstructBlipQFormerMultiHeadAttention(nn.Module):
def __init__(self, config, is_cross_attention=False):
pass
def save_attn_gradients(self, attn_gradients):
pass
def get_attn_gradients(self):
pass
def save_attention_map(self, attention_map):
pass
def get_attention_map(self):
pass
def transpose_for_scores(self, x):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, **kwargs: Unpack[TransformersKwargs]):
pass
| 8
| 0
| 18
| 3
| 14
| 1
| 3
| 0.1
| 1
| 3
| 0
| 0
| 7
| 14
| 7
| 17
| 131
| 26
| 96
| 52
| 79
| 10
| 80
| 43
| 72
| 10
| 1
| 2
| 19
|
3,170
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipQFormerOutput
|
from torch import nn
import torch
class InstructBlipQFormerOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class InstructBlipQFormerOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 11
| 6
| 8
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
3,171
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipQFormerSelfOutput
|
from torch import nn
import torch
class InstructBlipQFormerSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class InstructBlipQFormerSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 5
| 0
| 5
| 0
| 1
| 0
| 1
| 2
| 0
| 0
| 2
| 3
| 2
| 12
| 12
| 1
| 11
| 6
| 8
| 0
| 11
| 6
| 8
| 1
| 1
| 0
| 2
|
3,172
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipVisionEmbeddings
|
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from torch import nn
import torch
from .configuration_instructblip import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig
class InstructBlipVisionEmbeddings(nn.Module):
def __init__(self, config: InstructBlipVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim))
self.patch_embedding = nn.Conv2d(in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embedding.shape[1] - 1
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embedding
class_pos_embed = self.position_embedding[:, :1]
patch_pos_embed = self.position_embedding[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
if interpolate_pos_encoding:
position_embedding = self.interpolate_pos_encoding(embeddings, height, width)
else:
position_embedding = self.position_embedding
embeddings = embeddings + position_embedding[:, :embeddings.size(1), :].to(target_dtype)
return embeddings
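A hedged sketch of the interpolation step above in isolation: a 16x16 grid of patch position embeddings (224px image, 14px patches) is resized bicubically to 24x24 for a 336px input. Sizes are illustrative.
import torch
from torch import nn
dim = 8
pos = torch.randn(1, 16 * 16, dim)                       # flattened 16x16 patch grid
pos = pos.reshape(1, 16, 16, dim).permute(0, 3, 1, 2)    # to (1, dim, 16, 16)
pos = nn.functional.interpolate(pos, size=(24, 24), mode="bicubic", align_corners=False)
pos = pos.permute(0, 2, 3, 1).view(1, -1, dim)           # back to (1, 24*24, dim)
print(pos.shape)                                         # torch.Size([1, 576, 8])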
|
class InstructBlipVisionEmbeddings(nn.Module):
def __init__(self, config: InstructBlipVisionConfig):
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
'''
pass
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
pass
| 4
| 1
| 23
| 5
| 16
| 3
| 2
| 0.19
| 1
| 5
| 1
| 0
| 3
| 9
| 3
| 13
| 72
| 16
| 48
| 27
| 44
| 9
| 40
| 27
| 36
| 2
| 1
| 1
| 5
|
3,173
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/modeling_instructblip.py
|
transformers.models.instructblip.modeling_instructblip.InstructBlipVisionModel
|
from .configuration_instructblip import InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig
from torch import nn
import torch
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from typing import Any, Callable, Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions
from ...utils.generic import OutputRecorder, check_model_inputs
from ...processing_utils import Unpack
class InstructBlipVisionModel(InstructBlipPreTrainedModel):
main_input_name = 'pixel_values'
config: InstructBlipVisionConfig
_can_record_outputs = {'hidden_states': InstructBlipEncoderLayer, 'attentions': InstructBlipAttention}
def __init__(self, config: InstructBlipVisionConfig):
super().__init__(config)
self.config = config
embed_dim = config.hidden_size
self.embeddings = InstructBlipVisionEmbeddings(config)
self.encoder = InstructBlipEncoder(config)
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.post_init()
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutputWithPooling]:
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
encoder_outputs: BaseModelOutput = self.encoder(inputs_embeds=hidden_states, **kwargs)
last_hidden_state = encoder_outputs.last_hidden_state
last_hidden_state = self.post_layernorm(last_hidden_state)
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output)
def get_input_embeddings(self):
return self.embeddings
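A hedged usage sketch with a randomly initialized, deliberately small vision tower; the config values are illustrative, not a released checkpoint.
import torch
from transformers import InstructBlipVisionConfig, InstructBlipVisionModel
cfg = InstructBlipVisionConfig(hidden_size=32, intermediate_size=64,
                               num_hidden_layers=2, num_attention_heads=2,
                               image_size=56, patch_size=14)
model = InstructBlipVisionModel(cfg).eval()
with torch.no_grad():
    out = model(pixel_values=torch.randn(1, 3, 56, 56))
print(out.last_hidden_state.shape)  # torch.Size([1, 17, 32]): 16 patches + CLS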
|
class InstructBlipVisionModel(InstructBlipPreTrainedModel):
def __init__(self, config: InstructBlipVisionConfig):
pass
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutputWithPooling]:
pass
def get_input_embeddings(self):
pass
| 6
| 0
| 19
| 3
| 15
| 1
| 3
| 0.06
| 1
| 7
| 4
| 0
| 3
| 4
| 3
| 4
| 65
| 13
| 49
| 23
| 36
| 3
| 28
| 15
| 24
| 6
| 2
| 1
| 8
|
3,174
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/processing_instructblip.py
|
transformers.models.instructblip.processing_instructblip.InstructBlipProcessor
|
from typing import Optional, Union
from ..auto import AutoTokenizer
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
import os
from ...tokenization_utils_base import AddedToken, PreTokenizedInput, TextInput
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
class InstructBlipProcessor(ProcessorMixin):
"""
Constructs an InstructBLIP processor which wraps a BLIP image processor and a LLaMa/T5 tokenizer into a single
processor.
[`InstructBlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`AutoTokenizer`]. See the
docstring of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information.
Args:
image_processor (`BlipImageProcessor`):
An instance of [`BlipImageProcessor`]. The image processor is a required input.
tokenizer (`AutoTokenizer`):
An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
qformer_tokenizer (`AutoTokenizer`):
An instance of [`PreTrainedTokenizer`]. The Q-Former tokenizer is a required input.
num_query_tokens (`int`, *optional*):
Number of tokens used by the Q-Former as queries; should be the same as in the model's config.
"""
attributes = ['image_processor', 'tokenizer', 'qformer_tokenizer']
image_processor_class = ('BlipImageProcessor', 'BlipImageProcessorFast')
tokenizer_class = 'AutoTokenizer'
qformer_tokenizer_class = 'AutoTokenizer'
def __init__(self, image_processor, tokenizer, qformer_tokenizer, num_query_tokens=None, **kwargs):
if not hasattr(tokenizer, 'image_token'):
self.image_token = AddedToken('<image>', normalized=False, special=True)
tokenizer.add_tokens([self.image_token], special_tokens=True)
else:
self.image_token = tokenizer.image_token
self.num_query_tokens = num_query_tokens
super().__init__(image_processor, tokenizer, qformer_tokenizer)
def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[InstructBlipProcessorKwargs]) -> BatchFeature:
"""
This method uses [`BlipImageProcessor.__call__`] method to prepare image(s) for the model, and
[`BertTokenizerFast.__call__`] to prepare text for the model.
Please refer to the docstring of the above two methods for more information.
Args:
images (`ImageInput`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
"""
if images is None and text is None:
raise ValueError('You have to specify at least images or text.')
output_kwargs = self._merge_kwargs(InstructBlipProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None)
encoding = {}
if text is not None:
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and (not isinstance(text[0], str)):
raise ValueError('Invalid input text. Please provide a string, or a list of strings')
qformer_text_encoding = self.qformer_tokenizer(text, **output_kwargs['text_kwargs'])
encoding['qformer_input_ids'] = qformer_text_encoding.pop('input_ids')
encoding['qformer_attention_mask'] = qformer_text_encoding.pop('attention_mask')
if output_kwargs['text_kwargs'].get('max_length') is not None:
output_kwargs['text_kwargs']['max_length'] -= self.num_query_tokens
text_encoding = self.tokenizer(text, **output_kwargs['text_kwargs'])
if images is not None:
image_tokens = self.image_token.content * self.num_query_tokens
output_kwargs['text_kwargs']['add_special_tokens'] = False
output_kwargs['text_kwargs']['padding'] = False
output_kwargs['text_kwargs']['truncation'] = False
image_text_encoding = self.tokenizer(image_tokens, **output_kwargs['text_kwargs'])
for k in text_encoding:
text_encoding[k] = [image_text_encoding[k] + sample for sample in text_encoding[k]]
encoding.update(text_encoding)
if images is not None:
image_encoding = self.image_processor(images, **output_kwargs['images_kwargs'])
encoding.update(image_encoding)
encoding = BatchFeature(encoding, tensor_type=return_tensors)
return encoding
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
qformer_input_names = ['qformer_input_ids', 'qformer_attention_mask']
return tokenizer_input_names + image_processor_input_names + qformer_input_names
def save_pretrained(self, save_directory, **kwargs):
if os.path.isfile(save_directory):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file')
os.makedirs(save_directory, exist_ok=True)
qformer_tokenizer_path = os.path.join(save_directory, 'qformer_tokenizer')
self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
qformer_present = 'qformer_tokenizer' in self.attributes
if qformer_present:
self.attributes.remove('qformer_tokenizer')
outputs = super().save_pretrained(save_directory, **kwargs)
if qformer_present:
self.attributes += ['qformer_tokenizer']
return outputs
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
processor = super().from_pretrained(pretrained_model_name_or_path, **kwargs)
if isinstance(processor, tuple):
processor = processor[0]
qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder='qformer_tokenizer')
processor.qformer_tokenizer = qformer_tokenizer
return processor
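An end-to-end usage sketch; the checkpoint name is a real published one but is assumed here for illustration (the download is large).
import requests
from PIL import Image
from transformers import InstructBlipProcessor
processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, text="What is in this picture?", return_tensors="pt")
# `inputs` now holds input_ids / attention_mask (with prepended image tokens),
# qformer_input_ids / qformer_attention_mask, and pixel_values.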
|
class InstructBlipProcessor(ProcessorMixin):
'''
Constructs an InstructBLIP processor which wraps a BLIP image processor and a LLaMa/T5 tokenizer into a single
processor.
[`InstructBlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`AutoTokenizer`]. See the
docstring of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information.
Args:
image_processor (`BlipImageProcessor`):
An instance of [`BlipImageProcessor`]. The image processor is a required input.
tokenizer (`AutoTokenizer`):
An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
qformer_tokenizer (`AutoTokenizer`):
An instance of [`PreTrainedTokenizer`]. The Q-Former tokenizer is a required input.
num_query_tokens (`int`, *optional*):
Number of tokens used by the Q-Former as queries; should be the same as in the model's config.
'''
def __init__(self, image_processor, tokenizer, qformer_tokenizer, num_query_tokens=None, **kwargs):
pass
def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[InstructBlipProcessorKwargs]) -> BatchFeature:
'''
This method uses [`BlipImageProcessor.__call__`] method to prepare image(s) for the model, and
[`BertTokenizerFast.__call__`] to prepare text for the model.
Please refer to the docstring of the above two methods for more information.
Args:
images (`ImageInput`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
'''
pass
@property
def model_input_names(self):
pass
def save_pretrained(self, save_directory, **kwargs):
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
pass
| 8
| 2
| 18
| 2
| 13
| 4
| 3
| 0.49
| 1
| 11
| 4
| 0
| 6
| 2
| 7
| 24
| 166
| 23
| 96
| 41
| 79
| 47
| 71
| 32
| 63
| 9
| 2
| 3
| 20
|
3,175
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblip/processing_instructblip.py
|
transformers.models.instructblip.processing_instructblip.InstructBlipProcessorKwargs
|
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
class InstructBlipProcessorKwargs(ProcessingKwargs, total=False):
_defaults = {'text_kwargs': {'add_special_tokens': True, 'padding': False, 'stride': 0, 'return_overflowing_tokens': False, 'return_special_tokens_mask': False, 'return_offsets_mapping': False, 'return_token_type_ids': False, 'return_length': False, 'verbose': True}, 'images_kwargs': {}}
|
class InstructBlipProcessorKwargs(ProcessingKwargs, total=False):
pass
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 15
| 0
| 15
| 2
| 14
| 0
| 2
| 2
| 1
| 0
| 3
| 0
| 0
|
3,176
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/configuration_instructblipvideo.py
|
transformers.models.instructblipvideo.configuration_instructblipvideo.InstructBlipVideoConfig
|
from ...configuration_utils import PretrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
class InstructBlipVideoConfig(PretrainedConfig):
"""
[`InstructBlipVideoConfig`] is the configuration class to store the configuration of a
[`InstructBlipVideoForConditionalGeneration`]. It is used to instantiate an Instructblipvideo model according to the specified
arguments, defining the vision model, Q-Former model and language model configs. Instantiating a configuration with
the defaults will yield a similar configuration to that of the Instructblipvideo
[Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`InstructBlipVideoVisionConfig`].
qformer_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`InstructBlipVideoQFormerConfig`].
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize any [`PretrainedConfig`].
num_query_tokens (`int`, *optional*, defaults to 32):
The number of query tokens passed through the Transformer.
video_token_index (`int`, *optional*):
Token index of special video token.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import (
... InstructBlipVideoVisionConfig,
... InstructBlipVideoQFormerConfig,
... OPTConfig,
... InstructBlipVideoConfig,
... InstructBlipVideoForConditionalGeneration,
... )
>>> # Initializing a InstructBlipVideoConfig with Salesforce/instruct-blip-flan-t5 style configuration
>>> configuration = InstructBlipVideoConfig()
>>> # Initializing a InstructBlipVideoForConditionalGeneration (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
>>> model = InstructBlipVideoForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a InstructBlipVideoConfig from a InstructBlipVideoVisionConfig, InstructBlipVideoQFormerConfig and any PretrainedConfig
>>> # Initializing Instructblipvideo vision, Instructblipvideo Q-Former and language model configurations
>>> vision_config = InstructBlipVideoVisionConfig()
>>> qformer_config = InstructBlipVideoQFormerConfig()
>>> text_config = OPTConfig()
>>> config = InstructBlipVideoConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
```"""
model_type = 'instructblipvideo'
attribute_map = {'video_token_id': 'video_token_index'}
sub_configs = {'text_config': AutoConfig, 'qformer_config': InstructBlipVideoQFormerConfig, 'vision_config': InstructBlipVideoVisionConfig}
def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, video_token_index=None, **kwargs):
super().__init__(**kwargs)
if vision_config is None:
vision_config = {}
logger.info('vision_config is None. initializing the InstructBlipVideoVisionConfig with default values.')
if qformer_config is None:
qformer_config = {}
logger.info('qformer_config is None. Initializing the InstructBlipVideoQFormerConfig with default values.')
if text_config is None:
text_config = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
self.vision_config = InstructBlipVideoVisionConfig(**vision_config)
self.qformer_config = InstructBlipVideoQFormerConfig(**qformer_config)
text_model_type = text_config.get('model_type', 'opt')
self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
self.num_query_tokens = num_query_tokens
self.video_token_index = video_token_index
self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
self.initializer_factor = 1.0
self.initializer_range = 0.02
@classmethod
def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVideoVisionConfig, qformer_config: InstructBlipVideoQFormerConfig, text_config: PretrainedConfig, **kwargs):
"""
Instantiate a [`InstructBlipVideoConfig`] (or a derived class) from an InstructBlipVideo vision model, Q-Former and
language model configurations.
Returns:
[`InstructBlipVideoConfig`]: An instance of a configuration object
"""
return cls(vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs)
|
class InstructBlipVideoConfig(PretrainedConfig):
'''
[`InstructBlipVideoConfig`] is the configuration class to store the configuration of a
[`InstructBlipVideoForConditionalGeneration`]. It is used to instantiate an Instructblipvideo model according to the specified
arguments, defining the vision model, Q-Former model and language model configs. Instantiating a configuration with
the defaults will yield a similar configuration to that of the Instructblipvideo
[Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`InstructBlipVideoVisionConfig`].
qformer_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`InstructBlipVideoQFormerConfig`].
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize any [`PretrainedConfig`].
num_query_tokens (`int`, *optional*, defaults to 32):
The number of query tokens passed through the Transformer.
video_token_index (`int`, *optional*):
Token index of special video token.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import (
... InstructBlipVideoVisionConfig,
... InstructBlipVideoQFormerConfig,
... OPTConfig,
... InstructBlipVideoConfig,
... InstructBlipVideoForConditionalGeneration,
... )
>>> # Initializing a InstructBlipVideoConfig with Salesforce/instruct-blip-flan-t5 style configuration
>>> configuration = InstructBlipVideoConfig()
>>> # Initializing a InstructBlipVideoForConditionalGeneration (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
>>> model = InstructBlipVideoForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a InstructBlipVideoConfig from a InstructBlipVideoVisionConfig, InstructBlipVideoQFormerConfig and any PretrainedConfig
>>> # Initializing Instructblipvideo vision, Instructblipvideo Q-Former and language model configurations
>>> vision_config = InstructBlipVideoVisionConfig()
>>> qformer_config = InstructBlipVideoQFormerConfig()
>>> text_config = OPTConfig()
>>> config = InstructBlipVideoConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
```'''
def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, video_token_index=None, **kwargs):
pass
@classmethod
def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVideoVisionConfig, qformer_config: InstructBlipVideoQFormerConfig, text_config: PretrainedConfig, **kwargs):
'''
Instantiate a [`InstructBlipVideoConfig`] (or a derived class) from an InstructBlipVideo vision model, Q-Former and
language model configurations.
Returns:
[`InstructBlipVideoConfig`]: An instance of a configuration object
'''
pass
| 4
| 2
| 28
| 4
| 21
| 3
| 3
| 0.98
| 1
| 3
| 2
| 0
| 1
| 8
| 2
| 2
| 120
| 21
| 50
| 29
| 32
| 49
| 26
| 14
| 23
| 5
| 1
| 1
| 6
|
3,177
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/configuration_instructblipvideo.py
|
transformers.models.instructblipvideo.configuration_instructblipvideo.InstructBlipVideoQFormerConfig
|
from ...configuration_utils import PretrainedConfig
class InstructBlipVideoQFormerConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`InstructBlipVideoQFormerModel`]. It is used to
instantiate an InstructBlipVideo Querying Transformer (Q-Former) model according to the specified arguments, defining the
model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
the InstructBlipVideo [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5)
architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs.
Read the documentation from [`PretrainedConfig`] for more information.
Note that [`InstructBlipVideoQFormerModel`] is very similar to [`BertLMHeadModel`] with interleaved cross-attention.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the Q-Former model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling the model.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Token id used for padding sequences.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
cross_attention_frequency (`int`, *optional*, defaults to 2):
The frequency of adding cross-attention to the Transformer layers.
encoder_hidden_size (`int`, *optional*, defaults to 1408):
The hidden size of the hidden states for cross-attention.
Examples:
```python
>>> from transformers import InstructBlipVideoQFormerConfig, InstructBlipVideoQFormerModel
>>> # Initializing a InstructBlipVideo Salesforce/instruct-blip-flan-t5 style configuration
>>> configuration = InstructBlipVideoQFormerConfig()
>>> # Initializing a model (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
>>> model = InstructBlipVideoQFormerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'instructblipvideo_qformer'
base_config_key = 'qformer_config'
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.cross_attention_frequency = cross_attention_frequency
self.encoder_hidden_size = encoder_hidden_size
|
class InstructBlipVideoQFormerConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`InstructBlipVideoQFormerModel`]. It is used to
instantiate an InstructBlipVideo Querying Transformer (Q-Former) model according to the specified arguments, defining the
model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
the InstructBlipVideo [Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5)
architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs.
Read the documentation from [`PretrainedConfig`] for more information.
Note that [`InstructBlipVideoQFormerModel`] is very similar to [`BertLMHeadModel`] with interleaved cross-attention.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the Q-Former model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling the model.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 0):
Token id used for padding sequences.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
cross_attention_frequency (`int`, *optional*, defaults to 2):
The frequency of adding cross-attention to the Transformer layers.
encoder_hidden_size (`int`, *optional*, defaults to 1408):
The hidden size of the hidden states for cross-attention.
Examples:
```python
>>> from transformers import InstructBlipVideoQFormerConfig, InstructBlipVideoQFormerModel
>>> # Initializing a InstructBlipVideo Salesforce/instruct-blip-flan-t5 style configuration
>>> configuration = InstructBlipVideoQFormerConfig()
>>> # Initializing a model (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
>>> model = InstructBlipVideoQFormerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
pass
| 2
| 1
| 35
| 1
| 34
| 0
| 1
| 1.51
| 1
| 1
| 0
| 0
| 1
| 14
| 1
| 1
| 102
| 9
| 37
| 36
| 17
| 56
| 19
| 18
| 17
| 1
| 1
| 0
| 1
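An illustrative sketch to accompany the Q-Former configuration record above. It assumes the conventional Q-Former behavior that layers whose index is divisible by `cross_attention_frequency` carry cross-attention to the vision encoder; that convention is not visible in the configuration class itself, so treat the output as a sanity check rather than a guarantee.
```python
from transformers import InstructBlipVideoQFormerConfig

config = InstructBlipVideoQFormerConfig()  # num_hidden_layers=12, cross_attention_frequency=2 by default
# Assumed convention: cross-attention is added in layers whose index is a multiple of the frequency.
cross_attn_layers = [i for i in range(config.num_hidden_layers) if i % config.cross_attention_frequency == 0]
print(cross_attn_layers)  # [0, 2, 4, 6, 8, 10]
```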
|
3,178
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/configuration_instructblipvideo.py
|
transformers.models.instructblipvideo.configuration_instructblipvideo.InstructBlipVideoVisionConfig
|
from ...configuration_utils import PretrainedConfig
class InstructBlipVideoVisionConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`InstructBlipVideoVisionModel`]. It is used to
instantiate an InstructBlipVideo vision encoder according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the InstructBlipVideo
[Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1408):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 6144):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 39):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 1e-10):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries and values in the self-attention layers.
Example:
```python
>>> from transformers import InstructBlipVideoVisionConfig, InstructBlipVideoVisionModel
>>> # Initializing a InstructBlipVideoVisionConfig with Salesforce/instruct-blip-flan-t5 style configuration
>>> configuration = InstructBlipVideoVisionConfig()
>>> # Initializing a InstructBlipVideoVisionModel (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
>>> model = InstructBlipVideoVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'instructblipvideo_vision_model'
base_config_key = 'vision_config'
def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act='gelu', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.patch_size = patch_size
self.image_size = image_size
self.initializer_range = initializer_range
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.qkv_bias = qkv_bias
|
class InstructBlipVideoVisionConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`InstructBlipVideoVisionModel`]. It is used to
instantiate an InstructBlipVideo vision encoder according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the InstructBlipVideo
[Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1408):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 6144):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 39):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 1e-10):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries and values in the self-attention layers.
Example:
```python
>>> from transformers import InstructBlipVideoVisionConfig, InstructBlipVideoVisionModel
>>> # Initializing a InstructBlipVideoVisionConfig with Salesforce/instruct-blip-flan-t5 style configuration
>>> configuration = InstructBlipVideoVisionConfig()
>>> # Initializing a InstructBlipVideoVisionModel (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
>>> model = InstructBlipVideoVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act='gelu', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
pass
| 2
| 1
| 28
| 1
| 27
| 0
| 1
| 1.4
| 1
| 1
| 0
| 0
| 1
| 11
| 1
| 1
| 82
| 10
| 30
| 29
| 14
| 42
| 16
| 15
| 14
| 1
| 1
| 0
| 1
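A minimal sketch of how the default vision configuration maps to a patch sequence length. The class-token detail is an assumption about the vision model and is not stated by the configuration class itself.
```python
from transformers import InstructBlipVideoVisionConfig

config = InstructBlipVideoVisionConfig()  # image_size=224, patch_size=14 by default
num_patches = (config.image_size // config.patch_size) ** 2
print(num_patches)  # 256 patches per frame; the vision model is assumed to prepend one class embedding
```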
|
3,179
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/image_processing_instructblipvideo.py
|
transformers.models.instructblipvideo.image_processing_instructblipvideo.InstructBlipVideoImageProcessor
|
import numpy as np
from ...image_transforms import convert_to_rgb, resize, to_channel_dimension_format
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...video_utils import VideoInput, make_batched_videos
from typing import Optional, Union
from ...utils import TensorType, filter_out_non_signature_kwargs, logging
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, to_numpy_array, valid_images, validate_preprocess_arguments
class InstructBlipVideoImageProcessor(BaseImageProcessor):
"""
Constructs an InstructBLIPVideo image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
overridden by the `resample` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
overridden by the `rescale_factor` parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
"""
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None:
super().__init__(**kwargs)
size = size if size is not None else {'height': 384, 'width': 384}
size = get_size_dict(size, default_to_square=True)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.do_convert_rgb = do_convert_rgb
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if 'height' not in size or 'width' not in size:
raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
output_size = (size['height'], size['width'])
return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
@filter_out_non_signature_kwargs()
def preprocess(self, images: Optional[VideoInput]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, do_convert_rgb: Optional[bool]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
"""
Preprocess a video or batch of images/videos.
Args:
images (`VideoInput`):
Video frames to preprocess. Expects a single or batch of videos as a list of frames with pixel values
ranging from 0 to 255. If passing in video with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the video.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Controls the size of the video frames after `resize`. Expects a dictionary with `height` and `width`
keys; each frame is resized to exactly `(size["height"], size["width"])`.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the video. Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the video values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the video by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the video.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to normalize the video by if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to normalize the video by if `do_normalize` is set to `True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
videos = make_batched_videos(images)
logger.warning('`InstructBlipVideoImageProcessor` is deprecated and will be removed in v5.0. We recommend to load an instance of `InstructBlipVideoVideoProcessor` to process videos for the model. ')
validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)
if not valid_images(videos):
raise ValueError('Invalid input type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor')
pixel_values = [[self._preprocess_image(image=frame, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_convert_rgb=do_convert_rgb, data_format=data_format, input_data_format=input_data_format) for frame in video] for video in videos]
encoded_outputs = BatchFeature(data={'pixel_values': pixel_values}, tensor_type=return_tensors)
return encoded_outputs
def _preprocess_image(self, image: Optional[ImageInput]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
if do_convert_rgb:
image = convert_to_rgb(image)
image = to_numpy_array(image)
if do_rescale and is_scaled_image(image):
logger.warning_once('It looks like you are trying to rescale already rescaled video frames. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
return image
|
class InstructBlipVideoImageProcessor(BaseImageProcessor):
'''
Constructs an InstructBLIPVideo image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
`do_resize` parameter in the `preprocess` method.
size (`dict`, *optional*, defaults to `{"height": 384, "width": 384}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be
overridden by the `resample` parameter in the `preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
`do_rescale` parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
overridden by the `rescale_factor` parameter in the `preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
'''
def __init__(self, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None:
pass
def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
'''
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
'''
pass
@filter_out_non_signature_kwargs()
def preprocess(self, images: Optional[VideoInput]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, do_convert_rgb: Optional[bool]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:
'''
Preprocess a video or batch of images/videos.
Args:
images (`VideoInput`):
Video frames to preprocess. Expects a single or batch of videos as a list of frames with pixel values
ranging from 0 to 255. If passing in video with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the video.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Controls the size of the video frames after `resize`. Expects a dictionary with `height` and `width`
keys; each frame is resized to exactly `(size["height"], size["width"])`.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the video. Only has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the video values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the video by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the video.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to normalize the video by if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to normalize the video by if `do_normalize` is set to `True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
'''
pass
def _preprocess_image(self, image: Optional[ImageInput]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, do_convert_rgb: Optional[bool]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
pass
| 6
| 3
| 59
| 5
| 36
| 18
| 6
| 0.74
| 1
| 8
| 2
| 0
| 4
| 9
| 4
| 24
| 279
| 25
| 146
| 69
| 91
| 108
| 55
| 19
| 50
| 11
| 3
| 1
| 24
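A hedged usage sketch for the image processor above, using random frames in place of a real video. The printed shape assumes the default `{"height": 384, "width": 384}` size and channels-first output; note that the processor itself warns it is deprecated in favor of `InstructBlipVideoVideoProcessor`.
```python
import numpy as np
from transformers import InstructBlipVideoImageProcessor

processor = InstructBlipVideoImageProcessor()
# One "video" made of 4 random RGB frames (height, width, channels) with pixel values in [0, 255].
video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(4)]
outputs = processor.preprocess(images=[video], return_tensors="np")
print(outputs["pixel_values"].shape)  # expected: (1, 4, 3, 384, 384)
```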
|
3,180
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py
|
transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoAttention
|
from torch import nn
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from typing import Any, Callable, Optional, Union
import torch
class InstructBlipVideoAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
self.scale = self.head_dim ** (-0.5)
self.is_causal = False
self.attention_dropout = config.attention_dropout
self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=False)
if config.qkv_bias:
q_bias = nn.Parameter(torch.zeros(self.embed_dim))
v_bias = nn.Parameter(torch.zeros(self.embed_dim))
else:
q_bias = None
v_bias = None
if q_bias is not None:
qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
self.qkv.bias = nn.Parameter(qkv_bias)
self.projection = nn.Linear(self.embed_dim, self.embed_dim)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, tgt_len, embed_dim = hidden_states.size()
mixed_qkv = self.qkv(hidden_states)
mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute(2, 0, 3, 1, 4)
query_states, key_states, value_states = (mixed_qkv[0], mixed_qkv[1], mixed_qkv[2])
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask=None, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scale, **kwargs)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.projection(attn_output)
return (attn_output, attn_weights)
|
class InstructBlipVideoAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config):
pass
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
pass
def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
'''Input shape: Batch x Time x Channel'''
pass
| 4
| 2
| 25
| 6
| 17
| 2
| 3
| 0.16
| 1
| 5
| 0
| 0
| 3
| 8
| 3
| 13
| 79
| 20
| 51
| 29
| 42
| 8
| 40
| 24
| 36
| 4
| 1
| 1
| 8
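A small sketch exercising the attention block in isolation. It borrows the vision configuration for its hyperparameters and assumes the default eager attention path; the tensor sizes are illustrative only.
```python
import torch
from transformers import InstructBlipVideoVisionConfig
from transformers.models.instructblipvideo.modeling_instructblipvideo import InstructBlipVideoAttention

config = InstructBlipVideoVisionConfig(hidden_size=64, num_attention_heads=4, intermediate_size=128)
attn = InstructBlipVideoAttention(config)
hidden_states = torch.randn(2, 10, config.hidden_size)  # (batch, seq_len, hidden)
attn_output, attn_weights = attn(hidden_states)
print(attn_output.shape)  # torch.Size([2, 10, 64]); attn_weights spans the 10x10 token grid per head
```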
|
3,181
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py
|
transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoEncoder
|
import torch
from ...processing_utils import Unpack
from .configuration_instructblipvideo import InstructBlipVideoConfig, InstructBlipVideoQFormerConfig, InstructBlipVideoVisionConfig
from typing import Any, Callable, Optional, Union
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from torch import nn
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions
class InstructBlipVideoEncoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`InstructBlipVideoEncoderLayer`].
Args:
config (`InstructBlipVideoConfig`):
The corresponding vision configuration for the `InstructBlipVideoEncoder`.
"""
def __init__(self, config: InstructBlipVideoConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([InstructBlipVideoEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@auto_docstring
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutput]:
hidden_states = inputs_embeds
for encoder_layer in self.layers:
hidden_states = encoder_layer(hidden_states, attention_mask=attention_mask, **kwargs)
return BaseModelOutput(last_hidden_state=hidden_states)
|
class InstructBlipVideoEncoder(nn.Module):
'''
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`InstructBlipVideoEncoderLayer`].
Args:
config (`InstructBlipVideoConfig`):
The corresponding vision configuration for the `InstructBlipVideoEncoder`.
'''
def __init__(self, config: InstructBlipVideoConfig):
pass
@auto_docstring
def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutput]:
pass
| 4
| 1
| 37
| 4
| 24
| 9
| 7
| 0.52
| 1
| 9
| 3
| 0
| 2
| 3
| 2
| 12
| 84
| 11
| 48
| 18
| 38
| 25
| 27
| 11
| 24
| 12
| 1
| 2
| 13
|
3,182
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py
|
transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoEncoderLayer
|
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from torch import nn
import torch
from ...modeling_layers import GradientCheckpointingLayer
from ...processing_utils import Unpack
from .configuration_instructblipvideo import InstructBlipVideoConfig, InstructBlipVideoQFormerConfig, InstructBlipVideoVisionConfig
class InstructBlipVideoEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: InstructBlipVideoConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = InstructBlipVideoAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = InstructBlipVideoMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
@auto_docstring
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, _ = self.self_attn(hidden_states=hidden_states, head_mask=attention_mask, **kwargs)
hidden_states = hidden_states + residual
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = hidden_states + residual
return hidden_states
|
class InstructBlipVideoEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: InstructBlipVideoConfig):
pass
@auto_docstring
def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, **kwargs: Unpack[TransformersKwargs]) -> torch.FloatTensor:
pass
| 4
| 0
| 22
| 3
| 15
| 5
| 2
| 0.33
| 1
| 6
| 3
| 0
| 2
| 5
| 2
| 12
| 46
| 6
| 30
| 16
| 22
| 10
| 21
| 11
| 18
| 2
| 1
| 1
| 3
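A companion sketch for the pre-norm residual layer above. The shape-preserving behavior follows from the forward pass shown; the small hyperparameters are arbitrary.
```python
import torch
from transformers import InstructBlipVideoVisionConfig
from transformers.models.instructblipvideo.modeling_instructblipvideo import InstructBlipVideoEncoderLayer

config = InstructBlipVideoVisionConfig(hidden_size=64, num_attention_heads=4, intermediate_size=128)
layer = InstructBlipVideoEncoderLayer(config)
x = torch.randn(1, 8, config.hidden_size)
y = layer(x, attention_mask=None)  # residual branch around attention, then around the MLP
print(y.shape)  # torch.Size([1, 8, 64]): the layer preserves the (batch, seq_len, hidden) shape
```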
|
3,183
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py
|
transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoForConditionalGeneration
|
from ..auto import AutoModel, AutoModelForCausalLM, AutoModelForSeq2SeqLM
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from torch import nn
import torch
from ...processing_utils import Unpack
from .configuration_instructblipvideo import InstructBlipVideoConfig, InstructBlipVideoQFormerConfig, InstructBlipVideoVisionConfig
from ...generation import GenerationMixin
from typing import Any, Callable, Optional, Union
@auto_docstring(custom_intro='\n InstructBlipVideo Model for generating text given an image and an optional text prompt. The model consists of a vision\n encoder, Querying Transformer (Q-Former) and a language model.\n\n One can optionally pass `input_ids` to the model, which serve as a text prompt, to make the language model continue\n the prompt. Otherwise, the language model starts generating text from the [BOS] (beginning-of-sequence) token.\n ')
class InstructBlipVideoForConditionalGeneration(InstructBlipVideoPreTrainedModel, GenerationMixin):
config: InstructBlipVideoConfig
main_input_name = 'pixel_values'
_can_compile_fullgraph = True
_keep_in_fp32_modules = ['query_tokens']
def __init__(self, config: InstructBlipVideoConfig):
super().__init__(config)
self.vision_model = InstructBlipVideoVisionModel._from_config(config.vision_config)
self.query_tokens = nn.Parameter(torch.zeros(1, config.num_query_tokens, config.qformer_config.hidden_size))
self.qformer = InstructBlipVideoQFormerModel._from_config(config.qformer_config)
self.language_projection = nn.Linear(config.qformer_config.hidden_size, config.text_config.hidden_size)
if config.use_decoder_only_language_model:
language_model = AutoModelForCausalLM.from_config(config.text_config)
else:
language_model = AutoModelForSeq2SeqLM.from_config(config.text_config)
if language_model._no_split_modules is not None:
self._no_split_modules.extend(language_model._no_split_modules)
if language_model._keep_in_fp32_modules is not None:
self._keep_in_fp32_modules.extend(language_model._keep_in_fp32_modules)
self.language_model = language_model
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def set_output_embeddings(self, new_embeddings):
self.language_model.set_output_embeddings(new_embeddings)
def get_output_embeddings(self) -> nn.Module:
return self.language_model.get_output_embeddings()
def get_encoder(self):
return self.language_model.get_encoder()
def get_decoder(self):
return self.language_model.get_decoder()
def _tie_weights(self):
if not self.config.use_decoder_only_language_model:
self.language_model.encoder.embed_tokens = self.language_model.shared
self.language_model.decoder.embed_tokens = self.language_model.shared
def _preprocess_accelerate(self):
"""
Some pre-processing hacks to make the model `accelerate` compatible. Check
https://github.com/huggingface/transformers/pull/21707 for more details.
"""
hf_device_map = self.hf_device_map
if len(hf_device_map) > 1 and 'language_model' not in hf_device_map and (torch.cuda.device_count() > 1):
logger.warning('The `language_model` is not in the `hf_device_map` dictionary and you are running your script in a multi-GPU environment. this may lead to unexpected behavior when using `accelerate`. Please pass a `device_map` that contains `language_model` to remove this warning. Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for more details on creating a `device_map` for large models.')
if hasattr(self.language_model, '_hf_hook'):
self.language_model._hf_hook.io_same_device = True
def get_image_features(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.LongTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=False):
"""
Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
"""
pass
def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device))
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.video_token_id
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
return special_image_mask
@can_return_tuple
@auto_docstring
def forward(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.FloatTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, input_ids: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, InstructBlipVideoForConditionalGenerationModelOutput]:
"""
qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length)):
The sequence used as a prompt to be fed to the Q-Former module.
qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
Mask to avoid performing attention on padding token indices.
Examples:
```python
>>> from transformers import InstructBlipVideoProcessor, InstructBlipVideoForConditionalGeneration
>>> import torch
>>> from huggingface_hub import hf_hub_download
>>> import av
>>> import numpy as np
>>> def read_video_pyav(container, indices):
... '''
... Decode the video with PyAV decoder.
... Args:
... container (`av.container.input.InputContainer`): PyAV container.
... indices (`list[int]`): List of frame indices to decode.
... Returns:
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
... '''
... frames = []
... container.seek(0)
... start_index = indices[0]
... end_index = indices[-1]
... for i, frame in enumerate(container.decode(video=0)):
... if i > end_index:
... break
... if i >= start_index and i in indices:
... frames.append(frame)
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
>>> model = InstructBlipVideoForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b", device_map="auto")
>>> processor = InstructBlipVideoProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
>>> file_path = hf_hub_download(
... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
... )
>>> container = av.open(file_path)
>>> # sample uniformly 4 frames from the video
>>> total_frames = container.streams.video[0].frames
>>> indices = np.arange(0, total_frames, total_frames / 4).astype(int)
>>> clip = read_video_pyav(container, indices)
>>> prompt = "What is happening in the video?"
>>> inputs = processor(text=prompt, images=clip, return_tensors="pt").to(model.device)
>>> outputs = model.generate(
... **inputs,
... do_sample=False,
... num_beams=5,
... max_length=256,
... repetition_penalty=1.5,
... length_penalty=1.0,
... )
>>> generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
>>> print(generated_text)
"A person is eating a bowl of pasta, and they are using a fork to eat it. The person is sitting at a table, and the plate of pasta is on the table in front"
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
language_model_inputs, vision_outputs, query_outputs = self.get_video_features(pixel_values, qformer_input_ids=qformer_input_ids, qformer_attention_mask=qformer_attention_mask, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)
vision_outputs = vision_outputs.to_tuple() if not return_dict else vision_outputs
query_outputs = query_outputs.to_tuple() if not return_dict else query_outputs
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs)
if self.config.use_decoder_only_language_model:
outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, use_cache=use_cache, **kwargs)
logits = outputs.logits if return_dict else outputs[0]
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs)
else:
outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, use_cache=use_cache, **kwargs)
loss = outputs.loss if return_dict else outputs[0]
logits = outputs.logits if return_dict else outputs[1]
return InstructBlipVideoForConditionalGenerationModelOutput(loss=loss, logits=logits, vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs)
@torch.no_grad()
def generate(self, pixel_values: torch.FloatTensor, qformer_input_ids: Optional[torch.LongTensor]=None, qformer_attention_mask: Optional[torch.LongTensor]=None, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False, **generate_kwargs) -> torch.LongTensor:
"""
Overrides `generate` function to be able to use the model as a conditional generator.
Args:
pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width) or
(batch_size, num_frames, num_channels, height, width)): Input images or videos to be processed.
qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
The sequence used as a prompt to be fed to the Q-Former module.
qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
Mask to avoid performing attention on padding token indices.
input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
The sequence used as a prompt for the generation.
attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
Mask to avoid performing attention on padding token indices.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Embedded representation of the inputs. Should be float, not int tokens.
interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
Whether to interpolate the positional encoding of the image embeddings.
Returns:
captions (list): A list of strings of length batch_size * num_captions.
"""
if hasattr(self, 'hf_device_map'):
self._preprocess_accelerate()
batch_size = pixel_values.shape[0]
language_model_inputs, vision_outputs, query_outputs = self.get_video_features(pixel_values, qformer_input_ids=qformer_input_ids, qformer_attention_mask=qformer_attention_mask, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)
if inputs_embeds is None:
if input_ids is None:
video_tokens = [self.config.video_token_index] * self.config.num_query_tokens * 4
start_tokens = video_tokens + [self.config.text_config.bos_token_id]
input_ids = torch.tensor([start_tokens], dtype=torch.long, device=pixel_values.device)
input_ids = input_ids.repeat(batch_size, 1)
inputs_embeds = self.get_input_embeddings()(input_ids)
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs)
inputs = {'inputs_embeds': inputs_embeds, 'attention_mask': attention_mask}
if not self.language_model.config.is_encoder_decoder:
inputs['input_ids'] = input_ids
outputs = self.language_model.generate(**inputs, **generate_kwargs)
return outputs
def get_video_features(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.LongTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=False):
"""
Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
"""
batch_size, frames, channel, height, width = pixel_values.shape
pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width)
vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)
image_embeds = vision_outputs[0]
image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
if qformer_attention_mask is None:
qformer_attention_mask = torch.ones_like(qformer_input_ids)
qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0)
qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0)
qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
query_outputs = self.qformer(input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True)
query_output = query_outputs[0][:, :query_tokens.size(1), :]
language_model_inputs = self.language_projection(query_output)
language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1)
if return_dict:
return (language_model_inputs, vision_outputs, query_outputs)
return language_model_inputs
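# Editorial aside (not part of the dataset row above): a minimal sketch of the shape
# bookkeeping in get_video_features/generate, assuming the default num_query_tokens=32
# and a 4-frame clip. All names and sizes here are illustrative.
import torch

batch_size, frames, num_query_tokens, hidden = 2, 4, 32, 2048
# The Q-Former runs once per frame, so its query outputs stack to
# (batch_size * frames, num_query_tokens, hidden) before the final reshape.
per_frame_queries = torch.zeros(batch_size * frames, num_query_tokens, hidden)
language_model_inputs = per_frame_queries.reshape(batch_size, num_query_tokens * frames, -1)
assert language_model_inputs.shape == (batch_size, 128, hidden)
# generate() mirrors this by prepending num_query_tokens * 4 video placeholder tokens
# when no prompt is given, so masked_scatter has exactly one slot per query vector.
assert num_query_tokens * 4 == language_model_inputs.shape[1]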
| null | 19 | 6 | 34 | 5 | 20 | 9 | 3 | 0.43 | 2 | 8 | 6 | 0 | 11 | 5 | 11 | 12 | 390 | 61 | 231 | 79 | 191 | 99 | 128 | 54 | 116 | 12 | 2 | 2 | 36 |
3,184 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py | transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoForConditionalGenerationModelOutput |
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from dataclasses import dataclass
from typing import Any, Callable, Optional, Union
import torch
@dataclass
@auto_docstring(custom_intro='\n Class defining the outputs of [`InstructBlipVideoForConditionalGeneration`].\n ')
class InstructBlipVideoForConditionalGenerationModelOutput(ModelOutput):
"""
loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Language modeling loss from the language model.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head of the language model.
vision_outputs (`BaseModelOutputWithPooling`):
Outputs of the vision encoder.
qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):
Outputs of the Q-Former (Querying Transformer).
language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
Outputs of the language model.
"""
loss: Optional[tuple[torch.FloatTensor]] = None
logits: Optional[tuple[torch.FloatTensor]] = None
vision_outputs: Optional[torch.FloatTensor] = None
qformer_outputs: Optional[tuple[torch.FloatTensor]] = None
language_model_outputs: Optional[tuple[torch.FloatTensor]] = None
def to_tuple(self) -> tuple[Any]:
return tuple((self[k] if k not in ['vision_outputs', 'qformer_outputs', 'language_model_outputs'] else getattr(self, k).to_tuple() for k in self.keys()))
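# Editorial aside: how to_tuple() above flattens nested outputs. The import path follows
# the row's file_path and may vary by transformers version; the tensors are placeholders.
import torch
from transformers.modeling_outputs import BaseModelOutputWithPooling
from transformers.models.instructblipvideo.modeling_instructblipvideo import (
    InstructBlipVideoForConditionalGenerationModelOutput,
)

vision = BaseModelOutputWithPooling(
    last_hidden_state=torch.zeros(1, 5, 8), pooler_output=torch.zeros(1, 8)
)
out = InstructBlipVideoForConditionalGenerationModelOutput(
    logits=torch.zeros(1, 2, 10), vision_outputs=vision
)
flat = out.to_tuple()  # plain tensors pass through; nested ModelOutputs become tuples
assert isinstance(flat[-1], tuple)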
|
@dataclass
@auto_docstring(custom_intro='\n Class defining the outputs of [`InstructBlipVideoForConditionalGeneration`].\n ')
class InstructBlipVideoForConditionalGenerationModelOutput(ModelOutput):
'''
loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Language modeling loss from the language model.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head of the language model.
vision_outputs (`BaseModelOutputWithPooling`):
Outputs of the vision encoder.
qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):
Outputs of the Q-Former (Querying Transformer).
language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
Outputs of the language model.
'''
def to_tuple(self) -> tuple[Any]:
pass
| 4 | 1 | 7 | 0 | 7 | 0 | 2 | 1.08 | 1 | 2 | 0 | 0 | 1 | 0 | 1 | 1 | 30 | 3 | 13 | 7 | 11 | 14 | 8 | 7 | 6 | 2 | 1 | 0 | 2 |
3,185 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py | transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoMLP |
from torch import nn
from ...activations import ACT2FN
import torch
class InstructBlipVideoMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
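# Editorial aside: exercising InstructBlipVideoMLP with a deliberately tiny vision config.
# The import path follows the row's file_path; the config sizes are illustrative only.
import torch
from transformers import InstructBlipVideoVisionConfig
from transformers.models.instructblipvideo.modeling_instructblipvideo import InstructBlipVideoMLP

cfg = InstructBlipVideoVisionConfig(hidden_size=16, intermediate_size=32)
mlp = InstructBlipVideoMLP(cfg)
hidden = torch.randn(2, 7, 16)      # (batch, seq_len, hidden_size)
out = mlp(hidden)                   # fc1 -> activation -> fc2
assert out.shape == hidden.shape    # the block preserves the hidden size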
|
class InstructBlipVideoMLP(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 4 | 2 | 12 | 13 | 1 | 12 | 7 | 9 | 0 | 12 | 7 | 9 | 1 | 1 | 0 | 2 |
3,186 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py | transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoPreTrainedModel |
from torch import nn
from .configuration_instructblipvideo import InstructBlipVideoConfig, InstructBlipVideoQFormerConfig, InstructBlipVideoVisionConfig
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
@auto_docstring
class InstructBlipVideoPreTrainedModel(PreTrainedModel):
config: InstructBlipVideoConfig
base_model_prefix = 'blip'
supports_gradient_checkpointing = True
_supports_attention_backend = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_no_split_modules = ['InstructBlipVideoQFormerEmbeddings', 'InstructBlipVideoAttention', 'InstructBlipVideoQFormerMultiHeadAttention', 'InstructBlipVideoQFormerSelfOutput']
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_range
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=factor)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=factor)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, InstructBlipVideoVisionEmbeddings):
nn.init.trunc_normal_(module.position_embedding, mean=0.0, std=factor)
nn.init.trunc_normal_(module.class_embedding, mean=0.0, std=factor)
elif isinstance(module, (InstructBlipVideoForConditionalGeneration, InstructBlipVideoModel)):
module.query_tokens.data.zero_()
|
@auto_docstring
class InstructBlipVideoPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 3 | 1 | 19 | 2 | 16 | 1 | 7 | 0.19 | 1 | 2 | 2 | 3 | 1 | 0 | 1 | 1 | 37 | 5 | 27 | 8 | 25 | 5 | 20 | 8 | 18 | 7 | 1 | 2 | 7 |
3,187 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py | transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoQFormerAttention |
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from torch import nn
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
import torch
from ...processing_utils import Unpack
from typing import Any, Callable, Optional, Union
class InstructBlipVideoQFormerAttention(nn.Module):
def __init__(self, config, is_cross_attention=False):
super().__init__()
self.attention = InstructBlipVideoQFormerMultiHeadAttention(config, is_cross_attention)
self.output = InstructBlipVideoQFormerSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads)
self.attention.query = prune_linear_layer(self.attention.query, index)
self.attention.key = prune_linear_layer(self.attention.key, index)
self.attention.value = prune_linear_layer(self.attention.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
attn_output, _ = self.attention(hidden_states=hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, **kwargs)
attention_output = self.output(attn_output, hidden_states)
return attention_output
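# Editorial aside: the effect of prune_heads on the attention shapes, assuming the default
# Q-Former config (hidden_size=768, 12 heads of size 64). Import path follows the row's file_path.
from transformers import InstructBlipVideoQFormerConfig
from transformers.models.instructblipvideo.modeling_instructblipvideo import (
    InstructBlipVideoQFormerAttention,
)

attn = InstructBlipVideoQFormerAttention(InstructBlipVideoQFormerConfig())
attn.prune_heads([0, 1])
# Two 64-dim heads are removed, so the q/k/v and output projections shrink accordingly.
assert attn.attention.num_attention_heads == 10
assert attn.attention.all_head_size == 10 * 64
assert attn.pruned_heads == {0, 1}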
|
class InstructBlipVideoQFormerAttention(nn.Module):
def __init__(self, config, is_cross_attention=False):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> torch.Tensor:
pass
| 4 | 0 | 15 | 1 | 13 | 1 | 1 | 0.07 | 1 | 6 | 2 | 0 | 3 | 3 | 3 | 13 | 47 | 4 | 41 | 20 | 28 | 3 | 22 | 11 | 18 | 2 | 1 | 1 | 4 |
3,188 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py | transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoQFormerEmbeddings |
from torch import nn
import torch
class InstructBlipVideoQFormerEmbeddings(nn.Module):
"""Construct the embeddings from word and position embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
self.config = config
def forward(self, input_ids=None, position_ids=None, query_embeds=None, past_key_values_length=0):
if input_ids is not None:
seq_length = input_ids.size()[1]
else:
seq_length = 0
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length].clone()
if input_ids is not None:
embeddings = self.word_embeddings(input_ids)
if self.position_embedding_type == 'absolute':
position_embeddings = self.position_embeddings(position_ids.to(embeddings.device))
embeddings = embeddings + position_embeddings
if query_embeds is not None:
embeddings = torch.cat((query_embeds, embeddings), dim=1)
else:
embeddings = query_embeds
embeddings = embeddings.to(self.layernorm.weight.dtype)
embeddings = self.layernorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
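# Editorial aside: the embedding layer prepends the learned query tokens to the embedded
# instruction tokens. Default Q-Former config assumed; the shapes are illustrative.
import torch
from transformers import InstructBlipVideoQFormerConfig
from transformers.models.instructblipvideo.modeling_instructblipvideo import (
    InstructBlipVideoQFormerEmbeddings,
)

cfg = InstructBlipVideoQFormerConfig()
emb = InstructBlipVideoQFormerEmbeddings(cfg)
query_embeds = torch.zeros(1, 32, cfg.hidden_size)        # learned query tokens
input_ids = torch.randint(0, cfg.vocab_size, (1, 6))      # instruction tokens
out = emb(input_ids=input_ids, query_embeds=query_embeds)
assert out.shape == (1, 32 + 6, cfg.hidden_size)          # queries come first, then text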
|
class InstructBlipVideoQFormerEmbeddings(nn.Module):
'''Construct the embeddings from word and position embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids=None, position_ids=None, query_embeds=None, past_key_values_length=0):
pass
| 3 | 1 | 23 | 4 | 19 | 1 | 4 | 0.05 | 1 | 1 | 0 | 0 | 2 | 6 | 2 | 12 | 49 | 9 | 38 | 18 | 29 | 2 | 28 | 12 | 25 | 6 | 1 | 2 | 7 |
3,189 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py | transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoQFormerEncoder |
from ...processing_utils import Unpack
from torch import nn
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
class InstructBlipVideoQFormerEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([InstructBlipVideoQFormerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, query_length=0, **kwargs: Unpack[TransformersKwargs]):
for i in range(self.config.num_hidden_layers):
layer_module = self.layer[i]
layer_head_mask = head_mask[i] if head_mask is not None else None
hidden_states = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, query_length=query_length, **kwargs)
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states)
|
class InstructBlipVideoQFormerEncoder(nn.Module):
def __init__(self, config):
pass
@can_return_tuple
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, query_length=0, **kwargs: Unpack[TransformersKwargs]):
pass
| 4 | 0 | 46 | 4 | 42 | 0 | 9 | 0 | 1 | 5 | 2 | 0 | 2 | 3 | 2 | 12 | 93 | 8 | 85 | 28 | 69 | 0 | 35 | 15 | 32 | 16 | 1 | 3 | 17 |
3,190 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py | transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoQFormerIntermediate |
from torch import nn
from ...activations import ACT2FN
import torch
class InstructBlipVideoQFormerIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class InstructBlipVideoQFormerIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 1 | 3 | 0 | 0 | 2 | 2 | 2 | 12 | 13 | 1 | 12 | 5 | 9 | 0 | 11 | 5 | 8 | 2 | 1 | 1 | 3 |
3,191 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py | transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoQFormerLayer |
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
import torch
from ...modeling_layers import GradientCheckpointingLayer
from ...processing_utils import Unpack
class InstructBlipVideoQFormerLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = InstructBlipVideoQFormerAttention(config)
self.layer_idx = layer_idx
if layer_idx % config.cross_attention_frequency == 0:
self.crossattention = InstructBlipVideoQFormerAttention(config, is_cross_attention=True)
self.has_cross_attention = True
else:
self.has_cross_attention = False
self.intermediate = InstructBlipVideoQFormerIntermediate(config)
self.output = InstructBlipVideoQFormerOutput(config)
self.intermediate_query = InstructBlipVideoQFormerIntermediate(config)
self.output_query = InstructBlipVideoQFormerOutput(config)
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, query_length=0, **kwargs: Unpack[TransformersKwargs]):
attention_output = self.attention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, **kwargs)
if query_length > 0:
query_attention_output = attention_output[:, :query_length, :]
if self.has_cross_attention:
if encoder_hidden_states is None:
raise ValueError('encoder_hidden_states must be given for cross-attention layers')
query_attention_output = self.crossattention(query_attention_output, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, **kwargs)
layer_output = apply_chunking_to_forward(self.feed_forward_chunk_query, self.chunk_size_feed_forward, self.seq_len_dim, query_attention_output)
if attention_output.shape[1] > query_length:
layer_output_text = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output[:, query_length:, :]).to(layer_output.device)
layer_output = torch.cat([layer_output, layer_output_text], dim=1)
else:
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
return layer_output
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
def feed_forward_chunk_query(self, attention_output):
intermediate_output = self.intermediate_query(attention_output)
layer_output = self.output_query(intermediate_output, attention_output)
return layer_output
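# Editorial aside: which layers get the optional cross-attention block. Assuming the
# default cross_attention_frequency of 2 and 12 hidden layers, every even layer index
# attends to the vision encoder; the rest are query/text-only. Pure bookkeeping.
num_hidden_layers = 12
cross_attention_frequency = 2
cross_attn_layers = [i for i in range(num_hidden_layers) if i % cross_attention_frequency == 0]
assert cross_attn_layers == [0, 2, 4, 6, 8, 10]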
|
class InstructBlipVideoQFormerLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, query_length=0, **kwargs: Unpack[TransformersKwargs]):
pass
def feed_forward_chunk(self, attention_output):
pass
def feed_forward_chunk_query(self, attention_output):
pass
| 5 | 0 | 24 | 3 | 21 | 1 | 3 | 0.02 | 1 | 5 | 3 | 0 | 4 | 10 | 4 | 14 | 101 | 14 | 85 | 38 | 70 | 2 | 45 | 28 | 40 | 6 | 1 | 3 | 10 |
3,192 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py | transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoQFormerModel |
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from ...utils.generic import OutputRecorder, check_model_inputs
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions
import torch
from ...processing_utils import Unpack
from .configuration_instructblipvideo import InstructBlipVideoConfig, InstructBlipVideoQFormerConfig, InstructBlipVideoVisionConfig
from typing import Any, Callable, Optional, Union
class InstructBlipVideoQFormerModel(InstructBlipVideoPreTrainedModel):
"""
Querying Transformer (Q-Former), used in InstructBlipVideo. Slightly modified from BLIP-2 as it also takes the
instruction as input.
"""
_supports_attention_backend = False
_supports_flash_attn = False
_supports_sdpa = False
_supports_flex_attn = False
_can_record_outputs = {'hidden_states': InstructBlipVideoQFormerLayer, 'attentions': [OutputRecorder(InstructBlipVideoQFormerMultiHeadAttention, index=1, layer_name='.attention')], 'cross_attentions': [OutputRecorder(InstructBlipVideoQFormerMultiHeadAttention, index=1, layer_name='.crossattention')]}
def __init__(self, config: InstructBlipVideoQFormerConfig):
super().__init__(config)
self.config = config
self.embeddings = InstructBlipVideoQFormerEmbeddings(config)
self.encoder = InstructBlipVideoQFormerEncoder(config)
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def get_extended_attention_mask(self, attention_mask: torch.Tensor, input_shape: tuple[int], device: torch.device, has_query: bool=False) -> torch.Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (`tuple[int]`):
The shape of the input to the model.
device: (`torch.device`):
The device of the input to the model.
Returns:
`torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
"""
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(f'Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})')
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
@check_model_inputs
@auto_docstring
def forward(self, input_ids: torch.LongTensor, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, query_embeds: Optional[torch.Tensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]:
"""
query_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Hidden states to be used in the attention computation. If cross-attention,
will be used for the query (i.e., key and value will use the encoder_hidden_states).
"""
if input_ids is None and query_embeds is None:
raise ValueError('You have to specify query_embeds when input_ids is None')
query_length = query_embeds.shape[1] if query_embeds is not None else 0
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, query_embeds=query_embeds)
input_shape = embedding_output.size()[:-1]
batch_size, seq_length = input_shape
device = embedding_output.device
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length), device=device)
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
if encoder_hidden_states is not None:
if isinstance(encoder_hidden_states, list):
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
else:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if isinstance(encoder_attention_mask, list):
encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
elif encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs: BaseModelOutput = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, query_length=query_length, **kwargs)
sequence_output = encoder_outputs.last_hidden_state
pooled_output = sequence_output[:, 0, :]
return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output)
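# Editorial aside: a worked example of get_extended_attention_mask for a 2D padding mask.
# The (batch, seq_len) mask is broadcast to (batch, 1, 1, seq_len) and masked positions
# become large negative biases that are added to the attention scores.
import torch

attention_mask = torch.tensor([[1, 1, 1, 0, 0]])   # last two tokens are padding
extended = attention_mask[:, None, None, :].to(torch.float32)
extended = (1.0 - extended) * -10000.0
assert extended.shape == (1, 1, 1, 5)
assert extended[0, 0, 0].tolist() == [0.0, 0.0, 0.0, -10000.0, -10000.0]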
|
class InstructBlipVideoQFormerModel(InstructBlipVideoPreTrainedModel):
'''
Querying Transformer (Q-Former), used in InstructBlipVideo. Slightly modified from BLIP-2 as it also takes the
instruction as input.
'''
def __init__(self, config: InstructBlipVideoQFormerConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
def get_extended_attention_mask(self, attention_mask: torch.Tensor, input_shape: tuple[int], device: torch.device, has_query: bool=False) -> torch.Tensor:
'''
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (`tuple[int]`):
The shape of the input to the model.
device: (`torch.device`):
The device of the input to the model.
Returns:
`torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
'''
pass
@check_model_inputs
@auto_docstring
def forward(self, input_ids: torch.LongTensor, attention_mask: Optional[torch.FloatTensor]=None, position_ids: Optional[torch.LongTensor]=None, query_embeds: Optional[torch.Tensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple[torch.FloatTensor], BaseModelOutputWithPoolingAndCrossAttentions]:
'''
query_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Hidden states to be used in the attention computation. If cross-attention,
will be used for the query (i.e., key and value will use the encoder_hidden_states).
'''
pass
| 9 | 4 | 31 | 3 | 18 | 9 | 4 | 0.53 | 1 | 10 | 4 | 0 | 6 | 3 | 6 | 7 | 194 | 25 | 111 | 45 | 84 | 59 | 55 | 25 | 48 | 13 | 2 | 2 | 21 |
3,193 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py | transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoQFormerMultiHeadAttention |
import torch
from ...processing_utils import Unpack
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from torch import nn
import math
class InstructBlipVideoQFormerMultiHeadAttention(nn.Module):
def __init__(self, config, is_cross_attention=False):
super().__init__()
self.config = config
if config.hidden_size % config.num_attention_heads != 0 and (not hasattr(config, 'embedding_size')):
raise ValueError('The hidden size (%d) is not a multiple of the number of attention heads (%d)' % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
if is_cross_attention:
self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
else:
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, 'position_embedding_type', 'absolute')
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.save_attention = False
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def save_attention_map(self, attention_map):
self.attention_map = attention_map
def get_attention_map(self):
return self.attention_map
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, **kwargs: Unpack[TransformersKwargs]):
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
mixed_query_layer = self.query(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == 'relative_key' or self.position_embedding_type == 'relative_key_query':
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype)
if self.position_embedding_type == 'relative_key':
relative_position_scores = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == 'relative_key_query':
relative_position_scores_query = torch.einsum('bhld,lrd->bhlr', query_layer, positional_embedding)
relative_position_scores_key = torch.einsum('bhrd,lrd->bhlr', key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_scores_dtype = attention_scores.dtype
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores).to(attention_scores_dtype)
if is_cross_attention and self.save_attention:
self.save_attention_map(attention_probs)
attention_probs.register_hook(self.save_attn_gradients)
attention_probs_dropped = self.dropout(attention_probs)
if head_mask is not None:
attention_probs_dropped = attention_probs_dropped * head_mask
context_layer = torch.matmul(attention_probs_dropped, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return (context_layer, attention_probs)
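# Editorial aside: the shape manipulation done by transpose_for_scores above. The joint
# projection of width all_head_size = num_heads * head_dim is split per head and moved to
# a (batch, heads, seq, head_dim) layout so the matmuls run independently per head.
import torch

batch, seq, num_heads, head_dim = 2, 5, 12, 64
x = torch.randn(batch, seq, num_heads * head_dim)          # output of self.query/key/value
x = x.view(batch, seq, num_heads, head_dim).permute(0, 2, 1, 3)
assert x.shape == (batch, num_heads, seq, head_dim)
# attention_scores = query @ key.transpose(-1, -2) then has shape (batch, heads, seq, seq)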
|
class InstructBlipVideoQFormerMultiHeadAttention(nn.Module):
def __init__(self, config, is_cross_attention=False):
pass
def save_attn_gradients(self, attn_gradients):
pass
def get_attn_gradients(self):
pass
def save_attention_map(self, attention_map):
pass
def get_attention_map(self):
pass
def transpose_for_scores(self, x):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, **kwargs: Unpack[TransformersKwargs]):
pass
| 8 | 0 | 18 | 3 | 14 | 1 | 3 | 0.1 | 1 | 3 | 0 | 0 | 7 | 14 | 7 | 17 | 131 | 26 | 96 | 52 | 79 | 10 | 80 | 43 | 72 | 10 | 1 | 2 | 19 |
3,194 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py | transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoQFormerOutput |
from torch import nn
import torch
class InstructBlipVideoQFormerOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class InstructBlipVideoQFormerOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 12 | 1 | 11 | 6 | 8 | 0 | 11 | 6 | 8 | 1 | 1 | 0 | 2 |
3,195 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py | transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoQFormerSelfOutput |
from torch import nn
import torch
class InstructBlipVideoQFormerSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
class InstructBlipVideoQFormerSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 12 | 1 | 11 | 6 | 8 | 0 | 11 | 6 | 8 | 1 | 1 | 0 | 2 |
3,196 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py | transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoVisionEmbeddings |
from torch import nn
from .configuration_instructblipvideo import InstructBlipVideoConfig, InstructBlipVideoQFormerConfig, InstructBlipVideoVisionConfig
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
import torch
class InstructBlipVideoVisionEmbeddings(nn.Module):
def __init__(self, config: InstructBlipVideoVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim))
self.patch_embedding = nn.Conv2d(in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embedding.shape[1] - 1
if not torch.jit.is_tracing() and num_patches == num_positions and (height == width):
return self.position_embedding
class_pos_embed = self.position_embedding[:, :1]
patch_pos_embed = self.position_embedding[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions ** 0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
if interpolate_pos_encoding:
position_embedding = self.interpolate_pos_encoding(embeddings, height, width)
else:
position_embedding = self.position_embedding
embeddings = embeddings + position_embedding[:, :embeddings.size(1), :].to(target_dtype)
return embeddings
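# Editorial aside: the grid arithmetic behind interpolate_pos_encoding, assuming the usual
# 224px pretraining resolution with 14px patches (a 16x16 position grid plus a class token).
# At 448px the stored grid is resampled bicubically to 32x32, exactly as the method above does.
import torch
from torch import nn

dim, pretrain_grid, new_grid = 8, 224 // 14, 448 // 14     # 16 -> 32 patches per side
patch_pos = torch.randn(1, pretrain_grid * pretrain_grid, dim)
patch_pos = patch_pos.reshape(1, pretrain_grid, pretrain_grid, dim).permute(0, 3, 1, 2)
patch_pos = nn.functional.interpolate(patch_pos, size=(new_grid, new_grid), mode="bicubic", align_corners=False)
patch_pos = patch_pos.permute(0, 2, 3, 1).view(1, -1, dim)
assert patch_pos.shape == (1, new_grid * new_grid, dim)    # 1024 interpolated patch positions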
|
class InstructBlipVideoVisionEmbeddings(nn.Module):
def __init__(self, config: InstructBlipVideoVisionConfig):
pass
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
'''
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
'''
pass
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor:
pass
| 4 | 1 | 23 | 5 | 16 | 3 | 2 | 0.19 | 1 | 5 | 1 | 0 | 3 | 9 | 3 | 13 | 72 | 16 | 48 | 27 | 44 | 9 | 40 | 27 | 36 | 2 | 1 | 1 | 5 |
3,197 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modeling_instructblipvideo.py | transformers.models.instructblipvideo.modeling_instructblipvideo.InstructBlipVideoVisionModel |
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging, torch_int
from torch import nn
from ...utils.generic import OutputRecorder, check_model_inputs
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions
import torch
from ...processing_utils import Unpack
from .configuration_instructblipvideo import InstructBlipVideoConfig, InstructBlipVideoQFormerConfig, InstructBlipVideoVisionConfig
from typing import Any, Callable, Optional, Union
class InstructBlipVideoVisionModel(InstructBlipVideoPreTrainedModel):
main_input_name = 'pixel_values'
config: InstructBlipVideoVisionConfig
_can_record_outputs = {'hidden_states': InstructBlipVideoEncoderLayer, 'attentions': InstructBlipVideoAttention}
def __init__(self, config: InstructBlipVideoVisionConfig):
super().__init__(config)
self.config = config
embed_dim = config.hidden_size
self.embeddings = InstructBlipVideoVisionEmbeddings(config)
self.encoder = InstructBlipVideoEncoder(config)
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.post_init()
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutputWithPooling]:
if pixel_values is None:
raise ValueError('You have to specify pixel_values')
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
encoder_outputs: BaseModelOutput = self.encoder(inputs_embeds=hidden_states, **kwargs)
last_hidden_state = encoder_outputs.last_hidden_state
last_hidden_state = self.post_layernorm(last_hidden_state)
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output)
def get_input_embeddings(self):
return self.embeddings
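# Editorial aside: running the vision tower at a resolution larger than the configured one
# via interpolate_pos_encoding. A deliberately tiny config keeps this cheap; every size
# below is illustrative and unrelated to the released checkpoints.
import torch
from transformers import InstructBlipVideoVisionConfig
from transformers.models.instructblipvideo.modeling_instructblipvideo import (
    InstructBlipVideoVisionModel,
)

cfg = InstructBlipVideoVisionConfig(
    hidden_size=32, intermediate_size=64, num_hidden_layers=2,
    num_attention_heads=4, image_size=56, patch_size=14,
)
model = InstructBlipVideoVisionModel(cfg).eval()
pixel_values = torch.randn(1, 3, 112, 112)                 # twice the configured image_size
with torch.no_grad():
    out = model(pixel_values=pixel_values, interpolate_pos_encoding=True)
# 112 / 14 = 8 patches per side -> 64 patch tokens plus one class token
assert out.last_hidden_state.shape == (1, 65, 32)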
|
class InstructBlipVideoVisionModel(InstructBlipVideoPreTrainedModel):
def __init__(self, config: InstructBlipVideoVisionConfig):
pass
@check_model_inputs
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, BaseModelOutputWithPooling]:
pass
def get_input_embeddings(self):
pass
| 6 | 0 | 19 | 3 | 15 | 1 | 3 | 0.06 | 1 | 7 | 4 | 0 | 3 | 4 | 3 | 4 | 65 | 13 | 49 | 23 | 36 | 3 | 28 | 15 | 24 | 6 | 2 | 1 | 8 |
3,198 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modular_instructblipvideo.py | transformers.models.instructblipvideo.modular_instructblipvideo.InstructBlipVideoConfig |
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...configuration_utils import PretrainedConfig
from ..auto import CONFIG_MAPPING, AutoConfig
class InstructBlipVideoConfig(PretrainedConfig):
"""
[`InstructBlipVideoConfig`] is the configuration class to store the configuration of a
[`InstructBlipVideoForConditionalGeneration`]. It is used to instantiate a Instructblipvideo model according to the specified
arguments, defining the vision model, Q-Former model and language model configs. Instantiating a configuration with
the defaults will yield a similar configuration to that of the Instructblipvideo
[Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`InstructBlipVideoVisionConfig`].
qformer_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`InstructBlipVideoQFormerConfig`].
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize any [`PretrainedConfig`].
num_query_tokens (`int`, *optional*, defaults to 32):
The number of query tokens passed through the Transformer.
video_token_index (`int`, *optional*):
Token index of special video token.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import (
... InstructBlipVideoVisionConfig,
... InstructBlipVideoQFormerConfig,
... OPTConfig,
... InstructBlipVideoConfig,
... InstructBlipVideoForConditionalGeneration,
... )
>>> # Initializing a InstructBlipVideoConfig with Salesforce/instruct-blip-flan-t5 style configuration
>>> configuration = InstructBlipVideoConfig()
>>> # Initializing a InstructBlipVideoForConditionalGeneration (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
>>> model = InstructBlipVideoForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a InstructBlipVideoConfig from a InstructBlipVideoVisionConfig, InstructBlipVideoQFormerConfig and any PretrainedConfig
>>> # Initializing Instructblipvideo vision, Instructblipvideo Q-Former and language model configurations
>>> vision_config = InstructBlipVideoVisionConfig()
>>> qformer_config = InstructBlipVideoQFormerConfig()
>>> text_config = OPTConfig()
>>> config = InstructBlipVideoConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
```"""
model_type = 'instructblipvideo'
attribute_map = {'video_token_id': 'video_token_index'}
sub_configs = {'text_config': AutoConfig, 'qformer_config': InstructBlipVideoQFormerConfig, 'vision_config': InstructBlipVideoVisionConfig}
def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, video_token_index=None, **kwargs):
super().__init__(**kwargs)
if vision_config is None:
vision_config = {}
logger.info('vision_config is None. initializing the InstructBlipVideoVisionConfig with default values.')
if qformer_config is None:
qformer_config = {}
logger.info('qformer_config is None. Initializing the InstructBlipVideoQFormerConfig with default values.')
if text_config is None:
text_config = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
self.vision_config = InstructBlipVideoVisionConfig(**vision_config)
self.qformer_config = InstructBlipVideoQFormerConfig(**qformer_config)
text_model_type = text_config.get('model_type', 'opt')
self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
self.num_query_tokens = num_query_tokens
self.video_token_index = video_token_index
self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
self.initializer_factor = 1.0
self.initializer_range = 0.02
@classmethod
def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVideoVisionConfig, qformer_config: InstructBlipVideoQFormerConfig, text_config: PretrainedConfig, **kwargs):
"""
Instantiate a [`InstructBlipVideoConfig`] (or a derived class) from a InstructBlipVideo vision model, Q-Former and
language model configurations.
Returns:
[`InstructBlipVideoConfig`]: An instance of a configuration object
"""
return cls(vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs)
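# Editorial aside: composing a full config with the classmethod defined above. OPT is used
# as the language model purely for illustration; any causal LM config would do.
from transformers import (
    InstructBlipVideoConfig,
    InstructBlipVideoQFormerConfig,
    InstructBlipVideoVisionConfig,
    OPTConfig,
)

config = InstructBlipVideoConfig.from_vision_qformer_text_configs(
    vision_config=InstructBlipVideoVisionConfig(),
    qformer_config=InstructBlipVideoQFormerConfig(),
    text_config=OPTConfig(),
    num_query_tokens=32,
)
# __init__ ties the Q-Former's cross-attention width to the vision tower's hidden size.
assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size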
|
class InstructBlipVideoConfig(PretrainedConfig):
'''
[`InstructBlipVideoConfig`] is the configuration class to store the configuration of a
[`InstructBlipVideoForConditionalGeneration`]. It is used to instantiate a Instructblipvideo model according to the specified
arguments, defining the vision model, Q-Former model and language model configs. Instantiating a configuration with
the defaults will yield a similar configuration to that of the Instructblipvideo
[Salesforce/instruct-blip-flan-t5](https://huggingface.co/Salesforce/instruct-blip-flan-t5) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`InstructBlipVideoVisionConfig`].
qformer_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`InstructBlipVideoQFormerConfig`].
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize any [`PretrainedConfig`].
num_query_tokens (`int`, *optional*, defaults to 32):
The number of query tokens passed through the Transformer.
video_token_index (`int`, *optional*):
Token index of special video token.
kwargs (*optional*):
Dictionary of keyword arguments.
Example:
```python
>>> from transformers import (
... InstructBlipVideoVisionConfig,
... InstructBlipVideoQFormerConfig,
... OPTConfig,
... InstructBlipVideoConfig,
... InstructBlipVideoForConditionalGeneration,
... )
>>> # Initializing a InstructBlipVideoConfig with Salesforce/instruct-blip-flan-t5 style configuration
>>> configuration = InstructBlipVideoConfig()
>>> # Initializing a InstructBlipVideoForConditionalGeneration (with random weights) from the Salesforce/instruct-blip-flan-t5 style configuration
>>> model = InstructBlipVideoForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
>>> # We can also initialize a InstructBlipVideoConfig from a InstructBlipVideoVisionConfig, InstructBlipVideoQFormerConfig and any PretrainedConfig
>>> # Initializing Instructblipvideo vision, Instructblipvideo Q-Former and language model configurations
>>> vision_config = InstructBlipVideoVisionConfig()
>>> qformer_config = InstructBlipVideoQFormerConfig()
>>> text_config = OPTConfig()
>>> config = InstructBlipVideoConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)
```'''
def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, video_token_index=None, **kwargs):
pass
@classmethod
def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVideoVisionConfig, qformer_config: InstructBlipVideoQFormerConfig, text_config: PretrainedConfig, **kwargs):
'''
Instantiate a [`InstructBlipVideoConfig`] (or a derived class) from a InstructBlipVideo vision model, Q-Former and
language model configurations.
Returns:
[`InstructBlipVideoConfig`]: An instance of a configuration object
'''
pass
| 4 | 2 | 28 | 4 | 21 | 3 | 3 | 0.98 | 1 | 3 | 2 | 0 | 1 | 8 | 2 | 34 | 120 | 21 | 50 | 29 | 32 | 49 | 26 | 14 | 23 | 5 | 2 | 1 | 6 |
3,199 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/instructblipvideo/modular_instructblipvideo.py | transformers.models.instructblipvideo.modular_instructblipvideo.InstructBlipVideoForConditionalGeneration |
import torch
from typing import Optional, Union
from ...processing_utils import Unpack
from transformers.models.instructblip.modeling_instructblip import InstructBlipForConditionalGeneration, InstructBlipForConditionalGenerationModelOutput, InstructBlipModel, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, TransformersKwargs
class InstructBlipVideoForConditionalGeneration(InstructBlipForConditionalGeneration):
def get_video_features(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.LongTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=False):
"""
Encodes video frames into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
The tensors corresponding to the input videos.
"""
batch_size, frames, channel, height, width = pixel_values.shape
pixel_values = pixel_values.reshape(batch_size * frames, channel, height, width)
vision_outputs = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)
image_embeds = vision_outputs[0]
image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)
query_attention_mask = torch.ones(query_tokens.size()[:-1], dtype=torch.long, device=image_embeds.device)
if qformer_attention_mask is None:
qformer_attention_mask = torch.ones_like(qformer_input_ids)
qformer_input_ids = qformer_input_ids.repeat_interleave(frames, dim=0)
qformer_attention_mask = qformer_attention_mask.repeat_interleave(frames, dim=0)
qformer_attention_mask = torch.cat([query_attention_mask, qformer_attention_mask], dim=1)
query_outputs = self.qformer(input_ids=qformer_input_ids, attention_mask=qformer_attention_mask, query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_attention_mask, return_dict=True)
query_output = query_outputs[0][:, :query_tokens.size(1), :]
language_model_inputs = self.language_projection(query_output)
language_model_inputs = language_model_inputs.reshape(batch_size, self.config.num_query_tokens * frames, -1)
if return_dict:
return (language_model_inputs, vision_outputs, query_outputs)
return language_model_inputs
def get_image_features(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.LongTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=False):
pass
def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device))
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.video_token_id
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
return special_image_mask
def forward(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.FloatTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, input_ids: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, InstructBlipVideoForConditionalGenerationModelOutput]:
"""
qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length)):
The sequence used as a prompt to be fed to the Q-Former module.
qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
Mask to avoid performing attention on padding token indices.
Examples:
```python
>>> from transformers import InstructBlipVideoProcessor, InstructBlipVideoForConditionalGeneration
>>> import torch
>>> from huggingface_hub import hf_hub_download
>>> import av
>>> import numpy as np
>>> def read_video_pyav(container, indices):
... '''
... Decode the video with PyAV decoder.
... Args:
... container (`av.container.input.InputContainer`): PyAV container.
... indices (`list[int]`): List of frame indices to decode.
... Returns:
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
... '''
... frames = []
... container.seek(0)
... start_index = indices[0]
... end_index = indices[-1]
... for i, frame in enumerate(container.decode(video=0)):
... if i > end_index:
... break
... if i >= start_index and i in indices:
... frames.append(frame)
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
>>> model = InstructBlipVideoForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b", device_map="auto")
>>> processor = InstructBlipVideoProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
>>> file_path = hf_hub_download(
... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
... )
>>> container = av.open(file_path)
>>> # sample uniformly 4 frames from the video
>>> total_frames = container.streams.video[0].frames
>>> indices = np.arange(0, total_frames, total_frames / 4).astype(int)
>>> clip = read_video_pyav(container, indices)
>>> prompt = "What is happening in the video?"
>>> inputs = processor(text=prompt, images=clip, return_tensors="pt").to(model.device)
>>> outputs = model.generate(
... **inputs,
... do_sample=False,
... num_beams=5,
... max_length=256,
... repetition_penalty=1.5,
... length_penalty=1.0,
... )
>>> generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
>>> print(generated_text)
"A person is eating a bowl of pasta, and they are using a fork to eat it. The person is sitting at a table, and the plate of pasta is on the table in front"
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
language_model_inputs, vision_outputs, query_outputs = self.get_video_features(pixel_values, qformer_input_ids=qformer_input_ids, qformer_attention_mask=qformer_attention_mask, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)
vision_outputs = vision_outputs.to_tuple() if not return_dict else vision_outputs
query_outputs = query_outputs.to_tuple() if not return_dict else query_outputs
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs)
if self.config.use_decoder_only_language_model:
outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, use_cache=use_cache, **kwargs)
logits = outputs.logits if return_dict else outputs[0]
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs)
else:
outputs = self.language_model(inputs_embeds=inputs_embeds, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, use_cache=use_cache, **kwargs)
loss = outputs.loss if return_dict else outputs[0]
logits = outputs.logits if return_dict else outputs[1]
return InstructBlipVideoForConditionalGenerationModelOutput(loss=loss, logits=logits, vision_outputs=vision_outputs, qformer_outputs=query_outputs, language_model_outputs=outputs)
@torch.no_grad()
def generate(self, pixel_values: torch.FloatTensor, qformer_input_ids: Optional[torch.LongTensor]=None, qformer_attention_mask: Optional[torch.LongTensor]=None, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False, **generate_kwargs) -> torch.LongTensor:
"""
        Overrides the `generate` method so that the model can be used as a conditional generator.
Args:
pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width) or
(batch_size, num_frames, num_channels, height, width)): Input images or videos to be processed.
qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
The sequence used as a prompt to be fed to the Q-Former module.
qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
Mask to avoid performing attention on padding token indices.
input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
The sequence used as a prompt for the generation.
attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
Mask to avoid performing attention on padding token indices.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Embedded representation of the inputs. Should be float, not int tokens.
interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
Whether to interpolate the positional encoding of the image embeddings.
Returns:
            `torch.LongTensor`: The generated token ids; decode them with the processor to obtain the caption strings.
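        Example (a minimal sketch, not part of the original docstring; it assumes the `processor`, `model` and
        4-frame `clip` prepared in the `forward` example above, and illustrates the default prompt that
        `generate` builds when no `input_ids` are passed):
        ```python
        >>> inputs = processor(text="What is happening in the video?", images=clip, return_tensors="pt").to(model.device)
        >>> # Only the video and the Q-Former instruction are forwarded; `generate` prepends the default
        >>> # prompt of video placeholder tokens followed by the BOS token.
        >>> generated_ids = model.generate(
        ...     pixel_values=inputs.pixel_values,
        ...     qformer_input_ids=inputs.qformer_input_ids,
        ...     qformer_attention_mask=inputs.qformer_attention_mask,
        ...     max_new_tokens=30,
        ... )
        >>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
        ```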
"""
if hasattr(self, 'hf_device_map'):
self._preprocess_accelerate()
batch_size = pixel_values.shape[0]
language_model_inputs, vision_outputs, query_outputs = self.get_video_features(pixel_values, qformer_input_ids=qformer_input_ids, qformer_attention_mask=qformer_attention_mask, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)
if inputs_embeds is None:
if input_ids is None:
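                # No text prompt was given: build a default prompt of video placeholder tokens (num_query_tokens per frame, 4 frames) followed by the BOS token.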
video_tokens = [self.config.video_token_index] * self.config.num_query_tokens * 4
start_tokens = video_tokens + [self.config.text_config.bos_token_id]
input_ids = torch.tensor([start_tokens], dtype=torch.long, device=pixel_values.device)
input_ids = input_ids.repeat(batch_size, 1)
inputs_embeds = self.get_input_embeddings()(input_ids)
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
language_model_inputs = language_model_inputs.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, language_model_inputs)
inputs = {'inputs_embeds': inputs_embeds, 'attention_mask': attention_mask}
if not self.language_model.config.is_encoder_decoder:
inputs['input_ids'] = input_ids
outputs = self.language_model.generate(**inputs, **generate_kwargs)
return outputs
|
class InstructBlipVideoForConditionalGeneration(InstructBlipForConditionalGeneration):
def get_video_features(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.LongTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=False):
'''
        Encodes videos into continuous embeddings that can be forwarded to the language model.
        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, image_size, image_size)`):
                The tensors corresponding to the input videos.
'''
pass
def get_image_features(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.LongTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=False):
pass
def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor):
'''
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`.
'''
pass
def forward(self, pixel_values: torch.FloatTensor, qformer_input_ids: torch.FloatTensor, qformer_attention_mask: Optional[torch.LongTensor]=None, input_ids: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False, use_cache: Optional[bool]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, InstructBlipVideoForConditionalGenerationModelOutput]:
'''
qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length)):
The sequence used as a prompt to be fed to the Q-Former module.
qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
Mask to avoid performing attention on padding token indices.
Examples:
```python
>>> from transformers import InstructBlipVideoProcessor, InstructBlipVideoForConditionalGeneration
>>> import torch
>>> from huggingface_hub import hf_hub_download
>>> import av
>>> import numpy as np
>>> def read_video_pyav(container, indices):
... '''
... Decode the video with PyAV decoder.
... Args:
... container (`av.container.input.InputContainer`): PyAV container.
... indices (`list[int]`): List of frame indices to decode.
... Returns:
... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
... '''
... frames = []
... container.seek(0)
... start_index = indices[0]
... end_index = indices[-1]
... for i, frame in enumerate(container.decode(video=0)):
... if i > end_index:
... break
... if i >= start_index and i in indices:
... frames.append(frame)
... return np.stack([x.to_ndarray(format="rgb24") for x in frames])
>>> model = InstructBlipVideoForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b", device_map="auto")
>>> processor = InstructBlipVideoProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
>>> file_path = hf_hub_download(
... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
... )
>>> container = av.open(file_path)
        >>> # sample uniformly 4 frames from the video
>>> total_frames = container.streams.video[0].frames
>>> indices = np.arange(0, total_frames, total_frames / 4).astype(int)
>>> clip = read_video_pyav(container, indices)
>>> prompt = "What is happening in the video?"
>>> inputs = processor(text=prompt, images=clip, return_tensors="pt").to(model.device)
>>> outputs = model.generate(
... **inputs,
... do_sample=False,
... num_beams=5,
... max_length=256,
... repetition_penalty=1.5,
... length_penalty=1.0,
... )
>>> generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
>>> print(generated_text)
"A person is eating a bowl of pasta, and they are using a fork to eat it. The person is sitting at a table, and the plate of pasta is on the table in front"
```'''
pass
@torch.no_grad()
def generate(self, pixel_values: torch.FloatTensor, qformer_input_ids: Optional[torch.LongTensor]=None, qformer_attention_mask: Optional[torch.LongTensor]=None, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, interpolate_pos_encoding: bool=False, **generate_kwargs) -> torch.LongTensor:
'''
        Overrides the `generate` method so that the model can be used as a conditional generator.
Args:
pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width) or
(batch_size, num_frames, num_channels, height, width)): Input images or videos to be processed.
qformer_input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
The sequence used as a prompt to be fed to the Q-Former module.
qformer_attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
Mask to avoid performing attention on padding token indices.
input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
The sequence used as a prompt for the generation.
attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):
Mask to avoid performing attention on padding token indices.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Embedded representation of the inputs. Should be float, not int tokens.
interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
Whether to interpolate the positional encoding of the image embeddings.
Returns:
            `torch.LongTensor`: The generated token ids; decode them with the processor to obtain the caption strings.
'''
pass
| 7 | 4 | 151 | 19 | 90 | 43 | 11 | 0.48 | 1 | 2 | 1 | 0 | 2 | 0 | 2 | 14 | 305 | 38 | 181 | 60 | 154 | 86 | 89 | 36 | 86 | 12 | 3 | 2 | 21 |