The per-column schema (type plus the viewer's min/max summary; for string columns the bounds are value lengths):

| Column | Type | Min | Max |
| --- | --- | --- | --- |
| id | int64 | 0 | 328k |
| repository_name | string (length) | 7 | 58 |
| file_path | string (length) | 9 | 302 |
| class_name | string (length) | 5 | 256 |
| human_written_code | string (length) | 16 | 2.16M |
| class_skeleton | string (length) | 18 | 1.49M |
| total_program_units | int64 | 1 | 1.76k |
| total_doc_str | int64 | 0 | 771 |
| AvgCountLine | float64 | 0 | 7.89k |
| AvgCountLineBlank | float64 | 0 | 297 |
| AvgCountLineCode | float64 | 0 | 7.89k |
| AvgCountLineComment | float64 | 0 | 7.89k |
| AvgCyclomatic | float64 | 0 | 130 |
| CommentToCodeRatio | float64 | 0 | 168 |
| CountClassBase | float64 | 0 | 40 |
| CountClassCoupled | float64 | 0 | 583 |
| CountClassCoupledModified | float64 | 0 | 575 |
| CountClassDerived | float64 | 0 | 5.35k |
| CountDeclInstanceMethod | float64 | 0 | 529 |
| CountDeclInstanceVariable | float64 | 0 | 296 |
| CountDeclMethod | float64 | 0 | 599 |
| CountDeclMethodAll | float64 | 0 | 1.12k |
| CountLine | float64 | 1 | 40.4k |
| CountLineBlank | float64 | 0 | 8.16k |
| CountLineCode | float64 | 1 | 25.7k |
| CountLineCodeDecl | float64 | 1 | 8.15k |
| CountLineCodeExe | float64 | 0 | 24.2k |
| CountLineComment | float64 | 0 | 16.5k |
| CountStmt | float64 | 1 | 9.71k |
| CountStmtDecl | float64 | 1 | 8.15k |
| CountStmtExe | float64 | 0 | 9.69k |
| MaxCyclomatic | float64 | 0 | 759 |
| MaxInheritanceTree | float64 | 0 | 16 |
| MaxNesting | float64 | 0 | 34 |
| SumCyclomatic | float64 | 0 | 2.9k |
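For orientation, a minimal sketch of how rows with this schema could be loaded and inspected with the `datasets` library. The Hub dataset ID below is a placeholder, since this page does not name the dataset:

```python
from datasets import load_dataset

# "org/python-class-metrics" is a placeholder ID, not the dataset's real name.
ds = load_dataset("org/python-class-metrics", split="train")

row = ds[0]
print(row["repository_name"], row["class_name"])
print(row["human_written_code"][:200])  # raw class source
print(row["class_skeleton"][:200])      # signatures with stubbed bodies
print(row["MaxCyclomatic"], row["SumCyclomatic"])
```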
### Row 4,300

- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
- **class_name:** transformers.models.oneformer.modeling_oneformer.OneFormerTransformerDecoderQueryTransformer

**human_written_code:**

```python
from torch import Tensor, nn
import torch


class OneFormerTransformerDecoderQueryTransformer(nn.Module):
    def __init__(self, d_model=512, nhead=8, num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False, return_intermediate_dec=False, layer_norm_eps=1e-05):
        super().__init__()
        decoder_layer = OneFormerTransformerDecoderQueryTransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout, activation, normalize_before, layer_norm_eps)
        decoder_norm = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.decoder = OneFormerTransformerDecoderQueryTransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, return_intermediate=return_intermediate_dec)
        self.d_model = d_model
        self.nhead = nhead

    def forward(self, src, mask, query_embed, pos_embed, task_token=None):
        batch_size = src.shape[0]
        src = src.flatten(2).permute(2, 0, 1)
        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
        query_embed = query_embed.unsqueeze(1).repeat(1, batch_size, 1)
        if mask is not None:
            mask = mask.flatten(1)
        if task_token is None:
            queries = torch.zeros_like(query_embed)
        else:
            queries = task_token.repeat(query_embed.shape[0], 1, 1)
        queries = self.decoder(queries, src, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed)
        return queries.transpose(1, 2)
```

**class_skeleton:**

```python
class OneFormerTransformerDecoderQueryTransformer(nn.Module):
    def __init__(self, d_model=512, nhead=8, num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False, return_intermediate_dec=False, layer_norm_eps=1e-05):
        pass

    def forward(self, src, mask, query_embed, pos_embed, task_token=None):
        pass
```

**Metrics:** total_program_units=3, total_doc_str=0, AvgCountLine=21, AvgCountLineBlank=2, AvgCountLineCode=19, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=44, CountLineBlank=5, CountLineCode=39, CountLineCodeDecl=21, CountLineCodeExe=25, CountLineComment=0, CountStmt=20, CountStmtDecl=10, CountStmtExe=17, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4
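The `class_skeleton` field is evidently derived from `human_written_code` by keeping the class and method signatures (and their docstrings) while stubbing bodies with `pass` and dropping class-level assignments. As an illustration only — the dataset's actual extraction tool is not named on this page, and `class_skeleton` below is my function name, not the dataset's — here is a minimal `ast`-based sketch that reproduces the pattern seen in these rows:

```python
import ast

def class_skeleton(source: str, class_name: str) -> str:
    """Sketch: keep class/method docstrings and signatures, stub method bodies
    with `pass`, drop class-level assignments (mimics the skeletons shown here)."""
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if not (isinstance(node, ast.ClassDef) and node.name == class_name):
            continue
        kept = []
        for item in node.body:
            if isinstance(item, ast.Expr) and isinstance(item.value, ast.Constant):
                kept.append(item)  # class docstring
            elif isinstance(item, (ast.FunctionDef, ast.AsyncFunctionDef)):
                doc = [item.body[0]] if ast.get_docstring(item) else []
                item.body = doc + [ast.Pass()]
                kept.append(item)
        node.body = kept
        return ast.unparse(node)
    raise ValueError(f"class {class_name!r} not found in source")
```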
### Row 4,301

- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
- **class_name:** transformers.models.oneformer.modeling_oneformer.OneFormerTransformerDecoderQueryTransformerDecoder

**human_written_code:**

```python
import torch
from typing import Optional, Union
from torch import Tensor, nn


class OneFormerTransformerDecoderQueryTransformerDecoder(nn.Module):
    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(self, output, memory, output_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
        intermediate = []
        for layer in self.layers:
            output = layer(output, memory, output_mask=output_mask, memory_mask=memory_mask, output_key_padding_mask=output_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask, pos=pos, query_pos=query_pos)
            if self.return_intermediate:
                intermediate.append(self.norm(output))
        if self.norm is not None:
            output = self.norm(output)
            if self.return_intermediate:
                intermediate.pop()
                intermediate.append(output)
        if self.return_intermediate:
            return torch.stack(intermediate)
        return output.unsqueeze(0)
```

**class_skeleton:**

```python
class OneFormerTransformerDecoderQueryTransformerDecoder(nn.Module):
    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        pass

    def forward(self, output, memory, output_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
        pass
```

**Metrics:** total_program_units=3, total_doc_str=0, AvgCountLine=22, AvgCountLineBlank=2, AvgCountLineCode=20, AvgCountLineComment=0, AvgCyclomatic=4, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=45, CountLineBlank=5, CountLineCode=40, CountLineCodeDecl=19, CountLineCodeExe=27, CountLineComment=0, CountStmt=21, CountStmtDecl=9, CountStmtExe=18, MaxCyclomatic=6, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=7
### Row 4,302

- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
- **class_name:** transformers.models.oneformer.modeling_oneformer.OneFormerTransformerDecoderQueryTransformerDecoderLayer

**human_written_code:**

```python
from typing import Optional, Union
from torch import Tensor, nn
from ...activations import ACT2FN


class OneFormerTransformerDecoderQueryTransformerDecoderLayer(nn.Module):
    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False, layer_norm_eps=1e-05):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        self.activation = ACT2FN[activation]
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        return tensor if pos is None else tensor + pos

    def forward_post(self, output, memory, output_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
        q = k = self.with_pos_embed(output, query_pos)
        output2 = self.self_attn(q, k, value=output, attn_mask=output_mask, key_padding_mask=output_key_padding_mask)
        output2 = output2[0]
        output = output + self.dropout1(output2)
        output = self.norm1(output)
        output2 = self.multihead_attn(query=self.with_pos_embed(output, query_pos), key=self.with_pos_embed(memory, pos), value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)
        output2 = output2[0]
        output = output + self.dropout2(output2)
        output = self.norm2(output)
        output2 = self.linear2(self.dropout(self.activation(self.linear1(output))))
        output = output + self.dropout3(output2)
        output = self.norm3(output)
        return output

    def forward_pre(self, output, memory, output_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
        output2 = self.norm1(output)
        q = k = self.with_pos_embed(output2, query_pos)
        output2 = self.self_attn(q, k, value=output2, attn_mask=output_mask, key_padding_mask=output_key_padding_mask)
        output2 = output2[0]
        output = output + self.dropout1(output2)
        output2 = self.norm2(output)
        output2 = self.multihead_attn(query=self.with_pos_embed(output2, query_pos), key=self.with_pos_embed(memory, pos), value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask)
        output2 = output2[0]
        output = output + self.dropout2(output2)
        output2 = self.norm3(output)
        output2 = self.linear2(self.dropout(self.activation(self.linear1(output2))))
        output = output + self.dropout3(output2)
        return output

    def forward(self, output, memory, output_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
        if self.normalize_before:
            return self.forward_pre(output, memory, output_mask, memory_mask, output_key_padding_mask, memory_key_padding_mask, pos, query_pos)
        return self.forward_post(output, memory, output_mask, memory_mask, output_key_padding_mask, memory_key_padding_mask, pos, query_pos)
```

**class_skeleton** (as stored; note the field lists `forward_post` twice):

```python
class OneFormerTransformerDecoderQueryTransformerDecoderLayer(nn.Module):
    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False, layer_norm_eps=1e-05):
        pass

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        pass

    def forward_post(self, output, memory, output_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
        pass

    def forward_pre(self, output, memory, output_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
        pass

    def forward_post(self, output, memory, output_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None, pos: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
        pass
```

**Metrics:** total_program_units=6, total_doc_str=0, AvgCountLine=24, AvgCountLineBlank=0, AvgCountLineCode=24, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0.01, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=13, CountDeclMethod=5, CountDeclMethodAll=15, CountLine=126, CountLineBlank=6, CountLineCode=119, CountLineCodeDecl=62, CountLineCodeExe=74, CountLineComment=1, CountStmt=50, CountStmtDecl=23, CountStmtExe=44, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=7
### Row 4,303

- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
- **class_name:** transformers.models.oneformer.modeling_oneformer.OneFormerTransformerDecoderSelfAttentionLayer

**human_written_code:**

```python
from typing import Optional, Union
from torch import Tensor, nn
from ...activations import ACT2FN


class OneFormerTransformerDecoderSelfAttentionLayer(nn.Module):
    def __init__(self, embed_dim, num_heads, dropout=0.0, activation='relu', normalize_before=False, layer_norm_eps=1e-05):
        super().__init__()
        self.self_attn = OneFormerAttention(embed_dim=embed_dim, num_heads=num_heads, dropout=dropout, is_decoder=True)
        self.norm = nn.LayerNorm(embed_dim, eps=layer_norm_eps)
        self.dropout = nn.Dropout(dropout)
        self.activation = ACT2FN[activation]
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        return tensor if pos is None else tensor + pos

    def forward_post(self, output, output_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
        output2, attention_weights = self.self_attn(hidden_states=output, position_embeddings=query_pos, attention_mask=output_mask, output_attentions=True)
        output = output + self.dropout(output2)
        output = self.norm(output)
        return (output, attention_weights)

    def forward_pre(self, output, output_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
        output2 = self.norm(output)
        output2, attention_weights = self.self_attn(hidden_states=output2, position_embeddings=query_pos, attention_mask=output_mask, output_attentions=True)
        output = output + self.dropout(output2)
        return (output, attention_weights)

    def forward(self, output, output_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
        if self.normalize_before:
            return self.forward_pre(output, output_mask, output_key_padding_mask, query_pos)
        return self.forward_post(output, output_mask, output_key_padding_mask, query_pos)
```

**class_skeleton** (as stored; note the field lists `forward_post` twice):

```python
class OneFormerTransformerDecoderSelfAttentionLayer(nn.Module):
    def __init__(self, embed_dim, num_heads, dropout=0.0, activation='relu', normalize_before=False, layer_norm_eps=1e-05):
        pass

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        pass

    def forward_post(self, output, output_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
        pass

    def forward_pre(self, output, output_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
        pass

    def forward_post(self, output, output_mask: Optional[Tensor] = None, output_key_padding_mask: Optional[Tensor] = None, query_pos: Optional[Tensor] = None):
        pass
```

**Metrics:** total_program_units=6, total_doc_str=0, AvgCountLine=10, AvgCountLineBlank=1, AvgCountLineCode=9, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=5, CountDeclMethod=5, CountDeclMethodAll=15, CountLine=56, CountLineBlank=8, CountLineCode=48, CountLineCodeDecl=34, CountLineCodeExe=22, CountLineComment=0, CountStmt=24, CountStmtDecl=14, CountStmtExe=18, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=7
### Row 4,304

- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
- **class_name:** transformers.models.oneformer.modeling_oneformer.OneFormerTransformerModule

**human_written_code:**

```python
from torch import Tensor, nn
from .configuration_oneformer import OneFormerConfig


class OneFormerTransformerModule(nn.Module):
    """
    The OneFormer's transformer module.
    """

    def __init__(self, in_features: int, config: OneFormerConfig):
        super().__init__()
        hidden_dim = config.hidden_dim
        self.num_feature_levels = 3
        self.position_embedder = OneFormerSinePositionEmbedding(num_pos_feats=hidden_dim // 2, normalize=True)
        self.queries_embedder = nn.Embedding(config.num_queries, hidden_dim)
        self.input_projections = []
        for _ in range(self.num_feature_levels):
            if in_features != hidden_dim or config.enforce_input_proj:
                self.input_projections.append(nn.Conv2d(in_features, hidden_dim, kernel_size=1))
            else:
                self.input_projections.append(nn.Sequential())
        self.decoder = OneFormerTransformerDecoder(in_channels=in_features, config=config)
        self.level_embed = nn.Embedding(self.num_feature_levels, hidden_dim)

    def forward(self, multi_scale_features: list[Tensor], mask_features: Tensor, task_token: Tensor, output_attentions: bool = False) -> OneFormerTransformerDecoderOutput:
        if not len(multi_scale_features) == self.num_feature_levels:
            raise ValueError(f'Number of elements in multi_scale_features ({len(multi_scale_features)}) and num_feature_levels ({self.num_feature_levels}) do not match!')
        multi_stage_features = []
        multi_stage_positional_embeddings = []
        size_list = []
        for i in range(self.num_feature_levels):
            size_list.append(multi_scale_features[i].shape[-2:])
            multi_stage_positional_embeddings.append(self.position_embedder(multi_scale_features[i].shape, multi_scale_features[i].device, multi_scale_features[i].dtype, None).flatten(2))
            multi_stage_features.append(self.input_projections[i](multi_scale_features[i]).flatten(2) + self.level_embed.weight[i][None, :, None])
            multi_stage_positional_embeddings[-1] = multi_stage_positional_embeddings[-1].permute(2, 0, 1)
            multi_stage_features[-1] = multi_stage_features[-1].permute(2, 0, 1)
        _, batch_size, _ = multi_stage_features[0].shape
        query_embeddings = self.queries_embedder.weight.unsqueeze(1).repeat(1, batch_size, 1)
        task_token = task_token.unsqueeze(0)
        query_features = self.position_embedder(mask_features.shape, mask_features.device, mask_features.dtype, None)
        return self.decoder(task_token=task_token, multi_stage_features=multi_stage_features, multi_stage_positional_embeddings=multi_stage_positional_embeddings, mask_features=mask_features, query_features=query_features, query_embeddings=query_embeddings, query_embedder=self.queries_embedder, size_list=size_list, output_attentions=output_attentions)
```

**class_skeleton:**

```python
class OneFormerTransformerModule(nn.Module):
    '''
    The OneFormer's transformer module.
    '''

    def __init__(self, in_features: int, config: OneFormerConfig):
        pass

    def forward(self, multi_scale_features: list[Tensor], mask_features: Tensor, task_token: Tensor, output_attentions: bool = False) -> OneFormerTransformerDecoderOutput:
        pass
```

**Metrics:** total_program_units=3, total_doc_str=1, AvgCountLine=32, AvgCountLineBlank=4, AvgCountLineCode=27, AvgCountLineComment=1, AvgCyclomatic=3, CommentToCodeRatio=0.09, CountClassBase=1, CountClassCoupled=10, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=6, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=69, CountLineBlank=10, CountLineCode=54, CountLineCodeDecl=24, CountLineCodeExe=45, CountLineComment=5, CountStmt=31, CountStmtDecl=18, CountStmtExe=28, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=6
### Row 4,305

- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/modeling_oneformer.py
- **class_name:** transformers.models.oneformer.modeling_oneformer.PredictionBlock

**human_written_code:**

```python
from torch import Tensor, nn


class PredictionBlock(nn.Module):
    def __init__(self, in_dim: int, out_dim: int, activation: nn.Module) -> None:
        super().__init__()
        self.layers = [nn.Linear(in_dim, out_dim), activation]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
```

**class_skeleton:**

```python
class PredictionBlock(nn.Module):
    def __init__(self, in_dim: int, out_dim: int, activation: nn.Module) -> None:
        pass

    def forward(self, input: Tensor) -> Tensor:
        pass
```

**Metrics:** total_program_units=3, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=0, AvgCountLineCode=5, AvgCountLineComment=1, AvgCyclomatic=2, CommentToCodeRatio=0.09, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=1, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=13, CountLineBlank=1, CountLineCode=11, CountLineCodeDecl=7, CountLineCodeExe=8, CountLineComment=1, CountStmt=11, CountStmtDecl=7, CountStmtExe=8, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4
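Across these rows, `CommentToCodeRatio` tracks `CountLineComment / CountLineCode` (here 1/11 ≈ 0.09; for row 4,307 below, 68/45 ≈ 1.51). The column names match SciTools Understand's metric set, which is presumably what produced them. For orientation only, a naive Python approximation of the ratio — Understand's exact counting rules will differ, e.g. around docstrings, which this sketch counts as code:

```python
import io
import tokenize

def comment_to_code_ratio(source: str) -> float:
    """Naive approximation of CommentToCodeRatio: comment lines / code lines."""
    comment_lines, code_lines = set(), set()
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.COMMENT:
            comment_lines.add(tok.start[0])
        elif tok.type not in (tokenize.NL, tokenize.NEWLINE, tokenize.INDENT, tokenize.DEDENT, tokenize.ENDMARKER):
            code_lines.add(tok.start[0])
    return len(comment_lines) / max(len(code_lines), 1)
```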
### Row 4,306

- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** huggingface_pytorch-pretrained-BERT/src/transformers/models/oneformer/processing_oneformer.py
- **class_name:** transformers.models.oneformer.processing_oneformer.OneFormerProcessor

**human_written_code:**

```python
from ...processing_utils import ProcessorMixin


class OneFormerProcessor(ProcessorMixin):
    """
    Constructs an OneFormer processor which wraps [`OneFormerImageProcessor`] and
    [`CLIPTokenizer`]/[`CLIPTokenizerFast`] into a single processor that inherits both the image processor and
    tokenizer functionalities.

    Args:
        image_processor ([`OneFormerImageProcessor`]):
            The image processor is a required input.
        tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`]):
            The tokenizer is a required input.
        max_seq_len (`int`, *optional*, defaults to 77):
            Sequence length for input text list.
        task_seq_len (`int`, *optional*, defaults to 77):
            Sequence length for input task token.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'OneFormerImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, max_seq_length: int = 77, task_seq_length: int = 77, **kwargs):
        self.max_seq_length = max_seq_length
        self.task_seq_length = task_seq_length
        super().__init__(image_processor, tokenizer)

    def _preprocess_text(self, text_list=None, max_length=77):
        if text_list is None:
            raise ValueError('tokens cannot be None.')
        tokens = self.tokenizer(text_list, padding='max_length', max_length=max_length, truncation=True)
        attention_masks, input_ids = (tokens['attention_mask'], tokens['input_ids'])
        token_inputs = []
        for attn_mask, input_id in zip(attention_masks, input_ids):
            token = torch.tensor(attn_mask) * torch.tensor(input_id)
            token_inputs.append(token.unsqueeze(0))
        token_inputs = torch.cat(token_inputs, dim=0)
        return token_inputs

    def __call__(self, images=None, task_inputs=None, segmentation_maps=None, **kwargs):
        """
        Main method to prepare for the model one or several task input(s) and image(s). This method forwards the
        `task_inputs` and `kwargs` arguments to CLIPTokenizer's [`~CLIPTokenizer.__call__`] if `task_inputs` is not
        `None` to encode. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        OneFormerImageProcessor's [`~OneFormerImageProcessor.__call__`] if `images` is not `None`. Please refer to the
        docstring of the above two methods for more information.

        Args:
            task_inputs (`str`, `list[str]`):
                The sequence or batch of task_inputs sequences to be encoded. Each sequence can be a string or a list
                of strings of the template "the task is {task}".
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            segmentation_maps (`ImageInput`, *optional*):
                The corresponding semantic segmentation maps with the pixel-wise annotations.
            (`bool`, *optional*, defaults to `True`):
                Whether or not to pad images up to the largest image in a batch and create a pixel mask. If left to
                the default, will return a pixel mask that is:

                - 1 for pixels that are real (i.e. **not masked**),
                - 0 for pixels that are padding (i.e. **masked**).

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **task_inputs** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """
        if task_inputs is None:
            raise ValueError('You have to specify the task_input. Found None.')
        elif images is None:
            raise ValueError('You have to specify the image. Found None.')
        if not all(task in ['semantic', 'instance', 'panoptic'] for task in task_inputs):
            raise ValueError('task_inputs must be semantic, instance, or panoptic.')
        encoded_inputs = self.image_processor(images, task_inputs, segmentation_maps, **kwargs)
        if isinstance(task_inputs, str):
            task_inputs = [task_inputs]
        if isinstance(task_inputs, list) and all(isinstance(task_input, str) for task_input in task_inputs):
            task_token_inputs = []
            for task in task_inputs:
                task_input = f'the task is {task}'
                task_token_inputs.append(task_input)
            encoded_inputs['task_inputs'] = self._preprocess_text(task_token_inputs, max_length=self.task_seq_length)
        else:
            raise TypeError('Task Inputs should be a string or a list of strings.')
        if hasattr(encoded_inputs, 'text_inputs'):
            texts_list = encoded_inputs.text_inputs
            text_inputs = []
            for texts in texts_list:
                text_input_list = self._preprocess_text(texts, max_length=self.max_seq_length)
                text_inputs.append(text_input_list.unsqueeze(0))
            encoded_inputs['text_inputs'] = torch.cat(text_inputs, dim=0)
        return encoded_inputs

    def encode_inputs(self, images=None, task_inputs=None, segmentation_maps=None, **kwargs):
        """
        This method forwards all its arguments to [`OneFormerImageProcessor.encode_inputs`] and then tokenizes the
        task_inputs. Please refer to the docstring of this method for more information.
        """
        if task_inputs is None:
            raise ValueError('You have to specify the task_input. Found None.')
        elif images is None:
            raise ValueError('You have to specify the image. Found None.')
        if not all(task in ['semantic', 'instance', 'panoptic'] for task in task_inputs):
            raise ValueError('task_inputs must be semantic, instance, or panoptic.')
        encoded_inputs = self.image_processor.encode_inputs(images, task_inputs, segmentation_maps, **kwargs)
        if isinstance(task_inputs, str):
            task_inputs = [task_inputs]
        if isinstance(task_inputs, list) and all(isinstance(task_input, str) for task_input in task_inputs):
            task_token_inputs = []
            for task in task_inputs:
                task_input = f'the task is {task}'
                task_token_inputs.append(task_input)
            encoded_inputs['task_inputs'] = self._preprocess_text(task_token_inputs, max_length=self.task_seq_length)
        else:
            raise TypeError('Task Inputs should be a string or a list of strings.')
        if hasattr(encoded_inputs, 'text_inputs'):
            texts_list = encoded_inputs.text_inputs
            text_inputs = []
            for texts in texts_list:
                text_input_list = self._preprocess_text(texts, max_length=self.max_seq_length)
                text_inputs.append(text_input_list.unsqueeze(0))
            encoded_inputs['text_inputs'] = torch.cat(text_inputs, dim=0)
        return encoded_inputs

    def post_process_semantic_segmentation(self, *args, **kwargs):
        """
        This method forwards all its arguments to [`OneFormerImageProcessor.post_process_semantic_segmentation`].
        Please refer to the docstring of this method for more information.
        """
        return self.image_processor.post_process_semantic_segmentation(*args, **kwargs)

    def post_process_instance_segmentation(self, *args, **kwargs):
        """
        This method forwards all its arguments to [`OneFormerImageProcessor.post_process_instance_segmentation`].
        Please refer to the docstring of this method for more information.
        """
        return self.image_processor.post_process_instance_segmentation(*args, **kwargs)

    def post_process_panoptic_segmentation(self, *args, **kwargs):
        """
        This method forwards all its arguments to [`OneFormerImageProcessor.post_process_panoptic_segmentation`].
        Please refer to the docstring of this method for more information.
        """
        return self.image_processor.post_process_panoptic_segmentation(*args, **kwargs)
```

**class_skeleton:**

```python
class OneFormerProcessor(ProcessorMixin):
    '''
    Constructs an OneFormer processor which wraps [`OneFormerImageProcessor`] and
    [`CLIPTokenizer`]/[`CLIPTokenizerFast`] into a single processor that inherits both the image processor and
    tokenizer functionalities.

    Args:
        image_processor ([`OneFormerImageProcessor`]):
            The image processor is a required input.
        tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`]):
            The tokenizer is a required input.
        max_seq_len (`int`, *optional*, defaults to 77):
            Sequence length for input text list.
        task_seq_len (`int`, *optional*, defaults to 77):
            Sequence length for input task token.
    '''

    def __init__(self, image_processor=None, tokenizer=None, max_seq_length: int = 77, task_seq_length: int = 77, **kwargs):
        pass

    def _preprocess_text(self, text_list=None, max_length=77):
        pass

    def __call__(self, images=None, task_inputs=None, segmentation_maps=None, **kwargs):
        '''
        Main method to prepare for the model one or several task input(s) and image(s). This method forwards the
        `task_inputs` and `kwargs` arguments to CLIPTokenizer's [`~CLIPTokenizer.__call__`] if `task_inputs` is not
        `None` to encode. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        OneFormerImageProcessor's [`~OneFormerImageProcessor.__call__`] if `images` is not `None`. Please refer to the
        docstring of the above two methods for more information.

        Args:
            task_inputs (`str`, `list[str]`):
                The sequence or batch of task_inputs sequences to be encoded. Each sequence can be a string or a list
                of strings of the template "the task is {task}".
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            segmentation_maps (`ImageInput`, *optional*):
                The corresponding semantic segmentation maps with the pixel-wise annotations.
            (`bool`, *optional*, defaults to `True`):
                Whether or not to pad images up to the largest image in a batch and create a pixel mask. If left to
                the default, will return a pixel mask that is:

                - 1 for pixels that are real (i.e. **not masked**),
                - 0 for pixels that are padding (i.e. **masked**).

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **task_inputs** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        '''
        pass

    def encode_inputs(self, images=None, task_inputs=None, segmentation_maps=None, **kwargs):
        '''
        This method forwards all its arguments to [`OneFormerImageProcessor.encode_inputs`] and then tokenizes the
        task_inputs. Please refer to the docstring of this method for more information.
        '''
        pass

    def post_process_semantic_segmentation(self, *args, **kwargs):
        '''
        This method forwards all its arguments to [`OneFormerImageProcessor.post_process_semantic_segmentation`].
        Please refer to the docstring of this method for more information.
        '''
        pass

    def post_process_instance_segmentation(self, *args, **kwargs):
        '''
        This method forwards all its arguments to [`OneFormerImageProcessor.post_process_instance_segmentation`].
        Please refer to the docstring of this method for more information.
        '''
        pass

    def post_process_panoptic_segmentation(self, *args, **kwargs):
        '''
        This method forwards all its arguments to [`OneFormerImageProcessor.post_process_panoptic_segmentation`].
        Please refer to the docstring of this method for more information.
        '''
        pass
```

**Metrics:** total_program_units=8, total_doc_str=6, AvgCountLine=21, AvgCountLineBlank=4, AvgCountLineCode=11, AvgCountLineComment=6, AvgCyclomatic=4, CommentToCodeRatio=0.67, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=7, CountDeclInstanceVariable=2, CountDeclMethod=7, CountDeclMethodAll=24, CountLine=176, CountLineBlank=37, CountLineCode=83, CountLineCodeDecl=32, CountLineCodeExe=73, CountLineComment=56, CountStmt=77, CountStmtDecl=30, CountStmtExe=69, MaxCyclomatic=9, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=27
### Row 4,307

- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** huggingface_pytorch-pretrained-BERT/src/transformers/models/openai/configuration_openai.py
- **class_name:** transformers.models.openai.configuration_openai.OpenAIGPTConfig

**human_written_code:**

````python
from ...configuration_utils import PretrainedConfig


class OpenAIGPTConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`OpenAIGPTModel`] or a [`TFOpenAIGPTModel`]. It
    is used to instantiate a GPT model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the GPT
    [openai-community/openai-gpt](https://huggingface.co/openai-community/openai-gpt) architecture from OpenAI.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 40478):
            Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`OpenAIGPTModel`] or [`TFOpenAIGPTModel`].
        n_positions (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        n_embd (`int`, *optional*, defaults to 768):
            Dimensionality of the embeddings and hidden states.
        n_layer (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        afn (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`int`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        summary_type (`str`, *optional*, defaults to `"cls_index"`):
            Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
            [`OpenAIGPTDoubleHeadsModel`]. Has to be one of the following options:

            - `"last"`: Take the last token hidden state (like XLNet).
            - `"first"`: Take the first token hidden state (like BERT).
            - `"mean"`: Take the mean of all tokens hidden states.
            - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
            - `"attn"`: Not implemented now, use multi-head attention.
        summary_use_proj (`bool`, *optional*, defaults to `True`):
            Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
            [`OpenAIGPTDoubleHeadsModel`]. Whether or not to add a projection after the vector extraction.
        summary_activation (`str`, *optional*):
            Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
            [`OpenAIGPTDoubleHeadsModel`]. Pass `"tanh"` for a tanh activation to the output, any other value will
            result in no activation.
        summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
            Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
            [`OpenAIGPTDoubleHeadsModel`]. Whether the projection outputs should have `config.num_labels` or
            `config.hidden_size` classes.
        summary_first_dropout (`float`, *optional*, defaults to 0.1):
            Argument used when doing sequence summary, used in the models [`OpenAIGPTDoubleHeadsModel`] and
            [`OpenAIGPTDoubleHeadsModel`]. The dropout ratio to be used after the projection and activation.

    Examples:

    ```python
    >>> from transformers import OpenAIGPTConfig, OpenAIGPTModel

    >>> # Initializing a GPT configuration
    >>> configuration = OpenAIGPTConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = OpenAIGPTModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = 'openai-gpt'
    attribute_map = {'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'}

    def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn='gelu', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, summary_type='cls_index', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
````

**class_skeleton:**

````python
class OpenAIGPTConfig(PretrainedConfig):
    '''
    (Docstring identical to the class docstring in human_written_code above,
    including the Args section and the usage example:)

    ```python
    >>> from transformers import OpenAIGPTConfig, OpenAIGPTModel

    >>> # Initializing a GPT configuration
    >>> configuration = OpenAIGPTConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = OpenAIGPTModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```'''

    def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn='gelu', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, summary_type='cls_index', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        pass
````

**Metrics:** total_program_units=2, total_doc_str=1, AvgCountLine=37, AvgCountLineBlank=0, AvgCountLineCode=37, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=1.51, CountClassBase=1, CountClassCoupled=1, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=16, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=129, CountLineBlank=16, CountLineCode=45, CountLineCodeDecl=39, CountLineCodeExe=24, CountLineComment=68, CountStmt=21, CountStmtDecl=20, CountStmtExe=19, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=1
### Row 4,308

- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** huggingface_pytorch-pretrained-BERT/src/transformers/models/openai/modeling_openai.py
- **class_name:** transformers.models.openai.modeling_openai.Attention

**human_written_code:**

```python
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
import torch
from torch import nn
import math


class Attention(nn.Module):
    def __init__(self, nx, n_positions, config, scale=False):
        super().__init__()
        n_state = nx
        if n_state % config.n_head != 0:
            raise ValueError(f'Attention n_state shape: {n_state} must be divisible by config.n_head {config.n_head}')
        self.register_buffer('bias', torch.tril(torch.ones(n_positions, n_positions)).view(1, 1, n_positions, n_positions), persistent=False)
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale
        self.c_attn = Conv1D(n_state * 3, nx)
        self.c_proj = Conv1D(n_state, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(heads, self.n_head, self.split_size // self.n_head, self.pruned_heads)
        index_attn = torch.cat([index, index + self.split_size, index + 2 * self.split_size])
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
        self.split_size = self.split_size // self.n_head * (self.n_head - len(heads))
        self.n_head = self.n_head - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)

    def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        b = self.bias[:, :, :w.size(-2), :w.size(-1)]
        w = w * b + -10000.0 * (1 - b)
        if attention_mask is not None:
            w = w + attention_mask
        w = nn.functional.softmax(w, dim=-1)
        w = self.attn_dropout(w)
        if head_mask is not None:
            w = w * head_mask
        outputs = [torch.matmul(w, v)]
        if output_attentions:
            outputs.append(w)
        return outputs

    def merge_heads(self, x):
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)

    def split_heads(self, x, k=False):
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)
        if k:
            return x.permute(0, 2, 3, 1)
        else:
            return x.permute(0, 2, 1, 3)

    def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)
        a = attn_outputs[0]
        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)
        outputs = [a] + attn_outputs[1:]
        return outputs
```

**class_skeleton:**

```python
class Attention(nn.Module):
    def __init__(self, nx, n_positions, config, scale=False):
        pass

    def prune_heads(self, heads):
        pass

    def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
        pass

    def merge_heads(self, x):
        pass

    def split_heads(self, x, k=False):
        pass

    def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
        pass
```

**Metrics:** total_program_units=7, total_doc_str=0, AvgCountLine=14, AvgCountLineBlank=1, AvgCountLineCode=12, AvgCountLineComment=2, AvgCyclomatic=2, CommentToCodeRatio=0.15, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=6, CountDeclInstanceVariable=8, CountDeclMethod=6, CountDeclMethodAll=16, CountLine=91, CountLineBlank=13, CountLineCode=71, CountLineCodeDecl=27, CountLineCodeExe=64, CountLineComment=11, CountStmt=64, CountStmtDecl=27, CountStmtExe=57, MaxCyclomatic=5, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=13
### Row 4,309

- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** huggingface_pytorch-pretrained-BERT/src/transformers/models/openai/modeling_openai.py
- **class_name:** transformers.models.openai.modeling_openai.Block

**human_written_code:**

```python
from torch import nn


class Block(nn.Module):
    def __init__(self, n_positions, config, scale=False):
        super().__init__()
        nx = config.n_embd
        self.attn = Attention(nx, n_positions, config, scale)
        self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * nx, config)
        self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)

    def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
        attn_outputs = self.attn(x, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions)
        a = attn_outputs[0]
        n = self.ln_1(x + a)
        m = self.mlp(n)
        h = self.ln_2(n + m)
        outputs = [h] + attn_outputs[1:]
        return outputs
```

**class_skeleton:**

```python
class Block(nn.Module):
    def __init__(self, n_positions, config, scale=False):
        pass

    def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
        pass
```

**Metrics:** total_program_units=3, total_doc_str=0, AvgCountLine=11, AvgCountLineBlank=1, AvgCountLineCode=10, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=24, CountLineBlank=3, CountLineCode=21, CountLineCodeDecl=14, CountLineCodeExe=18, CountLineComment=0, CountStmt=16, CountStmtDecl=14, CountStmtExe=13, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
### Row 4,310

- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** huggingface_pytorch-pretrained-BERT/src/transformers/models/openai/modeling_openai.py
- **class_name:** transformers.models.openai.modeling_openai.MLP

**human_written_code:**

```python
from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
from torch import nn


class MLP(nn.Module):
    def __init__(self, n_state, config):
        super().__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, nx)
        self.c_proj = Conv1D(nx, n_state)
        self.act = ACT_FNS[config.afn]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        h = self.act(self.c_fc(x))
        h2 = self.c_proj(h)
        return self.dropout(h2)
```

**class_skeleton:**

```python
class MLP(nn.Module):
    def __init__(self, n_state, config):
        pass

    def forward(self, x):
        pass
```

**Metrics:** total_program_units=3, total_doc_str=0, AvgCountLine=6, AvgCountLineBlank=0, AvgCountLineCode=6, AvgCountLineComment=1, AvgCyclomatic=1, CommentToCodeRatio=0.08, CountClassBase=1, CountClassCoupled=2, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=13, CountLineBlank=1, CountLineCode=12, CountLineCodeDecl=10, CountLineCodeExe=9, CountLineComment=1, CountStmt=12, CountStmtDecl=10, CountStmtExe=9, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
### Row 4,311

- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** huggingface_pytorch-pretrained-BERT/src/transformers/models/openai/modeling_openai.py
- **class_name:** transformers.models.openai.modeling_openai.OpenAIGPTDoubleHeadsModel

**human_written_code:**

````python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch import nn
from typing import Any, Callable, Optional, Union
from ...utils import ModelOutput, auto_docstring, logging


@auto_docstring(custom_intro='\n    OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for\n    RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the\n    input embeddings, the classification head takes as input the input of a specified classification token index in the\n    input sequence).\n    ')
class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
    _tied_weights_keys = ['lm_head.weight']

    def __init__(self, config):
        super().__init__(config)
        config.num_labels = 1
        self.transformer = OpenAIGPTModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.multiple_choice_head = OpenAIGPTSequenceSummary(config)
        self.post_init()

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, mc_token_ids: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, mc_labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> Union[tuple[torch.Tensor], OpenAIGPTDoubleHeadsModelOutput]:
        """
        mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input):
            Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) - 1]`.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-1, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
            where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above)

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, OpenAIGPTDoubleHeadsModel
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt")
        >>> model = OpenAIGPTDoubleHeadsModel.from_pretrained("openai-community/openai-gpt")
        >>> tokenizer.add_special_tokens(
        ...     {"cls_token": "[CLS]"}
        ... )  # Add a [CLS] to the vocabulary (we should train it also!)
        >>> model.resize_token_embeddings(len(tokenizer))

        >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
        >>> input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0)  # Batch size 1, 2 choices
        >>> mc_token_ids = torch.tensor([input_ids.size(-1) - 1, input_ids.size(-1) - 1]).unsqueeze(0)  # Batch size 1

        >>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
        >>> lm_logits = outputs.logits
        >>> mc_logits = outputs.mc_logits
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
        lm_loss, mc_loss = (None, None)
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
        if labels is not None:
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
        if not return_dict:
            output = (lm_logits, mc_logits) + transformer_outputs[1:]
            if mc_loss is not None:
                output = (mc_loss,) + output
            return (lm_loss,) + output if lm_loss is not None else output
        return OpenAIGPTDoubleHeadsModelOutput(loss=lm_loss, mc_loss=mc_loss, logits=lm_logits, mc_logits=mc_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
````

**class_skeleton:**

````python
@auto_docstring(custom_intro='\n    OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for\n    RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the\n    input embeddings, the classification head takes as input the input of a specified classification token index in the\n    input sequence).\n    ')
class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
    def __init__(self, config):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, mc_token_ids: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, mc_labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> Union[tuple[torch.Tensor], OpenAIGPTDoubleHeadsModelOutput]:
        '''
        (Docstring identical to the forward docstring in human_written_code above,
        including the mc_token_ids/labels/mc_labels descriptions and the usage example.)
        '''
        pass
````

**Metrics:** total_program_units=5, total_doc_str=1, AvgCountLine=27, AvgCountLineBlank=3, AvgCountLineCode=16, AvgCountLineComment=8, AvgCyclomatic=3, CommentToCodeRatio=0.45, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=4, CountDeclInstanceVariable=3, CountDeclMethod=4, CountDeclMethodAll=5, CountLine=114, CountLineBlank=17, CountLineCode=67, CountLineCodeDecl=33, CountLineCodeExe=46, CountLineComment=30, CountStmt=34, CountStmtDecl=18, CountStmtExe=29, MaxCyclomatic=7, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=10
### Row 4,312

- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** huggingface_pytorch-pretrained-BERT/src/transformers/models/openai/modeling_openai.py
- **class_name:** transformers.models.openai.modeling_openai.OpenAIGPTForSequenceClassification

**human_written_code:**

```python
from ...utils import ModelOutput, auto_docstring, logging
import torch
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Any, Callable, Optional, Union
from torch import nn


@auto_docstring(custom_intro='\n    The Original OpenAI GPT Model transformer with a sequence classification head on top (linear layer).\n    [`OpenAIGPTForSequenceClassification`] uses the last token in order to do the classification, as other causal\n    models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the\n    last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding\n    token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since\n    it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take\n    the last value in each row of the batch).\n    ')
class OpenAIGPTForSequenceClassification(OpenAIGPTPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = OpenAIGPTModel(config)
        self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
        self.post_init()

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
        """
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in
            `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square
            loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)
        if input_ids is not None:
            batch_size, sequence_length = input_ids.shape[:2]
        else:
            batch_size, sequence_length = inputs_embeds.shape[:2]
        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`')
        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return (loss,) + output if loss is not None else output
        return SequenceClassifierOutput(loss=loss, logits=pooled_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)
```

**class_skeleton:** null

**Metrics:** total_program_units=5, total_doc_str=1, AvgCountLine=50, AvgCountLineBlank=5, AvgCountLineCode=41, AvgCountLineComment=5, AvgCyclomatic=9, CommentToCodeRatio=0.1, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=3, CountLine=108, CountLineBlank=10, CountLineCode=89, CountLineCodeDecl=30, CountLineCodeExe=68, CountLineComment=9, CountStmt=45, CountStmtDecl=17, CountStmtExe=42, MaxCyclomatic=16, MaxInheritanceTree=2, MaxNesting=3, SumCyclomatic=17
### Row 4,313

- **repository_name:** huggingface/pytorch-pretrained-BERT
- **file_path:** huggingface_pytorch-pretrained-BERT/src/transformers/models/openai/modeling_openai.py
- **class_name:** transformers.models.openai.modeling_openai.OpenAIGPTLMHeadModel

**human_written_code:**

```python
from ...generation import GenerationMixin
from typing import Any, Callable, Optional, Union
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
import torch
from ...utils import ModelOutput, auto_docstring, logging
from torch import nn


@auto_docstring(custom_intro='\n    OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input\n    embeddings).\n    ')
class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ['lm_head.weight']

    def __init__(self, config):
        super().__init__(config)
        self.transformer = OpenAIGPTModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.post_init()

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs) -> Union[tuple[torch.Tensor], CausalLMOutput]:
        """
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to
            `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            loss = self.loss_function(lm_logits, labels, vocab_size=self.config.vocab_size, **kwargs)
        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return (loss,) + output if loss is not None else output
        return CausalLMOutput(loss=loss, logits=lm_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)

    def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs) -> dict[str, Any]:
        model_inputs = {'input_ids': input_ids}
        for key, value in kwargs.items():
            if key not in model_inputs:
                model_inputs[key] = value
        return model_inputs
```

**class_skeleton:**

```python
@auto_docstring(custom_intro='\n    OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input\n    embeddings).\n    ')
class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel, GenerationMixin):
    def __init__(self, config):
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs) -> Union[tuple[torch.Tensor], CausalLMOutput]:
        '''
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to
            `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        '''
        pass

    def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs) -> dict[str, Any]:
        pass
```

**Metrics:** total_program_units=6, total_doc_str=1, AvgCountLine=14, AvgCountLineBlank=1, AvgCountLineCode=11, AvgCountLineComment=2, AvgCyclomatic=2, CommentToCodeRatio=0.14, CountClassBase=2, CountClassCoupled=7, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=2, CountDeclMethod=5, CountDeclMethodAll=6, CountLine=83, CountLineBlank=10, CountLineCode=64, CountLineCodeDecl=28, CountLineCodeExe=39, CountLineComment=9, CountStmt=25, CountStmtDecl=14, CountStmtExe=19, MaxCyclomatic=5, MaxInheritanceTree=2, MaxNesting=1, SumCyclomatic=9
4,314
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/openai/modeling_openai.py
transformers.models.openai.modeling_openai.OpenAIGPTModel
import torch from typing import Any, Callable, Optional, Union from torch import nn from ...utils import ModelOutput, auto_docstring, logging from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput @auto_docstring class OpenAIGPTModel(OpenAIGPTPreTrainedModel): def __init__(self, config): super().__init__(config) self.tokens_embed = nn.Embedding(config.vocab_size, config.n_embd) self.positions_embed = nn.Embedding(config.n_positions, config.n_embd) self.drop = nn.Dropout(config.embd_pdrop) self.h = nn.ModuleList([Block(config.n_positions, config, scale=True) for _ in range(config.n_layer)]) self.register_buffer('position_ids', torch.arange(config.n_positions), persistent=False) self.post_init() def get_input_embeddings(self): return self.tokens_embed def set_input_embeddings(self, new_embeddings): self.tokens_embed = new_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ for layer, heads in heads_to_prune.items(): self.h[layer].attn.prune_heads(heads) @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time') elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds') if position_ids is None: position_ids = self.position_ids[None, :input_shape[-1]] if attention_mask is not None: attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min head_mask = self.get_head_mask(head_mask, self.config.n_layer) if inputs_embeds is None: inputs_embeds = self.tokens_embed(input_ids) position_embeds = self.positions_embed(position_ids) if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) token_type_embeds = self.tokens_embed(token_type_ids) else: token_type_embeds = 0 hidden_states = inputs_embeds + position_embeds + token_type_embeds hidden_states = self.drop(hidden_states) output_shape = input_shape + (hidden_states.size(-1),) all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, block in enumerate(self.h): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = block(hidden_states, attention_mask, head_mask[i], output_attentions=output_attentions) 
hidden_states = outputs[0] if output_attentions: all_attentions = all_attentions + (outputs[1],) hidden_states = hidden_states.view(*output_shape) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions)
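A standalone sketch of the additive attention-mask transform in the forward pass above: a 2D padding mask becomes a broadcastable 4D bias where masked positions get the dtype's minimum value (the example mask is an assumption):

```python
import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])  # (batch, seq_len); last token padded
dtype = torch.float32
# Equivalent to unsqueeze(1).unsqueeze(2) in the code above:
extended = attention_mask[:, None, None, :].to(dtype)     # (batch, 1, 1, seq_len)
extended = (1.0 - extended) * torch.finfo(dtype).min
print(extended)  # 0.0 where attended, ~-3.4e38 where padded
```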
@auto_docstring class OpenAIGPTModel(OpenAIGPTPreTrainedModel): def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, new_embeddings): pass def _prune_heads(self, heads_to_prune): ''' Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} ''' pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple[torch.Tensor], BaseModelOutput]: pass
8
1
22
3
16
4
5
0.22
1
9
2
0
5
4
5
6
122
18
86
32
63
19
59
20
53
18
2
2
23
4,315
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/openai/modeling_openai.py
transformers.models.openai.modeling_openai.OpenAIGPTPreTrainedModel
from torch import nn from ...utils import ModelOutput, auto_docstring, logging from ...modeling_utils import PreTrainedModel from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer from .configuration_openai import OpenAIGPTConfig @auto_docstring class OpenAIGPTPreTrainedModel(PreTrainedModel): config: OpenAIGPTConfig base_model_prefix = 'transformer' def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, (nn.Linear, Conv1D)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0)
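The `_init_weights` pattern above, trimmed to the `nn.Linear` and `nn.LayerNorm` branches and applied to a toy module for illustration; `initializer_range=0.02` mirrors the usual config default (an assumption here):

```python
import torch
from torch import nn

def init_weights(module, initializer_range=0.02):
    # Trimmed sketch: the original also handles Conv1D and nn.Embedding.
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=initializer_range)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)

toy = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
toy.apply(init_weights)   # nn.Module.apply visits every submodule
print(toy[1].weight)      # LayerNorm weight is all ones after init
```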
@auto_docstring class OpenAIGPTPreTrainedModel(PreTrainedModel): def _init_weights(self, module): '''Initialize the weights.''' pass
3
1
15
0
12
3
6
0.44
1
1
1
4
1
0
1
1
25
2
16
5
14
7
14
5
12
6
1
2
6
4,316
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/openai/tokenization_openai.py
transformers.models.openai.tokenization_openai.OpenAIGPTTokenizer
from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace import json import os from typing import Optional class OpenAIGPTTokenizer(PreTrainedTokenizer): """ Construct a GPT Tokenizer. Based on Byte-Pair-Encoding with the following peculiarities: - lowercases all inputs, - uses `SpaCy` tokenizer and `ftfy` for pre-BPE tokenization if they are installed, fallback to BERT's `BasicTokenizer` if not. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] def __init__(self, vocab_file, merges_file, unk_token='<unk>', **kwargs): try: import ftfy from spacy.lang.en import English _nlp = English() self.nlp = _nlp.tokenizer self.fix_text = ftfy.fix_text except ImportError: logger.warning('ftfy or spacy is not installed using BERT BasicTokenizer instead of SpaCy & ftfy.') self.nlp = BasicTokenizer(do_lower_case=True) self.fix_text = None with open(vocab_file, encoding='utf-8') as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} with open(merges_file, encoding='utf-8') as merges_handle: merges = merges_handle.read().split('\n')[1:-1] merges = [tuple(merge.split()) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__(unk_token=unk_token, **kwargs) @property def do_lower_case(self): return True @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + '</w>',) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token + '</w>' while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and (word[i + 1] == second): new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) if word == '\n </w>': word = '\n</w>' self.cache[token] = word return word def _tokenize(self, text): """Tokenize a string.""" split_tokens = [] if self.fix_text is None: text = self.nlp.tokenize(text) for token in text: split_tokens.extend(list(self.bpe(token).split(' '))) else: text = self.nlp(text_standardize(self.fix_text(text))) for token in text: split_tokens.extend(list(self.bpe(token.text.lower()).split(' '))) return split_tokens def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an id in a token (BPE) using the vocab.""" return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """Converts a 
sequence of tokens (string) in a single string.""" out_string = ''.join(tokens).replace('</w>', ' ').strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(vocab_file, 'w', encoding='utf-8') as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n') index = 0 with open(merge_file, 'w', encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!') index = token_index writer.write(' '.join(bpe_tokens) + '\n') index += 1 return (vocab_file, merge_file)
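The `bpe` method above calls `get_pairs`, which is defined at module level and therefore not part of this record; a reconstructed standalone version for illustration, together with the `</w>` word-ending convention the tokenizer uses:

```python
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word tuple."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

# The tokenizer appends '</w>' to the last symbol to mark word endings:
token = "low"
word = tuple(token[:-1]) + (token[-1] + "</w>",)  # ('l', 'o', 'w</w>')
print(get_pairs(word))                            # {('l', 'o'), ('o', 'w</w>')}
```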
class OpenAIGPTTokenizer(PreTrainedTokenizer): ''' Construct a GPT Tokenizer. Based on Byte-Pair-Encoding with the following peculiarities: - lowercases all inputs, - uses `SpaCy` tokenizer and `ftfy` for pre-BPE tokenization if they are installed, fallback to BERT's `BasicTokenizer` if not. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. ''' def __init__(self, vocab_file, merges_file, unk_token='<unk>', **kwargs): pass @property def do_lower_case(self): pass @property def vocab_size(self): pass def get_vocab(self): pass def bpe(self, token): pass def _tokenize(self, text): '''Tokenize a string.''' pass def _convert_token_to_id(self, token): '''Converts a token (str) in an id using the vocab.''' pass def _convert_id_to_token(self, index): '''Converts an id in a token (BPE) using the vocab.''' pass def convert_tokens_to_string(self, tokens): '''Converts a sequence of tokens (string) in a single string.''' pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass
13
5
12
1
11
1
3
0.2
1
11
1
0
10
6
10
99
159
23
114
43
99
23
102
37
89
10
3
3
28
4,317
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/openai/tokenization_openai_fast.py
transformers.models.openai.tokenization_openai_fast.OpenAIGPTTokenizerFast
from typing import Optional from .tokenization_openai import OpenAIGPTTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast class OpenAIGPTTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" GPT Tokenizer (backed by HuggingFace's *tokenizers* library). Based on Byte-Pair-Encoding with the following peculiarities: - lower case all inputs - uses BERT's BasicTokenizer for pre-BPE tokenization This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ['input_ids', 'attention_mask'] slow_tokenizer_class = OpenAIGPTTokenizer def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token='<unk>', **kwargs): super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, **kwargs) @property def do_lower_case(self): return True def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: files = self._tokenizer.model.save(save_directory, name=filename_prefix) return tuple(files)
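A brief usage sketch for the fast tokenizer above (the checkpoint id is an assumption):

```python
from transformers import OpenAIGPTTokenizerFast

tok = OpenAIGPTTokenizerFast.from_pretrained("openai-community/openai-gpt")
ids = tok("Hello world").input_ids
print(ids)
print(tok.decode(ids))
print(tok.do_lower_case)  # always True, per the property above
```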
class OpenAIGPTTokenizerFast(PreTrainedTokenizerFast): ''' Construct a "fast" GPT Tokenizer (backed by HuggingFace's *tokenizers* library). Based on Byte-Pair-Encoding with the following peculiarities: - lower case all inputs - uses BERT's BasicTokenizer for pre-BPE tokenization This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. ''' def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token='<unk>', **kwargs): pass @property def do_lower_case(self): pass def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]: pass
5
1
2
0
2
0
1
1.33
1
3
0
0
3
0
3
91
35
7
12
9
7
16
11
8
7
1
3
0
3
4,318
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/opt/configuration_opt.py
transformers.models.opt.configuration_opt.OPTConfig
from ...configuration_utils import PretrainedConfig class OPTConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`OPTModel`]. It is used to instantiate an OPT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the OPT [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50272): Vocabulary size of the OPT model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`OPTModel`] hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of decoder layers. ffn_dim (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer decoder. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). do_layer_norm_before (`bool`, *optional*, defaults to `True`): Whether to perform layer normalization before the attention block. word_embed_proj_dim (`int`, *optional*): `word_embed_proj_dim` can be set to down-project word embeddings, *e.g.* `opt-350m`. Defaults to `hidden_size`. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). enable_bias (`bool`, *optional*, defaults to `True`): Whether or not the linear layers in the attention blocks should use the bias term. layer_norm_elementwise_affine (`bool`, *optional*, defaults to `True`): Whether or not the layer norms should have learnable parameters. 
Example: ```python >>> from transformers import OPTConfig, OPTModel >>> # Initializing a OPT facebook/opt-large style configuration >>> configuration = OPTConfig() >>> # Initializing a model (with random weights) from the facebook/opt-large style configuration >>> model = OPTModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'opt' keys_to_ignore_at_inference = ['past_key_values'] def __init__(self, vocab_size=50272, hidden_size=768, num_hidden_layers=12, ffn_dim=3072, max_position_embeddings=2048, do_layer_norm_before=True, _remove_final_layer_norm=False, word_embed_proj_dim=None, dropout=0.1, attention_dropout=0.0, num_attention_heads=12, activation_function='relu', layerdrop=0.0, init_std=0.02, use_cache=True, pad_token_id=1, bos_token_id=2, eos_token_id=2, enable_bias=True, layer_norm_elementwise_affine=True, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.num_attention_heads = num_attention_heads self.word_embed_proj_dim = word_embed_proj_dim if word_embed_proj_dim is not None else hidden_size self.ffn_dim = ffn_dim self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.dropout = dropout self.attention_dropout = attention_dropout self.activation_function = activation_function self.init_std = init_std self.layerdrop = layerdrop self.use_cache = use_cache self.do_layer_norm_before = do_layer_norm_before self.enable_bias = enable_bias self.layer_norm_elementwise_affine = layer_norm_elementwise_affine self._remove_final_layer_norm = _remove_final_layer_norm
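A short sketch of the `word_embed_proj_dim` fallback in `__init__` above: when unset it defaults to `hidden_size`, while opt-350m-style configs set it lower to down-project embeddings:

```python
from transformers import OPTConfig

default = OPTConfig()  # word_embed_proj_dim falls back to hidden_size
projected = OPTConfig(hidden_size=1024, word_embed_proj_dim=512)
print(default.word_embed_proj_dim, default.hidden_size)      # 768 768
print(projected.word_embed_proj_dim, projected.hidden_size)  # 512 1024
```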
class OPTConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`OPTModel`]. It is used to instantiate an OPT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the OPT [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50272): Vocabulary size of the OPT model. Defines the number of different tokens that can be represented by the `input_ids` passed when calling [`OPTModel`] hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of decoder layers. ffn_dim (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer decoder. activation_function (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. max_position_embeddings (`int`, *optional*, defaults to 2048): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). do_layer_norm_before (`bool`, *optional*, defaults to `True`): Whether to perform layer normalization before the attention block. word_embed_proj_dim (`int`, *optional*): `word_embed_proj_dim` can be set to down-project word embeddings, *e.g.* `opt-350m`. Defaults to `hidden_size`. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556) for more details. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). enable_bias (`bool`, *optional*, defaults to `True`): Whether or not the linear layers in the attention blocks should use the bias term. layer_norm_elementwise_affine (`bool`, *optional*, defaults to `True`): Whether or not the layer norms should have learnable parameters. 
Example: ```python >>> from transformers import OPTConfig, OPTModel >>> # Initializing a OPT facebook/opt-large style configuration >>> configuration = OPTConfig() >>> # Initializing a model (with random weights) from the facebook/opt-large style configuration >>> model = OPTModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=50272, hidden_size=768, num_hidden_layers=12, ffn_dim=3072, max_position_embeddings=2048, do_layer_norm_before=True, _remove_final_layer_norm=False, word_embed_proj_dim=None, dropout=0.1, attention_dropout=0.0, num_attention_heads=12, activation_function='relu', layerdrop=0.0, init_std=0.02, use_cache=True, pad_token_id=1, bos_token_id=2, eos_token_id=2, enable_bias=True, layer_norm_elementwise_affine=True, **kwargs): pass
2
1
52
1
47
4
2
1.18
1
1
0
0
1
17
1
1
120
11
50
44
25
59
22
21
20
2
1
0
2
4,319
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/opt/modeling_opt.py
transformers.models.opt.modeling_opt.OPTAttention
from torch import nn from .configuration_opt import OPTConfig from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel import torch from ...cache_utils import Cache, DynamicCache from typing import Callable, Optional, Union from ...utils.deprecation import deprecate_kwarg class OPTAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: OPTConfig, layer_idx: Optional[int]=None, **kwargs): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.dropout = config.attention_dropout self.enable_bias = config.enable_bias self.layer_idx = layer_idx if layer_idx is None: logger.warning_once(f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` when creating this class.') self.head_dim = self.embed_dim // self.num_heads self.is_causal = True if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scaling = self.head_dim ** (-0.5) self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=self.enable_bias) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scaling query_states = query_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) key_states = key_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2) if past_key_values is not None: key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, {'cache_position': cache_position}) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != 'eager': attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=1.0, **kwargs) attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous() attn_output = self.out_proj(attn_output) if not output_attentions: attn_weights = None return (attn_output, attn_weights)
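A shape walkthrough for the projections in `forward` above, with identity stand-ins for the linear layers (an illustrative assumption). Note the quirk: the query is pre-multiplied by `head_dim ** -0.5` and the attention kernel is then called with `scaling=1.0`, so scaling is applied exactly once:

```python
import torch

bsz, tgt_len, embed_dim, num_heads = 2, 5, 16, 4
head_dim = embed_dim // num_heads
hidden = torch.randn(bsz, tgt_len, embed_dim)

# Stand-in for `self.q_proj(hidden_states) * self.scaling`:
q = (hidden * head_dim ** -0.5).view(bsz, -1, num_heads, head_dim).transpose(1, 2)
k = hidden.view(bsz, -1, num_heads, head_dim).transpose(1, 2)

# q @ k^T needs no further scaling, matching scaling=1.0 downstream.
attn_scores = torch.matmul(q, k.transpose(-1, -2))
print(attn_scores.shape)  # torch.Size([2, 4, 5, 5]) = (bsz, heads, tgt, tgt)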
class OPTAttention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, config: OPTConfig, layer_idx: Optional[int]=None, **kwargs): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, past_key_values: Optional[Cache]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, cache_position: Optional[torch.Tensor]=None, **kwargs) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]: '''Input shape: Batch x Time x Channel''' pass
4
2
50
7
35
8
5
0.23
1
6
1
2
3
13
3
13
156
24
107
42
89
25
72
28
68
13
1
2
16
4,320
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/opt/modeling_opt.py
transformers.models.opt.modeling_opt.OPTDecoder
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast from ...cache_utils import Cache, DynamicCache from ...modeling_attn_mask_utils import AttentionMaskConverter from ...processing_utils import Unpack from .configuration_opt import OPTConfig from ...modeling_flash_attention_utils import FlashAttentionKwargs import torch from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging from typing import Callable, Optional, Union from torch import nn class OPTDecoder(OPTPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`OPTDecoderLayer`] Args: config: OPTConfig """ def __init__(self, config: OPTConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.word_embed_proj_dim, self.padding_idx) self.embed_positions = OPTLearnedPositionalEmbedding(config.max_position_embeddings, config.hidden_size) if config.word_embed_proj_dim != config.hidden_size: self.project_out = nn.Linear(config.hidden_size, config.word_embed_proj_dim, bias=False) else: self.project_out = None if config.word_embed_proj_dim != config.hidden_size: self.project_in = nn.Linear(config.word_embed_proj_dim, config.hidden_size, bias=False) else: self.project_in = None if config.do_layer_norm_before and (not config._remove_final_layer_norm): self.final_layer_norm = nn.LayerNorm(config.hidden_size, elementwise_affine=config.layer_norm_elementwise_affine) else: self.final_layer_norm = None self.layers = nn.ModuleList([OPTDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False self.post_init() def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): if self.config._attn_implementation == 'flash_attention_2': if attention_mask is not None and (attention_mask == 0.0).any(): return attention_mask return None if self.config._attn_implementation == 'flex_attention': if isinstance(attention_mask, torch.Tensor): attention_mask = make_flex_block_causal_mask(attention_mask) return attention_mask past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False if self.config._attn_implementation == 'sdpa' and (not using_compilable_cache) and (not output_attentions): if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training): return None dtype = input_tensor.dtype sequence_length = input_tensor.shape[1] if using_compilable_cache: target_length = past_key_values.get_max_cache_shape() else: target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1 causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0]) if self.config._attn_implementation == 'sdpa' and attention_mask is not 
None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions): min_dtype = torch.finfo(dtype).min causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) return causal_mask @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask @can_return_tuple def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, position_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Union[tuple, BaseModelOutputWithPast]: """ Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. for padding use -1. [What are position IDs?](../glossary#position-ids) cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`, this tensor is not affected by padding. It is used to update the cache in the correct position and to infer the complete sequence length. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError('You must specify exactly one of input_ids or inputs_embeds') if self.gradient_checkpointing and self.training and use_cache: logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`.') use_cache = False if input_ids is not None: input_ids = input_ids.view(-1, input_ids.shape[-1]) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device) if attention_mask is None: seq_length = past_seen_tokens + inputs_embeds.shape[1] attention_mask = torch.ones(inputs_embeds.shape[0], seq_length, device=inputs_embeds.device) causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions) if position_ids is None: position_ids = torch.cumsum(attention_mask, dim=1) position_ids = (position_ids * attention_mask - 1).long() position_ids = position_ids[:, past_seen_tokens:] pos_embeds = self.embed_positions(attention_mask, past_seen_tokens, position_ids=position_ids) if self.project_in is not None: inputs_embeds = self.project_in(inputs_embeds) hidden_states = inputs_embeds + pos_embeds.to(inputs_embeds.device) all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for attn_mask, mask_name in zip([head_mask], ['head_mask']): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.') for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue layer_outputs = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, layer_head_mask=head_mask[idx] if head_mask is not None else None, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if self.final_layer_norm is not None: hidden_states = self.final_layer_norm(hidden_states) if self.project_out is not None: hidden_states = self.project_out(hidden_states) if output_hidden_states: all_hidden_states += (hidden_states,) return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns)
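A standalone sketch of the position-id derivation in the forward pass above: cumulative sum over the attention mask, shifted to start at 0, with padding mapped to -1 (as the `position_ids` docstring notes). The left-padded example row is an assumption:

```python
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1]])  # two pads, three real tokens
position_ids = torch.cumsum(attention_mask, dim=1)
position_ids = (position_ids * attention_mask - 1).long()
print(position_ids)  # tensor([[-1, -1,  0,  1,  2]])
# The forward pass then drops already-cached positions:
# position_ids = position_ids[:, past_seen_tokens:]
```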
class OPTDecoder(OPTPreTrainedModel): ''' Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`OPTDecoderLayer`] Args: config: OPTConfig ''' def __init__(self, config: OPTConfig): pass def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False): pass @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): ''' Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. ''' pass @can_return_tuple def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, position_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Union[tuple, BaseModelOutputWithPast]: ''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. 
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. for padding use -1. [What are position IDs?](../glossary#position-ids) cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`, this tensor is not affected by padding. It is used to update the cache in the correct position and to infer the complete sequence length. ''' pass
7
3
55
8
35
11
10
0.35
1
13
4
0
5
14
5
6
285
48
175
57
149
62
102
37
96
36
2
3
49
4,321
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/opt/modeling_opt.py
transformers.models.opt.modeling_opt.OPTDecoderLayer
from ...utils.deprecation import deprecate_kwarg from typing import Callable, Optional, Union from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...activations import ACT2FN from .configuration_opt import OPTConfig from ...processing_utils import Unpack import torch from torch import nn from ...modeling_layers import GradientCheckpointingLayer from ...cache_utils import Cache, DynamicCache class OPTDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: OPTConfig, layer_idx: Optional[int]=None): super().__init__() self.embed_dim = config.hidden_size self.self_attn = OPTAttention(config=config, layer_idx=layer_idx) self.do_layer_norm_before = config.do_layer_norm_before self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, elementwise_affine=config.layer_norm_elementwise_affine) self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim, bias=config.enable_bias) self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim, bias=config.enable_bias) self.final_layer_norm = nn.LayerNorm(self.embed_dim, elementwise_affine=config.layer_norm_elementwise_affine) @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, position_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_values (`Cache`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence.. 
""" residual = hidden_states if self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, past_key_values=past_key_values, position_ids=position_ids, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position, **kwargs) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if not self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states_shape = hidden_states.shape hidden_states = hidden_states.reshape(-1, hidden_states.size(-1)) residual = hidden_states if self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = (residual + hidden_states).view(hidden_states_shape) if not self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) return outputs
class OPTDecoderLayer(GradientCheckpointingLayer): def __init__(self, config: OPTConfig, layer_idx: Optional[int]=None): pass @deprecate_kwarg('past_key_value', new_name='past_key_values', version='4.58') def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, position_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: ''' Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_values (`Cache`, *optional*): cached past key and value projection states cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence.. ''' pass
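A minimal sketch of the `do_layer_norm_before` switch in the layer above: the same residual block in pre-LN (norm, sublayer, add) versus post-LN (sublayer, add, norm) form. `sublayer` is a stand-in for the attention or feed-forward blocks:

```python
import torch
from torch import nn

def residual_block(x, norm, sublayer, do_layer_norm_before=True):
    # Pre-LN is OPT's default; post-LN is used by some OPT checkpoints.
    residual = x
    if do_layer_norm_before:
        x = norm(x)
    x = sublayer(x)
    x = residual + x
    if not do_layer_norm_before:
        x = norm(x)
    return x

x = torch.randn(2, 5, 8)
out = residual_block(x, nn.LayerNorm(8), nn.Linear(8, 8), do_layer_norm_before=True)
print(out.shape)  # torch.Size([2, 5, 8])
```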
4
1
47
9
28
11
4
0.37
1
4
1
0
2
9
2
12
96
18
57
25
45
21
39
16
36
7
1
1
8
4,322
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/opt/modeling_opt.py
transformers.models.opt.modeling_opt.OPTForCausalLM
from typing import Callable, Optional, Union import torch from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging from ...processing_utils import Unpack from torch import nn from ...generation import GenerationMixin from ...cache_utils import Cache, DynamicCache from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast class OPTForCausalLM(OPTPreTrainedModel, GenerationMixin): _tied_weights_keys = ['lm_head.weight'] def __init__(self, config): super().__init__(config) self.model = OPTModel(config) self.lm_head = nn.Linear(config.word_embed_proj_dim, config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, position_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, CausalLMOutputWithPast]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, OPTForCausalLM >>> model = OPTForCausalLM.from_pretrained("facebook/opt-350m") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\\nI'm not conscious. I'm just a little bit of a weirdo." 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model.decoder(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **kwargs) logits = self.lm_head(outputs[0]).contiguous() loss = None if labels is not None: labels = labels.to(logits.device) loss = self.loss_function(logits, labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
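A hedged loss-computation sketch for the class above; `facebook/opt-125m` is assumed available on the Hub (the record's own docstring example uses opt-350m):

```python
import torch
from transformers import AutoTokenizer, OPTForCausalLM

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
model = OPTForCausalLM.from_pretrained("facebook/opt-125m")

inputs = tokenizer("Paris is the capital of", return_tensors="pt")
with torch.no_grad():
    out = model(**inputs, labels=inputs["input_ids"])
# `loss_function` shifts labels internally, so this is next-token loss.
print(out.loss.item(), out.logits.shape)  # logits: (1, seq_len, vocab_size)
```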
class OPTForCausalLM(OPTPreTrainedModel, GenerationMixin): def __init__(self, config): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def set_decoder(self, decoder): pass def get_decoder(self): pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, position_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, CausalLMOutputWithPast]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoTokenizer, OPTForCausalLM >>> model = OPTForCausalLM.from_pretrained("facebook/opt-350m") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious. I'm just a little bit of a weirdo." ```''' pass
9
1
18
3
8
7
2
0.84
2
6
2
0
8
2
9
10
179
32
80
35
54
67
38
19
28
7
2
1
16
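The `-100` masking convention documented in `OPTForCausalLM.forward` above can be illustrated in isolation. Below is a minimal sketch of the shifted next-token loss with ignored positions, using toy tensors rather than the model's actual `loss_function` helper (all shapes and values here are made up for illustration):

```python
import torch
import torch.nn.functional as F

# Toy logits: batch of 1, sequence of 4 tokens, vocabulary of 10.
logits = torch.randn(1, 4, 10)
labels = torch.tensor([[5, 2, -100, 7]])  # -100 marks positions excluded from the loss

# Shift so that the logits at position t are scored against the token at t + 1.
shift_logits = logits[:, :-1, :].contiguous()
shift_labels = labels[:, 1:].contiguous()

# cross_entropy skips targets equal to ignore_index when averaging.
loss = F.cross_entropy(
    shift_logits.view(-1, shift_logits.size(-1)),
    shift_labels.view(-1),
    ignore_index=-100,
)
print(loss)
```

Only the two positions whose shifted labels are 2 and 7 contribute to the averaged loss here; the `-100` slot is dropped.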
4,323
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/opt/modeling_opt.py
transformers.models.opt.modeling_opt.OPTForQuestionAnswering
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast from torch import nn from ...cache_utils import Cache, DynamicCache from .configuration_opt import OPTConfig from typing import Callable, Optional, Union from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss import torch from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging @auto_docstring class OPTForQuestionAnswering(OPTPreTrainedModel): def __init__(self, config: OPTConfig): super().__init__(config) self.model = OPTModel(config) self.qa_outputs = nn.Linear(config.word_embed_proj_dim, 2) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, position_ids: Optional[torch.LongTensor]=None) -> Union[tuple, QuestionAnsweringModelOutput]: """ Example: ```python >>> from transformers import AutoTokenizer, OPTForQuestionAnswering >>> import torch >>> torch.manual_seed(4) # doctest: +IGNORE_RESULT >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") >>> # note: we are loading a OPTForQuestionAnswering from the hub here, >>> # so the head will be randomly initialized, hence the predictions will be random >>> model = OPTForQuestionAnswering.from_pretrained("facebook/opt-350m") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> answer_start_index = outputs.start_logits.argmax() >>> answer_end_index = outputs.end_logits.argmax() >>> answer_offset = len(tokenizer(question)[0]) >>> predict_answer_tokens = inputs.input_ids[ ... 0, answer_offset + answer_start_index : answer_offset + answer_end_index + 1 ... 
] >>> predicted = tokenizer.decode(predict_answer_tokens) >>> predicted ' a nice puppet' ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] logits = self.qa_outputs(hidden_states) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index).to(logits.device) end_positions = end_positions.clamp(0, ignored_index).to(logits.device) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + transformer_outputs[2:] return (total_loss,) + output if total_loss is not None else output return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value
@auto_docstring class OPTForQuestionAnswering(OPTPreTrainedModel): def __init__(self, config: OPTConfig): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, position_ids: Optional[torch.LongTensor]=None) -> Union[tuple, QuestionAnsweringModelOutput]: ''' Example: ```python >>> from transformers import AutoTokenizer, OPTForQuestionAnswering >>> import torch >>> torch.manual_seed(4) # doctest: +IGNORE_RESULT >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") >>> # note: we are loading a OPTForQuestionAnswering from the hub here, >>> # so the head will be randomly initialized, hence the predictions will be random >>> model = OPTForQuestionAnswering.from_pretrained("facebook/opt-350m") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> answer_start_index = outputs.start_logits.argmax() >>> answer_end_index = outputs.end_logits.argmax() >>> answer_offset = len(tokenizer(question)[0]) >>> predict_answer_tokens = inputs.input_ids[ ... 0, answer_offset + answer_start_index : answer_offset + answer_end_index + 1 ... ] >>> predicted = tokenizer.decode(predict_answer_tokens) >>> predicted ' a nice puppet' ```''' pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass
7
1
30
4
16
9
3
0.53
1
6
3
0
4
2
4
5
124
20
68
32
47
36
35
17
30
7
2
2
10
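To make the span-extraction head above concrete: `qa_outputs` is an `nn.Linear(hidden, 2)` whose output is split into start and end logits, and the doctest decodes the span between the two argmaxes. A small standalone sketch of that split-and-argmax step on random tensors (the hidden size of 16 is an arbitrary stand-in):

```python
import torch
from torch import nn

hidden_states = torch.randn(1, 8, 16)  # (batch, seq_len, hidden)
qa_outputs = nn.Linear(16, 2)

logits = qa_outputs(hidden_states)     # (1, 8, 2)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()  # (1, 8)
end_logits = end_logits.squeeze(-1).contiguous()      # (1, 8)

# Most likely start/end token indices; the doctest slices input_ids with these.
start, end = start_logits.argmax(-1).item(), end_logits.argmax(-1).item()
print(start, end)
```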
4,324
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/opt/modeling_opt.py
transformers.models.opt.modeling_opt.OPTForSequenceClassification
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast from ...cache_utils import Cache, DynamicCache from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging from typing import Callable, Optional, Union from .configuration_opt import OPTConfig from torch import nn import torch @auto_docstring(custom_intro='\n The OPT Model transformer with a sequence classification head on top (linear layer).\n\n [`OPTForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-2) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ') class OPTForSequenceClassification(OPTPreTrainedModel): def __init__(self, config: OPTConfig): super().__init__(config) self.num_labels = config.num_labels self.model = OPTModel(config) self.score = nn.Linear(config.word_embed_proj_dim, self.num_labels, bias=False) self.post_init() @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, position_ids: Optional[torch.LongTensor]=None) -> Union[tuple, SequenceClassifierOutputWithPast]: """ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.model(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] if self.config.pad_token_id is None and batch_size != 1: raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.') if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once(f'{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be unexpected if using padding tokens in conjunction with `inputs_embeds.`') pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = 'single_label_classification' else: self.config.problem_type = 'multi_label_classification' if self.config.problem_type == 'regression': loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == 'single_label_classification': loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == 'multi_label_classification': loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return (loss,) + output if loss is not None else output return SequenceClassifierOutputWithPast(loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions) def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value
@auto_docstring(custom_intro='\n The OPT Model transformer with a sequence classification head on top (linear layer).\n\n [`OPTForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n (e.g. GPT-2) do.\n\n Since it does classification on the last token, it requires to know the position of the last token. If a\n `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n each row of the batch).\n ') class OPTForSequenceClassification(OPTPreTrainedModel): def __init__(self, config: OPTConfig): pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, position_ids: Optional[torch.LongTensor]=None) -> Union[tuple, SequenceClassifierOutputWithPast]: ''' labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). ''' pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass
7
1
26
2
22
2
5
0.08
1
8
3
0
4
3
4
5
117
11
98
33
72
8
49
19
44
16
2
3
19
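The pooling trick in `OPTForSequenceClassification.forward` above — picking the logits of the last non-padding token per row — reduces to a single `argmax`. A standalone replay on a toy batch (the pad id of 1 is just an example value; the model reads it from `config.pad_token_id`):

```python
import torch

pad_token_id = 1  # example value standing in for config.pad_token_id
input_ids = torch.tensor([[5, 6, 7, 1, 1],
                          [8, 9, 1, 1, 1]])

non_pad_mask = (input_ids != pad_token_id).to(torch.int32)
token_indices = torch.arange(input_ids.shape[-1], dtype=torch.int32)

# index * mask peaks at the last non-padding position, so argmax recovers it.
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
print(last_non_pad_token)  # tensor([2, 1])
```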
4,325
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/opt/modeling_opt.py
transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding
from typing import Callable, Optional, Union import torch from torch import nn class OPTLearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int): self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim) def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int=0, position_ids: Optional[torch.LongTensor]=None): """`attention_mask` is expected to be [bsz x seqlen].""" if position_ids is None: position_ids = torch.cumsum(attention_mask, dim=1) position_ids = (position_ids * attention_mask - 1).long() position_ids = position_ids[:, past_key_values_length:] return super().forward(position_ids + self.offset)
class OPTLearnedPositionalEmbedding(nn.Embedding): ''' This module learns positional embeddings up to a fixed maximum size. ''' def __init__(self, num_embeddings: int, embedding_dim: int): pass def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int=0, position_ids: Optional[torch.LongTensor]=None): '''`attention_mask` is expected to be [bsz x seqlen].''' pass
3
2
10
1
7
2
2
0.47
1
2
0
0
2
1
2
2
26
4
15
9
7
7
10
4
7
2
1
1
3
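The cumulative-sum trick in `OPTLearnedPositionalEmbedding.forward` above derives position ids directly from the attention mask, and the `offset` of 2 shifts every lookup past the reserved embedding rows. Replayed on a toy mask:

```python
import torch

offset = 2  # OPT reserves the first two rows of the embedding table
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])

position_ids = torch.cumsum(attention_mask, dim=1)         # [[1, 2, 3, 3, 3]]
position_ids = (position_ids * attention_mask - 1).long()  # [[0, 1, 2, -1, -1]]

# After the offset, real tokens look up rows 2, 3, 4 while every padding
# position collapses onto row 1.
print(position_ids + offset)  # tensor([[2, 3, 4, 1, 1]])
```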
4,326
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/opt/modeling_opt.py
transformers.models.opt.modeling_opt.OPTModel
from ...modeling_flash_attention_utils import FlashAttentionKwargs from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging from ...processing_utils import Unpack from .configuration_opt import OPTConfig from ...cache_utils import Cache, DynamicCache import torch from typing import Callable, Optional, Union from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast @auto_docstring class OPTModel(OPTPreTrainedModel): def __init__(self, config: OPTConfig): super().__init__(config) self.decoder = OPTDecoder(config) self.post_init() def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, value): self.decoder.embed_tokens = value @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, position_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Union[tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict decoder_outputs = self.decoder(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **kwargs) return BaseModelOutputWithPast(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions)
@auto_docstring class OPTModel(OPTPreTrainedModel): def __init__(self, config: OPTConfig): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, position_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Union[tuple, BaseModelOutputWithPast]: pass
8
0
11
1
10
0
2
0.04
1
6
3
0
5
1
5
6
66
7
57
21
32
2
20
8
14
6
2
1
10
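`OPTModel` above is a thin wrapper that forwards everything to its `OPTDecoder` and repackages the result as a `BaseModelOutputWithPast`. A brief usage sketch with the `facebook/opt-350m` checkpoint referenced elsewhere in these records (downloading the weights requires network access):

```python
import torch
from transformers import AutoTokenizer, OPTModel

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
model = OPTModel.from_pretrained("facebook/opt-350m")

inputs = tokenizer("Hello, world!", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# The wrapper returns the decoder's hidden states unchanged.
print(outputs.last_hidden_state.shape)  # (batch, seq_len, word_embed_proj_dim)
```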
4,327
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/opt/modeling_opt.py
transformers.models.opt.modeling_opt.OPTPreTrainedModel
from torch import nn from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel from .configuration_opt import OPTConfig @auto_docstring class OPTPreTrainedModel(PreTrainedModel): config: OPTConfig base_model_prefix = 'model' supports_gradient_checkpointing = True _no_split_modules = ['OPTDecoderLayer'] _supports_attention_backend = True _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _can_compile_fullgraph = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.weight.data.fill_(1.0) module.bias.data.zero_()
@auto_docstring class OPTPreTrainedModel(PreTrainedModel): def _init_weights(self, module): pass
3
0
10
0
10
0
5
0
1
0
0
5
1
0
1
1
18
1
17
9
15
0
16
9
14
5
1
2
5
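The `_init_weights` hook above encodes a common initialization pattern: normal(0, `init_std`) for linear and embedding weights, zeros for biases and the padding row, ones for LayerNorm scales. A self-contained sketch of the same pattern applied through `nn.Module.apply` (the `std` value stands in for `config.init_std`):

```python
import torch
from torch import nn

std = 0.02  # stands in for config.init_std

def init_weights(module):
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    elif isinstance(module, nn.LayerNorm):
        module.weight.data.fill_(1.0)
        module.bias.data.zero_()

toy = nn.Sequential(nn.Embedding(10, 4, padding_idx=0), nn.Linear(4, 4), nn.LayerNorm(4))
toy.apply(init_weights)  # .apply visits every submodule recursively
print(toy[0].weight[0])  # the padding row is zeroed
```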
4,328
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/configuration_owlv2.py
transformers.models.owlv2.configuration_owlv2.Owlv2Config
from ...configuration_utils import PretrainedConfig class Owlv2Config(PretrainedConfig): """ [`Owlv2Config`] is the configuration class to store the configuration of an [`Owlv2Model`]. It is used to instantiate an OWLv2 model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWLv2 [google/owlv2-base-patch16](https://huggingface.co/google/owlv2-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Owlv2TextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Owlv2VisionConfig`]. projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original OWLv2 implementation. return_dict (`bool`, *optional*, defaults to `True`): Whether or not the model should return a dictionary. If `False`, returns a tuple. kwargs (*optional*): Dictionary of keyword arguments. """ model_type = 'owlv2' sub_configs = {'text_config': Owlv2TextConfig, 'vision_config': Owlv2VisionConfig} def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, return_dict=True, **kwargs): super().__init__(**kwargs) if text_config is None: text_config = {} logger.info('text_config is None. Initializing the Owlv2TextConfig with default values.') if vision_config is None: vision_config = {} logger.info('vision_config is None. initializing the Owlv2VisionConfig with default values.') self.text_config = Owlv2TextConfig(**text_config) self.vision_config = Owlv2VisionConfig(**vision_config) self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value self.return_dict = return_dict self.initializer_factor = 1.0 @classmethod def from_text_vision_configs(cls, text_config: dict, vision_config: dict, **kwargs): """ Instantiate a [`Owlv2Config`] (or a derived class) from owlv2 text model configuration and owlv2 vision model configuration. Returns: [`Owlv2Config`]: An instance of a configuration object """ config_dict = {} config_dict['text_config'] = text_config config_dict['vision_config'] = vision_config return cls.from_dict(config_dict, **kwargs)
class Owlv2Config(PretrainedConfig): ''' [`Owlv2Config`] is the configuration class to store the configuration of an [`Owlv2Model`]. It is used to instantiate an OWLv2 model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWLv2 [google/owlv2-base-patch16](https://huggingface.co/google/owlv2-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Owlv2TextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`Owlv2VisionConfig`]. projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original OWLv2 implementation. return_dict (`bool`, *optional*, defaults to `True`): Whether or not the model should return a dictionary. If `False`, returns a tuple. kwargs (*optional*): Dictionary of keyword arguments. ''' def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, return_dict=True, **kwargs): pass @classmethod def from_text_vision_configs(cls, text_config: dict, vision_config: dict, **kwargs): ''' Instantiate a [`Owlv2Config`] (or a derived class) from owlv2 text model configuration and owlv2 vision model configuration. Returns: [`Owlv2Config`]: An instance of a configuration object ''' pass
4
2
20
3
14
3
2
0.9
1
3
2
0
1
6
2
2
70
11
31
21
19
28
22
12
19
3
1
1
4
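`Owlv2Config.from_text_vision_configs` above simply stuffs the two dicts into one `config_dict` and defers to `from_dict`. A usage sketch composing the two sub-configs (the hyperparameter overrides are arbitrary example values):

```python
from transformers import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig

text_config = Owlv2TextConfig(num_hidden_layers=6)       # arbitrary override
vision_config = Owlv2VisionConfig(num_hidden_layers=6)   # arbitrary override

config = Owlv2Config.from_text_vision_configs(
    text_config.to_dict(), vision_config.to_dict(), projection_dim=512
)
print(type(config.text_config).__name__)  # Owlv2TextConfig
print(config.projection_dim)              # 512
```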
4,329
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/configuration_owlv2.py
transformers.models.owlv2.configuration_owlv2.Owlv2TextConfig
from ...configuration_utils import PretrainedConfig class Owlv2TextConfig(PretrainedConfig): """ This is the configuration class to store the configuration of an [`Owlv2TextModel`]. It is used to instantiate an Owlv2 text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Owlv2 [google/owlv2-base-patch16](https://huggingface.co/google/owlv2-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 49408): Vocabulary size of the OWLv2 text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Owlv2TextModel`]. hidden_size (`int`, *optional*, defaults to 512): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 16): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). pad_token_id (`int`, *optional*, defaults to 0): The id of the padding token in the input sequences. bos_token_id (`int`, *optional*, defaults to 49406): The id of the beginning-of-sequence token in the input sequences. eos_token_id (`int`, *optional*, defaults to 49407): The id of the end-of-sequence token in the input sequences. 
Example: ```python >>> from transformers import Owlv2TextConfig, Owlv2TextModel >>> # Initializing a Owlv2TextModel with google/owlv2-base-patch16 style configuration >>> configuration = Owlv2TextConfig() >>> # Initializing a Owlv2TextConfig from the google/owlv2-base-patch16 style configuration >>> model = Owlv2TextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'owlv2_text_model' base_config_key = 'text_config' def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor
class Owlv2TextConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of an [`Owlv2TextModel`]. It is used to instantiate an Owlv2 text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Owlv2 [google/owlv2-base-patch16](https://huggingface.co/google/owlv2-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 49408): Vocabulary size of the OWLv2 text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Owlv2TextModel`]. hidden_size (`int`, *optional*, defaults to 512): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 16): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). pad_token_id (`int`, *optional*, defaults to 0): The id of the padding token in the input sequences. bos_token_id (`int`, *optional*, defaults to 49406): The id of the beginning-of-sequence token in the input sequences. eos_token_id (`int`, *optional*, defaults to 49407): The id of the end-of-sequence token in the input sequences. Example: ```python >>> from transformers import Owlv2TextConfig, Owlv2TextModel >>> # Initializing a Owlv2TextModel with google/owlv2-base-patch16 style configuration >>> configuration = Owlv2TextConfig() >>> # Initializing a Owlv2TextConfig from the google/owlv2-base-patch16 style configuration >>> model = Owlv2TextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs): pass
2
1
31
1
30
0
1
1.52
1
1
0
0
1
11
1
1
94
11
33
32
14
50
16
15
14
1
1
0
1
4,330
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/configuration_owlv2.py
transformers.models.owlv2.configuration_owlv2.Owlv2VisionConfig
from ...configuration_utils import PretrainedConfig class Owlv2VisionConfig(PretrainedConfig): """ This is the configuration class to store the configuration of an [`Owlv2VisionModel`]. It is used to instantiate an OWLv2 image encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWLv2 [google/owlv2-base-patch16](https://huggingface.co/google/owlv2-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input images. image_size (`int`, *optional*, defaults to 768): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). Example: ```python >>> from transformers import Owlv2VisionConfig, Owlv2VisionModel >>> # Initializing a Owlv2VisionModel with google/owlv2-base-patch16 style configuration >>> configuration = Owlv2VisionConfig() >>> # Initializing a Owlv2VisionModel model from the google/owlv2-base-patch16 style configuration >>> model = Owlv2VisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'owlv2_vision_model' base_config_key = 'vision_config' def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=768, patch_size=16, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor
class Owlv2VisionConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of an [`Owlv2VisionModel`]. It is used to instantiate an OWLv2 image encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWLv2 [google/owlv2-base-patch16](https://huggingface.co/google/owlv2-base-patch16) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input images. image_size (`int`, *optional*, defaults to 768): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). Example: ```python >>> from transformers import Owlv2VisionConfig, Owlv2VisionModel >>> # Initializing a Owlv2VisionModel with google/owlv2-base-patch16 style configuration >>> configuration = Owlv2VisionConfig() >>> # Initializing a Owlv2VisionModel model from the google/owlv2-base-patch16 style configuration >>> model = Owlv2VisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=768, patch_size=16, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs): pass
2
1
30
1
29
0
1
1.38
1
1
0
0
1
12
1
1
86
10
32
31
15
44
17
16
15
1
1
0
1
4,331
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/image_processing_owlv2.py
transformers.models.owlv2.image_processing_owlv2.Owlv2ImageProcessor
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from typing import TYPE_CHECKING, Optional, Union from ...image_transforms import center_to_corners_format, pad, to_channel_dimension_format import numpy as np from ...utils import TensorType, filter_out_non_signature_kwargs, is_scipy_available, is_torch_available, is_vision_available, logging, requires_backends import warnings from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments class Owlv2ImageProcessor(BaseImageProcessor): """ Constructs an OWLv2 image processor. Args: do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Whether to pad the image to a square with gray pixels on the bottom and the right. Can be overridden by `do_pad` in the `preprocess` method. do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`dict[str, int]` *optional*, defaults to `{"height": 960, "width": 960}`): Size to resize the image to. Can be overridden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`): Resampling method to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. 
""" model_input_names = ['pixel_values'] def __init__(self, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_pad: bool=True, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None: super().__init__(**kwargs) self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad self.do_resize = do_resize self.size = size if size is not None else {'height': 960, 'width': 960} self.resample = resample self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD def pad(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None): """ Pad an image to a square with gray pixels on the bottom and the right, as per the original OWLv2 implementation. Args: image (`np.ndarray`): Image to pad. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. """ height, width = get_image_size(image) size = max(height, width) image = pad(image=image, padding=((0, size - height), (0, size - width)), constant_values=0.5, data_format=data_format, input_data_format=input_data_format) return image def resize(self, image: np.ndarray, size: dict[str, int], anti_aliasing: bool=True, anti_aliasing_sigma=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: """ Resize an image as per the original implementation. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): Dictionary containing the height and width to resize the image to. anti_aliasing (`bool`, *optional*, defaults to `True`): Whether to apply anti-aliasing when downsampling the image. anti_aliasing_sigma (`float`, *optional*, defaults to `None`): Standard deviation for Gaussian kernel when downsampling the image. If `None`, it will be calculated automatically. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. 
""" requires_backends(self, 'scipy') output_shape = (size['height'], size['width']) image = to_channel_dimension_format(image, ChannelDimension.LAST) image, output_shape = _preprocess_resize_output_shape(image, output_shape) input_shape = image.shape factors = np.divide(input_shape, output_shape) ndi_mode = 'mirror' cval = 0 order = 1 if anti_aliasing: if anti_aliasing_sigma is None: anti_aliasing_sigma = np.maximum(0, (factors - 1) / 2) else: anti_aliasing_sigma = np.atleast_1d(anti_aliasing_sigma) * np.ones_like(factors) if np.any(anti_aliasing_sigma < 0): raise ValueError('Anti-aliasing standard deviation must be greater than or equal to zero') elif np.any((anti_aliasing_sigma > 0) & (factors <= 1)): warnings.warn('Anti-aliasing standard deviation greater than zero but not down-sampling along all axes') filtered = ndi.gaussian_filter(image, anti_aliasing_sigma, cval=cval, mode=ndi_mode) else: filtered = image zoom_factors = [1 / f for f in factors] out = ndi.zoom(filtered, zoom_factors, order=order, mode=ndi_mode, cval=cval, grid_mode=True) image = _clip_warp_output(image, out) image = to_channel_dimension_format(image, input_data_format, ChannelDimension.LAST) image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image return image @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_pad: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_pad (`bool`, *optional*, defaults to `self.do_pad`): Whether to pad the image to a square with gray pixels on the bottom and the right. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`dict[str, int]`, *optional*, defaults to `self.size`): Size to resize the image to. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. 
Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_pad = do_pad if do_pad is not None else self.do_pad do_resize = do_resize if do_resize is not None else self.do_resize do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std size = size if size is not None else self.size size = get_size_dict(size) images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, size=size) images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.') if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if do_rescale: images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images] if do_pad: images = [self.pad(image=image, input_data_format=input_data_format) for image in images] if do_resize: images = [self.resize(image=image, size=size, input_data_format=input_data_format) for image in images] if do_normalize: images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images] images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] data = {'pixel_values': images} return BatchFeature(data=data, tensor_type=return_tensors) def post_process_object_detection(self, outputs: 'Owlv2ObjectDetectionOutput', threshold: float=0.1, target_sizes: Optional[Union[TensorType, list[tuple]]]=None): """ Converts the raw output of [`Owlv2ForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Args: outputs ([`Owlv2ObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.1): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. If unset, predictions will not be resized. 
Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. - "labels": Indexes of the classes predicted by the model on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. """ batch_logits, batch_boxes = (outputs.logits, outputs.pred_boxes) batch_size = len(batch_logits) if target_sizes is not None and len(target_sizes) != batch_size: raise ValueError('Make sure that you pass in as many target sizes as images') batch_class_logits = torch.max(batch_logits, dim=-1) batch_scores = torch.sigmoid(batch_class_logits.values) batch_labels = batch_class_logits.indices batch_boxes = center_to_corners_format(batch_boxes) if target_sizes is not None: batch_boxes = _scale_boxes(batch_boxes, target_sizes) results = [] for scores, labels, boxes in zip(batch_scores, batch_labels, batch_boxes): keep = scores > threshold scores = scores[keep] labels = labels[keep] boxes = boxes[keep] results.append({'scores': scores, 'labels': labels, 'boxes': boxes}) return results def post_process_image_guided_detection(self, outputs, threshold=0.0, nms_threshold=0.3, target_sizes=None): """ Converts the output of [`OwlViTForObjectDetection.image_guided_detection`] into the format expected by the COCO api. Args: outputs ([`OwlViTImageGuidedObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.0): Minimum confidence threshold to use to filter out predicted boxes. nms_threshold (`float`, *optional*, defaults to 0.3): IoU threshold for non-maximum suppression of overlapping boxes. target_sizes (`torch.Tensor`, *optional*): Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to None, predictions will not be unnormalized. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. All labels are set to None as `OwlViTForObjectDetection.image_guided_detection` perform one-shot object detection. 
""" logits, target_boxes = (outputs.logits, outputs.target_pred_boxes) if target_sizes is not None and len(logits) != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') if target_sizes is not None and target_sizes.shape[1] != 2: raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch') probs = torch.max(logits, dim=-1) scores = torch.sigmoid(probs.values) target_boxes = center_to_corners_format(target_boxes) if nms_threshold < 1.0: for idx in range(target_boxes.shape[0]): for i in torch.argsort(-scores[idx]): if not scores[idx][i]: continue ious = box_iou(target_boxes[idx][i, :].unsqueeze(0), target_boxes[idx])[0][0] ious[i] = -1.0 scores[idx][ious > nms_threshold] = 0.0 if target_sizes is not None: target_boxes = _scale_boxes(target_boxes, target_sizes) results = [] alphas = torch.zeros_like(scores) for idx in range(target_boxes.shape[0]): query_scores = scores[idx] if not query_scores.nonzero().numel(): continue query_scores[query_scores < threshold] = 0.0 max_score = torch.max(query_scores) + 1e-06 query_alphas = (query_scores - max_score * 0.1) / (max_score * 0.9) query_alphas = torch.clip(query_alphas, 0.0, 1.0) alphas[idx] = query_alphas mask = alphas[idx] > 0 box_scores = alphas[idx][mask] boxes = target_boxes[idx][mask] results.append({'scores': box_scores, 'labels': None, 'boxes': boxes}) return results
class Owlv2ImageProcessor(BaseImageProcessor):
    '''
    Constructs an OWLv2 image processor.

    Args:
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method.
        do_pad (`bool`, *optional*, defaults to `True`):
            Whether to pad the image to a square with gray pixels on the bottom and the right. Can be overridden by `do_pad` in the `preprocess` method.
        do_resize (`bool`, *optional*, defaults to `True`):
            Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method.
        size (`dict[str, int]`, *optional*, defaults to `{"height": 960, "width": 960}`):
            Size to resize the image to. Can be overridden by `size` in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            Resampling method to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
    '''

    def __init__(self, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_pad: bool=True, do_resize: bool=True, size: Optional[dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_normalize: bool=True, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, **kwargs) -> None:
        pass

    def pad(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):
        '''
        Pad an image to a square with gray pixels on the bottom and the right, as per the original OWLv2 implementation.

        Args:
            image (`np.ndarray`):
                Image to pad.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred from the input image.
        '''
        pass

    def resize(self, image: np.ndarray, size: dict[str, int], anti_aliasing: bool=True, anti_aliasing_sigma=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
        '''
        Resize an image as per the original implementation.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`dict[str, int]`):
                Dictionary containing the height and width to resize the image to.
            anti_aliasing (`bool`, *optional*, defaults to `True`):
                Whether to apply anti-aliasing when downsampling the image.
            anti_aliasing_sigma (`float`, *optional*, defaults to `None`):
                Standard deviation for Gaussian kernel when downsampling the image. If `None`, it will be calculated automatically.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred from the input image.
        '''
        pass

    @filter_out_non_signature_kwargs()
    def preprocess(self, images: ImageInput, do_pad: Optional[bool]=None, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:
        '''
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_pad (`bool`, *optional*, defaults to `self.do_pad`):
                Whether to pad the image to a square with gray pixels on the bottom and the right.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`dict[str, int]`, *optional*, defaults to `self.size`):
                Size to resize the image to.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
                Image mean.
            image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                    - Unset: Return a list of `np.ndarray`.
                    - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                    - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                    - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                    - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of:
                    - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                    - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        '''
        pass

    def post_process_object_detection(self, outputs: 'Owlv2ObjectDetectionOutput', threshold: float=0.1, target_sizes: Optional[Union[TensorType, list[tuple]]]=None):
        '''
        Converts the raw output of [`Owlv2ForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format.

        Args:
            outputs ([`Owlv2ObjectDetectionOutput`]):
                Raw outputs of the model.
            threshold (`float`, *optional*, defaults to 0.1):
                Score threshold to keep object detection predictions.
            target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. If unset, predictions will not be resized.

        Returns:
            `list[Dict]`: A list of dictionaries, each dictionary containing the following keys:
            - "scores": The confidence scores for each predicted box on the image.
            - "labels": Indexes of the classes predicted by the model on the image.
            - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format.
        '''
        pass

    def post_process_image_guided_detection(self, outputs, threshold=0.0, nms_threshold=0.3, target_sizes=None):
        '''
        Converts the output of [`OwlViTForObjectDetection.image_guided_detection`] into the format expected by the COCO api.

        Args:
            outputs ([`OwlViTImageGuidedObjectDetectionOutput`]):
                Raw outputs of the model.
            threshold (`float`, *optional*, defaults to 0.0):
                Minimum confidence threshold to use to filter out predicted boxes.
            nms_threshold (`float`, *optional*, defaults to 0.3):
                IoU threshold for non-maximum suppression of overlapping boxes.
            target_sizes (`torch.Tensor`, *optional*):
                Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to None, predictions will not be unnormalized.

        Returns:
            `list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. All labels are set to None as `OwlViTForObjectDetection.image_guided_detection` performs one-shot object detection.
        '''
        pass
8
6
63
7
35
21
7
0.74
1
10
2
0
6
9
6
26
422
52
214
97
161
158
127
51
120
16
3
4
41
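Annotation: the `post_process_image_guided_detection` body above flattens a greedy, score-ordered NMS into one line. Below is a minimal standalone sketch of the same suppression pattern; the `iou_matrix` helper is hypothetical and stands in for the library's `box_iou`.

```python
import torch

def iou_matrix(boxes_a: torch.Tensor, boxes_b: torch.Tensor) -> torch.Tensor:
    # Pairwise IoU for boxes in (x0, y0, x1, y1) corner format; shapes (N, 4) and (M, 4).
    area_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1])
    area_b = (boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1])
    lt = torch.max(boxes_a[:, None, :2], boxes_b[None, :, :2])
    rb = torch.min(boxes_a[:, None, 2:], boxes_b[None, :, 2:])
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area_a[:, None] + area_b[None, :] - inter)

def greedy_nms(scores: torch.Tensor, boxes: torch.Tensor, nms_threshold: float = 0.3) -> torch.Tensor:
    # Walk boxes in descending score order; zero out anything a surviving box overlaps too much.
    scores = scores.clone()
    for i in torch.argsort(-scores):
        if not scores[i]:  # already suppressed
            continue
        ious = iou_matrix(boxes[i].unsqueeze(0), boxes)[0]
        ious[i] = -1.0  # a box never suppresses itself
        scores[ious > nms_threshold] = 0.0
    return scores
```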
4,332
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2Attention
from torch import Tensor, nn
import torch
from typing import Any, Optional, Union

class Owlv2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).')
        self.scale = self.head_dim ** (-0.5)
        self.dropout = config.attention_dropout
        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""
        bsz, tgt_len, embed_dim = hidden_states.size()
        query_states = self.q_proj(hidden_states) * self.scale
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)
        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}')
        if causal_attention_mask is not None:
            if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {causal_attention_mask.size()}')
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}')
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        attn_weights = nn.functional.softmax(attn_weights, dim=-1)
        if output_attentions:
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None
        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_probs = attn_probs.to(value_states.dtype)
        attn_output = torch.bmm(attn_probs, value_states)
        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}')
        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
        attn_output = self.out_proj(attn_output)
        return (attn_output, attn_weights_reshaped)
class Owlv2Attention(nn.Module):
    '''Multi-headed attention from 'Attention Is All You Need' paper'''
    def __init__(self, config):
        pass
    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        pass
    def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        '''Input shape: Batch x Time x Channel'''
        pass
4
2
33
6
25
3
4
0.12
1
5
0
0
3
10
3
13
105
20
76
30
66
9
55
24
51
8
1
2
11
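Annotation: a shape-only walkthrough of the `_shape`/`torch.bmm` flow in `Owlv2Attention.forward`, on random tensors with illustrative sizes. The learned projections are omitted, so this is a sketch of the bookkeeping, not the module itself.

```python
import torch

bsz, seq_len, embed_dim, num_heads = 2, 5, 8, 2
head_dim = embed_dim // num_heads
scale = head_dim ** (-0.5)

def shape(t):
    # Mirrors Owlv2Attention._shape, plus the flatten to (bsz * num_heads, seq_len, head_dim).
    return t.view(bsz, seq_len, num_heads, head_dim).transpose(1, 2).reshape(bsz * num_heads, seq_len, head_dim)

x = torch.randn(bsz, seq_len, embed_dim)
q, k, v = shape(x * scale), shape(x), shape(x)

attn = torch.bmm(q, k.transpose(1, 2)).softmax(dim=-1)  # (bsz * num_heads, seq_len, seq_len)
out = torch.bmm(attn, v)                                # (bsz * num_heads, seq_len, head_dim)
out = out.view(bsz, num_heads, seq_len, head_dim).transpose(1, 2).reshape(bsz, seq_len, embed_dim)
assert out.shape == x.shape
```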
4,333
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2BoxPredictionHead
from .configuration_owlv2 import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig
from torch import Tensor, nn
import torch

class Owlv2BoxPredictionHead(nn.Module):

    def __init__(self, config: Owlv2Config, out_dim: int=4):
        super().__init__()
        width = config.vision_config.hidden_size
        self.dense0 = nn.Linear(width, width)
        self.dense1 = nn.Linear(width, width)
        self.gelu = nn.GELU()
        self.dense2 = nn.Linear(width, out_dim)

    def forward(self, image_features: torch.Tensor) -> torch.FloatTensor:
        output = self.dense0(image_features)
        output = self.gelu(output)
        output = self.dense1(output)
        output = self.gelu(output)
        output = self.dense2(output)
        return output
class Owlv2BoxPredictionHead(nn.Module):
    def __init__(self, config: Owlv2Config, out_dim: int=4):
        pass
    def forward(self, image_features: torch.Tensor) -> torch.FloatTensor:
        pass
3
0
8
1
7
0
1
0
1
4
1
0
2
4
2
12
17
2
15
9
12
0
15
9
12
1
1
0
2
4,334
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2ClassPredictionHead
from torch import Tensor, nn
from typing import Any, Optional, Union
from .configuration_owlv2 import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig
import torch

class Owlv2ClassPredictionHead(nn.Module):

    def __init__(self, config: Owlv2Config):
        super().__init__()
        out_dim = config.text_config.hidden_size
        self.query_dim = config.vision_config.hidden_size
        self.dense0 = nn.Linear(self.query_dim, out_dim)
        self.logit_shift = nn.Linear(self.query_dim, 1)
        self.logit_scale = nn.Linear(self.query_dim, 1)
        self.elu = nn.ELU()

    def forward(self, image_embeds: torch.FloatTensor, query_embeds: Optional[torch.FloatTensor], query_mask: Optional[torch.Tensor]) -> tuple[torch.FloatTensor]:
        image_class_embeds = self.dense0(image_embeds)
        if query_embeds is None:
            device = image_class_embeds.device
            batch_size, num_patches = image_class_embeds.shape[:2]
            pred_logits = torch.zeros((batch_size, num_patches, self.query_dim)).to(device)
            return (pred_logits, image_class_embeds)
        image_class_embeds = image_class_embeds / (torch.linalg.norm(image_class_embeds, dim=-1, keepdim=True) + 1e-06)
        query_embeds = query_embeds / (torch.linalg.norm(query_embeds, dim=-1, keepdim=True) + 1e-06)
        pred_logits = torch.einsum('...pd,...qd->...pq', image_class_embeds, query_embeds)
        logit_shift = self.logit_shift(image_embeds)
        logit_scale = self.logit_scale(image_embeds)
        logit_scale = self.elu(logit_scale) + 1
        pred_logits = (pred_logits + logit_shift) * logit_scale
        if query_mask is not None:
            if query_mask.ndim > 1:
                query_mask = torch.unsqueeze(query_mask, dim=-2)
            pred_logits = torch.where(query_mask == 0, torch.finfo(pred_logits.dtype).min, pred_logits)
            pred_logits = pred_logits.to(torch.float32)
        return (pred_logits, image_class_embeds)
class Owlv2ClassPredictionHead(nn.Module):
    def __init__(self, config: Owlv2Config):
        pass
    def forward(self, image_embeds: torch.FloatTensor, query_embeds: Optional[torch.FloatTensor], query_mask: Optional[torch.Tensor]) -> tuple[torch.FloatTensor]:
        pass
3
0
22
4
17
2
3
0.09
1
3
1
0
2
5
2
12
46
9
34
20
26
3
29
15
26
4
1
2
5
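Annotation: the class head's normalize → einsum → shift/scale → mask pipeline, replayed on random data. The zero tensors below are placeholders for the learned `logit_shift`/`logit_scale` projections, so the numbers are illustrative only.

```python
import torch

image_embeds = torch.randn(1, 6, 8)  # (batch, num_patches, dim)
query_embeds = torch.randn(1, 2, 8)  # (batch, num_queries, dim)

img = image_embeds / (torch.linalg.norm(image_embeds, dim=-1, keepdim=True) + 1e-06)
qry = query_embeds / (torch.linalg.norm(query_embeds, dim=-1, keepdim=True) + 1e-06)
sim = torch.einsum('...pd,...qd->...pq', img, qry)  # cosine similarity, (1, 6, 2)

shift = torch.zeros(1, 6, 1)                               # stands in for self.logit_shift(image_embeds)
scale = torch.nn.functional.elu(torch.zeros(1, 6, 1)) + 1  # ELU + 1 keeps the scale strictly positive
logits = (sim + shift) * scale

query_mask = torch.tensor([[1, 0]])  # second query is padding
logits = logits.masked_fill(query_mask[:, None, :] == 0, torch.finfo(logits.dtype).min)
```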
4,335
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2Encoder
from typing import Any, Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from torch import Tensor, nn
import torch
from .configuration_owlv2 import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig

class Owlv2Encoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`Owlv2EncoderLayer`].

    Args:
        config: Owlv2Config
    """

    def __init__(self, config: Owlv2Config):
        super().__init__()
        self.layers = nn.ModuleList([Owlv2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
        """
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`).
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
class Owlv2Encoder(nn.Module):
    '''
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`Owlv2EncoderLayer`].

    Args:
        config: Owlv2Config
    '''
    def __init__(self, config: Owlv2Config):
        pass
    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
        '''
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`).
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        '''
        pass
3
2
39
3
25
11
7
0.56
1
8
3
0
2
2
2
12
87
9
50
18
39
28
26
10
23
12
1
2
13
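Annotation: the encoder's tuple bookkeeping in isolation, with identity layers standing in for `Owlv2EncoderLayer`; it shows why `hidden_states` collections end up with `num_layers + 1` entries.

```python
import torch
from torch import nn

layers = nn.ModuleList([nn.Identity() for _ in range(3)])
hidden_states = torch.randn(2, 4, 8)
encoder_states = ()
for layer in layers:
    encoder_states = encoder_states + (hidden_states,)  # state *before* each layer
    hidden_states = layer(hidden_states)
encoder_states = encoder_states + (hidden_states,)      # final state after the stack
assert len(encoder_states) == len(layers) + 1
```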
4,336
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2EncoderLayer
from .configuration_owlv2 import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig
from typing import Any, Optional, Union
from ...modeling_layers import GradientCheckpointingLayer
import torch
from torch import Tensor, nn

class Owlv2EncoderLayer(GradientCheckpointingLayer):

    def __init__(self, config: Owlv2Config):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = Owlv2Attention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = Owlv2MLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding
                elements are indicated by very large negative values. `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions)
        hidden_states = residual + hidden_states
        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs
class Owlv2EncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Owlv2Config):
        pass
    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
        '''
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding
                elements are indicated by very large negative values. `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
        '''
        pass
3
1
23
3
16
5
2
0.31
1
6
3
0
2
5
2
12
48
6
32
17
23
10
21
11
18
2
1
1
3
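Annotation: `Owlv2EncoderLayer` is a standard pre-LayerNorm transformer block. A minimal equivalent with `nn.MultiheadAttention` substituted for `Owlv2Attention` (sizes are illustrative, not from the config):

```python
import torch
from torch import nn

dim = 8
norm1, norm2 = nn.LayerNorm(dim), nn.LayerNorm(dim)
attn = nn.MultiheadAttention(dim, num_heads=2, batch_first=True)
mlp = nn.Sequential(nn.Linear(dim, 4 * dim), nn.GELU(), nn.Linear(4 * dim, dim))

x = torch.randn(2, 5, dim)
h = norm1(x)           # normalize *before* attention (pre-norm)
h, _ = attn(h, h, h)
x = x + h              # first residual connection
x = x + mlp(norm2(x))  # second residual connection around the MLP
```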
4,337
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2ForObjectDetection
from typing import Any, Optional, Union
from torch import Tensor, nn
import torch
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int
from functools import lru_cache
from .configuration_owlv2 import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig

class Owlv2ForObjectDetection(Owlv2PreTrainedModel):
    config: Owlv2Config

    def __init__(self, config: Owlv2Config):
        super().__init__(config)
        self.owlv2 = Owlv2Model(config)
        self.class_head = Owlv2ClassPredictionHead(config)
        self.box_head = Owlv2BoxPredictionHead(config)
        self.objectness_head = Owlv2BoxPredictionHead(config, out_dim=1)
        self.layer_norm = nn.LayerNorm(config.vision_config.hidden_size, eps=config.vision_config.layer_norm_eps)
        self.sigmoid = nn.Sigmoid()
        self.config = config
        self.num_patches_height = self.config.vision_config.image_size // self.config.vision_config.patch_size
        self.num_patches_width = self.config.vision_config.image_size // self.config.vision_config.patch_size
        self.box_bias = self.compute_box_bias(self.num_patches_height, self.num_patches_width)
        self.post_init()

    @staticmethod
    def normalize_grid_corner_coordinates(num_patches_height: int, num_patches_width: int) -> torch.Tensor:
        x_coordinates = torch.arange(1, num_patches_width + 1, dtype=torch.float32)
        y_coordinates = torch.arange(1, num_patches_height + 1, dtype=torch.float32)
        xx, yy = torch.meshgrid(x_coordinates, y_coordinates, indexing='xy')
        box_coordinates = torch.stack((xx, yy), dim=-1)
        box_coordinates[..., 0] /= num_patches_width
        box_coordinates[..., 1] /= num_patches_height
        box_coordinates = box_coordinates.view(-1, 2)
        return box_coordinates

    def objectness_predictor(self, image_features: torch.FloatTensor) -> torch.FloatTensor:
        """Predicts the probability that each image feature token is an object.

        Args:
            image_features (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_dim)`)):
                Features extracted from the image.

        Returns:
            Objectness scores.
        """
        image_features = image_features.detach()
        objectness_logits = self.objectness_head(image_features)
        objectness_logits = objectness_logits[..., 0]
        return objectness_logits

    @lru_cache(maxsize=2)
    def compute_box_bias(self, num_patches_height: int, num_patches_width: int, feature_map: Optional[torch.FloatTensor]=None) -> torch.Tensor:
        if feature_map is not None:
            raise ValueError('feature_map has been deprecated as an input. Please pass in num_patches instead')
        box_coordinates = self.normalize_grid_corner_coordinates(num_patches_height, num_patches_width)
        box_coordinates = torch.clip(box_coordinates, 0.0, 1.0)
        box_coord_bias = torch.log(box_coordinates + 0.0001) - torch.log1p(-box_coordinates + 0.0001)
        box_size = torch.full_like(box_coord_bias, 1.0)
        box_size[..., 0] /= num_patches_width
        box_size[..., 1] /= num_patches_height
        box_size_bias = torch.log(box_size + 0.0001) - torch.log1p(-box_size + 0.0001)
        box_bias = torch.cat([box_coord_bias, box_size_bias], dim=-1)
        return box_bias

    def box_predictor(self, image_feats: torch.FloatTensor, feature_map: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
        """
        Args:
            image_feats:
                Features extracted from the image, returned by the `image_text_embedder` method.
            feature_map:
                A spatial re-arrangement of image_features, also returned by the `image_text_embedder` method.
            interpolate_pos_encoding:
                Whether to interpolate the pre-trained position encodings.

        Returns:
            pred_boxes:
                List of predicted boxes (cxcywh normalized to 0, 1) nested within a dictionary.
        """
        pred_boxes = self.box_head(image_feats)
        if interpolate_pos_encoding:
            _, num_patches_height, num_patches_width, _ = feature_map.shape
            box_bias = self.compute_box_bias(num_patches_height, num_patches_width)
        else:
            box_bias = self.box_bias
        box_bias = box_bias.to(feature_map.device)
        pred_boxes += box_bias
        pred_boxes = self.sigmoid(pred_boxes)
        return pred_boxes

    def class_predictor(self, image_feats: torch.FloatTensor, query_embeds: Optional[torch.FloatTensor]=None, query_mask: Optional[torch.Tensor]=None) -> tuple[torch.FloatTensor]:
        """
        Args:
            image_feats:
                Features extracted from the `image_text_embedder`.
            query_embeds:
                Text query embeddings.
            query_mask:
                Must be provided with query_embeddings. A mask indicating which query embeddings are valid.
        """
        pred_logits, image_class_embeds = self.class_head(image_feats, query_embeds, query_mask)
        return (pred_logits, image_class_embeds)

    def image_text_embedder(self, input_ids: torch.Tensor, pixel_values: torch.FloatTensor, attention_mask: torch.Tensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> tuple[torch.FloatTensor]:
        outputs = self.owlv2(pixel_values=pixel_values, input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)
        if interpolate_pos_encoding:
            _, _, height, width = pixel_values.shape
            num_patches_height = height // self.config.vision_config.patch_size
            num_patches_width = width // self.config.vision_config.patch_size
        else:
            num_patches_height = self.num_patches_height
            num_patches_width = self.num_patches_width
        last_hidden_state = outputs.vision_model_output[0]
        image_embeds = self.owlv2.vision_model.post_layernorm(last_hidden_state)
        class_token_out = torch.broadcast_to(image_embeds[:, :1, :], image_embeds[:, :-1].shape)
        image_embeds = image_embeds[:, 1:, :] * class_token_out
        image_embeds = self.layer_norm(image_embeds)
        new_size = (image_embeds.shape[0], num_patches_height, num_patches_width, image_embeds.shape[-1])
        image_embeds = image_embeds.reshape(new_size)
        text_embeds = outputs[-4]
        return (text_embeds, image_embeds, outputs)

    def image_embedder(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> tuple[torch.FloatTensor]:
        vision_outputs = self.owlv2.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)
        if interpolate_pos_encoding:
            _, _, height, width = pixel_values.shape
            num_patches_height = height // self.config.vision_config.patch_size
            num_patches_width = width // self.config.vision_config.patch_size
        else:
            num_patches_height = self.num_patches_height
            num_patches_width = self.num_patches_width
        last_hidden_state = vision_outputs[0]
        image_embeds = self.owlv2.vision_model.post_layernorm(last_hidden_state)
        class_token_out = torch.broadcast_to(image_embeds[:, :1, :], image_embeds[:, :-1].shape)
        image_embeds = image_embeds[:, 1:, :] * class_token_out
        image_embeds = self.layer_norm(image_embeds)
        new_size = (image_embeds.shape[0], num_patches_height, num_patches_width, image_embeds.shape[-1])
        image_embeds = image_embeds.reshape(new_size)
        return (image_embeds, vision_outputs)

    def embed_image_query(self, query_image_features: torch.FloatTensor, query_feature_map: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
        _, class_embeds = self.class_predictor(query_image_features)
        pred_boxes = self.box_predictor(query_image_features, query_feature_map, interpolate_pos_encoding)
        pred_boxes_as_corners = center_to_corners_format(pred_boxes)
        best_class_embeds = []
        best_box_indices = []
        pred_boxes_device = pred_boxes_as_corners.device
        for i in range(query_image_features.shape[0]):
            each_query_box = torch.tensor([[0, 0, 1, 1]], device=pred_boxes_device)
            each_query_pred_boxes = pred_boxes_as_corners[i]
            ious, _ = box_iou(each_query_box, each_query_pred_boxes)
            if torch.all(ious[0] == 0.0):
                ious = generalized_box_iou(each_query_box, each_query_pred_boxes)
            iou_threshold = torch.max(ious) * 0.8
            selected_inds = (ious[0] >= iou_threshold).nonzero()
            if selected_inds.numel():
                selected_embeddings = class_embeds[i][selected_inds.squeeze(1)]
                mean_embeds = torch.mean(class_embeds[i], axis=0)
                mean_sim = torch.einsum('d,id->i', mean_embeds, selected_embeddings)
                best_box_ind = selected_inds[torch.argmin(mean_sim)]
                best_class_embeds.append(class_embeds[i][best_box_ind])
                best_box_indices.append(best_box_ind)
        if best_class_embeds:
            query_embeds = torch.stack(best_class_embeds)
            box_indices = torch.stack(best_box_indices)
        else:
            query_embeds, box_indices = (None, None)
        return (query_embeds, box_indices, pred_boxes)

    @auto_docstring
    def image_guided_detection(self, pixel_values: torch.FloatTensor, query_pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Owlv2ImageGuidedObjectDetectionOutput:
        """
        query_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values of query image(s) to be detected. Pass in one query image per target image.

        Examples:
        ```python
        >>> import requests
        >>> from PIL import Image
        >>> import torch
        >>> from transformers import AutoProcessor, Owlv2ForObjectDetection

        >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> query_url = "http://images.cocodataset.org/val2017/000000001675.jpg"
        >>> query_image = Image.open(requests.get(query_url, stream=True).raw)
        >>> inputs = processor(images=image, query_images=query_image, return_tensors="pt")

        >>> # forward pass
        >>> with torch.no_grad():
        ...     outputs = model.image_guided_detection(**inputs)

        >>> target_sizes = torch.Tensor([image.size[::-1]])

        >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> results = processor.post_process_image_guided_detection(
        ...     outputs=outputs, threshold=0.9, nms_threshold=0.3, target_sizes=target_sizes
        ... )
        >>> i = 0  # Retrieve predictions for the first image
        >>> boxes, scores = results[i]["boxes"], results[i]["scores"]
        >>> for box, score in zip(boxes, scores):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(f"Detected similar object with confidence {round(score.item(), 3)} at location {box}")
        Detected similar object with confidence 0.938 at location [327.31, 54.94, 547.39, 268.06]
        Detected similar object with confidence 0.959 at location [5.78, 360.65, 619.12, 366.39]
        Detected similar object with confidence 0.902 at location [2.85, 360.01, 627.63, 380.8]
        Detected similar object with confidence 0.985 at location [176.98, -29.45, 672.69, 182.83]
        Detected similar object with confidence 1.0 at location [6.53, 14.35, 624.87, 470.82]
        Detected similar object with confidence 0.998 at location [579.98, 29.14, 615.49, 489.05]
        Detected similar object with confidence 0.985 at location [206.15, 10.53, 247.74, 466.01]
        Detected similar object with confidence 0.947 at location [18.62, 429.72, 646.5, 457.72]
        Detected similar object with confidence 0.996 at location [523.88, 20.69, 586.84, 483.18]
        Detected similar object with confidence 0.998 at location [3.39, 360.59, 617.29, 499.21]
        Detected similar object with confidence 0.969 at location [4.47, 449.05, 614.5, 474.76]
        Detected similar object with confidence 0.966 at location [31.44, 463.65, 654.66, 471.07]
        Detected similar object with confidence 0.924 at location [30.93, 468.07, 635.35, 475.39]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        query_feature_map = self.image_embedder(pixel_values=query_pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)[0]
        feature_map, vision_outputs = self.image_embedder(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding)
        batch_size, num_patches_height, num_patches_width, hidden_dim = feature_map.shape
        image_feats = torch.reshape(feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim))
        batch_size, num_patches_height, num_patches_width, hidden_dim = query_feature_map.shape
        query_image_feats = torch.reshape(query_feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim))
        query_embeds, best_box_indices, query_pred_boxes = self.embed_image_query(query_image_feats, query_feature_map, interpolate_pos_encoding)
        pred_logits, class_embeds = self.class_predictor(image_feats=image_feats, query_embeds=query_embeds)
        target_pred_boxes = self.box_predictor(image_feats, feature_map, interpolate_pos_encoding)
        if not return_dict:
            output = (feature_map, query_feature_map, target_pred_boxes, query_pred_boxes, pred_logits, class_embeds, vision_outputs.to_tuple())
            output = tuple((x for x in output if x is not None))
            return output
        return Owlv2ImageGuidedObjectDetectionOutput(image_embeds=feature_map, query_image_embeds=query_feature_map, target_pred_boxes=target_pred_boxes, query_pred_boxes=query_pred_boxes, logits=pred_logits, class_embeds=class_embeds, text_model_output=None, vision_model_output=vision_outputs)

    @auto_docstring
    def forward(self, input_ids: torch.Tensor, pixel_values: torch.FloatTensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Owlv2ObjectDetectionOutput:
        """
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`, *optional*):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids).
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the last hidden state. See `text_model_last_hidden_state` and
            `vision_model_last_hidden_state` under returned tensors for more detail.

        Examples:
        ```python
        >>> import requests
        >>> from PIL import Image
        >>> import torch
        >>> from transformers import Owlv2Processor, Owlv2ForObjectDetection

        >>> processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text_labels = [["a photo of a cat", "a photo of a dog"]]
        >>> inputs = processor(text=text_labels, images=image, return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
        >>> target_sizes = torch.tensor([(image.height, image.width)])
        >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> results = processor.post_process_grounded_object_detection(
        ...     outputs=outputs, target_sizes=target_sizes, threshold=0.1, text_labels=text_labels
        ... )
        >>> # Retrieve predictions for the first image for the corresponding text queries
        >>> result = results[0]
        >>> boxes, scores, text_labels = result["boxes"], result["scores"], result["text_labels"]
        >>> for box, score, text_label in zip(boxes, scores, text_labels):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(f"Detected {text_label} with confidence {round(score.item(), 3)} at location {box}")
        Detected a photo of a cat with confidence 0.614 at location [341.67, 23.39, 642.32, 371.35]
        Detected a photo of a cat with confidence 0.665 at location [6.75, 51.96, 326.62, 473.13]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        query_embeds, feature_map, outputs = self.image_text_embedder(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding)
        text_outputs = outputs.text_model_output
        vision_outputs = outputs.vision_model_output
        batch_size, num_patches_height, num_patches_width, hidden_dim = feature_map.shape
        image_feats = torch.reshape(feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim))
        max_text_queries = input_ids.shape[0] // batch_size
        query_embeds = query_embeds.reshape(batch_size, max_text_queries, query_embeds.shape[-1])
        input_ids = input_ids.reshape(batch_size, max_text_queries, input_ids.shape[-1])
        query_mask = input_ids[..., 0] > 0
        pred_logits, class_embeds = self.class_predictor(image_feats, query_embeds, query_mask)
        objectness_logits = self.objectness_predictor(image_feats)
        pred_boxes = self.box_predictor(image_feats, feature_map, interpolate_pos_encoding)
        if not return_dict:
            output = (pred_logits, objectness_logits, pred_boxes, query_embeds, feature_map, class_embeds, text_outputs.to_tuple(), vision_outputs.to_tuple())
            output = tuple((x for x in output if x is not None))
            return output
        return Owlv2ObjectDetectionOutput(image_embeds=feature_map, text_embeds=query_embeds, pred_boxes=pred_boxes, logits=pred_logits, objectness_logits=objectness_logits, class_embeds=class_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs)
class Owlv2ForObjectDetection(Owlv2PreTrainedModel):
    def __init__(self, config: Owlv2Config):
        pass
    @staticmethod
    def normalize_grid_corner_coordinates(num_patches_height: int, num_patches_width: int) -> torch.Tensor:
        pass
    def objectness_predictor(self, image_features: torch.FloatTensor) -> torch.FloatTensor:
        '''Predicts the probability that each image feature token is an object.

        Args:
            image_features (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_dim)`)):
                Features extracted from the image.

        Returns:
            Objectness scores.
        '''
        pass
    @lru_cache(maxsize=2)
    def compute_box_bias(self, num_patches_height: int, num_patches_width: int, feature_map: Optional[torch.FloatTensor]=None) -> torch.Tensor:
        pass
    def box_predictor(self, image_feats: torch.FloatTensor, feature_map: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
        '''
        Args:
            image_feats:
                Features extracted from the image, returned by the `image_text_embedder` method.
            feature_map:
                A spatial re-arrangement of image_features, also returned by the `image_text_embedder` method.
            interpolate_pos_encoding:
                Whether to interpolate the pre-trained position encodings.

        Returns:
            pred_boxes:
                List of predicted boxes (cxcywh normalized to 0, 1) nested within a dictionary.
        '''
        pass
    def class_predictor(self, image_feats: torch.FloatTensor, query_embeds: Optional[torch.FloatTensor]=None, query_mask: Optional[torch.Tensor]=None) -> tuple[torch.FloatTensor]:
        '''
        Args:
            image_feats:
                Features extracted from the `image_text_embedder`.
            query_embeds:
                Text query embeddings.
            query_mask:
                Must be provided with query_embeddings. A mask indicating which query embeddings are valid.
        '''
        pass
    def image_text_embedder(self, input_ids: torch.Tensor, pixel_values: torch.FloatTensor, attention_mask: torch.Tensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> tuple[torch.FloatTensor]:
        pass
    def image_embedder(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> tuple[torch.FloatTensor]:
        pass
    def embed_image_query(self, query_image_features: torch.FloatTensor, query_feature_map: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
        pass
    @auto_docstring
    def image_guided_detection(self, pixel_values: torch.FloatTensor, query_pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Owlv2ImageGuidedObjectDetectionOutput:
        '''
        query_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values of query image(s) to be detected. Pass in one query image per target image.

        Examples:
        ```python
        >>> import requests
        >>> from PIL import Image
        >>> import torch
        >>> from transformers import AutoProcessor, Owlv2ForObjectDetection

        >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> query_url = "http://images.cocodataset.org/val2017/000000001675.jpg"
        >>> query_image = Image.open(requests.get(query_url, stream=True).raw)
        >>> inputs = processor(images=image, query_images=query_image, return_tensors="pt")

        >>> # forward pass
        >>> with torch.no_grad():
        ...     outputs = model.image_guided_detection(**inputs)

        >>> target_sizes = torch.Tensor([image.size[::-1]])

        >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> results = processor.post_process_image_guided_detection(
        ...     outputs=outputs, threshold=0.9, nms_threshold=0.3, target_sizes=target_sizes
        ... )
        >>> i = 0  # Retrieve predictions for the first image
        >>> boxes, scores = results[i]["boxes"], results[i]["scores"]
        >>> for box, score in zip(boxes, scores):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(f"Detected similar object with confidence {round(score.item(), 3)} at location {box}")
        Detected similar object with confidence 0.938 at location [327.31, 54.94, 547.39, 268.06]
        Detected similar object with confidence 0.959 at location [5.78, 360.65, 619.12, 366.39]
        Detected similar object with confidence 0.902 at location [2.85, 360.01, 627.63, 380.8]
        Detected similar object with confidence 0.985 at location [176.98, -29.45, 672.69, 182.83]
        Detected similar object with confidence 1.0 at location [6.53, 14.35, 624.87, 470.82]
        Detected similar object with confidence 0.998 at location [579.98, 29.14, 615.49, 489.05]
        Detected similar object with confidence 0.985 at location [206.15, 10.53, 247.74, 466.01]
        Detected similar object with confidence 0.947 at location [18.62, 429.72, 646.5, 457.72]
        Detected similar object with confidence 0.996 at location [523.88, 20.69, 586.84, 483.18]
        Detected similar object with confidence 0.998 at location [3.39, 360.59, 617.29, 499.21]
        Detected similar object with confidence 0.969 at location [4.47, 449.05, 614.5, 474.76]
        Detected similar object with confidence 0.966 at location [31.44, 463.65, 654.66, 471.07]
        Detected similar object with confidence 0.924 at location [30.93, 468.07, 635.35, 475.39]
        ```'''
        pass
    @auto_docstring
    def forward(self, input_ids: torch.Tensor, pixel_values: torch.FloatTensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Owlv2ObjectDetectionOutput:
        '''
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`, *optional*):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids).
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the last hidden state. See `text_model_last_hidden_state` and
            `vision_model_last_hidden_state` under returned tensors for more detail.

        Examples:
        ```python
        >>> import requests
        >>> from PIL import Image
        >>> import torch
        >>> from transformers import Owlv2Processor, Owlv2ForObjectDetection

        >>> processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text_labels = [["a photo of a cat", "a photo of a dog"]]
        >>> inputs = processor(text=text_labels, images=image, return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
        >>> target_sizes = torch.tensor([(image.height, image.width)])
        >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> results = processor.post_process_grounded_object_detection(
        ...     outputs=outputs, target_sizes=target_sizes, threshold=0.1, text_labels=text_labels
        ... )
        >>> # Retrieve predictions for the first image for the corresponding text queries
        >>> result = results[0]
        >>> boxes, scores, text_labels = result["boxes"], result["scores"], result["text_labels"]
        >>> for box, score, text_label in zip(boxes, scores, text_labels):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(f"Detected {text_label} with confidence {round(score.item(), 3)} at location {box}")
        Detected a photo of a cat with confidence 0.614 at location [341.67, 23.39, 642.32, 371.35]
        Detected a photo of a cat with confidence 0.665 at location [6.75, 51.96, 326.62, 473.13]
        ```'''
        pass
16
5
43
5
25
12
2
0.49
1
13
6
0
10
10
11
12
497
70
287
144
221
140
152
92
140
5
2
2
27
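Annotation: `compute_box_bias` places a logit-space prior at each patch's grid position, so that `sigmoid(pred_boxes + box_bias)` starts out centered on the patch grid. The same arithmetic on a toy 2x3 grid:

```python
import torch

num_patches_height, num_patches_width = 2, 3
xs = torch.arange(1, num_patches_width + 1, dtype=torch.float32)
ys = torch.arange(1, num_patches_height + 1, dtype=torch.float32)
xx, yy = torch.meshgrid(xs, ys, indexing='xy')
coords = torch.stack((xx, yy), dim=-1)
coords[..., 0] /= num_patches_width
coords[..., 1] /= num_patches_height
coords = coords.view(-1, 2).clip(0.0, 1.0)  # (6, 2) normalized grid positions

# logit(x) written as log(x) - log1p(-x), with the same 1e-4 epsilon as compute_box_bias
coord_bias = torch.log(coords + 0.0001) - torch.log1p(-coords + 0.0001)
size = torch.full_like(coord_bias, 1.0)
size[..., 0] /= num_patches_width
size[..., 1] /= num_patches_height
size_bias = torch.log(size + 0.0001) - torch.log1p(-size + 0.0001)
box_bias = torch.cat([coord_bias, size_bias], dim=-1)  # (6, 4): (cx, cy, w, h) priors in logit space
```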
4,338
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2ImageGuidedObjectDetectionOutput
from typing import Any, Optional, Union
import torch
from dataclasses import dataclass
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int

@dataclass
@auto_docstring(custom_intro='\n Output type of [`Owlv2ForObjectDetection.image_guided_detection`].\n ')
class Owlv2ImageGuidedObjectDetectionOutput(ModelOutput):
    """
    logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`):
        Classification logits (including no-object) for all queries.
    image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`):
        Pooled output of [`Owlv2VisionModel`]. OWLv2 represents images as a set of image patches and computes image
        embeddings for each patch.
    query_image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`):
        Pooled output of [`Owlv2VisionModel`]. OWLv2 represents images as a set of image patches and computes image
        embeddings for each patch.
    target_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
        Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
        values are normalized in [0, 1], relative to the size of each individual target image in the batch
        (disregarding possible padding). You can use [`~Owlv2ImageProcessor.post_process_object_detection`] to
        retrieve the unnormalized bounding boxes.
    query_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
        Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
        values are normalized in [0, 1], relative to the size of each individual query image in the batch
        (disregarding possible padding). You can use [`~Owlv2ImageProcessor.post_process_object_detection`] to
        retrieve the unnormalized bounding boxes.
    class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
        Class embeddings of all image patches. OWLv2 represents images as a set of image patches where the total
        number of patches is (image_size / patch_size)**2.
    text_model_output (tuple[`BaseModelOutputWithPooling`]):
        The output of the [`Owlv2TextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`Owlv2VisionModel`].
    """
    logits: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    query_image_embeds: Optional[torch.FloatTensor] = None
    target_pred_boxes: Optional[torch.FloatTensor] = None
    query_pred_boxes: Optional[torch.FloatTensor] = None
    class_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> tuple[Any]:
        return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys()))
@dataclass
@auto_docstring(custom_intro='\n Output type of [`Owlv2ForObjectDetection.image_guided_detection`].\n ')
class Owlv2ImageGuidedObjectDetectionOutput(ModelOutput):
    '''
    logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`):
        Classification logits (including no-object) for all queries.
    image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`):
        Pooled output of [`Owlv2VisionModel`]. OWLv2 represents images as a set of image patches and computes image
        embeddings for each patch.
    query_image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`):
        Pooled output of [`Owlv2VisionModel`]. OWLv2 represents images as a set of image patches and computes image
        embeddings for each patch.
    target_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
        Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
        values are normalized in [0, 1], relative to the size of each individual target image in the batch
        (disregarding possible padding). You can use [`~Owlv2ImageProcessor.post_process_object_detection`] to
        retrieve the unnormalized bounding boxes.
    query_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
        Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
        values are normalized in [0, 1], relative to the size of each individual query image in the batch
        (disregarding possible padding). You can use [`~Owlv2ImageProcessor.post_process_object_detection`] to
        retrieve the unnormalized bounding boxes.
    class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
        Class embeddings of all image patches. OWLv2 represents images as a set of image patches where the total
        number of patches is (image_size / patch_size)**2.
    text_model_output (tuple[`BaseModelOutputWithPooling`]):
        The output of the [`Owlv2TextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`Owlv2VisionModel`].
    '''
    def to_tuple(self) -> tuple[Any]:
        pass
4
1
5
0
5
0
2
2.07
1
2
0
0
1
0
1
1
46
3
14
10
12
29
11
10
9
2
1
0
2
4,339
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2MLP
from torch import Tensor, nn
import torch
from ...activations import ACT2FN

class Owlv2MLP(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states
class Owlv2MLP(nn.Module):
    def __init__(self, config):
        pass
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
3
0
6
0
6
0
1
0
1
2
0
0
2
4
2
12
13
1
12
7
9
0
12
7
9
1
1
0
2
4,340
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2Model
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from torch import Tensor, nn
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int
from .configuration_owlv2 import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig
from typing import Any, Optional, Union
import torch

@auto_docstring
class Owlv2Model(Owlv2PreTrainedModel):
    config: Owlv2Config

    def __init__(self, config: Owlv2Config):
        super().__init__(config)
        if not isinstance(config.text_config, Owlv2TextConfig):
            raise TypeError(f'config.text_config is expected to be of type Owlv2TextConfig but is of type {type(config.text_config)}.')
        if not isinstance(config.vision_config, Owlv2VisionConfig):
            raise TypeError(f'config.vision_config is expected to be of type Owlv2VisionConfig but is of type {type(config.vision_config)}.')
        text_config = config.text_config
        vision_config = config.vision_config
        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size
        self.text_model = Owlv2TextTransformer(text_config)
        self.vision_model = Owlv2VisionTransformer(vision_config)
        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        self.logit_scale = nn.Parameter(torch.tensor(config.logit_scale_init_value))
        self.post_init()

    @filter_out_non_signature_kwargs()
    @auto_docstring
    def get_text_features(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None) -> torch.FloatTensor:
        """
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids)

        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`Owlv2TextModel`].

        Examples:
        ```python
        >>> import torch
        >>> from transformers import AutoProcessor, Owlv2Model

        >>> model = Owlv2Model.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> inputs = processor(
        ...     text=[["a photo of a cat", "a photo of a dog"], ["photo of an astronaut"]], return_tensors="pt"
        ... )
        >>> with torch.inference_mode():
        ...     text_features = model.get_text_features(**inputs)
        ```"""
        text_outputs: BaseModelOutputWithPooling = self.text_model(input_ids=input_ids, attention_mask=attention_mask)
        text_features = self.text_projection(text_outputs.pooler_output)
        return text_features

    @filter_out_non_signature_kwargs()
    @auto_docstring
    def get_image_features(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
        """
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`Owlv2VisionModel`].

        Examples:
        ```python
        >>> import torch
        >>> from transformers.image_utils import load_image
        >>> from transformers import AutoProcessor, Owlv2Model

        >>> model = Owlv2Model.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16-ensemble")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = load_image(url)
        >>> inputs = processor(images=image, return_tensors="pt")
        >>> with torch.inference_mode():
        ...     image_features = model.get_image_features(**inputs)
        ```"""
        vision_outputs: BaseModelOutputWithPooling = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        image_features = self.visual_projection(vision_outputs.pooler_output)
        return image_features

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_base_image_embeds: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, Owlv2Output]:
        """
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.
        return_base_image_embeds (`bool`, *optional*):
            Whether or not to return the base image embeddings.

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Owlv2Model

        >>> model = Owlv2Model.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
        text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        text_embeds = text_outputs[1]
        text_embeds = self.text_projection(text_embeds)
        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(image_embeds)
        image_embeds = image_embeds / torch.linalg.norm(image_embeds, ord=2, dim=-1, keepdim=True)
        text_embeds_norm = text_embeds / torch.linalg.norm(text_embeds, ord=2, dim=-1, keepdim=True)
        logit_scale = self.logit_scale.exp().to(image_embeds.device)
        logits_per_text = torch.matmul(text_embeds_norm, image_embeds.t()) * logit_scale
        logits_per_image = logits_per_text.t()
        loss = None
        if return_loss:
            loss = owlv2_loss(logits_per_text)
        text_embeds = text_embeds_norm
        if not return_dict:
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return (loss,) + output if loss is not None else output
        return Owlv2Output(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs)
@auto_docstring class Owlv2Model(Owlv2PreTrainedModel): def __init__(self, config: Owlv2Config): pass @filter_out_non_signature_kwargs() @auto_docstring def get_text_features(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None) -> torch.FloatTensor: ''' input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`Owlv2TextModel`]. Examples: ```python >>> import torch >>> from transformers import AutoProcessor, Owlv2Model >>> model = Owlv2Model.from_pretrained("google/owlv2-base-patch16-ensemble") >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16-ensemble") >>> inputs = processor( ... text=[["a photo of a cat", "a photo of a dog"], ["photo of a astranaut"]], return_tensors="pt" ... ) >>> with torch.inference_mode(): ... text_features = model.get_text_features(**inputs) ```''' pass @filter_out_non_signature_kwargs() @auto_docstring def get_image_features(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor: ''' Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`Owlv2VisionModel`]. Examples: ```python >>> import torch >>> from transformers.image_utils import load_image >>> from transformers import AutoProcessor, Owlv2Model >>> model = Owlv2Model.from_pretrained("google/owlv2-base-patch16-ensemble") >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16-ensemble") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = load_image(url) >>> inputs = processor(images=image, return_tensors="pt") >>> with torch.inference_mode(): ... image_features = model.get_image_features(**inputs) ```''' pass @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_base_image_embeds: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, Owlv2Output]: ''' return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. return_base_image_embeds (`bool`, *optional*): Whether or not to return the base image embeddings. 
Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Owlv2Model >>> model = Owlv2Model.from_pretrained("google/owlv2-base-patch16-ensemble") >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16-ensemble") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```''' pass
11
3
49
7
29
14
4
0.45
1
11
6
0
4
8
4
5
207
32
121
60
87
54
56
32
51
7
2
1
16
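Editorial note on the `Owlv2Model` record above: its `forward` L2-normalizes both embedding sets before taking a temperature-scaled dot product. A minimal standalone sketch of that contrastive-logits step (the function name, shapes, and toy values are illustrative, not part of the library):

```python
import torch

def contrastive_logits(text_embeds: torch.Tensor, image_embeds: torch.Tensor,
                       logit_scale: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
    # L2-normalize each embedding so the dot product is a cosine similarity.
    image_embeds = image_embeds / torch.linalg.norm(image_embeds, ord=2, dim=-1, keepdim=True)
    text_embeds = text_embeds / torch.linalg.norm(text_embeds, ord=2, dim=-1, keepdim=True)
    # Scale by the exponentiated learned temperature, as in the recorded forward.
    logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale.exp()
    return logits_per_text, logits_per_text.t()

# Toy shapes: 2 text queries, 3 images, 512-dim projections.
lt, li = contrastive_logits(torch.randn(2, 512), torch.randn(3, 512), torch.tensor(2.6592))
print(lt.shape, li.shape)  # torch.Size([2, 3]) torch.Size([3, 2])
```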
4,341
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2ObjectDetectionOutput
from dataclasses import dataclass from typing import Any, Optional, Union from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling import torch from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int @dataclass @auto_docstring(custom_intro='\n Output type of [`Owlv2ForObjectDetection`].\n ') class Owlv2ObjectDetectionOutput(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`): Classification logits (including no-object) for all queries. objectness_logits (`torch.FloatTensor` of shape `(batch_size, num_patches, 1)`): The objectness logits of all image patches. OWL-ViT represents images as a set of image patches where the total number of patches is (image_size / patch_size)**2. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~Owlv2ImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. text_embeds (`torch.FloatTensor` of shape `(batch_size, num_max_text_queries, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`Owlv2TextModel`]. image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`): Pooled output of [`Owlv2VisionModel`]. OWLv2 represents images as a set of image patches and computes image embeddings for each patch. class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`): Class embeddings of all image patches. OWLv2 represents images as a set of image patches where the total number of patches is (image_size / patch_size)**2. text_model_output (tuple[`BaseModelOutputWithPooling`]): The output of the [`Owlv2TextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`Owlv2VisionModel`]. """ loss: Optional[torch.FloatTensor] = None loss_dict: Optional[dict] = None logits: Optional[torch.FloatTensor] = None objectness_logits: Optional[torch.FloatTensor] = None pred_boxes: Optional[torch.FloatTensor] = None text_embeds: Optional[torch.FloatTensor] = None image_embeds: Optional[torch.FloatTensor] = None class_embeds: Optional[torch.FloatTensor] = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> tuple[Any]: return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys()))
@dataclass @auto_docstring(custom_intro='\n Output type of [`Owlv2ForObjectDetection`].\n ') class Owlv2ObjectDetectionOutput(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`): Classification logits (including no-object) for all queries. objectness_logits (`torch.FloatTensor` of shape `(batch_size, num_patches, 1)`): The objectness logits of all image patches. OWL-ViT represents images as a set of image patches where the total number of patches is (image_size / patch_size)**2. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~Owlv2ImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. text_embeds (`torch.FloatTensor` of shape `(batch_size, num_max_text_queries, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`Owlv2TextModel`]. image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`): Pooled output of [`Owlv2VisionModel`]. OWLv2 represents images as a set of image patches and computes image embeddings for each patch. class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`): Class embeddings of all image patches. OWLv2 represents images as a set of image patches where the total number of patches is (image_size / patch_size)**2. text_model_output (tuple[`BaseModelOutputWithPooling`]): The output of the [`Owlv2TextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`Owlv2VisionModel`]. ''' def to_tuple(self) -> tuple[Any]: pass
4
1
5
0
5
0
2
2
1
2
0
0
1
0
1
1
51
3
16
12
14
32
13
12
11
2
1
0
2
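The `to_tuple` method in the `Owlv2ObjectDetectionOutput` record above flattens the output while recursively tuple-izing the nested model outputs. A toy reproduction of that pattern; the `ToyDetectionOutput` class is invented for illustration:

```python
from dataclasses import dataclass
from typing import Any, Optional

import torch
from transformers.utils import ModelOutput

@dataclass
class ToyDetectionOutput(ModelOutput):
    logits: Optional[torch.FloatTensor] = None
    text_model_output: Any = None  # a nested ModelOutput in the real class

    def to_tuple(self) -> tuple:
        # Nested outputs are recursively converted to tuples; plain tensors pass through.
        return tuple(
            self[k] if k not in ["text_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )

out = ToyDetectionOutput(logits=torch.ones(1, 3))
print(out.to_tuple())  # (tensor([[1., 1., 1.]]),)
```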
4,342
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2Output
from typing import Any, Optional, Union import torch from dataclasses import dataclass from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling @dataclass @auto_docstring class Owlv2Output(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. text_embeds (`torch.FloatTensor` of shape `(batch_size * num_max_text_queries, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`Owlv2TextModel`]. image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`Owlv2VisionModel`]. text_model_output (tuple[`BaseModelOutputWithPooling`]): The output of the [`Owlv2TextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`Owlv2VisionModel`]. """ loss: Optional[torch.FloatTensor] = None logits_per_image: Optional[torch.FloatTensor] = None logits_per_text: Optional[torch.FloatTensor] = None text_embeds: Optional[torch.FloatTensor] = None image_embeds: Optional[torch.FloatTensor] = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> tuple[Any]: return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys()))
@dataclass @auto_docstring class Owlv2Output(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. text_embeds (`torch.FloatTensor` of shape `(batch_size * num_max_text_queries, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`Owlv2TextModel`]. image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`Owlv2VisionModel`]. text_model_output (tuple[`BaseModelOutputWithPooling`]): The output of the [`Owlv2TextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`Owlv2VisionModel`]. ''' def to_tuple(self) -> tuple[Any]: pass
4
1
5
0
5
0
2
1.54
1
2
0
0
1
0
1
1
35
2
13
9
11
20
10
9
8
2
1
0
2
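The `loss` field of `Owlv2Output` above is produced by the `owlv2_loss` call seen in `Owlv2Model.forward`. A sketch of the standard CLIP-style symmetric cross-entropy such a loss computes, written from the general formulation rather than copied from the library:

```python
import torch
import torch.nn.functional as F

def symmetric_contrastive_loss(logits_per_text: torch.Tensor) -> torch.Tensor:
    # Matching text/image pairs sit on the diagonal, so the target
    # for row i is class i, in both directions.
    targets = torch.arange(logits_per_text.shape[0], device=logits_per_text.device)
    caption_loss = F.cross_entropy(logits_per_text, targets)
    image_loss = F.cross_entropy(logits_per_text.t(), targets)
    return (caption_loss + image_loss) / 2.0

print(symmetric_contrastive_loss(torch.randn(4, 4)))
```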
4,343
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2PreTrainedModel
from .configuration_owlv2 import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig from torch import Tensor, nn from ...modeling_utils import PreTrainedModel from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int @auto_docstring class Owlv2PreTrainedModel(PreTrainedModel): config: Owlv2Config base_model_prefix = 'owlv2' supports_gradient_checkpointing = True _no_split_modules = ['Owlv2EncoderLayer'] def _init_weights(self, module: nn.Module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, Owlv2TextEmbeddings): module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, Owlv2VisionEmbeddings): nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim ** (-0.5) * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, Owlv2Attention): in_proj_std = module.embed_dim ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor out_proj_std = module.embed_dim ** (-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, Owlv2MLP): in_proj_std = module.config.hidden_size ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor fc_std = (2 * module.config.hidden_size) ** (-0.5) * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, Owlv2Model): nn.init.normal_(module.text_projection.weight, std=module.text_embed_dim ** (-0.5) * factor) nn.init.normal_(module.visual_projection.weight, std=module.vision_embed_dim ** (-0.5) * factor) module.logit_scale.data.fill_(self.config.logit_scale_init_value) if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=factor) if module.bias is not None: module.bias.data.zero_()
@auto_docstring class Owlv2PreTrainedModel(PreTrainedModel): def _init_weights(self, module: nn.Module): '''Initialize the weights''' pass
3
1
39
0
38
1
8
0.12
1
5
5
4
1
0
1
1
50
2
43
10
41
5
33
10
31
8
1
1
8
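The `_init_weights` recorded above derives every standard deviation from the hidden width, the depth, and a global `initializer_factor`. A compact sketch of that rule for one attention projection (the function and argument names are made up for illustration):

```python
import torch.nn as nn

def init_attn_proj(q_proj: nn.Linear, embed_dim: int, num_hidden_layers: int,
                   factor: float = 1.0) -> None:
    # Same formula as the recorded _init_weights for Owlv2Attention:
    # std shrinks with width and with 2 * depth to keep residual streams stable.
    in_proj_std = embed_dim ** -0.5 * (2 * num_hidden_layers) ** -0.5 * factor
    nn.init.normal_(q_proj.weight, std=in_proj_std)

layer = nn.Linear(512, 512)
init_attn_proj(layer, embed_dim=512, num_hidden_layers=12)
```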
4,344
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2TextEmbeddings
from torch import Tensor, nn import torch from .configuration_owlv2 import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig from typing import Any, Optional, Union class Owlv2TextEmbeddings(nn.Module): def __init__(self, config: Owlv2TextConfig): super().__init__() self.token_embedding = nn.Embedding(config.vocab_size, config.hidden_size) self.position_embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False) def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings
class Owlv2TextEmbeddings(nn.Module): def __init__(self, config: Owlv2TextConfig): pass def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor: pass
3
0
14
3
11
1
3
0.05
1
3
1
0
2
2
2
12
29
6
22
13
14
1
15
8
12
4
1
1
5
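The `Owlv2TextEmbeddings` module above adds learned position embeddings to token embeddings, defaulting `position_ids` to `0..seq_len-1`. The same computation in a self-contained sketch; the sizes follow the OWLv2 text defaults but are otherwise arbitrary:

```python
import torch
import torch.nn as nn

vocab_size, max_pos, hidden = 49408, 16, 512
token_embedding = nn.Embedding(vocab_size, hidden)
position_embedding = nn.Embedding(max_pos, hidden)

input_ids = torch.randint(0, vocab_size, (2, 16))              # (batch, seq_len)
position_ids = torch.arange(input_ids.shape[-1]).unsqueeze(0)  # default 0..seq_len-1

embeddings = token_embedding(input_ids) + position_embedding(position_ids)
print(embeddings.shape)  # torch.Size([2, 16, 512])
```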
4,345
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2TextModel
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int from torch import Tensor, nn from typing import Any, Optional, Union import torch from .configuration_owlv2 import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling class Owlv2TextModel(Owlv2PreTrainedModel): config: Owlv2TextConfig def __init__(self, config: Owlv2TextConfig): super().__init__(config) self.text_model = Owlv2TextTransformer(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.token_embedding def set_input_embeddings(self, value): self.text_model.embeddings.token_embedding = value @auto_docstring def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: """ input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) Examples: ```python >>> from transformers import AutoProcessor, Owlv2TextModel >>> model = Owlv2TextModel.from_pretrained("google/owlv2-base-patch16") >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16") >>> inputs = processor( ... text=[["a photo of a cat", "a photo of a dog"], ["photo of a astranaut"]], return_tensors="pt" ... ) >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled (EOS token) states ```""" return self.text_model(input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
class Owlv2TextModel(Owlv2PreTrainedModel): def __init__(self, config: Owlv2TextConfig): pass def get_input_embeddings(self) -> nn.Module: pass def set_input_embeddings(self, value): pass @auto_docstring def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: ''' input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) Examples: ```python >>> from transformers import AutoProcessor, Owlv2TextModel >>> model = Owlv2TextModel.from_pretrained("google/owlv2-base-patch16") >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16") >>> inputs = processor( ... text=[["a photo of a cat", "a photo of a dog"], ["photo of a astranaut"]], return_tensors="pt" ... ) >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled (EOS token) states ```''' pass
6
1
11
1
6
4
1
0.59
1
6
3
0
4
1
4
5
50
7
27
15
13
16
12
7
7
1
2
0
4
4,346
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2TextTransformer
import torch from typing import Any, Optional, Union from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask from .configuration_owlv2 import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig from torch import Tensor, nn class Owlv2TextTransformer(nn.Module): def __init__(self, config: Owlv2TextConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = Owlv2TextEmbeddings(config) self.encoder = Owlv2Encoder(config) self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @auto_docstring def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: """ input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) causal_attention_mask = _create_4d_causal_attention_mask(input_shape, hidden_states.dtype, device=hidden_states.device) if attention_mask is not None: attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) encoder_outputs = self.encoder(inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] last_hidden_state = self.final_layer_norm(last_hidden_state) pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), input_ids.to(torch.int).argmax(dim=-1).to(last_hidden_state.device)] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
class Owlv2TextTransformer(nn.Module): def __init__(self, config: Owlv2TextConfig): pass @auto_docstring def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: ''' input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) ''' pass
4
1
34
4
26
5
4
0.19
1
8
4
0
2
4
2
12
72
8
54
23
41
10
25
14
22
6
1
1
7
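The pooling step in `Owlv2TextTransformer.forward` above selects, per sequence, the hidden state at the position of the largest token id, which is the end-of-sequence token in the CLIP vocabulary. A minimal sketch of that gather (shapes and the EOS position are illustrative):

```python
import torch

last_hidden_state = torch.randn(2, 16, 512)  # (batch, seq_len, hidden)
input_ids = torch.randint(0, 49407, (2, 16))
input_ids[:, 5] = 49407  # pretend EOS (the largest id in the vocab) sits at position 5

eos_positions = input_ids.to(torch.int).argmax(dim=-1)  # index of max token id per row
pooled = last_hidden_state[torch.arange(last_hidden_state.shape[0]), eos_positions]
print(pooled.shape)  # torch.Size([2, 512])
```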
4,347
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2VisionEmbeddings
import torch from .configuration_owlv2 import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int from torch import Tensor, nn class Owlv2VisionEmbeddings(nn.Module): def __init__(self, config: Owlv2VisionConfig): super().__init__() self.patch_size = config.patch_size self.config = config self.embed_dim = config.hidden_size self.class_embedding = nn.Parameter(torch.randn(config.hidden_size)) self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=config.patch_size, stride=config.patch_size, bias=False) self.num_patches = (config.image_size // config.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer('position_ids', torch.arange(self.num_positions).expand((1, -1)), persistent=False) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 position_embedding = self.position_embedding.weight.unsqueeze(0) num_positions = position_embedding.shape[1] - 1 if not torch.jit.is_tracing() and num_patches == num_positions and (height == width): return self.position_embedding(self.position_ids) class_pos_embed = position_embedding[:, :1] patch_pos_embed = position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions ** 0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape patch_embeds = self.patch_embedding(pixel_values) patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings
class Owlv2VisionEmbeddings(nn.Module): def __init__(self, config: Owlv2VisionConfig): pass def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: ''' This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 ''' pass def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor: pass
4
1
24
4
17
3
2
0.2
1
5
1
0
3
8
3
13
75
15
51
25
47
10
39
25
35
2
1
1
5
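The `interpolate_pos_encoding` method above lets a checkpoint trained at one resolution run at another by resizing the patch-position grid bicubically. A stripped-down sketch of just the resize; the helper name is invented, and the real method additionally handles tracing and the no-op fast path:

```python
import torch
import torch.nn as nn

def resize_pos_embed(pos_embed: torch.Tensor, new_h: int, new_w: int) -> torch.Tensor:
    # pos_embed: (1, 1 + num_positions, dim), class token first.
    class_pos, patch_pos = pos_embed[:, :1], pos_embed[:, 1:]
    dim = pos_embed.shape[-1]
    side = int(patch_pos.shape[1] ** 0.5)
    # Reshape the flat patch positions into a square grid, interpolate, flatten back.
    patch_pos = patch_pos.reshape(1, side, side, dim).permute(0, 3, 1, 2)
    patch_pos = nn.functional.interpolate(patch_pos, size=(new_h, new_w),
                                          mode="bicubic", align_corners=False)
    patch_pos = patch_pos.permute(0, 2, 3, 1).reshape(1, -1, dim)
    return torch.cat((class_pos, patch_pos), dim=1)

print(resize_pos_embed(torch.randn(1, 1 + 14 * 14, 768), 18, 18).shape)  # (1, 325, 768)
```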
4,348
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2VisionModel
from torch import Tensor, nn from typing import Any, Optional, Union import torch from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from .configuration_owlv2 import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig class Owlv2VisionModel(Owlv2PreTrainedModel): config: Owlv2VisionConfig main_input_name = 'pixel_values' def __init__(self, config: Owlv2VisionConfig): super().__init__(config) self.vision_model = Owlv2VisionTransformer(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @auto_docstring def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: """ Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Owlv2VisionModel >>> model = Owlv2VisionModel.from_pretrained("google/owlv2-base-patch16") >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```""" return self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
class Owlv2VisionModel(Owlv2PreTrainedModel): def __init__(self, config: Owlv2VisionConfig): pass def get_input_embeddings(self) -> nn.Module: pass @auto_docstring def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: ''' Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, Owlv2VisionModel >>> model = Owlv2VisionModel.from_pretrained("google/owlv2-base-patch16") >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```''' pass
5
1
14
1
7
6
1
0.65
1
5
3
0
3
1
3
4
50
7
26
15
13
17
11
7
7
1
2
0
3
4,349
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/modeling_owlv2.py
transformers.models.owlv2.modeling_owlv2.Owlv2VisionTransformer
from typing import Any, Optional, Union from .configuration_owlv2 import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig import torch from torch import Tensor, nn from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling class Owlv2VisionTransformer(nn.Module): def __init__(self, config: Owlv2VisionConfig): super().__init__() self.config = config self.embeddings = Owlv2VisionEmbeddings(config) self.pre_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.encoder = Owlv2Encoder(config) self.post_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) @auto_docstring def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict expected_input_dtype = self.embeddings.patch_embedding.weight.dtype pixel_values = pixel_values.to(expected_input_dtype) hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) hidden_states = self.pre_layernorm(hidden_states) encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
class Owlv2VisionTransformer(nn.Module): def __init__(self, config: Owlv2VisionConfig): pass @auto_docstring def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: pass
4
0
27
4
21
2
3
0.09
1
6
4
0
2
5
2
12
57
9
44
21
32
4
23
13
20
5
1
1
6
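The vision transformer above pools by taking the class-token position of the last hidden state and layer-normalizing it. In miniature, with illustrative shapes:

```python
import torch
import torch.nn as nn

hidden_size = 768
post_layernorm = nn.LayerNorm(hidden_size, eps=1e-5)

last_hidden_state = torch.randn(2, 1 + 196, hidden_size)  # class token + 14x14 patches
pooled_output = post_layernorm(last_hidden_state[:, 0, :])
print(pooled_output.shape)  # torch.Size([2, 768])
```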
4,350
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlv2/processing_owlv2.py
transformers.models.owlv2.processing_owlv2.Owlv2Processor
import warnings from typing import TYPE_CHECKING, Optional, Union from ...image_utils import ImageInput from ...tokenization_utils_base import PreTokenizedInput, TextInput from ...image_processing_utils import BatchFeature from ...utils import TensorType, is_torch_available from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack import numpy as np class Owlv2Processor(ProcessorMixin): """ Constructs an Owlv2 processor which wraps [`Owlv2ImageProcessor`]/[`Owlv2ImageProcessorFast`] and [`CLIPTokenizer`]/[`CLIPTokenizerFast`] into a single processor that inherits both the image processor and tokenizer functionalities. See the [`~OwlViTProcessor.__call__`] and [`~OwlViTProcessor.decode`] for more information. Args: image_processor ([`Owlv2ImageProcessor`, `Owlv2ImageProcessorFast`]): The image processor is a required input. tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`]): The tokenizer is a required input. """ attributes = ['image_processor', 'tokenizer'] image_processor_class = ('Owlv2ImageProcessor', 'Owlv2ImageProcessorFast') tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__(self, image_processor, tokenizer, **kwargs): super().__init__(image_processor, tokenizer) def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[Owlv2ProcessorKwargs]) -> BatchFeature: """ Main method to prepare for the model one or several text(s) and image(s). This method forwards the `text` and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode: the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). query_images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The query image to be prepared, one query image is expected per target image to be queried. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. 
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. - **query_pixel_values** -- Pixel values of the query images to be fed to a model. Returned when `query_images` is not `None`. """ output_kwargs = self._merge_kwargs(Owlv2ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs) query_images = output_kwargs['images_kwargs'].pop('query_images', None) return_tensors = output_kwargs['common_kwargs']['return_tensors'] if text is None and query_images is None and (images is None): raise ValueError('You have to specify at least one text or query image or image. All three cannot be none.') data = {} if text is not None: if isinstance(text, str) or (isinstance(text, list) and (not isinstance(text[0], list))): encodings = [self.tokenizer(text, **output_kwargs['text_kwargs'])] elif isinstance(text, list) and isinstance(text[0], list): encodings = [] max_num_queries = max([len(text_single) for text_single in text]) for text_single in text: if len(text_single) != max_num_queries: text_single = text_single + [' '] * (max_num_queries - len(text_single)) encoding = self.tokenizer(text_single, **output_kwargs['text_kwargs']) encodings.append(encoding) else: raise TypeError('Input text should be a string, a list of strings or a nested list of strings') if return_tensors == 'np': input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0) attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0) elif return_tensors == 'pt' and is_torch_available(): import torch input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0) attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0) else: raise ValueError('Target return tensor type could not be returned') data['input_ids'] = input_ids data['attention_mask'] = attention_mask if query_images is not None: query_pixel_values = self.image_processor(query_images, **output_kwargs['images_kwargs']).pixel_values data = {'query_pixel_values': query_pixel_values} if images is not None: image_features = self.image_processor(images, **output_kwargs['images_kwargs']) data['pixel_values'] = image_features.pixel_values return BatchFeature(data=data, tensor_type=return_tensors) def post_process_object_detection(self, *args, **kwargs): """ This method forwards all its arguments to [`Owlv2ImageProcessor.post_process_object_detection`]. Please refer to the docstring of this method for more information. """ warnings.warn('`post_process_object_detection` method is deprecated for OwlVitProcessor and will be removed in v5. Use `post_process_grounded_object_detection` instead.', FutureWarning) return self.image_processor.post_process_object_detection(*args, **kwargs) def post_process_grounded_object_detection(self, outputs: 'Owlv2ObjectDetectionOutput', threshold: float=0.1, target_sizes: Optional[Union[TensorType, list[tuple]]]=None, text_labels: Optional[list[list[str]]]=None): """ Converts the raw output of [`Owlv2ForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Args: outputs ([`Owlv2ObjectDetectionOutput`]): Raw outputs of the model. 
threshold (`float`, *optional*, defaults to 0.1): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. If unset, predictions will not be resized. text_labels (`list[list[str]]`, *optional*): List of lists of text labels for each image in the batch. If unset, "text_labels" in output will be set to `None`. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. - "labels": Indexes of the classes predicted by the model on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. - "text_labels": The text labels for each predicted bounding box on the image. """ output = self.image_processor.post_process_object_detection(outputs=outputs, threshold=threshold, target_sizes=target_sizes) if text_labels is not None and len(text_labels) != len(output): raise ValueError('Make sure that you pass in as many lists of text labels as images') if text_labels is not None: for image_output, image_text_labels in zip(output, text_labels): object_text_labels = [image_text_labels[i] for i in image_output['labels']] image_output['text_labels'] = object_text_labels else: for image_output in output: image_output['text_labels'] = None return output def post_process_image_guided_detection(self, outputs: 'Owlv2ImageGuidedObjectDetectionOutput', threshold: float=0.0, nms_threshold: float=0.3, target_sizes: Optional[Union[TensorType, list[tuple]]]=None): """ Converts the output of [`Owlv2ForObjectDetection.image_guided_detection`] into the format expected by the COCO api. Args: outputs ([`Owlv2ImageGuidedObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.0): Minimum confidence threshold to use to filter out predicted boxes. nms_threshold (`float`, *optional*, defaults to 0.3): IoU threshold for non-maximum suppression of overlapping boxes. target_sizes (`torch.Tensor`, *optional*): Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to None, predictions will not be unnormalized. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. - "labels": Set to `None`. """ return self.image_processor.post_process_image_guided_detection(outputs=outputs, threshold=threshold, nms_threshold=nms_threshold, target_sizes=target_sizes)
class Owlv2Processor(ProcessorMixin): ''' Constructs an Owlv2 processor which wraps [`Owlv2ImageProcessor`]/[`Owlv2ImageProcessorFast`] and [`CLIPTokenizer`]/[`CLIPTokenizerFast`] into a single processor that inherits both the image processor and tokenizer functionalities. See the [`~OwlViTProcessor.__call__`] and [`~OwlViTProcessor.decode`] for more information. Args: image_processor ([`Owlv2ImageProcessor`, `Owlv2ImageProcessorFast`]): The image processor is a required input. tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`]): The tokenizer is a required input. ''' def __init__(self, image_processor, tokenizer, **kwargs): pass def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[Owlv2ProcessorKwargs]) -> BatchFeature: ''' Main method to prepare for the model one or several text(s) and image(s). This method forwards the `text` and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode: the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). query_images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The query image to be prepared, one query image is expected per target image to be queried. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. - **query_pixel_values** -- Pixel values of the query images to be fed to a model. Returned when `query_images` is not `None`. ''' pass def post_process_object_detection(self, *args, **kwargs): ''' This method forwards all its arguments to [`Owlv2ImageProcessor.post_process_object_detection`]. Please refer to the docstring of this method for more information. 
''' pass def post_process_grounded_object_detection(self, outputs: 'Owlv2ObjectDetectionOutput', threshold: float=0.1, target_sizes: Optional[Union[TensorType, list[tuple]]]=None, text_labels: Optional[list[list[str]]]=None): ''' Converts the raw output of [`Owlv2ForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Args: outputs ([`Owlv2ObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.1): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. If unset, predictions will not be resized. text_labels (`list[list[str]]`, *optional*): List of lists of text labels for each image in the batch. If unset, "text_labels" in output will be set to `None`. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. - "labels": Indexes of the classes predicted by the model on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. - "text_labels": The text labels for each predicted bounding box on the image. ''' pass def post_process_image_guided_detection(self, outputs: 'Owlv2ImageGuidedObjectDetectionOutput', threshold: float=0.0, nms_threshold: float=0.3, target_sizes: Optional[Union[TensorType, list[tuple]]]=None): ''' Converts the output of [`Owlv2ForObjectDetection.image_guided_detection`] into the format expected by the COCO api. Args: outputs ([`Owlv2ImageGuidedObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.0): Minimum confidence threshold to use to filter out predicted boxes. nms_threshold (`float`, *optional*, defaults to 0.3): IoU threshold for non-maximum suppression of overlapping boxes. target_sizes (`torch.Tensor`, *optional*): Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to None, predictions will not be unnormalized. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. - "labels": Set to `None`. ''' pass
6
5
31
4
14
13
4
1.02
1
9
1
0
7
0
7
24
243
35
103
36
80
105
69
24
58
16
2
4
26
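In `Owlv2Processor.__call__` above, nested text queries are padded with `" "` so every image carries the same number of queries before tokenization and concatenation. Just that padding step, as a sketch:

```python
def pad_text_queries(text: list[list[str]]) -> list[list[str]]:
    # Shorter query lists are padded with " " up to the longest list,
    # mirroring the recorded __call__ logic.
    max_num_queries = max(len(queries) for queries in text)
    return [queries + [" "] * (max_num_queries - len(queries)) for queries in text]

print(pad_text_queries([["a photo of a cat", "a photo of a dog"], ["photo of an astronaut"]]))
# [['a photo of a cat', 'a photo of a dog'], ['photo of an astronaut', ' ']]
```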
4,351
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/configuration_owlvit.py
transformers.models.owlvit.configuration_owlvit.OwlViTConfig
from ...configuration_utils import PretrainedConfig class OwlViTConfig(PretrainedConfig): """ [`OwlViTConfig`] is the configuration class to store the configuration of an [`OwlViTModel`]. It is used to instantiate an OWL-ViT model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWL-ViT [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`OwlViTTextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`OwlViTVisionConfig`]. projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original OWL-ViT implementation. return_dict (`bool`, *optional*, defaults to `True`): Whether or not the model should return a dictionary. If `False`, returns a tuple. kwargs (*optional*): Dictionary of keyword arguments. """ model_type = 'owlvit' sub_configs = {'text_config': OwlViTTextConfig, 'vision_config': OwlViTVisionConfig} def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, return_dict=True, **kwargs): super().__init__(**kwargs) if text_config is None: text_config = {} logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.') if vision_config is None: vision_config = {} logger.info('vision_config is None. initializing the OwlViTVisionConfig with default values.') self.text_config = OwlViTTextConfig(**text_config) self.vision_config = OwlViTVisionConfig(**vision_config) self.projection_dim = projection_dim self.logit_scale_init_value = logit_scale_init_value self.return_dict = return_dict self.initializer_factor = 1.0 @classmethod def from_text_vision_configs(cls, text_config: dict, vision_config: dict, **kwargs): """ Instantiate a [`OwlViTConfig`] (or a derived class) from owlvit text model configuration and owlvit vision model configuration. Returns: [`OwlViTConfig`]: An instance of a configuration object """ config_dict = {} config_dict['text_config'] = text_config config_dict['vision_config'] = vision_config return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig): ''' [`OwlViTConfig`] is the configuration class to store the configuration of an [`OwlViTModel`]. It is used to instantiate an OWL-ViT model according to the specified arguments, defining the text model and vision model configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWL-ViT [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: text_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`OwlViTTextConfig`]. vision_config (`dict`, *optional*): Dictionary of configuration options used to initialize [`OwlViTVisionConfig`]. projection_dim (`int`, *optional*, defaults to 512): Dimensionality of text and vision projection layers. logit_scale_init_value (`float`, *optional*, defaults to 2.6592): The initial value of the *logit_scale* parameter. Default is used as per the original OWL-ViT implementation. return_dict (`bool`, *optional*, defaults to `True`): Whether or not the model should return a dictionary. If `False`, returns a tuple. kwargs (*optional*): Dictionary of keyword arguments. ''' def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, return_dict=True, **kwargs): pass @classmethod def from_text_vision_configs(cls, text_config: dict, vision_config: dict, **kwargs): ''' Instantiate a [`OwlViTConfig`] (or a derived class) from owlvit text model configuration and owlvit vision model configuration. Returns: [`OwlViTConfig`]: An instance of a configuration object ''' pass
4
2
20
3
14
3
2
0.9
1
3
2
0
1
6
2
2
70
11
31
21
19
28
22
12
19
3
1
1
4
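`OwlViTConfig.from_text_vision_configs` above simply packs the two sub-config dicts and defers to `from_dict`. A usage sketch, assuming `transformers` is installed; the override values are arbitrary:

```python
from transformers import OwlViTConfig

config = OwlViTConfig.from_text_vision_configs(
    text_config={"hidden_size": 512, "num_hidden_layers": 12},
    vision_config={"hidden_size": 768, "patch_size": 32},
)
print(config.text_config.hidden_size, config.vision_config.patch_size)  # 512 32
```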
4,352
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/configuration_owlvit.py
transformers.models.owlvit.configuration_owlvit.OwlViTOnnxConfig
from ...onnx import OnnxConfig from typing import TYPE_CHECKING, Any from collections.abc import Mapping from collections import OrderedDict class OwlViTOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('attention_mask', {0: 'batch', 1: 'sequence'})]) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('logits_per_image', {0: 'batch'}), ('logits_per_text', {0: 'batch'}), ('text_embeds', {0: 'batch'}), ('image_embeds', {0: 'batch'})]) @property def atol_for_validation(self) -> float: return 0.0001 def generate_dummy_inputs(self, processor: 'ProcessorMixin', batch_size: int=-1, seq_length: int=-1) -> Mapping[str, Any]: text_input_dict = super().generate_dummy_inputs(processor.tokenizer, batch_size=batch_size, seq_length=seq_length) image_input_dict = super().generate_dummy_inputs(processor.image_processor, batch_size=batch_size) return {**text_input_dict, **image_input_dict} @property def default_onnx_opset(self) -> int: return 14
class OwlViTOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: pass @property def outputs(self) -> Mapping[str, Mapping[int, str]]: pass @property def atol_for_validation(self) -> float: pass def generate_dummy_inputs(self, processor: 'ProcessorMixin', batch_size: int=-1, seq_length: int=-1) -> Mapping[str, Any]: pass @property def default_onnx_opset(self) -> int: pass
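A hedged sketch of how the ONNX config above can be inspected. It assumes the base `OnnxConfig` constructor accepts a model config as its first argument; the import path is taken from the record's file path.

```python
from transformers import OwlViTConfig
from transformers.models.owlvit.configuration_owlvit import OwlViTOnnxConfig

onnx_config = OwlViTOnnxConfig(OwlViTConfig())
print(dict(onnx_config.inputs))         # dynamic axes for input_ids, pixel_values, attention_mask
print(dict(onnx_config.outputs))        # dynamic batch axis for the four output tensors
print(onnx_config.atol_for_validation)  # 0.0001
print(onnx_config.default_onnx_opset)   # 14
```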
10
0
7
0
7
0
1
0
1
6
0
0
5
0
5
5
44
4
40
18
24
0
13
8
7
1
1
0
5
4,353
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/configuration_owlvit.py
transformers.models.owlvit.configuration_owlvit.OwlViTTextConfig
from ...configuration_utils import PretrainedConfig class OwlViTTextConfig(PretrainedConfig): """ This is the configuration class to store the configuration of an [`OwlViTTextModel`]. It is used to instantiate an OwlViT text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the OwlViT [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 49408): Vocabulary size of the OWL-ViT text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`OwlViTTextModel`]. hidden_size (`int`, *optional*, defaults to 512): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 16): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). pad_token_id (`int`, *optional*, defaults to 0): The id of the padding token in the input sequences. bos_token_id (`int`, *optional*, defaults to 49406): The id of the beginning-of-sequence token in the input sequences. eos_token_id (`int`, *optional*, defaults to 49407): The id of the end-of-sequence token in the input sequences. 
Example: ```python >>> from transformers import OwlViTTextConfig, OwlViTTextModel >>> # Initializing a OwlViTTextModel with google/owlvit-base-patch32 style configuration >>> configuration = OwlViTTextConfig() >>> # Initializing a OwlViTTextConfig from the google/owlvit-base-patch32 style configuration >>> model = OwlViTTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'owlvit_text_model' base_config_key = 'text_config' def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs): super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.max_position_embeddings = max_position_embeddings self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor
class OwlViTTextConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of an [`OwlViTTextModel`]. It is used to instantiate an OwlViT text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the OwlViT [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 49408): Vocabulary size of the OWL-ViT text model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`OwlViTTextModel`]. hidden_size (`int`, *optional*, defaults to 512): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 2048): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. max_position_embeddings (`int`, *optional*, defaults to 16): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). pad_token_id (`int`, *optional*, defaults to 0): The id of the padding token in the input sequences. bos_token_id (`int`, *optional*, defaults to 49406): The id of the beginning-of-sequence token in the input sequences. eos_token_id (`int`, *optional*, defaults to 49407): The id of the end-of-sequence token in the input sequences. Example: ```python >>> from transformers import OwlViTTextConfig, OwlViTTextModel >>> # Initializing a OwlViTTextModel with google/owlvit-base-patch32 style configuration >>> configuration = OwlViTTextConfig() >>> # Initializing a OwlViTTextConfig from the google/owlvit-base-patch32 style configuration >>> model = OwlViTTextModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs): pass
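The docstring above already demonstrates plain construction, so the sketch below instead illustrates what `base_config_key = 'text_config'` enables: loading the text sub-config straight from the composite checkpoint. Hedged: this needs network access to the Hugging Face Hub.

```python
from transformers import OwlViTTextConfig

# `base_config_key` routes loading to the nested `text_config` entry of the
# composite google/owlvit-base-patch32 configuration on the Hub.
text_config = OwlViTTextConfig.from_pretrained("google/owlvit-base-patch32")
print(text_config.vocab_size)               # 49408
print(text_config.max_position_embeddings)  # 16
```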
2
1
31
1
30
0
1
1.52
1
1
0
0
1
11
1
1
94
11
33
32
14
50
16
15
14
1
1
0
1
4,354
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/configuration_owlvit.py
transformers.models.owlvit.configuration_owlvit.OwlViTVisionConfig
from ...configuration_utils import PretrainedConfig class OwlViTVisionConfig(PretrainedConfig): """ This is the configuration class to store the configuration of an [`OwlViTVisionModel`]. It is used to instantiate an OWL-ViT image encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWL-ViT [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input images. image_size (`int`, *optional*, defaults to 768): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 32): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). Example: ```python >>> from transformers import OwlViTVisionConfig, OwlViTVisionModel >>> # Initializing a OwlViTVisionModel with google/owlvit-base-patch32 style configuration >>> configuration = OwlViTVisionConfig() >>> # Initializing a OwlViTVisionModel model from the google/owlvit-base-patch32 style configuration >>> model = OwlViTVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'owlvit_vision_model' base_config_key = 'vision_config' def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=768, patch_size=32, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.hidden_act = hidden_act self.layer_norm_eps = layer_norm_eps self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.initializer_factor = initializer_factor
class OwlViTVisionConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of an [`OwlViTVisionModel`]. It is used to instantiate an OWL-ViT image encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the OWL-ViT [google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input images. image_size (`int`, *optional*, defaults to 768): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 32): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. initializer_factor (`float`, *optional*, defaults to 1.0): A factor for initializing all weight matrices (should be kept to 1, used internally for initialization testing). Example: ```python >>> from transformers import OwlViTVisionConfig, OwlViTVisionModel >>> # Initializing a OwlViTVisionModel with google/owlvit-base-patch32 style configuration >>> configuration = OwlViTVisionConfig() >>> # Initializing a OwlViTVisionModel model from the google/owlvit-base-patch32 style configuration >>> model = OwlViTVisionModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=768, patch_size=32, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs): pass
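A small worked example of the geometry implied by the defaults above. The `+ 1` for a prepended class embedding is an assumption carried over from the CLIP-style vision embedding, not something stated in this record.

```python
from transformers import OwlViTVisionConfig

config = OwlViTVisionConfig()
num_patches = (config.image_size // config.patch_size) ** 2
print(num_patches)      # (768 // 32) ** 2 == 576 image patches
print(num_patches + 1)  # 577 positions if a class embedding is prepended (assumed)
```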
2
1
30
1
29
0
1
1.38
1
1
0
0
1
12
1
1
86
10
32
31
15
44
17
16
15
1
1
0
1
4,355
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/feature_extraction_owlvit.py
transformers.models.owlvit.feature_extraction_owlvit.OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor import warnings from ...utils.import_utils import requires @requires(backends=('vision',)) class OwlViTFeatureExtractor(OwlViTImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn('The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use OwlViTImageProcessor instead.', FutureWarning) super().__init__(*args, **kwargs)
@requires(backends=('vision',)) class OwlViTFeatureExtractor(OwlViTImageProcessor): def __init__(self, *args, **kwargs) -> None: pass
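A quick sketch confirming the deprecation path of the wrapper above. Hedged: it assumes Pillow is installed, since the class is gated on the vision backend.

```python
import warnings
from transformers import OwlViTFeatureExtractor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = OwlViTFeatureExtractor()  # emits the FutureWarning from __init__

assert any(issubclass(w.category, FutureWarning) for w in caught)
# Apart from the warning, the instance behaves exactly like OwlViTImageProcessor.
```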
3
0
7
0
7
0
1
0
1
2
0
0
1
0
1
29
8
0
8
2
6
0
4
2
2
1
4
0
1
4,356
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/image_processing_owlvit.py
transformers.models.owlvit.image_processing_owlvit.OwlViTImageProcessor
import numpy as np import torch from ...utils import TensorType, filter_out_non_signature_kwargs, is_torch_available, logging import warnings from ...utils.import_utils import requires from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from typing import TYPE_CHECKING, Optional, Union from ...image_transforms import center_crop, center_to_corners_format, rescale, resize, to_channel_dimension_format from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, infer_channel_dimension_format, is_scaled_image, make_flat_list_of_images, to_numpy_array, valid_images, validate_preprocess_arguments logger = logging.get_logger(__name__) @requires(backends=('vision',)) class OwlViTImageProcessor(BaseImageProcessor): """ Constructs an OWL-ViT image processor. This image processor inherits from [`ImageProcessingMixin`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the shorter edge of the input to a certain `size`. size (`dict[str, int]`, *optional*, defaults to {"height": 768, "width": 768}): The size to use for resizing the image. Only has an effect if `do_resize` is set to `True`. If `size` is a sequence like (h, w), output size will be matched to this. If `size` is an int, then image will be resized to (size, size). resample (`int`, *optional*, defaults to `Resampling.BICUBIC`): An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`, `PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`, `PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `False`): Whether to crop the input at the center. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. crop_size (`int`, *optional*, defaults to {"height": 768, "width": 768}): The size to use for center cropping the image. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the input by a certain factor. rescale_factor (`float`, *optional*, defaults to `1/255`): The factor to use for rescaling the image. Only has an effect if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with `image_mean` and `image_std`. Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`. image_mean (`list[int]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): The sequence of means for each channel, to be used when normalizing images. image_std (`list[int]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): The sequence of standard deviations for each channel, to be used when normalizing images. 
""" model_input_names = ['pixel_values'] def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=False, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs): size = size if size is not None else {'height': 768, 'width': 768} size = get_size_dict(size, default_to_square=True) crop_size = crop_size if crop_size is not None else {'height': 768, 'width': 768} crop_size = get_size_dict(crop_size, default_to_square=True) if 'rescale' in kwargs: rescale_val = kwargs.pop('rescale') kwargs['do_rescale'] = rescale_val super().__init__(**kwargs) self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: """ Resize an image to a certain size. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): The size to resize the image to. Must contain height and width keys. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): The resampling filter to use when resizing the input. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ size = get_size_dict(size, default_to_square=True) if 'height' not in size or 'width' not in size: raise ValueError('size dictionary must contain height and width keys') return resize(image, (size['height'], size['width']), resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs) def center_crop(self, image: np.ndarray, crop_size: dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: """ Center crop an image to a certain size. Args: image (`np.ndarray`): Image to center crop. crop_size (`dict[str, int]`): The size to center crop the image to. Must contain height and width keys. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ crop_size = get_size_dict(crop_size, default_to_square=True) if 'height' not in crop_size or 'width' not in crop_size: raise ValueError('crop_size dictionary must contain height and width keys') return center_crop(image, (crop_size['height'], crop_size['width']), data_format=data_format, input_data_format=input_data_format, **kwargs) def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: """ Rescale the image by the given factor. 
image = image * rescale_factor. Args: image (`np.ndarray`): Image to rescale. rescale_factor (`float`): The value to use for rescaling. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. If unset, is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. """ return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format) @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[TensorType, str]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature: """ Prepares an image or batch of images for the model. Args: images (`ImageInput`): The image or batch of images to be prepared. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether or not to resize the input. If `True`, will resize the input to the size specified by `size`. size (`dict[str, int]`, *optional*, defaults to `self.size`): The size to resize the input to. Only has an effect if `do_resize` is set to `True`. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): The resampling filter to use when resizing the input. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether or not to center crop the input. If `True`, will center crop the input to the size specified by `crop_size`. crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): The size to center crop the input to. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether or not to rescale the input. If `True`, will rescale the input by dividing it by `rescale_factor`. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): The factor to rescale the input by. Only has an effect if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether or not to normalize the input. If `True`, will normalize the input by subtracting `image_mean` and dividing by `image_std`. image_mean (`Union[float, list[float]]`, *optional*, defaults to `self.image_mean`): The mean to subtract from the input when normalizing. Only has an effect if `do_normalize` is set to `True`. 
image_std (`Union[float, list[float]]`, *optional*, defaults to `self.image_std`): The standard deviation to divide the input by when normalizing. Only has an effect if `do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: defaults to the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor') validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample) images = [to_numpy_array(image) for image in images] if do_rescale and is_scaled_image(images[0]): logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')
 if input_data_format is None: input_data_format = infer_channel_dimension_format(images[0]) if do_resize: images = [self.resize(image, size=size, resample=resample, input_data_format=input_data_format) for image in images] if do_center_crop: images = [self.center_crop(image, crop_size=crop_size, input_data_format=input_data_format) for image in images] if do_rescale: images = [self.rescale(image, rescale_factor=rescale_factor, input_data_format=input_data_format) for image in images] if do_normalize: images = [self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images] images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images] encoded_inputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors) return encoded_inputs def post_process(self, outputs, target_sizes): """ Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Args: outputs ([`OwlViTObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. """ warnings.warn('`post_process` is deprecated and will be removed in v5 of Transformers, please use `post_process_object_detection` instead, with `threshold=0.` for equivalent results.', FutureWarning) logits, boxes = (outputs.logits, outputs.pred_boxes) if len(logits) != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') if target_sizes.shape[1] != 2: raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch') probs = torch.max(logits, dim=-1) scores = torch.sigmoid(probs.values) labels = probs.indices boxes = center_to_corners_format(boxes) img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] return results def post_process_object_detection(self, outputs: 'OwlViTObjectDetectionOutput', threshold: float=0.1, target_sizes: Optional[Union[TensorType, list[tuple]]]=None): """ Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Args: outputs ([`OwlViTObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.1): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. If unset, predictions will not be resized. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. 
- "labels": Indexes of the classes predicted by the model on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. """ batch_logits, batch_boxes = (outputs.logits, outputs.pred_boxes) batch_size = len(batch_logits) if target_sizes is not None and len(target_sizes) != batch_size: raise ValueError('Make sure that you pass in as many target sizes as images') batch_class_logits = torch.max(batch_logits, dim=-1) batch_scores = torch.sigmoid(batch_class_logits.values) batch_labels = batch_class_logits.indices batch_boxes = center_to_corners_format(batch_boxes) if target_sizes is not None: batch_boxes = _scale_boxes(batch_boxes, target_sizes) results = [] for scores, labels, boxes in zip(batch_scores, batch_labels, batch_boxes): keep = scores > threshold scores = scores[keep] labels = labels[keep] boxes = boxes[keep] results.append({'scores': scores, 'labels': labels, 'boxes': boxes}) return results def post_process_image_guided_detection(self, outputs, threshold=0.0, nms_threshold=0.3, target_sizes=None): """ Converts the output of [`OwlViTForObjectDetection.image_guided_detection`] into the format expected by the COCO api. Args: outputs ([`OwlViTImageGuidedObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.0): Minimum confidence threshold to use to filter out predicted boxes. nms_threshold (`float`, *optional*, defaults to 0.3): IoU threshold for non-maximum suppression of overlapping boxes. target_sizes (`torch.Tensor`, *optional*): Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to None, predictions will not be unnormalized. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. All labels are set to None as `OwlViTForObjectDetection.image_guided_detection` perform one-shot object detection. """ logits, target_boxes = (outputs.logits, outputs.target_pred_boxes) if target_sizes is not None and len(logits) != len(target_sizes): raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits') if target_sizes is not None and target_sizes.shape[1] != 2: raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch') probs = torch.max(logits, dim=-1) scores = torch.sigmoid(probs.values) target_boxes = center_to_corners_format(target_boxes) if nms_threshold < 1.0: for idx in range(target_boxes.shape[0]): for i in torch.argsort(-scores[idx]): if not scores[idx][i]: continue ious = box_iou(target_boxes[idx][i, :].unsqueeze(0), target_boxes[idx])[0][0] ious[i] = -1.0 scores[idx][ious > nms_threshold] = 0.0 if target_sizes is not None: target_boxes = _scale_boxes(target_boxes, target_sizes) results = [] alphas = torch.zeros_like(scores) for idx in range(target_boxes.shape[0]): query_scores = scores[idx] if not query_scores.nonzero().numel(): continue query_scores[query_scores < threshold] = 0.0 max_score = torch.max(query_scores) + 1e-06 query_alphas = (query_scores - max_score * 0.1) / (max_score * 0.9) query_alphas = torch.clip(query_alphas, 0.0, 1.0) alphas[idx] = query_alphas mask = alphas[idx] > 0 box_scores = alphas[idx][mask] boxes = target_boxes[idx][mask] results.append({'scores': box_scores, 'labels': None, 'boxes': boxes}) return results
@requires(backends=('vision',)) class OwlViTImageProcessor(BaseImageProcessor): ''' Constructs an OWL-ViT image processor. This image processor inherits from [`ImageProcessingMixin`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the shorter edge of the input to a certain `size`. size (`dict[str, int]`, *optional*, defaults to {"height": 768, "width": 768}): The size to use for resizing the image. Only has an effect if `do_resize` is set to `True`. If `size` is a sequence like (h, w), output size will be matched to this. If `size` is an int, then image will be resized to (size, size). resample (`int`, *optional*, defaults to `Resampling.BICUBIC`): An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`, `PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`, `PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `False`): Whether to crop the input at the center. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. crop_size (`int`, *optional*, defaults to {"height": 768, "width": 768}): The size to use for center cropping the image. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the input by a certain factor. rescale_factor (`float`, *optional*, defaults to `1/255`): The factor to use for rescaling the image. Only has an effect if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with `image_mean` and `image_std`. Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`. image_mean (`list[int]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`): The sequence of means for each channel, to be used when normalizing images. image_std (`list[int]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`): The sequence of standard deviations for each channel, to be used when normalizing images. ''' def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=False, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs): pass def resize(self, image: np.ndarray, size: dict[str, int], resample: PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: ''' Resize an image to a certain size. Args: image (`np.ndarray`): Image to resize. size (`dict[str, int]`): The size to resize the image to. Must contain height and width keys. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): The resampling filter to use when resizing the input. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. 
''' pass def center_crop(self, image: np.ndarray, crop_size: dict[str, int], data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray: ''' Center crop an image to a certain size. Args: image (`np.ndarray`): Image to center crop. crop_size (`dict[str, int]`): The size to center crop the image to. Must contain height and width keys. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. ''' pass def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: ''' Rescale the image by the given factor. image = image * rescale_factor. Args: image (`np.ndarray`): Image to rescale. rescale_factor (`float`): The value to use for rescaling. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. If unset, is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. ''' pass @filter_out_non_signature_kwargs() def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, list[float]]]=None, image_std: Optional[Union[float, list[float]]]=None, return_tensors: Optional[Union[TensorType, str]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature: ''' Prepares an image or batch of images for the model. Args: images (`ImageInput`): The image or batch of images to be prepared. Expects a single or batch of images with pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether or not to resize the input. If `True`, will resize the input to the size specified by `size`. size (`dict[str, int]`, *optional*, defaults to `self.size`): The size to resize the input to. Only has an effect if `do_resize` is set to `True`. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): The resampling filter to use when resizing the input. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether or not to center crop the input. If `True`, will center crop the input to the size specified by `crop_size`. 
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`): The size to center crop the input to. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether or not to rescale the input. If `True`, will rescale the input by dividing it by `rescale_factor`. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): The factor to rescale the input by. Only has an effect if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether or not to normalize the input. If `True`, will normalize the input by subtracting `image_mean` and dividing by `image_std`. image_mean (`Union[float, list[float]]`, *optional*, defaults to `self.image_mean`): The mean to subtract from the input when normalizing. Only has an effect if `do_normalize` is set to `True`. image_std (`Union[float, list[float]]`, *optional*, defaults to `self.image_std`): The standard deviation to divide the input by when normalizing. Only has an effect if `do_normalize` is set to `True`. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: defaults to the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. ''' pass def post_process(self, outputs, target_sizes): ''' Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Args: outputs ([`OwlViTObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. ''' pass def post_process_object_detection(self, outputs: 'OwlViTObjectDetectionOutput', threshold: float=0.1, target_sizes: Optional[Union[TensorType, list[tuple]]]=None): ''' Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Args: outputs ([`OwlViTObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.1): Score threshold to keep object detection predictions. 
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. If unset, predictions will not be resized. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. - "labels": Indexes of the classes predicted by the model on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. ''' pass def post_process_image_guided_detection(self, outputs, threshold=0.0, nms_threshold=0.3, target_sizes=None): ''' Converts the output of [`OwlViTForObjectDetection.image_guided_detection`] into the format expected by the COCO api. Args: outputs ([`OwlViTImageGuidedObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.0): Minimum confidence threshold to use to filter out predicted boxes. nms_threshold (`float`, *optional*, defaults to 0.3): IoU threshold for non-maximum suppression of overlapping boxes. target_sizes (`torch.Tensor`, *optional*): Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to None, predictions will not be unnormalized. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. All labels are set to None as `OwlViTForObjectDetection.image_guided_detection` perform one-shot object detection. ''' pass
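A hedged end-to-end sketch of the processor above on dummy data. It assumes Pillow and `torch` are installed; the `SimpleNamespace` is a stand-in for the real `OwlViTObjectDetectionOutput`, introduced here purely for illustration.

```python
import numpy as np
import torch
from types import SimpleNamespace
from transformers import OwlViTImageProcessor

processor = OwlViTImageProcessor()

# Dummy uint8 RGB image in (height, width, channels) layout.
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 768, 768])

# post_process_object_detection only reads `.logits` and `.pred_boxes`,
# so a namespace with random tensors is enough to exercise it here.
fake_outputs = SimpleNamespace(
    logits=torch.randn(1, 10, 3),     # (batch, num_boxes, num_text_queries)
    pred_boxes=torch.rand(1, 10, 4),  # normalized (cx, cy, w, h)
)
results = processor.post_process_object_detection(
    fake_outputs, threshold=0.1, target_sizes=torch.tensor([[480, 640]])
)
print(results[0]["scores"].shape, results[0]["boxes"].shape)
```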
11
8
56
6
29
21
6
0.85
1
11
2
1
8
10
8
28
498
61
237
107
172
201
134
51
125
18
3
4
46
4,357
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTAttention
from typing import Any, Optional, Union import torch from torch import Tensor, nn class OwlViTAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') self.scale = self.head_dim ** (-0.5) self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError(f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}') if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {causal_attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError(f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}') attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_probs = attn_probs.to(value_states.dtype) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError(f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}') attn_output = attn_output.view(bsz, self.num_heads, 
tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights_reshaped)
class OwlViTAttention(nn.Module): '''Multi-headed attention from 'Attention Is All You Need' paper''' def __init__(self, config): pass def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): pass def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]: '''Input shape: Batch x Time x Channel''' pass
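A minimal forward-pass sketch of the attention module above. The `SimpleNamespace` config is a stand-in carrying only the three fields the constructor reads; the import path follows the record's file path.

```python
import torch
from types import SimpleNamespace
from transformers.models.owlvit.modeling_owlvit import OwlViTAttention

# Stand-in config with just the attributes the constructor touches.
config = SimpleNamespace(hidden_size=512, num_attention_heads=8, attention_dropout=0.0)
attn = OwlViTAttention(config)

hidden = torch.randn(2, 16, 512)  # (batch, seq_len, embed_dim)
out, weights = attn(hidden, output_attentions=True)
print(out.shape)      # torch.Size([2, 16, 512])
print(weights.shape)  # torch.Size([2, 8, 16, 16])
```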
4
2
33
6
25
3
4
0.12
1
5
0
0
3
10
3
13
105
20
76
30
66
9
55
24
51
8
1
2
11
4,358
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTBoxPredictionHead
from torch import Tensor, nn from .configuration_owlvit import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig import torch class OwlViTBoxPredictionHead(nn.Module): def __init__(self, config: OwlViTConfig, out_dim: int=4): super().__init__() width = config.vision_config.hidden_size self.dense0 = nn.Linear(width, width) self.dense1 = nn.Linear(width, width) self.gelu = nn.GELU() self.dense2 = nn.Linear(width, out_dim) def forward(self, image_features: torch.Tensor) -> torch.FloatTensor: output = self.dense0(image_features) output = self.gelu(output) output = self.dense1(output) output = self.gelu(output) output = self.dense2(output) return output
class OwlViTBoxPredictionHead(nn.Module): def __init__(self, config: OwlViTConfig, out_dim: int=4): pass def forward(self, image_features: torch.Tensor) -> torch.FloatTensor: pass
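A small shape check for the box head above. Hedged: the 576-patch count assumes the default 768-pixel image and 32-pixel patch settings from the sibling config records.

```python
import torch
from transformers import OwlViTConfig
from transformers.models.owlvit.modeling_owlvit import OwlViTBoxPredictionHead

head = OwlViTBoxPredictionHead(OwlViTConfig())  # width = vision hidden_size = 768
features = torch.randn(2, 576, 768)             # (batch, num_patches, width)
boxes = head(features)
print(boxes.shape)                              # torch.Size([2, 576, 4])
```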
3
0
8
1
7
0
1
0
1
4
1
0
2
4
2
12
17
2
15
9
12
0
15
9
12
1
1
0
2
4,359
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTClassPredictionHead
from .configuration_owlvit import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig import torch from torch import Tensor, nn from typing import Any, Optional, Union class OwlViTClassPredictionHead(nn.Module): def __init__(self, config: OwlViTConfig): super().__init__() out_dim = config.text_config.hidden_size self.query_dim = config.vision_config.hidden_size self.dense0 = nn.Linear(self.query_dim, out_dim) self.logit_shift = nn.Linear(self.query_dim, 1) self.logit_scale = nn.Linear(self.query_dim, 1) self.elu = nn.ELU() def forward(self, image_embeds: torch.FloatTensor, query_embeds: Optional[torch.FloatTensor], query_mask: Optional[torch.Tensor]) -> tuple[torch.FloatTensor]: image_class_embeds = self.dense0(image_embeds) if query_embeds is None: device = image_class_embeds.device batch_size, num_patches = image_class_embeds.shape[:2] pred_logits = torch.zeros((batch_size, num_patches, self.query_dim)).to(device) return (pred_logits, image_class_embeds) image_class_embeds = image_class_embeds / (torch.linalg.norm(image_class_embeds, dim=-1, keepdim=True) + 1e-06) query_embeds = query_embeds / (torch.linalg.norm(query_embeds, dim=-1, keepdim=True) + 1e-06) pred_logits = torch.einsum('...pd,...qd->...pq', image_class_embeds, query_embeds) logit_shift = self.logit_shift(image_embeds) logit_scale = self.logit_scale(image_embeds) logit_scale = self.elu(logit_scale) + 1 pred_logits = (pred_logits + logit_shift) * logit_scale if query_mask is not None: if query_mask.ndim > 1: query_mask = torch.unsqueeze(query_mask, dim=-2) pred_logits = torch.where(query_mask == 0, torch.finfo(pred_logits.dtype).min, pred_logits) pred_logits = pred_logits.to(torch.float32) return (pred_logits, image_class_embeds)
class OwlViTClassPredictionHead(nn.Module): def __init__(self, config: OwlViTConfig): pass def forward(self, image_embeds: torch.FloatTensor, query_embeds: Optional[torch.FloatTensor], query_mask: Optional[torch.Tensor]) -> tuple[torch.FloatTensor]: pass
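A shape sketch for the class head above, with and without text queries. Sizes assume the default 768-d vision and 512-d text widths; note the `query_embeds=None` branch returns zeros sized by `self.query_dim`, exactly as the code shows.

```python
import torch
from transformers import OwlViTConfig
from transformers.models.owlvit.modeling_owlvit import OwlViTClassPredictionHead

head = OwlViTClassPredictionHead(OwlViTConfig())

image_embeds = torch.randn(2, 576, 768)  # (batch, num_patches, vision width)
query_embeds = torch.randn(2, 3, 512)    # (batch, num_queries, text width)
query_mask = torch.ones(2, 3, dtype=torch.long)

logits, class_embeds = head(image_embeds, query_embeds, query_mask)
print(logits.shape)        # torch.Size([2, 576, 3])
print(class_embeds.shape)  # torch.Size([2, 576, 512])

# With query_embeds=None the head short-circuits to zero logits.
zero_logits, _ = head(image_embeds, None, None)
print(zero_logits.shape)   # torch.Size([2, 576, 768]) via zeros(..., self.query_dim)
```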
3
0
22
4
17
2
3
0.09
1
3
1
0
2
5
2
12
46
9
34
20
26
3
29
15
26
4
1
2
5
4,360
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTEncoder
from .configuration_owlvit import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig from typing import Any, Optional, Union import torch from torch import Tensor, nn from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling class OwlViTEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`OwlViTEncoderLayer`]. Args: config: OwlViTConfig """ def __init__(self, config: OwlViTConfig): super().__init__() self.config = config self.layers = nn.ModuleList([OwlViTEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]: """ Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`). attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for encoder_layer in self.layers: if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None)) return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
class OwlViTEncoder(nn.Module):
    '''
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`OwlViTEncoderLayer`].

    Args:
        config: OwlViTConfig
    '''

    def __init__(self, config: OwlViTConfig):
        pass

    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:
        '''
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`).
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        '''
        pass
3
2
39
3
25
11
7
0.56
1
8
3
0
2
2
2
12
87
9
50
18
39
28
26
10
23
12
1
2
13
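The encoder's `forward` collects per-layer hidden states and attention maps into plain tuples: the state is appended before each layer, and the final state once more after the loop. A minimal standalone sketch of that accumulation pattern, using a toy layer in place of `OwlViTEncoderLayer` (the toy layer and tensor sizes are illustrative, not part of the record):

import torch
from torch import nn

class ToyLayer(nn.Module):
    # Stand-in for OwlViTEncoderLayer: returns (hidden_states, attn_weights).
    def forward(self, hidden_states):
        batch, seq_len, _ = hidden_states.shape
        attn_weights = torch.ones(batch, seq_len, seq_len)
        return hidden_states, attn_weights

layers = nn.ModuleList([ToyLayer() for _ in range(3)])
hidden_states = torch.randn(2, 4, 8)  # (batch, seq_len, hidden)
encoder_states, all_attentions = (), ()
for layer in layers:
    encoder_states = encoder_states + (hidden_states,)  # state *before* each layer
    hidden_states, attn = layer(hidden_states)
    all_attentions = all_attentions + (attn,)
encoder_states = encoder_states + (hidden_states,)      # plus the final state
assert len(encoder_states) == 4 and len(all_attentions) == 3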
4,361
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTEncoderLayer
from torch import Tensor, nn
import torch
from ...modeling_layers import GradientCheckpointingLayer
from typing import Any, Optional, Union
from .configuration_owlvit import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

class OwlViTEncoderLayer(GradientCheckpointingLayer):

    def __init__(self, config: OwlViTConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = OwlViTAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = OwlViTMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding
                elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions)
        hidden_states = residual + hidden_states
        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs
class OwlViTEncoderLayer(GradientCheckpointingLayer):

    def __init__(self, config: OwlViTConfig):
        pass

    def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> tuple[torch.FloatTensor]:
        '''
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding
                elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        '''
        pass
3
1
23
3
16
5
2
0.31
1
6
3
0
2
5
2
12
48
6
32
17
23
10
21
11
18
2
1
1
3
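The layer above is a standard pre-LayerNorm Transformer block: normalize, run attention (or the MLP), then add the residual. A minimal sketch of the same residual pattern using only stock `torch.nn` modules (dimensions and module choices here are illustrative, not read from the record):

import torch
from torch import nn

embed_dim = 8
norm1, norm2 = nn.LayerNorm(embed_dim), nn.LayerNorm(embed_dim)
attn = nn.MultiheadAttention(embed_dim, num_heads=2, batch_first=True)
mlp = nn.Sequential(nn.Linear(embed_dim, 16), nn.GELU(), nn.Linear(16, embed_dim))

x = torch.randn(2, 4, embed_dim)
residual = x
h = norm1(x)                      # pre-LN: normalize *before* attention
h, _ = attn(h, h, h)
x = residual + h                  # first residual connection
residual = x
x = residual + mlp(norm2(x))      # second residual connection around the MLP
print(x.shape)                    # torch.Size([2, 4, 8])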
4,362
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTForObjectDetection
from .configuration_owlvit import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int
from typing import Any, Optional, Union
import torch
from torch import Tensor, nn
from functools import lru_cache

class OwlViTForObjectDetection(OwlViTPreTrainedModel):
    config: OwlViTConfig

    def __init__(self, config: OwlViTConfig):
        super().__init__(config)
        self.owlvit = OwlViTModel(config)
        self.class_head = OwlViTClassPredictionHead(config)
        self.box_head = OwlViTBoxPredictionHead(config)
        self.layer_norm = nn.LayerNorm(config.vision_config.hidden_size, eps=config.vision_config.layer_norm_eps)
        self.sigmoid = nn.Sigmoid()
        self.config = config
        self.num_patches_height = self.config.vision_config.image_size // self.config.vision_config.patch_size
        self.num_patches_width = self.config.vision_config.image_size // self.config.vision_config.patch_size
        self.box_bias = self.compute_box_bias(self.num_patches_height, self.num_patches_width)

    @staticmethod
    def normalize_grid_corner_coordinates(num_patches_height: int, num_patches_width: int) -> torch.Tensor:
        x_coordinates = torch.arange(1, num_patches_width + 1, dtype=torch.float32)
        y_coordinates = torch.arange(1, num_patches_height + 1, dtype=torch.float32)
        xx, yy = torch.meshgrid(x_coordinates, y_coordinates, indexing='xy')
        box_coordinates = torch.stack((xx, yy), dim=-1)
        box_coordinates[..., 0] /= num_patches_width
        box_coordinates[..., 1] /= num_patches_height
        box_coordinates = box_coordinates.view(-1, 2)
        return box_coordinates

    @lru_cache(maxsize=2)
    def compute_box_bias(self, num_patches_height: int, num_patches_width: int, feature_map: Optional[torch.FloatTensor]=None) -> torch.Tensor:
        if feature_map is not None:
            raise ValueError('feature_map has been deprecated as an input. Please pass in num_patches instead')
        box_coordinates = self.normalize_grid_corner_coordinates(num_patches_height, num_patches_width)
        box_coordinates = torch.clip(box_coordinates, 0.0, 1.0)
        box_coord_bias = torch.log(box_coordinates + 0.0001) - torch.log1p(-box_coordinates + 0.0001)
        box_size = torch.full_like(box_coord_bias, 1.0)
        box_size[..., 0] /= num_patches_width
        box_size[..., 1] /= num_patches_height
        box_size_bias = torch.log(box_size + 0.0001) - torch.log1p(-box_size + 0.0001)
        box_bias = torch.cat([box_coord_bias, box_size_bias], dim=-1)
        return box_bias

    def box_predictor(self, image_feats: torch.FloatTensor, feature_map: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
        """
        Args:
            image_feats:
                Features extracted from the image, returned by the `image_text_embedder` method.
            feature_map:
                A spatial re-arrangement of image_features, also returned by the `image_text_embedder` method.
            interpolate_pos_encoding:
                Whether to interpolate the pre-trained position encodings.

        Returns:
            pred_boxes:
                List of predicted boxes (cxcywh normalized to 0, 1) nested within a dictionary.
        """
        pred_boxes = self.box_head(image_feats)
        if interpolate_pos_encoding:
            _, num_patches_height, num_patches_width, _ = feature_map.shape
            box_bias = self.compute_box_bias(num_patches_height, num_patches_width)
        else:
            box_bias = self.box_bias
        box_bias = box_bias.to(feature_map.device)
        pred_boxes += box_bias
        pred_boxes = self.sigmoid(pred_boxes)
        return pred_boxes

    def class_predictor(self, image_feats: torch.FloatTensor, query_embeds: Optional[torch.FloatTensor]=None, query_mask: Optional[torch.Tensor]=None) -> tuple[torch.FloatTensor]:
        """
        Args:
            image_feats:
                Features extracted from the `image_text_embedder`.
            query_embeds:
                Text query embeddings.
            query_mask:
                Must be provided with query_embeddings. A mask indicating which query embeddings are valid.
        """
        pred_logits, image_class_embeds = self.class_head(image_feats, query_embeds, query_mask)
        return (pred_logits, image_class_embeds)

    def image_text_embedder(self, input_ids: torch.Tensor, pixel_values: torch.FloatTensor, attention_mask: torch.Tensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> tuple[torch.FloatTensor]:
        outputs = self.owlvit(pixel_values=pixel_values, input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)
        if interpolate_pos_encoding:
            _, _, height, width = pixel_values.shape
            num_patches_height = height // self.config.vision_config.patch_size
            num_patches_width = width // self.config.vision_config.patch_size
        else:
            num_patches_height = self.num_patches_height
            num_patches_width = self.num_patches_width
        last_hidden_state = outputs.vision_model_output[0]
        image_embeds = self.owlvit.vision_model.post_layernorm(last_hidden_state)
        class_token_out = torch.broadcast_to(image_embeds[:, :1, :], image_embeds[:, :-1].shape)
        image_embeds = image_embeds[:, 1:, :] * class_token_out
        image_embeds = self.layer_norm(image_embeds)
        new_size = (image_embeds.shape[0], num_patches_height, num_patches_width, image_embeds.shape[-1])
        image_embeds = image_embeds.reshape(new_size)
        text_embeds = outputs[-4]
        return (text_embeds, image_embeds, outputs)

    def image_embedder(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> tuple[torch.FloatTensor]:
        vision_outputs = self.owlvit.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True)
        if interpolate_pos_encoding:
            _, _, height, width = pixel_values.shape
            num_patches_height = height // self.config.vision_config.patch_size
            num_patches_width = width // self.config.vision_config.patch_size
        else:
            num_patches_height = self.num_patches_height
            num_patches_width = self.num_patches_width
        last_hidden_state = vision_outputs[0]
        image_embeds = self.owlvit.vision_model.post_layernorm(last_hidden_state)
        class_token_out = torch.broadcast_to(image_embeds[:, :1, :], image_embeds[:, :-1].shape)
        image_embeds = image_embeds[:, 1:, :] * class_token_out
        image_embeds = self.layer_norm(image_embeds)
        new_size = (image_embeds.shape[0], num_patches_height, num_patches_width, image_embeds.shape[-1])
        image_embeds = image_embeds.reshape(new_size)
        return (image_embeds, vision_outputs)

    def embed_image_query(self, query_image_features: torch.FloatTensor, query_feature_map: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
        _, class_embeds = self.class_predictor(query_image_features)
        pred_boxes = self.box_predictor(query_image_features, query_feature_map, interpolate_pos_encoding)
        pred_boxes_as_corners = center_to_corners_format(pred_boxes)
        best_class_embeds = []
        best_box_indices = []
        pred_boxes_device = pred_boxes_as_corners.device
        for i in range(query_image_features.shape[0]):
            each_query_box = torch.tensor([[0, 0, 1, 1]], device=pred_boxes_device)
            each_query_pred_boxes = pred_boxes_as_corners[i]
            ious, _ = box_iou(each_query_box, each_query_pred_boxes)
            if torch.all(ious[0] == 0.0):
                ious = generalized_box_iou(each_query_box, each_query_pred_boxes)
            iou_threshold = torch.max(ious) * 0.8
            selected_inds = (ious[0] >= iou_threshold).nonzero()
            if selected_inds.numel():
                selected_embeddings = class_embeds[i][selected_inds.squeeze(1)]
                mean_embeds = torch.mean(class_embeds[i], axis=0)
                mean_sim = torch.einsum('d,id->i', mean_embeds, selected_embeddings)
                best_box_ind = selected_inds[torch.argmin(mean_sim)]
                best_class_embeds.append(class_embeds[i][best_box_ind])
                best_box_indices.append(best_box_ind)
        if best_class_embeds:
            query_embeds = torch.stack(best_class_embeds)
            box_indices = torch.stack(best_box_indices)
        else:
            query_embeds, box_indices = (None, None)
        return (query_embeds, box_indices, pred_boxes)

    @auto_docstring
    def image_guided_detection(self, pixel_values: torch.FloatTensor, query_pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> OwlViTImageGuidedObjectDetectionOutput:
        """
        query_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values of query image(s) to be detected. Pass in one query image per target image.

        Examples:
        ```python
        >>> import requests
        >>> from PIL import Image
        >>> import torch
        >>> from transformers import AutoProcessor, OwlViTForObjectDetection

        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch16")
        >>> model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch16")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> query_url = "http://images.cocodataset.org/val2017/000000001675.jpg"
        >>> query_image = Image.open(requests.get(query_url, stream=True).raw)
        >>> inputs = processor(images=image, query_images=query_image, return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model.image_guided_detection(**inputs)

        >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
        >>> target_sizes = torch.Tensor([image.size[::-1]])
        >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> results = processor.post_process_image_guided_detection(
        ...     outputs=outputs, threshold=0.6, nms_threshold=0.3, target_sizes=target_sizes
        ... )
        >>> i = 0  # Retrieve predictions for the first image
        >>> boxes, scores = results[i]["boxes"], results[i]["scores"]
        >>> for box, score in zip(boxes, scores):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(f"Detected similar object with confidence {round(score.item(), 3)} at location {box}")
        Detected similar object with confidence 0.856 at location [10.94, 50.4, 315.8, 471.39]
        Detected similar object with confidence 1.0 at location [334.84, 25.33, 636.16, 374.71]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        query_feature_map = self.image_embedder(pixel_values=query_pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)[0]
        feature_map, vision_outputs = self.image_embedder(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding)
        batch_size, num_patches_height, num_patches_width, hidden_dim = feature_map.shape
        image_feats = torch.reshape(feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim))
        batch_size, num_patches_height, num_patches_width, hidden_dim = query_feature_map.shape
        query_image_feats = torch.reshape(query_feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim))
        query_embeds, best_box_indices, query_pred_boxes = self.embed_image_query(query_image_feats, query_feature_map, interpolate_pos_encoding)
        pred_logits, class_embeds = self.class_predictor(image_feats=image_feats, query_embeds=query_embeds)
        target_pred_boxes = self.box_predictor(image_feats, feature_map, interpolate_pos_encoding)
        if not return_dict:
            output = (feature_map, query_feature_map, target_pred_boxes, query_pred_boxes, pred_logits, class_embeds, vision_outputs.to_tuple())
            output = tuple((x for x in output if x is not None))
            return output
        return OwlViTImageGuidedObjectDetectionOutput(image_embeds=feature_map, query_image_embeds=query_feature_map, target_pred_boxes=target_pred_boxes, query_pred_boxes=query_pred_boxes, logits=pred_logits, class_embeds=class_embeds, text_model_output=None, vision_model_output=vision_outputs)

    @auto_docstring
    def forward(self, input_ids: torch.Tensor, pixel_values: torch.FloatTensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> OwlViTObjectDetectionOutput:
        """
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`, *optional*):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids).
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the last hidden state. See `text_model_last_hidden_state` and
            `vision_model_last_hidden_state` under returned tensors for more detail.

        Examples:
        ```python
        >>> import requests
        >>> from PIL import Image
        >>> import torch
        >>> from transformers import OwlViTProcessor, OwlViTForObjectDetection

        >>> processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text_labels = [["a photo of a cat", "a photo of a dog"]]
        >>> inputs = processor(text=text_labels, images=image, return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
        >>> target_sizes = torch.tensor([(image.height, image.width)])
        >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> results = processor.post_process_grounded_object_detection(
        ...     outputs=outputs, target_sizes=target_sizes, threshold=0.1, text_labels=text_labels
        ... )
        >>> # Retrieve predictions for the first image for the corresponding text queries
        >>> result = results[0]
        >>> boxes, scores, text_labels = result["boxes"], result["scores"], result["text_labels"]
        >>> for box, score, text_label in zip(boxes, scores, text_labels):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(f"Detected {text_label} with confidence {round(score.item(), 3)} at location {box}")
        Detected a photo of a cat with confidence 0.707 at location [324.97, 20.44, 640.58, 373.29]
        Detected a photo of a cat with confidence 0.717 at location [1.46, 55.26, 315.55, 472.17]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        query_embeds, feature_map, outputs = self.image_text_embedder(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding)
        text_outputs = outputs.text_model_output
        vision_outputs = outputs.vision_model_output
        batch_size, num_patches_height, num_patches_width, hidden_dim = feature_map.shape
        image_feats = torch.reshape(feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim))
        max_text_queries = input_ids.shape[0] // batch_size
        query_embeds = query_embeds.reshape(batch_size, max_text_queries, query_embeds.shape[-1])
        input_ids = input_ids.reshape(batch_size, max_text_queries, input_ids.shape[-1])
        query_mask = input_ids[..., 0] > 0
        pred_logits, class_embeds = self.class_predictor(image_feats, query_embeds, query_mask)
        pred_boxes = self.box_predictor(image_feats, feature_map, interpolate_pos_encoding)
        if not return_dict:
            output = (pred_logits, pred_boxes, query_embeds, feature_map, class_embeds, text_outputs.to_tuple(), vision_outputs.to_tuple())
            output = tuple((x for x in output if x is not None))
            return output
        return OwlViTObjectDetectionOutput(image_embeds=feature_map, text_embeds=query_embeds, pred_boxes=pred_boxes, logits=pred_logits, class_embeds=class_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs)
class OwlViTForObjectDetection(OwlViTPreTrainedModel):

    def __init__(self, config: OwlViTConfig):
        pass

    @staticmethod
    def normalize_grid_corner_coordinates(num_patches_height: int, num_patches_width: int) -> torch.Tensor:
        pass

    @lru_cache(maxsize=2)
    def compute_box_bias(self, num_patches_height: int, num_patches_width: int, feature_map: Optional[torch.FloatTensor]=None) -> torch.Tensor:
        pass

    def box_predictor(self, image_feats: torch.FloatTensor, feature_map: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
        '''
        Args:
            image_feats:
                Features extracted from the image, returned by the `image_text_embedder` method.
            feature_map:
                A spatial re-arrangement of image_features, also returned by the `image_text_embedder` method.
            interpolate_pos_encoding:
                Whether to interpolate the pre-trained position encodings.

        Returns:
            pred_boxes:
                List of predicted boxes (cxcywh normalized to 0, 1) nested within a dictionary.
        '''
        pass

    def class_predictor(self, image_feats: torch.FloatTensor, query_embeds: Optional[torch.FloatTensor]=None, query_mask: Optional[torch.Tensor]=None) -> tuple[torch.FloatTensor]:
        '''
        Args:
            image_feats:
                Features extracted from the `image_text_embedder`.
            query_embeds:
                Text query embeddings.
            query_mask:
                Must be provided with query_embeddings. A mask indicating which query embeddings are valid.
        '''
        pass

    def image_text_embedder(self, input_ids: torch.Tensor, pixel_values: torch.FloatTensor, attention_mask: torch.Tensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> tuple[torch.FloatTensor]:
        pass

    def image_embedder(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> tuple[torch.FloatTensor]:
        pass

    def embed_image_query(self, query_image_features: torch.FloatTensor, query_feature_map: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
        pass

    @auto_docstring
    def image_guided_detection(self, pixel_values: torch.FloatTensor, query_pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> OwlViTImageGuidedObjectDetectionOutput:
        '''
        query_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values of query image(s) to be detected. Pass in one query image per target image.

        Examples:
        ```python
        >>> import requests
        >>> from PIL import Image
        >>> import torch
        >>> from transformers import AutoProcessor, OwlViTForObjectDetection

        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch16")
        >>> model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch16")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> query_url = "http://images.cocodataset.org/val2017/000000001675.jpg"
        >>> query_image = Image.open(requests.get(query_url, stream=True).raw)
        >>> inputs = processor(images=image, query_images=query_image, return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model.image_guided_detection(**inputs)

        >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
        >>> target_sizes = torch.Tensor([image.size[::-1]])
        >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> results = processor.post_process_image_guided_detection(
        ...     outputs=outputs, threshold=0.6, nms_threshold=0.3, target_sizes=target_sizes
        ... )
        >>> i = 0  # Retrieve predictions for the first image
        >>> boxes, scores = results[i]["boxes"], results[i]["scores"]
        >>> for box, score in zip(boxes, scores):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(f"Detected similar object with confidence {round(score.item(), 3)} at location {box}")
        Detected similar object with confidence 0.856 at location [10.94, 50.4, 315.8, 471.39]
        Detected similar object with confidence 1.0 at location [334.84, 25.33, 636.16, 374.71]
        ```'''
        pass

    @auto_docstring
    def forward(self, input_ids: torch.Tensor, pixel_values: torch.FloatTensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> OwlViTObjectDetectionOutput:
        '''
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`, *optional*):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids).
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the last hidden state. See `text_model_last_hidden_state` and
            `vision_model_last_hidden_state` under returned tensors for more detail.

        Examples:
        ```python
        >>> import requests
        >>> from PIL import Image
        >>> import torch
        >>> from transformers import OwlViTProcessor, OwlViTForObjectDetection

        >>> processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text_labels = [["a photo of a cat", "a photo of a dog"]]
        >>> inputs = processor(text=text_labels, images=image, return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
        >>> target_sizes = torch.tensor([(image.height, image.width)])
        >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> results = processor.post_process_grounded_object_detection(
        ...     outputs=outputs, target_sizes=target_sizes, threshold=0.1, text_labels=text_labels
        ... )
        >>> # Retrieve predictions for the first image for the corresponding text queries
        >>> result = results[0]
        >>> boxes, scores, text_labels = result["boxes"], result["scores"], result["text_labels"]
        >>> for box, score, text_label in zip(boxes, scores, text_labels):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(f"Detected {text_label} with confidence {round(score.item(), 3)} at location {box}")
        Detected a photo of a cat with confidence 0.707 at location [324.97, 20.44, 640.58, 373.29]
        Detected a photo of a cat with confidence 0.717 at location [1.46, 55.26, 315.55, 472.17]
        ```'''
        pass
15
4
44
5
27
11
3
0.41
1
13
6
0
9
9
10
11
455
63
278
140
213
114
145
88
134
5
2
2
26
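`compute_box_bias` applies a logit (inverse sigmoid) transform to the normalized patch-grid coordinates, so that after `box_predictor` adds the bias and applies `sigmoid`, a zero raw prediction lands back on the patch position. A small numeric check of that round trip (the grid size is chosen arbitrarily; the epsilon matches the 0.0001 used in the record):

import torch

num_patches = 4
coords = torch.arange(1, num_patches + 1, dtype=torch.float32) / num_patches
coords = torch.clip(coords, 0.0, 1.0)
# Same logit transform as compute_box_bias: log(p) - log(1 - p), with a small epsilon.
bias = torch.log(coords + 1e-4) - torch.log1p(-coords + 1e-4)
# A zero raw prediction plus the bias, squashed by sigmoid, recovers the coordinate.
recovered = torch.sigmoid(torch.zeros_like(bias) + bias)
print(torch.allclose(recovered, coords, atol=1e-3))  # True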
4,363
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTImageGuidedObjectDetectionOutput
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int
import torch
from typing import Any, Optional, Union
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from dataclasses import dataclass

@dataclass
@auto_docstring(custom_intro='\n    Output type of [`OwlViTForObjectDetection.image_guided_detection`].\n    ')
class OwlViTImageGuidedObjectDetectionOutput(ModelOutput):
    """
    logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`):
        Classification logits (including no-object) for all queries.
    image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim)`):
        Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes
        image embeddings for each patch.
    query_image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim)`):
        Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes
        image embeddings for each patch.
    target_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
        Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These
        values are normalized in [0, 1], relative to the size of each individual target image in the batch
        (disregarding possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to
        retrieve the unnormalized bounding boxes.
    query_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
        Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These
        values are normalized in [0, 1], relative to the size of each individual query image in the batch
        (disregarding possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to
        retrieve the unnormalized bounding boxes.
    class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
        Class embeddings of all image patches. OWL-ViT represents images as a set of image patches where the total
        number of patches is (image_size / patch_size)**2.
    text_model_output (tuple[`BaseModelOutputWithPooling`]):
        The output of the [`OwlViTTextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`OwlViTVisionModel`].
    """
    logits: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    query_image_embeds: Optional[torch.FloatTensor] = None
    target_pred_boxes: Optional[torch.FloatTensor] = None
    query_pred_boxes: Optional[torch.FloatTensor] = None
    class_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> tuple[Any]:
        return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys()))
@dataclass
@auto_docstring(custom_intro='\n    Output type of [`OwlViTForObjectDetection.image_guided_detection`].\n    ')
class OwlViTImageGuidedObjectDetectionOutput(ModelOutput):
    '''
    logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`):
        Classification logits (including no-object) for all queries.
    image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim)`):
        Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes
        image embeddings for each patch.
    query_image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim)`):
        Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes
        image embeddings for each patch.
    target_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
        Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These
        values are normalized in [0, 1], relative to the size of each individual target image in the batch
        (disregarding possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to
        retrieve the unnormalized bounding boxes.
    query_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
        Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These
        values are normalized in [0, 1], relative to the size of each individual query image in the batch
        (disregarding possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to
        retrieve the unnormalized bounding boxes.
    class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
        Class embeddings of all image patches. OWL-ViT represents images as a set of image patches where the total
        number of patches is (image_size / patch_size)**2.
    text_model_output (tuple[`BaseModelOutputWithPooling`]):
        The output of the [`OwlViTTextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`OwlViTVisionModel`].
    '''

    def to_tuple(self) -> tuple[Any]:
        pass
4
1
5
0
5
0
2
2.07
1
2
0
0
1
0
1
1
46
3
14
10
12
29
11
10
9
2
1
0
2
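`to_tuple` flattens the output mapping while recursively converting the two nested model outputs; every other field is passed through unchanged. A minimal sketch of that pattern, with an ordinary dict and a stub class standing in for the `ModelOutput` container (both are simplifications, not the record's actual types):

class Nested:
    # Stand-in for a nested BaseModelOutputWithPooling.
    def to_tuple(self):
        return ('last_hidden', 'pooled')

outputs = {
    'logits': [[0.1, 0.9]],
    'text_model_output': Nested(),
    'vision_model_output': Nested(),
}
# Same comprehension shape as the record's to_tuple: recurse only on the nested outputs.
flat = tuple(
    outputs[k] if k not in ['text_model_output', 'vision_model_output'] else outputs[k].to_tuple()
    for k in outputs
)
print(flat)  # ([[0.1, 0.9]], ('last_hidden', 'pooled'), ('last_hidden', 'pooled'))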
4,364
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTMLP
from torch import Tensor, nn
import torch
from ...activations import ACT2FN

class OwlViTMLP(nn.Module):

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states
class OwlViTMLP(nn.Module):

    def __init__(self, config):
        pass

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        pass
3
0
6
0
6
0
1
0
1
2
0
0
2
4
2
12
13
1
12
7
9
0
12
7
9
1
1
0
2
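The MLP is the usual two-layer feed-forward block: expand to `intermediate_size`, apply the configured activation, project back to `hidden_size`. An equivalent standalone sketch with concrete sizes (the sizes are illustrative, and `nn.GELU` is used here as a stand-in for whatever `ACT2FN[config.hidden_act]` resolves to):

import torch
from torch import nn

hidden_size, intermediate_size = 8, 32
fc1 = nn.Linear(hidden_size, intermediate_size)
fc2 = nn.Linear(intermediate_size, hidden_size)
act = nn.GELU()  # stand-in for ACT2FN[config.hidden_act]

x = torch.randn(2, 4, hidden_size)
out = fc2(act(fc1(x)))   # expand -> nonlinearity -> contract
print(out.shape)         # torch.Size([2, 4, 8])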
4,365
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTModel
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
import torch
from .configuration_owlvit import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int
from torch import Tensor, nn
from typing import Any, Optional, Union

@auto_docstring
class OwlViTModel(OwlViTPreTrainedModel):
    config: OwlViTConfig

    def __init__(self, config: OwlViTConfig):
        super().__init__(config)
        if not isinstance(config.text_config, OwlViTTextConfig):
            raise TypeError(f'config.text_config is expected to be of type OwlViTTextConfig but is of type {type(config.text_config)}.')
        if not isinstance(config.vision_config, OwlViTVisionConfig):
            raise TypeError(f'config.vision_config is expected to be of type OwlViTVisionConfig but is of type {type(config.vision_config)}.')
        text_config = config.text_config
        vision_config = config.vision_config
        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size
        self.text_model = OwlViTTextTransformer(text_config)
        self.vision_model = OwlViTVisionTransformer(vision_config)
        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        self.logit_scale = nn.Parameter(torch.tensor(config.logit_scale_init_value))
        self.post_init()

    @filter_out_non_signature_kwargs()
    @auto_docstring
    def get_text_features(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None) -> torch.FloatTensor:
        """
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids)

        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
                The text embeddings obtained by applying the projection layer to the pooled output of
                [`OwlViTTextModel`].

        Examples:
        ```python
        >>> import torch
        >>> from transformers import AutoProcessor, OwlViTModel

        >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> inputs = processor(
        ...     text=[["a photo of a cat", "a photo of a dog"], ["a photo of an astronaut"]], return_tensors="pt"
        ... )
        >>> with torch.inference_mode():
        ...     text_features = model.get_text_features(**inputs)
        ```"""
        text_outputs: BaseModelOutputWithPooling = self.text_model(input_ids=input_ids, attention_mask=attention_mask)
        text_features = self.text_projection(text_outputs.pooler_output)
        return text_features

    @filter_out_non_signature_kwargs()
    @auto_docstring
    def get_image_features(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
        """
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
                The image embeddings obtained by applying the projection layer to the pooled output of
                [`OwlViTVisionModel`].

        Examples:
        ```python
        >>> import torch
        >>> from transformers.image_utils import load_image
        >>> from transformers import AutoProcessor, OwlViTModel

        >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = load_image(url)
        >>> inputs = processor(images=image, return_tensors="pt")
        >>> with torch.inference_mode():
        ...     image_features = model.get_image_features(**inputs)
        ```"""
        vision_outputs: BaseModelOutputWithPooling = self.vision_model(pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        image_features = self.visual_projection(vision_outputs.pooler_output)
        return image_features

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_base_image_embeds: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, OwlViTOutput]:
        """
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.
        return_base_image_embeds (`bool`, *optional*):
            Whether or not to return the base image embeddings.

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, OwlViTModel

        >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
        text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        text_embeds = text_outputs[1]
        text_embeds = self.text_projection(text_embeds)
        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(image_embeds)
        image_embeds = image_embeds / torch.linalg.norm(image_embeds, ord=2, dim=-1, keepdim=True)
        text_embeds_norm = text_embeds / torch.linalg.norm(text_embeds, ord=2, dim=-1, keepdim=True)
        logit_scale = self.logit_scale.exp().to(image_embeds.device)
        logits_per_text = torch.matmul(text_embeds_norm, image_embeds.t()) * logit_scale
        logits_per_image = logits_per_text.t()
        loss = None
        if return_loss:
            loss = owlvit_loss(logits_per_text)
        text_embeds = text_embeds_norm
        if not return_dict:
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return (loss,) + output if loss is not None else output
        return OwlViTOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs)
@auto_docstring
class OwlViTModel(OwlViTPreTrainedModel):

    def __init__(self, config: OwlViTConfig):
        pass

    @filter_out_non_signature_kwargs()
    @auto_docstring
    def get_text_features(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None) -> torch.FloatTensor:
        '''
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids)

        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
                The text embeddings obtained by applying the projection layer to the pooled output of
                [`OwlViTTextModel`].

        Examples:
        ```python
        >>> import torch
        >>> from transformers import AutoProcessor, OwlViTModel

        >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> inputs = processor(
        ...     text=[["a photo of a cat", "a photo of a dog"], ["a photo of an astronaut"]], return_tensors="pt"
        ... )
        >>> with torch.inference_mode():
        ...     text_features = model.get_text_features(**inputs)
        ```'''
        pass

    @filter_out_non_signature_kwargs()
    @auto_docstring
    def get_image_features(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:
        '''
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
                The image embeddings obtained by applying the projection layer to the pooled output of
                [`OwlViTVisionModel`].

        Examples:
        ```python
        >>> import torch
        >>> from transformers.image_utils import load_image
        >>> from transformers import AutoProcessor, OwlViTModel

        >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = load_image(url)
        >>> inputs = processor(images=image, return_tensors="pt")
        >>> with torch.inference_mode():
        ...     image_features = model.get_image_features(**inputs)
        ```'''
        pass

    @auto_docstring
    def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_base_image_embeds: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, OwlViTOutput]:
        '''
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.
        return_base_image_embeds (`bool`, *optional*):
            Whether or not to return the base image embeddings.

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, OwlViTModel

        >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```'''
        pass
11
3
49
7
29
14
4
0.45
1
11
6
0
4
8
4
5
207
32
121
60
87
54
56
32
51
7
2
1
16
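The CLIP-style similarity in `forward` reduces to three steps: L2-normalize both embedding sets, scale by `exp(logit_scale)`, and take pairwise dot products. A standalone numeric sketch of those steps (batch sizes and dimensions are arbitrary; 2.6592 is log(1/0.07), the usual CLIP-style init value):

import torch

text_embeds = torch.randn(3, 16)    # (num_text_queries, projection_dim)
image_embeds = torch.randn(2, 16)   # (num_images, projection_dim)
logit_scale = torch.tensor(2.6592)  # log(1 / 0.07)

# Normalize so the dot product becomes a cosine similarity.
image_embeds = image_embeds / torch.linalg.norm(image_embeds, ord=2, dim=-1, keepdim=True)
text_embeds = text_embeds / torch.linalg.norm(text_embeds, ord=2, dim=-1, keepdim=True)

logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale.exp()
logits_per_image = logits_per_text.t()
print(logits_per_text.shape, logits_per_image.shape)  # torch.Size([3, 2]) torch.Size([2, 3])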
4,366
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTObjectDetectionOutput
from dataclasses import dataclass
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int
from typing import Any, Optional, Union
import torch

@dataclass
@auto_docstring(custom_intro='\n    Output type of [`OwlViTForObjectDetection`].\n    ')
class OwlViTObjectDetectionOutput(ModelOutput):
    """
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
        Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
        bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
        scale-invariant IoU loss.
    loss_dict (`Dict`, *optional*):
        A dictionary containing the individual losses. Useful for logging.
    logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`):
        Classification logits (including no-object) for all queries.
    pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
        Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These
        values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
        possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to retrieve the
        unnormalized bounding boxes.
    text_embeds (`torch.FloatTensor` of shape `(batch_size, num_max_text_queries, output_dim)`):
        The text embeddings obtained by applying the projection layer to the pooled output of [`OwlViTTextModel`].
    image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim)`):
        Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes
        image embeddings for each patch.
    class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
        Class embeddings of all image patches. OWL-ViT represents images as a set of image patches where the total
        number of patches is (image_size / patch_size)**2.
    text_model_output (tuple[`BaseModelOutputWithPooling`]):
        The output of the [`OwlViTTextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`OwlViTVisionModel`].
    """
    loss: Optional[torch.FloatTensor] = None
    loss_dict: Optional[dict] = None
    logits: Optional[torch.FloatTensor] = None
    pred_boxes: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    class_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> tuple[Any]:
        return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys()))
@dataclass
@auto_docstring(custom_intro='\n    Output type of [`OwlViTForObjectDetection`].\n    ')
class OwlViTObjectDetectionOutput(ModelOutput):
    '''
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
        Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
        bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
        scale-invariant IoU loss.
    loss_dict (`Dict`, *optional*):
        A dictionary containing the individual losses. Useful for logging.
    logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`):
        Classification logits (including no-object) for all queries.
    pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
        Normalized box coordinates for all queries, represented as (center_x, center_y, width, height). These
        values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
        possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to retrieve the
        unnormalized bounding boxes.
    text_embeds (`torch.FloatTensor` of shape `(batch_size, num_max_text_queries, output_dim)`):
        The text embeddings obtained by applying the projection layer to the pooled output of [`OwlViTTextModel`].
    image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim)`):
        Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes
        image embeddings for each patch.
    class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
        Class embeddings of all image patches. OWL-ViT represents images as a set of image patches where the total
        number of patches is (image_size / patch_size)**2.
    text_model_output (tuple[`BaseModelOutputWithPooling`]):
        The output of the [`OwlViTTextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`OwlViTVisionModel`].
    '''

    def to_tuple(self) -> tuple[Any]:
        pass
4
1
5
0
5
0
2
1.93
1
2
0
0
1
0
1
1
47
3
15
11
13
29
12
11
10
2
1
0
2
4,367
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTOutput
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int
from typing import Any, Optional, Union
import torch
from dataclasses import dataclass

@dataclass
@auto_docstring
class OwlViTOutput(ModelOutput):
    """
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
        Contrastive loss for image-text similarity.
    logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
        The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
        similarity scores.
    logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
        The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
        similarity scores.
    text_embeds (`torch.FloatTensor` of shape `(batch_size * num_max_text_queries, output_dim)`):
        The text embeddings obtained by applying the projection layer to the pooled output of [`OwlViTTextModel`].
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The image embeddings obtained by applying the projection layer to the pooled output of
        [`OwlViTVisionModel`].
    text_model_output (tuple[`BaseModelOutputWithPooling`]):
        The output of the [`OwlViTTextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`OwlViTVisionModel`].
    """
    loss: Optional[torch.FloatTensor] = None
    logits_per_image: Optional[torch.FloatTensor] = None
    logits_per_text: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> tuple[Any]:
        return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys()))
@dataclass
@auto_docstring
class OwlViTOutput(ModelOutput):
    '''
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
        Contrastive loss for image-text similarity.
    logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
        The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
        similarity scores.
    logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
        The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
        similarity scores.
    text_embeds (`torch.FloatTensor` of shape `(batch_size * num_max_text_queries, output_dim)`):
        The text embeddings obtained by applying the projection layer to the pooled output of [`OwlViTTextModel`].
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The image embeddings obtained by applying the projection layer to the pooled output of
        [`OwlViTVisionModel`].
    text_model_output (tuple[`BaseModelOutputWithPooling`]):
        The output of the [`OwlViTTextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`OwlViTVisionModel`].
    '''

    def to_tuple(self) -> tuple[Any]:
        pass
4
1
5
0
5
0
2
1.54
1
2
0
0
1
0
1
1
35
2
13
9
11
20
10
9
8
2
1
0
2
4,368
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTPreTrainedModel
from .configuration_owlvit import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int
from ...modeling_utils import PreTrainedModel
from torch import Tensor, nn

@auto_docstring
class OwlViTPreTrainedModel(PreTrainedModel):
    config: OwlViTConfig
    base_model_prefix = 'owlvit'
    supports_gradient_checkpointing = True
    _no_split_modules = ['OwlViTEncoderLayer']

    def _init_weights(self, module: nn.Module):
        """Initialize the weights"""
        factor = self.config.initializer_factor
        if isinstance(module, OwlViTTextEmbeddings):
            module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
            module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
        elif isinstance(module, OwlViTVisionEmbeddings):
            nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim ** (-0.5) * factor)
            nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
            nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
        elif isinstance(module, OwlViTAttention):
            in_proj_std = module.embed_dim ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor
            out_proj_std = module.embed_dim ** (-0.5) * factor
            nn.init.normal_(module.q_proj.weight, std=in_proj_std)
            nn.init.normal_(module.k_proj.weight, std=in_proj_std)
            nn.init.normal_(module.v_proj.weight, std=in_proj_std)
            nn.init.normal_(module.out_proj.weight, std=out_proj_std)
        elif isinstance(module, OwlViTMLP):
            in_proj_std = module.config.hidden_size ** (-0.5) * (2 * module.config.num_hidden_layers) ** (-0.5) * factor
            fc_std = (2 * module.config.hidden_size) ** (-0.5) * factor
            nn.init.normal_(module.fc1.weight, std=fc_std)
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, OwlViTModel):
            nn.init.normal_(module.text_projection.weight, std=module.text_embed_dim ** (-0.5) * factor)
            nn.init.normal_(module.visual_projection.weight, std=module.vision_embed_dim ** (-0.5) * factor)
            module.logit_scale.data.fill_(self.config.logit_scale_init_value)
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=factor)
            if module.bias is not None:
                module.bias.data.zero_()
@auto_docstring
class OwlViTPreTrainedModel(PreTrainedModel):

    def _init_weights(self, module: nn.Module):
        '''Initialize the weights'''
        pass
3
1
39
0
38
1
8
0.12
1
5
5
4
1
0
1
1
50
2
43
10
41
5
33
10
31
8
1
1
8
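`_init_weights` follows the CLIP recipe of scaling initialization standard deviations by `embed_dim ** -0.5`, with an extra `(2 * num_hidden_layers) ** -0.5` factor for in-projections. A compact sketch of how those std values come out (the concrete hidden size and depth below are assumptions for illustration, not read from any config):

# Illustrative std computation for the attention and MLP projections.
factor = 1.0            # config.initializer_factor (default)
embed_dim = 768         # hidden size (assumed)
num_hidden_layers = 12  # depth (assumed)

in_proj_std = embed_dim ** (-0.5) * (2 * num_hidden_layers) ** (-0.5) * factor
out_proj_std = embed_dim ** (-0.5) * factor
fc_std = (2 * embed_dim) ** (-0.5) * factor
print(f'{in_proj_std:.5f} {out_proj_std:.5f} {fc_std:.5f}')  # 0.00737 0.03608 0.02552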
4,369
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTTextEmbeddings
from torch import Tensor, nn
from .configuration_owlvit import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig
import torch
from typing import Any, Optional, Union

class OwlViTTextEmbeddings(nn.Module):

    def __init__(self, config: OwlViTTextConfig):
        super().__init__()
        self.token_embedding = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)

    def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor:
        seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]
        if inputs_embeds is None:
            inputs_embeds = self.token_embedding(input_ids)
        position_embeddings = self.position_embedding(position_ids)
        embeddings = inputs_embeds + position_embeddings
        return embeddings
class OwlViTTextEmbeddings(nn.Module): def __init__(self, config: OwlViTTextConfig): pass def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> torch.Tensor: pass
3
0
14
3
11
1
3
0.05
1
3
1
0
2
2
2
12
29
6
22
13
14
1
15
8
12
4
1
1
5
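The embeddings record sums token embeddings with a slice of the position-embedding table. A toy sketch with made-up sizes showing the shapes involved:

```python
import torch
from torch import nn

# Toy sizes; the real values come from OwlViTTextConfig.
vocab_size, max_positions, hidden = 100, 16, 8
token_emb = nn.Embedding(vocab_size, hidden)
pos_emb = nn.Embedding(max_positions, hidden)
position_ids = torch.arange(max_positions).expand((1, -1))

input_ids = torch.tensor([[5, 7, 9]])              # (batch, seq_len)
seq_len = input_ids.shape[-1]
embeddings = token_emb(input_ids) + pos_emb(position_ids[:, :seq_len])
print(embeddings.shape)                            # torch.Size([1, 3, 8])
```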
4,370
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTTextModel
from typing import Any, Optional, Union from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int import torch from .configuration_owlvit import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig from torch import Tensor, nn from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling class OwlViTTextModel(OwlViTPreTrainedModel): config: OwlViTTextConfig def __init__(self, config: OwlViTTextConfig): super().__init__(config) self.text_model = OwlViTTextTransformer(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.token_embedding def set_input_embeddings(self, value): self.text_model.embeddings.token_embedding = value @auto_docstring def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: """ input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) Examples: ```python >>> from transformers import AutoProcessor, OwlViTTextModel >>> model = OwlViTTextModel.from_pretrained("google/owlvit-base-patch32") >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32") >>> inputs = processor( ... text=[["a photo of a cat", "a photo of a dog"], ["a photo of an astronaut"]], return_tensors="pt" ... ) >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled (EOS token) states ```""" return self.text_model(input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
class OwlViTTextModel(OwlViTPreTrainedModel): def __init__(self, config: OwlViTTextConfig): pass def get_input_embeddings(self) -> nn.Module: pass def set_input_embeddings(self, value): pass @auto_docstring def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: ''' input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) Examples: ```python >>> from transformers import AutoProcessor, OwlViTTextModel >>> model = OwlViTTextModel.from_pretrained("google/owlvit-base-patch32") >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32") >>> inputs = processor( ... text=[["a photo of a cat", "a photo of a dog"], ["a photo of an astronaut"]], return_tensors="pt" ... ) >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled (EOS token) states ```''' pass
6
1
11
1
6
4
1
0.59
1
6
3
0
4
1
4
5
50
7
27
15
13
16
12
7
7
1
2
0
4
4,371
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTTextTransformer
from typing import Any, Optional, Union from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int from torch import Tensor, nn import torch from .configuration_owlvit import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig class OwlViTTextTransformer(nn.Module): def __init__(self, config: OwlViTTextConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = OwlViTTextEmbeddings(config) self.encoder = OwlViTEncoder(config) self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @auto_docstring def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: """ input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) causal_attention_mask = _create_4d_causal_attention_mask(input_shape, hidden_states.dtype, device=hidden_states.device) if attention_mask is not None: attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype) encoder_outputs = self.encoder(inputs_embeds=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] last_hidden_state = self.final_layer_norm(last_hidden_state) pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device), input_ids.to(torch.int).argmax(dim=-1).to(last_hidden_state.device)] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
class OwlViTTextTransformer(nn.Module): def __init__(self, config: OwlViTTextConfig): pass @auto_docstring def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: ''' input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) ''' pass
4
1
34
4
26
5
4
0.19
1
8
4
0
2
4
2
12
72
8
54
23
41
10
25
14
22
6
1
1
7
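The pooling step in the record above gathers, per sequence, the hidden state at the position of the highest token id, which locates the EOS token in a CLIP-style vocabulary where EOS carries the largest id. A toy sketch (the EOS id of 49 and all tensor values are arbitrary assumptions):

```python
import torch

# Toy tensors: batch of 2, seq_len 5, hidden size 4.
last_hidden_state = torch.randn(2, 5, 4)
input_ids = torch.tensor([[10, 23, 49, 0, 0],      # 49 = assumed EOS id
                          [10, 49, 0, 0, 0]])

# argmax over input_ids finds the EOS position in each row.
eos_positions = input_ids.to(torch.int).argmax(dim=-1)   # tensor([2, 1])
pooled = last_hidden_state[torch.arange(last_hidden_state.shape[0]), eos_positions]
print(pooled.shape)                                # torch.Size([2, 4])
```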
4,372
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTVisionEmbeddings
from torch import Tensor, nn from .configuration_owlvit import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int import torch class OwlViTVisionEmbeddings(nn.Module): def __init__(self, config: OwlViTVisionConfig): super().__init__() self.patch_size = config.patch_size self.config = config self.embed_dim = config.hidden_size self.class_embedding = nn.Parameter(torch.randn(config.hidden_size)) self.patch_embedding = nn.Conv2d(in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=config.patch_size, stride=config.patch_size, bias=False) self.num_patches = (config.image_size // config.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer('position_ids', torch.arange(self.num_positions).expand((1, -1)), persistent=False) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 position_embedding = self.position_embedding.weight.unsqueeze(0) num_positions = position_embedding.shape[1] - 1 if not torch.jit.is_tracing() and num_patches == num_positions and (height == width): return self.position_embedding(self.position_ids) class_pos_embed = position_embedding[:, :1] patch_pos_embed = position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions ** 0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_height, new_width), mode='bicubic', align_corners=False) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape patch_embeds = self.patch_embedding(pixel_values) patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings
class OwlViTVisionEmbeddings(nn.Module): def __init__(self, config: OwlViTVisionConfig): pass def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: ''' This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 ''' pass def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool=False) -> torch.Tensor: pass
4
1
22
3
17
3
2
0.2
1
5
1
0
3
8
3
13
71
11
51
25
47
10
39
25
35
2
1
1
5
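The `interpolate_pos_encoding` record resizes the pretrained patch-position grid with bicubic interpolation while keeping the class-token embedding fixed. A self-contained sketch on a toy 4x4 to 6x6 grid (all sizes are illustrative assumptions):

```python
import torch
from torch import nn

# Toy grid: 4x4 pretraining grid (16 patches + 1 class token), resized to 6x6.
dim, old_side, new_side = 8, 4, 6
pos_embed = torch.randn(1, old_side * old_side + 1, dim)

class_pos = pos_embed[:, :1]
patch_pos = pos_embed[:, 1:].reshape(1, old_side, old_side, dim).permute(0, 3, 1, 2)
patch_pos = nn.functional.interpolate(
    patch_pos, size=(new_side, new_side), mode="bicubic", align_corners=False
)
patch_pos = patch_pos.permute(0, 2, 3, 1).reshape(1, -1, dim)
new_pos_embed = torch.cat((class_pos, patch_pos), dim=1)
print(new_pos_embed.shape)  # torch.Size([1, 37, 8]): 6*6 patches + class token
```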
4,373
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTVisionModel
import torch from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from typing import Any, Optional, Union from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int from .configuration_owlvit import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig from torch import Tensor, nn class OwlViTVisionModel(OwlViTPreTrainedModel): config: OwlViTVisionConfig main_input_name = 'pixel_values' def __init__(self, config: OwlViTVisionConfig): super().__init__(config) self.vision_model = OwlViTVisionTransformer(config) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @auto_docstring def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: """ Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, OwlViTVisionModel >>> model = OwlViTVisionModel.from_pretrained("google/owlvit-base-patch32") >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```""" return self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)
class OwlViTVisionModel(OwlViTPreTrainedModel): def __init__(self, config: OwlViTVisionConfig): pass def get_input_embeddings(self) -> nn.Module: pass @auto_docstring def forward(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: ''' Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, OwlViTVisionModel >>> model = OwlViTVisionModel.from_pretrained("google/owlvit-base-patch32") >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```''' pass
5
1
14
1
7
6
1
0.65
1
5
3
0
3
1
3
4
50
7
26
15
13
17
11
7
7
1
2
0
3
4,374
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/modeling_owlvit.py
transformers.models.owlvit.modeling_owlvit.OwlViTVisionTransformer
from ...utils import ModelOutput, auto_docstring, filter_out_non_signature_kwargs, is_vision_available, logging, torch_int import torch from typing import Any, Optional, Union from .configuration_owlvit import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig from torch import Tensor, nn from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling class OwlViTVisionTransformer(nn.Module): def __init__(self, config: OwlViTVisionConfig): super().__init__() self.config = config self.embeddings = OwlViTVisionEmbeddings(config) self.pre_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.encoder = OwlViTEncoder(config) self.post_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) @auto_docstring def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict expected_input_dtype = self.embeddings.patch_embedding.weight.dtype pixel_values = pixel_values.to(expected_input_dtype) hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) hidden_states = self.pre_layernorm(hidden_states) encoder_outputs = self.encoder(inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)
class OwlViTVisionTransformer(nn.Module): def __init__(self, config: OwlViTVisionConfig): pass @auto_docstring def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=False, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithPooling]: pass
4
0
27
4
21
2
3
0.09
1
6
4
0
2
5
2
12
57
9
44
21
32
4
23
13
20
5
1
1
6
4,375
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/owlvit/processing_owlvit.py
transformers.models.owlvit.processing_owlvit.OwlViTProcessor
from typing import TYPE_CHECKING, Optional, Union from ...utils import TensorType, is_torch_available from ...image_utils import ImageInput from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack import warnings import numpy as np from ...tokenization_utils_base import PreTokenizedInput, TextInput from ...image_processing_utils import BatchFeature class OwlViTProcessor(ProcessorMixin): """ Constructs an OWL-ViT processor which wraps [`OwlViTImageProcessor`] and [`CLIPTokenizer`]/[`CLIPTokenizerFast`] into a single processor that inherits both the image processor and tokenizer functionalities. See the [`~OwlViTProcessor.__call__`] and [`~OwlViTProcessor.decode`] for more information. Args: image_processor ([`OwlViTImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`], *optional*): The tokenizer is a required input. """ attributes = ['image_processor', 'tokenizer'] image_processor_class = 'OwlViTImageProcessor' tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__(self, image_processor=None, tokenizer=None, **kwargs): feature_extractor = None if 'feature_extractor' in kwargs: warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning) feature_extractor = kwargs.pop('feature_extractor') image_processor = image_processor if image_processor is not None else feature_extractor super().__init__(image_processor, tokenizer) def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[OwlViTProcessorKwargs]) -> BatchFeature: """ Main method to prepare for the model one or several text(s) and image(s). This method forwards the `text` and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode: the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). query_images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The query image to be prepared, one query image is expected per target image to be queried. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. 
Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. - **query_pixel_values** -- Pixel values of the query images to be fed to a model. Returned when `query_images` is not `None`. """ output_kwargs = self._merge_kwargs(OwlViTProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs) query_images = output_kwargs['images_kwargs'].pop('query_images', None) return_tensors = output_kwargs['common_kwargs']['return_tensors'] if text is None and query_images is None and (images is None): raise ValueError('You have to specify at least one text or query image or image. All three cannot be none.') data = {} if text is not None: if isinstance(text, str) or (isinstance(text, list) and (not isinstance(text[0], list))): encodings = [self.tokenizer(text, **output_kwargs['text_kwargs'])] elif isinstance(text, list) and isinstance(text[0], list): encodings = [] max_num_queries = max([len(text_single) for text_single in text]) for text_single in text: if len(text_single) != max_num_queries: text_single = text_single + [' '] * (max_num_queries - len(text_single)) encoding = self.tokenizer(text_single, **output_kwargs['text_kwargs']) encodings.append(encoding) else: raise TypeError('Input text should be a string, a list of strings or a nested list of strings') if return_tensors == 'np': input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0) attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0) elif return_tensors == 'pt' and is_torch_available(): import torch input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0) attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0) else: raise ValueError('Target return tensor type could not be returned') data['input_ids'] = input_ids data['attention_mask'] = attention_mask if query_images is not None: query_pixel_values = self.image_processor(query_images, **output_kwargs['images_kwargs']).pixel_values data = {'query_pixel_values': query_pixel_values} if images is not None: image_features = self.image_processor(images, **output_kwargs['images_kwargs']) data['pixel_values'] = image_features.pixel_values return BatchFeature(data=data, tensor_type=return_tensors) def post_process(self, *args, **kwargs): """ This method forwards all its arguments to [`OwlViTImageProcessor.post_process`]. Please refer to the docstring of this method for more information. """ return self.image_processor.post_process(*args, **kwargs) def post_process_object_detection(self, *args, **kwargs): """ This method forwards all its arguments to [`OwlViTImageProcessor.post_process_object_detection`]. Please refer to the docstring of this method for more information. """ warnings.warn('`post_process_object_detection` method is deprecated for OwlVitProcessor and will be removed in v5. 
Use `post_process_grounded_object_detection` instead.', FutureWarning) return self.image_processor.post_process_object_detection(*args, **kwargs) def post_process_grounded_object_detection(self, outputs: 'OwlViTObjectDetectionOutput', threshold: float=0.1, target_sizes: Optional[Union[TensorType, list[tuple]]]=None, text_labels: Optional[list[list[str]]]=None): """ Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Args: outputs ([`OwlViTObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.1): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. If unset, predictions will not be resized. text_labels (`list[list[str]]`, *optional*): List of lists of text labels for each image in the batch. If unset, "text_labels" in output will be set to `None`. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. - "labels": Indexes of the classes predicted by the model on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. - "text_labels": The text labels for each predicted bounding box on the image. """ output = self.image_processor.post_process_object_detection(outputs=outputs, threshold=threshold, target_sizes=target_sizes) if text_labels is not None and len(text_labels) != len(output): raise ValueError('Make sure that you pass in as many lists of text labels as images') if text_labels is not None: for image_output, image_text_labels in zip(output, text_labels): object_text_labels = [image_text_labels[i] for i in image_output['labels']] image_output['text_labels'] = object_text_labels else: for image_output in output: image_output['text_labels'] = None return output def post_process_image_guided_detection(self, outputs: 'OwlViTImageGuidedObjectDetectionOutput', threshold: float=0.0, nms_threshold: float=0.3, target_sizes: Optional[Union[TensorType, list[tuple]]]=None): """ Converts the output of [`OwlViTForObjectDetection.image_guided_detection`] into the format expected by the COCO api. Args: outputs ([`OwlViTImageGuidedObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.0): Minimum confidence threshold to use to filter out predicted boxes. nms_threshold (`float`, *optional*, defaults to 0.3): IoU threshold for non-maximum suppression of overlapping boxes. target_sizes (`torch.Tensor`, *optional*): Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to None, predictions will not be unnormalized. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. - "labels": Set to `None`. 
""" return self.image_processor.post_process_image_guided_detection(outputs=outputs, threshold=threshold, nms_threshold=nms_threshold, target_sizes=target_sizes) @property def feature_extractor_class(self): warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning) return self.image_processor_class @property def feature_extractor(self): warnings.warn('`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning) return self.image_processor
class OwlViTProcessor(ProcessorMixin): ''' Constructs an OWL-ViT processor which wraps [`OwlViTImageProcessor`] and [`CLIPTokenizer`]/[`CLIPTokenizerFast`] into a single processor that inherits both the image processor and tokenizer functionalities. See the [`~OwlViTProcessor.__call__`] and [`~OwlViTProcessor.decode`] for more information. Args: image_processor ([`OwlViTImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`], *optional*): The tokenizer is a required input. ''' def __init__(self, image_processor=None, tokenizer=None, **kwargs): pass def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[OwlViTProcessorKwargs]) -> BatchFeature: ''' Main method to prepare for the model one or several text(s) and image(s). This method forwards the `text` and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode: the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. Both channels-first and channels-last formats are supported. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). query_images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The query image to be prepared, one query image is expected per target image to be queried. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. - **query_pixel_values** -- Pixel values of the query images to be fed to a model. Returned when `query_images` is not `None`. ''' pass def post_process(self, *args, **kwargs): ''' This method forwards all its arguments to [`OwlViTImageProcessor.post_process`]. Please refer to the docstring of this method for more information. 
''' pass def post_process_object_detection(self, *args, **kwargs): ''' This method forwards all its arguments to [`OwlViTImageProcessor.post_process_object_detection`]. Please refer to the docstring of this method for more information. ''' pass def post_process_grounded_object_detection(self, outputs: 'OwlViTObjectDetectionOutput', threshold: float=0.1, target_sizes: Optional[Union[TensorType, list[tuple]]]=None, text_labels: Optional[list[list[str]]]=None): ''' Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Args: outputs ([`OwlViTObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.1): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size `(height, width)` of each image in the batch. If unset, predictions will not be resized. text_labels (`list[list[str]]`, *optional*): List of lists of text labels for each image in the batch. If unset, "text_labels" in output will be set to `None`. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. - "labels": Indexes of the classes predicted by the model on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. - "text_labels": The text labels for each predicted bounding box on the image. ''' pass def post_process_image_guided_detection(self, outputs: 'OwlViTImageGuidedObjectDetectionOutput', threshold: float=0.0, nms_threshold: float=0.3, target_sizes: Optional[Union[TensorType, list[tuple]]]=None): ''' Converts the output of [`OwlViTForObjectDetection.image_guided_detection`] into the format expected by the COCO api. Args: outputs ([`OwlViTImageGuidedObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.0): Minimum confidence threshold to use to filter out predicted boxes. nms_threshold (`float`, *optional*, defaults to 0.3): IoU threshold for non-maximum suppression of overlapping boxes. target_sizes (`torch.Tensor`, *optional*): Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to None, predictions will not be unnormalized. Returns: `list[Dict]`: A list of dictionaries, each dictionary containing the following keys: - "scores": The confidence scores for each predicted box on the image. - "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. - "labels": Set to `None`. ''' pass @property def feature_extractor_class(self): pass @property def feature_extractor(self): pass
11
6
25
3
13
9
3
0.78
1
9
1
0
10
0
10
27
275
40
132
42
104
103
86
28
72
16
2
4
33
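When `text` is a nested list, the `__call__` record pads each per-image query list with `" "` up to the longest list, so every image yields the same number of tokenized queries. The padding step in isolation, with example strings:

```python
# Each image gets its own list of text queries; shorter lists are padded
# with " " so the stacked input_ids have a uniform query dimension.
text = [["a photo of a cat", "a photo of a dog"], ["a photo of an astronaut"]]

max_num_queries = max(len(t) for t in text)
padded = [t + [" "] * (max_num_queries - len(t)) for t in text]
print(padded)
# [['a photo of a cat', 'a photo of a dog'], ['a photo of an astronaut', ' ']]
```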
4,376
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/paligemma/configuration_paligemma.py
transformers.models.paligemma.configuration_paligemma.PaliGemmaConfig
from ..auto import CONFIG_MAPPING, AutoConfig from ...configuration_utils import PretrainedConfig class PaliGemmaConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`PaliGemmaForConditionalGeneration`]. It is used to instantiate a PaliGemma model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the PaliGemma-2B. e.g. [paligemma-hf/paligemma-2b](https://huggingface.co/paligemma-hf/paligemma-2b) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (`PaliGemmaVisionConfig`, *optional*): Custom vision config or dict text_config (`Union[AutoConfig, dict]`, *optional*): The config object of the text backbone. Can be any of `LlamaConfig` or `MistralConfig`. image_token_index (`int`, *optional*, defaults to 256000): The image token index to encode the image prompt. vocab_size (`int`, *optional*, defaults to 257152): Vocabulary size of the PaliGemma model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`~PaliGemmaForConditionalGeneration`] projection_dim (`int`, *optional*, defaults to 2048): Dimension of the multimodal projection space. hidden_size (`int`, *optional*, defaults to 2048): Dimension of the hidden layer of the Language model. Example: ```python >>> from transformers import PaliGemmaForConditionalGeneration, PaliGemmaConfig, SiglipVisionConfig, GemmaConfig >>> # Initializing a Siglip-like vision config >>> vision_config = SiglipVisionConfig() >>> # Initializing a PaliGemma config >>> text_config = GemmaConfig() >>> # Initializing a PaliGemma paligemma-3b-224 style configuration >>> configuration = PaliGemmaConfig(vision_config, text_config) >>> # Initializing a model from the paligemma-3b-224 style configuration >>> model = PaliGemmaForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'paligemma' attribute_map = {'image_token_id': 'image_token_index'} sub_configs = {'text_config': AutoConfig, 'vision_config': AutoConfig} keys_to_ignore_at_inference = ['past_key_values'] def __init__(self, vision_config=None, text_config=None, image_token_index=256000, vocab_size=257152, projection_dim=2048, hidden_size=2048, **kwargs): self.image_token_index = image_token_index self.projection_dim = projection_dim self.hidden_size = hidden_size self.vision_config = vision_config self.is_encoder_decoder = False if isinstance(self.vision_config, dict): vision_config['model_type'] = vision_config.get('model_type', 'siglip_vision_model') self.vision_config = CONFIG_MAPPING[vision_config['model_type']](**vision_config) elif vision_config is None: self.vision_config = CONFIG_MAPPING['siglip_vision_model'](intermediate_size=4096, hidden_size=1152, patch_size=14, image_size=224, num_hidden_layers=27, num_attention_heads=16, vocab_size=257152, vision_use_head=False) self.text_config = text_config if isinstance(self.text_config, dict): text_config['model_type'] = text_config.get('model_type', 'gemma') self.text_config = CONFIG_MAPPING[text_config['model_type']](**text_config) elif text_config is None: self.text_config = CONFIG_MAPPING['gemma'](hidden_size=2048, num_hidden_layers=18, intermediate_size=16384, num_attention_heads=8, num_key_value_heads=1, is_encoder_decoder=False, 
vocab_size=vocab_size) self.text_config.num_image_tokens = (self.vision_config.image_size // self.vision_config.patch_size) ** 2 self.vision_config.projection_dim = projection_dim super().__init__(**kwargs)
class PaliGemmaConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`PaliGemmaForConditionalGeneration`]. It is used to instantiate a PaliGemma model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the PaliGemma-2B. e.g. [paligemma-hf/paligemma-2b](https://huggingface.co/paligemma-hf/paligemma-2b) Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vision_config (`PaliGemmaVisionConfig`, *optional*): Custom vision config or dict text_config (`Union[AutoConfig, dict]`, *optional*): The config object of the text backbone. Can be any of `LlamaConfig` or `MistralConfig`. image_token_index (`int`, *optional*, defaults to 256000): The image token index to encode the image prompt. vocab_size (`int`, *optional*, defaults to 257152): Vocabulary size of the PaliGemma model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`~PaliGemmaForConditionalGeneration`] projection_dim (`int`, *optional*, defaults to 2048): Dimension of the multimodal projection space. hidden_size (`int`, *optional*, defaults to 2048): Dimension of the hidden layer of the Language model. Example: ```python >>> from transformers import PaliGemmaForConditionalGeneration, PaliGemmaConfig, SiglipVisionConfig, GemmaConfig >>> # Initializing a Siglip-like vision config >>> vision_config = SiglipVisionConfig() >>> # Initializing a PaliGemma config >>> text_config = GemmaConfig() >>> # Initializing a PaliGemma paligemma-3b-224 style configuration >>> configuration = PaliGemmaConfig(vision_config, text_config) >>> # Initializing a model from the paligemma-3b-224 style configuration >>> model = PaliGemmaForConditionalGeneration(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, vision_config=None, text_config=None, image_token_index=256000, vocab_size=257152, projection_dim=2048, hidden_size=2048, **kwargs): pass
2
1
16
1
16
0
3
0.54
1
3
0
0
4
8
4
4
122
17
68
28
51
37
32
16
27
7
1
1
10
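The config record derives `num_image_tokens` from the vision tower's geometry. With the default SigLIP values shown above, the arithmetic works out as follows:

```python
# Default SigLIP tower in the record: 224x224 input, 14x14 patches.
image_size, patch_size = 224, 14
num_image_tokens = (image_size // patch_size) ** 2
print(num_image_tokens)  # 256: 224 // 14 == 16 patches per side, 16**2 tokens
```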
4,377
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/paligemma/modeling_paligemma.py
transformers.models.paligemma.modeling_paligemma.PaliGemmaCausalLMOutputWithPast
import torch from ...cache_utils import Cache, StaticCache from dataclasses import dataclass from typing import Optional, Union from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging @dataclass @auto_docstring(custom_intro='\n Base class for PaliGemma causal language model (or autoregressive) outputs.\n ') class PaliGemmaCausalLMOutputWithPast(ModelOutput): """ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.text_config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder after projecting last hidden state. """ loss: Optional[torch.FloatTensor] = None logits: Optional[torch.FloatTensor] = None past_key_values: Optional[Cache] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None attentions: Optional[tuple[torch.FloatTensor]] = None image_hidden_states: Optional[torch.FloatTensor] = None
@dataclass @auto_docstring(custom_intro='\n Base class for PaliGemma causal language model (or autoregressive) outputs.\n ') class PaliGemmaCausalLMOutputWithPast(ModelOutput): ''' loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.text_config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder after projecting last hidden state. ''' pass
3
1
0
0
0
0
0
3.57
1
0
0
0
0
0
0
0
37
5
7
7
6
25
7
7
6
0
1
0
0
4,378
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/paligemma/modeling_paligemma.py
transformers.models.paligemma.modeling_paligemma.PaliGemmaForConditionalGeneration
from ...cache_utils import Cache, StaticCache from torch import nn from ...processing_utils import Unpack import torch from .configuration_paligemma import PaliGemmaConfig from typing import Optional, Union from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging from ...generation import GenerationMixin @auto_docstring(custom_intro='\n The Base Paligemma model which consists of a vision backbone and a language model without language modeling head.,\n ') class PaliGemmaForConditionalGeneration(PaliGemmaPreTrainedModel, GenerationMixin): _checkpoint_conversion_mapping = {'^language_model.model': 'model.language_model', '^vision_tower': 'model.vision_tower', '^multi_modal_projector': 'model.multi_modal_projector', '^language_model.lm_head': 'lm_head'} _tied_weights_keys = ['lm_head.weight'] def __init__(self, config: PaliGemmaConfig): super().__init__(config) self.model = PaliGemmaModel(config) self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False) self.post_init() def get_input_embeddings(self): return self.model.get_input_embeddings() def set_input_embeddings(self, value): self.model.set_input_embeddings(value) def set_decoder(self, decoder): self.model.set_decoder(decoder) def get_decoder(self): return self.model.get_decoder() def get_image_features(self, pixel_values): return self.model.get_image_features(pixel_values) @property def language_model(self): return self.model.language_model @property def vision_tower(self): return self.model.vision_tower @property def multi_modal_projector(self): return self.model.multi_modal_projector @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, token_type_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, PaliGemmaCausalLMOutputWithPast]: """ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, PaliGemmaForConditionalGeneration >>> model = PaliGemmaForConditionalGeneration.from_pretrained("google/paligemma2-3b-mix-224") >>> processor = AutoProcessor.from_pretrained("google/paligemma2-3b-mix-224") >>> prompt = "Where is the cat standing?" 
>>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, text=prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(**inputs,) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Where is the cat standing?\\nsnow" ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model(input_ids=input_ids, pixel_values=pixel_values, token_type_ids=token_type_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **kwargs) hidden_states = outputs[0] slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs) return PaliGemmaCausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, cache_position=None, position_ids=None, pixel_values=None, attention_mask=None, token_type_ids=None, use_cache=True, logits_to_keep=None, labels=None, **kwargs): model_inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, cache_position=cache_position, use_cache=use_cache, logits_to_keep=logits_to_keep, token_type_ids=token_type_ids, **kwargs) if model_inputs.get('position_ids') is not None: model_inputs['position_ids'] += 1 if cache_position[0] == 0: model_inputs['pixel_values'] = pixel_values is_training = token_type_ids is not None and labels is not None is_static_hybrid_cache = isinstance(past_key_values, StaticCache) and any(past_key_values.is_sliding) if cache_position[0] == 0 and is_static_hybrid_cache: input_tensor = inputs_embeds if inputs_embeds is not None else input_ids causal_mask = self.model._update_causal_mask(attention_mask, token_type_ids, past_key_values, cache_position, input_tensor, is_training) model_inputs['attention_mask'] = causal_mask return model_inputs @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): """ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. 
sequence_length (`int`): The sequence length being processed. target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. """ if attention_mask is not None and attention_mask.dim() == 4: causal_mask = attention_mask else: min_dtype = torch.finfo(dtype).min causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device) if sequence_length != 1: causal_mask = torch.triu(causal_mask, diagonal=1) causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1) causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1) if attention_mask is not None: causal_mask = causal_mask.clone() mask_length = attention_mask.shape[-1] padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device) padding_mask = padding_mask == 0 causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype) return causal_mask
@auto_docstring(custom_intro='\n The Base Paligemma model which consists of a vision backbone and a language model without language modeling head.,\n ') class PaliGemmaForConditionalGeneration(PaliGemmaPreTrainedModel, GenerationMixin): def __init__(self, config: PaliGemmaConfig): pass def get_input_embeddings(self): pass def set_input_embeddings(self, value): pass def set_decoder(self, decoder): pass def get_decoder(self): pass def get_image_features(self, pixel_values): pass @property def language_model(self): pass @property def vision_tower(self): pass @property def multi_modal_projector(self): pass @can_return_tuple @auto_docstring def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, token_type_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, PaliGemmaCausalLMOutputWithPast]: ''' labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, PaliGemmaForConditionalGeneration >>> model = PaliGemmaForConditionalGeneration.from_pretrained("google/paligemma2-3b-mix-224") >>> processor = AutoProcessor.from_pretrained("google/paligemma2-3b-mix-224") >>> prompt = "Where is the cat standing?" >>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, text=prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(**inputs,) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Where is the cat standing?\nsnow" ```''' pass def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, cache_position=None, position_ids=None, pixel_values=None, attention_mask=None, token_type_ids=None, use_cache=True, logits_to_keep=None, labels=None, **kwargs): pass @staticmethod def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs): ''' Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing. Args: attention_mask (`torch.Tensor`): A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`. sequence_length (`int`): The sequence length being processed. 
target_length (`int`): The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet. dtype (`torch.dtype`): The dtype to use for the 4D attention mask. cache_position (`torch.Tensor`): Indices depicting the position of the input sequence tokens in the sequence. batch_size (`torch.Tensor`): Batch size. ''' pass
20
2
28
3
20
5
4
0.26
2
13
8
0
11
7
11
12
323
42
225
90
171
58
122
49
110
18
2
2
44
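The `_prepare_4d_causal_attention_mask_with_cache_position` docstring above describes expanding a 2D padding mask into a 4D causal mask. Below is a minimal standalone sketch of that expansion; the function name and exact semantics are illustrative, not the transformers implementation.

```python
import torch

def make_4d_causal_mask(attention_mask_2d, sequence_length, target_length, dtype=torch.float32):
    # Hypothetical helper: open the causal lower triangle, then re-apply the 2D padding mask.
    batch_size = attention_mask_2d.shape[0]
    min_value = torch.finfo(dtype).min
    mask = torch.full((sequence_length, target_length), min_value, dtype=dtype)
    query_positions = torch.arange(sequence_length).unsqueeze(-1)
    mask = mask.masked_fill(torch.arange(target_length) <= query_positions, 0.0)
    # Broadcast to (batch, 1, query_len, key_len) and mask out padded key positions.
    mask = mask[None, None, :, :].expand(batch_size, 1, -1, -1).clone()
    padding = attention_mask_2d[:, None, None, :target_length].eq(0)
    return mask.masked_fill(padding, min_value)

print(make_4d_causal_mask(torch.tensor([[1, 1, 1, 0]]), 4, 4).shape)  # torch.Size([1, 1, 4, 4])
```

Masked positions carry the dtype's minimum value so they contribute nothing after softmax.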
4,379
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/paligemma/modeling_paligemma.py
transformers.models.paligemma.modeling_paligemma.PaliGemmaMultiModalProjector
from .configuration_paligemma import PaliGemmaConfig
from torch import nn


class PaliGemmaMultiModalProjector(nn.Module):
    def __init__(self, config: PaliGemmaConfig):
        super().__init__()
        self.linear = nn.Linear(config.vision_config.hidden_size, config.vision_config.projection_dim, bias=True)

    def forward(self, image_features):
        hidden_states = self.linear(image_features)
        return hidden_states
class PaliGemmaMultiModalProjector(nn.Module):
    def __init__(self, config: PaliGemmaConfig):
        pass

    def forward(self, image_features):
        pass
3
0
4
1
3
0
1
0
1
2
1
0
2
1
2
12
9
2
7
5
4
0
7
5
4
1
1
0
2
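The projector above bridges the vision tower and the language model with a single linear layer. A toy run with made-up sizes (the real dimensions come from the model config):

```python
import torch
from torch import nn

# Stand-in sizes, assumed for illustration only.
projector = nn.Linear(1152, 2048, bias=True)
image_features = torch.randn(1, 256, 1152)  # (batch, num_image_tokens, vision_hidden_size)
print(projector(image_features).shape)      # torch.Size([1, 256, 2048])
```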
4,380
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/paligemma/modeling_paligemma.py
transformers.models.paligemma.modeling_paligemma.PaliGemmaPreTrainedModel
from ...modeling_utils import PreTrainedModel
from .configuration_paligemma import PaliGemmaConfig
from torch import nn
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, logging


@auto_docstring
class PaliGemmaPreTrainedModel(PreTrainedModel):
    config: PaliGemmaConfig
    base_model_prefix = ''
    supports_gradient_checkpointing = True
    _no_split_modules = ['PaliGemmaMultiModalProjector']
    _skip_keys_device_placement = 'past_key_values'
    _can_compile_fullgraph = False
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = getattr(self.config, 'initializer_range', self.config.get_text_config().initializer_range)
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
@auto_docstring
class PaliGemmaPreTrainedModel(PreTrainedModel):
    def _init_weights(self, module):
        pass
3
0
20
2
16
2
7
0.07
1
0
0
1
1
0
1
1
32
3
27
13
25
2
22
13
20
7
1
2
7
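`_init_weights` above draws Linear weights from a normal distribution and zeroes biases. The same pattern works as a standalone helper; the `std=0.02` default here is an assumption standing in for the config's `initializer_range`:

```python
from torch import nn

def init_linear_weights(module, std=0.02):
    # Mirrors the pattern in _init_weights: normal(0, std) weights, zero bias.
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.bias is not None:
            module.bias.data.zero_()

model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 8))
model.apply(init_linear_weights)  # apply() visits every submodule
```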
4,381
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/paligemma/processing_paligemma.py
transformers.models.paligemma.processing_paligemma.PaliGemmaImagesKwargs
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
from typing import Optional, Union


class PaliGemmaImagesKwargs(ImagesKwargs):
    do_convert_rgb: Optional[bool]
class PaliGemmaImagesKwargs(ImagesKwargs):
    pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
2
0
2
1
1
0
2
1
1
0
2
0
0
4,382
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/paligemma/processing_paligemma.py
transformers.models.paligemma.processing_paligemma.PaliGemmaProcessor
import numpy as np from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack from typing import Optional, Union from ...feature_extraction_utils import BatchFeature from ...image_utils import ImageInput, is_valid_image from ...tokenization_utils_base import AddedToken, PreTokenizedInput, TextInput class PaliGemmaProcessor(ProcessorMixin): """ Constructs a PaliGemma processor which wraps a PaliGemma image processor and a PaliGemma tokenizer into a single processor. [`PaliGemmaProcessor`] offers all the functionalities of [`SiglipImageProcessor`] and [`GemmaTokenizerFast`]. See the [`~PaliGemmaProcessor.__call__`] and [`~PaliGemmaProcessor.decode`] for more information. Args: image_processor ([`SiglipImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`GemmaTokenizerFast`], *optional*): The tokenizer is a required input. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. """ attributes = ['image_processor', 'tokenizer'] image_processor_class = ('SiglipImageProcessor', 'SiglipImageProcessorFast') tokenizer_class = ('GemmaTokenizer', 'GemmaTokenizerFast') def __init__(self, image_processor=None, tokenizer=None, chat_template=None, **kwargs): if not hasattr(image_processor, 'image_seq_length'): raise ValueError('Image processor is missing an `image_seq_length` attribute.') self.image_seq_length = image_processor.image_seq_length if not hasattr(tokenizer, 'image_token'): image_token = AddedToken(IMAGE_TOKEN, normalized=False, special=True) tokens_to_add = {'additional_special_tokens': [image_token]} tokenizer.add_special_tokens(tokens_to_add) self.image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN) self.image_token = IMAGE_TOKEN else: self.image_token_id = tokenizer.image_token_id self.image_token = tokenizer.image_token tokenizer.add_tokens(EXTRA_TOKENS) tokenizer.add_bos_token = False tokenizer.add_eos_token = False super().__init__(image_processor, tokenizer, chat_template=chat_template) def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[PaliGemmaProcessorKwargs]) -> BatchFeature: """ Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to GemmaTokenizerFast's [`~GemmaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to SiglipImageProcessor's [`~SiglipImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. The usage for PaliGemma fine-tuning preparation is slightly different than usual. suffix passed are suffixes to the prompt in `text`, and will be placed after the prompt. This is because attention is handled differently for the prefix and the suffix. For instance, ```python image = PIL_cow_image prompt = "answer en Where is the cow standing?" 
suffix = "on the beach" inputs = processor(text=prompt, images=image, suffix=suffix) ``` Here `inputs` will contain the `input_ids` and `token_type_ids` that follow ```python inputs["input_ids"][:, 256:] # tensor([[ 2, 6006, 603, 573, 13910, 9980, 235336, 108, 477, 573, 8318]]) inputs["token_type_ids"][:, 256:] tensor([[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1]]) ``` Meaning the last three tokens are of "label" ("suffix") type while the other ones are of "prefix" type. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. suffix (`str`, `list[str]`, `list[list[str]]`): The suffixes or batch of suffixes to be encoded. Only necessary for finetuning. See https://github.com/google-research/big_vision/blob/main/big_vision/configs/proj/paligemma/README.md for more information. If your prompt is "<image> What is on the image", the suffix corresponds to the expected prediction "a cow sitting on a bench". Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. If `suffix` is provided, the `input_ids` will also contain the suffix input ids. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. - **labels** -- Labels compatible with training if `suffix` is not None """ output_kwargs = self._merge_kwargs(PaliGemmaProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs) suffix = output_kwargs['text_kwargs'].pop('suffix', None) return_token_type_ids = suffix is not None if images is None: raise ValueError('`images` are expected as arguments to a `PaliGemmaProcessor` instance.') if text is None: logger.warning_once('You are using PaliGemma without a text prefix. It will perform as a picture-captioning model.') text = '' if _is_str_or_image(text): text = [text] elif isinstance(text, list) and _is_str_or_image(text[0]): pass if text is not None and images is not None: if not any((IMAGE_TOKEN in sample for sample in text)): logger.warning('You are passing both `text` and `images` to `PaliGemmaProcessor`. The processor expects special image tokens in the text, as many tokens as there are images per each text. It is recommended to add `<image>` tokens in the very beginning of your text. 
For this call, we will infer how many images each text has and add special tokens.') if isinstance(text, list) and isinstance(images, list): if len(images) != len(text): raise ValueError(f'Received {len(images)} images for {len(text)} prompts. Each prompt should be associated with an image or list of images.') if is_valid_image(images): images = [[images]] elif isinstance(images, (list, tuple)) and is_valid_image(images[0]): images = [[image] for image in images] elif not (isinstance(images, (list, tuple)) and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0])): raise ValueError('images must be an image, list of images or list of list of images') input_strings = [build_string_from_input(prompt=prompt, bos_token=self.tokenizer.bos_token, image_seq_len=self.image_seq_length, image_token=IMAGE_TOKEN, num_images=len(image_list) if isinstance(image_list, list) else 1) for prompt, image_list in zip(text, images)] else: expanded_samples = [] for sample in text: expanded_sample = sample.replace(IMAGE_TOKEN, IMAGE_TOKEN * self.image_seq_length) bos_rfind_index = expanded_sample.rfind(IMAGE_TOKEN) bos_index = bos_rfind_index + len(IMAGE_TOKEN) if bos_rfind_index != -1 else 0 expanded_sample = expanded_sample[:bos_index] + self.tokenizer.bos_token + expanded_sample[bos_index:] expanded_samples.append(expanded_sample) input_strings = [f'{sample}\n' for sample in expanded_samples] if suffix is not None and _is_str_or_image(suffix): suffix = [suffix] if suffix is not None: suffix = [sfx + self.tokenizer.eos_token for sfx in suffix] pixel_values = self.image_processor(images, **output_kwargs['images_kwargs'])['pixel_values'] return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None) return_mm_token_type_ids = output_kwargs['text_kwargs'].pop('return_mm_token_type_ids', None) inputs = self.tokenizer(input_strings, text_pair=suffix, return_token_type_ids=return_token_type_ids, **output_kwargs['text_kwargs']) self._check_special_mm_tokens(input_strings, inputs, modalities=['image']) return_data = {**inputs, 'pixel_values': pixel_values} if return_token_type_ids: labels = np.array(inputs['input_ids']) labels[np.array(inputs['token_type_ids']) == 0] = -100 return_data.update({'labels': labels}) if return_mm_token_type_ids: array_ids = np.array(return_data['input_ids']) mm_token_type_ids = np.zeros_like(return_data['input_ids']) mm_token_type_ids[array_ids == self.image_token_id] = 1 return_data['mm_token_type_ids'] = mm_token_type_ids.tolist() return BatchFeature(data=return_data, tensor_type=return_tensors) def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): """ Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (list[list[str]], *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. """ vision_data = {} if image_sizes is not None: num_image_tokens = [self.image_seq_length] * len(image_sizes) num_image_patches = [1] * len(image_sizes) vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches}) return MultiModalData(**vision_data)
class PaliGemmaProcessor(ProcessorMixin): ''' Constructs a PaliGemma processor which wraps a PaliGemma image processor and a PaliGemma tokenizer into a single processor. [`PaliGemmaProcessor`] offers all the functionalities of [`SiglipImageProcessor`] and [`GemmaTokenizerFast`]. See the [`~PaliGemmaProcessor.__call__`] and [`~PaliGemmaProcessor.decode`] for more information. Args: image_processor ([`SiglipImageProcessor`], *optional*): The image processor is a required input. tokenizer ([`GemmaTokenizerFast`], *optional*): The tokenizer is a required input. chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string. ''' def __init__(self, image_processor=None, tokenizer=None, chat_template=None, **kwargs): pass def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[PaliGemmaProcessorKwargs]) -> BatchFeature: ''' Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` and `kwargs` arguments to GemmaTokenizerFast's [`~GemmaTokenizerFast.__call__`] if `text` is not `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to SiglipImageProcessor's [`~SiglipImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring of the above two methods for more information. The usage for PaliGemma fine-tuning preparation is slightly different than usual. suffix passed are suffixes to the prompt in `text`, and will be placed after the prompt. This is because attention is handled differently for the prefix and the suffix. For instance, ```python image = PIL_cow_image prompt = "answer en Where is the cow standing?" suffix = "on the beach" inputs = processor(text=prompt, images=image, suffix=suffix) ``` Here `inputs` will contain the `input_ids` and `token_type_ids` that follow ```python inputs["input_ids"][:, 256:] # tensor([[ 2, 6006, 603, 573, 13910, 9980, 235336, 108, 477, 573, 8318]]) inputs["token_type_ids"][:, 256:] tensor([[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1]]) ``` Meaning the last three tokens are of "label" ("suffix") type while the other ones are of "prefix" type. Args: images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`): The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. suffix (`str`, `list[str]`, `list[list[str]]`): The suffixes or batch of suffixes to be encoded. Only necessary for finetuning. See https://github.com/google-research/big_vision/blob/main/big_vision/configs/proj/paligemma/README.md for more information. 
If your prompt is "<image> What is on the image", the suffix corresponds to the expected prediction "a cow sitting on a bench". Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. If `suffix` is provided, the `input_ids` will also contain the suffix input ids. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. - **labels** -- Labels compatible with training if `suffix` is not None ''' pass def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs): ''' Computes the number of placeholder tokens needed for multimodal inputs with the given sizes. Args: image_sizes (list[list[str]], *optional*): The input sizes formatted as (height, width) per each image. Returns: `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided input modalities, along with other useful data. ''' pass
4
3
41
5
24
12
6
0.62
1
7
2
0
5
2
5
22
233
31
125
42
105
77
77
28
71
20
2
4
28
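The `__call__` docstring above implies each prompt is expanded into `image_seq_length` `<image>` placeholders followed by the BOS token, the text prefix, and a trailing newline separator. A hypothetical re-creation of that layout (`expand_prompt` and the token strings are illustrative, not the processor's actual helper):

```python
def expand_prompt(prompt, bos_token="<bos>", image_token="<image>", image_seq_len=256, num_images=1):
    # Image placeholders first, then BOS, then the text prefix, then a newline.
    return f"{image_token * image_seq_len * num_images}{bos_token}{prompt}\n"

s = expand_prompt("answer en Where is the cow standing?", image_seq_len=4)
print(repr(s))
# '<image><image><image><image><bos>answer en Where is the cow standing?\n'
```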
4,383
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/paligemma/processing_paligemma.py
transformers.models.paligemma.processing_paligemma.PaliGemmaProcessorKwargs
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack


class PaliGemmaProcessorKwargs(ProcessingKwargs, total=False):
    text_kwargs: PaliGemmaTextKwargs
    images_kwargs: PaliGemmaImagesKwargs
    _defaults = {
        'text_kwargs': {'padding': False, 'return_mm_token_type_ids': False},
        'images_kwargs': {'data_format': 'channels_first'},
    }
class PaliGemmaProcessorKwargs(ProcessingKwargs, total=False):
    pass
1
0
0
0
0
0
0
0
2
0
0
0
0
0
0
0
11
0
11
2
10
0
4
2
3
0
3
0
0
4,384
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/paligemma/processing_paligemma.py
transformers.models.paligemma.processing_paligemma.PaliGemmaTextKwargs
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
from typing import Optional, Union
from ...tokenization_utils_base import AddedToken, PreTokenizedInput, TextInput


class PaliGemmaTextKwargs(TextKwargs):
    suffix: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]]
class PaliGemmaTextKwargs(TextKwargs):
    pass
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
2
0
2
1
1
0
2
1
1
0
2
0
0
4,385
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/configuration_patchtsmixer.py
transformers.models.patchtsmixer.configuration_patchtsmixer.PatchTSMixerConfig
from ...configuration_utils import PretrainedConfig from typing import Optional, Union class PatchTSMixerConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`PatchTSMixerModel`]. It is used to instantiate a PatchTSMixer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the PatchTSMixer [ibm/patchtsmixer-etth1-pretrain](https://huggingface.co/ibm/patchtsmixer-etth1-pretrain) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: context_length (`int`, *optional*, defaults to 32): The context/history length for the input sequence. patch_length (`int`, *optional*, defaults to 8): The patch length for the input sequence. num_input_channels (`int`, *optional*, defaults to 1): Number of input variates. For Univariate, set it to 1. patch_stride (`int`, *optional*, defaults to 8): Determines the overlap between two consecutive patches. Set it to patch_length (or greater), if we want non-overlapping patches. num_parallel_samples (`int`, *optional*, defaults to 100): The number of samples to generate in parallel for probabilistic forecast. d_model (`int`, *optional*, defaults to 8): Hidden dimension of the model. Recommended to set it as a multiple of patch_length (i.e. 2-5X of patch_length). Larger value indicates more complex model. expansion_factor (`int`, *optional*, defaults to 2): Expansion factor to use inside MLP. Recommended range is 2-5. Larger value indicates more complex model. num_layers (`int`, *optional*, defaults to 3): Number of layers to use. Recommended range is 3-15. Larger value indicates more complex model. dropout (`float`, *optional*, defaults to 0.2): The dropout probability the `PatchTSMixer` backbone. Recommended range is 0.2-0.7 mode (`str`, *optional*, defaults to `"common_channel"`): Mixer Mode. Determines how to process the channels. Allowed values: "common_channel", "mix_channel". In "common_channel" mode, we follow Channel-independent modelling with no explicit channel-mixing. Channel mixing happens in an implicit manner via shared weights across channels. (preferred first approach) In "mix_channel" mode, we follow explicit channel-mixing in addition to patch and feature mixer. (preferred approach when channel correlations are very important to model) gated_attn (`bool`, *optional*, defaults to `True`): Enable Gated Attention. norm_mlp (`str`, *optional*, defaults to `"LayerNorm"`): Normalization layer (BatchNorm or LayerNorm). self_attn (`bool`, *optional*, defaults to `False`): Enable Tiny self attention across patches. This can be enabled when the output of Vanilla PatchTSMixer with gated attention is not satisfactory. Enabling this leads to explicit pair-wise attention and modelling across patches. self_attn_heads (`int`, *optional*, defaults to 1): Number of self-attention heads. Works only when `self_attn` is set to `True`. use_positional_encoding (`bool`, *optional*, defaults to `False`): Enable the use of positional embedding for the tiny self-attention layers. Works only when `self_attn` is set to `True`. positional_encoding_type (`str`, *optional*, defaults to `"sincos"`): Positional encodings. Options `"random"` and `"sincos"` are supported. 
Works only when `use_positional_encoding` is set to `True` scaling (`string` or `bool`, *optional*, defaults to `"std"`): Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the scaler is set to "mean". loss (`string`, *optional*, defaults to `"mse"`): The loss function for the model corresponding to the `distribution_output` head. For parametric distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared error "mse". init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated normal weight initialization distribution. post_init (`bool`, *optional*, defaults to `False`): Whether to use custom weight initialization from `transformers` library, or the default initialization in `PyTorch`. Setting it to `False` performs `PyTorch` weight initialization. norm_eps (`float`, *optional*, defaults to 1e-05): A value added to the denominator for numerical stability of normalization. mask_type (`str`, *optional*, defaults to `"random"`): Type of masking to use for Masked Pretraining mode. Allowed values are "random", "forecast". In Random masking, points are masked randomly. In Forecast masking, points are masked towards the end. random_mask_ratio (`float`, *optional*, defaults to 0.5): Masking ratio to use when `mask_type` is `random`. Higher value indicates more masking. num_forecast_mask_patches (`int` or `list`, *optional*, defaults to `[2]`): Number of patches to be masked at the end of each batch sample. If it is an integer, all the samples in the batch will have the same number of masked patches. If it is a list, samples in the batch will be randomly masked by numbers defined in the list. This argument is only used for forecast pretraining. mask_value (`float`, *optional*, defaults to `0.0`): Mask value to use. masked_loss (`bool`, *optional*, defaults to `True`): Whether to compute pretraining loss only at the masked portions, or on the entire output. channel_consistent_masking (`bool`, *optional*, defaults to `True`): When true, masking will be same across all channels of a timeseries. Otherwise, masking positions will vary across channels. unmasked_channel_indices (`list`, *optional*): Channels that are not masked during pretraining. head_dropout (`float`, *optional*, defaults to 0.2): The dropout probability the `PatchTSMixer` head. distribution_output (`string`, *optional*, defaults to `"student_t"`): The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or "negative_binomial". prediction_length (`int`, *optional*, defaults to 16): Number of time steps to forecast for a forecasting task. Also known as the Forecast Horizon. prediction_channel_indices (`list`, *optional*): List of channel indices to forecast. If None, forecast all channels. Target data is expected to have all channels and we explicitly filter the channels in prediction and target before loss computation. num_targets (`int`, *optional*, defaults to 3): Number of targets (dimensionality of the regressed variable) for a regression task. output_range (`list`, *optional*): Output range to restrict for the regression task. Defaults to None. head_aggregation (`str`, *optional*, defaults to `"max_pool"`): Aggregation mode to enable for classification or regression task. Allowed values are `None`, "use_last", "max_pool", "avg_pool". 
Example: ```python >>> from transformers import PatchTSMixerConfig, PatchTSMixerModel >>> # Initializing a default PatchTSMixer configuration >>> configuration = PatchTSMixerConfig() >>> # Randomly initializing a model (with random weights) from the configuration >>> model = PatchTSMixerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = 'patchtsmixer' attribute_map = {'hidden_size': 'd_model', 'num_hidden_layers': 'num_layers'} def __init__(self, context_length: int=32, patch_length: int=8, num_input_channels: int=1, patch_stride: int=8, num_parallel_samples: int=100, d_model: int=8, expansion_factor: int=2, num_layers: int=3, dropout: float=0.2, mode: str='common_channel', gated_attn: bool=True, norm_mlp: str='LayerNorm', self_attn: bool=False, self_attn_heads: int=1, use_positional_encoding: bool=False, positional_encoding_type: str='sincos', scaling: Optional[Union[str, bool]]='std', loss: str='mse', init_std: float=0.02, post_init: bool=False, norm_eps: float=1e-05, mask_type: str='random', random_mask_ratio: float=0.5, num_forecast_mask_patches: Optional[Union[list[int], int]]=[2], mask_value: int=0, masked_loss: bool=True, channel_consistent_masking: bool=True, unmasked_channel_indices: Optional[list[int]]=None, head_dropout: float=0.2, distribution_output: str='student_t', prediction_length: int=16, prediction_channel_indices: Optional[list]=None, num_targets: int=3, output_range: Optional[list]=None, head_aggregation: str='max_pool', **kwargs): self.num_input_channels = num_input_channels self.context_length = context_length self.patch_length = patch_length self.patch_stride = patch_stride self.d_model = d_model self.expansion_factor = expansion_factor self.num_layers = num_layers self.dropout = dropout self.mode = mode self.gated_attn = gated_attn self.norm_mlp = norm_mlp self.scaling = scaling self.head_dropout = head_dropout self.num_patches = (max(context_length, patch_length) - patch_length) // patch_stride + 1 self.mask_type = mask_type self.random_mask_ratio = random_mask_ratio self.num_forecast_mask_patches = num_forecast_mask_patches self.mask_value = mask_value self.channel_consistent_masking = channel_consistent_masking self.masked_loss = masked_loss self.patch_last = True self.use_positional_encoding = use_positional_encoding self.positional_encoding_type = positional_encoding_type self.prediction_length = prediction_length self.prediction_channel_indices = prediction_channel_indices self.num_targets = num_targets self.output_range = output_range self.head_aggregation = head_aggregation self.self_attn = self_attn self.self_attn_heads = self_attn_heads self.init_std = init_std self.post_init = post_init self.distribution_output = distribution_output self.loss = loss self.num_parallel_samples = num_parallel_samples self.unmasked_channel_indices = unmasked_channel_indices self.norm_eps = norm_eps super().__init__(**kwargs)
class PatchTSMixerConfig(PretrainedConfig): ''' This is the configuration class to store the configuration of a [`PatchTSMixerModel`]. It is used to instantiate a PatchTSMixer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the PatchTSMixer [ibm/patchtsmixer-etth1-pretrain](https://huggingface.co/ibm/patchtsmixer-etth1-pretrain) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: context_length (`int`, *optional*, defaults to 32): The context/history length for the input sequence. patch_length (`int`, *optional*, defaults to 8): The patch length for the input sequence. num_input_channels (`int`, *optional*, defaults to 1): Number of input variates. For Univariate, set it to 1. patch_stride (`int`, *optional*, defaults to 8): Determines the overlap between two consecutive patches. Set it to patch_length (or greater), if we want non-overlapping patches. num_parallel_samples (`int`, *optional*, defaults to 100): The number of samples to generate in parallel for probabilistic forecast. d_model (`int`, *optional*, defaults to 8): Hidden dimension of the model. Recommended to set it as a multiple of patch_length (i.e. 2-5X of patch_length). Larger value indicates more complex model. expansion_factor (`int`, *optional*, defaults to 2): Expansion factor to use inside MLP. Recommended range is 2-5. Larger value indicates more complex model. num_layers (`int`, *optional*, defaults to 3): Number of layers to use. Recommended range is 3-15. Larger value indicates more complex model. dropout (`float`, *optional*, defaults to 0.2): The dropout probability the `PatchTSMixer` backbone. Recommended range is 0.2-0.7 mode (`str`, *optional*, defaults to `"common_channel"`): Mixer Mode. Determines how to process the channels. Allowed values: "common_channel", "mix_channel". In "common_channel" mode, we follow Channel-independent modelling with no explicit channel-mixing. Channel mixing happens in an implicit manner via shared weights across channels. (preferred first approach) In "mix_channel" mode, we follow explicit channel-mixing in addition to patch and feature mixer. (preferred approach when channel correlations are very important to model) gated_attn (`bool`, *optional*, defaults to `True`): Enable Gated Attention. norm_mlp (`str`, *optional*, defaults to `"LayerNorm"`): Normalization layer (BatchNorm or LayerNorm). self_attn (`bool`, *optional*, defaults to `False`): Enable Tiny self attention across patches. This can be enabled when the output of Vanilla PatchTSMixer with gated attention is not satisfactory. Enabling this leads to explicit pair-wise attention and modelling across patches. self_attn_heads (`int`, *optional*, defaults to 1): Number of self-attention heads. Works only when `self_attn` is set to `True`. use_positional_encoding (`bool`, *optional*, defaults to `False`): Enable the use of positional embedding for the tiny self-attention layers. Works only when `self_attn` is set to `True`. positional_encoding_type (`str`, *optional*, defaults to `"sincos"`): Positional encodings. Options `"random"` and `"sincos"` are supported. 
Works only when `use_positional_encoding` is set to `True` scaling (`string` or `bool`, *optional*, defaults to `"std"`): Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the scaler is set to "mean". loss (`string`, *optional*, defaults to `"mse"`): The loss function for the model corresponding to the `distribution_output` head. For parametric distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared error "mse". init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated normal weight initialization distribution. post_init (`bool`, *optional*, defaults to `False`): Whether to use custom weight initialization from `transformers` library, or the default initialization in `PyTorch`. Setting it to `False` performs `PyTorch` weight initialization. norm_eps (`float`, *optional*, defaults to 1e-05): A value added to the denominator for numerical stability of normalization. mask_type (`str`, *optional*, defaults to `"random"`): Type of masking to use for Masked Pretraining mode. Allowed values are "random", "forecast". In Random masking, points are masked randomly. In Forecast masking, points are masked towards the end. random_mask_ratio (`float`, *optional*, defaults to 0.5): Masking ratio to use when `mask_type` is `random`. Higher value indicates more masking. num_forecast_mask_patches (`int` or `list`, *optional*, defaults to `[2]`): Number of patches to be masked at the end of each batch sample. If it is an integer, all the samples in the batch will have the same number of masked patches. If it is a list, samples in the batch will be randomly masked by numbers defined in the list. This argument is only used for forecast pretraining. mask_value (`float`, *optional*, defaults to `0.0`): Mask value to use. masked_loss (`bool`, *optional*, defaults to `True`): Whether to compute pretraining loss only at the masked portions, or on the entire output. channel_consistent_masking (`bool`, *optional*, defaults to `True`): When true, masking will be same across all channels of a timeseries. Otherwise, masking positions will vary across channels. unmasked_channel_indices (`list`, *optional*): Channels that are not masked during pretraining. head_dropout (`float`, *optional*, defaults to 0.2): The dropout probability the `PatchTSMixer` head. distribution_output (`string`, *optional*, defaults to `"student_t"`): The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or "negative_binomial". prediction_length (`int`, *optional*, defaults to 16): Number of time steps to forecast for a forecasting task. Also known as the Forecast Horizon. prediction_channel_indices (`list`, *optional*): List of channel indices to forecast. If None, forecast all channels. Target data is expected to have all channels and we explicitly filter the channels in prediction and target before loss computation. num_targets (`int`, *optional*, defaults to 3): Number of targets (dimensionality of the regressed variable) for a regression task. output_range (`list`, *optional*): Output range to restrict for the regression task. Defaults to None. head_aggregation (`str`, *optional*, defaults to `"max_pool"`): Aggregation mode to enable for classification or regression task. Allowed values are `None`, "use_last", "max_pool", "avg_pool". 
Example: ```python >>> from transformers import PatchTSMixerConfig, PatchTSMixerModel >>> # Initializing a default PatchTSMixer configuration >>> configuration = PatchTSMixerConfig() >>> # Randomly initializing a model (with random weights) from the configuration >>> model = PatchTSMixerModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```''' def __init__(self, context_length: int=32, patch_length: int=8, num_input_channels: int=1, patch_stride: int=8, num_parallel_samples: int=100, d_model: int=8, expansion_factor: int=2, num_layers: int=3, dropout: float=0.2, mode: str='common_channel', gated_attn: bool=True, norm_mlp: str='LayerNorm', self_attn: bool=False, self_attn_heads: int=1, use_positional_encoding: bool=False, positional_encoding_type: str='sincos', scaling: Optional[Union[str, bool]]='std', loss: str='mse', init_std: float=0.02, post_init: bool=False, norm_eps: float=1e-05, mask_type: str='random', random_mask_ratio: float=0.5, num_forecast_mask_patches: Optional[Union[list[int], int]]=[2], mask_value: int=0, masked_loss: bool=True, channel_consistent_masking: bool=True, unmasked_channel_indices: Optional[list[int]]=None, head_dropout: float=0.2, distribution_output: str='student_t', prediction_length: int=16, prediction_channel_indices: Optional[list]=None, num_targets: int=3, output_range: Optional[list]=None, head_aggregation: str='max_pool', **kwargs): pass
2
1
83
0
77
6
1
1.39
1
6
0
0
1
37
1
1
207
9
83
79
43
115
42
41
40
1
1
0
1
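The `num_patches` arithmetic in `PatchTSMixerConfig.__init__` above determines how many patches a context window yields; checked in isolation:

```python
def num_patches(context_length, patch_length, patch_stride):
    # Same arithmetic as in PatchTSMixerConfig.__init__.
    return (max(context_length, patch_length) - patch_length) // patch_stride + 1

print(num_patches(32, 8, 8))  # 4 non-overlapping patches with the default settings
print(num_patches(32, 8, 4))  # 7 overlapping patches when stride < patch_length
```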
4,386
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
transformers.models.patchtsmixer.modeling_patchtsmixer.FeatureMixerBlock
import torch
from .configuration_patchtsmixer import PatchTSMixerConfig
import torch.nn as nn


class FeatureMixerBlock(nn.Module):
    """This module mixes the hidden feature dimension.

    Args:
        config (`PatchTSMixerConfig`):
            Configuration.
    """

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__()
        self.norm = PatchTSMixerNormLayer(config)
        self.gated_attn = config.gated_attn
        self.mlp = PatchTSMixerMLP(in_features=config.d_model, out_features=config.d_model, config=config)
        if config.gated_attn:
            self.gating_block = PatchTSMixerGatedAttention(in_size=config.d_model, out_size=config.d_model)

    def forward(self, hidden: torch.Tensor):
        """
        Args:
            hidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`):
                Input tensor to the layer.

        Returns:
            `torch.Tensor`: Transformed tensor.
        """
        residual = hidden
        hidden = self.norm(hidden)
        hidden = self.mlp(hidden)
        if self.gated_attn:
            hidden = self.gating_block(hidden)
        out = hidden + residual
        return out
class FeatureMixerBlock(nn.Module):
    '''This module mixes the hidden feature dimension.

    Args:
        config (`PatchTSMixerConfig`):
            Configuration.
    '''

    def __init__(self, config: PatchTSMixerConfig):
        pass

    def forward(self, hidden: torch.Tensor):
        '''
        Args:
            hidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`):
                Input tensor to the layer.

        Returns:
            `torch.Tensor`: Transformed tensor.
        '''
        pass
3
2
17
4
10
4
2
0.6
1
6
4
0
2
4
2
12
43
11
20
9
17
12
16
9
13
2
1
1
4
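`FeatureMixerBlock` above is a pre-norm residual MLP over `d_model`. A compact stand-in, assuming `nn.LayerNorm` and a GELU MLP in place of `PatchTSMixerNormLayer` and `PatchTSMixerMLP` (whose internals are not part of this record; gating omitted):

```python
import torch
from torch import nn

class TinyFeatureMixer(nn.Module):
    # Minimal sketch: pre-norm, MLP over the feature axis, residual add.
    def __init__(self, d_model, expansion=2, dropout=0.2):
        super().__init__()
        self.norm = nn.LayerNorm(d_model)
        self.mlp = nn.Sequential(
            nn.Linear(d_model, expansion * d_model),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(expansion * d_model, d_model),
        )

    def forward(self, hidden):
        return hidden + self.mlp(self.norm(hidden))

block = TinyFeatureMixer(d_model=8)
print(block(torch.randn(2, 4, 8)).shape)  # torch.Size([2, 4, 8])
```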
4,387
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
transformers.models.patchtsmixer.modeling_patchtsmixer.InjectScalerStatistics4D
import torch.nn as nn
import torch


class InjectScalerStatistics4D(nn.Module):
    def __init__(self, d_model: int, num_patches: int, expansion: int=2):
        super().__init__()
        self.inverse_trans_expansion = nn.Linear(d_model + 2, expansion * d_model)
        self.inverse_trans_compression = nn.Linear(expansion * d_model, d_model)
        self.map_scale_expansion = nn.Linear(2, 2 * expansion)
        self.map_scale_compression = nn.Linear(2 * expansion, 2)
        self.num_patches = num_patches

    def forward(self, inputs: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        """
        Args:
            inputs (`torch.Tensor` of shape `(batch_size, num_input_channels, num_patch, d_model)`)
            loc (`torch.Tensor` of shape `(batch_size, 1, num_input_channels)`)
            scale (`torch.Tensor` of shape `(batch_size, 1, num_input_channels)`)

        Returns:
            `torch.Tensor` of shape `(batch_size, num_input_channels, num_patch, d_model)`
        """
        mean = loc.transpose(-1, -2)
        mean = mean.unsqueeze(-2)
        mean = mean.repeat(1, 1, self.num_patches, 1)
        stdev = scale.transpose(-1, -2)
        stdev = stdev.unsqueeze(-2)
        stdev = stdev.repeat(1, 1, self.num_patches, 1)
        concat_stats = torch.cat([mean, stdev], dim=-1)
        concat_stats = self.map_scale_expansion(concat_stats)
        concat_stats = self.map_scale_compression(concat_stats)
        inputs = torch.cat([inputs, concat_stats], dim=-1)
        inputs = self.inverse_trans_expansion(inputs)
        inputs = self.inverse_trans_compression(inputs)
        return inputs
class InjectScalerStatistics4D(nn.Module):
    def __init__(self, d_model: int, num_patches: int, expansion: int=2):
        pass

    def forward(self, inputs: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        '''
        Args:
            inputs (`torch.Tensor` of shape `(batch_size, num_input_channels, num_patch, d_model)`)
            loc (`torch.Tensor` of shape `(batch_size, 1, num_input_channels)`)
            scale (`torch.Tensor` of shape `(batch_size, 1, num_input_channels)`)

        Returns:
            `torch.Tensor` of shape `(batch_size, num_input_channels, num_patch, d_model)`
        '''
        pass
3
1
18
4
11
10
1
0.91
1
3
0
0
2
5
2
12
38
8
22
11
19
20
22
11
19
1
1
0
2
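The transpose/unsqueeze/repeat chain in `InjectScalerStatistics4D.forward` broadcasts per-channel statistics over every patch; the shape path can be checked in isolation:

```python
import torch

batch, channels, patches = 2, 3, 5
loc = torch.randn(batch, 1, channels)

# (batch, 1, channels) -> (batch, channels, 1) -> (batch, channels, 1, 1) -> repeated over patches
mean = loc.transpose(-1, -2).unsqueeze(-2).repeat(1, 1, patches, 1)
print(mean.shape)  # torch.Size([2, 3, 5, 1])
```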
4,388
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
transformers.models.patchtsmixer.modeling_patchtsmixer.PatchMixerBlock
from .configuration_patchtsmixer import PatchTSMixerConfig
import torch.nn as nn


class PatchMixerBlock(nn.Module):
    """This module mixes the patch dimension.

    Args:
        config (`PatchTSMixerConfig`):
            Configuration.
    """

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__()
        self.norm = PatchTSMixerNormLayer(config)
        self.self_attn = config.self_attn
        self.gated_attn = config.gated_attn
        self.mlp = PatchTSMixerMLP(in_features=config.num_patches, out_features=config.num_patches, config=config)
        if config.gated_attn:
            self.gating_block = PatchTSMixerGatedAttention(in_size=config.num_patches, out_size=config.num_patches)
        if config.self_attn:
            self.self_attn_layer = PatchTSMixerAttention(embed_dim=config.d_model, num_heads=config.self_attn_heads, dropout=config.dropout, config=config)
            self.norm_attn = PatchTSMixerNormLayer(config)

    def forward(self, hidden_state):
        """
        Args:
            hidden_state (`torch.Tensor`): Input tensor.

        Returns:
            `torch.Tensor`: Transformed tensor.
        """
        residual = hidden_state
        hidden_state = self.norm(hidden_state)
        if self.self_attn:
            batch_size, n_vars, num_patches, d_model = hidden_state.shape
            hidden_state_reshaped = hidden_state.reshape(batch_size * n_vars, num_patches, d_model)
            x_attn, _, _ = self.self_attn_layer(hidden_state_reshaped, output_attentions=False)
            x_attn = x_attn.reshape(batch_size, n_vars, num_patches, d_model)
        hidden_state = hidden_state.transpose(2, 3)
        hidden_state = self.mlp(hidden_state)
        if self.gated_attn:
            hidden_state = self.gating_block(hidden_state)
        hidden_state = hidden_state.transpose(2, 3)
        if self.self_attn:
            hidden_state = self.norm_attn(hidden_state + x_attn)
        out = hidden_state + residual
        return out
class PatchMixerBlock(nn.Module):
    '''This module mixes the patch dimension.

    Args:
        config (`PatchTSMixerConfig`):
            Configuration.
    '''

    def __init__(self, config: PatchTSMixerConfig):
        pass

    def forward(self, hidden_state):
        '''
        Args:
            hidden_state (`torch.Tensor`): Input tensor.

        Returns:
            `torch.Tensor`: Transformed tensor.
        '''
        pass
3
2
29
7
18
4
4
0.35
1
6
5
0
2
7
2
12
67
17
37
15
34
13
29
15
26
4
1
1
7
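`PatchMixerBlock` mixes along the patch axis by swapping it into the last position, applying an MLP over `num_patches`, and swapping back. The core trick in isolation, with a plain `nn.Linear` standing in for `PatchTSMixerMLP`:

```python
import torch
from torch import nn

batch, channels, patches, d_model = 2, 3, 6, 8
hidden = torch.randn(batch, channels, patches, d_model)

# transpose(2, 3) moves patches to the last dim so Linear mixes across patches,
# then the second transpose restores the original layout.
mix = nn.Linear(patches, patches)
out = mix(hidden.transpose(2, 3)).transpose(2, 3)
print(out.shape)  # torch.Size([2, 3, 6, 8])
```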
4,389
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerAttention
from typing import Callable, Optional, Union
import torch.nn as nn
from ...processing_utils import Unpack
import torch
from .configuration_patchtsmixer import PatchTSMixerConfig
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...modeling_flash_attention_utils import FlashAttentionKwargs


class PatchTSMixerAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[PatchTSMixerConfig]=None):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config
        if self.head_dim * num_heads != self.embed_dim:
            raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads}).')
        self.scaling = self.head_dim ** (-0.5)
        self.is_decoder = is_decoder
        self.is_causal = is_causal
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""
        is_cross_attention = key_value_states is not None
        bsz, tgt_len = hidden_states.shape[:-1]
        src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
        q_input_shape = (bsz, tgt_len, -1, self.head_dim)
        kv_input_shape = (bsz, src_len, -1, self.head_dim)
        query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
        current_states = key_value_states if is_cross_attention else hidden_states
        key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2)
        value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2)
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != 'eager':
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
        attn_output, attn_weights = attention_interface(self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.dropout, scaling=self.scaling, output_attentions=output_attentions, head_mask=layer_head_mask, **kwargs)
        attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
        attn_output = self.out_proj(attn_output)
        return (attn_output, attn_weights, None)
class PatchTSMixerAttention(nn.Module):
    '''Multi-headed attention from 'Attention Is All You Need' paper'''

    def __init__(self, embed_dim: int, num_heads: int, dropout: float=0.0, is_decoder: bool=False, bias: bool=True, is_causal: bool=False, config: Optional[PatchTSMixerConfig]=None):
        pass

    def forward(self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, **kwargs: Unpack[FlashAttentionKwargs]) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        '''Input shape: Batch x Time x Channel'''
        pass
3
2
50
7
35
8
5
0.24
1
7
1
0
3
12
3
13
156
23
107
44
86
26
68
27
64
12
1
2
15
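`eager_attention_forward` is referenced above but is not part of this record. A generic scaled dot-product fallback consistent with the `(batch, num_heads, seq_len, head_dim)` shapes used in `forward` might look like the sketch below; this is an assumption about its behavior, not the library's function:

```python
import torch

def eager_attention(query, key, value, scaling, attention_mask=None, dropout_p=0.0, training=False):
    # query/key/value: (batch, num_heads, seq_len, head_dim)
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask
    attn_weights = torch.softmax(attn_weights, dim=-1)
    attn_weights = torch.nn.functional.dropout(attn_weights, p=dropout_p, training=training)
    attn_output = torch.matmul(attn_weights, value)
    # Return (batch, seq_len, num_heads, head_dim) so the caller can flatten to (batch, seq_len, embed_dim).
    return attn_output.transpose(1, 2), attn_weights

q = k = v = torch.randn(2, 4, 10, 16)
out, w = eager_attention(q, k, v, scaling=16 ** -0.5)
print(out.shape, w.shape)  # torch.Size([2, 10, 4, 16]) torch.Size([2, 4, 10, 10])
```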
4,390
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerBatchNorm
import torch
from .configuration_patchtsmixer import PatchTSMixerConfig
import torch.nn as nn


class PatchTSMixerBatchNorm(nn.Module):
    """
    Compute batch normalization over the sequence length (time) dimension.
    """

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__()
        self.batchnorm = nn.BatchNorm1d(config.d_model, eps=config.norm_eps)

    def forward(self, inputs: torch.Tensor):
        """
        Parameters:
            inputs (`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`):
                input for Batch norm calculation

        Returns:
            `torch.Tensor` of shape `(batch_size, sequence_length, d_model)`
        """
        output = inputs.transpose(1, 2)
        output = self.batchnorm(output)
        return output.transpose(1, 2)
class PatchTSMixerBatchNorm(nn.Module):
    '''
    Compute batch normalization over the sequence length (time) dimension.
    '''

    def __init__(self, config: PatchTSMixerConfig):
        pass

    def forward(self, inputs: torch.Tensor):
        '''
        Parameters:
            inputs (`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`):
                input for Batch norm calculation

        Returns:
            `torch.Tensor` of shape `(batch_size, sequence_length, d_model)`
        '''
        pass
3
2
7
0
4
4
1
1.38
1
3
1
0
2
1
2
12
20
2
8
5
5
11
8
5
5
1
1
0
2
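`PatchTSMixerBatchNorm` works around `nn.BatchNorm1d` expecting `(batch, features, length)` input by transposing `d_model` into the feature slot and back; the same two transposes in isolation:

```python
import torch
from torch import nn

batch, seq_len, d_model = 4, 16, 8
inputs = torch.randn(batch, seq_len, d_model)

# BatchNorm1d normalizes over dim 1, so d_model is moved there and restored afterwards.
bn = nn.BatchNorm1d(d_model, eps=1e-5)
out = bn(inputs.transpose(1, 2)).transpose(1, 2)
print(out.shape)  # torch.Size([4, 16, 8])
```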
4,391
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerBlock
from .configuration_patchtsmixer import PatchTSMixerConfig
import torch.nn as nn


class PatchTSMixerBlock(nn.Module):
    """The main computing framework of the `PatchTSMixer` model.

    Args:
        config (`PatchTSMixerConfig`):
            Configuration.
    """

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__()
        num_layers = config.num_layers
        self.mixers = nn.ModuleList([PatchTSMixerLayer(config=config) for _ in range(num_layers)])

    def forward(self, hidden_state, output_hidden_states: bool=False):
        """
        Args:
            hidden_state (`torch.Tensor`): The input tensor.
            output_hidden_states (`bool`, *optional*, defaults to `False`):
                Whether to output the hidden states as well.

        Returns:
            `torch.Tensor`: The embedding.
            `list`: List of all hidden states if `output_hidden_states` is set to `True`.
        """
        all_hidden_states = []
        embedding = hidden_state
        for mod in self.mixers:
            embedding = mod(embedding)
            if output_hidden_states:
                all_hidden_states.append(embedding)
        if output_hidden_states:
            return (embedding, all_hidden_states)
        else:
            return (embedding, None)
class PatchTSMixerBlock(nn.Module):
    '''The main computing framework of the `PatchTSMixer` model.

    Args:
        config (`PatchTSMixerConfig`):
            Configuration.
    '''

    def __init__(self, config: PatchTSMixerConfig):
        pass

    def forward(self, hidden_state, output_hidden_states: bool=False):
        '''
        Args:
            hidden_state (`torch.Tensor`): The input tensor.
            output_hidden_states (`bool`, *optional*, defaults to `False`):
                Whether to output the hidden states as well.

        Returns:
            `torch.Tensor`: The embedding.
            `list`: List of all hidden states if `output_hidden_states` is set to `True`.
        '''
        pass
3
2
15
3
8
5
3
0.88
1
5
2
0
2
1
2
12
39
9
16
8
13
14
15
8
12
4
1
2
5
4,392
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerChannelFeatureMixerBlock
import torch
from .configuration_patchtsmixer import PatchTSMixerConfig
import torch.nn as nn


class PatchTSMixerChannelFeatureMixerBlock(nn.Module):
    """This module mixes the features in the channel dimension.

    Args:
        config (`PatchTSMixerConfig`):
            Configuration.
    """

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__()
        self.norm = PatchTSMixerNormLayer(config)
        self.gated_attn = config.gated_attn
        self.mlp = PatchTSMixerMLP(in_features=config.num_input_channels, out_features=config.num_input_channels, config=config)
        if config.gated_attn:
            self.gating_block = PatchTSMixerGatedAttention(in_size=config.num_input_channels, out_size=config.num_input_channels)

    def forward(self, inputs: torch.Tensor):
        """
        Args:
            inputs (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, d_model)`):
                input to the MLP layer

        Returns:
            `torch.Tensor` of the same shape as `inputs`
        """
        residual = inputs
        inputs = self.norm(inputs)
        inputs = inputs.permute(0, 3, 2, 1)
        if self.gated_attn:
            inputs = self.gating_block(inputs)
        inputs = self.mlp(inputs)
        inputs = inputs.permute(0, 3, 2, 1)
        out = inputs + residual
        return out
class PatchTSMixerChannelFeatureMixerBlock(nn.Module):
    '''This module mixes the features in the channel dimension.

    Args:
        config (`PatchTSMixerConfig`):
            Configuration.
    '''

    def __init__(self, config: PatchTSMixerConfig):
        pass

    def forward(self, inputs: torch.Tensor):
        '''
        Args:
            inputs (`torch.Tensor` of shape `(batch_size, num_channels, num_patches, d_model)`):
                input to the MLP layer

        Returns:
            `torch.Tensor` of the same shape as `inputs`
        '''
        pass
3
2
19
4
12
4
2
0.5
1
6
4
0
2
4
2
12
46
10
24
9
21
12
18
9
15
2
1
1
4
4,393
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerEncoder
from ...utils import auto_docstring, logging
from typing import Callable, Optional, Union
import torch.nn as nn
import torch
from .configuration_patchtsmixer import PatchTSMixerConfig


class PatchTSMixerEncoder(PatchTSMixerPreTrainedModel):
    """
    Encoder for PatchTSMixer which inputs patched time-series and outputs patched embeddings.

    Args:
        config (`PatchTSMixerConfig`):
            Configuration.
    """

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__(config)
        self.use_return_dict = config.use_return_dict
        self.patcher = nn.Linear(config.patch_length, config.d_model)
        if config.use_positional_encoding:
            self.positional_encoder = PatchTSMixerPositionalEncoding(config=config)
        else:
            self.positional_encoder = None
        self.mlp_mixer_encoder = PatchTSMixerBlock(config=config)
        if config.post_init:
            self.post_init()

    @auto_docstring
    def forward(self, past_values: torch.Tensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=None) -> Union[tuple, PatchTSMixerEncoderOutput]:
        """
        past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`):
            Context values of the time series. For a pretraining task, this denotes the input time series to
            predict the masked portion. For a forecasting task, this denotes the history/past time series values.
            Similarly, for classification or regression tasks, it denotes the appropriate context values of the
            time series.

            For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series,
            it is greater than 1.

        Returns:
            `torch.FloatTensor` of shape `(batch_size, n_vars, num_patches, d_model)`
        """
        return_dict = return_dict if return_dict is not None else self.use_return_dict
        patches = self.patcher(past_values)
        if self.positional_encoder is not None:
            patches = self.positional_encoder(patches)
        last_hidden_state, hidden_states = self.mlp_mixer_encoder(patches, output_hidden_states=output_hidden_states)
        if not return_dict:
            return tuple((v for v in [last_hidden_state, hidden_states]))
        return PatchTSMixerEncoderOutput(last_hidden_state=last_hidden_state, hidden_states=hidden_states)
class PatchTSMixerEncoder(PatchTSMixerPreTrainedModel):
    '''
    Encoder for PatchTSMixer which inputs patched time-series and outputs patched embeddings.

    Args:
        config (`PatchTSMixerConfig`):
            Configuration.
    '''

    def __init__(self, config: PatchTSMixerConfig):
        pass

    @auto_docstring
    def forward(self, past_values: torch.Tensor, output_hidden_states: Optional[bool]=False, return_dict: Optional[bool]=None) -> Union[tuple, PatchTSMixerEncoderOutput]:
        '''
        past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`):
            Context values of the time series. For a pretraining task, this denotes the input time series to
            predict the masked portion. For a forecasting task, this denotes the history/past time series values.
            Similarly, for classification or regression tasks, it denotes the appropriate context values of the
            time series.

            For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series,
            it is greater than 1.

        Returns:
            `torch.FloatTensor` of shape `(batch_size, n_vars, num_patches, d_model)`
        '''
        pass
4
2
32
7
16
10
4
0.76
1
8
4
0
2
4
2
130
74
16
33
15
24
25
20
9
17
4
3
1
7
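The encoder's `patcher` consumes already-patched input of shape `(batch, n_vars, num_patches, patch_length)`; the patching itself happens elsewhere in the model and is not part of this record, but it can be sketched with `unfold` (sizes taken from the config defaults):

```python
import torch
from torch import nn

context_length, patch_length, patch_stride, d_model = 32, 8, 8, 16
past_values = torch.randn(2, context_length, 3)  # (batch, seq_len, num_input_channels)

# Move channels forward, slide a window of patch_length with step patch_stride,
# then embed each patch with a Linear(patch_length -> d_model) like `patcher`.
patches = past_values.transpose(1, 2).unfold(-1, patch_length, patch_stride)
print(patches.shape)  # torch.Size([2, 3, 4, 8]) -> (batch, n_vars, num_patches, patch_length)
embedded = nn.Linear(patch_length, d_model)(patches)
print(embedded.shape)  # torch.Size([2, 3, 4, 16])
```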
4,394
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerForPreTrainingOutput
from ...utils import auto_docstring, logging
from typing import Callable, Optional, Union
from transformers.utils import ModelOutput
from dataclasses import dataclass
import torch.nn as nn
import torch


@dataclass
@auto_docstring(custom_intro='\n    Output type of [`PatchTSMixerForPreTrainingOutput`].\n    ')
class PatchTSMixerForPreTrainingOutput(ModelOutput):
    """
    loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`):
        Total loss
    prediction_outputs (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, patch_length)`):
        Prediction output from the pretrain head.
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`):
        Backbone embeddings before passing through the head.
    hidden_states (`tuple(torch.FloatTensor)`, *optional*):
        Hidden-states of the model at the output of each layer.
    """

    loss: Optional[torch.FloatTensor] = None
    prediction_outputs: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
@dataclass @auto_docstring(custom_intro='\n Output type of [`PatchTSMixerForPreTrainingOutput`].\n ') class PatchTSMixerForPreTrainingOutput(ModelOutput): ''' loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`): Total loss prediction_outputs (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, patch_length)`): Prediction output from the pretrain head. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`): Backbone embeddings before passing through the head. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer. ''' pass
3
1
0
0
0
0
0
2.4
1
0
0
0
0
0
0
62
19
2
5
5
4
12
5
5
4
0
4
0
0
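Because these output classes subclass `ModelOutput`, they support attribute, key, and tuple-style access, which is what the `if not return_dict: return tuple(...)` branches in the models mirror. A small sketch with placeholder tensors (shapes chosen arbitrarily):

import torch
from transformers.models.patchtsmixer.modeling_patchtsmixer import PatchTSMixerForPreTrainingOutput

out = PatchTSMixerForPreTrainingOutput(
    loss=torch.tensor(0.25),
    prediction_outputs=torch.zeros(2, 3, 4, 8),
    last_hidden_state=torch.zeros(2, 3, 4, 16),
)
print(out.loss)                         # attribute access
print(out['prediction_outputs'].shape)  # dict-style access
loss, preds, hidden = out.to_tuple()    # None fields (hidden_states) are dropped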
4,395
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerForPrediction
from ...utils import auto_docstring, logging
from typing import Callable, Optional, Union
from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput
import torch.nn as nn
import torch
from .configuration_patchtsmixer import PatchTSMixerConfig

class PatchTSMixerForPrediction(PatchTSMixerPreTrainedModel):
    """
    `PatchTSMixer` for forecasting application.

    Args:
        config (`PatchTSMixerConfig`):
            Configuration.

    Returns:
        `None`.
    """

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__(config)
        self.loss = config.loss
        self.use_return_dict = config.use_return_dict
        self.prediction_channel_indices = config.prediction_channel_indices
        self.num_parallel_samples = config.num_parallel_samples
        if config.loss == 'mse':
            self.distribution_output = None
        else:
            dim = config.prediction_length
            distribution_output_map = {'student_t': StudentTOutput, 'normal': NormalOutput, 'negative_binomial': NegativeBinomialOutput}
            output_class = distribution_output_map.get(config.distribution_output, None)
            if output_class is not None:
                self.distribution_output = output_class(dim=dim)
            else:
                raise ValueError(f'Unknown distribution output {config.distribution_output}')
        self.model = PatchTSMixerModel(config)
        self.head = PatchTSMixerForPredictionHead(config=config, distribution_output=self.distribution_output)
        if config.post_init:
            self.post_init()

    @auto_docstring
    def forward(self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=False, return_loss: bool=True, return_dict: Optional[bool]=None) -> PatchTSMixerForPredictionOutput:
        """
        past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`):
            Context values of the time series. For a pretraining task, this denotes the input time series to predict the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly, for classification or regression tasks, it denotes the appropriate context values of the time series. For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series, it is greater than 1.
        observed_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
            Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`:
            - 1 for values that are **observed**,
            - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
        future_values (`torch.FloatTensor` of shape `(batch_size, target_len, num_input_channels)` for forecasting, `(batch_size, num_targets)` for regression, or `(batch_size,)` for classification, *optional*):
            Target values of the time series that serve as labels for the model. The `future_values` is what the Transformer needs during training to learn to output, given the `past_values`. Note that this is NOT required for a pretraining task. For a forecasting task, the shape should be `(batch_size, target_len, num_input_channels)`. Even if we want to forecast only specific channels by setting the indices in `prediction_channel_indices` parameter, pass the target data with all channels, as channel filtering for both prediction and target will be manually applied before the loss computation.
        return_loss (`bool`, *optional*):
            Whether to return the loss in the `forward` call. 
""" if self.loss == 'mse': loss = nn.MSELoss(reduction='mean') elif self.loss == 'nll': loss = nll else: raise ValueError('Invalid loss function: Allowed values: mse and nll') return_dict = return_dict if return_dict is not None else self.use_return_dict model_output = self.model(past_values, observed_mask=observed_mask, output_hidden_states=output_hidden_states, return_dict=return_dict) if isinstance(model_output, tuple): model_output = PatchTSMixerModelOutput(*model_output) y_hat = self.head(model_output.last_hidden_state) loss_val = None if self.prediction_channel_indices is not None: if self.distribution_output: distribution = self.distribution_output.distribution(y_hat, loc=model_output.loc[..., self.prediction_channel_indices], scale=model_output.scale[..., self.prediction_channel_indices]) if future_values is not None and return_loss is True: loss_val = loss(distribution, future_values[..., self.prediction_channel_indices]) loss_val = weighted_average(loss_val) else: y_hat = y_hat * model_output.scale[..., self.prediction_channel_indices] + model_output.loc[..., self.prediction_channel_indices] if future_values is not None and return_loss is True: loss_val = loss(y_hat, future_values[..., self.prediction_channel_indices]) elif self.distribution_output: distribution = self.distribution_output.distribution(y_hat, loc=model_output.loc, scale=model_output.scale) if future_values is not None and return_loss is True: loss_val = loss(distribution, future_values) loss_val = weighted_average(loss_val) else: y_hat = y_hat * model_output.scale + model_output.loc if future_values is not None and return_loss is True: loss_val = loss(y_hat, future_values) if self.prediction_channel_indices is not None: loc = model_output.loc[..., self.prediction_channel_indices] scale = model_output.scale[..., self.prediction_channel_indices] else: loc = model_output.loc scale = model_output.scale if not return_dict: return tuple((v for v in [loss_val, y_hat, model_output.last_hidden_state, model_output.hidden_states, loc, scale])) return PatchTSMixerForPredictionOutput(loss=loss_val, prediction_outputs=y_hat, last_hidden_state=model_output.last_hidden_state, hidden_states=model_output.hidden_states, loc=loc, scale=scale) @torch.no_grad() def generate(self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor]=None) -> SamplePatchTSMixerPredictionOutput: """ Generate sequences of sample predictions from a model with a probability distribution head. Args: past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Past values of the time series that serves as context in order to predict the future. observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). Return: [`SamplePatchTSMixerPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of samples, prediction_length, num_input_channels)`. 
""" num_parallel_samples = self.num_parallel_samples outputs = self(past_values=past_values, future_values=None, observed_mask=observed_mask, output_hidden_states=False) distribution = self.distribution_output.distribution(outputs.prediction_outputs, loc=outputs.loc, scale=outputs.scale) samples = [distribution.sample() for _ in range(num_parallel_samples)] samples = torch.stack(samples, dim=1) return SamplePatchTSMixerPredictionOutput(sequences=samples)
class PatchTSMixerForPrediction(PatchTSMixerPreTrainedModel):
    '''
    `PatchTSMixer` for forecasting application.

    Args:
        config (`PatchTSMixerConfig`):
            Configuration.

    Returns:
        `None`.
    '''
    def __init__(self, config: PatchTSMixerConfig):
        pass

    @auto_docstring
    def forward(self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=False, return_loss: bool=True, return_dict: Optional[bool]=None) -> PatchTSMixerForPredictionOutput:
        '''
        past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`):
            Context values of the time series. For a pretraining task, this denotes the input time series to predict the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly, for classification or regression tasks, it denotes the appropriate context values of the time series. For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series, it is greater than 1.
        observed_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
            Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`:
            - 1 for values that are **observed**,
            - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
        future_values (`torch.FloatTensor` of shape `(batch_size, target_len, num_input_channels)` for forecasting, `(batch_size, num_targets)` for regression, or `(batch_size,)` for classification, *optional*):
            Target values of the time series that serve as labels for the model. The `future_values` is what the Transformer needs during training to learn to output, given the `past_values`. Note that this is NOT required for a pretraining task. For a forecasting task, the shape should be `(batch_size, target_len, num_input_channels)`. Even if we want to forecast only specific channels by setting the indices in `prediction_channel_indices` parameter, pass the target data with all channels, as channel filtering for both prediction and target will be manually applied before the loss computation.
        return_loss (`bool`, *optional*):
            Whether to return the loss in the `forward` call.
        '''
        pass

    @torch.no_grad()
    def generate(self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor]=None) -> SamplePatchTSMixerPredictionOutput:
        '''
        Generate sequences of sample predictions from a model with a probability distribution head.

        Args:
            past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
                Past values of the time series that serves as context in order to predict the future.
            observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
                Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`:
                - 1 for values that are **observed**,
                - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).

        Return:
            [`SamplePatchTSMixerPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of samples, prediction_length, num_input_channels)`.
        '''
        pass
6
3
64
7
43
15
6
0.41
1
15
9
0
3
7
3
131
209
27
132
38
114
54
62
25
58
14
3
3
19
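A minimal end-to-end sketch for the forecasting class above, assuming the public `transformers` exports and arbitrary hyperparameter values; with `loss='nll'` the head emits distribution parameters and `generate()` draws `num_parallel_samples` sample trajectories:

import torch
from transformers import PatchTSMixerConfig, PatchTSMixerForPrediction

config = PatchTSMixerConfig(
    context_length=32, prediction_length=8, patch_length=8, patch_stride=8,
    num_input_channels=3, loss='nll', distribution_output='student_t',
)
model = PatchTSMixerForPrediction(config)

past = torch.randn(2, config.context_length, config.num_input_channels)
future = torch.randn(2, config.prediction_length, config.num_input_channels)

out = model(past_values=past, future_values=future)  # out.loss is the weighted NLL
samples = model.generate(past_values=past)
print(samples.sequences.shape)  # (2, num_parallel_samples, 8, 3)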
4,396
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerForPredictionHead
from .configuration_patchtsmixer import PatchTSMixerConfig import torch.nn as nn class PatchTSMixerForPredictionHead(nn.Module): """Prediction Head for Forecasting Args: config (`PatchTSMixerConfig`): Configuration. """ def __init__(self, config: PatchTSMixerConfig, distribution_output=None): super().__init__() self.prediction_channel_indices = config.prediction_channel_indices if self.prediction_channel_indices is not None: self.prediction_channel_indices.sort() self.dropout_layer = nn.Dropout(config.head_dropout) if distribution_output is None: self.base_forecast_block = nn.Linear(config.num_patches * config.d_model, config.prediction_length) else: self.base_forecast_block = distribution_output.get_parameter_projection(config.num_patches * config.d_model) self.flatten = nn.Flatten(start_dim=-2) def forward(self, hidden_features): """ Args: hidden_features (`torch.Tensor` of shape `(batch_size, num_patch, d_model)` in `flatten` mode or `(batch_size, n_vars, num_patch, d_model)` in `common_channel`/`mix_channel` mode.): Input hidden features. Returns: `torch.Tensor` of shape `(batch_size, prediction_length, nvars)`. """ hidden_features = self.flatten(hidden_features) hidden_features = self.dropout_layer(hidden_features) forecast = self.base_forecast_block(hidden_features) if isinstance(forecast, tuple): forecast = tuple((z.transpose(-1, -2) for z in forecast)) else: forecast = forecast.transpose(-1, -2) if self.prediction_channel_indices is not None: if isinstance(forecast, tuple): forecast = tuple((z[..., self.prediction_channel_indices] for z in forecast)) else: forecast = forecast[..., self.prediction_channel_indices] return forecast
class PatchTSMixerForPredictionHead(nn.Module): '''Prediction Head for Forecasting Args: config (`PatchTSMixerConfig`): Configuration. ''' def __init__(self, config: PatchTSMixerConfig, distribution_output=None): pass def forward(self, hidden_features): ''' Args: hidden_features (`torch.Tensor` of shape `(batch_size, num_patch, d_model)` in `flatten` mode or `(batch_size, n_vars, num_patch, d_model)` in `common_channel`/`mix_channel` mode.): Input hidden features. Returns: `torch.Tensor` of shape `(batch_size, prediction_length, nvars)`. ''' pass
3
2
23
5
14
7
4
0.64
1
3
1
0
2
4
2
12
54
13
28
8
25
18
23
8
20
4
1
2
7
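A dummy-tensor sketch of the head above (hyperparameter values assumed): it flattens the trailing `(num_patches, d_model)` block per channel, projects it to `prediction_length`, and transposes so channels come last:

import torch
from transformers import PatchTSMixerConfig
# The head is not exported at the package top level.
from transformers.models.patchtsmixer.modeling_patchtsmixer import PatchTSMixerForPredictionHead

config = PatchTSMixerConfig(context_length=32, patch_length=8, patch_stride=8,
                            d_model=16, prediction_length=8)
head = PatchTSMixerForPredictionHead(config)

# Backbone-style embeddings: (batch, n_vars, num_patches, d_model).
hidden = torch.randn(2, 3, config.num_patches, config.d_model)
print(head(hidden).shape)  # torch.Size([2, 8, 3]) after the final transpose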
4,397
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerForPredictionOutput
from ...utils import auto_docstring, logging from typing import Callable, Optional, Union from transformers.utils import ModelOutput from dataclasses import dataclass import torch.nn as nn import torch @dataclass @auto_docstring(custom_intro='\n Output type of [`PatchTSMixerForPredictionOutput`].\n ') class PatchTSMixerForPredictionOutput(ModelOutput): """ loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`): Total loss. prediction_outputs (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_input_channels)`): Prediction output from the forecast head. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`): Backbone embeddings before passing through the head. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. loc (`torch.FloatTensor`, *optional* of shape `(batch_size, 1, num_input_channels)`): Input mean scale (`torch.FloatTensor`, *optional* of shape `(batch_size, 1, num_input_channels)`): Input std dev """ loss: Optional[torch.FloatTensor] = None prediction_outputs: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor]] = None loc: Optional[torch.FloatTensor] = None scale: Optional[torch.FloatTensor] = None
@dataclass @auto_docstring(custom_intro='\n Output type of [`PatchTSMixerForPredictionOutput`].\n ') class PatchTSMixerForPredictionOutput(ModelOutput): ''' loss (*optional*, returned when `y` is provided, `torch.FloatTensor` of shape `()`): Total loss. prediction_outputs (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_input_channels)`): Prediction output from the forecast head. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_input_channels, num_patches, d_model)`): Backbone embeddings before passing through the head. hidden_states (`tuple(torch.FloatTensor)`, *optional*): Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. loc (`torch.FloatTensor`, *optional* of shape `(batch_size, 1, num_input_channels)`): Input mean scale (`torch.FloatTensor`, *optional* of shape `(batch_size, 1, num_input_channels)`): Input std dev ''' pass
3
1
0
0
0
0
0
2.29
1
0
0
0
0
0
0
62
26
3
7
7
6
16
7
7
6
0
4
0
0
4,398
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerForPretraining
from ...utils import auto_docstring, logging from typing import Callable, Optional, Union import torch.nn as nn import torch from .configuration_patchtsmixer import PatchTSMixerConfig @auto_docstring(custom_intro='\n `PatchTSMixer` for mask pretraining.\n ') class PatchTSMixerForPretraining(PatchTSMixerPreTrainedModel): def __init__(self, config: PatchTSMixerConfig): super().__init__(config) self.model = PatchTSMixerModel(config, mask_input=True) self.head = PatchTSMixerPretrainHead(config=config) self.masked_loss = config.masked_loss self.use_return_dict = config.use_return_dict if config.post_init: self.post_init() @auto_docstring def forward(self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=False, return_loss: bool=True, return_dict: Optional[bool]=None) -> PatchTSMixerForPreTrainingOutput: """ past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`): Context values of the time series. For a pretraining task, this denotes the input time series to predict the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly, for classification or regression tasks, it denotes the appropriate context values of the time series. For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series, it is greater than 1. observed_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). return_loss (`bool`, *optional*): Whether to return the loss in the `forward` call. """ return_dict = return_dict if return_dict is not None else self.use_return_dict if self.masked_loss is True: loss = torch.nn.MSELoss(reduction='none') else: loss = torch.nn.MSELoss(reduction='mean') model_output = self.model(past_values, observed_mask=observed_mask, output_hidden_states=output_hidden_states, return_dict=return_dict) if isinstance(model_output, tuple): model_output = PatchTSMixerModelOutput(*model_output) x_hat = self.head(model_output.last_hidden_state) if return_loss is True: loss_val = loss(x_hat, model_output.patch_input) else: loss_val = None if self.masked_loss is True and loss_val is not None: loss_val = (loss_val.mean(dim=-1) * model_output.mask).sum() / (model_output.mask.sum() + 1e-10) if not return_dict: return tuple((v for v in [loss_val, x_hat, model_output.last_hidden_state, model_output.hidden_states])) return PatchTSMixerForPreTrainingOutput(loss=loss_val, prediction_outputs=x_hat, last_hidden_state=model_output.last_hidden_state, hidden_states=model_output.hidden_states)
@auto_docstring(custom_intro='\n `PatchTSMixer` for mask pretraining.\n ') class PatchTSMixerForPretraining(PatchTSMixerPreTrainedModel): def __init__(self, config: PatchTSMixerConfig): pass @auto_docstring def forward(self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=False, return_loss: bool=True, return_dict: Optional[bool]=None) -> PatchTSMixerForPreTrainingOutput: ''' past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`): Context values of the time series. For a pretraining task, this denotes the input time series to predict the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly, for classification or regression tasks, it denotes the appropriate context values of the time series. For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series, it is greater than 1. observed_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*): Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). return_loss (`bool`, *optional*): Whether to return the loss in the `forward` call. ''' pass
5
1
38
5
26
9
5
0.45
1
9
5
0
2
4
2
130
90
14
55
19
43
25
26
11
23
7
3
1
9
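A minimal masked-pretraining sketch for the class above; the masking hyperparameters (`mask_type`, `random_mask_ratio`, `masked_loss`) are assumed to exist on `PatchTSMixerConfig` under these names. The model masks patches internally (`mask_input=True`), and with `masked_loss=True` the MSE is averaged over masked patches only:

import torch
from transformers import PatchTSMixerConfig, PatchTSMixerForPretraining

config = PatchTSMixerConfig(context_length=32, patch_length=8, patch_stride=8,
                            num_input_channels=3, mask_type='random',
                            random_mask_ratio=0.4, masked_loss=True)
model = PatchTSMixerForPretraining(config)

past = torch.randn(4, config.context_length, config.num_input_channels)
out = model(past_values=past)
out.loss.backward()  # reconstruction MSE restricted to the masked patches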
4,399
huggingface/pytorch-pretrained-BERT
huggingface_pytorch-pretrained-BERT/src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
transformers.models.patchtsmixer.modeling_patchtsmixer.PatchTSMixerForRegression
from ...utils import auto_docstring, logging
from typing import Callable, Optional, Union
from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput
import torch.nn as nn
import torch
from .configuration_patchtsmixer import PatchTSMixerConfig

@auto_docstring(custom_intro='\n `PatchTSMixer` for regression application.\n ')
class PatchTSMixerForRegression(PatchTSMixerPreTrainedModel):

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__(config)
        self.model = PatchTSMixerModel(config)
        self.loss = config.loss
        self.distribution_output = config.distribution_output
        self.use_return_dict = config.use_return_dict
        self.num_parallel_samples = config.num_parallel_samples
        if config.loss == 'mse':
            self.distribution_output = None
        else:
            distribution_output_map = {'student_t': StudentTOutput, 'normal': NormalOutput, 'negative_binomial': NegativeBinomialOutput}
            output_class = distribution_output_map.get(config.distribution_output)
            if output_class is not None:
                self.distribution_output = output_class(dim=config.num_targets)
            else:
                raise ValueError(f'Unknown distribution output {config.distribution_output}')
        if config.scaling in ['std', 'mean', True]:
            self.inject_scale = InjectScalerStatistics4D(d_model=config.d_model, num_patches=config.num_patches)
        else:
            self.inject_scale = None
        self.head = PatchTSMixerLinearHead(config=config, distribution_output=self.distribution_output)
        if config.post_init:
            self.post_init()

    @auto_docstring
    def forward(self, past_values: torch.Tensor, target_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=False, return_loss: bool=True, return_dict: Optional[bool]=None) -> PatchTSMixerForRegressionOutput:
        """
        past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`):
            Context values of the time series. For a pretraining task, this denotes the input time series to predict the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly, for classification or regression tasks, it denotes the appropriate context values of the time series. For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series, it is greater than 1.
        target_values (`torch.FloatTensor` of shape `(batch_size, target_len, num_input_channels)` for forecasting, `(batch_size, num_targets)` for regression, or `(batch_size,)` for classification, *optional*):
            Target values of the time series that serve as labels for the model. The `target_values` is what the Transformer needs during training to learn to output, given the `past_values`. Note that this is NOT required for a pretraining task. For a forecasting task, the shape should be `(batch_size, target_len, num_input_channels)`. Even if we want to forecast only specific channels by setting the indices in `prediction_channel_indices` parameter, pass the target data with all channels, as channel filtering for both prediction and target will be manually applied before the loss computation. For a classification task, it has a shape of `(batch_size,)`. For a regression task, it has a shape of `(batch_size, num_targets)`.
        return_loss (`bool`, *optional*):
            Whether to return the loss in the `forward` call. 
""" if self.loss == 'mse': loss = nn.MSELoss(reduction='mean') elif self.loss == 'nll': loss = nll else: raise ValueError('Invalid loss function: Allowed values: mse and nll') return_dict = return_dict if return_dict is not None else self.use_return_dict model_output = self.model(past_values, output_hidden_states=output_hidden_states, return_dict=return_dict) if isinstance(model_output, tuple): model_output = PatchTSMixerModelOutput(*model_output) if self.inject_scale is not None: model_output.last_hidden_state = self.inject_scale(model_output.last_hidden_state, loc=model_output.loc, scale=model_output.scale) y_hat = self.head(model_output.last_hidden_state) if target_values is not None and return_loss is True: if self.distribution_output: if self.distribution_output == 'negative_binomial' and torch.any(target_values < 0): raise Exception('target_values cannot be negative for negative_binomial distribution.') distribution = self.distribution_output.distribution(y_hat) y_hat = tuple((item.view(-1, self.config.num_targets) for item in y_hat)) loss_val = loss(distribution, target_values) loss_val = weighted_average(loss_val) else: loss_val = loss(y_hat, target_values) else: loss_val = None if not return_dict: return tuple((v for v in [loss_val, y_hat, model_output.last_hidden_state, model_output.hidden_states])) return PatchTSMixerForRegressionOutput(loss=loss_val, regression_outputs=y_hat, last_hidden_state=model_output.last_hidden_state, hidden_states=model_output.hidden_states) @torch.no_grad() def generate(self, past_values: torch.Tensor) -> SamplePatchTSMixerRegressionOutput: """ Generate sequences of sample predictions from a model with a probability distribution head. Args: past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Past values of the time series that serves as context in order to predict the target values. Return: [`SamplePatchTSMixerRegressionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of samples, num_targets)`. """ num_parallel_samples = self.num_parallel_samples outputs = self(past_values=past_values, target_values=None, output_hidden_states=False) distribution = self.distribution_output.distribution(outputs.regression_outputs) samples = [distribution.sample() for _ in range(num_parallel_samples)] samples = torch.stack(samples, dim=1).view(-1, num_parallel_samples, self.config.num_targets) return SamplePatchTSMixerRegressionOutput(sequences=samples)
@auto_docstring(custom_intro='\n `PatchTSMixer` for regression application.\n ')
class PatchTSMixerForRegression(PatchTSMixerPreTrainedModel):
    def __init__(self, config: PatchTSMixerConfig):
        pass

    @auto_docstring
    def forward(self, past_values: torch.Tensor, target_values: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=False, return_loss: bool=True, return_dict: Optional[bool]=None) -> PatchTSMixerForRegressionOutput:
        '''
        past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`):
            Context values of the time series. For a pretraining task, this denotes the input time series to predict the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly, for classification or regression tasks, it denotes the appropriate context values of the time series. For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series, it is greater than 1.
        target_values (`torch.FloatTensor` of shape `(batch_size, target_len, num_input_channels)` for forecasting, `(batch_size, num_targets)` for regression, or `(batch_size,)` for classification, *optional*):
            Target values of the time series that serve as labels for the model. The `target_values` is what the Transformer needs during training to learn to output, given the `past_values`. Note that this is NOT required for a pretraining task. For a forecasting task, the shape should be `(batch_size, target_len, num_input_channels)`. Even if we want to forecast only specific channels by setting the indices in `prediction_channel_indices` parameter, pass the target data with all channels, as channel filtering for both prediction and target will be manually applied before the loss computation. For a classification task, it has a shape of `(batch_size,)`. For a regression task, it has a shape of `(batch_size, num_targets)`.
        return_loss (`bool`, *optional*):
            Whether to return the loss in the `forward` call.
        '''
        pass

    @torch.no_grad()
    def generate(self, past_values: torch.Tensor) -> SamplePatchTSMixerRegressionOutput:
        '''
        Generate sequences of sample predictions from a model with a probability distribution head.

        Args:
            past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
                Past values of the time series that serves as context in order to predict the target values.

        Return:
            [`SamplePatchTSMixerRegressionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of samples, num_targets)`.
        '''
        pass
7
2
54
8
34
13
5
0.45
1
17
10
0
3
7
3
131
177
29
106
34
90
48
53
22
49
10
3
3
16
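A minimal regression sketch mirroring the forecasting example earlier (hyperparameter values assumed); with `loss='nll'` the head parameterizes a Student-T distribution over the `num_targets` outputs, and `generate()` samples from it:

import torch
from transformers import PatchTSMixerConfig, PatchTSMixerForRegression

config = PatchTSMixerConfig(context_length=32, patch_length=8, patch_stride=8,
                            num_input_channels=3, num_targets=2,
                            loss='nll', distribution_output='student_t')
model = PatchTSMixerForRegression(config)

past = torch.randn(4, config.context_length, config.num_input_channels)
targets = torch.randn(4, config.num_targets)

out = model(past_values=past, target_values=targets)  # out.loss is the weighted NLL
samples = model.generate(past_values=past)
print(samples.sequences.shape)  # (4, num_parallel_samples, 2)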