Dataset schema (column, dtype, and observed value range across the full split):

| Column | Dtype | Range |
|---|---|---|
| id | int64 | 0 to 328k |
| repository_name | string | lengths 7 to 58 |
| file_path | string | lengths 9 to 302 |
| class_name | string | lengths 5 to 256 |
| human_written_code | string | lengths 16 to 2.16M |
| class_skeleton | string (nullable) | lengths 18 to 1.49M |
| total_program_units | int64 | 1 to 1.76k |
| total_doc_str | int64 | 0 to 771 |
| AvgCountLine | float64 | 0 to 7.89k |
| AvgCountLineBlank | float64 | 0 to 297 |
| AvgCountLineCode | float64 | 0 to 7.89k |
| AvgCountLineComment | float64 | 0 to 7.89k |
| AvgCyclomatic | float64 | 0 to 130 |
| CommentToCodeRatio | float64 | 0 to 168 |
| CountClassBase | float64 | 0 to 40 |
| CountClassCoupled | float64 | 0 to 583 |
| CountClassCoupledModified | float64 | 0 to 575 |
| CountClassDerived | float64 | 0 to 5.35k |
| CountDeclInstanceMethod | float64 | 0 to 529 |
| CountDeclInstanceVariable | float64 | 0 to 296 |
| CountDeclMethod | float64 | 0 to 599 |
| CountDeclMethodAll | float64 | 0 to 1.12k |
| CountLine | float64 | 1 to 40.4k |
| CountLineBlank | float64 | 0 to 8.16k |
| CountLineCode | float64 | 1 to 25.7k |
| CountLineCodeDecl | float64 | 1 to 8.15k |
| CountLineCodeExe | float64 | 0 to 24.2k |
| CountLineComment | float64 | 0 to 16.5k |
| CountStmt | float64 | 1 to 9.71k |
| CountStmtDecl | float64 | 1 to 8.15k |
| CountStmtExe | float64 | 0 to 9.69k |
| MaxCyclomatic | float64 | 0 to 759 |
| MaxInheritanceTree | float64 | 0 to 16 |
| MaxNesting | float64 | 0 to 34 |
| SumCyclomatic | float64 | 0 to 2.9k |
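Each row pairs a class's full source (`human_written_code`) with a stubbed skeleton (`class_skeleton`) and the metric columns above. A minimal sketch of how such a split could be inspected with the `datasets` library; the dataset id is a placeholder, since this excerpt does not name the hub repository:

```python
# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub.
# "<org>/<dataset>" is a placeholder -- the excerpt does not give the actual id.
from datasets import load_dataset

ds = load_dataset("<org>/<dataset>", split="train")
row = ds[0]  # positional index; the `id` column need not match the position

print(row["repository_name"], row["class_name"])
print(row["human_written_code"][:200])        # full class source
print((row["class_skeleton"] or "")[:200])    # signatures/docstrings with bodies stubbed by `pass`; may be null
print({k: row[k] for k in ("CountLineCode", "CountLineComment", "CommentToCodeRatio")})
```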
id: 4,800
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
class_name: transformers.models.qwen2_vl.modeling_qwen2_vl.Qwen2VLModel
human_written_code:
from typing import Any, Callable, Optional, Union
import torch.nn.functional as F
import torch.nn as nn
from .configuration_qwen2_vl import Qwen2VLConfig, Qwen2VLTextConfig, Qwen2VLVisionConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torchdynamo_compiling, logging
import torch
from ...cache_utils import Cache, DynamicCache
from ...processing_utils import Unpack
@auto_docstring
class Qwen2VLModel(Qwen2VLPreTrainedModel):
base_model_prefix = ''
_checkpoint_conversion_mapping = {'^model': 'language_model'}
accepts_loss_kwargs = False
def __init__(self, config: Qwen2VLConfig):
super().__init__(config)
self.visual = Qwen2VisionTransformerPretrainedModel._from_config(config.vision_config)
self.language_model = Qwen2VLTextModel._from_config(config.text_config)
self.rope_deltas = None
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def set_decoder(self, decoder):
self.language_model = decoder
def get_decoder(self):
return self.language_model
def get_rope_index(self, input_ids: Optional[torch.LongTensor]=None, image_grid_thw: Optional[torch.LongTensor]=None, video_grid_thw: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor]:
"""
Calculate the 3D rope index based on image and video's temporal, height and width in LLM.
Explanation:
Each embedding sequence contains vision embedding and text embedding or just contains text embedding.
For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs.
Examples:
input_ids: [T T T T T], here T is for text.
temporal position_ids: [0, 1, 2, 3, 4]
height position_ids: [0, 1, 2, 3, 4]
width position_ids: [0, 1, 2, 3, 4]
For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part
and 1D rotary position embedding for text part.
Examples:
Assume we have a video input with 3 temporal patches, 2 height patches and 2 width patches.
input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision.
vision temporal position_ids: [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]
vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
text temporal position_ids: [3, 4, 5, 6, 7]
text height position_ids: [3, 4, 5, 6, 7]
text width position_ids: [3, 4, 5, 6, 7]
Here we calculate the text start position_ids as the max vision position_ids plus 1.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
Returns:
position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`)
mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`)
"""
spatial_merge_size = self.config.vision_config.spatial_merge_size
image_token_id = self.config.image_token_id
video_token_id = self.config.video_token_id
vision_start_token_id = self.config.vision_start_token_id
mrope_position_deltas = []
if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
total_input_ids = input_ids
if attention_mask is None:
attention_mask = torch.ones_like(total_input_ids)
position_ids = torch.ones(3, input_ids.shape[0], input_ids.shape[1], dtype=input_ids.dtype, device=input_ids.device)
image_index, video_index = (0, 0)
for i, input_ids in enumerate(total_input_ids):
input_ids = input_ids[attention_mask[i].to(input_ids.device) == 1]
image_nums, video_nums = (0, 0)
vision_start_indices = torch.argwhere(input_ids == vision_start_token_id).squeeze(1)
vision_tokens = input_ids[vision_start_indices + 1]
image_nums = (vision_tokens == image_token_id).sum()
video_nums = (vision_tokens == video_token_id).sum()
input_tokens = input_ids.tolist()
llm_pos_ids_list: list = []
st = 0
remain_images, remain_videos = (image_nums, video_nums)
for _ in range(image_nums + video_nums):
if image_token_id in input_tokens and remain_images > 0:
ed_image = input_tokens.index(image_token_id, st)
else:
ed_image = len(input_tokens) + 1
if video_token_id in input_tokens and remain_videos > 0:
ed_video = input_tokens.index(video_token_id, st)
else:
ed_video = len(input_tokens) + 1
if ed_image < ed_video:
t, h, w = (image_grid_thw[image_index][0], image_grid_thw[image_index][1], image_grid_thw[image_index][2])
image_index += 1
remain_images -= 1
ed = ed_image
else:
t, h, w = (video_grid_thw[video_index][0], video_grid_thw[video_index][1], video_grid_thw[video_index][2])
video_index += 1
remain_videos -= 1
ed = ed_video
llm_grid_t, llm_grid_h, llm_grid_w = (t.item(), h.item() // spatial_merge_size, w.item() // spatial_merge_size)
text_len = ed - st
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
t_index = torch.arange(llm_grid_t).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten()
h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten()
w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten()
llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + text_len + st_idx)
st = ed + llm_grid_t * llm_grid_h * llm_grid_w
if st < len(input_tokens):
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
text_len = len(input_tokens) - st
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
position_ids[..., i, attention_mask[i] == 1] = llm_positions.to(position_ids.device)
mrope_position_deltas.append(llm_positions.max() + 1 - len(total_input_ids[i]))
mrope_position_deltas = torch.tensor(mrope_position_deltas, device=input_ids.device).unsqueeze(1)
return (position_ids, mrope_position_deltas)
else:
if attention_mask is not None:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device)
max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0]
mrope_position_deltas = max_position_ids + 1 - attention_mask.shape[-1]
else:
position_ids = torch.arange(input_ids.shape[1], device=input_ids.device).view(1, 1, -1).expand(3, input_ids.shape[0], -1)
mrope_position_deltas = torch.zeros([input_ids.shape[0], 1], device=input_ids.device, dtype=input_ids.dtype)
return (position_ids, mrope_position_deltas)
def get_video_features(self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor]=None):
"""
Encodes videos into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input videos.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
"""
pixel_values_videos = pixel_values_videos.type(self.visual.dtype)
video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw)
split_sizes = (video_grid_thw.prod(-1) // self.visual.spatial_merge_size ** 2).tolist()
video_embeds = torch.split(video_embeds, split_sizes)
return video_embeds
def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor]=None):
"""
Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
"""
pixel_values = pixel_values.type(self.visual.dtype)
image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
split_sizes = (image_grid_thw.prod(-1) // self.visual.spatial_merge_size ** 2).tolist()
image_embeds = torch.split(image_embeds, split_sizes)
return image_embeds
def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: Optional[torch.FloatTensor]=None, video_features: Optional[torch.FloatTensor]=None):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device))
special_image_mask = special_image_mask.all(-1)
special_video_mask = inputs_embeds == self.get_input_embeddings()(torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device))
special_video_mask = special_video_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
special_video_mask = input_ids == self.config.video_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(f'Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}')
n_video_tokens = special_video_mask.sum()
special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel():
raise ValueError(f'Videos features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}')
return (special_image_mask, special_video_mask)
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, pixel_values: Optional[torch.Tensor]=None, pixel_values_videos: Optional[torch.FloatTensor]=None, image_grid_thw: Optional[torch.LongTensor]=None, video_grid_thw: Optional[torch.LongTensor]=None, rope_deltas: Optional[torch.LongTensor]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, Qwen2VLModelOutputWithPast]:
"""
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
The rope index difference between sequence length and multimodal rope.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None:
image_embeds = self.get_image_features(pixel_values, image_grid_thw)
image_embeds = torch.cat(image_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
image_mask, _ = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds)
inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
if pixel_values_videos is not None:
video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw)
video_embeds = torch.cat(video_embeds, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
_, video_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds)
inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
if position_ids is None:
if self.rope_deltas is None or cache_position is None or cache_position[0] == 0:
position_ids, rope_deltas = self.get_rope_index(input_ids, image_grid_thw, video_grid_thw, attention_mask)
self.rope_deltas = rope_deltas
else:
batch_size, seq_length, _ = inputs_embeds.shape
position_ids = torch.arange(seq_length, device=inputs_embeds.device)
position_ids = position_ids.view(1, 1, -1).expand(3, batch_size, -1)
if cache_position is not None:
delta = (cache_position[0] + self.rope_deltas).to(inputs_embeds.device)
else:
delta = torch.zeros((batch_size, seq_length), device=inputs_embeds.device)
delta = delta.repeat_interleave(batch_size // delta.shape[0], dim=0)
position_ids = position_ids + delta.to(position_ids.device)
outputs = self.language_model(input_ids=None, position_ids=position_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **kwargs)
output = Qwen2VLModelOutputWithPast(last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, rope_deltas=self.rope_deltas)
return output if return_dict else output.to_tuple()
class_skeleton:
@auto_docstring
class Qwen2VLModel(Qwen2VLPreTrainedModel):
def __init__(self, config: Qwen2VLConfig):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def set_decoder(self, decoder):
pass
def get_decoder(self):
pass
def get_rope_index(self, input_ids: Optional[torch.LongTensor]=None, image_grid_thw: Optional[torch.LongTensor]=None, video_grid_thw: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, torch.Tensor]:
'''
Calculate the 3D rope index based on image and video's temporal, height and width in LLM.
Explanation:
Each embedding sequence contains vision embedding and text embedding or just contains text embedding.
For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs.
Examples:
input_ids: [T T T T T], here T is for text.
temporal position_ids: [0, 1, 2, 3, 4]
height position_ids: [0, 1, 2, 3, 4]
width position_ids: [0, 1, 2, 3, 4]
For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part
and 1D rotary position embedding for text part.
Examples:
Assume we have a video input with 3 temporal patches, 2 height patches and 2 width patches.
input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision.
vision temporal position_ids: [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]
vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
text temporal position_ids: [3, 4, 5, 6, 7]
text height position_ids: [3, 4, 5, 6, 7]
text width position_ids: [3, 4, 5, 6, 7]
Here we calculate the text start position_ids as the max vision position_ids plus 1.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
Returns:
position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`)
mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`)
'''
pass
def get_video_features(self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor]=None):
'''
Encodes videos into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input videos.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
'''
pass
def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor]=None):
'''
Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
'''
pass
def get_placeholder_mask(self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: Optional[torch.FloatTensor]=None, video_features: Optional[torch.FloatTensor]=None):
'''
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, pixel_values: Optional[torch.Tensor]=None, pixel_values_videos: Optional[torch.FloatTensor]=None, image_grid_thw: Optional[torch.LongTensor]=None, video_grid_thw: Optional[torch.LongTensor]=None, rope_deltas: Optional[torch.LongTensor]=None, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[TransformersKwargs]) -> Union[tuple, Qwen2VLModelOutputWithPast]:
'''
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
The rope index difference between sequence length and multimodal rope.
'''
pass
Metrics: total_program_units=13, total_doc_str=5, AvgCountLine=47, AvgCountLineBlank=5, AvgCountLineCode=35, AvgCountLineComment=7, AvgCyclomatic=8, CommentToCodeRatio=0.21, CountClassBase=1, CountClassCoupled=17, CountClassCoupledModified=10, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=9, CountDeclMethod=6, CountDeclMethodAll=7, CountLine=292, CountLineBlank=34, CountLineCode=214, CountLineCodeDecl=71, CountLineCodeExe=177, CountLineComment=45, CountStmt=107, CountStmtDecl=40, CountStmtExe=100, MaxCyclomatic=25, MaxInheritanceTree=2, MaxNesting=3, SumCyclomatic=45
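The `get_rope_index` docstring in this row walks through a 3 x 2 x 2 video followed by five text tokens. The standalone sketch below mirrors the arange/expand construction used in that method and reproduces the position ids listed in the docstring:

```python
# Sketch: reproduce the worked example from Qwen2VLModel.get_rope_index's docstring
# (a video occupying a 3 x 2 x 2 grid of merged patches, followed by 5 text tokens).
import torch

t, h, w = 3, 2, 2                      # temporal, height, width (after spatial merging)
t_idx = torch.arange(t).view(-1, 1).expand(-1, h * w).flatten()
h_idx = torch.arange(h).view(1, -1, 1).expand(t, -1, w).flatten()
w_idx = torch.arange(w).view(1, 1, -1).expand(t, h, -1).flatten()
print(t_idx.tolist())  # [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]
print(h_idx.tolist())  # [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
print(w_idx.tolist())  # [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]

# Text tokens continue from max(vision position) + 1 on all three axes.
start = int(torch.stack([t_idx, h_idx, w_idx]).max()) + 1   # = 3
text_pos = torch.arange(start, start + 5)
print(text_pos.tolist())  # [3, 4, 5, 6, 7]
```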
id: 4,801
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
class_name: transformers.models.qwen2_vl.modeling_qwen2_vl.Qwen2VLPreTrainedModel
human_written_code:
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torchdynamo_compiling, logging
from .configuration_qwen2_vl import Qwen2VLConfig, Qwen2VLTextConfig, Qwen2VLVisionConfig
@auto_docstring
class Qwen2VLPreTrainedModel(PreTrainedModel):
config: Qwen2VLConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['Qwen2VLDecoderLayer', 'Qwen2VLVisionBlock']
_skip_keys_device_placement = 'past_key_values'
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
_supports_attention_backend = True
class_skeleton:
@auto_docstring
class Qwen2VLPreTrainedModel(PreTrainedModel):
pass
Metrics: total_program_units=2, total_doc_str=0, AvgCountLine=10, AvgCountLineBlank=0, AvgCountLineCode=10, AvgCountLineComment=0, AvgCyclomatic=5, CommentToCodeRatio=0.05, CountClassBase=1, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=3, CountDeclInstanceMethod=1, CountDeclInstanceVariable=0, CountDeclMethod=1, CountDeclMethodAll=1, CountLine=21, CountLineBlank=1, CountLineCode=20, CountLineCodeDecl=12, CountLineCodeExe=18, CountLineComment=1, CountStmt=19, CountStmtDecl=12, CountStmtExe=17, MaxCyclomatic=5, MaxInheritanceTree=1, MaxNesting=2, SumCyclomatic=5
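The excerpt does not define `CommentToCodeRatio`, but the rows shown here are consistent with `CountLineComment` divided by `CountLineCode`; a quick check, treating that definition as an assumption:

```python
# Assumption: CommentToCodeRatio == CountLineComment / CountLineCode (inferred from the rows, not stated in the schema).
print(round(45 / 214, 2))  # row id 4,800: 0.21, matches its CommentToCodeRatio
print(round(1 / 20, 2))    # row id 4,801: 0.05, matches its CommentToCodeRatio
```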
id: 4,802
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
class_name: transformers.models.qwen2_vl.modeling_qwen2_vl.Qwen2VLRotaryEmbedding
human_written_code:
import torch.nn.functional as F
from .configuration_qwen2_vl import Qwen2VLConfig, Qwen2VLTextConfig, Qwen2VLVisionConfig
import torch.nn as nn
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
import torch
class Qwen2VLRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: Qwen2VLTextConfig, device=None):
super().__init__()
if hasattr(config, 'rope_scaling') and config.rope_scaling is not None:
self.rope_type = config.rope_scaling.get('rope_type', config.rope_scaling.get('type'))
else:
self.rope_type = 'default'
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer('inv_freq', inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1)
position_ids_expanded = position_ids[:, :, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
class_skeleton:
class Qwen2VLRotaryEmbedding(nn.Module):
def __init__(self, config: Qwen2VLTextConfig, device=None):
pass
@torch.no_grad()
@dynamic_rope_update
def forward(self, x, position_ids):
pass
Metrics: total_program_units=5, total_doc_str=0, AvgCountLine=18, AvgCountLineBlank=2, AvgCountLineCode=13, AvgCountLineComment=5, AvgCyclomatic=3, CommentToCodeRatio=0.34, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=3, CountDeclInstanceVariable=7, CountDeclMethod=3, CountDeclMethodAll=13, CountLine=59, CountLineBlank=8, CountLineCode=41, CountLineCodeDecl=21, CountLineCodeExe=36, CountLineComment=14, CountStmt=37, CountStmtDecl=20, CountStmtExe=33, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=8
id: 4,803
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
class_name: transformers.models.qwen2_vl.modeling_qwen2_vl.Qwen2VLVisionBlock
human_written_code:
from ...modeling_layers import GradientCheckpointingLayer
from torch.nn import LayerNorm
from typing import Any, Callable, Optional, Union
import torch.nn.functional as F
import torch.nn as nn
import torch
class Qwen2VLVisionBlock(GradientCheckpointingLayer):
def __init__(self, config, attn_implementation: str='sdpa') -> None:
super().__init__()
self.norm1 = LayerNorm(config.embed_dim, eps=1e-06)
self.norm2 = LayerNorm(config.embed_dim, eps=1e-06)
mlp_hidden_dim = int(config.embed_dim * config.mlp_ratio)
self.attn = VisionAttention(config=config)
self.mlp = VisionMlp(dim=config.embed_dim, hidden_dim=mlp_hidden_dim, hidden_act=config.hidden_act)
def forward(self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: Optional[torch.Tensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs) -> torch.Tensor:
hidden_states = hidden_states + self.attn(self.norm1(hidden_states), cu_seqlens=cu_seqlens, rotary_pos_emb=rotary_pos_emb, position_embeddings=position_embeddings, **kwargs)
hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
return hidden_states
class_skeleton:
class Qwen2VLVisionBlock(GradientCheckpointingLayer):
def __init__(self, config, attn_implementation: str='sdpa') -> None:
pass
def forward(self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: Optional[torch.Tensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs) -> torch.Tensor:
pass
Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=8, AvgCountLineBlank=1, AvgCountLineCode=8, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=5, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=18, CountLineBlank=2, CountLineCode=16, CountLineCodeDecl=8, CountLineCodeExe=13, CountLineComment=0, CountStmt=12, CountStmtDecl=8, CountStmtExe=9, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
id: 4,804
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
class_name: transformers.models.qwen2_vl.modeling_qwen2_vl.Qwen2VisionTransformerPretrainedModel
human_written_code:
import torch.nn.functional as F
import torch.nn as nn
from .configuration_qwen2_vl import Qwen2VLConfig, Qwen2VLTextConfig, Qwen2VLVisionConfig
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torchdynamo_compiling, logging
import torch
@auto_docstring
class Qwen2VisionTransformerPretrainedModel(Qwen2VLPreTrainedModel):
config: Qwen2VLVisionConfig
_no_split_modules = ['Qwen2VLVisionBlock']
def __init__(self, config) -> None:
super().__init__(config)
self.spatial_merge_size = config.spatial_merge_size
self.patch_embed = PatchEmbed(patch_size=config.patch_size, temporal_patch_size=config.temporal_patch_size, in_channels=config.in_channels, embed_dim=config.embed_dim)
head_dim = config.embed_dim // config.num_heads
self.rotary_pos_emb = VisionRotaryEmbedding(head_dim // 2)
self.blocks = nn.ModuleList([Qwen2VLVisionBlock(config) for _ in range(config.depth)])
self.merger = PatchMerger(dim=config.hidden_size, context_dim=config.embed_dim, spatial_merge_size=config.spatial_merge_size)
self.gradient_checkpointing = False
def get_dtype(self) -> torch.dtype:
return self.blocks[0].mlp.fc2.weight.dtype
def get_device(self) -> torch.device:
return self.blocks[0].mlp.fc2.weight.device
def rot_pos_emb(self, grid_thw):
pos_ids = []
for t, h, w in grid_thw:
hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
hpos_ids = hpos_ids.reshape(h // self.spatial_merge_size, self.spatial_merge_size, w // self.spatial_merge_size, self.spatial_merge_size)
hpos_ids = hpos_ids.permute(0, 2, 1, 3)
hpos_ids = hpos_ids.flatten()
wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
wpos_ids = wpos_ids.reshape(h // self.spatial_merge_size, self.spatial_merge_size, w // self.spatial_merge_size, self.spatial_merge_size)
wpos_ids = wpos_ids.permute(0, 2, 1, 3)
wpos_ids = wpos_ids.flatten()
pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
pos_ids = torch.cat(pos_ids, dim=0)
max_grid_size = grid_thw[:, 1:].max()
rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size)
rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
return rotary_pos_emb
@auto_docstring
def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs) -> torch.Tensor:
"""
grid_thw (`torch.LongTensor` of shape `(num_images, 3)`):
The temporal, height and width dimensions of feature shape for each image. Each row contains [t, h, w] values.
"""
hidden_states = self.patch_embed(hidden_states)
rotary_pos_emb = self.rot_pos_emb(grid_thw)
emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
position_embeddings = (emb.cos(), emb.sin())
cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(dim=0, dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32)
cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
for blk in self.blocks:
hidden_states = blk(hidden_states, cu_seqlens=cu_seqlens, position_embeddings=position_embeddings, **kwargs)
return self.merger(hidden_states)
class_skeleton:
@auto_docstring
class Qwen2VisionTransformerPretrainedModel(Qwen2VLPreTrainedModel):
def __init__(self, config) -> None:
pass
def get_dtype(self) -> torch.dtype:
pass
def get_device(self) -> torch.device:
pass
def rot_pos_emb(self, grid_thw):
pass
@auto_docstring
def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs) -> torch.Tensor:
'''
grid_thw (`torch.LongTensor` of shape `(num_images, 3)`):
The temporal, height and width dimensions of feature shape for each image. Each row contains [t, h, w] values.
'''
pass
Metrics: total_program_units=8, total_doc_str=1, AvgCountLine=15, AvgCountLineBlank=1, AvgCountLineCode=13, AvgCountLineComment=1, AvgCyclomatic=2, CommentToCodeRatio=0.06, CountClassBase=1, CountClassCoupled=7, CountClassCoupledModified=4, CountClassDerived=0, CountDeclInstanceMethod=5, CountDeclInstanceVariable=6, CountDeclMethod=5, CountDeclMethodAll=6, CountLine=84, CountLineBlank=12, CountLineCode=68, CountLineCodeDecl=26, CountLineCodeExe=62, CountLineComment=4, CountStmt=43, CountStmtDecl=25, CountStmtExe=37, MaxCyclomatic=4, MaxInheritanceTree=2, MaxNesting=2, SumCyclomatic=9
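In `Qwen2VisionTransformerPretrainedModel.forward` above, `cu_seqlens` marks where each frame's patches start and end, so attention is confined to one frame at a time. A small sketch with a hypothetical `grid_thw` of one video with 3 frames of 4 x 4 patches:

```python
# Sketch: how the forward pass above builds cu_seqlens (cumulative per-frame boundaries).
# The grid_thw value is a hypothetical example, not taken from the dataset rows.
import torch
import torch.nn.functional as F

grid_thw = torch.tensor([[3, 4, 4]])   # one video: 3 frames, each 4 x 4 patches
cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(dim=0, dtype=torch.int32)
cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
print(cu_seqlens.tolist())  # [0, 16, 32, 48] -> each 16-patch frame attends only within itself
```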
id: 4,805
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
class_name: transformers.models.qwen2_vl.modeling_qwen2_vl.VisionAttention
human_written_code:
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from typing import Any, Callable, Optional, Union
import torch.nn.functional as F
import torch.nn as nn
from .configuration_qwen2_vl import Qwen2VLConfig, Qwen2VLTextConfig, Qwen2VLVisionConfig
import torch
class VisionAttention(nn.Module):
def __init__(self, config: Qwen2VLVisionConfig) -> None:
super().__init__()
self.dim = config.embed_dim
self.num_heads = config.num_heads
self.head_dim = self.dim // self.num_heads
self.num_key_value_groups = 1
self.qkv = nn.Linear(self.dim, self.dim * 3, bias=True)
self.proj = nn.Linear(self.dim, self.dim)
self.scaling = self.head_dim ** (-0.5)
self.config = config
self.attention_dropout = 0.0
self.is_causal = False
def forward(self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: Optional[torch.Tensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs) -> torch.Tensor:
seq_length = hidden_states.shape[0]
query_states, key_states, value_states = self.qkv(hidden_states).reshape(seq_length, 3, self.num_heads, -1).permute(1, 0, 2, 3).unbind(0)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb_vision(query_states, key_states, cos, sin)
query_states = query_states.transpose(0, 1).unsqueeze(0)
key_states = key_states.transpose(0, 1).unsqueeze(0)
value_states = value_states.transpose(0, 1).unsqueeze(0)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != 'eager':
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
if self.config._attn_implementation == 'flash_attention_2':
max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
attn_output, _ = attention_interface(self, query_states, key_states, value_states, attention_mask=None, scaling=self.scaling, dropout=0.0 if not self.training else self.attention_dropout, cu_seq_lens_q=cu_seqlens, cu_seq_lens_k=cu_seqlens, max_length_q=max_seqlen, max_length_k=max_seqlen, is_causal=False, **kwargs)
else:
lengths = cu_seqlens[1:] - cu_seqlens[:-1]
splits = [torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states)]
attn_outputs = [attention_interface(self, q, k, v, attention_mask=None, scaling=self.scaling, dropout=0.0 if not self.training else self.attention_dropout, is_causal=False, **kwargs)[0] for q, k, v in zip(*splits)]
attn_output = torch.cat(attn_outputs, dim=1)
attn_output = attn_output.reshape(seq_length, -1).contiguous()
attn_output = self.proj(attn_output)
return attn_output
class_skeleton:
class VisionAttention(nn.Module):
def __init__(self, config: Qwen2VLVisionConfig) -> None:
pass
def forward(self, hidden_states: torch.Tensor, cu_seqlens: torch.Tensor, rotary_pos_emb: Optional[torch.Tensor]=None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]]=None, **kwargs) -> torch.Tensor:
pass
Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=16, AvgCountLineBlank=1, AvgCountLineCode=15, AvgCountLineComment=0, AvgCyclomatic=2, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=4, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=33, CountLineBlank=3, CountLineCode=30, CountLineCodeDecl=15, CountLineCodeExe=25, CountLineComment=0, CountStmt=26, CountStmtDecl=13, CountStmtExe=23, MaxCyclomatic=2, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=3
id: 4,806
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
class_name: transformers.models.qwen2_vl.modeling_qwen2_vl.VisionMlp
human_written_code:
import torch.nn.functional as F
from ...activations import ACT2FN
import torch
import torch.nn as nn
class VisionMlp(nn.Module):
def __init__(self, dim: int, hidden_dim: int, hidden_act: str) -> None:
super().__init__()
self.fc1 = nn.Linear(dim, hidden_dim)
self.act = ACT2FN[hidden_act]
self.fc2 = nn.Linear(hidden_dim, dim)
def forward(self, x) -> torch.Tensor:
return self.fc2(self.act(self.fc1(x)))
class_skeleton:
class VisionMlp(nn.Module):
def __init__(self, dim: int, hidden_dim: int, hidden_act: str) -> None:
pass
def forward(self, x) -> torch.Tensor:
pass
Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=4, AvgCountLineBlank=0, AvgCountLineCode=4, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=3, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=9, CountLineBlank=1, CountLineCode=8, CountLineCodeDecl=6, CountLineCodeExe=5, CountLineComment=0, CountStmt=8, CountStmtDecl=6, CountStmtExe=5, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
id: 4,807
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py
class_name: transformers.models.qwen2_vl.modeling_qwen2_vl.VisionRotaryEmbedding
human_written_code:
import torch.nn.functional as F
import torch
import torch.nn as nn
class VisionRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, dim: int, theta: float=10000.0) -> None:
super().__init__()
inv_freq = 1.0 / theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim)
self.register_buffer('inv_freq', inv_freq, persistent=False)
def forward(self, seqlen: int) -> torch.Tensor:
seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
freqs = torch.outer(seq, self.inv_freq)
return freqs
class_skeleton:
class VisionRotaryEmbedding(nn.Module):
def __init__(self, dim: int, theta: float=10000.0) -> None:
pass
def forward(self, seqlen: int) -> torch.Tensor:
pass
Metrics: total_program_units=3, total_doc_str=0, AvgCountLine=4, AvgCountLineBlank=0, AvgCountLineCode=4, AvgCountLineComment=0, AvgCyclomatic=1, CommentToCodeRatio=0, CountClassBase=1, CountClassCoupled=4, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=2, CountDeclInstanceVariable=0, CountDeclMethod=2, CountDeclMethodAll=12, CountLine=10, CountLineBlank=1, CountLineCode=9, CountLineCodeDecl=6, CountLineCodeExe=6, CountLineComment=0, CountStmt=9, CountStmtDecl=6, CountStmtExe=6, MaxCyclomatic=1, MaxInheritanceTree=1, MaxNesting=0, SumCyclomatic=2
id: 4,808
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/qwen2_vl/processing_qwen2_vl.py
class_name: transformers.models.qwen2_vl.processing_qwen2_vl.Qwen2VLProcessor
human_written_code:
from ...video_utils import VideoInput
from typing import Optional, Union
from ...feature_extraction_utils import BatchFeature
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...image_utils import ImageInput
import numpy as np
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
class Qwen2VLProcessor(ProcessorMixin):
"""
Constructs a Qwen2-VL processor which wraps a Qwen2-VL image processor and a Qwen2 tokenizer into a single processor.
[`Qwen2VLProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`] and [`Qwen2TokenizerFast`]. See the
[`~Qwen2VLProcessor.__call__`] and [`~Qwen2VLProcessor.decode`] for more information.
Args:
image_processor ([`Qwen2VLImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`Qwen2TokenizerFast`], *optional*):
The tokenizer is a required input.
video_processor ([`Qwen2VLVideoProcessor`], *optional*):
The video processor is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
attributes = ['image_processor', 'tokenizer', 'video_processor']
image_processor_class = 'AutoImageProcessor'
video_processor_class = 'AutoVideoProcessor'
tokenizer_class = ('Qwen2Tokenizer', 'Qwen2TokenizerFast')
def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
self.image_token = '<|image_pad|>' if not hasattr(tokenizer, 'image_token') else tokenizer.image_token
self.video_token = '<|video_pad|>' if not hasattr(tokenizer, 'video_token') else tokenizer.video_token
self.image_token_id = tokenizer.image_token_id if getattr(tokenizer, 'image_token_id', None) else tokenizer.convert_tokens_to_ids(self.image_token)
self.video_token_id = tokenizer.video_token_id if getattr(tokenizer, 'video_token_id', None) else tokenizer.convert_tokens_to_ids(self.video_token)
super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template)
def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, videos: Optional[VideoInput]=None, **kwargs: Unpack[Qwen2VLProcessorKwargs]) -> BatchFeature:
"""
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwargs` arguments to
Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `vision_infos` is not `None`.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
- **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
- **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`.
"""
output_kwargs = self._merge_kwargs(Qwen2VLProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)
image_inputs = videos_inputs = {}
if images is not None:
image_inputs = self.image_processor(images=images, **output_kwargs['images_kwargs'])
image_grid_thw = image_inputs['image_grid_thw']
if videos is not None:
videos_inputs = self.video_processor(videos=videos, **output_kwargs['videos_kwargs'])
video_grid_thw = videos_inputs['video_grid_thw']
if not isinstance(text, list):
text = [text]
text = text.copy()
if images is not None:
merge_length = self.image_processor.merge_size ** 2
index = 0
for i in range(len(text)):
while self.image_token in text[i]:
num_image_tokens = image_grid_thw[index].prod() // merge_length
text[i] = text[i].replace(self.image_token, '<|placeholder|>' * num_image_tokens, 1)
index += 1
text[i] = text[i].replace('<|placeholder|>', self.image_token)
if videos is not None:
merge_length = self.video_processor.merge_size ** 2
index = 0
for i in range(len(text)):
while self.video_token in text[i]:
num_video_tokens = video_grid_thw[index].prod() // merge_length
text[i] = text[i].replace(self.video_token, '<|placeholder|>' * num_video_tokens, 1)
index += 1
text[i] = text[i].replace('<|placeholder|>', self.video_token)
return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None)
return_mm_token_type_ids = output_kwargs['text_kwargs'].pop('return_mm_token_type_ids', False)
text_inputs = self.tokenizer(text, **output_kwargs['text_kwargs'], return_tensors=None)
self._check_special_mm_tokens(text, text_inputs, modalities=['image', 'video'])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs['input_ids'])
mm_token_type_ids = np.zeros_like(text_inputs['input_ids'])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs['mm_token_type_ids'] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
video_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (num_frames, height, width) per each video.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = Qwen2VLProcessorKwargs._defaults.get('images_kwargs', {})
images_kwargs.update(kwargs)
merge_size = images_kwargs.get('merge_size', None) or self.image_processor.merge_size
num_image_patches = [self.image_processor.get_number_of_image_patches(*image_size, images_kwargs) for image_size in image_sizes]
num_image_tokens = [num_patches // merge_size ** 2 for num_patches in num_image_patches]
vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})
if video_sizes is not None:
videos_kwargs = Qwen2VLProcessorKwargs._defaults.get('videos_kwargs', {})
videos_kwargs.update(kwargs)
num_video_patches = [self.video_processor.get_number_of_video_patches(*video_size, videos_kwargs) for video_size in video_sizes]
num_video_tokens = [num_patches // merge_size ** 2 for num_patches in num_video_patches]
vision_data['num_video_tokens'] = num_video_tokens
return MultiModalData(**vision_data)
def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs):
"""
Post-process the output of the model to decode the text.
Args:
generated_outputs (`torch.Tensor` or `np.ndarray`):
The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
or `(sequence_length,)`.
skip_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
**kwargs:
Additional arguments to be passed to the tokenizer's `batch_decode method`.
Returns:
`list[str]`: The decoded text.
"""
return self.tokenizer.batch_decode(generated_outputs, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)
class_skeleton:
class Qwen2VLProcessor(ProcessorMixin):
'''
Constructs a Qwen2-VL processor which wraps a Qwen2-VL image processor and a Qwen2 tokenizer into a single processor.
[`Qwen2VLProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`] and [`Qwen2TokenizerFast`]. See the
[`~Qwen2VLProcessor.__call__`] and [`~Qwen2VLProcessor.decode`] for more information.
Args:
image_processor ([`Qwen2VLImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`Qwen2TokenizerFast`], *optional*):
The tokenizer is a required input.
video_processor ([`Qwen2VLVideoProcessor`], *optional*):
The video processor is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
'''
def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
pass
def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]=None, videos: Optional[VideoInput]=None, **kwargs: Unpack[Qwen2VLProcessorKwargs]) -> BatchFeature:
'''
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the vision inputs, this method forwards the `vision_infos` and `kwargs` arguments to
Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`] if `vision_infos` is not `None`.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
- **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
- **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`.
'''
pass
def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):
'''
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
video_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (num_frames, height, width) per each video.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
'''
pass
def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs):
'''
Post-process the output of the model to decode the text.
Args:
generated_outputs (`torch.Tensor` or `np.ndarray`):
The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
or `(sequence_length,)`.
skip_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
**kwargs:
Additional arguments to be passed to the tokenizer's `batch_decode method`.
Returns:
`list[str]`: The decoded text.
'''
pass
Metrics: total_program_units=5, total_doc_str=4, AvgCountLine=21, AvgCountLineBlank=2, AvgCountLineCode=11, AvgCountLineComment=8, AvgCyclomatic=3, CommentToCodeRatio=0.89, CountClassBase=1, CountClassCoupled=6, CountClassCoupledModified=2, CountClassDerived=0, CountDeclInstanceMethod=6, CountDeclInstanceVariable=2, CountDeclMethod=6, CountDeclMethodAll=23, CountLine=150, CountLineBlank=18, CountLineCode=70, CountLineCodeDecl=31, CountLineCodeExe=56, CountLineComment=62, CountStmt=51, CountStmtDecl=24, CountStmtExe=44, MaxCyclomatic=10, MaxInheritanceTree=2, MaxNesting=3, SumCyclomatic=17
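A minimal usage sketch for the processor described in this row; the checkpoint name and image path are illustrative, not taken from the excerpt:

```python
# Sketch: preparing text + image inputs with the Qwen2-VL processor.
# "Qwen/Qwen2-VL-2B-Instruct" and "example.jpg" are illustrative placeholders.
from transformers import AutoProcessor
from PIL import Image

processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
image = Image.open("example.jpg")

# The prompt carries the image placeholder token; __call__ expands it to one token
# per merged vision patch, as the replace loop in __call__ above does.
text = "<|vision_start|><|image_pad|><|vision_end|>Describe this image."
inputs = processor(text=[text], images=[image], return_tensors="pt")
print(inputs.keys())  # input_ids, attention_mask, pixel_values, image_grid_thw
```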
id: 4,809
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/qwen2_vl/processing_qwen2_vl.py
class_name: transformers.models.qwen2_vl.processing_qwen2_vl.Qwen2VLProcessorKwargs
human_written_code:
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
class Qwen2VLProcessorKwargs(ProcessingKwargs, total=False):
images_kwargs: Qwen2VLImagesKwargs
_defaults = {'text_kwargs': {'padding': False, 'return_mm_token_type_ids': False}}
class_skeleton:
class Qwen2VLProcessorKwargs(ProcessingKwargs, total=False):
pass
Metrics: total_program_units=1, total_doc_str=0, AvgCountLine=0, AvgCountLineBlank=0, AvgCountLineCode=0, AvgCountLineComment=0, AvgCyclomatic=0, CommentToCodeRatio=0, CountClassBase=2, CountClassCoupled=0, CountClassCoupledModified=0, CountClassDerived=0, CountDeclInstanceMethod=0, CountDeclInstanceVariable=0, CountDeclMethod=0, CountDeclMethodAll=0, CountLine=6, CountLineBlank=0, CountLineCode=6, CountLineCodeDecl=2, CountLineCodeExe=5, CountLineComment=0, CountStmt=2, CountStmtDecl=2, CountStmtExe=1, MaxCyclomatic=0, MaxInheritanceTree=3, MaxNesting=0, SumCyclomatic=0
id: 4,810
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/rag/configuration_rag.py
class_name: transformers.models.rag.configuration_rag.RagConfig
human_written_code:
from ...utils import add_start_docstrings
from ...configuration_utils import PretrainedConfig
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
model_type = 'rag'
has_no_defaults_at_init = True
def __init__(self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=' / ', doc_sep=' // ', n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset='wiki_dpr', dataset_split='train', index_name='compressed', index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, dataset_revision=None, **kwargs):
super().__init__(bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs)
if 'question_encoder' not in kwargs or 'generator' not in kwargs:
raise ValueError(f'A configuration of type {self.model_type} cannot be instantiated because both `question_encoder` and `generator` sub-configurations were not passed, only {kwargs}')
question_encoder_config = kwargs.pop('question_encoder')
question_encoder_model_type = question_encoder_config.pop('model_type')
decoder_config = kwargs.pop('generator')
decoder_model_type = decoder_config.pop('model_type')
from ..auto.configuration_auto import AutoConfig
self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
self.reduce_loss = reduce_loss
self.label_smoothing = label_smoothing
self.exclude_bos_score = exclude_bos_score
self.do_marginalize = do_marginalize
self.title_sep = title_sep
self.doc_sep = doc_sep
self.n_docs = n_docs
self.max_combined_length = max_combined_length
self.dataset = dataset
self.dataset_split = dataset_split
self.index_name = index_name
self.retrieval_vector_size = retrieval_vector_size
self.retrieval_batch_size = retrieval_batch_size
self.passages_path = passages_path
self.index_path = index_path
self.use_dummy_dataset = use_dummy_dataset
self.dataset_revision = dataset_revision
self.output_retrieved = output_retrieved
self.do_deduplication = do_deduplication
self.use_cache = use_cache
if self.forced_eos_token_id is None:
self.forced_eos_token_id = getattr(self.generator, 'forced_eos_token_id', None)
@classmethod
def from_question_encoder_generator_configs(cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
"""
Instantiate a [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and
decoder model configuration.
Returns:
[`EncoderDecoderConfig`]: An instance of a configuration object
"""
return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
class_skeleton: null
Metrics: total_program_units=5, total_doc_str=1, AvgCountLine=49, AvgCountLineBlank=6, AvgCountLineCode=41, AvgCountLineComment=3, AvgCyclomatic=2, CommentToCodeRatio=0.07, CountClassBase=1, CountClassCoupled=3, CountClassCoupledModified=1, CountClassDerived=0, CountDeclInstanceMethod=1, CountDeclInstanceVariable=23, CountDeclMethod=2, CountDeclMethodAll=2, CountLine=104, CountLineBlank=13, CountLineCode=85, CountLineCodeDecl=67, CountLineCodeExe=47, CountLineComment=6, CountStmt=38, CountStmtDecl=33, CountStmtExe=34, MaxCyclomatic=3, MaxInheritanceTree=1, MaxNesting=1, SumCyclomatic=4
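A minimal sketch of the `from_question_encoder_generator_configs` classmethod shown above; the DPR and BART sub-configs are illustrative choices, not mandated by the excerpt:

```python
# Sketch: composing a RagConfig from two sub-configurations.
# DPRConfig / BartConfig are example encoder/generator configs; swap in the ones you need.
from transformers import BartConfig, DPRConfig, RagConfig

question_encoder_config = DPRConfig()
generator_config = BartConfig()

rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5, retrieval_vector_size=768
)
print(rag_config.n_docs, rag_config.generator.model_type)  # 5 bart
```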
id: 4,811
repository_name: huggingface/pytorch-pretrained-BERT
file_path: huggingface_pytorch-pretrained-BERT/src/transformers/models/rag/modeling_rag.py
class_name: transformers.models.rag.modeling_rag.RagModel
human_written_code:
from typing import Callable, Optional, Union
from .configuration_rag import RagConfig
import torch
from ...utils import auto_docstring, logging
from ...modeling_utils import PreTrainedModel
from ...cache_utils import Cache, EncoderDecoderCache
from ...configuration_utils import PretrainedConfig
from .retrieval_rag import RagRetriever
@auto_docstring
class RagModel(RagPreTrainedModel):
def __init__(self, config: Optional[PretrainedConfig]=None, question_encoder: Optional[PreTrainedModel]=None, generator: Optional[PreTrainedModel]=None, retriever: Optional[RagRetriever]=None, **kwargs):
"""
question_encoder (`PreTrainedModel`, *optional*):
The model responsible for encoding the question into hidden states for retrieval.
generator (`PreTrainedModel`, *optional*):
The model responsible for generating text based on retrieved documents.
retriever (`RagRetriever`, *optional*):
The component responsible for retrieving documents from a knowledge base given the encoded question.
"""
assert config is not None or (question_encoder is not None and generator is not None), 'Either a configuration or an question_encoder and a generator has to be provided.'
if config is None:
config = RagConfig.from_question_encoder_generator_configs(question_encoder.config, generator.config, **kwargs)
else:
assert isinstance(config, self.config_class), f'config: {config} has to be of type {self.config_class}'
super().__init__(config)
if question_encoder is None:
from ..auto.modeling_auto import AutoModel
question_encoder = AutoModel.from_config(config.question_encoder)
if generator is None:
from ..auto.modeling_auto import AutoModelForSeq2SeqLM
generator = AutoModelForSeq2SeqLM.from_config(config.generator)
self.retriever = retriever
if self.retriever is not None:
assert isinstance(retriever, RagRetriever), f'`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`'
self.retriever = retriever
self.question_encoder = question_encoder
self.generator = generator
self.ctx_encoder = None
self.context_encoder_training = False
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, past_key_values: Optional[Cache]=None, doc_scores: Optional[torch.FloatTensor]=None, context_input_ids: Optional[torch.LongTensor]=None, context_attention_mask: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_retrieved: Optional[bool]=None, n_docs: Optional[int]=None) -> Union[tuple[torch.Tensor], RetrievAugLMOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies
which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to
obtain the indices.
[What are input IDs?](../glossary#input-ids)
encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`,
*optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs *
sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the
generator's encoder.
Used by the ([`RagModel`]) model during decoding.
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Provide for generation tasks. `None` by default, construct as per instructions for the generator model
you're using with your RAG instance.
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
`question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `doc_scores`
has to be provided to the forward pass. `doc_scores` can be computed via
`question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information.
context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever. If the model was not initialized with a `retriever`, `context_input_ids` has to be provided to
the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`,*optional*, returned when *output_retrieved=True*):
Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever. If the model is not initialized with a `retriever`, `context_attention_mask` has to be
provided to the forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`].
output_retrieved (`bool`, *optional*):
Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask`. See returned tensors for more detail.
n_docs (`int`, *optional*):
The number of documents to retrieve.
Example:
```python
>>> from transformers import AutoTokenizer, RagRetriever, RagModel
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-base")
>>> retriever = RagRetriever.from_pretrained(
... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
... )
>>> # initialize with RagRetriever to do everything in one forward call
>>> model = RagModel.from_pretrained("facebook/rag-token-base", retriever=retriever)
>>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
>>> outputs = model(input_ids=inputs["input_ids"])
```"""
n_docs = n_docs if n_docs is not None else self.config.n_docs
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
output_retrieved = output_retrieved if output_retrieved is not None else self.config.output_retrieved
has_to_retrieve = self.retriever is not None and (context_input_ids is None or context_attention_mask is None or doc_scores is None) and (encoder_outputs is None)
if encoder_outputs is None:
if has_to_retrieve:
question_enc_outputs = self.question_encoder(input_ids, attention_mask=attention_mask, return_dict=True)
question_encoder_last_hidden_state = question_enc_outputs[0]
retriever_outputs = self.retriever(input_ids, question_encoder_last_hidden_state.detach().to(device='cpu', dtype=torch.float32).numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors='pt')
if self.context_encoder_training:
context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_input_ids, retrieved_doc_attention_mask, retrieved_doc_ids = (retriever_outputs['context_input_ids'], retriever_outputs['context_attention_mask'], retriever_outputs['retrieved_doc_embeds'], retriever_outputs['tokenized_doc_ids'], retriever_outputs['tokenized_doc_attention_mask'], retriever_outputs['doc_ids'])
context_input_ids = context_input_ids.to(input_ids)
context_attention_mask = context_attention_mask.to(input_ids)
retrieved_doc_input_ids = retrieved_doc_input_ids.to(input_ids)
retrieved_doc_attention_mask = retrieved_doc_attention_mask.to(input_ids)
retrieved_doc_embeds = self.ctx_encoder(retrieved_doc_input_ids, attention_mask=retrieved_doc_attention_mask, return_dict=True).pooler_output
retrieved_doc_embeds = retrieved_doc_embeds.view(-1, n_docs, question_encoder_last_hidden_state.shape[1])
doc_scores = torch.bmm(question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(1)
else:
context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = (retriever_outputs['context_input_ids'], retriever_outputs['context_attention_mask'], retriever_outputs['retrieved_doc_embeds'], retriever_outputs['doc_ids'])
retrieved_doc_embeds = retrieved_doc_embeds.to(question_encoder_last_hidden_state)
context_input_ids = context_input_ids.to(input_ids)
context_attention_mask = context_attention_mask.to(input_ids)
doc_scores = torch.bmm(question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(1)
else:
assert context_input_ids is not None, 'Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function.'
assert context_attention_mask is not None, 'Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function.'
assert doc_scores is not None, 'Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function.'
assert doc_scores is not None, 'Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function.'
assert doc_scores.shape[1] % n_docs == 0, f' The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is {context_input_ids.shape[0]}.'
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids.repeat_interleave(n_docs, dim=0)
if decoder_attention_mask is not None:
decoder_attention_mask = decoder_attention_mask.repeat_interleave(n_docs, dim=0)
gen_outputs = self.generator(input_ids=context_input_ids, attention_mask=context_attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, return_dict=True)
if not has_to_retrieve:
question_encoder_last_hidden_state = None
question_enc_hidden_states = None
question_enc_attentions = None
retrieved_doc_embeds = None
retrieved_doc_ids = None
else:
question_enc_hidden_states = question_enc_outputs.hidden_states
question_enc_attentions = question_enc_outputs.attentions
if not has_to_retrieve or not output_retrieved:
context_input_ids = None
context_attention_mask = None
retrieved_doc_embeds = None
retrieved_doc_ids = None
return RetrievAugLMOutput(logits=gen_outputs.logits, doc_scores=doc_scores, past_key_values=gen_outputs.past_key_values, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, retrieved_doc_embeds=retrieved_doc_embeds, retrieved_doc_ids=retrieved_doc_ids, question_encoder_last_hidden_state=question_encoder_last_hidden_state, question_enc_hidden_states=question_enc_hidden_states, question_enc_attentions=question_enc_attentions, generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state, generator_enc_hidden_states=gen_outputs.encoder_hidden_states, generator_enc_attentions=gen_outputs.encoder_attentions, generator_dec_hidden_states=gen_outputs.decoder_hidden_states, generator_dec_attentions=gen_outputs.decoder_attentions, generator_cross_attentions=gen_outputs.cross_attentions)
|
@auto_docstring
class RagModel(RagPreTrainedModel):
def __init__(self, config: Optional[PretrainedConfig]=None, question_encoder: Optional[PreTrainedModel]=None, generator: Optional[PreTrainedModel]=None, retriever: Optional[RagRetriever]=None, **kwargs):
'''
question_encoder (`PreTrainedModel`, *optional*):
The model responsible for encoding the question into hidden states for retrieval.
generator (`PreTrainedModel`, *optional*):
The model responsible for generating text based on retrieved documents.
retriever (`RagRetriever`, *optional*):
The component responsible for retrieving documents from a knowledge base given the encoded question.
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, past_key_values: Optional[Cache]=None, doc_scores: Optional[torch.FloatTensor]=None, context_input_ids: Optional[torch.LongTensor]=None, context_attention_mask: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_retrieved: Optional[bool]=None, n_docs: Optional[int]=None) -> Union[tuple[torch.Tensor], RetrievAugLMOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies
which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to
obtain the indices.
[What are input IDs?](../glossary#input-ids)
encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`,
*optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs *
sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the
generator's encoder.
Used by the ([`RagModel`]) model during decoding.
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Provide for generation tasks. `None` by default, construct as per instructions for the generator model
you're using with your RAG instance.
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
`question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `doc_scores`
has to be provided to the forward pass. `doc_scores` can be computed via
`question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information.
context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever. If the model was not initialized with a `retriever`, `context_input_ids` has to be provided to
the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`,*optional*, returned when *output_retrieved=True*):
Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever. If the model is not initialized with a `retriever`, `context_attention_mask` has to be
provided to the forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`].
output_retrieved (`bool`, *optional*):
Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask`. See returned tensors for more detail.
n_docs (`int`, *optional*):
The number of documents to retrieve.
Example:
```python
>>> from transformers import AutoTokenizer, RagRetriever, RagModel
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-base")
>>> retriever = RagRetriever.from_pretrained(
... "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
... )
>>> # initialize with RagRetriever to do everything in one forward call
>>> model = RagModel.from_pretrained("facebook/rag-token-base", retriever=retriever)
>>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
>>> outputs = model(input_ids=inputs["input_ids"])
```'''
pass
| 5 | 2 | 117 | 14 | 93 | 13 | 9 | 0.13 | 1 | 10 | 5 | 0 | 2 | 5 | 2 | 4 | 238 | 28 | 188 | 45 | 158 | 25 | 71 | 18 | 66 | 13 | 2 | 3 | 18 |
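To make two alignment steps of `RagModel.forward` above concrete, the following sketch uses small synthetic tensors (shapes chosen for illustration only): document scores are a batched dot product between the question representation and the retrieved document embeddings, and decoder inputs are repeated once per retrieved document so batch dimensions line up.

```python
# Synthetic-tensor sketch of doc scoring and decoder-input repetition as in RagModel.forward.
import torch

batch_size, n_docs, hidden, tgt_len = 2, 3, 8, 4
question_encoder_last_hidden_state = torch.randn(batch_size, hidden)
retrieved_doc_embeds = torch.randn(batch_size, n_docs, hidden)

# doc_scores: (batch_size, n_docs) similarity between the question and each retrieved document
doc_scores = torch.bmm(
    question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)
).squeeze(1)
print(doc_scores.shape)  # torch.Size([2, 3])

# decoder inputs are repeated n_docs times: one copy per retrieved context
decoder_input_ids = torch.arange(batch_size * tgt_len).view(batch_size, tgt_len)
decoder_input_ids = decoder_input_ids.repeat_interleave(n_docs, dim=0)
print(decoder_input_ids.shape)  # torch.Size([6, 4]) == (batch_size * n_docs, tgt_len)
```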
4,812 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/rag/modeling_rag.py | transformers.models.rag.modeling_rag.RagPreTrainedModel |
from .retrieval_rag import RagRetriever
from ...utils import auto_docstring, logging
from typing import Callable, Optional, Union
from ...modeling_utils import PreTrainedModel
from .configuration_rag import RagConfig
@auto_docstring(custom_intro='\n RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP\n Tasks](https://huggingface.co/papers/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al.\n\n RAG is a retriever augmented model and encapsulate three components: a question encoder, a dataset retriever and a\n generator, the encoder and generator are trainable while the retriever is just an indexed dataset.\n ')
@auto_docstring
class RagPreTrainedModel(PreTrainedModel):
config: RagConfig
base_model_prefix = 'rag'
_supports_flash_attn = True
_supports_sdpa = True
@classmethod
def from_pretrained_question_encoder_generator(cls, question_encoder_pretrained_model_name_or_path: Optional[str]=None, generator_pretrained_model_name_or_path: Optional[str]=None, retriever: RagRetriever=None, **kwargs) -> PreTrainedModel:
"""
Instantiates a question encoder and a generator from one or two base classes of the library from pretrained
model checkpoints.
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
the model, you need to first set it back in training mode with `model.train()`.
Params:
question_encoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
Information necessary to initiate the question encoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
Information necessary to initiate the generator. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
model_args (remaining positional arguments, *optional*):
All remaining positional arguments will be passed to the underlying model's `__init__` method.
retriever ([`RagRetriever`], *optional*):
The retriever to use.
kwargs (remaining dictionary of keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`).
- To update the question_encoder configuration, use the prefix *question_encoder_* for each
configuration parameter.
- To update the generator configuration, use the prefix *generator_* for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a `config` is provided or automatically loaded.
Example:
```python
>>> from transformers import RagModel
>>> # initialize a RAG from two pretrained models.
>>> model = RagModel.from_pretrained_question_encoder_generator(
... "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small"
... )
>>> # saving model after fine-tuning
>>> model.save_pretrained("./rag")
>>> # load fine-tuned model
>>> model = RagModel.from_pretrained("./rag")
```"""
kwargs_question_encoder = {argument[len('question_encoder_'):]: value for argument, value in kwargs.items() if argument.startswith('question_encoder_')}
kwargs_generator = {argument[len('generator_'):]: value for argument, value in kwargs.items() if argument.startswith('generator_')}
for key in kwargs_question_encoder:
del kwargs['question_encoder_' + key]
for key in kwargs_generator:
del kwargs['generator_' + key]
question_encoder = kwargs_question_encoder.pop('model', None)
if question_encoder is None:
assert question_encoder_pretrained_model_name_or_path is not None, 'If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to be defined'
from ..auto.modeling_auto import AutoModel
if 'config' not in kwargs_question_encoder:
from ..auto.configuration_auto import AutoConfig
question_encoder_config, kwargs_question_encoder = AutoConfig.from_pretrained(question_encoder_pretrained_model_name_or_path, **kwargs_question_encoder, return_unused_kwargs=True)
kwargs_question_encoder['config'] = question_encoder_config
question_encoder = AutoModel.from_pretrained(question_encoder_pretrained_model_name_or_path, **kwargs_question_encoder)
generator = kwargs_generator.pop('model', None)
if generator is None:
assert generator_pretrained_model_name_or_path is not None, 'If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has to be defined'
from ..auto.modeling_auto import AutoModelForSeq2SeqLM
if 'config' not in kwargs_generator:
from ..auto.configuration_auto import AutoConfig
generator_config, kwargs_generator = AutoConfig.from_pretrained(generator_pretrained_model_name_or_path, **kwargs_generator, return_unused_kwargs=True)
kwargs_generator['config'] = generator_config
generator = AutoModelForSeq2SeqLM.from_pretrained(generator_pretrained_model_name_or_path, **kwargs_generator)
config = kwargs.get('config')
if config is None:
config = RagConfig.from_question_encoder_generator_configs(question_encoder.config, generator.config, **kwargs)
return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever)
|
@auto_docstring(custom_intro='\n RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP\n Tasks](https://huggingface.co/papers/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al.\n\n RAG is a retriever augmented model and encapsulate three components: a question encoder, a dataset retriever and a\n generator, the encoder and generator are trainable while the retriever is just an indexed dataset.\n ')
@auto_docstring
class RagPreTrainedModel(PreTrainedModel):
@classmethod
def from_pretrained_question_encoder_generator(cls, question_encoder_pretrained_model_name_or_path: Optional[str]=None, generator_pretrained_model_name_or_path: Optional[str]=None, retriever: RagRetriever=None, **kwargs) -> PreTrainedModel:
'''
Instantiates a question encoder and a generator from one or two base classes of the library from pretrained
model checkpoints.
The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
the model, you need to first set it back in training mode with `model.train()`.
Params:
question_encoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
Information necessary to initiate the question encoder. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
Information necessary to initiate the generator. Can be either:
- A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
- A path to a *directory* containing model weights saved using
[`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
model_args (remaining positional arguments, *optional*):
All remaining positional arguments will be passed to the underlying model's `__init__` method.
retriever ([`RagRetriever`], *optional*):
The retriever to use.
kwargs (remaining dictionary of keyword arguments, *optional*):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
`output_attentions=True`).
- To update the question_encoder configuration, use the prefix *question_encoder_* for each
configuration parameter.
- To update the generator configuration, use the prefix *generator_* for each configuration parameter.
- To update the parent model configuration, do not use a prefix for each configuration parameter.
Behaves differently depending on whether a `config` is provided or automatically loaded.
Example:
```python
>>> from transformers import RagModel
>>> # initialize a RAG from two pretrained models.
>>> model = RagModel.from_pretrained_question_encoder_generator(
... "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small"
... )
>>> # saving model after fine-tuning
>>> model.save_pretrained("./rag")
>>> # load fine-tuned model
>>> model = RagModel.from_pretrained("./rag")
```'''
pass
| 5 | 1 | 72 | 13 | 32 | 28 | 5 | 0.86 | 1 | 7 | 5 | 3 | 0 | 0 | 2 | 2 | 162 | 30 | 71 | 27 | 56 | 61 | 37 | 19 | 30 | 8 | 1 | 2 | 9 |
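The kwargs routing used by `from_pretrained_question_encoder_generator` above splits keyword arguments by prefix: `question_encoder_*` goes to the question encoder, `generator_*` to the generator, and the rest stays with the parent configuration. A small self-contained sketch of that convention (the argument names are arbitrary examples):

```python
# Sketch of the prefix-based kwargs routing; the argument names are arbitrary examples.
kwargs = {
    "question_encoder_output_attentions": True,
    "generator_output_hidden_states": True,
    "n_docs": 5,
}
kwargs_question_encoder = {
    k[len("question_encoder_"):]: v for k, v in kwargs.items() if k.startswith("question_encoder_")
}
kwargs_generator = {k[len("generator_"):]: v for k, v in kwargs.items() if k.startswith("generator_")}
for key in kwargs_question_encoder:
    del kwargs["question_encoder_" + key]
for key in kwargs_generator:
    del kwargs["generator_" + key]

print(kwargs_question_encoder)  # {'output_attentions': True}
print(kwargs_generator)         # {'output_hidden_states': True}
print(kwargs)                   # {'n_docs': 5} -- left for the parent RagConfig
```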
4,813 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/rag/modeling_rag.py | transformers.models.rag.modeling_rag.RagSequenceForGeneration |
import torch
from typing import Callable, Optional, Union
from .configuration_rag import RagConfig
from torch import nn
from ...utils import auto_docstring, logging
from .retrieval_rag import RagRetriever
from ...configuration_utils import PretrainedConfig
from ...cache_utils import Cache, EncoderDecoderCache
from ...modeling_utils import PreTrainedModel
@auto_docstring(custom_intro='\n A RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass.\n ')
class RagSequenceForGeneration(RagPreTrainedModel):
def __init__(self, config: Optional[PretrainedConfig]=None, question_encoder: Optional[PreTrainedModel]=None, generator: Optional[PreTrainedModel]=None, retriever: Optional[RagRetriever]=None, **kwargs):
"""
question_encoder (`PreTrainedModel`, *optional*):
The model responsible for encoding the question into hidden states for retrieval.
generator (`PreTrainedModel`, *optional*):
The model responsible for generating text based on retrieved documents.
retriever (`RagRetriever`, *optional*):
The component responsible for retrieving documents from a knowledge base given the encoded question.
"""
assert config is not None or (question_encoder is not None and generator is not None), 'Either a configuration or an encoder and a generator has to be provided.'
if config is None:
config = RagConfig.from_question_encoder_generator_configs(question_encoder.config, generator.config, **kwargs)
super().__init__(config)
self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever)
def set_retriever(self, retriever: RagRetriever):
self.rag.retriever = retriever
def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel):
self.rag.context_encoder_training = True
self.rag.ctx_encoder = ctx_encoder
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, past_key_values: Optional[Cache]=None, context_input_ids: Optional[torch.LongTensor]=None, context_attention_mask: Optional[torch.LongTensor]=None, doc_scores: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_retrieved: Optional[bool]=None, exclude_bos_score: Optional[bool]=None, reduce_loss: Optional[bool]=None, labels: Optional[torch.LongTensor]=None, n_docs: Optional[int]=None, **kwargs) -> RetrievAugLMMarginOutput:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies
which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to
obtain the indices.
[What are input IDs?](../glossary#input-ids)
encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`,
*optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs *
sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the
generator's encoder.
Used by the ([`RagModel`]) model during decoding.
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Provide for generation tasks. `None` by default, construct as per instructions for the generator model
you're using with your RAG instance.
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever. If the model was not initialized with a `retriever`, `context_input_ids` has to be provided to
the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`,*optional*, returned when *output_retrieved=True*):
Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever. If the model is not initialized with a `retriever`, `context_attention_mask` has to be
provided to the forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`].
doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
`question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `doc_scores`
has to be provided to the forward pass. `doc_scores` can be computed via
`question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information.
output_retrieved (`bool`, *optional*):
Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask`. See returned tensors for more detail.
exclude_bos_score (`bool`, *optional*):
Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing
the loss.
reduce_loss (`bool`, *optional*):
Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum`
operation.
n_docs (`int`, *optional*):
The number of documents to retrieve.
Example:
```python
>>> from transformers import AutoTokenizer, RagRetriever, RagSequenceForGeneration
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq")
>>> retriever = RagRetriever.from_pretrained(
... "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
... )
>>> # initialize with RagRetriever to do everything in one forward call
>>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
>>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
>>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt")
>>> input_ids = inputs["input_ids"]
>>> labels = targets["input_ids"]
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> # or use retriever separately
>>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", use_dummy_dataset=True)
>>> # 1. Encode
>>> question_hidden_states = model.question_encoder(input_ids)[0]
>>> # 2. Retrieve
>>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt")
>>> doc_scores = torch.bmm(
... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)
... ).squeeze(1)
>>> # 3. Forward to generator
>>> outputs = model(
... context_input_ids=docs_dict["context_input_ids"],
... context_attention_mask=docs_dict["context_attention_mask"],
... doc_scores=doc_scores,
... decoder_input_ids=labels,
... )
```"""
n_docs = n_docs if n_docs is not None else self.config.n_docs
exclude_bos_score = exclude_bos_score if exclude_bos_score is not None else self.config.exclude_bos_score
reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss
if labels is not None:
if decoder_input_ids is None:
decoder_input_ids = labels
use_cache = False
outputs = self.rag(input_ids=input_ids, attention_mask=attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_retrieved=output_retrieved, n_docs=n_docs)
loss = None
if labels is not None:
loss = self.get_nll(outputs.logits, outputs.doc_scores, decoder_input_ids, reduce_loss=reduce_loss, epsilon=self.config.label_smoothing, exclude_bos_score=exclude_bos_score, n_docs=n_docs)
return RetrievAugLMMarginOutput(loss=loss, logits=outputs.logits, doc_scores=outputs.doc_scores, past_key_values=outputs.past_key_values, context_input_ids=outputs.context_input_ids, context_attention_mask=outputs.context_attention_mask, retrieved_doc_embeds=outputs.retrieved_doc_embeds, retrieved_doc_ids=outputs.retrieved_doc_ids, question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, question_enc_hidden_states=outputs.question_enc_hidden_states, question_enc_attentions=outputs.question_enc_attentions, generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, generator_enc_hidden_states=outputs.generator_enc_hidden_states, generator_enc_attentions=outputs.generator_enc_attentions, generator_dec_hidden_states=outputs.generator_dec_hidden_states, generator_dec_attentions=outputs.generator_dec_attentions, generator_cross_attentions=outputs.generator_cross_attentions)
@property
def retriever(self):
return self.rag.retriever
@property
def generator(self):
return self.rag.generator
@property
def question_encoder(self):
return self.rag.question_encoder
@torch.no_grad()
def generate(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, context_input_ids: Optional[torch.LongTensor]=None, context_attention_mask: Optional[torch.LongTensor]=None, doc_scores: Optional[torch.FloatTensor]=None, do_deduplication: Optional[bool]=None, num_return_sequences: Optional[int]=None, num_beams: Optional[int]=None, n_docs: Optional[int]=None, **model_kwargs) -> torch.LongTensor:
"""
Implements RAG sequence "thorough" decoding. Read the [`~generation.GenerationMixin.generate`] documentation
for more information on how to set other generate input parameters.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
The sequence used as a prompt for the generation. If `input_ids` is not passed, then
`context_input_ids` has to be provided.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Input IDs post-processed from the retrieved documents and the question encoder input_ids by the
retriever.
context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever.
If the model is not initialized with a `retriever` or `input_ids` is not given, `context_input_ids` and
`context_attention_mask` have to be provided to the forward pass. They are returned by
[`~RagRetriever.__call__`].
doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
`question_encoder_last_hidden_state`.
If the model is not initialized with a `retriever` or `input_ids` is not given, `doc_scores` has to be
provided to the forward pass. `doc_scores` are returned by [`~RagRetriever.__call__`].
do_deduplication (`bool`, *optional*):
Whether or not to deduplicate the generations from different context documents for a given input. Has
to be set to `False` if used while training with distributed backend.
num_return_sequences(`int`, *optional*, defaults to 1):
The number of independently computed returned sequences for each element in the batch. Note that this
is not the value we pass to the `generator`'s `[`~generation.GenerationMixin.generate`]` function,
where we set `num_return_sequences` to `num_beams`.
num_beams (`int`, *optional*, defaults to 1):
Number of beams for beam search. 1 means no beam search.
n_docs (`int`, *optional*, defaults to `config.n_docs`)
Number of documents to retrieve and/or number of documents for which to generate an answer.
kwargs (`dict[str, Any]`, *optional*):
Additional kwargs will be passed to [`~generation.GenerationMixin.generate`].
Return:
`torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated
sequences. The second dimension (sequence length) is either equal to `max_length` or shorter if all batches
finished early due to the `eos_token_id`.
"""
n_docs = n_docs if n_docs is not None else self.config.n_docs
do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication
num_doc_return_sequences = num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
num_beams = num_beams if num_beams is not None else self.config.num_beams
assert input_ids is not None or context_input_ids is not None, ' At least one of input_ids or context_input_ids must be given'
if self.retriever is not None and context_input_ids is None:
question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
context_input_ids = self.retriever(input_ids, question_hidden_states.detach().to(device='cpu', dtype=torch.float32).numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors='pt')['context_input_ids']
context_input_ids = context_input_ids.to(input_ids)
hypos = []
model_kwargs['num_beams'] = num_beams
model_kwargs['num_return_sequences'] = num_beams
model_kwargs['attention_mask'] = None
batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs
for index in range(batch_size):
generator_input_ids = context_input_ids[index * n_docs:(index + 1) * n_docs]
output_sequences = self.generator.generate(generator_input_ids, **model_kwargs)
if do_deduplication:
output_sequences = torch.stack(list({str(k.tolist()): k for k in output_sequences}.values()))
num_candidates = output_sequences.shape[0]
if input_ids is not None:
new_input_ids = input_ids[index:index + 1].repeat(num_candidates, 1)
outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True)
else:
assert context_attention_mask is not None, 'Make sure that `context_attention_mask` are passed, if no `input_ids` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function.'
assert doc_scores is not None, 'Make sure that `doc_scores` are passed, if no `input_ids` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function.'
individual_input_ids = generator_input_ids.repeat(num_candidates, 1)
individual_attention_mask = context_attention_mask[index * n_docs:(index + 1) * n_docs]
individual_attention_mask = individual_attention_mask.repeat(num_candidates, 1)
individual_doc_scores = doc_scores[index:index + 1, :]
individual_doc_scores = individual_doc_scores.repeat(num_candidates, 1)
outputs = self(context_input_ids=individual_input_ids, context_attention_mask=individual_attention_mask, doc_scores=individual_doc_scores, labels=output_sequences, exclude_bos_score=True)
top_cand_inds = (-outputs['loss']).topk(num_doc_return_sequences)[1]
hypos.append(output_sequences[top_cand_inds])
return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id)
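# Summary of the "thorough" decoding implemented above: candidates are generated separately from
# each of the n_docs retrieved contexts, optionally deduplicated, rescored with a full forward pass
# (which marginalizes over all documents via get_nll below), and the num_return_sequences
# lowest-loss candidates per example are kept and padded to a common length.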
def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None):
target = torch.cat([target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1)
n_docs = n_docs if n_docs is not None else self.config.n_docs
bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id
use_bos = bos_token_id is not None and target[:, 0].eq(bos_token_id).all()
def _mask_pads(ll, smooth_obj):
pad_mask = target.eq(self.config.generator.pad_token_id)
if pad_mask.any():
ll.masked_fill_(pad_mask, 0.0)
smooth_obj.masked_fill_(pad_mask, 0.0)
return (ll.squeeze(-1), smooth_obj.squeeze(-1))
seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view(seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1))
doc_logprobs = nn.functional.log_softmax(doc_scores, dim=1).unsqueeze(-1).unsqueeze(-1)
first_token_scores = seq_logprobs[:, :, :1, :]
second_token_scores = seq_logprobs[:, :, 1:2, :]
remainder = seq_logprobs[:, :, 2:, :]
rag_logprobs = torch.cat([first_token_scores, second_token_scores + doc_logprobs, remainder], dim=2)
target = target.unsqueeze(1).unsqueeze(-1).repeat(1, n_docs, 1, 1)
assert target.dim() == rag_logprobs.dim()
ll = rag_logprobs.gather(dim=-1, index=target)
smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True)
ll, smooth_obj = _mask_pads(ll, smooth_obj)
ll = ll[:, :, 1:].sum(2) if exclude_bos_score and use_bos else ll.sum(2)
smooth_obj = smooth_obj.sum(2)
ll = ll.logsumexp(1)
smooth_obj = smooth_obj.logsumexp(1)
nll_loss = -ll
smooth_loss = -smooth_obj
if reduce_loss:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / rag_logprobs.size(-1)
loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
return loss
@staticmethod
def _cat_and_pad(tensors, pad_token_id):
output = tensors[0].new(sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors])).fill_(pad_token_id)
ind = 0
for t in tensors:
output[ind:ind + t.shape[0], :t.shape[1]] = t
ind += t.shape[0]
return output
| null | 19 | 3 | 34 | 4 | 21 | 10 | 3 | 0.47 | 1 | 11 | 4 | 0 | 9 | 1 | 10 | 12 | 390 | 56 | 236 | 93 | 176 | 112 | 107 | 46 | 95 | 10 | 2 | 2 | 32 |
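The loss in `RagSequenceForGeneration.get_nll` above marginalizes over retrieved documents at the sequence level: per-token log-probabilities are summed over the target sequence for each document, the document posterior is added, and the result is log-sum-exp'd over documents. A simplified synthetic-tensor sketch of that core computation (it omits the target shifting, padding mask, BOS handling and label smoothing of the recorded implementation):

```python
# Simplified sketch of sequence-level marginalization over retrieved documents.
import torch

batch_size, n_docs, tgt_len, vocab = 2, 3, 4, 10
seq_logprobs = torch.log_softmax(torch.randn(batch_size, n_docs, tgt_len, vocab), dim=-1)
doc_logprobs = torch.log_softmax(torch.randn(batch_size, n_docs), dim=-1)
target = torch.randint(vocab, (batch_size, tgt_len))

# log p(y | x, z): gather per-token log-probs, then sum over the target sequence per document
per_doc_ll = (
    seq_logprobs.gather(dim=-1, index=target.unsqueeze(1).unsqueeze(-1).expand(-1, n_docs, -1, -1))
    .squeeze(-1)
    .sum(dim=2)
)

# log p(y | x) = logsumexp_z [ log p(z | x) + log p(y | x, z) ]
marginal_ll = torch.logsumexp(doc_logprobs + per_doc_ll, dim=1)
loss = -marginal_ll.sum()
print(loss)  # scalar negative log-likelihood over the batch
```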
4,814 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/rag/modeling_rag.py | transformers.models.rag.modeling_rag.RagTokenForGeneration |
from ...cache_utils import Cache, EncoderDecoderCache
from torch import nn
from ...generation import GenerationConfig, GenerationMixin, LogitsProcessorList, StoppingCriteriaList
from ...utils import auto_docstring, logging
from typing import Callable, Optional, Union
from ...modeling_utils import PreTrainedModel
from ...configuration_utils import PretrainedConfig
import copy
import torch
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
@auto_docstring(custom_intro='\n A RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass.\n ')
class RagTokenForGeneration(RagPreTrainedModel, GenerationMixin):
def __init__(self, config: Optional[PretrainedConfig]=None, question_encoder: Optional[PreTrainedModel]=None, generator: Optional[PreTrainedModel]=None, retriever: Optional[RagRetriever]=None, **kwargs):
"""
question_encoder (`PreTrainedModel`, *optional*):
The model responsible for encoding the question into hidden states for retrieval.
generator (`PreTrainedModel`, *optional*):
The model responsible for generating text based on retrieved documents.
retriever (`RagRetriever`, *optional*):
The component responsible for retrieving documents from a knowledge base given the encoded question.
"""
assert config is not None or (question_encoder is not None and generator is not None), 'Either a configuration or an encoder and a generator has to be provided.'
if config is None:
config = RagConfig.from_question_encoder_generator_configs(question_encoder.config, generator.config, **kwargs)
super().__init__(config)
self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever)
def set_retriever(self, retriever: RagRetriever):
self.rag.retriever = retriever
def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel):
self.rag.context_encoder_training = True
self.rag.ctx_encoder = ctx_encoder
def prepare_inputs_for_generation(self, decoder_input_ids, past_key_values=None, attention_mask=None, use_cache=None, encoder_outputs=None, doc_scores=None, n_docs=None, **kwargs):
if past_key_values is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
return {'input_ids': None, 'encoder_outputs': encoder_outputs, 'doc_scores': doc_scores, 'context_attention_mask': attention_mask, 'decoder_input_ids': decoder_input_ids, 'past_key_values': past_key_values, 'use_cache': use_cache, 'do_marginalize': True, 'n_docs': n_docs}
@property
def retriever(self):
return self.rag.retriever
@property
def generator(self):
return self.rag.generator
@property
def question_encoder(self):
return self.rag.question_encoder
@staticmethod
def _reorder_cache(past_key_values, beam_idx):
"""Reorders cache for generation. BART-inspired but we need to take care of the extra dimension for docs"""
def _reorder_stacked(hidden_states, new_order):
n_docs = hidden_states.shape[0] // new_order.shape[0]
hidden_states = hidden_states.view(-1, n_docs, *hidden_states.shape[1:])
hidden_states = hidden_states.index_select(0, new_order)
result = hidden_states.view(-1, *hidden_states.shape[2:])
return result
reordered_past = ()
for layer_past in past_key_values:
reordered_past += (tuple((_reorder_stacked(past_state, beam_idx.to(past_state.device)) for past_state in layer_past)),)
if isinstance(past_key_values, EncoderDecoderCache):
reordered_past = EncoderDecoderCache.from_legacy_cache(reordered_past)
return reordered_past
def marginalize(self, seq_logits, doc_scores, n_docs=None):
n_docs = n_docs if n_docs is not None else self.config.n_docs
seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view(seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1))
doc_logprobs = torch.log_softmax(doc_scores, dim=1)
log_prob_sum = seq_logprobs + doc_logprobs.unsqueeze(-1).unsqueeze(-1)
return torch.logsumexp(log_prob_sum, dim=1)
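# In marginalize above, doc_logprobs has shape (batch_size, n_docs) and is broadcast over the
# sequence and vocabulary dimensions, so for every decoding step t the model computes
# log p(y_t | x, y_<t) = logsumexp over z of [ log p(z | x) + log p(y_t | x, z, y_<t) ],
# i.e. token-level marginalization over the retrieved documents (contrast with the
# sequence-level marginalization in RagSequenceForGeneration.get_nll).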
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, encoder_outputs: Optional[tuple[tuple[torch.Tensor]]]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, past_key_values: Optional[Cache]=None, context_input_ids: Optional[torch.LongTensor]=None, context_attention_mask: Optional[torch.LongTensor]=None, doc_scores: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_retrieved: Optional[bool]=None, do_marginalize: Optional[bool]=None, reduce_loss: Optional[bool]=None, labels: Optional[torch.LongTensor]=None, n_docs: Optional[int]=None, **kwargs) -> RetrievAugLMMarginOutput:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies
which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to
obtain the indices.
[What are input IDs?](../glossary#input-ids)
encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`,
*optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs *
sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the
generator's encoder.
Used by the ([`RagModel`]) model during decoding.
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Provide for generation tasks. `None` by default, construct as per instructions for the generator model
you're using with your RAG instance.
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever. If the model was not initialized with a `retriever`, `context_input_ids` has to be provided to
the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`,*optional*, returned when *output_retrieved=True*):
Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever. If the model is not initialized with a `retriever`, `context_attention_mask` has to be
provided to the forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`].
doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
`question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `doc_scores`
has to be provided to the forward pass. `doc_scores` can be computed via
`question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information.
output_retrieved (`bool`, *optional*):
Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask`. See returned tensors for more detail.
do_marginalize (`bool`, *optional*):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*):
Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum`
operation.
n_docs (`int`, *optional*):
The number of documents to retrieve.
Example:
```python
>>> from transformers import AutoTokenizer, RagRetriever, RagTokenForGeneration
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq")
>>> retriever = RagRetriever.from_pretrained(
... "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
... )
>>> # initialize with RagRetriever to do everything in one forward call
>>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
>>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
>>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt")
>>> input_ids = inputs["input_ids"]
>>> labels = targets["input_ids"]
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> # or use retriever separately
>>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", use_dummy_dataset=True)
>>> # 1. Encode
>>> question_hidden_states = model.question_encoder(input_ids)[0]
>>> # 2. Retrieve
>>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt")
>>> doc_scores = torch.bmm(
... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)
... ).squeeze(1)
>>> # 3. Forward to generator
>>> outputs = model(
... context_input_ids=docs_dict["context_input_ids"],
... context_attention_mask=docs_dict["context_attention_mask"],
... doc_scores=doc_scores,
... decoder_input_ids=labels,
... )
>>> # or directly generate
>>> generated = model.generate(
... context_input_ids=docs_dict["context_input_ids"],
... context_attention_mask=docs_dict["context_attention_mask"],
... doc_scores=doc_scores,
... )
>>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)
```"""
n_docs = n_docs if n_docs is not None else self.config.n_docs
do_marginalize = do_marginalize if do_marginalize is not None else self.config.do_marginalize
reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss
if labels is not None:
if decoder_input_ids is None:
decoder_input_ids = labels
use_cache = False
outputs = self.rag(input_ids=input_ids, attention_mask=attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_retrieved=output_retrieved, n_docs=n_docs)
loss = None
logits = outputs.logits
if labels is not None:
assert decoder_input_ids is not None
loss = self.get_nll(outputs.logits, outputs.doc_scores, labels, reduce_loss=reduce_loss, epsilon=self.config.label_smoothing, n_docs=n_docs)
if do_marginalize:
logits = self.marginalize(logits, outputs.doc_scores, n_docs)
return RetrievAugLMMarginOutput(loss=loss, logits=logits, doc_scores=outputs.doc_scores, past_key_values=outputs.past_key_values, context_input_ids=outputs.context_input_ids, context_attention_mask=outputs.context_attention_mask, retrieved_doc_embeds=outputs.retrieved_doc_embeds, retrieved_doc_ids=outputs.retrieved_doc_ids, question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, question_enc_hidden_states=outputs.question_enc_hidden_states, question_enc_attentions=outputs.question_enc_attentions, generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, generator_enc_hidden_states=outputs.generator_enc_hidden_states, generator_enc_attentions=outputs.generator_enc_attentions, generator_dec_hidden_states=outputs.generator_dec_hidden_states, generator_dec_attentions=outputs.generator_dec_attentions, generator_cross_attentions=outputs.generator_cross_attentions)
@torch.no_grad()
def generate(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, context_input_ids: Optional[torch.LongTensor]=None, context_attention_mask: Optional[torch.LongTensor]=None, doc_scores: Optional[torch.FloatTensor]=None, n_docs: Optional[int]=None, generation_config: Optional[GenerationConfig]=None, prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], list[int]]]=None, logits_processor: Optional[LogitsProcessorList]=LogitsProcessorList(), stopping_criteria: Optional[StoppingCriteriaList]=StoppingCriteriaList(), **kwargs) -> torch.LongTensor:
"""
Implements RAG token decoding.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
The sequence used as a prompt for the generation. If `input_ids` is not passed, then
`context_input_ids` has to be provided.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever.
If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the
forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever.
If the model is not initialized with a `retriever`, `context_attention_mask` has to be provided to the
forward pass. `context_attention_mask` is returned by [`~RagRetriever.__call__`].
doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
`question_encoder_last_hidden_state`.
If the model is not initialized with a `retriever`, `doc_scores` has to be provided to the
forward pass. `doc_scores` can be computed via `question_encoder_last_hidden_state` and `retrieved_doc_embeds`.
n_docs (`int`, *optional*, defaults to `config.n_docs`)
Number of documents to retrieve and/or number of documents for which to generate an answer.
generation_config (`~generation.GenerationConfig`, *optional*):
The generation configuration to be used as base parametrization for the generation call. `**kwargs`
passed to generate matching the attributes of `generation_config` will override them. If
`generation_config` is not provided, the default will be used, which has the following loading
priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
default values, whose documentation should be checked to parameterize generation.
prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], list[int]]`, *optional*):
If provided, this function constraints the beam search to allowed tokens only at each step. If not
provided no constraint is applied. This function takes 2 arguments `inputs_ids` and the batch ID
`batch_id`. It has to return a list with the allowed tokens for the next generation step conditioned on
the previously generated tokens `inputs_ids` and the batch ID `batch_id`. This argument is useful for
constrained generation conditioned on the prefix, as described in [Autoregressive Entity
Retrieval](https://huggingface.co/papers/2010.00904).
logits_processor (`LogitsProcessorList`, *optional*):
Custom logits processors that complement the default logits processors built from arguments and a
model's config. If a logit processor is passed that is already created with the arguments or a model's
config an error is thrown.
stopping_criteria (`StoppingCriteriaList`, *optional*):
Custom stopping criteria that complement the default stopping criteria built from arguments and a
model's config. If a stopping criteria is passed that is already created with the arguments or a
model's config an error is thrown.
kwargs (`dict[str, Any]`, *optional*):
Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
forwarded to the `forward` function of the model.
Return:
`torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated
sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches
finished early due to the `eos_token_id`.
"""
if generation_config is None:
generation_config = self.generation_config
generation_config = copy.deepcopy(generation_config)
model_kwargs = generation_config.update(**kwargs)
kwargs_has_attention_mask = model_kwargs.get('attention_mask', None) is not None
self._prepare_special_tokens(generation_config, kwargs_has_attention_mask)
n_docs = n_docs if n_docs is not None else self.config.n_docs
if self.retriever is not None and context_input_ids is None:
question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
out = self.retriever(input_ids, question_hidden_states.detach().to(device='cpu', dtype=torch.float32).numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors='pt')
context_input_ids, context_attention_mask, retrieved_doc_embeds = (out['context_input_ids'], out['context_attention_mask'], out['retrieved_doc_embeds'])
retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
context_input_ids = context_input_ids.to(input_ids)
context_attention_mask = context_attention_mask.to(input_ids)
doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(1)
assert context_input_ids.shape[0] % n_docs == 0, f'The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is {context_input_ids.shape[0]}.'
batch_size = context_input_ids.shape[0] // n_docs
encoder = self.rag.generator.get_encoder()
encoder_outputs = encoder(input_ids=context_input_ids, attention_mask=context_attention_mask, return_dict=True)
input_ids = torch.full((batch_size * generation_config.num_beams, 1), generation_config.decoder_start_token_id, dtype=torch.long, device=next(self.parameters()).device)
input_ids_seq_length = input_ids.shape[-1]
last_hidden_state = encoder_outputs['last_hidden_state']
def extend_enc_output(tensor, num_beams=None):
tensor = tensor[None, None, :].reshape((batch_size, 1, n_docs) + tensor.shape[1:])
tensor = tensor.expand((batch_size, num_beams, n_docs) + tensor.shape[3:])
return tensor.reshape((batch_size * num_beams * n_docs,) + tensor.shape[3:])
context_attention_mask = extend_enc_output(context_attention_mask, num_beams=generation_config.num_beams)
encoder_outputs['last_hidden_state'] = extend_enc_output(last_hidden_state, num_beams=generation_config.num_beams)
doc_scores = doc_scores.repeat_interleave(generation_config.num_beams, dim=0)
model_kwargs['doc_scores'] = doc_scores
model_kwargs['encoder_outputs'] = encoder_outputs
model_kwargs['attention_mask'] = context_attention_mask
model_kwargs['n_docs'] = n_docs
pre_processor = self._get_logits_processor(generation_config=generation_config, input_ids_seq_length=input_ids_seq_length, encoder_input_ids=context_input_ids, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, logits_processor=logits_processor, device=input_ids.device)
prepared_stopping_criteria = self._get_stopping_criteria(generation_config=generation_config, stopping_criteria=stopping_criteria)
self._prepare_cache_for_generation(generation_config, model_kwargs, generation_mode=None, batch_size=input_ids.shape[0], max_cache_length=generation_config.max_length - 1)
if generation_config.num_beams == 1:
if generation_config.num_return_sequences > 1:
raise ValueError(f'num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing greedy search.')
return self._sample(input_ids, logits_processor=pre_processor, stopping_criteria=prepared_stopping_criteria, generation_config=generation_config, synced_gpus=False, streamer=None, **model_kwargs)
elif generation_config.num_beams > 1:
if generation_config.num_return_sequences > generation_config.num_beams:
raise ValueError('`num_return_sequences` has to be smaller or equal to `num_beams`.')
return self._beam_search(input_ids, logits_processor=pre_processor, stopping_criteria=prepared_stopping_criteria, generation_config=generation_config, synced_gpus=False, **model_kwargs)
else:
raise ValueError(f'`num_beams` has to be a positive integer (≥ 1), but is {generation_config.num_beams}.')
def _temporary_reorder_cache(self, past_key_values, beam_idx):
past_key_values = self._reorder_cache(past_key_values, beam_idx)
return past_key_values
def get_input_embeddings(self):
return self.rag.generator.get_input_embeddings()
def get_output_embeddings(self):
return self.rag.generator.get_output_embeddings()
def set_output_embeddings(self, new_embeddings):
return self.rag.generator.set_output_embeddings(new_embeddings)
def shift_tokens_right(self, input_ids, start_token_id=None):
"""Shift input ids one token to the right, and pad with start_token_id"""
if start_token_id is None:
start_token_id = self.config.decoder_start_token_id
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = start_token_id
return shifted_input_ids
def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None):
n_docs = n_docs if n_docs is not None else self.config.n_docs
target = torch.cat([target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1)
def _mask_pads(ll, smooth_obj):
pad_mask = target.eq(self.config.generator.pad_token_id)
if pad_mask.any():
ll.masked_fill_(pad_mask, 0.0)
smooth_obj.masked_fill_(pad_mask, 0.0)
return (ll.squeeze(-1), smooth_obj.squeeze(-1))
rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs)
target = target.unsqueeze(-1)
assert target.dim() == rag_logprobs.dim()
ll = rag_logprobs.gather(dim=-1, index=target)
smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True)
ll, smooth_obj = _mask_pads(ll, smooth_obj)
ll = ll.sum(1)
smooth_obj = smooth_obj.sum(1)
nll_loss = -ll
smooth_loss = -smooth_obj
if reduce_loss:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / rag_logprobs.size(-1)
loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
return loss
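For orientation, a minimal usage sketch of this token-level decoding path (the checkpoint name, question and generation hyperparameters are purely illustrative, and the `datasets`/`faiss` extras needed by `RagRetriever` are assumed to be installed):
```python
from transformers import RagRetriever, RagTokenForGeneration, RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
retriever = RagRetriever.from_pretrained(
    "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
)
model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)

inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
# With a retriever attached, generate() builds `context_input_ids` and `doc_scores` itself;
# without one, they have to be passed explicitly as described in the docstring above.
generated = model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    num_beams=2,
    max_length=20,
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```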
| null | 28
| 5
| 27
| 3
| 17
| 7
| 2
| 0.41
| 1
| 11
| 5
| 0
| 15
| 2
| 16
| 18
| 510
| 66
| 318
| 110
| 241
| 130
| 139
| 53
| 119
| 8
| 2
| 2
| 41
|
4,815
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rag/modeling_rag.py
|
transformers.models.rag.modeling_rag.RetrievAugLMMarginOutput
|
from ...cache_utils import Cache, EncoderDecoderCache
from typing import Callable, Optional, Union
from dataclasses import dataclass
import torch
from ...utils import auto_docstring, logging
from ...modeling_outputs import ModelOutput
@dataclass
@auto_docstring(custom_intro='\n Base class for retriever augmented marginalized models outputs.\n ')
class RetrievAugLMMarginOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
each vocabulary token.
doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
`question_encoder_last_hidden_state`.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
(see `past_key_values` input) to speed up sequential decoding.
retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute
the `doc_scores`.
retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
The indexes of the embedded documents retrieved by the retriever.
context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever.
question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden states at the output of the last layer of the question encoder pooled output of the
model.
question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the question encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
doc_scores: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
retrieved_doc_embeds: Optional[torch.FloatTensor] = None
retrieved_doc_ids: Optional[torch.LongTensor] = None
context_input_ids: Optional[torch.LongTensor] = None
context_attention_mask: Optional[torch.LongTensor] = None
question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None
question_enc_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
question_enc_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None
generator_enc_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
generator_enc_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
generator_dec_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
generator_dec_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
generator_cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
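As a quick orientation, the fields above are typically consumed as follows (a sketch only; `model`, `inputs` and `labels` stand in for a RAG model with a retriever attached and a tokenized question/answer pair):
```python
outputs = model(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    labels=labels,
    output_retrieved=True,
)

outputs.loss                # populated because `labels` was passed
outputs.logits              # prediction scores, possibly marginalized over the retrieved documents
outputs.doc_scores          # (batch_size, n_docs) retrieval scores
outputs.retrieved_doc_ids   # populated because output_retrieved=True was passed
```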
|
@dataclass
@auto_docstring(custom_intro='\n Base class for retriever augmented marginalized models outputs.\n ')
class RetrievAugLMMarginOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
each vocabulary token.
doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
`question_encoder_last_hidden_state`.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
(see `past_key_values` input) to speed up sequential decoding.
retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute
the `doc_scores`.
retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
The indexes of the embedded documents retrieved by the retriever.
context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever.
question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden states at the output of the last layer of the question encoder pooled output of the
model.
question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the question encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.56
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 92
| 10
| 18
| 18
| 17
| 64
| 18
| 18
| 17
| 0
| 1
| 0
| 0
|
4,816
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rag/modeling_rag.py
|
transformers.models.rag.modeling_rag.RetrievAugLMOutput
|
from ...modeling_outputs import ModelOutput
from ...cache_utils import Cache, EncoderDecoderCache
from dataclasses import dataclass
from ...utils import auto_docstring, logging
import torch
from typing import Callable, Optional, Union
@dataclass
@auto_docstring
class RetrievAugLMOutput(ModelOutput):
"""
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
each vocabulary token.
doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
`question_encoder_last_hidden_state`.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
(see `past_key_values` input) to speed up sequential decoding.
retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute
the `doc_scores`.
retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
The indexes of the embedded documents retrieved by the retriever.
context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever.
question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden states at the output of the last layer of the question encoder pooled output of the
model.
question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the question encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
"""
logits: Optional[torch.FloatTensor] = None
doc_scores: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
retrieved_doc_embeds: Optional[torch.FloatTensor] = None
retrieved_doc_ids: Optional[torch.LongTensor] = None
context_input_ids: Optional[torch.LongTensor] = None
context_attention_mask: Optional[torch.LongTensor] = None
question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None
question_enc_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
question_enc_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None
generator_enc_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
generator_enc_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
generator_dec_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
generator_dec_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
generator_cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
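The `doc_scores` field is what ties the retrieval and generation halves together. A small sketch of the token-level marginalization that combines it with the generator logits (shapes are illustrative; this mirrors, rather than calls, the model's own marginalization helper):
```python
import torch

batch_size, n_docs, seq_len, vocab_size = 2, 5, 7, 50264
seq_logits = torch.randn(batch_size * n_docs, seq_len, vocab_size)  # generator logits per (query, doc) pair
doc_scores = torch.randn(batch_size, n_docs)                        # retrieval scores per query

seq_logprobs = torch.log_softmax(seq_logits, dim=-1).view(batch_size, n_docs, seq_len, vocab_size)
doc_logprobs = torch.log_softmax(doc_scores, dim=1)[:, :, None, None]
marginalized = torch.logsumexp(seq_logprobs + doc_logprobs, dim=1)   # (batch_size, seq_len, vocab_size)
```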
|
@dataclass
@auto_docstring
class RetrievAugLMOutput(ModelOutput):
'''
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
each vocabulary token.
doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
`question_encoder_last_hidden_state`.
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains precomputed hidden-states (key and values in the attention blocks) of the decoder that can be used
(see `past_key_values` input) to speed up sequential decoding.
retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
Embedded documents retrieved by the retriever. Is used with `question_encoder_last_hidden_state` to compute
the `doc_scores`.
retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
The indexes of the embedded documents retrieved by the retriever.
context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Input ids post-processed from the retrieved documents and the question encoder input_ids by the retriever.
context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
retriever.
question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden states at the output of the last layer of the question encoder pooled output of the
model.
question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the question encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the generator encoder of the model.
generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the generator encoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the generator decoder, after the attention softmax, used to compute the weighted
average in the self-attention heads.
generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Cross-attentions weights of the generator decoder, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 3.59
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 87
| 9
| 17
| 17
| 16
| 61
| 17
| 17
| 16
| 0
| 1
| 0
| 0
|
4,817
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rag/retrieval_rag.py
|
transformers.models.rag.retrieval_rag.CanonicalHFIndex
|
from ...utils import cached_file, is_datasets_available, is_faiss_available, logging, requires_backends, strtobool
from typing import Optional
class CanonicalHFIndex(HFIndexBase):
"""
A wrapper around an instance of [`~datasets.Datasets`]. If `index_path` is set to `None`, we load the pre-computed
index available with the [`~datasets.arrow_dataset.Dataset`], otherwise, we load the index from the indicated path
on disk.
Args:
vector_size (`int`): the dimension of the passages embeddings used by the index
dataset_name (`str`, optional, defaults to `wiki_dpr`):
A dataset identifier of the indexed dataset on HuggingFace AWS bucket (list all available datasets and ids
with `datasets.list_datasets()`).
dataset_split (`str`, optional, defaults to `train`):
Which split of the `dataset` to load.
index_name (`str`, optional, defaults to `train`):
The index_name of the index associated with the `dataset`. The index loaded from `index_path` will be saved
under this name.
index_path (`str`, optional, defaults to `None`):
The path to the serialized faiss index on disk.
use_dummy_dataset (`bool`, optional, defaults to `False`):
If True, use the dummy configuration of the dataset for tests.
"""
def __init__(self, vector_size: int, dataset_name: str='wiki_dpr', dataset_split: str='train', index_name: Optional[str]=None, index_path: Optional[str]=None, use_dummy_dataset=False, dataset_revision=None):
requires_backends(self, ['faiss'])
if int(index_path is None) + int(index_name is None) != 1:
raise ValueError('Please provide `index_name` or `index_path`.')
self.dataset_name = dataset_name
self.dataset_split = dataset_split
self.index_name = index_name
self.index_path = index_path
self.use_dummy_dataset = use_dummy_dataset
self.dataset_revision = dataset_revision
logger.info(f'Loading passages from {self.dataset_name}')
dataset = load_dataset(self.dataset_name, with_index=False, split=self.dataset_split, dummy=self.use_dummy_dataset, revision=dataset_revision)
super().__init__(vector_size, dataset, index_initialized=False)
def init_index(self):
if self.index_path is not None:
logger.info(f'Loading index from {self.index_path}')
self.dataset.load_faiss_index('embeddings', file=self.index_path)
else:
logger.info(f'Loading index from {self.dataset_name} with index name {self.index_name}')
self.dataset = load_dataset(self.dataset_name, with_embeddings=True, with_index=True, split=self.dataset_split, index_name=self.index_name, dummy=self.use_dummy_dataset, revision=self.dataset_revision)
self.dataset.set_format('numpy', columns=['embeddings'], output_all_columns=True)
self._index_initialized = True
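A minimal sketch of using this index directly (it assumes the `datasets` and `faiss` extras are available; the dummy configuration keeps the `wiki_dpr` download small, and `question_hidden_states` is a random stand-in for real DPR question embeddings):
```python
import numpy as np

index = CanonicalHFIndex(
    vector_size=768,              # DPR passage-embedding size
    dataset_name="wiki_dpr",
    dataset_split="train",
    index_name="compressed",
    use_dummy_dataset=True,
)
index.init_index()                # loads the faiss index; done once per process

question_hidden_states = np.random.randn(1, 768).astype("float32")
doc_ids, doc_vectors = index.get_top_docs(question_hidden_states, n_docs=5)
doc_dicts = index.get_doc_dicts(doc_ids)   # titles/texts of the retrieved passages
```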
|
class CanonicalHFIndex(HFIndexBase):
'''
A wrapper around an instance of [`~datasets.Datasets`]. If `index_path` is set to `None`, we load the pre-computed
index available with the [`~datasets.arrow_dataset.Dataset`], otherwise, we load the index from the indicated path
on disk.
Args:
vector_size (`int`): the dimension of the passages embeddings used by the index
dataset_name (`str`, optional, defaults to `wiki_dpr`):
A dataset identifier of the indexed dataset on HuggingFace AWS bucket (list all available datasets and ids
with `datasets.list_datasets()`).
dataset_split (`str`, optional, defaults to `train`):
Which split of the `dataset` to load.
index_name (`str`, optional, defaults to `train`):
The index_name of the index associated with the `dataset`. The index loaded from `index_path` will be saved
under this name.
index_path (`str`, optional, defaults to `None`):
The path to the serialized faiss index on disk.
use_dummy_dataset (`bool`, optional, defaults to `False`):
If True, use the dummy configuration of the dataset for tests.
'''
def __init__(self, vector_size: int, dataset_name: str='wiki_dpr', dataset_split: str='train', index_name: Optional[str]=None, index_path: Optional[str]=None, use_dummy_dataset=False, dataset_revision=None):
pass
def init_index(self):
pass
| 3
| 1
| 22
| 0
| 22
| 0
| 2
| 0.42
| 1
| 4
| 0
| 0
| 2
| 8
| 2
| 12
| 67
| 3
| 45
| 21
| 33
| 19
| 21
| 12
| 18
| 2
| 2
| 1
| 4
|
4,818
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rag/retrieval_rag.py
|
transformers.models.rag.retrieval_rag.CustomHFIndex
|
from ...utils import cached_file, is_datasets_available, is_faiss_available, logging, requires_backends, strtobool
class CustomHFIndex(HFIndexBase):
"""
A wrapper around an instance of [`~datasets.Datasets`]. The dataset and the index are both loaded from the
indicated paths on disk.
Args:
vector_size (`int`): the dimension of the passages embeddings used by the index
dataset_path (`str`):
The path to the serialized dataset on disk. The dataset should have 3 columns: title (str), text (str) and
embeddings (arrays of dimension vector_size)
index_path (`str`):
The path to the serialized faiss index on disk.
"""
def __init__(self, vector_size: int, dataset, index_path=None):
requires_backends(self, ['faiss'])
super().__init__(vector_size, dataset, index_initialized=index_path is None)
self.index_path = index_path
@classmethod
def load_from_disk(cls, vector_size, dataset_path, index_path):
logger.info(f'Loading passages from {dataset_path}')
if dataset_path is None or index_path is None:
raise ValueError("Please provide `dataset_path` and `index_path` after calling `dataset.save_to_disk(dataset_path)` and `dataset.get_index('embeddings').save(index_path)`.")
dataset = load_from_disk(dataset_path)
return cls(vector_size=vector_size, dataset=dataset, index_path=index_path)
def init_index(self):
if not self.is_initialized():
logger.info(f'Loading index from {self.index_path}')
self.dataset.load_faiss_index('embeddings', file=self.index_path)
self._index_initialized = True
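A sketch of the intended round trip (paths are placeholders; the dataset must have `title`, `text` and `embeddings` columns and a saved faiss index, as shown in the `RagRetriever` docstring further down):
```python
import numpy as np

# Build/save step, done once elsewhere with the `datasets` library:
#   my_dataset.save_to_disk("path/to/my/dataset")
#   my_dataset.get_index("embeddings").save("path/to/my/index")

index = CustomHFIndex.load_from_disk(
    vector_size=768,
    dataset_path="path/to/my/dataset",
    index_path="path/to/my/index",
)
index.init_index()

question_hidden_states = np.random.randn(1, 768).astype("float32")  # stand-in for DPR question embeddings
doc_ids, doc_vectors = index.get_top_docs(question_hidden_states, n_docs=5)
doc_dicts = index.get_doc_dicts(doc_ids)
```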
|
class CustomHFIndex(HFIndexBase):
'''
A wrapper around an instance of [`~datasets.Datasets`]. The dataset and the index are both loaded from the
indicated paths on disk.
Args:
vector_size (`int`): the dimension of the passages embeddings used by the index
dataset_path (`str`):
The path to the serialized dataset on disk. The dataset should have 3 columns: title (str), text (str) and
embeddings (arrays of dimension vector_size)
index_path (`str`):
The path to the serialized faiss index on disk.
'''
def __init__(self, vector_size: int, dataset, index_path=None):
pass
@classmethod
def load_from_disk(cls, vector_size, dataset_path, index_path):
pass
def init_index(self):
pass
| 5
| 1
| 6
| 0
| 6
| 0
| 2
| 0.58
| 1
| 3
| 0
| 0
| 2
| 2
| 3
| 13
| 34
| 4
| 19
| 8
| 14
| 11
| 15
| 7
| 11
| 2
| 2
| 1
| 5
|
4,819
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rag/retrieval_rag.py
|
transformers.models.rag.retrieval_rag.HFIndexBase
|
from ...utils import cached_file, is_datasets_available, is_faiss_available, logging, requires_backends, strtobool
import numpy as np
class HFIndexBase(Index):
def __init__(self, vector_size, dataset, index_initialized=False):
requires_backends(self, ['faiss'])
self.vector_size = vector_size
self.dataset = dataset
self._index_initialized = index_initialized
self._check_dataset_format(with_index=index_initialized)
dataset.set_format('numpy', columns=['embeddings'], output_all_columns=True, dtype='float32')
def _check_dataset_format(self, with_index: bool):
if not isinstance(self.dataset, Dataset):
raise TypeError(f'Dataset should be a datasets.Dataset object, but got {type(self.dataset)}')
if len({'title', 'text', 'embeddings'} - set(self.dataset.column_names)) > 0:
raise ValueError(f'Dataset should be a dataset with the following columns: title (str), text (str) and embeddings (arrays of dimension vector_size), but got columns {self.dataset.column_names}')
if with_index and 'embeddings' not in self.dataset.list_indexes():
raise ValueError('Missing faiss index in the dataset. Make sure you called `dataset.add_faiss_index` to compute it or `dataset.load_faiss_index` to load one from the disk.')
def init_index(self):
raise NotImplementedError()
def is_initialized(self):
return self._index_initialized
def get_doc_dicts(self, doc_ids: np.ndarray) -> list[dict]:
return [self.dataset[doc_ids[i].tolist()] for i in range(doc_ids.shape[0])]
def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> tuple[np.ndarray, np.ndarray]:
_, ids = self.dataset.search_batch('embeddings', question_hidden_states, n_docs)
docs = [self.dataset[[i for i in indices if i >= 0]] for indices in ids]
vectors = [doc['embeddings'] for doc in docs]
for i in range(len(vectors)):
if len(vectors[i]) < n_docs:
vectors[i] = np.vstack([vectors[i], np.zeros((n_docs - len(vectors[i]), self.vector_size))])
return (np.array(ids), np.array(vectors))
|
class HFIndexBase(Index):
def __init__(self, vector_size, dataset, index_initialized=False):
pass
def _check_dataset_format(self, with_index: bool):
pass
def init_index(self):
pass
def is_initialized(self):
pass
def get_doc_dicts(self, doc_ids: np.ndarray) -> list[dict]:
pass
def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> tuple[np.ndarray, np.ndarray]:
pass
| 7
| 0
| 6
| 0
| 6
| 0
| 2
| 0.03
| 1
| 9
| 0
| 2
| 6
| 3
| 6
| 10
| 40
| 5
| 35
| 13
| 28
| 1
| 28
| 13
| 21
| 4
| 1
| 2
| 11
|
4,820
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rag/retrieval_rag.py
|
transformers.models.rag.retrieval_rag.Index
|
import numpy as np
class Index:
"""
A base class for the Indices encapsulated by the [`RagRetriever`].
"""
def get_doc_dicts(self, doc_ids: np.ndarray) -> list[dict]:
"""
Returns a list of dictionaries, containing titles and text of the retrieved documents.
Args:
doc_ids (`np.ndarray` of shape `(batch_size, n_docs)`):
A tensor of document indices.
"""
raise NotImplementedError
def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> tuple[np.ndarray, np.ndarray]:
"""
For each query in the batch, retrieves `n_docs` documents.
Args:
question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
An array of query vectors.
n_docs (`int`):
The number of docs retrieved per query.
Returns:
`np.ndarray` of shape `(batch_size, n_docs)`: A tensor of indices of retrieved documents. `np.ndarray` of
shape `(batch_size, vector_size)`: A tensor of vector representations of retrieved documents.
"""
raise NotImplementedError
def is_initialized(self):
"""
Returns `True` if index is already initialized.
"""
raise NotImplementedError
def init_index(self):
"""
A function responsible for loading the index into memory. Should be called only once per training run of a RAG
model. E.g. if the model is trained on multiple GPUs in a distributed setup, only one of the workers will load
the index.
"""
raise NotImplementedError
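To make the contract concrete, here is a toy in-memory implementation of this interface (illustrative only, not part of the library): exact inner-product search over a small matrix of passage embeddings with NumPy.
```python
import numpy as np


class InMemoryIndex(Index):
    def __init__(self, passages, embeddings):
        self.passages = passages                          # list of {"title": ..., "text": ...}
        self.embeddings = embeddings.astype("float32")    # (num_passages, vector_size)
        self._initialized = False

    def init_index(self):
        self._initialized = True                          # nothing to load for an in-memory index

    def is_initialized(self):
        return self._initialized

    def get_top_docs(self, question_hidden_states, n_docs=5):
        scores = question_hidden_states @ self.embeddings.T        # (batch_size, num_passages)
        ids = np.argsort(-scores, axis=1)[:, :n_docs]              # (batch_size, n_docs)
        vectors = self.embeddings[ids]                             # (batch_size, n_docs, vector_size)
        return ids, vectors

    def get_doc_dicts(self, doc_ids):
        return [
            {
                "title": [self.passages[i]["title"] for i in row],
                "text": [self.passages[i]["text"] for i in row],
            }
            for row in doc_ids
        ]
```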
|
class Index:
'''
A base class for the Indices encapsulated by the [`RagRetriever`].
'''
def get_doc_dicts(self, doc_ids: np.ndarray) -> list[dict]:
'''
Returns a list of dictionaries, containing titles and text of the retrieved documents.
Args:
doc_ids (`np.ndarray` of shape `(batch_size, n_docs)`):
A tensor of document indices.
'''
pass
def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> tuple[np.ndarray, np.ndarray]:
'''
For each query in the batch, retrieves `n_docs` documents.
Args:
question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
An array of query vectors.
n_docs (`int`):
The number of docs retrieved per query.
Returns:
`np.ndarray` of shape `(batch_size, n_docs)`: A tensor of indices of retrieved documents. `np.ndarray` of
shape `(batch_size, vector_size)`: A tensor of vector representations of retrieved documents.
'''
pass
def is_initialized(self):
'''
Returns `True` if index is already initialized.
'''
pass
def init_index(self):
'''
A function responsible for loading the index into memory. Should be called only once per training run of a RAG
model. E.g. if the model is trained on multiple GPUs in a distributed setup, only one of the workers will load
the index.
'''
pass
| 5
| 5
| 9
| 1
| 2
| 6
| 1
| 3.11
| 0
| 2
| 0
| 2
| 4
| 0
| 4
| 4
| 44
| 7
| 9
| 5
| 4
| 28
| 9
| 5
| 4
| 1
| 0
| 0
| 4
|
4,821
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rag/retrieval_rag.py
|
transformers.models.rag.retrieval_rag.LegacyIndex
|
import pickle
import os
import numpy as np
from ...utils import cached_file, is_datasets_available, is_faiss_available, logging, requires_backends, strtobool
class LegacyIndex(Index):
"""
An index which can be deserialized from the files built using https://github.com/facebookresearch/DPR. We use
default faiss index parameters as specified in that repository.
Args:
vector_size (`int`):
The dimension of indexed vectors.
index_path (`str`):
A path to a *directory* containing index files compatible with [`~models.rag.retrieval_rag.LegacyIndex`]
"""
INDEX_FILENAME = 'hf_bert_base.hnswSQ8_correct_phi_128.c_index'
PASSAGE_FILENAME = 'psgs_w100.tsv.pkl'
def __init__(self, vector_size, index_path):
requires_backends(self, ['faiss'])
self.index_id_to_db_id = []
self.index_path = index_path
self.passages = self._load_passages()
self.vector_size = vector_size
self.index = None
self._index_initialized = False
def _resolve_path(self, index_path, filename):
is_local = os.path.isdir(index_path)
try:
resolved_archive_file = cached_file(index_path, filename)
except OSError:
msg = f"Can't load '{filename}'. Make sure that:\n\n- '{index_path}' is a correct remote path to a directory containing a file named {filename}\n\n- or '{index_path}' is the correct path to a directory containing a file named {filename}.\n\n"
raise OSError(msg)
if is_local:
logger.info(f'loading file {resolved_archive_file}')
else:
logger.info(f'loading file {filename} from cache at {resolved_archive_file}')
return resolved_archive_file
def _load_passages(self):
logger.info(f'Loading passages from {self.index_path}')
passages_path = self._resolve_path(self.index_path, self.PASSAGE_FILENAME)
if not strtobool(os.environ.get('TRUST_REMOTE_CODE', 'False')):
raise ValueError("This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially malicious. It's recommended to never unpickle data that could have come from an untrusted source, or that could have been tampered with. If you already verified the pickle data and decided to use it, you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it.")
with open(passages_path, 'rb') as passages_file:
passages = pickle.load(passages_file)
return passages
def _deserialize_index(self):
logger.info(f'Loading index from {self.index_path}')
resolved_index_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + '.index.dpr')
self.index = faiss.read_index(resolved_index_path)
resolved_meta_path = self._resolve_path(self.index_path, self.INDEX_FILENAME + '.index_meta.dpr')
if not strtobool(os.environ.get('TRUST_REMOTE_CODE', 'False')):
raise ValueError("This part uses `pickle.load` which is insecure and will execute arbitrary code that is potentially malicious. It's recommended to never unpickle data that could have come from an untrusted source, or that could have been tampered with. If you already verified the pickle data and decided to use it, you can set the environment variable `TRUST_REMOTE_CODE` to `True` to allow it.")
with open(resolved_meta_path, 'rb') as metadata_file:
self.index_id_to_db_id = pickle.load(metadata_file)
assert len(self.index_id_to_db_id) == self.index.ntotal, 'Deserialized index_id_to_db_id should match faiss index size'
def is_initialized(self):
return self._index_initialized
def init_index(self):
index = faiss.IndexHNSWFlat(self.vector_size + 1, 512)
index.hnsw.efSearch = 128
index.hnsw.efConstruction = 200
self.index = index
self._deserialize_index()
self._index_initialized = True
def get_doc_dicts(self, doc_ids: np.ndarray):
doc_list = []
for doc_ids_i in doc_ids:
ids = [str(int(doc_id)) for doc_id in doc_ids_i]
docs = [self.passages[doc_id] for doc_id in ids]
doc_list.append(docs)
doc_dicts = []
for docs in doc_list:
doc_dict = {}
doc_dict['title'] = [doc[1] for doc in docs]
doc_dict['text'] = [doc[0] for doc in docs]
doc_dicts.append(doc_dict)
return doc_dicts
def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> tuple[np.ndarray, np.ndarray]:
aux_dim = np.zeros(len(question_hidden_states), dtype='float32').reshape(-1, 1)
query_nhsw_vectors = np.hstack((question_hidden_states, aux_dim))
_, docs_ids = self.index.search(query_nhsw_vectors, n_docs)
vectors = [[self.index.reconstruct(int(doc_id))[:-1] for doc_id in doc_ids] for doc_ids in docs_ids]
ids = [[int(self.index_id_to_db_id[doc_id]) for doc_id in doc_ids] for doc_ids in docs_ids]
return (np.array(ids), np.array(vectors))
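The `self.vector_size + 1` above is easy to trip over: the serialized DPR index was built over (vector_size + 1)-dimensional passage vectors (a common trick for running maximum inner-product search on an L2-based HNSW index), so queries get a zero auxiliary column appended before searching. A minimal sketch of that padding:
```python
import numpy as np

question_hidden_states = np.random.randn(2, 768).astype("float32")     # DPR-sized query vectors
aux_dim = np.zeros((len(question_hidden_states), 1), dtype="float32")  # queries carry 0 in the extra slot
query_vectors = np.hstack((question_hidden_states, aux_dim))           # shape (2, 769), matches the index
```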
| null | 9
| 1
| 10
| 0
| 10
| 0
| 2
| 0.12
| 1
| 3
| 0
| 0
| 8
| 6
| 8
| 12
| 105
| 10
| 85
| 38
| 76
| 10
| 68
| 36
| 59
| 3
| 1
| 1
| 14
|
4,822
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rag/retrieval_rag.py
|
transformers.models.rag.retrieval_rag.RagRetriever
|
import numpy as np
from ...tokenization_utils_base import BatchEncoding
from .tokenization_rag import RagTokenizer
from ...utils import cached_file, is_datasets_available, is_faiss_available, logging, requires_backends, strtobool
from .configuration_rag import RagConfig
from collections.abc import Iterable
from ...tokenization_utils import PreTrainedTokenizer
import time
import os
class RagRetriever:
"""
Retriever used to get documents from vector queries. It retrieves the documents embeddings as well as the documents
contents, and it formats them to be used with a RagModel.
Args:
config ([`RagConfig`]):
The configuration of the RAG model this Retriever is used with. Contains parameters indicating which
`Index` to build. You can load your own custom dataset with `config.index_name="custom"` or use a canonical
one (default) from the datasets library with `config.index_name="wiki_dpr"` for example.
question_encoder_tokenizer ([`PreTrainedTokenizer`]):
The tokenizer that was used to tokenize the question. It is used to decode the question and then use the
generator_tokenizer.
generator_tokenizer ([`PreTrainedTokenizer`]):
The tokenizer used for the generator part of the RagModel.
index ([`~models.rag.retrieval_rag.Index`], optional, defaults to the one defined by the configuration):
If specified, use this index instead of the one built using the configuration
Examples:
```python
>>> # To load the default "wiki_dpr" dataset with 21M passages from wikipedia (index name is 'compressed' or 'exact')
>>> from transformers import RagRetriever
>>> retriever = RagRetriever.from_pretrained(
... "facebook/dpr-ctx_encoder-single-nq-base", dataset="wiki_dpr", index_name="compressed"
... )
>>> # To load your own indexed dataset built with the datasets library. More info on how to build the indexed dataset in examples/rag/use_own_knowledge_dataset.py
>>> from transformers import RagRetriever
>>> dataset = (
... ...
... ) # dataset must be a datasets.Datasets object with columns "title", "text" and "embeddings", and it must have a supported index (e.g., Faiss or other index types depending on your setup)
>>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", indexed_dataset=dataset)
>>> # To load your own indexed dataset built with the datasets library that was saved on disk. More info in examples/rag/use_own_knowledge_dataset.py
>>> from transformers import RagRetriever
>>> dataset_path = "path/to/my/dataset" # dataset saved via *dataset.save_to_disk(...)*
>>> index_path = "path/to/my/index" # index saved via *dataset.get_index("embeddings").save(...)*
>>> retriever = RagRetriever.from_pretrained(
... "facebook/dpr-ctx_encoder-single-nq-base",
... index_name="custom",
... passages_path=dataset_path,
... index_path=index_path,
... )
>>> # To load the legacy index built originally for Rag's paper
>>> from transformers import RagRetriever
>>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", index_name="legacy")
```"""
def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None, init_retrieval=True):
self._init_retrieval = init_retrieval
requires_backends(self, ['datasets'])
super().__init__()
self.index = index or self._build_index(config)
self.generator_tokenizer = generator_tokenizer
self.question_encoder_tokenizer = question_encoder_tokenizer
self.n_docs = config.n_docs
self.batch_size = config.retrieval_batch_size
self.config = config
if self._init_retrieval:
self.init_retrieval()
self.ctx_encoder_tokenizer = None
self.return_tokenized_docs = False
@staticmethod
def _build_index(config):
if config.index_name == 'legacy':
return LegacyIndex(config.retrieval_vector_size, config.index_path or LEGACY_INDEX_PATH)
elif config.index_name == 'custom':
return CustomHFIndex.load_from_disk(vector_size=config.retrieval_vector_size, dataset_path=config.passages_path, index_path=config.index_path)
else:
return CanonicalHFIndex(vector_size=config.retrieval_vector_size, dataset_name=config.dataset, dataset_split=config.dataset_split, index_name=config.index_name, index_path=config.index_path, use_dummy_dataset=config.use_dummy_dataset, dataset_revision=config.dataset_revision)
@classmethod
def from_pretrained(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
requires_backends(cls, ['datasets'])
config = kwargs.pop('config', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
question_encoder_tokenizer = rag_tokenizer.question_encoder
generator_tokenizer = rag_tokenizer.generator
if indexed_dataset is not None:
config.index_name = 'custom'
index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
else:
index = cls._build_index(config)
return cls(config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index)
def save_pretrained(self, save_directory):
if isinstance(self.index, CustomHFIndex):
if self.config.index_path is None:
index_path = os.path.join(save_directory, 'hf_dataset_index.faiss')
self.index.dataset.get_index('embeddings').save(index_path)
self.config.index_path = index_path
if self.config.passages_path is None:
passages_path = os.path.join(save_directory, 'hf_dataset')
faiss_index = self.index.dataset._indexes.pop('embeddings')
self.index.dataset.save_to_disk(passages_path)
self.index.dataset._indexes['embeddings'] = faiss_index
self.config.passages_path = passages_path
self.config.save_pretrained(save_directory)
rag_tokenizer = RagTokenizer(question_encoder=self.question_encoder_tokenizer, generator=self.generator_tokenizer)
rag_tokenizer.save_pretrained(save_directory)
def init_retrieval(self):
"""
Retriever initialization function. It loads the index into memory.
"""
logger.info('initializing retrieval')
self.index.init_index()
def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None):
"""
Postprocessing retrieved `docs` and combining them with `input_strings`.
Args:
docs (`dict`):
Retrieved documents.
input_strings (`str`):
Input strings decoded by `preprocess_query`.
prefix (`str`):
Prefix added at the beginning of each input, typically used with T5-based models.
Return:
`tuple(tensors)`: a tuple consisting of two elements: contextualized `input_ids` and a compatible
`attention_mask`.
"""
def cat_input_and_doc(doc_title, doc_text, input_string, prefix):
if doc_title.startswith('"'):
doc_title = doc_title[1:]
if doc_title.endswith('"'):
doc_title = doc_title[:-1]
if prefix is None:
prefix = ''
out = (prefix + doc_title + self.config.title_sep + doc_text + self.config.doc_sep + input_string).replace('  ', ' ')
return out
rag_input_strings = [cat_input_and_doc(docs[i]['title'][j], docs[i]['text'][j], input_strings[i], prefix) for i in range(len(docs)) for j in range(n_docs)]
contextualized_inputs = self.generator_tokenizer.batch_encode_plus(rag_input_strings, max_length=self.config.max_combined_length, return_tensors=return_tensors, padding='max_length', truncation=True)
return (contextualized_inputs['input_ids'], contextualized_inputs['attention_mask'])
def _chunk_tensor(self, t: Iterable, chunk_size: int) -> list[Iterable]:
return [t[i:i + chunk_size] for i in range(0, len(t), chunk_size)]
def _main_retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> tuple[np.ndarray, np.ndarray]:
question_hidden_states_batched = self._chunk_tensor(question_hidden_states, self.batch_size)
ids_batched = []
vectors_batched = []
for question_hidden_states in question_hidden_states_batched:
start_time = time.time()
ids, vectors = self.index.get_top_docs(question_hidden_states, n_docs)
logger.debug(f'index search time: {time.time() - start_time} sec, batch size {question_hidden_states.shape}')
ids_batched.extend(ids)
vectors_batched.extend(vectors)
return (np.array(ids_batched), np.array(vectors_batched))
def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> tuple[np.ndarray, np.ndarray, list[dict]]:
"""
Retrieves documents for specified `question_hidden_states`.
Args:
question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
A batch of query vectors to retrieve with.
n_docs (`int`):
The number of docs retrieved per query.
Return:
`tuple[np.ndarray, np.ndarray, list[dict]]`: A tuple with the following objects:
- **retrieved_doc_embeds** (`np.ndarray` of shape `(batch_size, n_docs, dim)`) -- The retrieval embeddings
of the retrieved docs per query.
- **doc_ids** (`np.ndarray` of shape `(batch_size, n_docs)`) -- The ids of the documents in the index
- **doc_dicts** (`list[dict]`) -- The text and title of the retrieved documents per query.
"""
doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
return (retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids))
def set_ctx_encoder_tokenizer(self, ctx_encoder_tokenizer: PreTrainedTokenizer):
self.ctx_encoder_tokenizer = ctx_encoder_tokenizer
self.return_tokenized_docs = True
def __call__(self, question_input_ids: list[list[int]], question_hidden_states: np.ndarray, prefix=None, n_docs=None, return_tensors=None) -> BatchEncoding:
"""
Retrieves documents for specified `question_hidden_states`.
Args:
question_input_ids (`list[list[int]]`): Batch of input ids.
question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
A batch of query vectors to retrieve with.
prefix (`str`, *optional*):
The prefix used by the generator's tokenizer.
n_docs (`int`, *optional*):
The number of docs retrieved per query.
return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to "pt"):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
- **context_input_ids** -- List of token ids to be fed to a model.
[What are input IDs?](../glossary#input-ids)
- **context_attention_mask** -- List of indices specifying which tokens should be attended to by the model
(when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
[What are attention masks?](../glossary#attention-mask)
- **retrieved_doc_embeds** -- List of embeddings of the retrieved documents
- **doc_ids** -- List of ids of the retrieved documents
"""
n_docs = n_docs if n_docs is not None else self.n_docs
prefix = prefix if prefix is not None else self.config.generator.prefix
retrieved_doc_embeds, doc_ids, docs = self.retrieve(question_hidden_states, n_docs)
input_strings = self.question_encoder_tokenizer.batch_decode(question_input_ids, skip_special_tokens=True)
context_input_ids, context_attention_mask = self.postprocess_docs(docs, input_strings, prefix, n_docs, return_tensors=return_tensors)
if self.return_tokenized_docs:
retrieved_doc_text = []
retrieved_doc_title = []
for b_idx in range(len(docs)):
for doc_idx in range(n_docs):
retrieved_doc_text.append(docs[b_idx]['text'][doc_idx])
retrieved_doc_title.append(docs[b_idx]['title'][doc_idx])
tokenized_docs = self.ctx_encoder_tokenizer(retrieved_doc_title, retrieved_doc_text, truncation=True, padding='longest', return_tensors=return_tensors)
return BatchEncoding({'context_input_ids': context_input_ids, 'context_attention_mask': context_attention_mask, 'retrieved_doc_embeds': retrieved_doc_embeds, 'doc_ids': doc_ids, 'tokenized_doc_ids': tokenized_docs['input_ids'], 'tokenized_doc_attention_mask': tokenized_docs['attention_mask']}, tensor_type=return_tensors)
else:
return BatchEncoding({'context_input_ids': context_input_ids, 'context_attention_mask': context_attention_mask, 'retrieved_doc_embeds': retrieved_doc_embeds, 'doc_ids': doc_ids}, tensor_type=return_tensors)
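# A minimal, self-contained sketch of how `postprocess_docs` combines retrieved docs with the
# query, mirroring `cat_input_and_doc` above. The separator strings and the toy doc/query below
# are illustrative assumptions, not values read from a real RagConfig.
title_sep, doc_sep, prefix = " / ", " // ", ""
docs = [{"title": ['"Aaron"'], "text": ["Aaron is a prophet."]}]
input_strings = ["who is aaron?"]
n_docs = 1

def cat_input_and_doc_sketch(doc_title, doc_text, input_string, prefix):
    doc_title = doc_title.strip('"')  # same intent as the quote-stripping above
    return (prefix + doc_title + title_sep + doc_text + doc_sep + input_string).replace("  ", " ")

rag_input_strings = [
    cat_input_and_doc_sketch(docs[i]["title"][j], docs[i]["text"][j], input_strings[i], prefix)
    for i in range(len(docs))
    for j in range(n_docs)
]
print(rag_input_strings)  # ['Aaron / Aaron is a prophet. // who is aaron?']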
|
class RagRetriever:
'''
Retriever used to get documents from vector queries. It retrieves the documents embeddings as well as the documents
contents, and it formats them to be used with a RagModel.
Args:
config ([`RagConfig`]):
The configuration of the RAG model this Retriever is used with. Contains parameters indicating which
`Index` to build. You can load your own custom dataset with `config.index_name="custom"` or use a canonical
one (default) from the datasets library with `config.index_name="wiki_dpr"` for example.
question_encoder_tokenizer ([`PreTrainedTokenizer`]):
The tokenizer that was used to tokenize the question. It is used to decode the question and then use the
generator_tokenizer.
generator_tokenizer ([`PreTrainedTokenizer`]):
The tokenizer used for the generator part of the RagModel.
index ([`~models.rag.retrieval_rag.Index`], optional, defaults to the one defined by the configuration):
If specified, use this index instead of the one built using the configuration
Examples:
```python
>>> # To load the default "wiki_dpr" dataset with 21M passages from wikipedia (index name is 'compressed' or 'exact')
>>> from transformers import RagRetriever
>>> retriever = RagRetriever.from_pretrained(
... "facebook/dpr-ctx_encoder-single-nq-base", dataset="wiki_dpr", index_name="compressed"
... )
>>> # To load your own indexed dataset built with the datasets library. More info on how to build the indexed dataset in examples/rag/use_own_knowledge_dataset.py
>>> from transformers import RagRetriever
>>> dataset = (
... ...
... ) # dataset must be a datasets.Dataset object with columns "title", "text" and "embeddings", and it must have a supported index (e.g., Faiss or other index types depending on your setup)
>>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", indexed_dataset=dataset)
>>> # To load your own indexed dataset built with the datasets library that was saved on disk. More info in examples/rag/use_own_knowledge_dataset.py
>>> from transformers import RagRetriever
>>> dataset_path = "path/to/my/dataset" # dataset saved via *dataset.save_to_disk(...)*
>>> index_path = "path/to/my/index" # index saved via *dataset.get_index("embeddings").save(...)*
>>> retriever = RagRetriever.from_pretrained(
... "facebook/dpr-ctx_encoder-single-nq-base",
... index_name="custom",
... passages_path=dataset_path,
... index_path=index_path,
... )
>>> # To load the legacy index built originally for Rag's paper
>>> from transformers import RagRetriever
>>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", index_name="legacy")
```'''
def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None, init_retrieval=True):
pass
@staticmethod
def _build_index(config):
pass
@classmethod
def from_pretrained(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
pass
def save_pretrained(self, save_directory):
pass
def init_retrieval(self):
'''
Retriever initialization function. It loads the index into memory.
'''
pass
def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None):
'''
Postprocessing retrieved `docs` and combining them with `input_strings`.
Args:
docs (`dict`):
Retrieved documents.
input_strings (`str`):
Input strings decoded by `preprocess_query`.
prefix (`str`):
Prefix added at the beginning of each input, typically used with T5-based models.
Return:
`tuple(tensors)`: a tuple consisting of two elements: contextualized `input_ids` and a compatible
`attention_mask`.
'''
pass
def cat_input_and_doc(doc_title, doc_text, input_string, prefix):
pass
def _chunk_tensor(self, t: Iterable, chunk_size: int) -> list[Iterable]:
pass
def _main_retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> tuple[np.ndarray, np.ndarray]:
pass
def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> tuple[np.ndarray, np.ndarray, list[dict]]:
'''
Retrieves documents for specified `question_hidden_states`.
Args:
question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
A batch of query vectors to retrieve with.
n_docs (`int`):
The number of docs retrieved per query.
Return:
`tuple[np.ndarray, np.ndarray, list[dict]]`: A tuple with the following objects:
- **retrieved_doc_embeds** (`np.ndarray` of shape `(batch_size, n_docs, dim)`) -- The retrieval embeddings
of the retrieved docs per query.
- **doc_ids** (`np.ndarray` of shape `(batch_size, n_docs)`) -- The ids of the documents in the index
- **doc_dicts** (`list[dict]`) -- The text and title of the retrieved documents per query.
'''
pass
def set_ctx_encoder_tokenizer(self, ctx_encoder_tokenizer: PreTrainedTokenizer):
pass
def __call__(self, question_input_ids: list[list[int]], question_hidden_states: np.ndarray, prefix=None, n_docs=None, return_tensors=None) -> BatchEncoding:
'''
Retrieves documents for specified `question_hidden_states`.
Args:
question_input_ids (`list[list[int]]`): Batch of input ids.
question_hidden_states (`np.ndarray` of shape `(batch_size, vector_size)`):
A batch of query vectors to retrieve with.
prefix (`str`, *optional*):
The prefix used by the generator's tokenizer.
n_docs (`int`, *optional*):
The number of docs retrieved per query.
return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to "pt"):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
- **context_input_ids** -- List of token ids to be fed to a model.
[What are input IDs?](../glossary#input-ids)
- **context_attention_mask** -- List of indices specifying which tokens should be attended to by the model
(when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
[What are attention masks?](../glossary#attention-mask)
- **retrieved_doc_embeds** -- List of embeddings of the retrieved documents
- **doc_ids** -- List of ids of the retrieved documents
'''
pass
| 15
| 5
| 23
| 2
| 16
| 5
| 2
| 0.56
| 0
| 11
| 7
| 0
| 9
| 9
| 11
| 11
| 331
| 50
| 181
| 57
| 159
| 101
| 97
| 48
| 84
| 6
| 0
| 3
| 28
|
4,823
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rag/tokenization_rag.py
|
transformers.models.rag.tokenization_rag.RagTokenizer
|
from .configuration_rag import RagConfig
from typing import Optional
import warnings
from ...tokenization_utils_base import BatchEncoding
import os
class RagTokenizer:
def __init__(self, question_encoder, generator):
self.question_encoder = question_encoder
self.generator = generator
self.current_tokenizer = self.question_encoder
def save_pretrained(self, save_directory):
if os.path.isfile(save_directory):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file')
os.makedirs(save_directory, exist_ok=True)
question_encoder_path = os.path.join(save_directory, 'question_encoder_tokenizer')
generator_path = os.path.join(save_directory, 'generator_tokenizer')
self.question_encoder.save_pretrained(question_encoder_path)
self.generator.save_pretrained(generator_path)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
from ..auto.tokenization_auto import AutoTokenizer
config = kwargs.pop('config', None)
if config is None:
config = RagConfig.from_pretrained(pretrained_model_name_or_path)
question_encoder = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, config=config.question_encoder, subfolder='question_encoder_tokenizer')
generator = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, config=config.generator, subfolder='generator_tokenizer')
return cls(question_encoder=question_encoder, generator=generator)
def __call__(self, *args, **kwargs):
return self.current_tokenizer(*args, **kwargs)
def batch_decode(self, *args, **kwargs):
return self.generator.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
return self.generator.decode(*args, **kwargs)
def _switch_to_input_mode(self):
self.current_tokenizer = self.question_encoder
def _switch_to_target_mode(self):
self.current_tokenizer = self.generator
def prepare_seq2seq_batch(self, src_texts: list[str], tgt_texts: Optional[list[str]]=None, max_length: Optional[int]=None, max_target_length: Optional[int]=None, padding: str='longest', return_tensors: Optional[str]=None, truncation: bool=True, **kwargs) -> BatchEncoding:
warnings.warn('`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` context manager to prepare your targets. See the documentation of your specific tokenizer for more details', FutureWarning)
if max_length is None:
max_length = self.current_tokenizer.model_max_length
model_inputs = self(src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs)
if tgt_texts is None:
return model_inputs
if max_target_length is None:
max_target_length = self.current_tokenizer.model_max_length
labels = self(text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs)
model_inputs['labels'] = labels['input_ids']
return model_inputs
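# Quick sketch of the on-disk layout implied by save_pretrained/from_pretrained above: both
# sub-tokenizers live in fixed subfolders of the same directory. The directory name is a
# hypothetical example, only the subfolder names come from the code above.
import os

save_directory = "rag_tokenizer_ckpt"  # hypothetical output directory
expected_subfolders = [
    os.path.join(save_directory, "question_encoder_tokenizer"),
    os.path.join(save_directory, "generator_tokenizer"),
]
print(expected_subfolders)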
|
class RagTokenizer:
def __init__(self, question_encoder, generator):
pass
def save_pretrained(self, save_directory):
pass
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
pass
def __call__(self, *args, **kwargs):
pass
def batch_decode(self, *args, **kwargs):
pass
def decode(self, *args, **kwargs):
pass
def _switch_to_input_mode(self):
pass
def _switch_to_target_mode(self):
pass
def prepare_seq2seq_batch(self, src_texts: list[str], tgt_texts: Optional[list[str]]=None, max_length: Optional[int]=None, max_target_length: Optional[int]=None, padding: str='longest', return_tensors: Optional[str]=None, truncation: bool=True, **kwargs) -> BatchEncoding:
pass
| 11
| 0
| 9
| 0
| 9
| 0
| 2
| 0.03
| 0
| 8
| 3
| 0
| 8
| 3
| 9
| 9
| 93
| 11
| 80
| 32
| 58
| 2
| 43
| 21
| 32
| 4
| 0
| 1
| 14
|
4,824
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/recurrent_gemma/configuration_recurrent_gemma.py
|
transformers.models.recurrent_gemma.configuration_recurrent_gemma.RecurrentGemmaConfig
|
from ...configuration_utils import PretrainedConfig
class RecurrentGemmaConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`RecurrentGemmaModel`]. It is used to instantiate a RecurrentGemma
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the RecurrentGemma-7B.
e.g. [google/recurrentgemma-2b](https://huggingface.co/google/recurrentgemma-2b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_hidden_layers (`int`, *optional*, defaults to 26):
The number of hidden layers in the model.
vocab_size (`int`, *optional*, defaults to 256000):
Vocabulary size of the RecurrentGemma model. Defines the number of
different tokens that can be represented by the
`inputs_ids` passed when calling [`RecurrentGemmaModel`]
hidden_size (`int`, *optional*, defaults to 2560):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 7680):
Dimension of the MLP representations.
num_attention_heads (`int`, *optional*, defaults to 10):
The number of heads for the attention block and the number of
heads/blocks for the block-diagonal layers used in the RG-LRU gates.
This number must divide `hidden_size` and `lru_width`.
lru_width (`int` or `None`, *optional*):
Dimension of the hidden representations of the RG-LRU. If `None`
this will be set to `hidden_size`.
Whether to scale the output of the embeddings by `sqrt(hidden_size)`.
attention_window_size (`int`, *optional*, defaults to 2048):
The size of the attention window used in the attention block.
conv1d_width (`int`, *optional*, defaults to 4):
The kernel size of conv1d layers used in the recurrent blocks.
logits_soft_cap (`float`, *optional*, defaults to 30.0):
The value at which the logits should be soft-capped to after the transformer and LM-head computation in the Causal LM architecture.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether the model should return the last key/values
attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
eos_token_id (`int`, *optional*, defaults to 1):
End of stream token id.
bos_token_id (`int`, *optional*, defaults to 2):
Beginning of stream token id.
hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The hidden activation used in the recurrent block as well as the MLP layer of the decoder layers.
partial_rotary_factor (`float`, *optional*, defaults to 0.5):
The partial rotary factor used in the initialization of the rotary embeddings.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
block_types (`list[str]`, *optional*, defaults to `('recurrent', 'recurrent', 'attention')`):
List of alternating blocks that will be repeated to initialize the `temporal_block` layer.
attention_dropout (`float`, *optional*, defaults to 0.0): Dropout value to use after the attention softmax.
num_key_value_heads (`int`, *optional*, defaults to `num_attention_heads`): Number of key value heads used for GQA.
attention_bias (`bool`, *optional*, defaults to `False`): Whether or not the linear q, k, v projections of the attention layer should have bias.
w_init_variance_scale (`float`, *optional*, defaults to 0.01): Weight initialization variance.
```python
>>> from transformers import RecurrentGemmaModel, RecurrentGemmaConfig
>>> # Initializing a RecurrentGemma recurrentgemma-2b style configuration
>>> configuration = RecurrentGemmaConfig()
>>> # Initializing a model from the recurrentgemma-2b style configuration
>>> model = RecurrentGemmaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'recurrent_gemma'
def __init__(self, num_hidden_layers=26, vocab_size=256000, hidden_size=2560, intermediate_size=3 * 2560, num_attention_heads=10, lru_width=None, attention_window_size=2048, conv1d_width=4, logits_soft_cap=30.0, rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, eos_token_id=1, bos_token_id=2, hidden_activation='gelu_pytorch_tanh', partial_rotary_factor=0.5, rope_theta=10000.0, block_types=('recurrent', 'recurrent', 'attention'), attention_dropout=0.0, num_key_value_heads=None, attention_bias=False, w_init_variance_scale=0.01, **kwargs):
self.num_hidden_layers = num_hidden_layers
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_attention_heads = num_attention_heads
self.lru_width = lru_width if lru_width is not None else hidden_size
self.attention_window_size = attention_window_size
self.conv1d_width = conv1d_width
self.logits_soft_cap = logits_soft_cap
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.rope_theta = rope_theta
self.partial_rotary_factor = partial_rotary_factor
self.block_types = list(block_types)
self.hidden_activation = hidden_activation
self.head_dim = self.hidden_size // self.num_attention_heads
self.num_key_value_heads = num_key_value_heads if num_key_value_heads is not None else num_attention_heads
if self.num_key_value_heads > self.num_attention_heads:
raise ValueError('The number of `num_key_value_heads` must be smaller than `num_attention_heads`')
self.attention_dropout = attention_dropout
self.attention_bias = attention_bias
self.w_init_variance_scale = w_init_variance_scale
self.final_w_init_variance_scale = 2.0 / self.num_hidden_layers
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@property
def layers_block_type(self):
return (self.block_types * 100)[:self.num_hidden_layers]
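# Illustration of the `layers_block_type` property above: the block pattern is tiled and then
# truncated to `num_hidden_layers`, so layers cycle recurrent/recurrent/attention. Pure Python,
# using the default values from the config.
block_types = ["recurrent", "recurrent", "attention"]
num_hidden_layers = 26
layers_block_type = (block_types * 100)[:num_hidden_layers]
print(len(layers_block_type))  # 26
print(layers_block_type[:6])   # ['recurrent', 'recurrent', 'attention', 'recurrent', 'recurrent', 'attention']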
|
class RecurrentGemmaConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`RecurrentGemmaModel`]. It is used to instantiate a RecurrentGemma
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the RecurrentGemma-7B.
e.g. [google/recurrentgemma-2b](https://huggingface.co/google/recurrentgemma-2b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_hidden_layers (`int`, *optional*, defaults to 26):
The number of hidden layers in the model.
vocab_size (`int`, *optional*, defaults to 256000):
Vocabulary size of the RecurrentGemma model. Defines the number of
different tokens that can be represented by the
`inputs_ids` passed when calling [`RecurrentGemmaModel`]
hidden_size (`int`, *optional*, defaults to 2560):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 7680):
Dimension of the MLP representations.
num_attention_heads (`int`, *optional*, defaults to 10):
The number of heads for the attention block and the number of
heads/blocks for the block-diagonal layers used in the RG-LRU gates.
This number must divide `hidden_size` and `lru_width`.
lru_width (`int` or `None`, *optional*):
Dimension of the hidden representations of the RG-LRU. If `None`
this will be set to `hidden_size`.
Whether to scale the output of the embeddings by `sqrt(hidden_size)`.
attention_window_size (`int`, *optional*, defaults to 2048):
The size of the attention window used in the attention block.
conv1d_width (`int`, *optional*, defaults to 4):
The kernel size of conv1d layers used in the recurrent blocks.
logits_soft_cap (`float`, *optional*, defaults to 30.0):
The value at which the logits should be soft-capped to after the transformer and LM-head computation in the Causal LM architecture.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether the model should return the last key/values
attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
eos_token_id (`int`, *optional*, defaults to 1):
End of stream token id.
bos_token_id (`int`, *optional*, defaults to 2):
Beginning of stream token id.
hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The hidden activation used in the recurrent block as well as the MLP layer of the decoder layers.
partial_rotary_factor (`float`, *optional*, defaults to 0.5):
The partial rotary factor used in the initialization of the rotary embeddings.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
block_types (`list[str]`, *optional*, defaults to `('recurrent', 'recurrent', 'attention')`):
List of alternating blocks that will be repeated to initialize the `temporal_block` layer.
attention_dropout (`float`, *optional*, defaults to 0.0): Dropout value to use after the attention softmax.
num_key_value_heads (`int`, *optional*, defaults to `num_attention_heads`): Number of key value heads used for GQA.
attention_bias (`bool`, *optional*, defaults to `False`): Whether or not the linear q, k, v projections of the attention layer should have bias.
w_init_variance_scale (`float`, *optional*, defaults to 0.01): Weight initialization variance.
```python
>>> from transformers import RecurrentGemmaModel, RecurrentGemmaConfig
>>> # Initializing a RecurrentGemma recurrentgemma-2b style configuration
>>> configuration = RecurrentGemmaConfig()
>>> # Initializing a model from the recurrentgemma-2b style configuration
>>> model = RecurrentGemmaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```'''
def __init__(self, num_hidden_layers=26, vocab_size=256000, hidden_size=2560, intermediate_size=3 * 2560, num_attention_heads=10, lru_width=None, attention_window_size=2048, conv1d_width=4, logits_soft_cap=30.0, rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, eos_token_id=1, bos_token_id=2, hidden_activation='gelu_pytorch_tanh', partial_rotary_factor=0.5, rope_theta=10000.0, block_types=('recurrent', 'recurrent', 'attention'), attention_dropout=0.0, num_key_value_heads=None, attention_bias=False, w_init_variance_scale=0.01, **kwargs):
pass
@property
def layers_block_type(self):
pass
| 4
| 1
| 29
| 0
| 29
| 0
| 3
| 1.08
| 1
| 3
| 0
| 0
| 2
| 21
| 2
| 2
| 135
| 10
| 60
| 51
| 31
| 65
| 29
| 25
| 26
| 4
| 1
| 1
| 5
|
4,825
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py
|
transformers.models.recurrent_gemma.modeling_recurrent_gemma.RecurrentGemmaDecoderLayer
|
from typing import Optional, Union
import torch
from ...modeling_layers import GradientCheckpointingLayer
class RecurrentGemmaDecoderLayer(GradientCheckpointingLayer):
"""Griffin and Hawk's residual block."""
def __init__(self, config, layer_idx):
super().__init__()
self.temporal_pre_norm = RecurrentGemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.temporal_block = TEMPORAL_BLOCK_CLASSES[config.layers_block_type[layer_idx]](config)
self.channel_pre_norm = RecurrentGemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.mlp_block = RecurrentGemmaMlp(config)
def forward(self, activations: torch.Tensor, position_ids: torch.Tensor, attention_mask: torch.Tensor, cache_position: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None) -> tuple[torch.Tensor, dict[str, torch.Tensor]]:
raw_activations = activations
inputs_normalized = self.temporal_pre_norm(raw_activations)
hidden_states = self.temporal_block(inputs_normalized, position_ids, attention_mask, cache_position=cache_position, use_cache=use_cache)
residual = hidden_states + raw_activations
hidden_states = self.channel_pre_norm(residual)
hidden_states = self.mlp_block(hidden_states)
hidden_states = hidden_states + residual
return hidden_states
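# Shape-only sketch of the two residual connections in the decoder layer above, with
# nn.Identity stand-ins for the norms, temporal block and MLP (illustrative, assumes torch).
import torch
from torch import nn

temporal_pre_norm = channel_pre_norm = nn.Identity()
temporal_block = mlp_block = nn.Identity()

activations = torch.randn(1, 4, 8)
hidden_states = temporal_block(temporal_pre_norm(activations))
residual = hidden_states + activations          # first residual add
hidden_states = mlp_block(channel_pre_norm(residual))
hidden_states = hidden_states + residual        # second residual add
print(hidden_states.shape)  # torch.Size([1, 4, 8])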
|
class RecurrentGemmaDecoderLayer(GradientCheckpointingLayer):
'''Griffin and Hawk's residual block.'''
def __init__(self, config, layer_idx):
pass
def forward(self, activations: torch.Tensor, position_ids: torch.Tensor, attention_mask: torch.Tensor, cache_position: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None) -> tuple[torch.Tensor, dict[str, torch.Tensor]]:
pass
| 3
| 1
| 14
| 2
| 12
| 1
| 1
| 0.08
| 1
| 6
| 2
| 0
| 2
| 4
| 2
| 12
| 32
| 6
| 25
| 18
| 15
| 2
| 16
| 11
| 13
| 1
| 1
| 0
| 2
|
4,826
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py
|
transformers.models.recurrent_gemma.modeling_recurrent_gemma.RecurrentGemmaForCausalLM
|
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutputWithNoAttention, CausalLMOutput
import torch
from torch import nn
from ...generation import GenerationMixin
from ...utils import auto_docstring, logging
@auto_docstring
class RecurrentGemmaForCausalLM(RecurrentGemmaPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.weight']
def __init__(self, config):
super().__init__(config)
self.model = RecurrentGemmaModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, use_cache: Optional[bool]=None, **kwargs) -> Union[tuple, CausalLMOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, RecurrentGemmaForCausalLM
>>> model = RecurrentGemmaForCausalLM.from_pretrained("google/recurrentgemma-2b")
>>> tokenizer = AutoTokenizer.from_pretrained("google/recurrentgemma-2b")
>>> prompt = "What is your favorite condiment?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"What is your favorite condiment?"
```"""
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True
outputs = self.model(input_ids=input_ids, position_ids=position_ids, cache_position=cache_position, attention_mask=attention_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_hidden_states=output_hidden_states, return_dict=return_dict)
hidden_states = outputs[0]
logits = self.lm_head(hidden_states)
cap = self.config.logits_soft_cap
logits = nn.functional.tanh(logits / cap) * cap
loss = None
if labels is not None:
logits = logits.float()
loss = self.loss_function(logits, labels, vocab_size=self.config.vocab_size, **kwargs)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
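# Numeric illustration of the soft-capping applied to the LM logits above:
# tanh(logits / cap) * cap squashes values smoothly into (-cap, cap). Values are illustrative.
import torch

cap = 30.0
logits = torch.tensor([-100.0, -10.0, 0.0, 10.0, 100.0])
capped = torch.tanh(logits / cap) * cap
print(capped)  # roughly [-29.9, -9.6, 0.0, 9.6, 29.9]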
|
@auto_docstring
class RecurrentGemmaForCausalLM(RecurrentGemmaPreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, use_cache: Optional[bool]=None, **kwargs) -> Union[tuple, CausalLMOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, RecurrentGemmaForCausalLM
>>> model = RecurrentGemmaForCausalLM.from_pretrained("google/recurrentgemma-2b")
>>> tokenizer = AutoTokenizer.from_pretrained("google/recurrentgemma-2b")
>>> prompt = "What is your favorite condiment?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"What is your favorite condiment?"
```'''
pass
| 5
| 1
| 12
| 1
| 8
| 3
| 2
| 0.32
| 2
| 5
| 2
| 0
| 9
| 3
| 9
| 12
| 125
| 21
| 79
| 36
| 55
| 25
| 45
| 23
| 35
| 6
| 2
| 2
| 16
|
4,827
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py
|
transformers.models.recurrent_gemma.modeling_recurrent_gemma.RecurrentGemmaMlp
|
from torch import nn
from ...activations import ACT2FN
class RecurrentGemmaMlp(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size // 2
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=True)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=True)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=True)
self.act_fn = ACT2FN[config.hidden_activation]
def forward(self, hidden_states):
gate = self.act_fn(self.gate_proj(hidden_states))
return self.down_proj(gate * self.up_proj(hidden_states))
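# Tiny sketch of the gated MLP above (GeGLU-style): the gate branch goes through the activation
# and is multiplied element-wise with the up branch before the down projection. nn.GELU with the
# tanh approximation stands in here for ACT2FN["gelu_pytorch_tanh"] (illustrative, assumes torch).
import torch
from torch import nn

hidden_size, intermediate_size = 8, 16
gate_proj = nn.Linear(hidden_size, intermediate_size)
up_proj = nn.Linear(hidden_size, intermediate_size)
down_proj = nn.Linear(intermediate_size, hidden_size)
act_fn = nn.GELU(approximate="tanh")

x = torch.randn(2, 3, hidden_size)
out = down_proj(act_fn(gate_proj(x)) * up_proj(x))
print(out.shape)  # torch.Size([2, 3, 8])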
|
class RecurrentGemmaMlp(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 7
| 2
| 12
| 14
| 1
| 13
| 11
| 10
| 0
| 13
| 11
| 10
| 1
| 1
| 0
| 2
|
4,828
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py
|
transformers.models.recurrent_gemma.modeling_recurrent_gemma.RecurrentGemmaModel
|
from ...modeling_attn_mask_utils import AttentionMaskConverter
from .configuration_recurrent_gemma import RecurrentGemmaConfig
from torch import nn
import torch
from ...modeling_outputs import BaseModelOutputWithNoAttention, CausalLMOutput
from ...utils import auto_docstring, logging
from typing import Optional, Union
@auto_docstring
class RecurrentGemmaModel(RecurrentGemmaPreTrainedModel):
def __init__(self, config: RecurrentGemmaConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList([RecurrentGemmaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
self.final_norm = RecurrentGemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.gradient_checkpointing = False
self.register_buffer('normalizer', torch.tensor(self.config.hidden_size ** 0.5, dtype=torch.bfloat16), persistent=False)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, cache_position: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithNoAttention]:
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError('You must specify exactly one of input_ids or inputs_embeds')
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.')
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
hidden_states = inputs_embeds
if use_cache and inputs_embeds.shape[1] != 1:
self._setup_cache(self.config, hidden_states.shape[0], hidden_states.device, hidden_states.dtype)
if cache_position is None:
cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
hidden_states = hidden_states * self.normalizer.type(hidden_states.dtype)
all_hidden_states = () if output_hidden_states else None
for i, residual_block in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
hidden_states = residual_block(hidden_states, position_ids, causal_mask, cache_position, use_cache)
hidden_states = self.final_norm(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, all_hidden_states] if v is not None))
return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
dtype, device = (input_tensor.dtype, input_tensor.device)
min_dtype = torch.finfo(dtype).min
sequence_length = input_tensor.shape[1]
target_length = max(self.config.attention_window_size, sequence_length)
diagonal = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
causal_mask = diagonal
if sequence_length != 1:
causal_mask = torch.triu(diagonal, diagonal=-1)
causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone()
if attention_mask.dim() == 2:
attention_mask = attention_mask[:, -target_length:]
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
if attention_mask is not None and attention_mask.device.type in ['cuda', 'xpu', 'npu']:
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
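# Boolean sketch of the attention pattern that `_update_causal_mask` encodes: each query
# position attends only to keys within the last `attention_window_size` positions up to and
# including itself. This shows the idea, not the exact additive-mask implementation above.
import torch

seq_len, attention_window_size = 6, 3
q = torch.arange(seq_len)[:, None]
k = torch.arange(seq_len)[None, :]
allowed = (k <= q) & (k > q - attention_window_size)
print(allowed.int())
# tensor([[1, 0, 0, 0, 0, 0],
#         [1, 1, 0, 0, 0, 0],
#         [1, 1, 1, 0, 0, 0],
#         [0, 1, 1, 1, 0, 0],
#         [0, 0, 1, 1, 1, 0],
#         [0, 0, 0, 1, 1, 1]])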
|
@auto_docstring
class RecurrentGemmaModel(RecurrentGemmaPreTrainedModel):
def __init__(self, config: RecurrentGemmaConfig):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, cache_position: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutputWithNoAttention]:
pass
def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
pass
| 6
| 0
| 23
| 4
| 18
| 1
| 5
| 0.16
| 1
| 12
| 5
| 0
| 5
| 6
| 5
| 8
| 131
| 25
| 93
| 35
| 76
| 15
| 68
| 24
| 62
| 16
| 2
| 2
| 24
|
4,829
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py
|
transformers.models.recurrent_gemma.modeling_recurrent_gemma.RecurrentGemmaPreTrainedModel
|
import math
from torch import nn
from ...utils import auto_docstring, logging
import torch
from ...modeling_utils import PreTrainedModel
from .configuration_recurrent_gemma import RecurrentGemmaConfig
@auto_docstring
class RecurrentGemmaPreTrainedModel(PreTrainedModel):
config: RecurrentGemmaConfig
base_model_prefix = 'model'
supports_gradient_checkpointing = True
_no_split_modules = ['RecurrentGemmaDecoderLayer']
_skip_keys_device_placement = ['cache']
_supports_flash_attn = False
_supports_sdpa = False
def _init_weights(self, module):
std = math.sqrt(self.config.w_init_variance_scale / self.config.conv1d_width)
if isinstance(module, nn.Conv1d):
torch.nn.init.normal_(module.weight, mean=0.0, std=std)
torch.nn.init.zeros_(module.bias)
elif isinstance(module, RecurrentGemmaSdpaAttention):
torch.nn.init.normal_(module.q_proj.weight, mean=0.0, std=math.sqrt(1.0 / self.config.hidden_size))
torch.nn.init.normal_(module.k_proj.weight, mean=0.0, std=math.sqrt(1.0 / self.config.hidden_size))
torch.nn.init.normal_(module.v_proj.weight, mean=0.0, std=math.sqrt(1.0 / self.config.hidden_size))
std = math.sqrt(self.config.final_w_init_variance_scale / self.config.hidden_size)
torch.nn.init.normal_(module.o_proj.weight, mean=0.0, std=std)
elif isinstance(module, RecurrentGemmaRecurrentBlock):
torch.nn.init.zeros_(module.linear_x.bias)
torch.nn.init.normal_(module.linear_x.weight, mean=0.0, std=math.sqrt(1.0 / self.config.hidden_size))
torch.nn.init.zeros_(module.linear_y.bias)
torch.nn.init.normal_(module.linear_y.weight, mean=0.0, std=math.sqrt(1.0 / self.config.hidden_size))
std = math.sqrt(self.config.final_w_init_variance_scale / self.config.lru_width)
torch.nn.init.normal_(module.linear_out.weight, mean=0.0, std=std)
torch.nn.init.zeros_(module.linear_out.bias)
elif isinstance(module, RecurrentGemmaRglru):
std = math.sqrt(self.config.w_init_variance_scale / (self.config.lru_width // self.config.num_attention_heads))
torch.nn.init.normal_(module.input_gate_weight, mean=0.0, std=std)
torch.nn.init.normal_(module.recurrent_gate_weight, mean=0.0, std=std)
torch.nn.init.zeros_(module.input_gate_bias)
torch.nn.init.zeros_(module.recurrent_gate_bias)
module.recurrent_param.data.uniform_(0.9 ** 2 + 1e-08, 0.999 ** 2 + 1e-08)
module.recurrent_param.data.log_().mul_(0.5)
module.recurrent_param.data.neg_().exp_().sub_(1.0).log_()
elif isinstance(module, nn.Linear):
torch.nn.init.normal_(module.weight, mean=0.0, std=std)
if getattr(module, 'bias', None) is not None:
torch.nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, RecurrentGemmaRMSNorm):
module.weight.data.zero_()
def _setup_cache(self, config, batch, device, dtype):
layers = getattr(self, 'model', self).layers
for layer in layers:
layer.temporal_block._setup_cache(batch, device, dtype)
def reset_cache(self, batch, device, dtype):
pass
|
@auto_docstring
class RecurrentGemmaPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
def _setup_cache(self, config, batch, device, dtype):
pass
def reset_cache(self, batch, device, dtype):
pass
| 5
| 0
| 15
| 1
| 13
| 0
| 3
| 0.02
| 1
| 3
| 3
| 2
| 3
| 0
| 3
| 3
| 57
| 7
| 50
| 16
| 46
| 1
| 44
| 16
| 40
| 7
| 1
| 2
| 10
|
4,830
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py
|
transformers.models.recurrent_gemma.modeling_recurrent_gemma.RecurrentGemmaRMSNorm
|
import torch
from torch import nn
class RecurrentGemmaRMSNorm(nn.Module):
def __init__(self, dim: int, eps: float=1e-06):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.zeros(dim))
def _norm(self, x):
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x):
output = self._norm(x.float())
output = output * (1.0 + self.weight.float())
return output.type_as(x)
def extra_repr(self):
return f'{tuple(self.weight.shape)}, eps={self.eps}'
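# Numeric check of the RMSNorm above: x is scaled by 1/sqrt(mean(x^2) + eps), then by
# (1 + weight); with the zero-initialized weight this reduces to plain RMS scaling.
import torch

x = torch.tensor([[3.0, 4.0]])
eps = 1e-6
rms = torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
print(x * rms)  # tensor([[0.8485, 1.1314]]) since sqrt(mean([9, 16])) = sqrt(12.5) ≈ 3.5355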
|
class RecurrentGemmaRMSNorm(nn.Module):
def __init__(self, dim: int, eps: float=1e-06):
pass
def _norm(self, x):
pass
def forward(self, x):
pass
def extra_repr(self):
pass
| 5
| 0
| 4
| 0
| 3
| 1
| 1
| 0.15
| 1
| 4
| 0
| 0
| 4
| 2
| 4
| 14
| 18
| 3
| 13
| 8
| 8
| 2
| 13
| 8
| 8
| 1
| 1
| 0
| 4
|
4,831
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py
|
transformers.models.recurrent_gemma.modeling_recurrent_gemma.RecurrentGemmaRecurrentBlock
|
import torch
from ...activations import ACT2FN
from torch import nn
class RecurrentGemmaRecurrentBlock(nn.Module):
"""Griffin and Hawk's recurrent block."""
def __init__(self, config):
super().__init__()
self.lru_width = config.lru_width
self.hidden_size = config.hidden_size
self.linear_y = nn.Linear(in_features=config.hidden_size, out_features=config.lru_width)
self.linear_x = nn.Linear(in_features=config.hidden_size, out_features=config.lru_width)
self.linear_out = nn.Linear(in_features=config.lru_width, out_features=config.hidden_size)
self.conv1d_width = config.conv1d_width
self.conv_1d = nn.Conv1d(config.lru_width, config.lru_width, kernel_size=config.conv1d_width, groups=config.lru_width, padding=config.conv1d_width - 1)
self.rg_lru = RecurrentGemmaRglru(config)
self.act_fn = ACT2FN[config.hidden_activation]
self.conv1d_state = None
def forward(self, input_states: torch.Tensor, position_ids: torch.Tensor, attention_mask: torch.Tensor, cache_position: torch.Tensor, use_cache: bool=True) -> tuple[torch.Tensor, dict[str, torch.Tensor]]:
_, seq_len, _ = input_states.shape
y_branch = self.linear_y(input_states)
y_branch = self.act_fn(y_branch)
x_branch = self.linear_x(input_states)
x_branch = x_branch.transpose(1, 2)
if use_cache:
if cache_position.shape[0] != 1:
self.conv1d_state = nn.functional.pad(x_branch, (self.conv1d_width - x_branch.shape[-1] - 1, 0))
x_branch = self.conv_1d(x_branch)[..., :seq_len]
else:
conv_state = torch.cat((self.conv1d_state, x_branch), -1)
x_branch = torch.sum(conv_state * self.conv_1d.weight[:, 0, :], dim=-1) + self.conv_1d.bias
x_branch = x_branch.unsqueeze(-1)
self.conv1d_state = conv_state[:, :, 1:]
else:
x_branch = self.conv_1d(x_branch)[..., :seq_len]
x_branch = self.rg_lru(x_branch.transpose(1, 2), position_ids)
hidden_states = x_branch * y_branch
hidden_states = self.linear_out(hidden_states)
return hidden_states
def _setup_cache(self, batch, device, dtype):
self.rg_lru.recurrent_states = torch.zeros((batch, self.lru_width), device=device, dtype=torch.float32)
self.conv1d_state = torch.zeros((batch, self.hidden_size, self.conv1d_width - 1), device=device, dtype=dtype)
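# Sketch of the causal depthwise convolution used in the recurrent block above: with
# padding = kernel_size - 1 the output is longer than the input, and slicing [..., :seq_len]
# keeps only the causally valid positions (illustrative sizes, assumes torch).
import torch
from torch import nn

lru_width, conv1d_width, seq_len = 4, 4, 7
conv_1d = nn.Conv1d(lru_width, lru_width, kernel_size=conv1d_width,
                    groups=lru_width, padding=conv1d_width - 1)
x = torch.randn(1, lru_width, seq_len)
y = conv_1d(x)[..., :seq_len]
print(y.shape)  # torch.Size([1, 4, 7]); output at step t depends only on inputs up to t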
|
class RecurrentGemmaRecurrentBlock(nn.Module):
'''Griffin and Hawk's recurrent block.'''
def __init__(self, config):
pass
def forward(self, input_states: torch.Tensor, position_ids: torch.Tensor, attention_mask: torch.Tensor, cache_position: torch.Tensor, use_cache: bool=True) -> tuple[torch.Tensor, dict[str, torch.Tensor]]:
pass
def _setup_cache(self, batch, device, dtype):
pass
| 4
| 1
| 19
| 2
| 16
| 1
| 2
| 0.08
| 1
| 5
| 1
| 0
| 3
| 10
| 3
| 13
| 61
| 9
| 50
| 26
| 39
| 4
| 35
| 19
| 31
| 3
| 1
| 2
| 5
|
4,832
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py
|
transformers.models.recurrent_gemma.modeling_recurrent_gemma.RecurrentGemmaRglru
|
import torch
from torch import nn
from ...utils.import_utils import is_torchdynamo_compiling
from typing import Optional, Union
class RecurrentGemmaRglru(nn.Module):
"""A Real-Gated Linear Recurrent Unit (RG-LRU) layer."""
def __init__(self, config):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.block_width = config.lru_width // self.num_attention_heads
self.recurrent_param = nn.Parameter(torch.empty([config.lru_width]))
self.input_gate_weight = nn.Parameter(torch.empty([self.num_attention_heads, self.block_width, self.block_width]))
self.input_gate_bias = nn.Parameter(torch.empty([self.num_attention_heads, self.block_width]))
self.recurrent_gate_weight = nn.Parameter(torch.empty([self.num_attention_heads, self.block_width, self.block_width]))
self.recurrent_gate_bias = nn.Parameter(torch.empty([self.num_attention_heads, self.block_width]))
self.recurrent_states = None
def forward(self, activations: torch.Tensor, position_ids: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
batch_size, seq_len, lru_width = activations.shape
reset = position_ids[:, :, None] == 0
reshape_act = activations.reshape(batch_size * seq_len, self.num_attention_heads, self.block_width)
reshape_act = reshape_act.permute(1, 0, 2)
res = torch.baddbmm(self.input_gate_bias[:, None, :], reshape_act, self.input_gate_weight)
input_gate = torch.sigmoid(res.transpose(0, 1).reshape(batch_size, seq_len, lru_width))
res = torch.baddbmm(self.recurrent_gate_bias[:, None, :], reshape_act, self.recurrent_gate_weight)
recurrent_gate = torch.sigmoid(res.transpose(0, 1).reshape(batch_size, seq_len, lru_width))
log_recurrent_gate = -8.0 * recurrent_gate * nn.functional.softplus(self.recurrent_param)
recurrent_gate = torch.exp(log_recurrent_gate)
a_square = torch.exp(2 * log_recurrent_gate)
gated_inputs = activations * input_gate
multiplier = 1
tracing = isinstance(activations, torch.fx.Proxy) or is_torchdynamo_compiling()
if not torch.jit.is_tracing() and (not tracing):
multiplier = SqrtBoundDerivative.apply(1 - a_square)
multiplier = reset + ~reset * multiplier
normalized_x = gated_inputs * multiplier.type(activations.dtype)
hidden_states, recurrent_states = self._rnn_scan(hidden_states=normalized_x, recurrent_gate=recurrent_gate, reset=reset, recurrent_states=self.recurrent_states)
self.recurrent_states = recurrent_states
return hidden_states
def _rnn_scan(self, hidden_states: torch.Tensor, recurrent_gate: torch.Tensor, reset: torch.Tensor, recurrent_states: Union[torch.Tensor, None], acc_dtype: torch.dtype=torch.float32) -> tuple[torch.Tensor, torch.Tensor]:
"""Runs the recurrence of a linear RNN.
Args:
hidden_states: The input sequence.
recurrent_gate: The diagonal of the recurrence matrix `A`.
reset: Indicator of document boundaries, e.g. when to reset the hidden state
of the RNN.
recurrent_states: The initial hidden state.
acc_dtype: The data type for the accumulation.
Returns:
The output of the linear recurrence.
"""
recurrent_gate = recurrent_gate * ~reset
if hidden_states.shape[1] == 1:
if recurrent_states is None:
return (hidden_states, hidden_states[:, 0].type(acc_dtype))
else:
contextualized_states = recurrent_gate.type(acc_dtype) * recurrent_states[:, None].to(recurrent_gate.device)
contextualized_states += hidden_states.type(acc_dtype)
return (contextualized_states.type(hidden_states.dtype), contextualized_states[:, -1])
else:
if recurrent_states is None:
recurrent_states = torch.zeros(hidden_states[:, 0].shape, dtype=acc_dtype, device=hidden_states.device)
contextualized_states = torch.zeros_like(hidden_states)
for t in range(hidden_states.shape[1]):
recurrent_states = recurrent_gate[:, t].type(acc_dtype) * recurrent_states.to(recurrent_gate.device)
recurrent_states = recurrent_states + hidden_states[:, t].type(acc_dtype)
contextualized_states[:, t] = recurrent_states.type(hidden_states.dtype)
return (contextualized_states, recurrent_states)
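# Reference loop for the linear recurrence that `_rnn_scan` implements:
# h_t = a_t * h_{t-1} + x_t, with h reset at document boundaries. Toy values, assumes torch.
import torch

x = torch.tensor([1.0, 1.0, 1.0, 1.0])   # gated inputs
a = torch.tensor([0.5, 0.5, 0.5, 0.5])   # recurrent gate (diagonal of A)
h = torch.tensor(0.0)
outs = []
for t in range(x.shape[0]):
    h = a[t] * h + x[t]
    outs.append(h)
print(outs)  # values 1.0, 1.5, 1.75, 1.875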
|
class RecurrentGemmaRglru(nn.Module):
'''A Real-Gated Linear Recurrent Unit (RG-LRU) layer.'''
def __init__(self, config):
pass
def forward(self, activations: torch.Tensor, position_ids: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
pass
def _rnn_scan(self, hidden_states: torch.Tensor, recurrent_gate: torch.Tensor, reset: torch.Tensor, recurrent_states: Union[torch.Tensor, None], acc_dtype: torch.dtype=torch.float32) -> tuple[torch.Tensor, torch.Tensor]:
'''Runs the recurrence of a linear RNN.
Args:
hidden_states: The input sequence.
recurrent_gate: The diagonal of the recurrence matrix `A`.
reset: Indicator of document boundaries, e.g. when to reset the hidden state
of the RNN.
recurrent_states: The initial hidden state.
acc_dtype: The data type for the accumulation.
Returns:
The output of the linear recurrence.
'''
pass
| 4
| 2
| 35
| 5
| 24
| 6
| 3
| 0.29
| 1
| 5
| 1
| 0
| 3
| 8
| 3
| 13
| 112
| 19
| 73
| 38
| 58
| 21
| 49
| 27
| 45
| 5
| 1
| 2
| 8
|
4,833
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py
|
transformers.models.recurrent_gemma.modeling_recurrent_gemma.RecurrentGemmaRotaryEmbedding
|
import torch
from torch import nn
class RecurrentGemmaRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, dim, base=10000, device=None):
super().__init__()
self.dim = dim
self.base = base
inv_freq = 1.0 / self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float() / self.dim)
self.register_buffer('inv_freq', tensor=inv_freq, persistent=False)
@torch.no_grad()
def forward(self, x, position_ids, seq_len=None):
self.inv_freq.to(x.device)
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type
device_type = device_type if device_type != 'mps' else 'cpu'
with torch.autocast(device_type=device_type, enabled=False):
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos()
sin = emb.sin()
return (cos.to(dtype=x.dtype), sin.to(dtype=x.dtype))
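# Sketch of the rotary frequencies built above: inv_freq_i = 1 / base^(2i/dim), and the cos/sin
# tables have shape (batch, seq_len, dim) after the frequencies are duplicated. The forward above
# computes the same quantities via a batched matmul; this uses broadcasting for clarity.
import torch

dim, base = 8, 10000
inv_freq = 1.0 / base ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim)
position_ids = torch.arange(5)[None, :].float()               # (1, seq_len)
freqs = position_ids[:, :, None] * inv_freq[None, None, :]    # (1, seq_len, dim/2)
emb = torch.cat((freqs, freqs), dim=-1)                       # (1, seq_len, dim)
print(inv_freq)          # 1.0, 0.1, 0.01, 0.001
print(emb.cos().shape)   # torch.Size([1, 5, 8])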
|
class RecurrentGemmaRotaryEmbedding(nn.Module):
def __init__(self, dim, base=10000, device=None):
pass
@torch.no_grad()
def forward(self, x, position_ids, seq_len=None):
pass
| 4
| 0
| 11
| 0
| 9
| 2
| 2
| 0.15
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 24
| 1
| 20
| 14
| 16
| 3
| 19
| 13
| 16
| 2
| 1
| 1
| 3
|
4,834
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py
|
transformers.models.recurrent_gemma.modeling_recurrent_gemma.RecurrentGemmaSdpaAttention
|
from torch import nn
from typing import Optional, Union
import torch
from .configuration_recurrent_gemma import RecurrentGemmaConfig
class RecurrentGemmaSdpaAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: RecurrentGemmaConfig):
super().__init__()
self.config = config
self.attention_dropout = config.attention_dropout
self.hidden_size = config.hidden_size
self.num_attention_heads = config.num_attention_heads
self.head_dim = config.head_dim
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_attention_heads // self.num_key_value_heads
self.partial_rotary_factor = config.partial_rotary_factor
self.q_proj = nn.Linear(self.hidden_size, self.num_attention_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
self.o_proj = nn.Linear(self.num_attention_heads * self.head_dim, self.hidden_size, bias=True)
self.rotary_emb = RecurrentGemmaRotaryEmbedding(int(self.partial_rotary_factor * self.head_dim), base=config.rope_theta)
def forward(self, hidden_states: torch.Tensor, position_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_attention_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = self.rotary_emb(value_states, position_ids)
query_rot, query_pass = torch.chunk(query_states, int(1 / self.partial_rotary_factor), dim=-1)
key_rot, key_pass = torch.chunk(key_states, int(1 / self.partial_rotary_factor), dim=-1)
query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
query_states = torch.cat((query_rot, query_pass), dim=-1)
key_states = torch.cat((key_rot, key_pass), dim=-1)
if use_cache and hasattr(self, 'key_states'):
cache_kwargs = {'cache_position': cache_position}
key_states, value_states = self._update_cache(key_states, value_states, **cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
causal_mask = attention_mask
if attention_mask is not None:
causal_mask = causal_mask[:, :, :, :key_states.shape[-2]]
attn_output = torch.nn.functional.scaled_dot_product_attention(query_states.contiguous(), key_states.contiguous(), value_states.contiguous(), attn_mask=causal_mask, dropout_p=self.attention_dropout if self.training else 0.0, scale=self.head_dim ** (-0.5))
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(bsz, q_len, self.hidden_size)
attn_output = self.o_proj(attn_output)
return attn_output
def _setup_cache(self, batch_size, device, dtype=None):
if dtype is None and self.config.dtype is not None:
dtype = self.config.dtype
dtype = dtype if dtype is not None else torch.float32
cache_shape = (batch_size, self.num_key_value_heads, self.config.attention_window_size, self.head_dim)
self.value_states = torch.zeros(cache_shape, dtype=dtype, device=device)
self.key_states = torch.zeros(cache_shape, dtype=dtype, device=device)
@torch.no_grad()
def _update_cache(self, key_states, value_states, **cache_kwargs):
"""
torch.compile compatible sliding window.
Computes the `indices` based on `cache_position >= self.config.attention_window_size - 1`.
The `to_shift` is only true once we are above attention_window_size. Thus with `attention_window_size==64`:
indices = (slicing + to_shift[-1].int()-1) % self.config.attention_window_size
tensor([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 0])
We overwrite the cache using these, then we always write at cache_position (clamped to `attention_window_size`)
"""
cache_position = cache_kwargs.get('cache_position')
if cache_position.shape[0] > self.config.attention_window_size:
k_out = key_states[:, :, -self.config.attention_window_size:, :]
v_out = value_states[:, :, -self.config.attention_window_size:, :]
else:
slicing = torch.ones(self.config.attention_window_size, dtype=torch.long, device=value_states.device).cumsum(0)
cache_position = cache_position.clamp(0, self.config.attention_window_size - 1)
to_shift = cache_position >= self.config.attention_window_size - 1
indices = (slicing + to_shift[-1].int() - 1) % self.config.attention_window_size
k_out, v_out = (self.key_states.to(key_states.device), self.value_states.to(value_states.device))
k_out = k_out[:, :, indices]
v_out = v_out[:, :, indices]
k_out[:, :, cache_position] = key_states.to(k_out.dtype)
v_out[:, :, cache_position] = value_states.to(v_out.dtype)
self.key_states, self.value_states = (k_out, v_out)
return (k_out, v_out)
|
class RecurrentGemmaSdpaAttention(nn.Module):
'''Multi-headed attention from 'Attention Is All You Need' paper'''
def __init__(self, config: RecurrentGemmaConfig):
pass
def forward(self, hidden_states: torch.Tensor, position_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, cache_position: Optional[torch.LongTensor]=None, use_cache: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
pass
def _setup_cache(self, batch_size, device, dtype=None):
pass
@torch.no_grad()
def _update_cache(self, key_states, value_states, **cache_kwargs):
'''
torch.compile compatible sliding window.
Computes the `indices` based on `cache_position >= self.config.attention_window_size - 1`.
The `to_shift` is only true once we are above attention_window_size. Thus with `attention_window_size==64`:
indices = (slicing + to_shift[-1].int()-1) % self.config.attention_window_size
tensor([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 0])
We overwrite the cache using these, then we always write at cache_position (clamped to `attention_window_size`)
'''
pass
| 6 | 2 | 28 | 4 | 21 | 4 | 3 | 0.17 | 1 | 6 | 2 | 0 | 4 | 15 | 4 | 14 | 120 | 19 | 87 | 45 | 74 | 15 | 66 | 37 | 61 | 4 | 1 | 1 | 10
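A minimal sketch of the sliding-window index rotation described in the `_update_cache` docstring above, assuming `attention_window_size == 64` and a single decode step at the window edge (the values here are only illustrative):

```python
import torch

attention_window_size = 64
cache_position = torch.tensor([63])  # hypothetical decode step at the last slot of the window

slicing = torch.ones(attention_window_size, dtype=torch.long).cumsum(0)  # [1, 2, ..., 64]
cache_position = cache_position.clamp(0, attention_window_size - 1)
to_shift = cache_position >= attention_window_size - 1  # True once the window is full
indices = (slicing + to_shift[-1].int() - 1) % attention_window_size

print(indices)  # tensor([ 1,  2, ..., 63,  0]) -- the oldest slot is rotated to the end
```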
| 4,835 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py | transformers.models.recurrent_gemma.modeling_recurrent_gemma.SqrtBoundDerivative |
import torch
class SqrtBoundDerivative(torch.autograd.Function):
"""Computes a square root with a gradient clipped at `_MAX_SQRT_GRADIENT`."""
@staticmethod
def forward(ctx, x: torch.Tensor) -> torch.Tensor:
"""The forward pass, which is a normal `sqrt`."""
ctx.save_for_backward(x)
return torch.sqrt(x)
@staticmethod
def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
"""The backward pass, which clips the `sqrt` gradient."""
x, = ctx.saved_tensors
clipped_x_times_4 = torch.clip(4.0 * x, min=1 / _MAX_SQRT_GRADIENT ** 2)
return grad_output / torch.sqrt(clipped_x_times_4)
| null | 5 | 3 | 5 | 0 | 4 | 1 | 1 | 0.3 | 1 | 1 | 0 | 0 | 0 | 0 | 2 | 32 | 15 | 2 | 10 | 7 | 5 | 3 | 8 | 5 | 5 | 1 | 5 | 0 | 2
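A small usage sketch for the clipped-sqrt function above; `_MAX_SQRT_GRADIENT` is defined elsewhere in the source module, so the value 1000.0 below is only an assumed placeholder, and the class is repeated so the sketch runs standalone:

```python
import torch

_MAX_SQRT_GRADIENT = 1000.0  # placeholder; the real constant lives in the same module


class SqrtBoundDerivative(torch.autograd.Function):
    """Square root whose gradient is clipped at `_MAX_SQRT_GRADIENT`."""

    @staticmethod
    def forward(ctx, x: torch.Tensor) -> torch.Tensor:
        ctx.save_for_backward(x)
        return torch.sqrt(x)

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
        (x,) = ctx.saved_tensors
        clipped_x_times_4 = torch.clip(4.0 * x, min=1 / _MAX_SQRT_GRADIENT**2)
        return grad_output / torch.sqrt(clipped_x_times_4)


x = torch.tensor([0.0, 1.0], requires_grad=True)
SqrtBoundDerivative.apply(x).sum().backward()
print(x.grad)  # tensor([1000.0000, 0.5000]) -- bounded at x == 0 instead of inf
```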
| 4,836 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/configuration_reformer.py | transformers.models.reformer.configuration_reformer.ReformerConfig |
from ...configuration_utils import PretrainedConfig
class ReformerConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`ReformerModel`]. It is used to instantiate a
Reformer model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the ReFormer
[google/reformer-crime-and-punishment](https://huggingface.co/google/reformer-crime-and-punishment) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
attention_head_size (`int`, *optional*, defaults to 64):
Dimensionality of the projected key, query and value vectors
attn_layers (`list[str]`, *optional*, defaults to `["local", "lsh", "local", "lsh", "local", "lsh"]`):
List of attention layer types in ascending order. It can be chosen between a LSHSelfAttention layer
(`"lsh"`) and a LocalSelfAttention layer (`"local"`).
For more information on LSHSelfAttention layer, see [LSH Self Attention](reformer#lsh-self-attention). For
more information on LocalSelfAttention layer, see [Local Self Attention](reformer#local-self-attention).
axial_pos_embds (`bool`, *optional*, defaults to `True`):
Whether or not to use axial position embeddings. For more information on how axial position embeddings
work, see [Axial Position Encodings](reformer#axial-positional-encodings).
axial_norm_std (`float`, *optional*, defaults to 1.0):
The standard deviation of the normal_initializer for initializing the weight matrices of the axial
positional encodings.
axial_pos_shape (`list[int]`, *optional*, defaults to `[64, 64]`):
The position dims of the axial position encodings. During training, the product of the position dims has to
be equal to the sequence length.
For more information on how axial position embeddings work, see [Axial Position
Encodings](reformer#axial-positional-encodings).
axial_pos_embds_dim (`list[int]`, *optional*, defaults to `[64, 192]`):
The embedding dims of the axial position encodings. The sum of the embedding dims has to be equal to the
hidden size.
For more information on how axial position embeddings work, see [Axial Position
Encodings](reformer#axial-positional-encodings).
chunk_size_lm_head (`int`, *optional*, defaults to 0):
The chunk size of the final language model feed forward head layer. A chunk size of 0 means that the feed
forward layer is not chunked. A chunk size of n means that the feed forward layer processes n <
sequence_length embeddings at a time.
For more information on feed forward chunking, see [How does Feed Forward Chunking
work?](../glossary#feed-forward-chunking).
eos_token_id (`int`, *optional*, defaults to 2):
The token id for the end-of-sentence token.
feed_forward_size (`int`, *optional*, defaults to 512):
Dimensionality of the feed_forward layer in the residual attention block.
hash_seed (`int`, *optional*):
Seed that can be used to make local sensitive hashing in `LSHSelfAttention` deterministic. This should only
be set for testing purposes. For evaluation and training purposes `hash_seed` should be left as `None` to
ensure fully random rotations in the local sensitive hashing scheme.
hidden_act (`str` or `Callable`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the feed forward layer in the residual attention
block. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.05):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the output hidden states of the residual attention blocks.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
is_decoder (`bool`, *optional*, defaults to `False`):
Whether or not to use a causal mask in addition to the `attention_mask` passed to [`ReformerModel`]. When
using the Reformer for causal language modeling, this argument should be set to `True`.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
local_chunk_length (`int`, *optional*, defaults to 64):
Length of chunk which attends to itself in `LocalSelfAttention`. Chunking reduces memory complexity from
sequence length x sequence length (self attention) to chunk length x chunk length x sequence length / chunk
length (chunked self attention).
local_num_chunks_before (`int`, *optional*, defaults to 1):
Number of previous neighbouring chunks to attend to in `LocalSelfAttention` layer to itself.
local_num_chunks_after (`int`, *optional*, defaults to 0):
Number of following neighbouring chunks to attend to in `LocalSelfAttention` layer in addition to itself.
local_attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities in `LocalSelfAttention`.
lsh_attn_chunk_length (`int`, *optional*, defaults to 64):
Length of chunk which attends to itself in `LSHSelfAttention`. Chunking reduces memory complexity from
sequence length x sequence length (self attention) to chunk length x chunk length x sequence length / chunk
length (chunked self attention).
lsh_num_chunks_before (`int`, *optional*, defaults to 1):
Number of previous neighbouring chunks to attend to in `LSHSelfAttention` layer to itself.
lsh_num_chunks_after (`int`, *optional*, defaults to 0):
Number of following neighbouring chunks to attend to in `LSHSelfAttention` layer to itself.
lsh_attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities in `LSHSelfAttention`.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_buckets (`int` or `list[int]`, *optional*):
Number of buckets, the key query vectors can be "hashed into" using the locality sensitive hashing scheme.
Each query key vector is hashed into a hash in `1, ..., num_buckets`. The number of buckets can also be
factorized into a list for improved memory complexity. In this case, each query key vector is hashed into a
hash in `1-1, 1-2, ..., num_buckets[0]-1, ..., num_buckets[0]-num_buckets[1]` if `num_buckets` is
factorized into two factors. The number of buckets (or the product of the factors) should approximately equal
sequence length / lsh_chunk_length. If `num_buckets` not set, a good value is calculated on the fly.
num_hashes (`int`, *optional*, defaults to 1):
Number of hashing rounds (e.g., number of random rotations) in Local Sensitive Hashing scheme. The higher
`num_hashes`, the more accurate the `LSHSelfAttention` becomes, but also the more memory and time intensive
the hashing becomes.
pad_token_id (`int`, *optional*, defaults to 0):
The token id for the padding token.
vocab_size (`int`, *optional*, defaults to 320):
Vocabulary size of the Reformer model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`ReformerModel`].
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie input and output embeddings.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
Examples:
```python
>>> from transformers import ReformerConfig, ReformerModel
>>> # Initializing a Reformer configuration
>>> configuration = ReformerConfig()
>>> # Initializing a Reformer model (with random weights)
>>> model = ReformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = 'reformer'
keys_to_ignore_at_inference = ['past_buckets_states']
attribute_map = {}
def __init__(self, attention_head_size=64, attn_layers=['local', 'lsh', 'local', 'lsh', 'local', 'lsh'], axial_norm_std=1.0, axial_pos_embds=True, axial_pos_shape=[64, 64], axial_pos_embds_dim=[64, 192], chunk_size_lm_head=0, eos_token_id=2, feed_forward_size=512, hash_seed=None, hidden_act='relu', hidden_dropout_prob=0.05, hidden_size=256, initializer_range=0.02, is_decoder=False, layer_norm_eps=1e-12, local_num_chunks_before=1, local_num_chunks_after=0, local_attention_probs_dropout_prob=0.05, local_attn_chunk_length=64, lsh_attn_chunk_length=64, lsh_attention_probs_dropout_prob=0.0, lsh_num_chunks_before=1, lsh_num_chunks_after=0, max_position_embeddings=4096, num_attention_heads=12, num_buckets=None, num_hashes=1, pad_token_id=0, vocab_size=320, tie_word_embeddings=False, use_cache=True, classifier_dropout=None, **kwargs):
self.hash_seed = hash_seed
self.vocab_size = vocab_size
self.attention_head_size = attention_head_size
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.num_hashes = num_hashes
self.num_hidden_layers = len(attn_layers)
self.num_buckets = tuple(num_buckets) if isinstance(num_buckets, list) else num_buckets
self.lsh_attn_chunk_length = lsh_attn_chunk_length
self.local_attn_chunk_length = local_attn_chunk_length
self.lsh_num_chunks_after = lsh_num_chunks_after
self.lsh_num_chunks_before = lsh_num_chunks_before
self.local_num_chunks_after = local_num_chunks_after
self.local_num_chunks_before = local_num_chunks_before
self.hidden_act = hidden_act
self.feed_forward_size = feed_forward_size
self.hidden_dropout_prob = hidden_dropout_prob
self.lsh_attention_probs_dropout_prob = lsh_attention_probs_dropout_prob
self.local_attention_probs_dropout_prob = local_attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.axial_pos_embds = axial_pos_embds
self.axial_pos_shape = tuple(axial_pos_shape)
self.axial_pos_embds_dim = tuple(axial_pos_embds_dim)
self.axial_norm_std = axial_norm_std
self.chunk_size_lm_head = chunk_size_lm_head
self.attn_layers = attn_layers
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_decoder=is_decoder, tie_word_embeddings=tie_word_embeddings, **kwargs)
|
class ReformerConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`ReformerModel`]. It is used to instantiate a
Reformer model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the ReFormer
[google/reformer-crime-and-punishment](https://huggingface.co/google/reformer-crime-and-punishment) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
attention_head_size (`int`, *optional*, defaults to 64):
Dimensionality of the projected key, query and value vectors
attn_layers (`list[str]`, *optional*, defaults to `["local", "lsh", "local", "lsh", "local", "lsh"]`):
List of attention layer types in ascending order. It can be chosen between a LSHSelfAttention layer
(`"lsh"`) and a LocalSelfAttention layer (`"local"`).
For more information on LSHSelfAttention layer, see [LSH Self Attention](reformer#lsh-self-attention). For
more information on LocalSelfAttention layer, see [Local Self Attention](reformer#local-self-attention).
axial_pos_embds (`bool`, *optional*, defaults to `True`):
Whether or not to use axial position embeddings. For more information on how axial position embeddings
work, see [Axial Position Encodings](reformer#axial-positional-encodings).
axial_norm_std (`float`, *optional*, defaults to 1.0):
The standard deviation of the normal_initializer for initializing the weight matrices of the axial
positional encodings.
axial_pos_shape (`list[int]`, *optional*, defaults to `[64, 64]`):
The position dims of the axial position encodings. During training, the product of the position dims has to
be equal to the sequence length.
For more information on how axial position embeddings work, see [Axial Position
Encodings](reformer#axial-positional-encodings).
axial_pos_embds_dim (`list[int]`, *optional*, defaults to `[64, 192]`):
The embedding dims of the axial position encodings. The sum of the embedding dims has to be equal to the
hidden size.
For more information on how axial position embeddings work, see [Axial Position
Encodings](reformer#axial-positional-encodings).
chunk_size_lm_head (`int`, *optional*, defaults to 0):
The chunk size of the final language model feed forward head layer. A chunk size of 0 means that the feed
forward layer is not chunked. A chunk size of n means that the feed forward layer processes n <
sequence_length embeddings at a time.
For more information on feed forward chunking, see [How does Feed Forward Chunking
work?](../glossary#feed-forward-chunking).
eos_token_id (`int`, *optional*, defaults to 2):
The token id for the end-of-sentence token.
feed_forward_size (`int`, *optional*, defaults to 512):
Dimensionality of the feed_forward layer in the residual attention block.
hash_seed (`int`, *optional*):
Seed that can be used to make local sensitive hashing in `LSHSelfAttention` deterministic. This should only
be set for testing purposes. For evaluation and training purposes `hash_seed` should be left as `None` to
ensure fully random rotations in the local sensitive hashing scheme.
hidden_act (`str` or `Callable`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the feed forward layer in the residual attention
block. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.05):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the output hidden states of the residual attention blocks.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
is_decoder (`bool`, *optional*, defaults to `False`):
Whether or not to use a causal mask in addition to the `attention_mask` passed to [`ReformerModel`]. When
using the Reformer for causal language modeling, this argument should be set to `True`.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
local_chunk_length (`int`, *optional*, defaults to 64):
Length of chunk which attends to itself in `LocalSelfAttention`. Chunking reduces memory complexity from
sequence length x sequence length (self attention) to chunk length x chunk length x sequence length / chunk
length (chunked self attention).
local_num_chunks_before (`int`, *optional*, defaults to 1):
Number of previous neighbouring chunks to attend to in `LocalSelfAttention` layer to itself.
local_num_chunks_after (`int`, *optional*, defaults to 0):
Number of following neighbouring chunks to attend to in `LocalSelfAttention` layer in addition to itself.
local_attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities in `LocalSelfAttention`.
lsh_attn_chunk_length (`int`, *optional*, defaults to 64):
Length of chunk which attends to itself in `LSHSelfAttention`. Chunking reduces memory complexity from
sequence length x sequence length (self attention) to chunk length x chunk length x sequence length / chunk
length (chunked self attention).
lsh_num_chunks_before (`int`, *optional*, defaults to 1):
Number of previous neighbouring chunks to attend to in `LSHSelfAttention` layer to itself.
lsh_num_chunks_after (`int`, *optional*, defaults to 0):
Number of following neighbouring chunks to attend to in `LSHSelfAttention` layer to itself.
lsh_attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities in `LSHSelfAttention`.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
num_buckets (`int` or `list[int]`, *optional*):
Number of buckets, the key query vectors can be "hashed into" using the locality sensitive hashing scheme.
Each query key vector is hashed into a hash in `1, ..., num_buckets`. The number of buckets can also be
factorized into a list for improved memory complexity. In this case, each query key vector is hashed into a
hash in `1-1, 1-2, ..., num_buckets[0]-1, ..., num_buckets[0]-num_buckets[1]` if `num_buckets` is
factorized into two factors. The number of buckets (or the product of the factors) should approximately equal
sequence length / lsh_chunk_length. If `num_buckets` not set, a good value is calculated on the fly.
num_hashes (`int`, *optional*, defaults to 1):
Number of hashing rounds (e.g., number of random rotations) in Local Sensitive Hashing scheme. The higher
`num_hashes`, the more accurate the `LSHSelfAttention` becomes, but also the more memory and time intensive
the hashing becomes.
pad_token_id (`int`, *optional*, defaults to 0):
The token id for the padding token.
vocab_size (`int`, *optional*, defaults to 320):
Vocabulary size of the Reformer model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`ReformerModel`].
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie input and output embeddings.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
Examples:
```python
>>> from transformers import ReformerConfig, ReformerModel
>>> # Initializing a Reformer configuration
>>> configuration = ReformerConfig()
>>> # Initializing a Reformer model (with random weights)
>>> model = ReformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
'''
def __init__(self, attention_head_size=64, attn_layers=['local', 'lsh', 'local', 'lsh', 'local', 'lsh'], axial_norm_std=1.0, axial_pos_embds=True, axial_pos_shape=[64, 64], axial_pos_embds_dim=[64, 192], chunk_size_lm_head=0, eos_token_id=2, feed_forward_size=512, hash_seed=None, hidden_act='relu', hidden_dropout_prob=0.05, hidden_size=256, initializer_range=0.02, is_decoder=False, layer_norm_eps=1e-12, local_num_chunks_before=1, local_num_chunks_after=0, local_attention_probs_dropout_prob=0.05, local_attn_chunk_length=64, lsh_attn_chunk_length=64, lsh_attention_probs_dropout_prob=0.0, lsh_num_chunks_before=1, lsh_num_chunks_after=0, max_position_embeddings=4096, num_attention_heads=12, num_buckets=None, num_hashes=1, pad_token_id=0, vocab_size=320, tie_word_embeddings=False, use_cache=True, classifier_dropout=None, **kwargs):
pass
| 2 | 1 | 74 | 0 | 74 | 0 | 2 | 1.5 | 1 | 3 | 0 | 0 | 1 | 30 | 1 | 1 | 208 | 13 | 78 | 71 | 40 | 117 | 36 | 35 | 34 | 2 | 1 | 0 | 2
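A sketch of a configuration that satisfies the two axial-embedding constraints spelled out in the docstring above (the product of `axial_pos_shape` matching the training sequence length, and the sum of `axial_pos_embds_dim` matching `hidden_size`); the concrete values are chosen only for illustration:

```python
from transformers import ReformerConfig, ReformerModel

config = ReformerConfig(
    hidden_size=256,
    axial_pos_shape=[32, 32],       # 32 * 32 == 1024 == training sequence length
    axial_pos_embds_dim=[64, 192],  # 64 + 192 == hidden_size
    max_position_embeddings=1024,
    attn_layers=["local", "lsh", "local", "lsh"],
    is_decoder=True,                # causal masking for language modeling
)
model = ReformerModel(config)
print(model.config.num_hidden_layers)  # 4 -- derived from len(attn_layers)
```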
| 4,837 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py | transformers.models.reformer.modeling_reformer.AxialPositionEmbeddings |
from functools import reduce
from operator import mul
from torch import nn
import torch
class AxialPositionEmbeddings(nn.Module):
"""
Constructs axial position embeddings. Useful for very long input sequences to save memory and time.
"""
def __init__(self, config):
super().__init__()
self.axial_pos_shape = config.axial_pos_shape
self.axial_pos_embds_dim = config.axial_pos_embds_dim
self.dropout = config.hidden_dropout_prob
self.least_common_mult_chunk_length = _get_least_common_mult_chunk_len(config)
self.weights = nn.ParameterList()
if sum(self.axial_pos_embds_dim) != config.hidden_size:
raise ValueError(f'Make sure that config.axial_pos_embds factors: {self.axial_pos_embds_dim} sum to config.hidden_size: {config.hidden_size}')
for axis, axial_pos_embd_dim in enumerate(self.axial_pos_embds_dim):
ax_shape = [1] * len(self.axial_pos_shape)
ax_shape[axis] = self.axial_pos_shape[axis]
ax_shape = tuple(ax_shape) + (axial_pos_embd_dim,)
self.weights.append(nn.Parameter(torch.ones(ax_shape, dtype=torch.float32)))
def forward(self, position_ids):
batch_size = position_ids.shape[0]
sequence_length = position_ids.shape[1]
broadcasted_weights = [weight.expand((batch_size,) + self.axial_pos_shape + weight.shape[-1:]) for weight in self.weights]
if self.training is True:
if reduce(mul, self.axial_pos_shape) != sequence_length:
raise ValueError(f'If training, make sure that config.axial_pos_shape factors: {self.axial_pos_shape} multiply to sequence length. Got prod({self.axial_pos_shape}) != sequence_length: {sequence_length}. You might want to consider padding your sequence length to {reduce(mul, self.axial_pos_shape)} or changing config.axial_pos_shape.')
if self.dropout > 0:
weights = torch.cat(broadcasted_weights, dim=-1)
transposed_weights = weights.transpose(2, 1)
dropped_transposed_weights = nn.functional.dropout2d(transposed_weights, p=self.dropout, training=self.training)
dropped_weights = dropped_transposed_weights.transpose(2, 1)
position_encodings = torch.reshape(dropped_weights, (batch_size, sequence_length, -1))
else:
position_encodings = torch.cat([torch.reshape(weight, (batch_size, sequence_length, -1)) for weight in broadcasted_weights], dim=-1)
else:
if reduce(mul, self.axial_pos_shape) < sequence_length:
raise ValueError(f'Make sure that config.axial_pos_shape factors: {self.axial_pos_shape} multiply at least to max(sequence_length, least_common_mult_chunk_length): max({sequence_length}, {self.least_common_mult_chunk_length}).')
max_position_id = position_ids.max().item()
required_pos_encodings_columns = -(-(max_position_id + 1) // self.axial_pos_shape[1])
position_encodings = torch.cat([weight[:, :required_pos_encodings_columns] for weight in broadcasted_weights], dim=-1)
position_encodings = torch.reshape(position_encodings, (batch_size, -1, position_encodings.shape[-1]))
position_encodings = torch.cat([torch.index_select(position_encodings[i], 0, position_ids[i]).unsqueeze(0) for i in range(batch_size)], dim=0)
return position_encodings
|
class AxialPositionEmbeddings(nn.Module):
'''
Constructs axial position embeddings. Useful for very long input sequences to save memory and time.
'''
def __init__(self, config):
pass
def forward(self, position_ids):
pass
| 3 | 1 | 44 | 7 | 33 | 5 | 4 | 0.18 | 1 | 5 | 0 | 0 | 2 | 5 | 2 | 12 | 94 | 16 | 66 | 20 | 63 | 12 | 37 | 20 | 34 | 5 | 1 | 2 | 8
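A sketch of the broadcasting idea behind `AxialPositionEmbeddings` above: two small weight tensors jointly cover a 64 x 64 position grid, so far fewer parameters are stored than a full `(4096, 256)` embedding table (shapes assume the default `axial_pos_shape=[64, 64]` and `axial_pos_embds_dim=[64, 192]`):

```python
import torch

w0 = torch.ones(64, 1, 64)    # varies along the first position axis
w1 = torch.ones(1, 64, 192)   # varies along the second position axis

pos = torch.cat(
    [w0.expand(64, 64, 64), w1.expand(64, 64, 192)],
    dim=-1,
).reshape(64 * 64, 256)

print(pos.shape)                # torch.Size([4096, 256])
print(w0.numel() + w1.numel())  # 16384 parameters instead of 4096 * 256 = 1048576
```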
| 4,838 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py | transformers.models.reformer.modeling_reformer.ChunkReformerFeedForward |
from ...pytorch_utils import apply_chunking_to_forward
from torch import nn
class ChunkReformerFeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense = ReformerFeedForwardDense(config)
self.output = ReformerFeedForwardOutput(config)
def forward(self, attention_output):
return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
def forward_chunk(self, hidden_states):
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dense(hidden_states)
return self.output(hidden_states)
|
class ChunkReformerFeedForward(nn.Module):
def __init__(self, config):
pass
def forward(self, attention_output):
pass
def forward_chunk(self, hidden_states):
pass
| 4 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 3 | 2 | 0 | 3 | 5 | 3 | 13 | 22 | 3 | 19 | 9 | 15 | 0 | 14 | 9 | 10 | 1 | 1 | 0 | 3
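A sketch of what `apply_chunking_to_forward` does for the module above: the feed forward is applied `chunk_size` positions at a time along the sequence dimension, lowering peak activation memory while leaving the output unchanged (the linear layer below is a stand-in, not the real `ReformerFeedForwardDense`):

```python
import torch
from transformers.pytorch_utils import apply_chunking_to_forward

dense = torch.nn.Linear(256, 256)  # stand-in for the Reformer feed-forward stack


def forward_chunk(hidden_states):
    return dense(hidden_states)


hidden_states = torch.randn(2, 128, 256)  # (batch, seq_len, hidden)

chunked = apply_chunking_to_forward(forward_chunk, 32, 1, hidden_states)  # 32 positions per chunk
full = forward_chunk(hidden_states)
print(torch.allclose(chunked, full, atol=1e-6))  # True
```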
| 4,839 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py | transformers.models.reformer.modeling_reformer.EfficientAttentionMixin |
import torch
class EfficientAttentionMixin:
"""
A few utilities for nn.Modules in Reformer, to be used as a mixin.
"""
def _look_adjacent(self, vectors, num_chunks_before, num_chunks_after):
"""
Used to implement attention between consecutive chunks.
Args:
vectors: array of shape [batch_size, num_attention_heads, n_chunks, chunk_len, ...]
num_chunks_before: chunks before current chunk to include in attention
num_chunks_after: chunks after current chunk to include in attention
Returns:
tensor of shape [num_chunks, N * chunk_length, ...], where N = (1 + num_chunks_before + num_chunks_after).
"""
if num_chunks_before == 0 and num_chunks_after == 0:
return vectors
slices = []
for i in range(-num_chunks_before, num_chunks_after + 1):
if i == 0:
slices.append(vectors)
else:
slices.append(torch.cat([vectors[:, :, i:, ...], vectors[:, :, :i, ...]], dim=2))
return torch.cat(slices, dim=3)
def _split_hidden_size_dim(self, x, num_attn_heads, attn_head_size):
"""
splits hidden_size dim into attn_head_size and num_attn_heads
"""
new_x_shape = x.size()[:-1] + (num_attn_heads, attn_head_size)
x = x.view(*new_x_shape)
return x.transpose(2, 1)
def _merge_hidden_size_dims(self, x, num_attn_heads, attn_head_size):
"""
merges attn_head_size dim and num_attn_heads dim into hidden_size
"""
x = x.permute(0, 2, 1, 3)
return torch.reshape(x, (x.size()[0], -1, num_attn_heads * attn_head_size))
def _split_seq_length_dim_to(self, vectors, dim_factor_1, dim_factor_2, num_attn_heads, attn_head_size=None):
"""
splits sequence length dim of vectors into `dim_factor_1` and `dim_factor_2` dims
"""
batch_size = vectors.shape[0]
split_dim_shape = (batch_size, num_attn_heads, dim_factor_1, dim_factor_2)
if len(vectors.shape) == 4:
return torch.reshape(vectors, split_dim_shape + (attn_head_size,))
elif len(vectors.shape) == 3:
return torch.reshape(vectors, split_dim_shape)
else:
raise ValueError(f'Input vector rank should be one of [3, 4], but is: {len(vectors.shape)}')
|
class EfficientAttentionMixin:
'''
A few utilities for nn.Modules in Reformer, to be used as a mixin.
'''
def _look_adjacent(self, vectors, num_chunks_before, num_chunks_after):
'''
Used to implement attention between consecutive chunks.
Args:
vectors: array of shape [batch_size, num_attention_heads, n_chunks, chunk_len, ...]
num_chunks_before: chunks before current chunk to include in attention
num_chunks_after: chunks after current chunk to include in attention
Returns:
tensor of shape [num_chunks, N * chunk_length, ...], where N = (1 + num_chunks_before + num_chunks_after).
'''
pass
def _split_hidden_size_dim(self, x, num_attn_heads, attn_head_size):
'''
splits hidden_size dim into attn_head_size and num_attn_heads
'''
pass
def _merge_hidden_size_dims(self, x, num_attn_heads, attn_head_size):
'''
merges attn_head_size dim and num_attn_heads dim into hidden_size
'''
pass
def _split_seq_length_dim_to(self, vectors, dim_factor_1, dim_factor_2, num_attn_heads, attn_head_size=None):
'''
splits sequence length dim of vectors into `dim_factor_1` and `dim_factor_2` dims
'''
pass
| 5 | 5 | 12 | 1 | 7 | 5 | 2 | 0.78 | 0 | 2 | 0 | 2 | 4 | 0 | 4 | 4 | 56 | 8 | 27 | 10 | 22 | 21 | 24 | 10 | 19 | 4 | 0 | 2 | 9
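A toy sketch of the `_look_adjacent` utility above, using four chunks of length one with `num_chunks_before=1` and `num_chunks_after=0`: each chunk ends up holding its predecessor followed by itself:

```python
import torch

# (batch, heads, n_chunks, chunk_len, head_dim): four chunks holding [0], [1], [2], [3]
vectors = torch.arange(4.0).view(1, 1, 4, 1, 1)

num_chunks_before, num_chunks_after = 1, 0
slices = []
for i in range(-num_chunks_before, num_chunks_after + 1):
    if i == 0:
        slices.append(vectors)
    else:
        slices.append(torch.cat([vectors[:, :, i:, ...], vectors[:, :, :i, ...]], dim=2))
adjacent = torch.cat(slices, dim=3)

print(adjacent.flatten(3).squeeze())
# tensor([[3., 0.],
#         [0., 1.],
#         [1., 2.],
#         [2., 3.]])
```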
| 4,840 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py | transformers.models.reformer.modeling_reformer.LSHSelfAttention |
from functools import reduce
import torch
import numpy as np
from operator import mul
from torch import nn
class LSHSelfAttention(nn.Module, EfficientAttentionMixin):
def __init__(self, config, layer_idx=None):
super().__init__()
self.config = config
self.chunk_length = config.lsh_attn_chunk_length
self.num_hashes = config.num_hashes
self.num_buckets = config.num_buckets
self.num_chunks_before = config.lsh_num_chunks_before
self.num_chunks_after = config.lsh_num_chunks_after
self.hash_seed = config.hash_seed
self.is_decoder = config.is_decoder
self.max_position_embeddings = config.max_position_embeddings
self.layer_idx = layer_idx
self.dropout = config.lsh_attention_probs_dropout_prob
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = config.attention_head_size
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.hidden_size = config.hidden_size
self.query_key = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
self.register_buffer('self_mask_value_float16', torch.tensor(-1000.0), persistent=False)
self.register_buffer('self_mask_value_float32', torch.tensor(-100000.0), persistent=False)
self.register_buffer('mask_value_float16', torch.tensor(-10000.0), persistent=False)
self.register_buffer('mask_value_float32', torch.tensor(-1000000000.0), persistent=False)
def forward(self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, buckets=None, past_buckets_states=None, use_cache=False, output_attentions=False, cache_position=None, **kwargs):
sequence_length = hidden_states.shape[1]
batch_size = hidden_states.shape[0]
num_hashes = num_hashes if num_hashes is not None else self.num_hashes
exists_cache = past_buckets_states is not None and len(past_buckets_states) > self.layer_idx
if exists_cache:
assert sequence_length == 1, f'At the moment, auto-regressive language generation is only possible one word at a time. Make sure that input sequence length {sequence_length} equals 1, when `past_buckets_states` is passed.'
query_vectors = self.query_key(hidden_states)
query_vectors = self._split_hidden_size_dim(query_vectors, self.num_attention_heads, self.attention_head_size)
past_buckets = past_buckets_states.buckets_cache[self.layer_idx]
past_states = past_buckets_states.states_cache[self.layer_idx]
if past_buckets.numel() != 0:
key_value_hidden_states, sorted_bucket_idx, buckets = self._get_relevant_hid_states_and_buckets(query_vectors=query_vectors, attention_mask=attention_mask, num_hashes=num_hashes, hidden_states=hidden_states, past_states=past_states, past_buckets=past_buckets)
query_key_vectors = self._query_per_attn_head(key_value_hidden_states)
value_vectors = self._value_per_attn_head(key_value_hidden_states)
query_key_vectors = self._split_seq_length_dim_to(query_key_vectors, num_hashes, -1, self.num_attention_heads, self.attention_head_size)
value_vectors = self._split_seq_length_dim_to(value_vectors, num_hashes, -1, self.num_attention_heads, self.attention_head_size)
query_vectors = query_vectors.unsqueeze(2).repeat(1, 1, num_hashes, 1, 1)
else:
key_value_hidden_states = torch.cat([past_states, hidden_states], dim=1)
query_key_vectors = self.query_key(key_value_hidden_states)
value_vectors = self.value(key_value_hidden_states)
else:
query_vectors = None
query_key_vectors = self.query_key(hidden_states)
value_vectors = self.value(hidden_states)
if not exists_cache or past_buckets.numel() == 0:
query_key_vectors = self._split_hidden_size_dim(query_key_vectors, self.num_attention_heads, self.attention_head_size)
value_vectors = self._split_hidden_size_dim(value_vectors, self.num_attention_heads, self.attention_head_size)
if exists_cache and key_value_hidden_states.shape[1] >= self.chunk_length:
buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)
del hidden_states
assert query_key_vectors.shape[-1] == self.attention_head_size, f'last dim of query_key_vectors is {query_key_vectors.shape[-1]} but should be {self.attention_head_size}.'
assert value_vectors.shape[-1] == self.attention_head_size, f'last dim of value_vectors is {value_vectors.shape[-1]} but should be {self.attention_head_size}.'
do_standard_self_attention = sequence_length <= self.chunk_length or (exists_cache and past_states is not None)
if not do_standard_self_attention:
if self.num_buckets is None:
self._set_num_buckets(sequence_length)
if buckets is None:
buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)
else:
buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes * sequence_length)
assert int(buckets.shape[-1]) == num_hashes * sequence_length, f'last dim of buckets is {buckets.shape[-1]}, but should be {num_hashes * sequence_length}'
sorted_bucket_idx, undo_sorted_bucket_idx = self._get_sorted_bucket_idx_and_undo_sorted_bucket_idx(sequence_length, buckets, num_hashes)
sorted_bucket_idx_per_hash = sorted_bucket_idx % sequence_length
query_key_vectors = self._gather_by_expansion(query_key_vectors, sorted_bucket_idx_per_hash, num_hashes)
value_vectors = self._gather_by_expansion(value_vectors, sorted_bucket_idx_per_hash, num_hashes)
query_key_vectors = self._split_seq_length_dim_to(query_key_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size)
value_vectors = self._split_seq_length_dim_to(value_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size)
if self.chunk_length is None:
assert self.num_chunks_before == 0 and self.num_chunks_after == 0, 'If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and `config.num_chunks_before` are set to 0.'
elif exists_cache and past_buckets.numel() != 0:
sorted_bucket_idx_per_hash = sorted_bucket_idx
else:
sorted_bucket_idx_per_hash = torch.arange(sequence_length, device=query_key_vectors.device).repeat(batch_size, self.num_attention_heads, 1)
sqrt_num = np.sqrt(self.attention_head_size)
key_vectors = self._len_and_dim_norm(query_key_vectors, sqrt_num)
query_vectors = query_vectors if query_vectors is not None else query_key_vectors
del query_key_vectors
out_vectors, logits, attention_probs = self._attend(query_vectors=query_vectors, key_vectors=key_vectors, value_vectors=value_vectors, sorted_bucket_idx_per_hash=sorted_bucket_idx_per_hash, attention_mask=attention_mask, head_mask=head_mask, do_standard_self_attention=do_standard_self_attention, use_cache=exists_cache)
del key_vectors, value_vectors
if not do_standard_self_attention:
out_vectors, logits = ReverseSort.apply(out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx)
if not do_standard_self_attention or (exists_cache and past_buckets.numel() != 0):
if num_hashes > 1:
out_vectors = self._split_seq_length_dim_to(out_vectors, num_hashes, sequence_length, self.num_attention_heads, self.attention_head_size)
logits = self._split_seq_length_dim_to(logits, num_hashes, sequence_length, self.num_attention_heads, self.attention_head_size).unsqueeze(-1)
probs_vectors = torch.exp(logits - torch.logsumexp(logits, dim=2, keepdim=True))
out_vectors = torch.sum(out_vectors * probs_vectors, dim=2)
del probs_vectors
del logits
assert out_vectors.shape == (batch_size, self.num_attention_heads, sequence_length, self.attention_head_size), 'out_vectors have be of shape `[batch_size, config.num_attention_heads, sequence_length, config.attention_head_size]`.'
out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size)
if output_attentions is False:
attention_probs = ()
if buckets is not None:
buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes, -1)
return LSHSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs, buckets=buckets)
def _query_per_attn_head(self, hidden_states):
per_head_query_key = self.query_key.weight.reshape(self.num_attention_heads, self.attention_head_size, self.hidden_size).transpose(-2, -1)
query_key_vectors = torch.einsum('balh,ahr->balr', hidden_states, per_head_query_key)
return query_key_vectors
def _value_per_attn_head(self, hidden_states):
per_head_value = self.value.weight.reshape(self.num_attention_heads, self.attention_head_size, self.hidden_size).transpose(-2, -1)
value_vectors = torch.einsum('balh,ahr->balr', hidden_states, per_head_value)
return value_vectors
def _hash_vectors(self, vectors, num_hashes, attention_mask, increase_num_buckets=False):
batch_size = vectors.shape[0]
if isinstance(self.num_buckets, int):
assert self.num_buckets % 2 == 0, f'There should be an even number of buckets, but `self.num_buckets`: {self.num_buckets}'
rotation_size = self.num_buckets
num_buckets = self.num_buckets
else:
rotation_size, num_buckets = (0, 1)
for bucket_factor in self.num_buckets:
assert bucket_factor % 2 == 0, f'The number of buckets should be even, but `num_bucket`: {bucket_factor}'
rotation_size = rotation_size + bucket_factor
num_buckets = num_buckets * bucket_factor
vectors = vectors.detach()
if self.hash_seed is not None:
torch.manual_seed(self.hash_seed)
rotations_shape = (self.num_attention_heads, vectors.shape[-1], num_hashes, rotation_size // 2)
random_rotations = torch.randn(rotations_shape, device=vectors.device, dtype=vectors.dtype)
rotated_vectors = torch.einsum('bmtd,mdhr->bmhtr', vectors, random_rotations)
if isinstance(self.num_buckets, int) or len(self.num_buckets) == 1:
rotated_vectors = torch.cat([rotated_vectors, -rotated_vectors], dim=-1)
buckets = torch.argmax(rotated_vectors, dim=-1)
else:
buckets, cur_sum, cur_product = (None, 0, 1)
for bucket_factor in self.num_buckets:
rotated_vectors_factor = rotated_vectors[..., cur_sum:cur_sum + bucket_factor // 2]
cur_sum = cur_sum + bucket_factor // 2
rotated_vectors_factor = torch.cat([rotated_vectors_factor, -rotated_vectors_factor], dim=-1)
if buckets is None:
buckets = torch.argmax(rotated_vectors_factor, dim=-1)
else:
buckets = buckets + cur_product * torch.argmax(rotated_vectors_factor, dim=-1)
cur_product = cur_product * bucket_factor
if attention_mask is not None and attention_mask.sum().item() < batch_size * attention_mask.shape[-1]:
num_buckets = num_buckets + 1
buckets_mask = attention_mask.to(torch.bool)[:, None, None, :].expand(buckets.shape)
buckets = torch.where(buckets_mask, buckets, torch.tensor(num_buckets - 1, dtype=torch.long, device=buckets.device))
elif increase_num_buckets:
num_buckets = num_buckets + 1
offsets = torch.arange(num_hashes, device=vectors.device)
offsets = (offsets * num_buckets).view((1, 1, -1, 1))
offsets = offsets.expand((batch_size, self.num_attention_heads) + offsets.shape[-2:])
offset_buckets = (buckets + offsets).flatten(start_dim=2, end_dim=3)
return offset_buckets
def _get_sorted_bucket_idx_and_undo_sorted_bucket_idx(self, sequence_length, buckets, num_hashes):
with torch.no_grad():
sorted_bucket_idx = _stable_argsort(buckets, dim=-1)
indices = torch.arange(sorted_bucket_idx.shape[-1], device=buckets.device).view(1, 1, -1).expand(sorted_bucket_idx.shape)
undo_sorted_bucket_idx = sorted_bucket_idx.new(*sorted_bucket_idx.size())
undo_sorted_bucket_idx.scatter_(-1, sorted_bucket_idx, indices)
return (sorted_bucket_idx, undo_sorted_bucket_idx)
def _set_num_buckets(self, sequence_length):
num_buckets_pow_2 = (2 * (sequence_length // self.chunk_length)).bit_length() - 1
num_buckets = 2 ** num_buckets_pow_2
num_buckets_limit = 2 * max(int((self.max_position_embeddings // self.chunk_length) ** 0.5), self.chunk_length)
if num_buckets > num_buckets_limit:
num_buckets = [2 ** (num_buckets_pow_2 // 2), 2 ** (num_buckets_pow_2 - num_buckets_pow_2 // 2)]
logger.warning(f'config.num_buckets is not set. Setting config.num_buckets to {num_buckets}...')
self.config.num_buckets = num_buckets
self.num_buckets = num_buckets
def _attend(self, query_vectors, key_vectors, value_vectors, sorted_bucket_idx_per_hash, attention_mask, head_mask, do_standard_self_attention, use_cache):
if not do_standard_self_attention:
key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after)
value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after)
query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2))
del query_vectors, key_vectors
if not do_standard_self_attention:
query_bucket_idx = self._split_seq_length_dim_to(sorted_bucket_idx_per_hash, -1, self.chunk_length, self.num_attention_heads)
key_value_bucket_idx = self._look_adjacent(query_bucket_idx, self.num_chunks_before, self.num_chunks_after)
elif use_cache and query_key_dots.ndim > 4:
key_value_bucket_idx = sorted_bucket_idx_per_hash
query_bucket_idx = key_value_bucket_idx.new_ones(key_value_bucket_idx.shape[:-1] + (1,)) * key_value_bucket_idx.max()
elif use_cache and query_key_dots.ndim <= 4:
query_bucket_idx = (query_key_dots.shape[-1] - 1) * torch.ones_like(query_key_dots)[:, :, :, -1]
key_value_bucket_idx = torch.arange(query_key_dots.shape[-1], dtype=torch.long, device=query_key_dots.device)[None, None, :].expand(query_bucket_idx.shape[:2] + (-1,))
else:
query_bucket_idx = key_value_bucket_idx = sorted_bucket_idx_per_hash
if query_key_dots.dtype == torch.float16:
self_mask_value = self.self_mask_value_float16.half()
mask_value = self.mask_value_float16.half()
else:
self_mask_value = self.self_mask_value_float32
mask_value = self.mask_value_float32
if not use_cache:
mask = self._compute_attn_mask(query_bucket_idx, key_value_bucket_idx, attention_mask, query_key_dots.shape, do_standard_self_attention)
if mask is not None:
query_key_dots = torch.where(mask, query_key_dots, mask_value)
del mask
self_mask = torch.ne(query_bucket_idx.unsqueeze(-1), key_value_bucket_idx.unsqueeze(-2)).to(query_bucket_idx.device)
query_key_dots = torch.where(self_mask, query_key_dots, self_mask_value)
del self_mask
logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True)
attention_probs = torch.exp(query_key_dots - logits)
del query_key_dots
attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training)
if head_mask is not None:
attention_probs = attention_probs * head_mask
out_vectors = torch.matmul(attention_probs, value_vectors)
del value_vectors
if out_vectors.ndim > 4:
logits = logits.flatten(start_dim=2, end_dim=3).squeeze(-1)
out_vectors = out_vectors.flatten(start_dim=2, end_dim=3)
return (out_vectors, logits, attention_probs)
def _compute_attn_mask(self, query_indices, key_indices, attention_mask, query_key_dot_shape, do_standard_self_attention):
if attention_mask is not None:
attention_mask = attention_mask.to(torch.bool)[:, None, :]
if not do_standard_self_attention:
attention_mask = attention_mask[:, None, :]
attention_mask = attention_mask.expand(query_indices.shape[:-1] + (-1,))
attention_mask = torch.gather(attention_mask, -1, key_indices)
attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dot_shape)
if self.is_decoder is True:
causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device)
if attention_mask is not None:
attention_mask = causal_mask * attention_mask
else:
attention_mask = causal_mask
return attention_mask
def _get_relevant_hid_states_and_buckets(self, query_vectors, attention_mask, num_hashes, hidden_states, past_states, past_buckets):
hidden_states = torch.cat([past_states, hidden_states], dim=1)
batch_size = hidden_states.shape[0]
sequence_length = hidden_states.shape[1]
max_bucket = self.num_buckets if isinstance(self.num_buckets, int) else reduce(mul, self.num_buckets)
increase_num_buckets = past_buckets.max() > num_hashes * max_bucket - 1
query_buckets = self._hash_vectors(query_vectors, num_hashes, attention_mask, increase_num_buckets=increase_num_buckets)
concat_buckets = torch.cat([past_buckets, query_buckets.unsqueeze(-1)], dim=-1)
bucket_idx = _stable_argsort(concat_buckets, dim=-1)
assert bucket_idx.shape == (batch_size, self.num_attention_heads, num_hashes, sequence_length), f'bucket_idx should have shape {(batch_size, self.num_attention_heads, num_hashes, sequence_length)}, but has shape {bucket_idx.shape}.'
relevant_bucket_idx = (bucket_idx == bucket_idx.shape[-1] - 1).nonzero()
relevant_bucket_idx_chunk = self._expand_to_indices_in_relevant_chunk(relevant_bucket_idx, sequence_length)
relevant_bucket_idx_chunk = bucket_idx[tuple(relevant_bucket_idx_chunk.transpose(0, 1))]
offset = torch.arange(relevant_bucket_idx_chunk.shape[-1], device=hidden_states.device, dtype=torch.long)
bucket_idx_batch_offset = sequence_length * (batch_size * torch.div(offset, relevant_bucket_idx_chunk.shape[-1], rounding_mode='floor'))
relevant_bucket_idx_chunk_all_batch = relevant_bucket_idx_chunk + bucket_idx_batch_offset
hidden_states = hidden_states.reshape((-1, self.hidden_size))
relevant_hidden_states = hidden_states.index_select(0, relevant_bucket_idx_chunk_all_batch)
relevant_hidden_states = relevant_hidden_states.reshape(batch_size, self.num_attention_heads, -1, self.hidden_size)
relevant_bucket_idx_chunk = relevant_bucket_idx_chunk.reshape(batch_size, self.num_attention_heads, num_hashes, -1)
assert relevant_hidden_states.shape[2] == (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes, f'There should be {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes} `hidden_states`, there are {relevant_hidden_states.shape[2]} `hidden_states`.'
assert relevant_bucket_idx_chunk.shape[-1] == (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length, f'There should be {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length} `hidden_states`, there are {relevant_bucket_idx_chunk.shape[-1]} `bucket_idx`.'
return (relevant_hidden_states, relevant_bucket_idx_chunk, query_buckets)
def _expand_to_indices_in_relevant_chunk(self, indices, sequence_length):
start_indices_chunk = (indices[:, -1] // self.chunk_length - self.num_chunks_before) * self.chunk_length
total_chunk_size = self.chunk_length * (1 + self.num_chunks_before + self.num_chunks_after)
expanded_start_indices = start_indices_chunk.unsqueeze(-1).expand(indices.shape[0], total_chunk_size)
chunk_sequence_indices = expanded_start_indices + torch.arange(total_chunk_size, device=indices.device, dtype=torch.long).unsqueeze(0).expand(indices.shape[0], total_chunk_size)
chunk_sequence_indices = chunk_sequence_indices.flatten() % sequence_length
indices = indices.unsqueeze(1).expand((indices.shape[0], total_chunk_size, -1)).flatten(0, 1).clone()
indices[:, -1] = chunk_sequence_indices
return indices
def _len_and_dim_norm(self, vectors, sqrt_num):
"""
length and attention head size dim normalization
"""
vectors = self._len_norm(vectors)
vectors = vectors / sqrt_num
return vectors
def _len_norm(self, x, epsilon=1e-06):
"""
length normalization
"""
variance = torch.mean(x ** 2, -1, keepdim=True)
norm_x = x * torch.rsqrt(variance + epsilon)
return norm_x
def _gather_by_expansion(self, vectors, idxs, num_hashes):
"""
expand dims of idxs and vectors for all hashes and gather
"""
expanded_idxs = idxs.unsqueeze(-1).expand(-1, -1, -1, self.attention_head_size)
vectors = vectors.repeat(1, 1, num_hashes, 1)
return torch.gather(vectors, 2, expanded_idxs)
|
class LSHSelfAttention(nn.Module, EfficientAttentionMixin):
def __init__(self, config, layer_idx=None):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, buckets=None, past_buckets_states=None, use_cache=False, output_attentions=False, cache_position=None, **kwargs):
pass
def _query_per_attn_head(self, hidden_states):
pass
def _value_per_attn_head(self, hidden_states):
pass
def _hash_vectors(self, vectors, num_hashes, attention_mask, increase_num_buckets=False):
pass
def _get_sorted_bucket_idx_and_undo_sorted_bucket_idx(self, sequence_length, buckets, num_hashes):
pass
def _set_num_buckets(self, sequence_length):
pass
def _attend(self, query_vectors, key_vectors, value_vectors, sorted_bucket_idx_per_hash, attention_mask, head_mask, do_standard_self_attention, use_cache):
pass
def _compute_attn_mask(self, query_indices, key_indices, attention_mask, query_key_dot_shape, do_standard_self_attention):
pass
def _get_relevant_hid_states_and_buckets(self, query_vectors, attention_mask, num_hashes, hidden_states, past_states, past_buckets):
pass
def _expand_to_indices_in_relevant_chunk(self, indices, sequence_length):
pass
def _len_and_dim_norm(self, vectors, sqrt_num):
'''
length and attention head size dim normalization
'''
pass
def _len_norm(self, x, epsilon=1e-06):
'''
length normalization
'''
pass
def _gather_by_expansion(self, vectors, idxs, num_hashes):
'''
expand dims of idxs and vectors for all hashes and gather
'''
pass
| 15 | 3 | 47 | 7 | 32 | 8 | 4 | 0.26 | 2 | 5 | 1 | 0 | 14 | 16 | 14 | 28 | 666 | 111 | 442 | 126 | 402 | 113 | 256 | 101 | 241 | 17 | 1 | 3 | 53
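A sketch of the angular-LSH step used by `_hash_vectors` above: vectors are projected onto random rotations and the bucket is the argmax over the concatenation `[rotated, -rotated]`, so nearby vectors tend to share a bucket (the sizes and seed below are arbitrary):

```python
import torch

torch.manual_seed(0)
num_buckets, head_dim = 8, 16

vectors = torch.randn(5, head_dim)
vectors[1] = vectors[0] + 0.01 * torch.randn(head_dim)  # a near-duplicate of vectors[0]

random_rotations = torch.randn(head_dim, num_buckets // 2)
rotated = vectors @ random_rotations
buckets = torch.argmax(torch.cat([rotated, -rotated], dim=-1), dim=-1)

print(buckets)  # vectors[0] and vectors[1] land in the same bucket with high probability
```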
| 4,841 | huggingface/pytorch-pretrained-BERT | huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py | transformers.models.reformer.modeling_reformer.LocalSelfAttention |
import numpy as np
import torch
from torch import nn
class LocalSelfAttention(nn.Module, EfficientAttentionMixin):
def __init__(self, config, layer_idx=None):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.chunk_length = config.local_attn_chunk_length
self.num_chunks_before = config.local_num_chunks_before
self.num_chunks_after = config.local_num_chunks_after
self.is_decoder = config.is_decoder
self.pad_token_id = config.pad_token_id
self.attention_head_size = config.attention_head_size
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.hidden_size = config.hidden_size
self.layer_idx = layer_idx
self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
self.dropout = config.local_attention_probs_dropout_prob
self.register_buffer('mask_value_float16', torch.tensor(-10000.0), persistent=False)
self.register_buffer('mask_value_float32', torch.tensor(-1000000000.0), persistent=False)
def forward(self, hidden_states, attention_mask=None, head_mask=None, past_buckets_states=None, use_cache=False, output_attentions=False, **kwargs):
sequence_length = hidden_states.shape[1]
batch_size = hidden_states.shape[0]
if past_buckets_states is not None and len(past_buckets_states) > self.layer_idx:
past_buckets = past_buckets_states.buckets_cache[self.layer_idx]
past_states = past_buckets_states.states_cache[self.layer_idx]
assert past_buckets.numel() == 0, 'LocalSelfAttention should not make use of `buckets`. There seems to be an error when caching hidden_states_and_buckets.'
key_value_hidden_states = self._retrieve_relevant_hidden_states(past_states, self.chunk_length, self.num_chunks_before)
key_value_hidden_states = torch.cat([key_value_hidden_states, hidden_states], dim=1)
query_vectors = self.query(hidden_states)
key_vectors = self.key(key_value_hidden_states)
value_vectors = self.value(key_value_hidden_states)
del key_value_hidden_states
else:
query_vectors = self.query(hidden_states)
key_vectors = self.key(hidden_states)
value_vectors = self.value(hidden_states)
query_vectors = self._split_hidden_size_dim(query_vectors, self.num_attention_heads, self.attention_head_size)
key_vectors = self._split_hidden_size_dim(key_vectors, self.num_attention_heads, self.attention_head_size)
value_vectors = self._split_hidden_size_dim(value_vectors, self.num_attention_heads, self.attention_head_size)
assert query_vectors.shape[-1] == self.attention_head_size, f'last dim of query_key_vectors is {query_vectors.shape[-1]} but should be {self.attention_head_size}.'
assert key_vectors.shape[-1] == self.attention_head_size, f'last dim of query_key_vectors is {key_vectors.shape[-1]} but should be {self.attention_head_size}.'
assert value_vectors.shape[-1] == self.attention_head_size, f'last dim of query_key_vectors is {value_vectors.shape[-1]} but should be {self.attention_head_size}.'
if self.chunk_length is None:
assert self.num_chunks_before == 0 and self.num_chunks_after == 0, 'If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and `config.num_chunks_before` are set to 0.'
key_vectors = key_vectors / np.sqrt(self.attention_head_size)
indices = torch.arange(sequence_length, device=query_vectors.device).repeat(batch_size, self.num_attention_heads, 1)
do_standard_self_attention = sequence_length <= self.chunk_length
if not do_standard_self_attention:
query_vectors = self._split_seq_length_dim_to(query_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size)
key_vectors = self._split_seq_length_dim_to(key_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size)
value_vectors = self._split_seq_length_dim_to(value_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size)
query_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads)
key_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads)
key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after)
value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after)
key_indices = self._look_adjacent(key_indices, self.num_chunks_before, self.num_chunks_after)
else:
query_indices = key_indices = indices
query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2))
del query_vectors, key_vectors
mask = self._compute_attn_mask(query_indices, key_indices, attention_mask, query_key_dots.shape, do_standard_self_attention)
if mask is not None:
if query_key_dots.dtype == torch.float16:
mask_value = self.mask_value_float16.half()
else:
mask_value = self.mask_value_float32
query_key_dots = torch.where(mask, query_key_dots, mask_value)
del mask
logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True)
attention_probs = torch.exp(query_key_dots - logits)
del logits
attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training)
if head_mask is not None:
attention_probs = attention_probs * head_mask
out_vectors = torch.matmul(attention_probs, value_vectors)
del value_vectors
if not do_standard_self_attention:
out_vectors = out_vectors.flatten(start_dim=2, end_dim=3)
assert out_vectors.shape == (batch_size, self.num_attention_heads, sequence_length, self.attention_head_size)
out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size)
if output_attentions is False:
attention_probs = ()
return LocalSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs)
def _compute_attn_mask(self, query_indices, key_indices, attention_mask, query_key_dots_shape, do_standard_self_attention):
if attention_mask is not None:
attention_mask = attention_mask.to(torch.bool)[:, None, :]
if not do_standard_self_attention:
attention_mask = self._split_seq_length_dim_to(attention_mask, -1, self.chunk_length, 1)
attention_mask = self._look_adjacent(attention_mask, self.num_chunks_before, self.num_chunks_after)
attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dots_shape)
if self.is_decoder is True:
causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device)
if attention_mask is not None:
attention_mask = causal_mask * attention_mask
else:
attention_mask = causal_mask
return attention_mask
@staticmethod
def _retrieve_relevant_hidden_states(previous_hidden_states, chunk_length, num_chunks_before):
start_position = (previous_hidden_states.shape[1] // chunk_length - num_chunks_before) * chunk_length
return previous_hidden_states[:, start_position:]
|
class LocalSelfAttention(nn.Module, EfficientAttentionMixin):
def __init__(self, config, layer_idx=None):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, past_buckets_states=None, use_cache=False, output_attentions=False, **kwargs):
pass
def _compute_attn_mask(self, query_indices, key_indices, attention_mask, query_key_dots_shape, do_standard_self_attention):
pass
@staticmethod
def _retrieve_relevant_hidden_states(previous_hidden_states, chunk_length, num_chunks_before):
pass
| 6 | 0 | 54 | 10 | 37 | 8 | 4 | 0.21 | 2 | 2 | 0 | 0 | 3 | 13 | 4 | 18 | 222 | 41 | 150 | 48 | 133 | 31 | 93 | 36 | 88 | 9 | 1 | 2 | 16
|
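The `LocalSelfAttention` code above implements chunked local attention: the sequence is cut into chunks of `chunk_length`, and each query chunk attends to its own keys plus `num_chunks_before`/`num_chunks_after` neighbouring chunks. A minimal sketch of that neighbour-gathering idea, assuming a simple wrap-around gather (the helper names are illustrative, not the module's own API):

```python
import torch

def split_into_chunks(x, chunk_length):
    # (batch, heads, seq, dim) -> (batch, heads, num_chunks, chunk_length, dim)
    b, h, s, d = x.shape
    return x.reshape(b, h, s // chunk_length, chunk_length, d)

def look_adjacent(chunked, num_before, num_after):
    # For every chunk, concatenate the `num_before` chunks to its left and the
    # `num_after` chunks to its right along the chunk_length axis (wrapping
    # around), mirroring the idea behind `_look_adjacent` in the module above.
    slices = []
    for offset in range(-num_before, num_after + 1):
        slices.append(torch.roll(chunked, shifts=-offset, dims=2))
    return torch.cat(slices, dim=3)

x = torch.arange(2 * 1 * 8 * 1, dtype=torch.float).reshape(2, 1, 8, 1)
chunks = split_into_chunks(x, chunk_length=4)             # (2, 1, 2, 4, 1)
keys = look_adjacent(chunks, num_before=1, num_after=0)   # (2, 1, 2, 8, 1)
print(keys.shape)
```

Each query chunk of length 4 now sees 8 key positions (its own chunk plus the chunk before it), which is what keeps the attention cost linear in the sequence length rather than quadratic.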
4,842
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.PositionEmbeddings
|
from torch import nn
class PositionEmbeddings(nn.Module):
"""Constructs conventional position embeddings of shape `[max_pos_embeddings, hidden_size]`."""
def __init__(self, config):
super().__init__()
self.dropout = config.hidden_dropout_prob
self.embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size)
def forward(self, position_ids):
position_embeddings = self.embedding(position_ids)
position_embeddings = nn.functional.dropout(position_embeddings, p=self.dropout, training=self.training)
return position_embeddings
|
class PositionEmbeddings(nn.Module):
'''Constructs conventional position embeddings of shape `[max_pos_embeddings, hidden_size]`.'''
def __init__(self, config):
pass
def forward(self, position_ids):
pass
| 3 | 1 | 4 | 0 | 4 | 0 | 1 | 0.11 | 1 | 1 | 0 | 0 | 2 | 2 | 2 | 12 | 12 | 2 | 9 | 6 | 6 | 1 | 9 | 6 | 6 | 1 | 1 | 0 | 2
|
4,843
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReformerAttention
|
from torch import nn
class ReformerAttention(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.layer_id = layer_id
self.attn_layers = config.attn_layers
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if len(set(self.attn_layers)) == 1 and self.attn_layers[0] == 'lsh':
self.self_attention = LSHSelfAttention(config, layer_idx=layer_id)
elif len(set(self.attn_layers)) == 1 and self.attn_layers[0] == 'local':
self.self_attention = LocalSelfAttention(config, layer_idx=layer_id)
elif len(set(self.attn_layers)) == 2 and set(self.attn_layers) == {'lsh', 'local'}:
if self.attn_layers[self.layer_id] == 'lsh':
self.self_attention = LSHSelfAttention(config, layer_idx=layer_id)
else:
self.self_attention = LocalSelfAttention(config, layer_idx=layer_id)
else:
raise NotImplementedError(f"Only attn layer types 'lsh' and 'local' exist, but got `config.attn_layers`: {self.attn_layers}. Select attn layer types from ['lsh', 'local'] only.")
self.output = ReformerSelfOutput(config)
def forward(self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_attentions=False, buckets=None, cache_position=None):
hidden_states = self.layer_norm(hidden_states)
self_attention_outputs = self.self_attention(hidden_states=hidden_states, head_mask=head_mask, attention_mask=attention_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, output_attentions=output_attentions, buckets=buckets, cache_position=cache_position)
if hasattr(self_attention_outputs, 'buckets'):
buckets = self_attention_outputs.buckets
else:
buckets = None
if use_cache and past_buckets_states is not None:
states = hidden_states[:, :orig_sequence_length] if len(past_buckets_states.states_cache) <= self.layer_id else hidden_states
buckets = buckets[:, :, :, :orig_sequence_length] if len(past_buckets_states.buckets_cache) <= self.layer_id and buckets is not None and (orig_sequence_length > 1) else buckets
buckets, hidden_states = past_buckets_states.update(buckets, states[:, :orig_sequence_length], self.layer_id)
attention_output = self.output(self_attention_outputs.hidden_states)
return AttentionOutput(hidden_states=attention_output, attention_probs=self_attention_outputs.attention_probs, buckets=buckets)
|
class ReformerAttention(nn.Module):
def __init__(self, config, layer_id=0):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_attentions=False, buckets=None, cache_position=None):
pass
| 3 | 0 | 44 | 5 | 36 | 4 | 6 | 0.11 | 1 | 6 | 3 | 0 | 2 | 5 | 2 | 12 | 90 | 10 | 72 | 24 | 58 | 8 | 33 | 13 | 30 | 7 | 1 | 2 | 12
|
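`ReformerAttention` chooses between LSH and local self-attention per layer based on `config.attn_layers`. A small, self-contained illustration of that dispatch rule (plain Python; the function name is hypothetical):

```python
# `attn_layers` is assumed to list one entry per hidden layer, e.g. coming from
# ReformerConfig(attn_layers=["local", "lsh", "local", "lsh"]).
attn_layers = ["local", "lsh", "local", "lsh"]

def attention_type_for_layer(attn_layers, layer_id):
    kinds = set(attn_layers)
    if kinds == {"lsh"} or kinds == {"local"}:
        return attn_layers[0]            # homogeneous stack: same type everywhere
    if kinds == {"lsh", "local"}:
        return attn_layers[layer_id]     # mixed stack: type chosen by layer index
    raise NotImplementedError(f"Unsupported attn layer types: {kinds}")

print([attention_type_for_layer(attn_layers, i) for i in range(len(attn_layers))])
# ['local', 'lsh', 'local', 'lsh']
```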
4,844
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReformerClassificationHead
|
import torch
from torch import nn
class ReformerClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(2 * config.hidden_size, config.hidden_size)
classifier_dropout = config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, hidden_states, **kwargs):
hidden_states = hidden_states[:, 0, :]
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
|
class ReformerClassificationHead(nn.Module):
'''Head for sentence-level classification tasks.'''
def __init__(self, config):
pass
def forward(self, hidden_states, **kwargs):
pass
| 3 | 1 | 8 | 0 | 8 | 1 | 2 | 0.12 | 1 | 1 | 0 | 0 | 2 | 3 | 2 | 12 | 20 | 2 | 17 | 7 | 14 | 2 | 15 | 7 | 12 | 2 | 1 | 0 | 3
|
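The classification head above pools by taking the hidden state of the first token (which is `2 * hidden_size` wide in Reformer), then applies dropout, a dense layer with tanh, dropout again, and the output projection. A standalone sketch of that flow with made-up sizes:

```python
import torch
from torch import nn

hidden_size, num_labels, batch, seq = 16, 3, 2, 5
dense = nn.Linear(2 * hidden_size, hidden_size)   # Reformer hidden states are 2 * hidden_size wide
out_proj = nn.Linear(hidden_size, num_labels)
dropout = nn.Dropout(0.1)

sequence_output = torch.randn(batch, seq, 2 * hidden_size)
x = sequence_output[:, 0, :]            # take the first token, as the head above does
x = dropout(x)
x = torch.tanh(dense(x))
logits = out_proj(dropout(x))
print(logits.shape)                     # torch.Size([2, 3])
```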
4,845
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReformerEmbeddings
|
import torch
from torch import nn
class ReformerEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.max_position_embeddings = config.max_position_embeddings
self.dropout = config.hidden_dropout_prob
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = AxialPositionEmbeddings(config) if config.axial_pos_embds else PositionEmbeddings(config)
def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, start_idx_pos_encodings=0):
if input_ids is not None:
input_shape = input_ids.size()
device = input_ids.device
else:
input_shape = inputs_embeds.size()[:-1]
device = inputs_embeds.device
seq_length = input_shape[1]
if position_ids is None:
position_ids = torch.arange(start_idx_pos_encodings, start_idx_pos_encodings + seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if position_ids.shape[-1] > self.max_position_embeddings:
raise ValueError(f'Sequence Length: {position_ids.shape[-1]} has to be less or equal than config.max_position_embeddings {self.max_position_embeddings}.')
embeddings = nn.functional.dropout(inputs_embeds, p=self.dropout, training=self.training)
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
return embeddings
|
class ReformerEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, start_idx_pos_encodings=0):
pass
| 3 | 1 | 20 | 3 | 16 | 1 | 4 | 0.09 | 1 | 4 | 2 | 0 | 2 | 4 | 2 | 12 | 44 | 8 | 33 | 12 | 30 | 3 | 25 | 12 | 22 | 5 | 1 | 1 | 7
|
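When `position_ids` are not passed, `ReformerEmbeddings.forward` builds them from `start_idx_pos_encodings`, so cached sequential decoding can continue counting positions where the previous step ended. A minimal sketch of that construction (the helper name is illustrative):

```python
import torch

def default_position_ids(batch_size, seq_length, start_idx_pos_encodings=0):
    # Same recipe as the forward above: a contiguous range starting at the
    # cached offset, broadcast over the batch dimension.
    position_ids = torch.arange(
        start_idx_pos_encodings, start_idx_pos_encodings + seq_length, dtype=torch.long
    )
    return position_ids.unsqueeze(0).expand(batch_size, seq_length)

print(default_position_ids(2, 4))                               # positions 0..3 for a fresh sequence
print(default_position_ids(2, 4, start_idx_pos_encodings=10))   # continues at 10..13
```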
4,846
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReformerEncoder
|
import torch
from torch import nn
class ReformerEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = config.hidden_dropout_prob
self.layers = nn.ModuleList([ReformerLayer(config, i) for i in range(config.num_hidden_layers)])
self.layer_norm = nn.LayerNorm(2 * config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_hidden_states=False, output_attentions=False):
all_hidden_states = []
all_attentions = []
if use_cache and past_buckets_states is None:
past_buckets_states = ReformerDynamicCache()
elif use_cache and isinstance(past_buckets_states, tuple):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `ReformerDynamicCache` instead, e.g. `past_key_values=ReformerDynamicCache.from_legacy_cache(past_key_values)`.')
past_buckets_states = ReformerDynamicCache.from_legacy_cache(past_buckets_states)
hidden_states = torch.cat([hidden_states, hidden_states], dim=-1)
hidden_states = _ReversibleFunction.apply(hidden_states, self.layers, attention_mask, head_mask, num_hashes, all_hidden_states, all_attentions, past_buckets_states, use_cache, orig_sequence_length, output_hidden_states, output_attentions)
hidden_states = self.layer_norm(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
next_cache = past_buckets_states if use_cache else None
return ReformerEncoderOutput(hidden_states=hidden_states, all_hidden_states=all_hidden_states, all_attentions=all_attentions, past_buckets_states=next_cache)
|
class ReformerEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_hidden_states=False, output_attentions=False):
pass
| 3 | 0 | 29 | 3 | 22 | 4 | 2 | 0.16 | 1 | 4 | 2 | 0 | 2 | 3 | 2 | 12 | 59 | 7 | 45 | 20 | 31 | 7 | 16 | 8 | 13 | 2 | 1 | 1 | 3
|
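Before running the reversible layer stack, the encoder concatenates the hidden states with themselves along the feature dimension, giving the two residual streams that `_ReversibleFunction` operates on; this is also why the final layer norm acts on `2 * hidden_size` features. A tiny sketch of that duplication, assuming toy sizes:

```python
import torch

batch, seq, hidden_size = 2, 4, 8
hidden_states = torch.randn(batch, seq, hidden_size)

# The reversible stack works on two residual streams, so the encoder feeds it
# [hidden_states, hidden_states] concatenated on the feature dimension.
reversible_input = torch.cat([hidden_states, hidden_states], dim=-1)
print(reversible_input.shape)   # torch.Size([2, 4, 16]) -> 2 * hidden_size features

# Conceptually the two streams are recovered as:
x1, x2 = torch.chunk(reversible_input, 2, dim=-1)
print(torch.equal(x1, x2))      # True before the first layer
```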
4,847
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReformerFeedForwardDense
|
from ...activations import ACT2FN
from torch import nn
class ReformerFeedForwardDense(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = config.hidden_dropout_prob
if isinstance(config.hidden_act, str):
self.act_fn = ACT2FN[config.hidden_act]
else:
self.act_fn = config.hidden_act
self.dense = nn.Linear(config.hidden_size, config.feed_forward_size)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = self.act_fn(hidden_states)
return hidden_states
|
class ReformerFeedForwardDense(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
| 3 | 0 | 8 | 1 | 7 | 0 | 2 | 0 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 17 | 3 | 14 | 6 | 11 | 0 | 13 | 6 | 10 | 2 | 1 | 1 | 3
|
4,848
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReformerFeedForwardOutput
|
from torch import nn
class ReformerFeedForwardOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = config.hidden_dropout_prob
self.dense = nn.Linear(config.feed_forward_size, config.hidden_size)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
return hidden_states
|
class ReformerFeedForwardOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
| 3 | 0 | 5 | 1 | 4 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 2 | 2 | 12 | 11 | 2 | 9 | 5 | 6 | 0 | 9 | 5 | 6 | 1 | 1 | 0 | 2
|
4,849
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReformerForMaskedLM
|
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Any, Optional, Union
from ...modeling_outputs import CausalLMOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput
from ...utils import DUMMY_INPUTS, DUMMY_MASK, ModelOutput, auto_docstring, logging
@auto_docstring
class ReformerForMaskedLM(ReformerPreTrainedModel):
_tied_weights_keys = ['lm_head.decoder.weight', 'lm_head.decoder.bias']
def __init__(self, config):
super().__init__(config)
assert not config.is_decoder, 'If you want to use `ReformerForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.'
self.reformer = ReformerModel(config)
self.lm_head = ReformerOnlyLMHead(config)
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
self.lm_head.bias = new_embeddings.bias
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, num_hashes: Optional[int]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be
a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices
are automatically padded to be a multiple of the chunk length.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
num_hashes (`int`, *optional*):
The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites
the default defined in `config.num_hashes`.
For more information, see `num_hashes` in [`ReformerConfig`].
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
the loss is only computed for the tokens with labels
<Tip warning={true}>
This example uses a false checkpoint since we don't have any available pretrained model for the masked language
modeling task with the Reformer architecture.
</Tip>
Example:
```python
>>> import torch
>>> from transformers import AutoTokenizer, ReformerForMaskedLM
>>> tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-reformer")
>>> model = ReformerForMaskedLM.from_pretrained("hf-internal-testing/tiny-random-reformer")
>>> # add mask_token
>>> tokenizer.add_special_tokens({"mask_token": "[MASK]"}) # doctest: +IGNORE_RESULT
>>> inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
>>> # resize model's embedding matrix
>>> model.resize_token_embeddings(new_num_tokens=model.config.vocab_size + 1) # doctest: +IGNORE_RESULT
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> # retrieve index of [MASK]
>>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
>>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1)
>>> predicted_token = tokenizer.decode(predicted_token_id)
```
```python
>>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"]
>>> # mask labels of non-[MASK] tokens
>>> labels = torch.where(
... inputs.input_ids == tokenizer.mask_token_id, labels[:, : inputs["input_ids"].shape[-1]], -100
... )
>>> outputs = model(**inputs, labels=labels)
>>> loss = round(outputs.loss.item(), 2)
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
reformer_outputs = self.reformer(input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, use_cache=False, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict)
sequence_output = reformer_outputs[0]
logits = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + reformer_outputs[1:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return MaskedLMOutput(loss=masked_lm_loss, logits=logits, hidden_states=reformer_outputs.hidden_states, attentions=reformer_outputs.attentions)
|
@auto_docstring
class ReformerForMaskedLM(ReformerPreTrainedModel):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, num_hashes: Optional[int]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be
a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices
are automatically padded to be a multiple of the chunk length.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
num_hashes (`int`, *optional*):
The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites
the default defined in `config.num_hashes`.
For more information, see `num_hashes` in [`ReformerConfig`].
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
the loss is only computed for the tokens with labels
<Tip warning={true}>
This example uses a false checkpoint since we don't have any available pretrained model for the masked language
modeling task with the Reformer architecture.
</Tip>
Example:
```python
>>> import torch
>>> from transformers import AutoTokenizer, ReformerForMaskedLM
>>> tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-reformer")
>>> model = ReformerForMaskedLM.from_pretrained("hf-internal-testing/tiny-random-reformer")
>>> # add mask_token
>>> tokenizer.add_special_tokens({"mask_token": "[MASK]"}) # doctest: +IGNORE_RESULT
>>> inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
>>> # resize model's embedding matrix
>>> model.resize_token_embeddings(new_num_tokens=model.config.vocab_size + 1) # doctest: +IGNORE_RESULT
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> # retrieve index of [MASK]
>>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
>>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1)
>>> predicted_token = tokenizer.decode(predicted_token_id)
```
```python
>>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"]
>>> # mask labels of non-[MASK] tokens
>>> labels = torch.where(
... inputs.input_ids == tokenizer.mask_token_id, labels[:, : inputs["input_ids"].shape[-1]], -100
... )
>>> outputs = model(**inputs, labels=labels)
>>> loss = round(outputs.loss.item(), 2)
```
'''
pass
| 7 | 1 | 29 | 5 | 14 | 10 | 2 | 0.69 | 1 | 7 | 3 | 0 | 4 | 2 | 4 | 6 | 122 | 24 | 59 | 27 | 40 | 41 | 26 | 14 | 21 | 5 | 2 | 1 | 8
|
4,850
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReformerForQuestionAnswering
|
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Any, Optional, Union
from ...modeling_outputs import CausalLMOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput
from torch import nn
from ...utils import DUMMY_INPUTS, DUMMY_MASK, ModelOutput, auto_docstring, logging
@auto_docstring
class ReformerForQuestionAnswering(ReformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.reformer = ReformerModel(config)
self.qa_outputs = nn.Linear(2 * config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, num_hashes: Optional[int]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be
a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices
are automatically padded to be a multiple of the chunk length.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
num_hashes (`int`, *optional*):
The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites
the default defined in `config.num_hashes`.
For more information, see `num_hashes` in [`ReformerConfig`].
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
reformer_outputs = self.reformer(input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, use_cache=False, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict)
sequence_output = reformer_outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + reformer_outputs[1:]
return (total_loss,) + output if total_loss is not None else output
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=reformer_outputs.hidden_states, attentions=reformer_outputs.attentions)
|
@auto_docstring
class ReformerForQuestionAnswering(ReformerPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, num_hashes: Optional[int]=None, start_positions: Optional[torch.Tensor]=None, end_positions: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be
a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices
are automatically padded to be a multiple of the chunk length.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
num_hashes (`int`, *optional*):
The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites
the default defined in `config.num_hashes`.
For more information, see `num_hashes` in [`ReformerConfig`].
'''
pass
| 5 | 1 | 42 | 5 | 31 | 8 | 4 | 0.22 | 1 | 6 | 2 | 0 | 2 | 3 | 2 | 4 | 92 | 10 | 68 | 30 | 46 | 15 | 32 | 16 | 29 | 7 | 2 | 2 | 8
|
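In the QA head above, a single linear layer produces both start and end logits, out-of-range target positions are clamped to `ignored_index`, and the final loss is the average of two cross-entropy terms. A reduced sketch of that loss computation with random tensors:

```python
import torch
from torch.nn import CrossEntropyLoss

batch, seq = 2, 10
start_logits = torch.randn(batch, seq)
end_logits = torch.randn(batch, seq)
start_positions = torch.tensor([3, 25])   # 25 is intentionally out of range
end_positions = torch.tensor([5, 9])

ignored_index = start_logits.size(1)                        # == sequence length
start_positions = start_positions.clamp(0, ignored_index)   # out-of-range -> ignored_index
end_positions = end_positions.clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)     # clamped targets contribute no loss
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
print(total_loss.item())
```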
4,851
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReformerForSequenceClassification
|
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Any, Optional, Union
from ...modeling_outputs import CausalLMOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput
from ...utils import DUMMY_INPUTS, DUMMY_MASK, ModelOutput, auto_docstring, logging
@auto_docstring(custom_intro='\n Reformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class ReformerForSequenceClassification(ReformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.reformer = ReformerModel(config)
self.classifier = ReformerClassificationHead(config)
if config.is_decoder is True:
logger.warning('You might want to disable causal masking for sequence classification')
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, num_hashes: Optional[int]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be
a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices
are automatically padded to be a multiple of the chunk length.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
num_hashes (`int`, *optional*):
The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites
the default defined in `config.num_hashes`.
For more information, see `num_hashes` in [`ReformerConfig`].
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Example of single-label classification:
```python
>>> import torch
>>> from transformers import AutoTokenizer, ReformerForSequenceClassification
>>> tokenizer = AutoTokenizer.from_pretrained("google/reformer-crime-and-punishment")
>>> model = ReformerForSequenceClassification.from_pretrained("google/reformer-crime-and-punishment")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> predicted_class_id = logits.argmax().item()
>>> label = model.config.id2label[predicted_class_id]
```
```python
>>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
>>> num_labels = len(model.config.id2label)
>>> model = ReformerForSequenceClassification.from_pretrained(
... "google/reformer-crime-and-punishment", num_labels=num_labels
... )
>>> labels = torch.tensor(1)
>>> loss = model(**inputs, labels=labels).loss
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.reformer(input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring(custom_intro='\n Reformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class ReformerForSequenceClassification(ReformerPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, num_hashes: Optional[int]=None, labels: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be
a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices
are automatically padded to be a multiple of the chunk length.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
num_hashes (`int`, *optional*):
The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites
the default defined in `config.num_hashes`.
For more information, see `num_hashes` in [`ReformerConfig`].
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Example of single-label classification:
```python
>>> import torch
>>> from transformers import AutoTokenizer, ReformerForSequenceClassification
>>> tokenizer = AutoTokenizer.from_pretrained("google/reformer-crime-and-punishment")
>>> model = ReformerForSequenceClassification.from_pretrained("google/reformer-crime-and-punishment")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> predicted_class_id = logits.argmax().item()
>>> label = model.config.id2label[predicted_class_id]
```
```python
>>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
>>> num_labels = len(model.config.id2label)
>>> model = ReformerForSequenceClassification.from_pretrained(
... "google/reformer-crime-and-punishment", num_labels=num_labels
... )
>>> labels = torch.tensor(1)
>>> loss = model(**inputs, labels=labels).loss
```
'''
pass
| 5 | 1 | 56 | 9 | 33 | 15 | 7 | 0.42 | 1 | 7 | 3 | 0 | 2 | 4 | 2 | 4 | 116 | 18 | 69 | 26 | 52 | 29 | 35 | 13 | 32 | 12 | 2 | 3 | 14
|
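When `config.problem_type` is unset, the sequence-classification forward infers it from `num_labels` and the label dtype, then uses MSE, cross-entropy, or BCE-with-logits accordingly. A compact sketch of that decision (the function name is illustrative):

```python
import torch

def infer_problem_type(num_labels, labels):
    # Mirrors the branching in the forward above.
    if num_labels == 1:
        return "regression"
    if num_labels > 1 and labels.dtype in (torch.long, torch.int):
        return "single_label_classification"
    return "multi_label_classification"

print(infer_problem_type(1, torch.tensor([0.3])))               # regression
print(infer_problem_type(3, torch.tensor([2])))                 # single_label_classification
print(infer_problem_type(3, torch.tensor([[0.0, 1.0, 1.0]])))   # multi_label_classification
```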
4,852
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReformerLayer
|
import sys
import torch
from torch import nn
class ReformerLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.attention = ReformerAttention(config, layer_id)
self.attention_seed = None
self.feed_forward_seed = None
self.feed_forward = ChunkReformerFeedForward(config)
def _init_attention_seed(self):
"""
This function sets a new seed for the attention layer to make dropout deterministic for both forward calls: 1
normal forward call and 1 forward call in backward to recalculate activations.
"""
if hasattr(torch.cuda, 'default_generators') and len(torch.cuda.default_generators) > 0:
device_idx = torch.cuda.current_device()
self.attention_seed = torch.cuda.default_generators[device_idx].seed()
else:
self.attention_seed = int(torch.seed() % sys.maxsize)
torch.manual_seed(self.attention_seed)
def _init_feed_forward_seed(self):
"""
This function sets a new seed for the feed forward layer to make dropout deterministic for both forward calls:
1 normal forward call and 1 forward call in backward to recalculate activations.
"""
if hasattr(torch.cuda, 'default_generators') and len(torch.cuda.default_generators) > 0:
device_idx = torch.cuda.current_device()
self.feed_forward_seed = torch.cuda.default_generators[device_idx].seed()
else:
self.feed_forward_seed = int(torch.seed() % sys.maxsize)
torch.manual_seed(self.feed_forward_seed)
def forward(self, prev_attn_output, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_attentions=False):
with torch.no_grad():
if self.training:
self._init_attention_seed()
attn_outputs = self.attention(hidden_states=hidden_states, head_mask=head_mask, attention_mask=attention_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, orig_sequence_length=orig_sequence_length, output_attentions=output_attentions)
attn_output = attn_outputs.hidden_states
attn_output = prev_attn_output + attn_output
del prev_attn_output
if self.training:
self._init_feed_forward_seed()
hidden_states = hidden_states + self.feed_forward(attn_output)
return ReformerOutput(attn_output=attn_output, hidden_states=hidden_states, attention_probs=attn_outputs.attention_probs, buckets=attn_outputs.buckets)
def backward_pass(self, next_attn_output, hidden_states, grad_attn_output, grad_hidden_states, attention_mask=None, head_mask=None, buckets=None):
assert self.training, 'If you want to train `ReformerModel` and its variations, make sure to use `model.train()` to put the model into training mode.'
with torch.enable_grad():
next_attn_output.requires_grad = True
torch.manual_seed(self.feed_forward_seed)
res_hidden_states = self.feed_forward(next_attn_output)
res_hidden_states.backward(grad_hidden_states, retain_graph=True)
with torch.no_grad():
hidden_states = hidden_states - res_hidden_states
del res_hidden_states
grad_attn_output = grad_attn_output + next_attn_output.grad
next_attn_output.grad = None
with torch.enable_grad():
hidden_states.requires_grad = True
torch.manual_seed(self.attention_seed)
output = self.attention(hidden_states=hidden_states, head_mask=head_mask, attention_mask=attention_mask, buckets=buckets).hidden_states
output.backward(grad_attn_output, retain_graph=True)
with torch.no_grad():
attn_output = next_attn_output - output
del output, next_attn_output
grad_hidden_states = grad_hidden_states + hidden_states.grad
hidden_states.grad = None
hidden_states = hidden_states.detach()
return ReformerBackwardOutput(attn_output=attn_output, hidden_states=hidden_states, grad_attn_output=grad_attn_output, grad_hidden_states=grad_hidden_states)
|
class ReformerLayer(nn.Module):
def __init__(self, config, layer_id=0):
pass
def _init_attention_seed(self):
'''
This function sets a new seed for the attention layer to make dropout deterministic for both forward calls: 1
normal forward call and 1 forward call in backward to recalculate activations.
'''
pass
def _init_feed_forward_seed(self):
'''
This function sets a new seed for the feed forward layer to make dropout deterministic for both forward calls:
1 normal forward call and 1 forward call in backward to recalculate activations.
'''
pass
def forward(self, prev_attn_output, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_attentions=False):
pass
def backward_pass(self, next_attn_output, hidden_states, grad_attn_output, grad_hidden_states, attention_mask=None, head_mask=None, buckets=None):
    pass
| 6 | 2 | 32 | 4 | 21 | 8 | 2 | 0.38 | 1 | 4 | 2 | 0 | 5 | 4 | 5 | 15 | 166 | 23 | 104 | 37 | 78 | 39 | 55 | 17 | 49 | 3 | 1 | 2 | 9
|
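`ReformerLayer` is a reversible residual block: roughly `Y1 = X1 + Attention(X2)` and `Y2 = X2 + FeedForward(Y1)`, which is why `backward_pass` can recompute the layer inputs from its outputs instead of storing activations. A toy demonstration of that reversibility, with simple deterministic functions standing in for the attention and feed-forward sub-layers (no dropout, so no seeding is needed):

```python
import torch

# Stand-ins for the attention and feed-forward sub-layers (deterministic).
f = lambda x: torch.tanh(x) * 2.0   # plays the role of attention(X2)
g = lambda x: x ** 2 + 1.0          # plays the role of feed_forward(Y1)

x1 = torch.randn(3)
x2 = torch.randn(3)

# Forward pass of a reversible block.
y1 = x1 + f(x2)
y2 = x2 + g(y1)

# Inputs recovered exactly from the outputs, which is what lets Reformer avoid
# storing per-layer activations during backprop.
x2_rec = y2 - g(y1)
x1_rec = y1 - f(x2_rec)
print(torch.allclose(x1, x1_rec), torch.allclose(x2, x2_rec))   # True True
```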
4,853
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReformerModel
|
import torch
from typing import Any, Optional, Union
from ...utils import DUMMY_INPUTS, DUMMY_MASK, ModelOutput, auto_docstring, logging
@auto_docstring
class ReformerModel(ReformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
assert self.config.num_hidden_layers > 0, "`config.attn_layers` is empty. Select at least one attn layer form ['lsh', 'local']"
self.embeddings = ReformerEmbeddings(config)
self.encoder = ReformerEncoder(config)
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, num_hashes: Optional[int]=None, past_buckets_states: Optional[list[tuple[torch.Tensor]]]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ReformerModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be
a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices
are automatically padded to be a multiple of the chunk length.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
num_hashes (`int`, *optional*):
The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites
the default defined in `config.num_hashes`.
For more information, see `num_hashes` in [`ReformerConfig`].
past_buckets_states (`list[tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*):
List of `tuple(torch.LongTensor, torch.FloatTensor` of length `config.n_layers`, with the first element
being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)`) and the
second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`).
Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to speed
up sequential decoding.
"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
device = input_ids.device
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
device = inputs_embeds.device
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
assert len(input_shape) == 2, f'`input_ids` have be of shape `[batch_size, sequence_length]`, but got shape: {input_shape}'
if past_buckets_states is not None:
assert not self.training, '`past_buckets_states` can only be used for inference, not for training`.'
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers, is_attention_chunked=True)
orig_sequence_length = input_shape[-1]
least_common_mult_chunk_length = _get_least_common_mult_chunk_len(self.config)
min_chunk_length = _get_min_chunk_len(self.config)
must_pad_to_match_chunk_length = input_shape[-1] % least_common_mult_chunk_length != 0 and input_shape[-1] > min_chunk_length and (past_buckets_states is None)
if must_pad_to_match_chunk_length:
padding_length = least_common_mult_chunk_length - input_shape[-1] % least_common_mult_chunk_length
if self.training is True:
raise ValueError(f'If training, sequence length {input_shape[-1]} has to be a multiple of least common multiple chunk_length {least_common_mult_chunk_length}. Please consider padding the input to a length of {input_shape[-1] + padding_length}.')
input_ids, inputs_embeds, attention_mask, position_ids, input_shape = self._pad_to_mult_of_chunk_length(input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, input_shape=input_shape, padding_length=padding_length, padded_seq_length=least_common_mult_chunk_length, device=device)
if past_buckets_states is not None:
start_idx_pos_encodings = past_buckets_states[0][1].shape[1]
else:
start_idx_pos_encodings = 0
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, start_idx_pos_encodings=start_idx_pos_encodings)
encoder_outputs = self.encoder(hidden_states=embedding_output, head_mask=head_mask, attention_mask=attention_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, orig_sequence_length=orig_sequence_length, output_hidden_states=output_hidden_states, output_attentions=output_attentions)
sequence_output = encoder_outputs.hidden_states
if must_pad_to_match_chunk_length:
sequence_output = sequence_output[:, :orig_sequence_length]
past_buckets_states = encoder_outputs.past_buckets_states if use_cache else None
hidden_states = encoder_outputs.all_hidden_states if output_hidden_states else None
attentions = encoder_outputs.all_attentions if output_attentions else None
if not return_dict:
return tuple((v for v in [sequence_output, past_buckets_states, hidden_states, attentions] if v is not None))
return ReformerModelOutput(last_hidden_state=sequence_output, past_buckets_states=past_buckets_states, hidden_states=hidden_states, attentions=attentions)
def _pad_to_mult_of_chunk_length(self, input_ids, inputs_embeds=None, attention_mask=None, position_ids=None, input_shape=None, padding_length=None, padded_seq_length=None, device=None):
logger.warning_once(f'Input ids are automatically padded from {input_shape[-1]} to {input_shape[-1] + padding_length} to be a multiple of `config.chunk_length`: {padded_seq_length}')
padded_input_ids = torch.full((input_shape[0], padding_length), self.config.pad_token_id, device=device, dtype=torch.long)
if attention_mask is not None:
pad_attention_mask = torch.zeros(input_shape[0], padding_length, device=device, dtype=attention_mask.dtype)
attention_mask = torch.cat([attention_mask, pad_attention_mask], dim=-1)
else:
attention_mask = torch.cat([torch.ones(input_shape, device=device, dtype=torch.bool), torch.zeros((input_shape[0], padding_length), device=device, dtype=torch.bool)], dim=-1)
if input_ids is not None:
input_ids = torch.cat([input_ids, padded_input_ids], dim=-1)
input_shape = input_ids.size()
if position_ids is not None:
padded_position_ids = torch.arange(input_shape[-1], padded_seq_length, dtype=torch.long, device=device)
padded_position_ids = position_ids.unsqueeze(0).expand(input_shape[0], padding_length)
position_ids = torch.cat([position_ids, padded_position_ids], dim=-1)
if inputs_embeds is not None:
padded_inputs_embeds = self.get_input_embeddings()(padded_input_ids)
inputs_embeds = torch.cat([inputs_embeds, padded_inputs_embeds], dim=-2)
input_shape = inputs_embeds.size()
return (input_ids, inputs_embeds, attention_mask, position_ids, input_shape)
|
@auto_docstring
class ReformerModel(ReformerPreTrainedModel):
def __init__(self, config):
pass
def get_input_embeddings(self):
pass
def set_input_embeddings(self, value):
pass
def _prune_heads(self, heads_to_prune):
'''
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
'''
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, num_hashes: Optional[int]=None, past_buckets_states: Optional[list[tuple[torch.Tensor]]]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, ReformerModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be
a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices
are automatically padded to be a multiple of the chunk length.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
num_hashes (`int`, *optional*):
The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites
the default defined in `config.num_hashes`.
For more information, see `num_hashes` in [`ReformerConfig`].
past_buckets_states (`list[tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*):
List of `tuple(torch.LongTensor, torch.FloatTensor` of length `config.n_layers`, with the first element
being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)`) and the
second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`).
Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to speed
up sequential decoding.
'''
pass
def _pad_to_mult_of_chunk_length(self, input_ids, inputs_embeds=None, attention_mask=None, position_ids=None, input_shape=None, padding_length=None, padded_seq_length=None, device=None):
pass
| 9 | 2 | 33 | 4 | 26 | 3 | 5 | 0.1 | 1 | 9 | 3 | 0 | 6 | 3 | 6 | 8 | 209 | 29 | 165 | 52 | 129 | 17 | 74 | 28 | 67 | 17 | 2 | 2 | 27
|
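At evaluation time `ReformerModel` pads the input up to a multiple of the least common multiple of the configured chunk lengths, runs the encoder on the padded sequence, and crops the output back to the original length. A small sketch of the padding-length arithmetic (the concrete value 64 is just an assumption for illustration):

```python
# Illustrative only: `least_common_mult_chunk_length` is normally derived from the
# lsh/local chunk lengths in the config via `_get_least_common_mult_chunk_len`.
least_common_mult_chunk_length = 64
seq_length = 100

remainder = seq_length % least_common_mult_chunk_length
padding_length = least_common_mult_chunk_length - remainder if remainder else 0
padded_length = seq_length + padding_length
print(padding_length, padded_length)   # 28 128

# The encoder then runs on `padded_length` tokens and the model slices the
# output back with `sequence_output[:, :seq_length]`.
```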
4,854
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReformerModelOutput
|
import torch
from typing import Any, Optional, Union
from ...utils import DUMMY_INPUTS, DUMMY_MASK, ModelOutput, auto_docstring, logging
from dataclasses import dataclass
@dataclass
@auto_docstring(custom_intro='\n Output type of [`ReformerModel`].\n ')
class ReformerModelOutput(ModelOutput):
"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_predict, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict`
corresponds to `sequence_length`.
past_buckets_states (`list[tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
List of `tuple(torch.LongTensor, torch.FloatTensor` of length `config.n_layers`, with the first element
being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)`) and the
second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`).
Contains precomputed buckets and hidden-states that can be used (see `past_buckets_states` input) to speed
up sequential decoding.
"""
last_hidden_state: torch.FloatTensor
past_buckets_states: Optional[list[tuple[torch.LongTensor, torch.FloatTensor]]] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Output type of [`ReformerModel`].\n ')
class ReformerModelOutput(ModelOutput):
'''
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_predict, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict`
corresponds to `sequence_length`.
past_buckets_states (`list[tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
List of `tuple(torch.LongTensor, torch.FloatTensor` of length `config.n_layers`, with the first element
being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)`) and the
second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`).
Contains precomputed buckets and hidden-states that can be used (see `past_buckets_states` input) to speed
up sequential decoding.
'''
pass
| 3 | 1 | 0 | 0 | 0 | 0 | 0 | 4.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 6 | 5 | 4 | 4 | 23 | 5 | 4 | 4 | 0 | 1 | 0 | 0
|
4,855
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReformerModelWithLMHead
|
import torch
from typing import Any, Optional, Union
from ...modeling_outputs import CausalLMOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput
from ...utils import DUMMY_INPUTS, DUMMY_MASK, ModelOutput, auto_docstring, logging
from ...generation import GenerationMixin
@auto_docstring(custom_intro='\n Reformer Model with a `language modeling` head on top.\n ')
class ReformerModelWithLMHead(ReformerPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['lm_head.decoder.weight', 'lm_head.decoder.bias']
def __init__(self, config):
super().__init__(config)
assert config.is_decoder, 'If you want to use `ReformerModelWithLMHead` make sure that `is_decoder=True`.'
assert 'local' not in self.config.attn_layers or config.local_num_chunks_after == 0, f'If causal mask is enabled, make sure that `config.local_num_chunks_after` is set to 0 and not {config.local_num_chunks_after}.'
assert 'lsh' not in self.config.attn_layers or config.lsh_num_chunks_after == 0, f'If causal mask is enabled, make sure that `config.lsh_num_chunks_after` is set to 1 and not {config.lsh_num_chunks_after}.'
self.reformer = ReformerModel(config)
self.lm_head = ReformerOnlyLMHead(config)
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
self.lm_head.bias = new_embeddings.bias
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, num_hashes: Optional[int]=None, past_buckets_states: Optional[list[tuple[torch.Tensor]]]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, CausalLMOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be
a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices
are automatically padded to be a multiple of the chunk length.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
num_hashes (`int`, *optional*):
The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites
the default defined in `config.num_hashes`.
For more information, see `num_hashes` in [`ReformerConfig`].
past_buckets_states (`list[tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*):
List of `tuple(torch.LongTensor, torch.FloatTensor)` of length `config.n_layers`, with the first element
being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)` and the
second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`.
Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to speed
up sequential decoding.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for
labels in `[0, ..., config.vocab_size - 1]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
reformer_outputs = self.reformer(input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict)
sequence_output = reformer_outputs[0]
logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, vocab_size=self.config.vocab_size, **kwargs)
if not return_dict:
output = (logits,) + reformer_outputs[1:]
return (loss,) + output if loss is not None else output
return ReformerModelWithLMHeadOutput(loss=loss, logits=logits, past_buckets_states=reformer_outputs.past_buckets_states, hidden_states=reformer_outputs.hidden_states, attentions=reformer_outputs.attentions)
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, use_cache=None, num_hashes=None, **kwargs):
if past_key_values is not None:
input_ids = input_ids[:, -1:]
model_inputs = {'input_ids': input_ids, 'past_buckets_states': past_key_values, 'use_cache': use_cache, 'num_hashes': num_hashes}
kwargs.pop('attention_mask', None)
for key, value in kwargs.items():
if key not in model_inputs:
logger.warning(f'{key} is not a recognized input.')
model_inputs[key] = value
return model_inputs
def _reorder_cache(self, past_key_values, beam_idx):
reord_past_buckets_states = []
for buckets, hidden_states in past_key_values:
if buckets is not None and buckets.numel() > 0:
reord_buckets = buckets.index_select(0, beam_idx.to(buckets.device))
else:
reord_buckets = None
reord_hidden_states = hidden_states.index_select(0, beam_idx.to(hidden_states.device))
reord_past_buckets_states.append((reord_buckets, reord_hidden_states))
if isinstance(past_key_values, ReformerDynamicCache):
reord_past_buckets_states = ReformerDynamicCache.from_legacy_cache(reord_past_buckets_states)
return reord_past_buckets_states
|
@auto_docstring(custom_intro='\n Reformer Model with a `language modeling` head on top.\n ')
class ReformerModelWithLMHead(ReformerPreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, num_hashes: Optional[int]=None, past_buckets_states: Optional[list[tuple[torch.Tensor]]]=None, use_cache: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.Tensor]=None, **kwargs) -> Union[tuple, CausalLMOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be
a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices
are automatically padded to be a multiple of the chunk length.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
num_hashes (`int`, *optional*):
The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites
the default defined in `config.num_hashes`.
For more information, see `num_hashes` in [`ReformerConfig`].
past_buckets_states (`list[tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*):
List of `tuple(torch.LongTensor, torch.FloatTensor)` of length `config.n_layers`, with the first element
being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)` and the
second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`.
Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to speed
up sequential decoding.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size - 1]`. All labels set to `-100` are ignored (masked); the loss is only computed for
labels in `[0, ..., config.vocab_size - 1]`
'''
pass
def prepare_inputs_for_generation(self, input_ids, past_key_values=None, use_cache=None, num_hashes=None, **kwargs):
pass
def _reorder_cache(self, past_key_values, beam_idx):
pass
| 9
| 1
| 19
| 2
| 15
| 2
| 2
| 0.11
| 2
| 8
| 4
| 0
| 6
| 2
| 6
| 8
| 127
| 17
| 99
| 38
| 69
| 11
| 41
| 20
| 34
| 5
| 2
| 2
| 13
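A minimal usage sketch for the class above. The checkpoint name is illustrative (any Reformer decoder checkpoint works); with `use_cache=True`, `generate` exercises the `past_buckets_states` path handled by `prepare_inputs_for_generation`.

```python
import torch
from transformers import ReformerModelWithLMHead, ReformerTokenizer

# Illustrative checkpoint; substitute any Reformer decoder checkpoint.
name = "google/reformer-crime-and-punishment"
tokenizer = ReformerTokenizer.from_pretrained(name)
model = ReformerModelWithLMHead.from_pretrained(name).eval()

inputs = tokenizer("The night was", return_tensors="pt")
with torch.no_grad():
    generated = model.generate(**inputs, max_new_tokens=20, use_cache=True, do_sample=False)
print(tokenizer.decode(generated[0]))
```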
|
4,856
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReformerModelWithLMHeadOutput
|
import torch
from typing import Any, Optional, Union
from ...utils import DUMMY_INPUTS, DUMMY_MASK, ModelOutput, auto_docstring, logging
from dataclasses import dataclass
@dataclass
@auto_docstring(custom_intro='\n Output type of [`ReformerModelWithLMHead`].\n ')
class ReformerModelWithLMHeadOutput(ModelOutput):
"""
loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, num_predict, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict`
corresponds to `sequence_length`.
past_buckets_states (`list[tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
List of `tuple(torch.LongTensor, torch.FloatTensor)` of length `config.n_layers`, with the first element
being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)` and the
second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`.
Contains precomputed buckets and hidden-states that can be used (see `past_buckets_states` input) to speed
up sequential decoding.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_buckets_states: Optional[list[tuple[torch.LongTensor, torch.FloatTensor]]] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
|
@dataclass
@auto_docstring(custom_intro='\n Output type of [`ReformerModelWithLMHead`].\n ')
class ReformerModelWithLMHeadOutput(ModelOutput):
'''
loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, num_predict, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict`
corresponds to `sequence_length`.
past_buckets_states (`list[tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
List of `tuple(torch.LongTensor, torch.FloatTensor)` of length `config.n_layers`, with the first element
being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)` and the
second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`.
Contains precomputed buckets and hidden-states that can be used (see `past_buckets_states` input) to speed
up sequential decoding.
'''
pass
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 4.17
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 37
| 6
| 6
| 6
| 5
| 25
| 6
| 6
| 5
| 0
| 1
| 0
| 0
|
4,857
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReformerOnlyLMHead
|
import torch
from ...pytorch_utils import apply_chunking_to_forward
from torch import nn
class ReformerOnlyLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_len_dim = 1
self.chunk_size_lm_head = config.chunk_size_lm_head
self.decoder = nn.Linear(2 * config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, hidden_states):
return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
def forward_chunk(self, hidden_states):
hidden_states = self.decoder(hidden_states)
return hidden_states
def _tie_weights(self) -> None:
if self.decoder.bias.device.type == 'meta':
self.decoder.bias = self.bias
else:
self.bias = self.decoder.bias
|
class ReformerOnlyLMHead(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
def forward_chunk(self, hidden_states):
pass
def _tie_weights(self) -> None:
pass
| 5
| 0
| 5
| 0
| 4
| 1
| 1
| 0.22
| 1
| 1
| 0
| 0
| 4
| 4
| 4
| 14
| 25
| 3
| 18
| 9
| 13
| 4
| 17
| 9
| 12
| 2
| 1
| 1
| 5
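The head above projects hidden states in chunks along the sequence dimension via `apply_chunking_to_forward`, trading one large matmul for several smaller ones to save memory. A minimal sketch with toy dimensions (all sizes here are illustrative, not the model's) showing that the chunked and unchunked applications agree:

```python
import torch
from torch import nn
from transformers.pytorch_utils import apply_chunking_to_forward

hidden = torch.randn(2, 12, 8)          # (batch, seq_len, feature) -- toy sizes
decoder = nn.Linear(8, 5)

def forward_chunk(x):
    return decoder(x)

full = forward_chunk(hidden)                                        # single big projection
chunked = apply_chunking_to_forward(forward_chunk, 4, 1, hidden)    # chunks of 4 along dim 1
print(torch.allclose(full, chunked, atol=1e-6))                     # True: chunking only trades memory for loops
```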
|
4,858
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReformerPreTrainedModel
|
import torch
from .configuration_reformer import ReformerConfig
from ...modeling_utils import PreTrainedModel
from torch import nn
from ...utils import DUMMY_INPUTS, DUMMY_MASK, ModelOutput, auto_docstring, logging
@auto_docstring
class ReformerPreTrainedModel(PreTrainedModel):
config: ReformerConfig
base_model_prefix = 'reformer'
@property
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
return dummy_inputs
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, AxialPositionEmbeddings):
for weight in module.weights:
nn.init.normal_(weight, std=self.config.axial_norm_std)
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
|
@auto_docstring
class ReformerPreTrainedModel(PreTrainedModel):
@property
def dummy_inputs(self):
pass
def _init_weights(self, module):
'''Initialize the weights'''
pass
| 5
| 1
| 13
| 0
| 12
| 2
| 5
| 0.26
| 1
| 1
| 1
| 5
| 2
| 0
| 2
| 2
| 37
| 3
| 27
| 10
| 23
| 7
| 20
| 9
| 17
| 8
| 1
| 2
| 9
|
4,859
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReformerSelfOutput
|
from torch import nn
class ReformerSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
all_head_size = config.num_attention_heads * config.attention_head_size
self.dropout = config.hidden_dropout_prob
self.dense = nn.Linear(all_head_size, config.hidden_size, bias=False)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
return hidden_states
|
class ReformerSelfOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states):
pass
| 3
| 0
| 5
| 1
| 5
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 2
| 2
| 2
| 12
| 12
| 2
| 10
| 6
| 7
| 0
| 10
| 6
| 7
| 1
| 1
| 0
| 2
|
4,860
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer.ReverseSort
|
import torch
from torch.autograd.function import Function
class ReverseSort(Function):
"""
After chunked attention, which sorts the clusters, has been applied, the original ordering has to be restored. Since a customized
backward function is used for Reformer, the gradients of the output vectors have to be explicitly sorted here.
"""
@staticmethod
def forward(ctx, out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx):
with torch.no_grad():
ctx.sorted_bucket_idx = sorted_bucket_idx
expanded_undo_sort_indices = undo_sorted_bucket_idx.unsqueeze(-1).expand(out_vectors.shape)
out_vectors = torch.gather(out_vectors, 2, expanded_undo_sort_indices)
logits = torch.gather(logits, 2, undo_sorted_bucket_idx)
return (out_vectors, logits)
@staticmethod
def backward(ctx, grad_out_vectors, grad_logits):
sorted_bucket_idx = ctx.sorted_bucket_idx
expanded_sort_indices = sorted_bucket_idx.unsqueeze(-1).expand(grad_out_vectors.shape)
grad_out_vectors = torch.gather(grad_out_vectors, 2, expanded_sort_indices)
grad_logits = torch.gather(grad_logits, 2, sorted_bucket_idx)
return (grad_out_vectors, grad_logits, None, None)
|
class ReverseSort(Function):
'''
After chunked attention, which sorts the clusters, has been applied, the original ordering has to be restored. Since a customized
backward function is used for Reformer, the gradients of the output vectors have to be explicitly sorted here.
'''
@staticmethod
def forward(ctx, out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx):
pass
@staticmethod
def backward(ctx, grad_out_vectors, grad_logits):
pass
| 5
| 1
| 11
| 2
| 7
| 3
| 1
| 0.56
| 1
| 0
| 0
| 0
| 0
| 0
| 2
| 32
| 30
| 5
| 16
| 8
| 11
| 9
| 14
| 6
| 11
| 1
| 5
| 1
| 2
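The docstring above hinges on one invariant: gathering with the inverse permutation undoes the sort, while the backward pass routes gradients through the forward permutation. A toy, one-dimensional sketch of that round trip (not from the source):

```python
import torch

scores = torch.tensor([0.3, 0.9, 0.1, 0.5])
sorted_idx = torch.argsort(scores)       # order used for chunked attention
undo_idx = torch.argsort(sorted_idx)     # indices that restore the original order

sorted_scores = scores[sorted_idx]
restored = torch.gather(sorted_scores, 0, undo_idx)  # what ReverseSort.forward does per bucket axis
print(torch.equal(restored, scores))                 # True
```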
|
4,861
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/modeling_reformer.py
|
transformers.models.reformer.modeling_reformer._ReversibleFunction
|
import torch
from torch.autograd.function import Function
class _ReversibleFunction(Function):
"""
To prevent PyTorch from performing the usual backpropagation, a customized backward function is implemented here.
This ensures that no memory-expensive activations are saved during the forward pass. This function is
heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py
"""
@staticmethod
def forward(ctx, hidden_states, layers, attention_mask, head_mask, num_hashes, all_hidden_states, all_attentions, past_buckets_states, use_cache, orig_sequence_length, output_hidden_states, output_attentions):
all_buckets = ()
hidden_states, attn_output = torch.chunk(hidden_states, 2, dim=-1)
for layer_id, (layer, layer_head_mask) in enumerate(zip(layers, head_mask)):
if output_hidden_states is True:
all_hidden_states.append(hidden_states)
layer_outputs = layer(prev_attn_output=attn_output, hidden_states=hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, orig_sequence_length=orig_sequence_length, output_attentions=output_attentions)
attn_output = layer_outputs.attn_output
hidden_states = layer_outputs.hidden_states
all_buckets = all_buckets + (layer_outputs.buckets,)
if output_attentions:
all_attentions.append(layer_outputs.attention_probs)
if output_hidden_states is True:
all_hidden_states.append(hidden_states)
ctx.save_for_backward(attn_output.detach(), hidden_states.detach())
ctx.layers = layers
ctx.all_buckets = all_buckets
ctx.head_mask = head_mask
ctx.attention_mask = attention_mask
return torch.cat([attn_output, hidden_states], dim=-1)
@staticmethod
def backward(ctx, grad_hidden_states):
grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1)
attn_output, hidden_states = ctx.saved_tensors
output = ReformerBackwardOutput(attn_output=attn_output, hidden_states=hidden_states, grad_attn_output=grad_attn_output, grad_hidden_states=grad_hidden_states)
del grad_attn_output, grad_hidden_states, attn_output, hidden_states
layers = ctx.layers
all_buckets = ctx.all_buckets
head_mask = ctx.head_mask
attention_mask = ctx.attention_mask
for idx, layer in enumerate(layers[::-1]):
buckets = all_buckets[-1]
all_buckets = all_buckets[:-1]
output = layer.backward_pass(next_attn_output=output.attn_output, hidden_states=output.hidden_states, grad_attn_output=output.grad_attn_output, grad_hidden_states=output.grad_hidden_states, head_mask=head_mask[len(layers) - idx - 1], attention_mask=attention_mask, buckets=buckets)
assert all_buckets == (), 'buckets have to be empty after backpropagation'
grad_hidden_states = torch.cat([output.grad_attn_output, output.grad_hidden_states], dim=-1)
return (grad_hidden_states, None, None, None, None, None, None, None, None, None, None, None)
|
class _ReversibleFunction(Function):
'''
To prevent PyTorch from performing the usual backpropagation, a customized backward function is implemented here.
This ensures that no memory-expensive activations are saved during the forward pass. This function is
heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py
'''
@staticmethod
def forward(ctx, hidden_states, layers, attention_mask, head_mask, num_hashes, all_hidden_states, all_attentions, past_buckets_states, use_cache, orig_sequence_length, output_hidden_states, output_attentions):
pass
@staticmethod
def backward(ctx, grad_hidden_states):
pass
| 5
| 1
| 50
| 8
| 37
| 6
| 4
| 0.21
| 1
| 2
| 0
| 0
| 0
| 0
| 2
| 32
| 110
| 18
| 76
| 32
| 57
| 16
| 37
| 16
| 34
| 5
| 5
| 2
| 7
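The reversible trick this class implements can be stated in a few lines: with a coupling `y1 = x1 + F(x2)`, `y2 = x2 + G(y1)`, the inputs can be recomputed exactly from the outputs, so intermediate activations never need to be stored. A toy sketch of the coupling and its inverse (`F` and `G` here are arbitrary illustrative layers, not the Reformer sub-blocks):

```python
import torch
from torch import nn

F, G = nn.Linear(4, 4), nn.Linear(4, 4)

def forward(x1, x2):
    y1 = x1 + F(x2)
    y2 = x2 + G(y1)
    return y1, y2

def invert(y1, y2):
    x2 = y2 - G(y1)   # recover x2 from the outputs
    x1 = y1 - F(x2)   # then recover x1
    return x1, x2

x1, x2 = torch.randn(2, 4), torch.randn(2, 4)
with torch.no_grad():
    r1, r2 = invert(*forward(x1, x2))
print(torch.allclose(r1, x1, atol=1e-6), torch.allclose(r2, x2, atol=1e-6))  # True True
```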
|
4,862
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/tokenization_reformer.py
|
transformers.models.reformer.tokenization_reformer.ReformerTokenizer
|
from ...tokenization_utils import PreTrainedTokenizer
from shutil import copyfile
import sentencepiece as spm
from ...utils.import_utils import requires
import os
from typing import Any, Optional
@requires(backends=('sentencepiece',))
class ReformerTokenizer(PreTrainedTokenizer):
"""
Construct a Reformer tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece) .
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
additional_special_tokens (`list[str]`, *optional*, defaults to `[]`):
Additional special tokens used by the tokenizer.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file, eos_token='</s>', unk_token='<unk>', additional_special_tokens=[], sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(vocab_file)
super().__init__(eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
@property
def vocab_size(self):
return self.sp_model.get_piece_size()
def get_vocab(self) -> dict[str, int]:
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__(self):
state = self.__dict__.copy()
state['sp_model'] = None
return state
def __setstate__(self, d):
self.__dict__ = d
if not hasattr(self, 'sp_model_kwargs'):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _tokenize(self, text: str) -> list[str]:
"""Take as input a string and return a list of strings (tokens) for words/sub-words"""
return self.sp_model.encode(text, out_type=str)
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.sp_model.piece_to_id(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
if index < self.sp_model.get_piece_size():
token = self.sp_model.IdToPiece(index)
return token
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
current_sub_tokens = []
out_string = ''
for token in tokens:
if token in self.all_special_tokens:
out_string += self.sp_model.decode(current_sub_tokens) + token
current_sub_tokens = []
else:
current_sub_tokens.append(token)
out_string += self.sp_model.decode(current_sub_tokens)
return out_string.strip()
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, 'wb') as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
|
@requires(backends=('sentencepiece',))
class ReformerTokenizer(PreTrainedTokenizer):
'''
Construct a Reformer tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece) .
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
additional_special_tokens (`list[str]`, *optional*, defaults to `[]`):
Additional special tokens used by the tokenizer.
sp_model_kwargs (`dict`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
'''
def __init__(self, vocab_file, eos_token='</s>', unk_token='<unk>', additional_special_tokens=[], sp_model_kwargs: Optional[dict[str, Any]]=None, **kwargs) -> None:
pass
@property
def vocab_size(self):
pass
def get_vocab(self) -> dict[str, int]:
pass
def __getstate__(self):
pass
def __setstate__(self, d):
pass
def _tokenize(self, text: str) -> list[str]:
'''Take as input a string and return a list of strings (tokens) for words/sub-words'''
pass
def _convert_token_to_id(self, token):
'''Converts a token (str) in an id using the vocab.'''
pass
def _convert_id_to_token(self, index):
'''Converts an index (integer) in a token (str) using the vocab.'''
pass
def convert_tokens_to_string(self, tokens):
'''Converts a sequence of tokens (string) in a single string.'''
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 13
| 5
| 8
| 1
| 7
| 1
| 2
| 0.52
| 1
| 5
| 0
| 0
| 10
| 4
| 10
| 99
| 137
| 26
| 73
| 35
| 53
| 38
| 54
| 25
| 43
| 5
| 3
| 2
| 19
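A short usage sketch for the tokenizer above; the checkpoint name is illustrative (any repository that ships a Reformer SentencePiece vocabulary works), and the commented lines show how `sp_model_kwargs` would enable the subword regularization described in the docstring.

```python
from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

ids = tok("It was a dark and stormy night.").input_ids
print(tok.convert_ids_to_tokens(ids)[:8])
print(tok.decode(ids))

# Subword regularization, forwarded to SentencePiece through `sp_model_kwargs`:
# tok = ReformerTokenizer.from_pretrained(
#     "google/reformer-crime-and-punishment",
#     sp_model_kwargs={"enable_sampling": True, "nbest_size": -1, "alpha": 0.1},
# )
```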
|
4,863
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/reformer/tokenization_reformer_fast.py
|
transformers.models.reformer.tokenization_reformer_fast.ReformerTokenizerFast
|
from typing import Optional
import os
from shutil import copyfile
from ...tokenization_utils_fast import PreTrainedTokenizerFast
class ReformerTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" Reformer tokenizer (backed by HuggingFace's *tokenizers* library). Based on
[Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
additional_special_tokens (`list[str]`, *optional*):
Additional special tokens used by the tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = ReformerTokenizer
def __init__(self, vocab_file=None, tokenizer_file=None, eos_token='</s>', unk_token='<unk>', additional_special_tokens=[], **kwargs):
super().__init__(vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, **kwargs)
self.vocab_file = vocab_file
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
if not os.path.isdir(save_directory):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
|
class ReformerTokenizerFast(PreTrainedTokenizerFast):
'''
Construct a "fast" Reformer tokenizer (backed by HuggingFace's *tokenizers* library). Based on
[Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
additional_special_tokens (`list[str]`, *optional*):
Additional special tokens used by the tokenizer.
'''
def __init__(self, vocab_file=None, tokenizer_file=None, eos_token='</s>', unk_token='<unk>', additional_special_tokens=[], **kwargs):
pass
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> tuple[str]:
pass
| 3
| 1
| 13
| 1
| 12
| 0
| 3
| 0.57
| 1
| 4
| 0
| 0
| 3
| 1
| 3
| 91
| 77
| 14
| 40
| 18
| 27
| 23
| 19
| 9
| 15
| 5
| 3
| 1
| 8
|
4,864
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/configuration_regnet.py
|
transformers.models.regnet.configuration_regnet.RegNetConfig
|
from ...configuration_utils import PretrainedConfig
class RegNetConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`RegNetModel`]. It is used to instantiate a RegNet
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the RegNet
[facebook/regnet-y-040](https://huggingface.co/facebook/regnet-y-040) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embedding_size (`int`, *optional*, defaults to 64):
Dimensionality (hidden size) for the embedding layer.
hidden_sizes (`list[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`):
Dimensionality (hidden size) at each stage.
depths (`list[int]`, *optional*, defaults to `[3, 4, 6, 3]`):
Depth (number of layers) for each stage.
layer_type (`str`, *optional*, defaults to `"y"`):
The layer to use; it can be either `"x"` or `"y"`. An `x` layer is a ResNet's BottleNeck layer with
`reduction` fixed to `1`, while a `y` layer is an `x` layer with squeeze and excitation. Please refer to the
paper for a detailed explanation of how these layers were constructed.
hidden_act (`str`, *optional*, defaults to `"relu"`):
The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`
are supported.
downsample_in_first_stage (`bool`, *optional*, defaults to `False`):
If `True`, the first stage will downsample the inputs using a `stride` of 2.
Example:
```python
>>> from transformers import RegNetConfig, RegNetModel
>>> # Initializing a RegNet regnet-y-40 style configuration
>>> configuration = RegNetConfig()
>>> # Initializing a model from the regnet-y-40 style configuration
>>> model = RegNetModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = 'regnet'
layer_types = ['x', 'y']
def __init__(self, num_channels=3, embedding_size=32, hidden_sizes=[128, 192, 512, 1088], depths=[2, 6, 12, 2], groups_width=64, layer_type='y', hidden_act='relu', downsample_in_first_stage=False, **kwargs):
super().__init__(**kwargs)
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
self.num_channels = num_channels
self.embedding_size = embedding_size
self.hidden_sizes = hidden_sizes
self.depths = depths
self.groups_width = groups_width
self.layer_type = layer_type
self.hidden_act = hidden_act
self.downsample_in_first_stage = downsample_in_first_stage
|
class RegNetConfig(PretrainedConfig):
'''
This is the configuration class to store the configuration of a [`RegNetModel`]. It is used to instantiate a RegNet
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the RegNet
[facebook/regnet-y-040](https://huggingface.co/facebook/regnet-y-040) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embedding_size (`int`, *optional*, defaults to 64):
Dimensionality (hidden size) for the embedding layer.
hidden_sizes (`list[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`):
Dimensionality (hidden size) at each stage.
depths (`list[int]`, *optional*, defaults to `[3, 4, 6, 3]`):
Depth (number of layers) for each stage.
layer_type (`str`, *optional*, defaults to `"y"`):
The layer to use; it can be either `"x"` or `"y"`. An `x` layer is a ResNet's BottleNeck layer with
`reduction` fixed to `1`, while a `y` layer is an `x` layer with squeeze and excitation. Please refer to the
paper for a detailed explanation of how these layers were constructed.
hidden_act (`str`, *optional*, defaults to `"relu"`):
The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`
are supported.
downsample_in_first_stage (`bool`, *optional*, defaults to `False`):
If `True`, the first stage will downsample the inputs using a `stride` of 2.
Example:
```python
>>> from transformers import RegNetConfig, RegNetModel
>>> # Initializing a RegNet regnet-y-40 style configuration
>>> configuration = RegNetConfig()
>>> # Initializing a model from the regnet-y-40 style configuration
>>> model = RegNetModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
'''
def __init__(self, num_channels=3, embedding_size=32, hidden_sizes=[128, 192, 512, 1088], depths=[2, 6, 12, 2], groups_width=64, layer_type='y', hidden_act='relu', downsample_in_first_stage=False, **kwargs):
pass
| 2
| 1
| 23
| 0
| 22
| 1
| 2
| 1.48
| 1
| 2
| 0
| 0
| 1
| 8
| 1
| 1
| 68
| 6
| 25
| 22
| 13
| 37
| 15
| 12
| 13
| 2
| 1
| 1
| 2
|
4,865
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py
|
transformers.models.regnet.convert_regnet_seer_10b_to_pytorch.FakeRegNetParams
|
from classy_vision.models.regnet import RegNet, RegNetParams
class FakeRegNetParams(RegNetParams):
"""
Used to instantiate a RegNet model from Classy Vision with the same depth as the 10B one but with super small
parameters, so we can trace it in memory.
"""
def get_expanded_params(self):
return [(8, 2, 2, 8, 1.0), (8, 2, 7, 8, 1.0), (8, 2, 17, 8, 1.0), (8, 2, 1, 8, 1.0)]
|
class FakeRegNetParams(RegNetParams):
'''
Used to instantiate a RegNet model from Classy Vision with the same depth as the 10B one but with super small
parameters, so we can trace it in memory.
'''
def get_expanded_params(self):
pass
| 2
| 1
| 2
| 0
| 2
| 0
| 1
| 1.33
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 8
| 1
| 3
| 2
| 1
| 4
| 3
| 2
| 1
| 1
| 1
| 0
| 1
|
4,866
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py
|
transformers.models.regnet.convert_regnet_seer_10b_to_pytorch.FakeRegNetVisslWrapper
|
import torch.nn as nn
from vissl.models.model_helpers import get_trunk_forward_outputs
from torch import Tensor
class FakeRegNetVisslWrapper(nn.Module):
"""
Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file.
"""
def __init__(self, model: nn.Module):
super().__init__()
feature_blocks: list[tuple[str, nn.Module]] = []
feature_blocks.append(('conv1', model.stem))
for k, v in model.trunk_output.named_children():
assert k.startswith('block'), f'Unexpected layer name {k}'
block_index = len(feature_blocks) + 1
feature_blocks.append((f'res{block_index}', v))
self._feature_blocks = nn.ModuleDict(feature_blocks)
def forward(self, x: Tensor):
return get_trunk_forward_outputs(x, out_feat_keys=None, feature_blocks=self._feature_blocks)
|
class FakeRegNetVisslWrapper(nn.Module):
'''
Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file.
'''
def __init__(self, model: nn.Module):
pass
def forward(self, x: Tensor):
pass
| 3
| 1
| 10
| 1
| 8
| 1
| 2
| 0.31
| 1
| 3
| 0
| 0
| 2
| 1
| 2
| 12
| 25
| 4
| 16
| 7
| 13
| 5
| 12
| 7
| 9
| 2
| 1
| 1
| 3
|
4,867
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py
|
transformers.models.regnet.convert_regnet_seer_10b_to_pytorch.Tracker
|
import torch.nn as nn
from collections import OrderedDict
from torch import Tensor
from functools import partial
from dataclasses import dataclass, field
@dataclass
class Tracker:
module: nn.Module
traced: list[nn.Module] = field(default_factory=list)
handles: list = field(default_factory=list)
name2module: dict[str, nn.Module] = field(default_factory=OrderedDict)
def _forward_hook(self, m, inputs: Tensor, outputs: Tensor, name: str):
has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, (nn.Conv2d, nn.BatchNorm2d))
if has_not_submodules:
self.traced.append(m)
self.name2module[name] = m
def __call__(self, x: Tensor):
for name, m in self.module.named_modules():
self.handles.append(m.register_forward_hook(partial(self._forward_hook, name=name)))
self.module(x)
[x.remove() for x in self.handles]
return self
@property
def parametrized(self):
return {k: v for k, v in self.name2module.items() if len(list(v.state_dict().keys())) > 0}
|
@dataclass
class Tracker:
def _forward_hook(self, m, inputs: Tensor, outputs: Tensor, name: str):
pass
def __call__(self, x: Tensor):
pass
@property
def parametrized(self):
pass
| 6
| 0
| 5
| 0
| 4
| 0
| 2
| 0.05
| 0
| 4
| 0
| 0
| 3
| 0
| 3
| 3
| 23
| 3
| 19
| 10
| 14
| 1
| 18
| 9
| 14
| 2
| 0
| 1
| 5
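A usage sketch for the hook-based tracer above (not part of the source), assuming the `Tracker` dataclass is in scope: run one dummy forward pass through a toy network, then inspect which leaf modules were recorded and which of them carry parameters.

```python
import torch
from torch import nn

# Toy network; the Tracker records leaf modules (and Conv2d/BatchNorm2d) as hooks fire.
net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU(), nn.Flatten())
tracked = Tracker(net)(torch.randn(1, 3, 16, 16))

print([type(m).__name__ for m in tracked.traced])   # leaf modules in execution order
print(list(tracked.parametrized.keys()))             # only modules that own parameters/buffers
```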
|
4,868
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/convert_regnet_to_pytorch.py
|
transformers.models.regnet.convert_regnet_to_pytorch.FakeRegNetVisslWrapper
|
from vissl.models.model_helpers import get_trunk_forward_outputs
from torch import Tensor
import torch.nn as nn
class FakeRegNetVisslWrapper(nn.Module):
"""
Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file.
"""
def __init__(self, model: nn.Module):
super().__init__()
feature_blocks: list[tuple[str, nn.Module]] = []
feature_blocks.append(('conv1', model.stem))
for k, v in model.trunk_output.named_children():
assert k.startswith('block'), f'Unexpected layer name {k}'
block_index = len(feature_blocks) + 1
feature_blocks.append((f'res{block_index}', v))
self._feature_blocks = nn.ModuleDict(feature_blocks)
def forward(self, x: Tensor):
return get_trunk_forward_outputs(x, out_feat_keys=None, feature_blocks=self._feature_blocks)
|
class FakeRegNetVisslWrapper(nn.Module):
'''
Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file.
'''
def __init__(self, model: nn.Module):
pass
def forward(self, x: Tensor):
pass
| 3
| 1
| 10
| 1
| 8
| 1
| 2
| 0.31
| 1
| 3
| 0
| 0
| 2
| 1
| 2
| 12
| 25
| 4
| 16
| 7
| 13
| 5
| 12
| 7
| 9
| 2
| 1
| 1
| 3
|
4,869
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/convert_regnet_to_pytorch.py
|
transformers.models.regnet.convert_regnet_to_pytorch.ModuleTransfer
|
import torch.nn as nn
from torch import Tensor
from dataclasses import dataclass, field
@dataclass
class ModuleTransfer:
src: nn.Module
dest: nn.Module
verbose: int = 1
src_skip: list = field(default_factory=list)
dest_skip: list = field(default_factory=list)
raise_if_mismatch: bool = True
def __call__(self, x: Tensor):
"""
Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
hood we track all the operations in both modules.
"""
dest_traced = Tracker(self.dest)(x).parametrized
src_traced = Tracker(self.src)(x).parametrized
src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))
if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
raise Exception(f'Numbers of operations are different. Source module has {len(src_traced)} operations while destination module has {len(dest_traced)}.')
for dest_m, src_m in zip(dest_traced, src_traced):
dest_m.load_state_dict(src_m.state_dict())
if self.verbose == 1:
print(f'Transferred from={src_m} to={dest_m}')
|
@dataclass
class ModuleTransfer:
def __call__(self, x: Tensor):
'''
Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
hood we track all the operations in both modules.
'''
pass
| 3
| 1
| 21
| 3
| 14
| 4
| 4
| 0.19
| 0
| 7
| 1
| 0
| 1
| 0
| 1
| 1
| 29
| 4
| 21
| 9
| 19
| 4
| 18
| 9
| 16
| 4
| 0
| 2
| 4
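A self-contained miniature of what `ModuleTransfer` does, without the helper classes themselves: pair up the parameterized leaf modules of two architecturally identical models and copy state dicts across. Here module registration order stands in for the forward-pass trace; the real class uses forward hooks, so the pairing follows actual execution order and a count mismatch can be detected.

```python
import torch
from torch import nn

src = nn.Sequential(nn.Conv2d(3, 4, 3), nn.BatchNorm2d(4), nn.ReLU()).eval()
dest = nn.Sequential(nn.Conv2d(3, 4, 3), nn.BatchNorm2d(4), nn.ReLU()).eval()

def parametrized_leaves(model):
    # leaf modules that actually own parameters or buffers
    return [m for m in model.modules() if len(list(m.children())) == 0 and len(m.state_dict()) > 0]

for dest_m, src_m in zip(parametrized_leaves(dest), parametrized_leaves(src)):
    dest_m.load_state_dict(src_m.state_dict())

x = torch.randn(1, 3, 8, 8)
print(torch.allclose(src(x), dest(x)))  # True: dest now mirrors src
```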
|
4,870
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/convert_regnet_to_pytorch.py
|
transformers.models.regnet.convert_regnet_to_pytorch.NameToFromModelFuncMap
|
from functools import partial
import torch.nn as nn
from typing import Callable, Optional
import timm
class NameToFromModelFuncMap(dict):
"""
A Dictionary with some additional logic to return a function that creates the correct original model.
"""
def convert_name_to_timm(self, x: str) -> str:
x_split = x.split('-')
return x_split[0] + x_split[1] + '_' + ''.join(x_split[2:])
def __getitem__(self, x: str) -> Callable[[], tuple[nn.Module, dict]]:
if x not in self:
x = self.convert_name_to_timm(x)
val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
else:
val = super().__getitem__(x)
return val
|
class NameToFromModelFuncMap(dict):
'''
A Dictionary with some additional logic to return a function that creates the correct original model.
'''
def convert_name_to_timm(self, x: str) -> str:
pass
def __getitem__(self, x: str) -> Callable[[], tuple[nn.Module, dict]]:
pass
| 3
| 1
| 7
| 1
| 5
| 1
| 2
| 0.36
| 1
| 3
| 0
| 0
| 2
| 0
| 2
| 29
| 19
| 4
| 11
| 5
| 8
| 4
| 10
| 5
| 7
| 2
| 2
| 1
| 3
|
4,871
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/convert_regnet_to_pytorch.py
|
transformers.models.regnet.convert_regnet_to_pytorch.NameToOurModelFuncMap
|
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
import torch.nn as nn
from typing import Callable, Optional
class NameToOurModelFuncMap(dict):
"""
A Dictionary with some additional logic to return the correct Hugging Face RegNet class reference.
"""
def __getitem__(self, x: str) -> Callable[[], nn.Module]:
if 'seer' in x and 'in1k' not in x:
val = RegNetModel
else:
val = RegNetForImageClassification
return val
|
class NameToOurModelFuncMap(dict):
'''
A Dictionary with some additional logic to return the correct Hugging Face RegNet class reference.
'''
def __getitem__(self, x: str) -> Callable[[], nn.Module]:
pass
| 2
| 1
| 6
| 0
| 6
| 0
| 2
| 0.43
| 1
| 3
| 0
| 0
| 1
| 0
| 1
| 28
| 11
| 1
| 7
| 3
| 5
| 3
| 6
| 3
| 4
| 2
| 2
| 1
| 2
|
4,872
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/modeling_regnet.py
|
transformers.models.regnet.modeling_regnet.RegNetConvLayer
|
from typing import Optional
from torch import Tensor, nn
from ...activations import ACT2FN
class RegNetConvLayer(nn.Module):
def __init__(self, in_channels: int, out_channels: int, kernel_size: int=3, stride: int=1, groups: int=1, activation: Optional[str]='relu'):
super().__init__()
self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False)
self.normalization = nn.BatchNorm2d(out_channels)
self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
def forward(self, hidden_state):
hidden_state = self.convolution(hidden_state)
hidden_state = self.normalization(hidden_state)
hidden_state = self.activation(hidden_state)
return hidden_state
|
class RegNetConvLayer(nn.Module):
def __init__(self, in_channels: int, out_channels: int, kernel_size: int=3, stride: int=1, groups: int=1, activation: Optional[str]='relu'):
pass
def forward(self, hidden_state):
pass
| 3
| 0
| 13
| 0
| 13
| 0
| 2
| 0
| 1
| 3
| 0
| 0
| 2
| 3
| 2
| 12
| 28
| 1
| 27
| 14
| 16
| 0
| 11
| 6
| 8
| 2
| 1
| 0
| 3
|
4,873
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/modeling_regnet.py
|
transformers.models.regnet.modeling_regnet.RegNetEmbeddings
|
from .configuration_regnet import RegNetConfig
from torch import Tensor, nn
class RegNetEmbeddings(nn.Module):
"""
RegNet Embeddings (stem) composed of a single aggressive convolution.
"""
def __init__(self, config: RegNetConfig):
super().__init__()
self.embedder = RegNetConvLayer(config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act)
self.num_channels = config.num_channels
def forward(self, pixel_values):
num_channels = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
hidden_state = self.embedder(pixel_values)
return hidden_state
|
class RegNetEmbeddings(nn.Module):
'''
RegNet Embeddings (stem) composed of a single aggressive convolution.
'''
def __init__(self, config: RegNetConfig):
pass
def forward(self, pixel_values):
pass
| 3
| 1
| 7
| 0
| 7
| 0
| 2
| 0.2
| 1
| 4
| 2
| 0
| 2
| 2
| 2
| 12
| 20
| 2
| 15
| 7
| 12
| 3
| 11
| 7
| 8
| 2
| 1
| 1
| 3
|
4,874
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/modeling_regnet.py
|
transformers.models.regnet.modeling_regnet.RegNetEncoder
|
from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from torch import Tensor, nn
from .configuration_regnet import RegNetConfig
class RegNetEncoder(nn.Module):
def __init__(self, config: RegNetConfig):
super().__init__()
self.stages = nn.ModuleList([])
self.stages.append(RegNetStage(config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0]))
in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))
def forward(self, hidden_state: Tensor, output_hidden_states: bool=False, return_dict: bool=True) -> BaseModelOutputWithNoAttention:
hidden_states = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
hidden_state = stage_module(hidden_state)
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
if not return_dict:
return tuple((v for v in [hidden_state, hidden_states] if v is not None))
return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
|
class RegNetEncoder(nn.Module):
def __init__(self, config: RegNetConfig):
pass
def forward(self, hidden_state: Tensor, output_hidden_states: bool=False, return_dict: bool=True) -> BaseModelOutputWithNoAttention:
pass
| 3
| 0
| 17
| 3
| 14
| 1
| 5
| 0.03
| 1
| 8
| 3
| 0
| 2
| 1
| 2
| 12
| 36
| 6
| 29
| 10
| 24
| 1
| 19
| 8
| 16
| 6
| 1
| 2
| 9
|
4,875
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/modeling_regnet.py
|
transformers.models.regnet.modeling_regnet.RegNetForImageClassification
|
from torch import Tensor, nn
from typing import Optional
from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...utils import auto_docstring, logging
import torch
@auto_docstring(custom_intro='\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ')
class RegNetForImageClassification(RegNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.regnet = RegNetModel(config)
self.classifier = nn.Sequential(nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity())
self.post_init()
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> ImageClassifierOutputWithNoAttention:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
|
@auto_docstring(custom_intro='\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ')
class RegNetForImageClassification(RegNetPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> ImageClassifierOutputWithNoAttention:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5
| 1
| 30
| 4
| 23
| 4
| 8
| 0.13
| 1
| 5
| 2
| 0
| 2
| 3
| 2
| 3
| 68
| 8
| 53
| 19
| 37
| 7
| 32
| 12
| 29
| 13
| 2
| 3
| 15
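A short inference sketch for the classification head above. The checkpoint is the one named in the RegNet configuration docstring and is assumed to be reachable on the Hub; the all-black image is only a stand-in for real input.

```python
import numpy as np
import torch
from PIL import Image
from transformers import AutoImageProcessor, RegNetForImageClassification

name = "facebook/regnet-y-040"  # illustrative checkpoint
processor = AutoImageProcessor.from_pretrained(name)
model = RegNetForImageClassification.from_pretrained(name).eval()

image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])
```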
|
4,876
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/modeling_regnet.py
|
transformers.models.regnet.modeling_regnet.RegNetModel
|
from torch import Tensor, nn
from ...utils import auto_docstring, logging
from ...modeling_outputs import BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from typing import Optional
@auto_docstring
class RegNetModel(RegNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embedder = RegNetEmbeddings(config)
self.encoder = RegNetEncoder(config)
self.pooler = nn.AdaptiveAvgPool2d((1, 1))
self.post_init()
@auto_docstring
def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BaseModelOutputWithPoolingAndNoAttention:
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
embedding_output = self.embedder(pixel_values)
encoder_outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = encoder_outputs[0]
pooled_output = self.pooler(last_hidden_state)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states)
|
@auto_docstring
class RegNetModel(RegNetPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BaseModelOutputWithPoolingAndNoAttention:
pass
| 5
| 0
| 17
| 3
| 14
| 1
| 3
| 0.03
| 1
| 6
| 3
| 0
| 2
| 4
| 2
| 3
| 44
| 7
| 36
| 14
| 23
| 1
| 18
| 11
| 15
| 4
| 2
| 1
| 5
|
4,877
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/modeling_regnet.py
|
transformers.models.regnet.modeling_regnet.RegNetPreTrainedModel
|
from ...modeling_utils import PreTrainedModel
from torch import Tensor, nn
from .configuration_regnet import RegNetConfig
import math
from ...utils import auto_docstring, logging
@auto_docstring
class RegNetPreTrainedModel(PreTrainedModel):
config: RegNetConfig
base_model_prefix = 'regnet'
main_input_name = 'pixel_values'
_no_split_modules = ['RegNetYLayer']
def _init_weights(self, module):
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(module, nn.Linear):
nn.init.kaiming_uniform_(module.weight, a=math.sqrt(5))
if module.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(module.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
nn.init.uniform_(module.bias, -bound, bound)
elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(module.weight, 1)
nn.init.constant_(module.bias, 0)
|
@auto_docstring
class RegNetPreTrainedModel(PreTrainedModel):
def _init_weights(self, module):
pass
| 3
| 0
| 13
| 0
| 12
| 1
| 6
| 0.35
| 1
| 0
| 0
| 2
| 1
| 0
| 1
| 1
| 25
| 2
| 17
| 8
| 15
| 6
| 15
| 8
| 13
| 6
| 1
| 2
| 6
|
4,878
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/modeling_regnet.py
|
transformers.models.regnet.modeling_regnet.RegNetSELayer
|
from torch import Tensor, nn
class RegNetSELayer(nn.Module):
"""
Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://huggingface.co/papers/1709.01507).
"""
def __init__(self, in_channels: int, reduced_channels: int):
super().__init__()
self.pooler = nn.AdaptiveAvgPool2d((1, 1))
self.attention = nn.Sequential(nn.Conv2d(in_channels, reduced_channels, kernel_size=1), nn.ReLU(), nn.Conv2d(reduced_channels, in_channels, kernel_size=1), nn.Sigmoid())
def forward(self, hidden_state):
pooled = self.pooler(hidden_state)
attention = self.attention(pooled)
hidden_state = hidden_state * attention
return hidden_state
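An illustrative sketch of the squeeze-and-excitation gating performed above, built from plain `torch.nn` layers with assumed toy sizes rather than the library class itself. It shows that the gate has one value per channel and broadcasts over the spatial dimensions.
```python
import torch
import torch.nn as nn

in_channels, reduced = 16, 4
pooler = nn.AdaptiveAvgPool2d((1, 1))
attention = nn.Sequential(
    nn.Conv2d(in_channels, reduced, kernel_size=1), nn.ReLU(),
    nn.Conv2d(reduced, in_channels, kernel_size=1), nn.Sigmoid(),
)

x = torch.randn(2, in_channels, 14, 14)
gate = attention(pooler(x))   # (2, 16, 1, 1): one weight in [0, 1] per channel
y = x * gate                  # broadcast over height and width
print(gate.shape, y.shape)    # torch.Size([2, 16, 1, 1]) torch.Size([2, 16, 14, 14])
```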
|
class RegNetSELayer(nn.Module):
'''
Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://huggingface.co/papers/1709.01507).
'''
def __init__(self, in_channels: int, reduced_channels: int):
pass
def forward(self, hidden_state):
pass
| 3
| 1
| 8
| 1
| 7
| 1
| 1
| 0.27
| 1
| 2
| 0
| 0
| 2
| 2
| 2
| 12
| 22
| 3
| 15
| 7
| 12
| 4
| 10
| 7
| 7
| 1
| 1
| 0
| 2
|
4,879
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/modeling_regnet.py
|
transformers.models.regnet.modeling_regnet.RegNetShortCut
|
from torch import Tensor, nn
class RegNetShortCut(nn.Module):
"""
RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
downsample the input using `stride=2`.
"""
def __init__(self, in_channels: int, out_channels: int, stride: int=2):
super().__init__()
self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
self.normalization = nn.BatchNorm2d(out_channels)
def forward(self, input: Tensor) -> Tensor:
hidden_state = self.convolution(input)
hidden_state = self.normalization(hidden_state)
return hidden_state
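A small sketch of what the shortcut projection does to tensor shapes, using assumed channel counts and spatial sizes (plain `torch.nn` layers, not the library class).
```python
import torch
import torch.nn as nn

shortcut = nn.Sequential(
    nn.Conv2d(32, 64, kernel_size=1, stride=2, bias=False),  # project 32 -> 64 channels, downsample by 2
    nn.BatchNorm2d(64),
)
x = torch.randn(1, 32, 56, 56)
print(shortcut(x).shape)  # torch.Size([1, 64, 28, 28])
```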
|
class RegNetShortCut(nn.Module):
'''
RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
downsample the input using `stride=2`.
'''
def __init__(self, in_channels: int, out_channels: int, stride: int=2):
pass
def forward(self, input: Tensor) -> Tensor:
pass
| 3
| 1
| 4
| 0
| 4
| 0
| 1
| 0.44
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 15
| 2
| 9
| 6
| 6
| 4
| 9
| 6
| 6
| 1
| 1
| 0
| 2
|
4,880
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/modeling_regnet.py
|
transformers.models.regnet.modeling_regnet.RegNetStage
|
from .configuration_regnet import RegNetConfig
from torch import Tensor, nn
class RegNetStage(nn.Module):
"""
    A RegNet stage composed of stacked layers.
"""
def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int=2, depth: int=2):
super().__init__()
layer = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
self.layers = nn.Sequential(layer(config, in_channels, out_channels, stride=stride), *[layer(config, out_channels, out_channels) for _ in range(depth - 1)])
def forward(self, hidden_state):
hidden_state = self.layers(hidden_state)
return hidden_state
|
class RegNetStage(nn.Module):
'''
    A RegNet stage composed of stacked layers.
'''
def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int=2, depth: int=2):
pass
def forward(self, hidden_state):
pass
| 3
| 1
| 13
| 1
| 11
| 1
| 2
| 0.17
| 1
| 6
| 3
| 0
| 2
| 1
| 2
| 12
| 31
| 4
| 23
| 13
| 13
| 4
| 8
| 5
| 5
| 2
| 1
| 0
| 3
|
4,881
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/modeling_regnet.py
|
transformers.models.regnet.modeling_regnet.RegNetXLayer
|
from .configuration_regnet import RegNetConfig
from torch import Tensor, nn
from ...activations import ACT2FN
class RegNetXLayer(nn.Module):
"""
    RegNet's layer composed of three `3x3` convolutions, the same as a ResNet bottleneck layer with reduction = 1.
"""
def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int=1):
super().__init__()
should_apply_shortcut = in_channels != out_channels or stride != 1
groups = max(1, out_channels // config.groups_width)
self.shortcut = RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
self.layer = nn.Sequential(RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None))
self.activation = ACT2FN[config.hidden_act]
def forward(self, hidden_state):
residual = hidden_state
hidden_state = self.layer(hidden_state)
residual = self.shortcut(residual)
hidden_state += residual
hidden_state = self.activation(hidden_state)
return hidden_state
|
class RegNetXLayer(nn.Module):
'''
    RegNet's layer composed of three `3x3` convolutions, the same as a ResNet bottleneck layer with reduction = 1.
'''
def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int=1):
pass
def forward(self, hidden_state):
pass
| 3
| 1
| 10
| 0
| 10
| 0
| 2
| 0.14
| 1
| 5
| 3
| 0
| 2
| 3
| 2
| 12
| 26
| 2
| 21
| 9
| 18
| 3
| 15
| 9
| 12
| 2
| 1
| 0
| 3
|
4,882
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/regnet/modeling_regnet.py
|
transformers.models.regnet.modeling_regnet.RegNetYLayer
|
from .configuration_regnet import RegNetConfig
from torch import Tensor, nn
from ...activations import ACT2FN
class RegNetYLayer(nn.Module):
"""
RegNet's Y layer: an X layer with Squeeze and Excitation.
"""
def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int=1):
super().__init__()
should_apply_shortcut = in_channels != out_channels or stride != 1
groups = max(1, out_channels // config.groups_width)
self.shortcut = RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
self.layer = nn.Sequential(RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None))
self.activation = ACT2FN[config.hidden_act]
def forward(self, hidden_state):
residual = hidden_state
hidden_state = self.layer(hidden_state)
residual = self.shortcut(residual)
hidden_state += residual
hidden_state = self.activation(hidden_state)
return hidden_state
|
class RegNetYLayer(nn.Module):
'''
RegNet's Y layer: an X layer with Squeeze and Excitation.
'''
def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int=1):
pass
def forward(self, hidden_state):
pass
| 3
| 1
| 11
| 0
| 11
| 0
| 2
| 0.14
| 1
| 6
| 4
| 0
| 2
| 3
| 2
| 12
| 27
| 2
| 22
| 9
| 19
| 3
| 15
| 9
| 12
| 2
| 1
| 0
| 3
|
4,883
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rembert/configuration_rembert.py
|
transformers.models.rembert.configuration_rembert.RemBertConfig
|
from ...configuration_utils import PretrainedConfig
class RemBertConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`RemBertModel`]. It is used to instantiate an
RemBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the RemBERT
[google/rembert](https://huggingface.co/google/rembert) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 250300):
            Vocabulary size of the RemBERT model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`RemBertModel`] or [`TFRemBertModel`].
hidden_size (`int`, *optional*, defaults to 1152):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 18):
Number of attention heads for each attention layer in the Transformer encoder.
input_embedding_size (`int`, *optional*, defaults to 256):
Dimensionality of the input embeddings.
output_embedding_size (`int`, *optional*, defaults to 1664):
Dimensionality of the output embeddings.
intermediate_size (`int`, *optional*, defaults to 4608):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0):
The dropout ratio for the attention probabilities.
classifier_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the classifier layer when fine-tuning.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`RemBertModel`] or [`TFRemBertModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
is_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
Example:
```python
>>> from transformers import RemBertModel, RemBertConfig
>>> # Initializing a RemBERT rembert style configuration
>>> configuration = RemBertConfig()
>>> # Initializing a model from the rembert style configuration
>>> model = RemBertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = 'rembert'
def __init__(self, vocab_size=250300, hidden_size=1152, num_hidden_layers=32, num_attention_heads=18, input_embedding_size=256, output_embedding_size=1664, intermediate_size=4608, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=312, eos_token_id=313, **kwargs):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.input_embedding_size = input_embedding_size
self.output_embedding_size = output_embedding_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.classifier_dropout_prob = classifier_dropout_prob
self.initializer_range = initializer_range
self.type_vocab_size = type_vocab_size
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
self.tie_word_embeddings = False
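A short sketch highlighting RemBERT's decoupled embedding sizes, assuming `transformers` is installed; the printed values are simply the defaults defined above.
```python
from transformers import RemBertConfig

config = RemBertConfig()
# Small input embeddings, a wider encoder, and a larger output embedding space.
print(config.input_embedding_size, config.hidden_size, config.output_embedding_size)  # 256 1152 1664
print(config.tie_word_embeddings)  # False: input and output embeddings are never tied
```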
| null | 2
| 1
| 42
| 1
| 41
| 0
| 1
| 1.35
| 1
| 1
| 0
| 0
| 1
| 17
| 1
| 1
| 112
| 11
| 43
| 42
| 19
| 58
| 21
| 20
| 19
| 1
| 1
| 0
| 1
|
4,884
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rembert/configuration_rembert.py
|
transformers.models.rembert.configuration_rembert.RemBertOnnxConfig
|
from ...onnx import OnnxConfig
from collections import OrderedDict
from collections.abc import Mapping
class RemBertOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == 'multiple-choice':
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
@property
def atol_for_validation(self) -> float:
return 0.0001
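A standalone sketch of the dynamic-axes mapping produced by the `inputs` property above, written as a plain function (hypothetical helper, not the library API) so the two task shapes can be compared directly.
```python
from collections import OrderedDict

def dynamic_axes(task: str) -> OrderedDict:
    # multiple-choice inputs carry an extra "choice" dimension between batch and sequence
    axis = {0: "batch", 1: "choice", 2: "sequence"} if task == "multiple-choice" else {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", axis), ("attention_mask", axis), ("token_type_ids", axis)])

print(dynamic_axes("default"))
print(dynamic_axes("multiple-choice"))
```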
|
class RemBertOnnxConfig(OnnxConfig):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
pass
@property
def atol_for_validation(self) -> float:
pass
| 5
| 0
| 7
| 0
| 7
| 0
| 2
| 0
| 1
| 4
| 0
| 0
| 2
| 0
| 2
| 2
| 18
| 1
| 17
| 6
| 12
| 0
| 8
| 4
| 5
| 2
| 1
| 1
| 3
|
4,885
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rembert/modeling_rembert.py
|
transformers.models.rembert.modeling_rembert.RemBertAttention
|
from torch import nn
import torch
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
class RemBertAttention(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
self.self = RemBertSelfAttention(config, layer_idx=layer_idx)
self.output = RemBertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads)
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
|
class RemBertAttention(nn.Module):
def __init__(self, config, layer_idx=None):
pass
def prune_heads(self, heads):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
pass
| 4
| 0
| 15
| 1
| 13
| 1
| 1
| 0.12
| 1
| 6
| 2
| 0
| 3
| 3
| 3
| 13
| 49
| 4
| 41
| 20
| 28
| 5
| 22
| 11
| 18
| 2
| 1
| 1
| 4
|
4,886
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rembert/modeling_rembert.py
|
transformers.models.rembert.modeling_rembert.RemBertEmbeddings
|
import torch
from torch import nn
from typing import Optional, Union
class RemBertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.input_embedding_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.input_embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.input_embedding_size)
self.LayerNorm = nn.LayerNorm(config.input_embedding_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer('position_ids', torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False)
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length:seq_length + past_key_values_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
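A toy sketch of the embedding sum performed above, with assumed tiny vocabulary and hidden sizes instead of the real config; it shows how `position_ids` and `token_type_ids` default when not passed.
```python
import torch
import torch.nn as nn

vocab, emb, max_pos, type_vocab = 100, 8, 16, 2
word = nn.Embedding(vocab, emb, padding_idx=0)
pos = nn.Embedding(max_pos, emb)
tok_type = nn.Embedding(type_vocab, emb)
norm, drop = nn.LayerNorm(emb), nn.Dropout(0.1)

input_ids = torch.tensor([[5, 7, 9, 0]])                      # (batch=1, seq=4)
position_ids = torch.arange(input_ids.shape[1]).unsqueeze(0)  # default: [0, 1, 2, 3]
token_type_ids = torch.zeros_like(input_ids)                  # default: all zeros

embeddings = word(input_ids) + tok_type(token_type_ids) + pos(position_ids)
embeddings = drop(norm(embeddings))
print(embeddings.shape)  # torch.Size([1, 4, 8])
```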
|
class RemBertEmbeddings(nn.Module):
'''Construct the embeddings from word, position and token_type embeddings.'''
def __init__(self, config):
pass
def forward(self, input_ids: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, past_key_values_length: int=0) -> torch.Tensor:
pass
| 3
| 1
| 24
| 4
| 19
| 2
| 3
| 0.1
| 1
| 3
| 0
| 0
| 2
| 5
| 2
| 12
| 52
| 9
| 39
| 20
| 29
| 4
| 27
| 13
| 24
| 5
| 1
| 1
| 6
|
4,887
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rembert/modeling_rembert.py
|
transformers.models.rembert.modeling_rembert.RemBertEncoder
|
from typing import Optional, Union
from torch import nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
import torch
class RemBertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.embedding_hidden_mapping_in = nn.Linear(config.input_embedding_size, config.hidden_size)
self.layer = nn.ModuleList([RemBertLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...')
use_cache = False
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if use_cache and isinstance(past_key_values, tuple):
logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
hidden_states = self.embedding_hidden_mapping_in(hidden_states)
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_values, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple((v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions, all_cross_attentions] if v is not None))
return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions)
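A minimal sketch of the first step of the encoder above: `embedding_hidden_mapping_in` projects the small input embeddings up to the encoder width (256 and 1152 are the documented defaults; the batch and sequence sizes are assumptions).
```python
import torch
import torch.nn as nn

embedding_hidden_mapping_in = nn.Linear(256, 1152)   # input_embedding_size -> hidden_size
hidden_states = torch.randn(1, 4, 256)               # output of the embedding layer
print(embedding_hidden_mapping_in(hidden_states).shape)  # torch.Size([1, 4, 1152])
```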
|
class RemBertEncoder(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
pass
| 3
| 0
| 46
| 4
| 42
| 0
| 9
| 0
| 1
| 8
| 2
| 0
| 2
| 4
| 2
| 12
| 93
| 8
| 85
| 27
| 70
| 0
| 37
| 15
| 34
| 17
| 1
| 3
| 18
|
4,888
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rembert/modeling_rembert.py
|
transformers.models.rembert.modeling_rembert.RemBertForCausalLM
|
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import auto_docstring, logging
from typing import Optional, Union
import torch
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...generation import GenerationMixin
@auto_docstring(custom_intro='\n RemBERT Model with a `language modeling` head on top for CLM fine-tuning.\n ')
class RemBertForCausalLM(RemBertPreTrainedModel, GenerationMixin):
_tied_weights_keys = ['cls.predictions.decoder.weight']
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning('If you want to use `RemBertForCausalLM` as a standalone, add `is_decoder=True.`')
self.rembert = RemBertModel(config, add_pooling_layer=False)
self.cls = RemBertOnlyMLMHead(config)
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, RemBertForCausalLM, RemBertConfig
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("google/rembert")
>>> config = RemBertConfig.from_pretrained("google/rembert")
>>> config.is_decoder = True
>>> model = RemBertForCausalLM.from_pretrained("google/rembert", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.rembert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
lm_loss = None
if labels is not None:
lm_loss = self.loss_function(prediction_scores, labels, vocab_size=self.config.vocab_size, **kwargs)
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return (lm_loss,) + output if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)
|
@auto_docstring(custom_intro='\n RemBERT Model with a `language modeling` head on top for CLM fine-tuning.\n ')
class RemBertForCausalLM(RemBertPreTrainedModel, GenerationMixin):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, RemBertForCausalLM, RemBertConfig
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("google/rembert")
>>> config = RemBertConfig.from_pretrained("google/rembert")
>>> config.is_decoder = True
>>> model = RemBertForCausalLM.from_pretrained("google/rembert", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```'''
pass
| 7
| 1
| 25
| 3
| 15
| 7
| 2
| 0.45
| 2
| 6
| 3
| 0
| 5
| 2
| 5
| 6
| 133
| 20
| 78
| 34
| 53
| 35
| 30
| 16
| 24
| 5
| 2
| 1
| 11
|
4,889
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rembert/modeling_rembert.py
|
transformers.models.rembert.modeling_rembert.RemBertForMaskedLM
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...utils import auto_docstring, logging
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from typing import Optional, Union
import torch
@auto_docstring
class RemBertForMaskedLM(RemBertPreTrainedModel):
_tied_weights_keys = ['cls.predictions.decoder.weight']
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning('If you want to use `RemBertForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.')
self.rembert = RemBertModel(config, add_pooling_layer=False)
self.cls = RemBertOnlyMLMHead(config)
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.rembert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return (masked_lm_loss,) + output if masked_lm_loss is not None else output
return MaskedLMOutput(loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
assert self.config.pad_token_id is not None, 'The PAD token should be defined for generation'
attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy_token = torch.full((effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {'input_ids': input_ids, 'attention_mask': attention_mask}
@classmethod
def can_generate(cls) -> bool:
"""
Legacy correction: RemBertForMaskedLM can't call `generate()` from `GenerationMixin`, even though it has a
`prepare_inputs_for_generation` method.
"""
return False
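A small sketch of what `prepare_inputs_for_generation` above does to the tensors: it appends one dummy PAD token to `input_ids` and extends the attention mask with a zero so the new position is never attended to. The token ids and `pad_token_id=0` are assumptions for illustration.
```python
import torch

pad_token_id = 0
input_ids = torch.tensor([[101, 2023, 103]])
attention_mask = torch.ones_like(input_ids)

attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy = torch.full((input_ids.shape[0], 1), pad_token_id, dtype=torch.long)
input_ids = torch.cat([input_ids, dummy], dim=1)

print(input_ids)       # tensor([[ 101, 2023,  103,    0]])  -> one PAD appended
print(attention_mask)  # tensor([[1, 1, 1, 0]])              -> the appended slot is masked out
```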
|
@auto_docstring
class RemBertForMaskedLM(RemBertPreTrainedModel):
def __init__(self, config):
pass
def get_output_embeddings(self):
pass
def set_output_embeddings(self, new_embeddings):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MaskedLMOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
'''
pass
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
pass
@classmethod
def can_generate(cls) -> bool:
'''
Legacy correction: RemBertForMaskedLM can't call `generate()` from `GenerationMixin`, even though it has a
`prepare_inputs_for_generation` method.
'''
pass
| 10
| 2
| 17
| 2
| 14
| 2
| 2
| 0.12
| 1
| 5
| 3
| 0
| 5
| 2
| 5
| 6
| 99
| 15
| 76
| 33
| 50
| 9
| 34
| 18
| 28
| 5
| 2
| 1
| 10
|
4,890
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rembert/modeling_rembert.py
|
transformers.models.rembert.modeling_rembert.RemBertForMultipleChoice
|
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from typing import Optional, Union
from ...utils import auto_docstring, logging
import torch
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch import nn
@auto_docstring
class RemBertForMultipleChoice(RemBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.rembert = RemBertModel(config)
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MultipleChoiceModelOutput]:
"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None
outputs = self.rembert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return MultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
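A sketch of the reshaping used for multiple choice above, with assumed toy sizes: the `(batch, num_choices, seq_len)` inputs are flattened so each choice is encoded as its own sequence, and the per-choice scores are folded back into `(batch, num_choices)` logits.
```python
import torch

batch, num_choices, seq_len = 2, 4, 7
input_ids = torch.randint(0, 100, (batch, num_choices, seq_len))

flat = input_ids.view(-1, input_ids.size(-1))    # (batch * num_choices, seq_len) = (8, 7)
logits = torch.randn(batch * num_choices, 1)     # one score per (example, choice) pair from the classifier
reshaped_logits = logits.view(-1, num_choices)   # (batch, num_choices) = (2, 4), fed to cross-entropy
print(flat.shape, reshaped_logits.shape)
```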
|
@auto_docstring
class RemBertForMultipleChoice(RemBertPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, MultipleChoiceModelOutput]:
'''
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
'''
pass
| 5
| 1
| 37
| 5
| 29
| 4
| 6
| 0.11
| 1
| 4
| 2
| 0
| 2
| 3
| 2
| 3
| 82
| 10
| 65
| 27
| 44
| 7
| 28
| 14
| 25
| 11
| 2
| 1
| 12
|
4,891
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rembert/modeling_rembert.py
|
transformers.models.rembert.modeling_rembert.RemBertForQuestionAnswering
|
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from torch import nn
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...utils import auto_docstring, logging
from typing import Optional, Union
@auto_docstring
class RemBertForQuestionAnswering(RemBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.rembert = RemBertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.rembert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return (total_loss,) + output if total_loss is not None else output
return QuestionAnsweringModelOutput(loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
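A toy sketch of the start/end split and the position clamping above: the two-channel output of `qa_outputs` is split into start and end logits, and gold positions outside the sequence are clamped to `ignored_index` so the loss ignores them. Tensor values are assumptions.
```python
import torch

seq_len = 10
logits = torch.randn(2, seq_len, 2)                                          # qa_outputs: 2 values per token
start_logits, end_logits = logits.split(1, dim=-1)
start_logits, end_logits = start_logits.squeeze(-1), end_logits.squeeze(-1)  # each (2, seq_len)

start_positions = torch.tensor([3, 42])      # 42 lies outside the sequence
start_positions.clamp_(0, seq_len)           # clamped to ignored_index, so CrossEntropyLoss skips it
print(start_logits.shape, start_positions)   # torch.Size([2, 10]) tensor([ 3, 10])
```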
|
@auto_docstring
class RemBertForQuestionAnswering(RemBertPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, QuestionAnsweringModelOutput]:
pass
| 5
| 0
| 42
| 5
| 30
| 7
| 4
| 0.19
| 1
| 4
| 2
| 0
| 2
| 3
| 2
| 3
| 91
| 11
| 67
| 30
| 45
| 13
| 32
| 16
| 29
| 7
| 2
| 2
| 8
|
4,892
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rembert/modeling_rembert.py
|
transformers.models.rembert.modeling_rembert.RemBertForSequenceClassification
|
from torch import nn
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import auto_docstring, logging
from typing import Optional, Union
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch
@auto_docstring(custom_intro='\n RemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class RemBertForSequenceClassification(RemBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.rembert = RemBertModel(config)
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.rembert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if self.config.problem_type == 'regression':
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == 'single_label_classification':
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == 'multi_label_classification':
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
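A standalone sketch mirroring the `problem_type` branch above (a hypothetical helper, not the library API): the loss type is inferred from `num_labels` and the dtype of the labels.
```python
import torch

def infer_problem_type(num_labels: int, labels: torch.Tensor) -> str:
    if num_labels == 1:
        return "regression"                      # MSELoss
    if num_labels > 1 and labels.dtype in (torch.long, torch.int):
        return "single_label_classification"     # CrossEntropyLoss
    return "multi_label_classification"          # BCEWithLogitsLoss

print(infer_problem_type(1, torch.tensor([0.7])))              # regression
print(infer_problem_type(3, torch.tensor([2])))                # single_label_classification
print(infer_problem_type(3, torch.tensor([[0.0, 1.0, 1.0]])))  # multi_label_classification
```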
|
@auto_docstring(custom_intro='\n RemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the\n pooled output) e.g. for GLUE tasks.\n ')
class RemBertForSequenceClassification(RemBertPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, SequenceClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
'''
pass
| 5
| 1
| 40
| 4
| 33
| 4
| 7
| 0.1
| 1
| 5
| 2
| 0
| 2
| 4
| 2
| 3
| 87
| 8
| 72
| 26
| 51
| 7
| 34
| 13
| 31
| 12
| 2
| 3
| 13
|
4,893
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rembert/modeling_rembert.py
|
transformers.models.rembert.modeling_rembert.RemBertForTokenClassification
|
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...utils import auto_docstring, logging
from typing import Optional, Union
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
import torch
from torch import nn
@auto_docstring
class RemBertForTokenClassification(RemBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.rembert = RemBertModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.classifier_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(self, input_ids: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]:
"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.rembert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
|
@auto_docstring
class RemBertForTokenClassification(RemBertPreTrainedModel):
def __init__(self, config):
pass
@auto_docstring
def forward(self, input_ids: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, TokenClassifierOutput]:
'''
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
'''
pass
| 5
| 1
| 31
| 4
| 24
| 3
| 3
| 0.09
| 1
| 4
| 2
| 0
| 2
| 4
| 2
| 3
| 69
| 9
| 55
| 26
| 34
| 5
| 22
| 13
| 19
| 5
| 2
| 1
| 6
|
4,894
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rembert/modeling_rembert.py
|
transformers.models.rembert.modeling_rembert.RemBertIntermediate
|
from ...activations import ACT2FN
from torch import nn
import torch
class RemBertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
class RemBertIntermediate(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3
| 0
| 6
| 0
| 6
| 0
| 2
| 0
| 1
| 3
| 0
| 0
| 2
| 2
| 2
| 12
| 13
| 1
| 12
| 5
| 9
| 0
| 11
| 5
| 8
| 2
| 1
| 1
| 3
|
4,895
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rembert/modeling_rembert.py
|
transformers.models.rembert.modeling_rembert.RemBertLMPredictionHead
|
from ...activations import ACT2FN
import torch
from torch import nn
class RemBertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.output_embedding_size)
self.decoder = nn.Linear(config.output_embedding_size, config.vocab_size)
self.activation = ACT2FN[config.hidden_act]
self.LayerNorm = nn.LayerNorm(config.output_embedding_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
|
class RemBertLMPredictionHead(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 4 | 2 | 12 | 14 | 1 | 13 | 7 | 10 | 0 | 13 | 7 | 10 | 1 | 1 | 0 | 2 |
4,896
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rembert/modeling_rembert.py
|
transformers.models.rembert.modeling_rembert.RemBertLayer
|
from ...modeling_layers import GradientCheckpointingLayer
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from typing import Optional, Union
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
import torch
class RemBertLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = RemBertAttention(config, layer_idx)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f'{self} should be used as a decoder model if cross attention is added')
self.crossattention = RemBertAttention(config, layer_idx=layer_idx)
self.intermediate = RemBertIntermediate(config)
self.output = RemBertOutput(config)
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
self_attention_outputs = self.attention(hidden_states, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, past_key_values=past_key_values, cache_position=cache_position)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:]
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, 'crossattention'):
raise ValueError(f'If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`')
cross_attention_outputs = self.crossattention(attention_output, attention_mask=encoder_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, past_key_values=past_key_values, output_attentions=output_attentions, cache_position=cache_position)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:]
layer_output = apply_chunking_to_forward(self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
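The apply_chunking_to_forward call above splits the feed-forward pass along the sequence dimension whenever chunk_size_feed_forward > 0, trading extra calls for lower peak memory. A minimal sketch of that idea, with a plain Linear standing in for the real intermediate/output pair:

import torch
from transformers.pytorch_utils import apply_chunking_to_forward

dense = torch.nn.Linear(16, 16)

def feed_forward_chunk(x):
    return dense(x)

hidden = torch.randn(2, 8, 16)                                         # (batch, seq_len, hidden)
chunked = apply_chunking_to_forward(feed_forward_chunk, 4, 1, hidden)  # chunk_size=4 along dim 1
full = feed_forward_chunk(hidden)
print(torch.allclose(chunked, full))                                   # True: chunking only affects memory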
|
class RemBertLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
pass
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, output_attentions: Optional[bool]=False, cache_position: Optional[torch.Tensor]=None) -> tuple[torch.Tensor]:
pass
def feed_forward_chunk(self, attention_output):
pass
| 4 | 0 | 27 | 2 | 23 | 2 | 4 | 0.13 | 1 | 7 | 3 | 0 | 3 | 8 | 3 | 13 | 86 | 9 | 70 | 32 | 57 | 9 | 41 | 23 | 37 | 7 | 1 | 2 | 11 |
4,897
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rembert/modeling_rembert.py
|
transformers.models.rembert.modeling_rembert.RemBertModel
|
import torch
from typing import Optional, Union
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...utils import auto_docstring, logging
@auto_docstring(custom_intro='\n The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of\n cross-attention is added between the self-attention layers, following the architecture described in [Attention is\n all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,\n Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n\n To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set\n to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and\n `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.\n ')
class RemBertModel(RemBertPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = RemBertEmbeddings(config)
self.encoder = RemBertEncoder(config)
self.pooler = RemBertPooler(config) if add_pooling_layer else None
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the base
        class PreTrainedModel.
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, past_key_values: Optional[Cache]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[tuple, BaseModelOutputWithPoolingAndCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
past_key_values_length = 0
if past_key_values is not None:
past_key_values_length = past_key_values[0][0].shape[-2] if not isinstance(past_key_values, Cache) else past_key_values.get_seq_length()
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length)
encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions)
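The decoder recipe from the intro docstring can be sketched as follows; the configuration sizes are made up for brevity, and encoder_hidden_states is a random tensor standing in for real encoder output.

import torch
from transformers import RemBertConfig, RemBertModel

config = RemBertConfig(
    vocab_size=64, hidden_size=32, num_hidden_layers=2, num_attention_heads=2,
    intermediate_size=64, is_decoder=True, add_cross_attention=True,
)
decoder = RemBertModel(config)

input_ids = torch.randint(0, config.vocab_size, (1, 6))
encoder_hidden_states = torch.randn(1, 10, config.hidden_size)  # pretend encoder output
out = decoder(input_ids=input_ids, encoder_hidden_states=encoder_hidden_states, use_cache=False)
print(out.last_hidden_state.shape)  # torch.Size([1, 6, 32])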
| null | 8 | 2 | 29 | 3 | 19 | 7 | 5 | 0.41 | 1 | 8 | 4 | 0 | 5 | 4 | 5 | 6 | 167 | 22 | 103 | 39 | 76 | 42 | 51 | 23 | 45 | 17 | 2 | 2 | 23 |
4,898
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rembert/modeling_rembert.py
|
transformers.models.rembert.modeling_rembert.RemBertOnlyMLMHead
|
from torch import nn
import torch
class RemBertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = RemBertLMPredictionHead(config)
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
prediction_scores = self.predictions(sequence_output)
return prediction_scores
|
class RemBertOnlyMLMHead(nn.Module):
def __init__(self, config):
pass
def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 3 | 1 | 0 | 2 | 1 | 2 | 12 | 8 | 1 | 7 | 5 | 4 | 0 | 7 | 5 | 4 | 1 | 1 | 0 | 2 |
4,899
|
huggingface/pytorch-pretrained-BERT
|
huggingface_pytorch-pretrained-BERT/src/transformers/models/rembert/modeling_rembert.py
|
transformers.models.rembert.modeling_rembert.RemBertOutput
|
from torch import nn
import torch
class RemBertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
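For completeness, a toy sketch of the project, dropout, add-residual, LayerNorm pattern in this block, again with a SimpleNamespace and invented sizes rather than a real config:

import torch
from types import SimpleNamespace

cfg = SimpleNamespace(intermediate_size=64, hidden_size=32,
                      layer_norm_eps=1e-12, hidden_dropout_prob=0.0)
block = RemBertOutput(cfg)  # assumes the class above is in scope

intermediate = torch.randn(2, 8, 64)  # output of RemBertIntermediate
residual = torch.randn(2, 8, 32)      # attention output fed back in
out = block(intermediate, residual)   # project, dropout, add residual, LayerNorm
print(out.shape)                      # torch.Size([2, 8, 32])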
|
class RemBertOutput(nn.Module):
def __init__(self, config):
pass
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
pass
| 3 | 0 | 5 | 0 | 5 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 2 | 3 | 2 | 12 | 12 | 1 | 11 | 6 | 8 | 0 | 11 | 6 | 8 | 1 | 1 | 0 | 2 |