"""PyTorch CLVP model.""" |
|
|
|
|
|
import copy |
|
|
import math |
|
|
from dataclasses import dataclass |
|
|
from typing import Callable, Optional, Union |
|
|
|
|
|
import torch |
|
|
from torch import nn |
|
|
from torch.nn import CrossEntropyLoss |
|
|
|
|
|
from ...activations import ACT2FN, get_activation |
|
|
from ...cache_utils import Cache, DynamicCache |
|
|
from ...generation import GenerationConfig, GenerationMixin |
|
|
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask |
|
|
from ...modeling_outputs import ( |
|
|
BaseModelOutput, |
|
|
BaseModelOutputWithPastAndCrossAttentions, |
|
|
BaseModelOutputWithPooling, |
|
|
CausalLMOutputWithCrossAttentions, |
|
|
) |
|
|
from ...modeling_utils import PreTrainedModel |
|
|
from ...pytorch_utils import Conv1D, isin_mps_friendly |
|
|
from ...utils import ( |
|
|
ModelOutput, |
|
|
auto_docstring, |
|
|
logging, |
|
|
) |
|
|
from ...utils.deprecation import deprecate_kwarg |
|
|
from .configuration_clvp import ( |
|
|
ClvpConfig, |
|
|
ClvpDecoderConfig, |
|
|
ClvpEncoderConfig, |
|
|
) |
|
|
|
|
|
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
|
|
|
|
|
|
|
|
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: |
|
|
return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) |
|
|
|
|
|
|
|
|
|
|
|
def clvp_loss(similarity: torch.Tensor) -> torch.Tensor: |
|
|
caption_loss = contrastive_loss(similarity) |
|
|
speech_loss = contrastive_loss(similarity.t()) |
|
|
return (caption_loss + speech_loss) / 2.0 |
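

# Illustrative sketch of how the two contrastive terms above combine (comments
# only; not executed). Given a square (text_batch, speech_batch) similarity
# matrix, the matched pairs sit on the diagonal, so cross-entropy against
# `torch.arange(batch)` pulls each row's (and, after the transpose, each
# column's) diagonal entry to the top:
#
#   similarity = text_embeds @ speech_embeds.t() * logit_scale  # (batch, batch)
#   loss = clvp_loss(similarity)  # mean of the text->speech and speech->text losses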
|
|
|
|
|
|
|
|
|
|
|
def rotate_half(x): |
|
|
"""Rotates half the hidden dims of the input.""" |
|
|
x1 = x[..., : x.shape[-1] // 2] |
|
|
x2 = x[..., x.shape[-1] // 2 :] |
|
|
return torch.cat((-x2, x1), dim=-1) |
|
|
|
|
|
|
|
|
def apply_rotary_pos_emb(q, k, v, cos, sin, position_ids, unsqueeze_dim=1): |
|
|
"""Applies Rotary Position Embedding to the query and key tensors. |
|
|
|
|
|
Args: |
|
|
q (`torch.Tensor`): The query tensor. |
|
|
        k (`torch.Tensor`): The key tensor.
        v (`torch.Tensor`): The value tensor.
|
|
cos (`torch.Tensor`): The cosine part of the rotary embedding. |
|
|
sin (`torch.Tensor`): The sine part of the rotary embedding. |
|
|
position_ids (`torch.Tensor`): |
|
|
The position indices of the tokens corresponding to the query and key tensors. For example, this can be |
|
|
used to pass offsetted position ids when working with a KV-cache. |
|
|
unsqueeze_dim (`int`, *optional*, defaults to 1): |
|
|
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and |
|
|
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note |
|
|
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and |
|
|
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes |
|
|
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have |
|
|
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. |
|
|
Returns: |
|
|
        `tuple(torch.Tensor)` comprising the query, key and value tensors rotated using the Rotary Position Embedding.
|
|
""" |
|
|
cos = cos[position_ids].unsqueeze(unsqueeze_dim) |
|
|
sin = sin[position_ids].unsqueeze(unsqueeze_dim) |
|
|
q_embed = (q * cos) + (rotate_half(q) * sin) |
|
|
k_embed = (k * cos) + (rotate_half(k) * sin) |
|
|
v_embed = (v * cos) + (rotate_half(v) * sin) |
|
|
return q_embed, k_embed, v_embed |
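

# Shape sketch for `apply_rotary_pos_emb` (comments only; the tensor names are
# hypothetical). With q, k, v of shape (batch, heads, seq_len, rot_dim) and
# cos/sin of shape (seq_len, rot_dim), indexing with `position_ids` of shape
# (batch, seq_len) gives (batch, seq_len, rot_dim); `unsqueeze_dim=1` then
# inserts the heads axis so the result broadcasts against q, k and v:
#
#   cos = cos[position_ids].unsqueeze(1)          # (batch, 1, seq_len, rot_dim)
#   q_embed = (q * cos) + (rotate_half(q) * sin)  # same shape as q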
|
|
|
|
|
|
|
|
def _pad_extra_bos_eos_tokens( |
|
|
input_ids, |
|
|
attention_mask=None, |
|
|
pad_token_id=0, |
|
|
bos_token_id=255, |
|
|
eos_token_id=0, |
|
|
add_bos_token=True, |
|
|
add_eos_token=True, |
|
|
): |
|
|
""" |
|
|
    This method adds extra bos and eos tokens to `input_ids` and modifies the `attention_mask` accordingly. It is
    used in `ClvpConditioningEncoder` and in the generation loop of `ClvpModelForConditionalGeneration`.
|
|
""" |
|
|
|
|
|
|
|
|
if add_bos_token: |
|
|
input_ids = torch.nn.functional.pad(input_ids, (1, 0), value=bos_token_id) |
|
|
attention_mask = ( |
|
|
torch.nn.functional.pad(attention_mask, (1, 0), value=1) if attention_mask is not None else attention_mask |
|
|
) |
|
|
|
|
|
modified_input_ids = input_ids |
|
|
if add_eos_token: |
|
|
modified_input_ids = torch.zeros( |
|
|
(input_ids.shape[0], input_ids.shape[1] + 1), dtype=input_ids.dtype, device=input_ids.device |
|
|
) |
|
|
for i, each_input_id in enumerate(input_ids): |
|
|
|
|
|
if isin_mps_friendly(each_input_id, pad_token_id).sum(): |
|
|
pos = torch.where(each_input_id == pad_token_id)[0].min() |
|
|
modified_input_ids[i] = torch.concatenate( |
|
|
[each_input_id[:pos], torch.tensor([eos_token_id], device=input_ids.device), each_input_id[pos:]] |
|
|
) |
|
|
else: |
|
|
|
|
|
modified_input_ids[i] = torch.nn.functional.pad(each_input_id, (0, 1), value=eos_token_id) |
|
|
attention_mask = ( |
|
|
torch.nn.functional.pad(attention_mask, (1, 0), value=1) if attention_mask is not None else attention_mask |
|
|
) |
|
|
|
|
|
return modified_input_ids, attention_mask |
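

# A minimal worked example for `_pad_extra_bos_eos_tokens` (comments only;
# illustrative values, using the default pad/bos/eos ids above):
#
#   input_ids      = torch.tensor([[5, 6, 0, 0]])   # 0 is the pad token
#   attention_mask = torch.tensor([[1, 1, 0, 0]])
#   ids, mask = _pad_extra_bos_eos_tokens(input_ids, attention_mask)
#   # ids  -> [[255, 5, 6, 0, 0, 0]]  (bos prepended; eos id 0 spliced in before the pads)
#   # mask -> [[1, 1, 1, 1, 0, 0]]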
|
|
|
|
|
|
|
|
@dataclass |
|
|
@auto_docstring( |
|
|
custom_intro=""" |
|
|
Base class for CLVP encoder's outputs that contains a pooling of the last hidden states as well as a projection |
|
|
output (a linear layer on top of the pooled output). |
|
|
""" |
|
|
) |
|
|
class ClvpEncoderOutput(ModelOutput): |
|
|
r""" |
|
|
embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`): |
|
|
The embeddings obtained by applying the projection layer to the pooler_output. |
|
|
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): |
|
|
The hidden state of the last layer of the model. |
|
|
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): |
|
|
Pooled output of the `last_hidden_state`. |
|
|
""" |
|
|
|
|
|
embeds: Optional[torch.FloatTensor] = None |
|
|
last_hidden_state: Optional[torch.FloatTensor] = None |
|
|
pooler_output: Optional[torch.FloatTensor] = None |
|
|
hidden_states: Optional[tuple[torch.FloatTensor]] = None |
|
|
attentions: Optional[tuple[torch.FloatTensor]] = None |
|
|
|
|
|
|
|
|
@dataclass |
|
|
@auto_docstring |
|
|
class ClvpOutput(ModelOutput): |
|
|
r""" |
|
|
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): |
|
|
Contrastive loss for speech-text similarity. |
|
|
speech_ids (`torch.LongTensor`, *optional*): |
|
|
speech_ids (or speech candidates) generated by the `ClvpForCausalLM` model. |
|
|
logits_per_speech (`torch.FloatTensor` of shape `(speech_batch_size, text_batch_size)`): |
|
|
The scaled dot product scores between `speech_embeds` and `text_embeds`. This represents the speech-text |
|
|
similarity scores. |
|
|
logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, speech_batch_size)`): |
|
|
The scaled dot product scores between `text_embeds` and `speech_embeds`. This represents the text-speech |
|
|
similarity scores. |
|
|
    text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
|
|
The text embeddings obtained by applying the projection layer to the pooled output of the text encoder |
|
|
model. |
|
|
    speech_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
|
|
The speech embeddings obtained by applying the projection layer to the pooled output of the speech encoder |
|
|
model. |
|
|
text_model_output (`BaseModelOutputWithPooling`): |
|
|
The pooled output of the `last_hidden_state` of the text encoder Model. |
|
|
speech_model_output (`BaseModelOutputWithPooling`): |
|
|
The pooled output of the `last_hidden_state` of the speech encoder Model. |
|
|
decoder_hidden_states (`torch.FloatTensor`, *optional*): |
|
|
The hidden states of the decoder model. |
|
|
text_encoder_hidden_states (`torch.FloatTensor`, *optional*): |
|
|
The hidden states of the text encoder model. |
|
|
speech_encoder_hidden_states (`torch.FloatTensor`, *optional*): |
|
|
The hidden states of the speech encoder model. |
|
|
""" |
|
|
|
|
|
loss: Optional[torch.FloatTensor] = None |
|
|
speech_ids: Optional[torch.LongTensor] = None |
|
|
logits_per_speech: Optional[torch.FloatTensor] = None |
|
|
logits_per_text: Optional[torch.FloatTensor] = None |
|
|
text_embeds: Optional[torch.FloatTensor] = None |
|
|
speech_embeds: Optional[torch.FloatTensor] = None |
|
|
    text_model_output: Optional[BaseModelOutputWithPooling] = None
    speech_model_output: Optional[BaseModelOutputWithPooling] = None
|
|
decoder_hidden_states: Optional[torch.FloatTensor] = None |
|
|
text_encoder_hidden_states: Optional[torch.FloatTensor] = None |
|
|
speech_encoder_hidden_states: Optional[torch.FloatTensor] = None |
|
|
|
|
|
|
|
|
|
|
|
class ClvpRMSNorm(nn.Module): |
|
|
def __init__(self, hidden_size, eps=1e-6): |
|
|
""" |
|
|
ClvpRMSNorm is equivalent to T5LayerNorm |
|
|
""" |
|
|
super().__init__() |
|
|
self.weight = nn.Parameter(torch.ones(hidden_size)) |
|
|
self.variance_epsilon = eps |
|
|
|
|
|
def forward(self, hidden_states): |
|
|
input_dtype = hidden_states.dtype |
|
|
hidden_states = hidden_states.to(torch.float32) |
|
|
variance = hidden_states.pow(2).mean(-1, keepdim=True) |
|
|
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) |
|
|
return self.weight * hidden_states.to(input_dtype) |
|
|
|
|
|
def extra_repr(self): |
|
|
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}" |
|
|
|
|
|
|
|
|
class ClvpRotaryPositionalEmbedding(nn.Module): |
|
|
""" |
|
|
    Rotary Position Embedding class for CLVP. It was proposed in the paper 'RoFormer: Enhanced Transformer With
    Rotary Position Embedding'. Please see https://huggingface.co/papers/2104.09864v1.pdf .
|
|
""" |
|
|
|
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
dim = max(config.projection_dim // (config.num_attention_heads * 2), 32) |
|
|
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim)) |
|
|
|
|
|
self.register_buffer("inv_freq", inv_freq) |
|
|
self.cached_sequence_length = None |
|
|
self.cached_rotary_positional_embedding = None |
|
|
|
|
|
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: |
|
|
sequence_length = hidden_states.shape[1] |
|
|
|
|
|
if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None: |
|
|
return self.cached_rotary_positional_embedding |
|
|
|
|
|
self.cached_sequence_length = sequence_length |
|
|
time_stamps = torch.arange(sequence_length, device=hidden_states.device).type_as(self.inv_freq) |
|
|
freqs = torch.einsum("i,j->ij", time_stamps, self.inv_freq) |
|
|
embeddings = torch.cat((freqs, freqs), dim=-1) |
|
|
|
|
|
self.cached_rotary_positional_embedding = embeddings.unsqueeze(0) |
|
|
return self.cached_rotary_positional_embedding |
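

# Illustrative shape note for the rotary embedding (hypothetical config values):
# with projection_dim=768 and num_attention_heads=12, dim = max(768 // 24, 32) = 32,
# so the module caches a (1, seq_len, 32) tensor of angles. `ClvpSelfAttention`
# rotates only the first 32 channels of every head and passes the rest through.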
|
|
|
|
|
|
|
|
class ClvpSelfAttention(nn.Module): |
|
|
""" |
|
|
Multi-headed attention to combine Absolute and Rotary Positional Embeddings into a single Attention module. |
|
|
""" |
|
|
|
|
|
def __init__(self, config, layer_idx=None): |
|
|
super().__init__() |
|
|
self.config = config |
|
|
self.embed_dim = config.hidden_size |
|
|
self.num_heads = config.num_attention_heads |
|
|
self.head_dim = self.embed_dim // self.num_heads |
|
|
if self.head_dim * self.num_heads != self.embed_dim: |
|
|
raise ValueError( |
|
|
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" |
|
|
f" {self.num_heads})." |
|
|
) |
|
|
self.scale = self.head_dim**-0.5 |
|
|
self.dropout = config.attention_dropout |
|
|
self.layer_idx = layer_idx |
|
|
|
|
|
if hasattr(config, "max_position_embeddings"): |
|
|
max_positions = config.max_position_embeddings |
|
|
bias = torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)) |
|
|
bias = bias.view(1, 1, max_positions, max_positions) |
|
|
self.register_buffer("bias", bias, persistent=False) |
|
|
|
|
|
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias) |
|
|
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias) |
|
|
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias) |
|
|
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) |
|
|
|
|
|
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): |
|
|
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() |
|
|
|
|
|
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") |
|
|
def forward( |
|
|
self, |
|
|
hidden_states: torch.FloatTensor, |
|
|
rotary_pos_emb: Optional[torch.FloatTensor] = None, |
|
|
attention_mask: Optional[torch.LongTensor] = None, |
|
|
position_ids: Optional[torch.LongTensor] = None, |
|
|
past_key_values: Optional[Cache] = None, |
|
|
use_cache: Optional[bool] = False, |
|
|
head_mask: Optional[torch.FloatTensor] = None, |
|
|
output_attentions: Optional[bool] = False, |
|
|
cache_position: Optional[torch.Tensor] = None, |
|
|
    ) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
|
|
|
|
|
|
|
|
if rotary_pos_emb is not None and position_ids is None: |
|
|
raise ValueError("`position_ids` must be provided when `rotary_pos_emb` is not None.") |
|
|
|
|
|
bsz, _, embed_dim = hidden_states.size() |
|
|
|
|
|
|
|
|
query_states = self._shape(self.q_proj(hidden_states), -1, bsz) * self.scale |
|
|
key_states = self._shape(self.k_proj(hidden_states), -1, bsz) |
|
|
value_states = self._shape(self.v_proj(hidden_states), -1, bsz) |
|
|
|
|
|
if past_key_values is not None: |
|
|
key_states, value_states = past_key_values.update( |
|
|
key_states, value_states, self.layer_idx, {"cache_position": cache_position} |
|
|
) |
|
|
|
|
|
if rotary_pos_emb is not None: |
|
|
rotary_emb_dim = rotary_pos_emb.shape[-1] |
|
|
|
|
|
|
|
|
query_rot, query_pass = ( |
|
|
query_states[..., :rotary_emb_dim], |
|
|
query_states[..., rotary_emb_dim:], |
|
|
) |
|
|
key_rot, key_pass = ( |
|
|
key_states[..., :rotary_emb_dim], |
|
|
key_states[..., rotary_emb_dim:], |
|
|
) |
|
|
value_rot, value_pass = ( |
|
|
value_states[..., :rotary_emb_dim], |
|
|
value_states[..., rotary_emb_dim:], |
|
|
) |
|
|
|
|
|
cos, sin = rotary_pos_emb.cos().squeeze(0), rotary_pos_emb.sin().squeeze(0) |
|
|
query_rot, key_rot, value_rot = apply_rotary_pos_emb(query_rot, key_rot, value_rot, cos, sin, position_ids) |
|
|
|
|
|
|
|
|
query_states = torch.cat((query_rot, query_pass), dim=-1) |
|
|
key_states = torch.cat((key_rot, key_pass), dim=-1) |
|
|
value_states = torch.cat((value_rot, value_pass), dim=-1) |
|
|
|
|
|
tgt_len = query_states.shape[2] |
|
|
src_len = key_states.shape[2] |
|
|
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) |
|
|
|
|
|
if attention_mask is not None: |
|
|
if attention_mask.size() != (bsz, 1, tgt_len, src_len): |
|
|
raise ValueError( |
|
|
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" |
|
|
) |
|
|
attn_weights = attn_weights + attention_mask |
|
|
|
|
|
attn_weights = nn.functional.softmax(attn_weights, dim=-1) |
|
|
|
|
|
|
|
|
if head_mask is not None: |
|
|
attn_weights = attn_weights * head_mask |
|
|
|
|
|
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) |
|
|
attn_output = torch.matmul(attn_probs, value_states) |
|
|
|
|
|
if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim): |
|
|
raise ValueError( |
|
|
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" |
|
|
f" {attn_output.size()}" |
|
|
) |
|
|
|
|
|
attn_output = attn_output.transpose(1, 2).contiguous() |
|
|
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) |
|
|
|
|
|
attn_output = self.out_proj(attn_output) |
|
|
|
|
|
return attn_output, attn_weights |
|
|
|
|
|
|
|
|
class ClvpGatedLinearUnit(nn.Module): |
|
|
""" |
|
|
    `ClvpGatedLinearUnit` uses the second half of the `hidden_states` to act as a gate for the first half of the
    `hidden_states`, which controls the flow of data from the first half of the tensor.
|
|
""" |
|
|
|
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
self.activation_fn = ACT2FN[config.hidden_act] |
|
|
self.proj = nn.Linear(config.hidden_size, config.intermediate_size * 2) |
|
|
|
|
|
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: |
|
|
hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1) |
|
|
return hidden_states * self.activation_fn(gate) |
|
|
|
|
|
|
|
|
class ClvpEncoderMLP(nn.Module): |
|
|
""" |
|
|
This MLP is used in CLVP speech or text encoder models. |
|
|
""" |
|
|
|
|
|
def __init__(self, config): |
|
|
super().__init__() |
|
|
self.config = config |
|
|
|
|
|
self.fc1 = ClvpGatedLinearUnit(config) |
|
|
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) |
|
|
self.dropout_layer = nn.Dropout(config.dropout) |
|
|
|
|
|
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: |
|
|
hidden_states = self.fc1(hidden_states) |
|
|
hidden_states = self.dropout_layer(hidden_states) |
|
|
hidden_states = self.fc2(hidden_states) |
|
|
return hidden_states |
|
|
|
|
|
|
|
|
class ClvpEncoderLayer(nn.Module): |
|
|
def __init__(self, config: ClvpConfig): |
|
|
super().__init__() |
|
|
self.config = config |
|
|
self.embed_dim = config.hidden_size |
|
|
self.self_attn = ClvpSelfAttention(config) |
|
|
self.mlp = ClvpEncoderMLP(config) |
|
|
|
|
|
self.input_rmsnorm = ClvpRMSNorm(self.embed_dim, eps=config.layer_norm_eps) |
|
|
self.post_attention_rmsnorm = ClvpRMSNorm(self.embed_dim, eps=config.layer_norm_eps) |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
hidden_states: torch.FloatTensor, |
|
|
rotary_pos_emb: torch.FloatTensor, |
|
|
attention_mask: torch.LongTensor, |
|
|
position_ids: torch.LongTensor, |
|
|
output_attentions: Optional[bool] = False, |
|
|
) -> tuple[torch.FloatTensor]: |
|
|
""" |
|
|
Args: |
|
|
hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, embed_dim)`): |
|
|
input to the layer. |
|
|
rotary_pos_emb (`torch.FloatTensor`): |
|
|
rotary position embeddings generated by `ClvpRotaryPositionalEmbedding` module. |
|
|
attention_mask (`torch.FloatTensor` of shape `(batch, 1, tgt_len, src_len)`): |
|
|
attention mask where padding elements are indicated by very large negative values. |
|
|
position_ids (`torch.LongTensor`): |
|
|
Denotes position ids of the input tokens. |
|
|
output_attentions (`bool`, *optional*, defaults to `False`): |
|
|
Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
|
|
returned tensors for more detail. |
|
|
""" |
|
|
residual = hidden_states |
|
|
|
|
|
hidden_states = self.input_rmsnorm(hidden_states) |
|
|
|
|
|
hidden_states, attn_weights = self.self_attn( |
|
|
hidden_states=hidden_states, |
|
|
rotary_pos_emb=rotary_pos_emb, |
|
|
attention_mask=attention_mask, |
|
|
position_ids=position_ids, |
|
|
output_attentions=output_attentions, |
|
|
) |
|
|
|
|
|
hidden_states = residual + hidden_states |
|
|
|
|
|
residual = hidden_states |
|
|
hidden_states = self.post_attention_rmsnorm(hidden_states) |
|
|
hidden_states = self.mlp(hidden_states) |
|
|
hidden_states = residual + hidden_states |
|
|
|
|
|
return hidden_states, attn_weights |
|
|
|
|
|
|
|
|
|
|
|
class ClvpSequenceSummary(nn.Module): |
|
|
r""" |
|
|
Compute a single vector summary of a sequence hidden states. |
|
|
|
|
|
Args: |
|
|
config ([`ClvpConfig`]): |
|
|
The config used by the model. Relevant arguments in the config class of the model are (refer to the actual |
|
|
config class of your model for the default values it uses): |
|
|
|
|
|
- **summary_type** (`str`) -- The method to use to make this summary. Accepted values are: |
|
|
|
|
|
- `"last"` -- Take the last token hidden state (like XLNet) |
|
|
- `"first"` -- Take the first token hidden state (like Bert) |
|
|
- `"mean"` -- Take the mean of all tokens hidden states |
|
|
- `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) |
|
|
- `"attn"` -- Not implemented now, use multi-head attention |
|
|
|
|
|
- **summary_use_proj** (`bool`) -- Add a projection after the vector extraction. |
|
|
- **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes |
|
|
(otherwise to `config.hidden_size`). |
|
|
- **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output, |
|
|
another string or `None` will add no activation. |
|
|
- **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation. |
|
|
            - **summary_last_dropout** (`float`) -- Optional dropout probability after the projection and activation.
|
|
""" |
|
|
|
|
|
def __init__(self, config: ClvpConfig): |
|
|
super().__init__() |
|
|
|
|
|
self.summary_type = getattr(config, "summary_type", "last") |
|
|
if self.summary_type == "attn": |
|
|
|
|
|
|
|
|
|
|
|
raise NotImplementedError |
|
|
|
|
|
self.summary = nn.Identity() |
|
|
if hasattr(config, "summary_use_proj") and config.summary_use_proj: |
|
|
if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: |
|
|
num_classes = config.num_labels |
|
|
else: |
|
|
num_classes = config.hidden_size |
|
|
self.summary = nn.Linear(config.hidden_size, num_classes) |
|
|
|
|
|
activation_string = getattr(config, "summary_activation", None) |
|
|
self.activation: Callable = get_activation(activation_string) if activation_string else nn.Identity() |
|
|
|
|
|
self.first_dropout = nn.Identity() |
|
|
if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0: |
|
|
self.first_dropout = nn.Dropout(config.summary_first_dropout) |
|
|
|
|
|
self.last_dropout = nn.Identity() |
|
|
if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0: |
|
|
self.last_dropout = nn.Dropout(config.summary_last_dropout) |
|
|
|
|
|
def forward( |
|
|
self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None |
|
|
) -> torch.FloatTensor: |
|
|
""" |
|
|
Compute a single vector summary of a sequence hidden states. |
|
|
|
|
|
Args: |
|
|
hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`): |
|
|
The hidden states of the last layer. |
|
|
cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*): |
|
|
Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token. |
|
|
|
|
|
Returns: |
|
|
`torch.FloatTensor`: The summary of the sequence hidden states. |
|
|
""" |
|
|
if self.summary_type == "last": |
|
|
output = hidden_states[:, -1] |
|
|
elif self.summary_type == "first": |
|
|
output = hidden_states[:, 0] |
|
|
elif self.summary_type == "mean": |
|
|
output = hidden_states.mean(dim=1) |
|
|
elif self.summary_type == "cls_index": |
|
|
if cls_index is None: |
|
|
cls_index = torch.full_like( |
|
|
hidden_states[..., :1, :], |
|
|
hidden_states.shape[-2] - 1, |
|
|
dtype=torch.long, |
|
|
) |
|
|
else: |
|
|
cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) |
|
|
cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),)) |
|
|
|
|
|
output = hidden_states.gather(-2, cls_index).squeeze(-2) |
|
|
elif self.summary_type == "attn": |
|
|
raise NotImplementedError |
|
|
|
|
|
output = self.first_dropout(output) |
|
|
output = self.summary(output) |
|
|
output = self.activation(output) |
|
|
output = self.last_dropout(output) |
|
|
|
|
|
return output |
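

# A minimal usage sketch for `ClvpSequenceSummary` (comments only; the config
# values below are assumptions for illustration):
#
#   config.summary_type = "mean"           # average over the sequence axis
#   summary = ClvpSequenceSummary(config)
#   pooled = summary(hidden_states)        # (batch, seq_len, hidden) -> (batch, hidden)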
|
|
|
|
|
|
|
|
|
|
|
class ClvpDecoderMLP(nn.Module): |
|
|
def __init__(self, intermediate_size, config): |
|
|
super().__init__() |
|
|
embed_dim = config.hidden_size |
|
|
self.c_fc = Conv1D(intermediate_size, embed_dim) |
|
|
self.c_proj = Conv1D(embed_dim, intermediate_size) |
|
|
self.act = ACT2FN[config.activation_function] |
|
|
self.dropout = nn.Dropout(config.resid_pdrop) |
|
|
|
|
|
    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
|
|
hidden_states = self.c_fc(hidden_states) |
|
|
hidden_states = self.act(hidden_states) |
|
|
hidden_states = self.c_proj(hidden_states) |
|
|
hidden_states = self.dropout(hidden_states) |
|
|
return hidden_states |
|
|
|
|
|
|
|
|
class ClvpDecoderLayer(nn.Module): |
|
|
def __init__(self, config, layer_idx=None): |
|
|
super().__init__() |
|
|
hidden_size = config.hidden_size |
|
|
inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size |
|
|
|
|
|
self.input_layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) |
|
|
self.attn = ClvpSelfAttention(config, layer_idx=layer_idx) |
|
|
self.post_attention_layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) |
|
|
|
|
|
self.mlp = ClvpDecoderMLP(inner_dim, config) |
|
|
|
|
|
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58") |
|
|
def forward( |
|
|
self, |
|
|
        hidden_states: torch.FloatTensor,
|
|
past_key_values: Optional[Cache] = None, |
|
|
attention_mask: Optional[torch.LongTensor] = None, |
|
|
position_ids: Optional[torch.LongTensor] = None, |
|
|
head_mask: Optional[torch.FloatTensor] = None, |
|
|
use_cache: Optional[bool] = False, |
|
|
output_attentions: Optional[bool] = False, |
|
|
cache_position: Optional[torch.Tensor] = None, |
|
|
) -> Union[tuple[torch.Tensor], Optional[tuple[torch.Tensor, tuple[torch.FloatTensor, ...]]]]: |
|
|
residual = hidden_states |
|
|
hidden_states = self.input_layernorm(hidden_states) |
|
|
attn_outputs = self.attn( |
|
|
hidden_states, |
|
|
past_key_values=past_key_values, |
|
|
attention_mask=attention_mask, |
|
|
position_ids=position_ids, |
|
|
head_mask=head_mask, |
|
|
use_cache=use_cache, |
|
|
output_attentions=output_attentions, |
|
|
cache_position=cache_position, |
|
|
) |
|
|
attn_output = attn_outputs[0] |
|
|
|
|
|
hidden_states = attn_output + residual |
|
|
|
|
|
residual = hidden_states |
|
|
hidden_states = self.post_attention_layernorm(hidden_states) |
|
|
feed_forward_hidden_states = self.mlp(hidden_states) |
|
|
|
|
|
hidden_states = residual + feed_forward_hidden_states |
|
|
|
|
|
return (hidden_states,) + attn_outputs[1:] |
|
|
|
|
|
|
|
|
class ClvpConditioningEncoder(nn.Module): |
|
|
""" |
|
|
    This class processes the log-mel spectrograms (extracted by the feature extractor) and text tokens (produced by
    the tokenizer) as inputs for the decoder model.
|
|
|
|
|
First each log-mel spectrogram is processed into a single vector which captures valuable characteristics from each |
|
|
of them, then the text tokens are converted into token embeddings and position embeddings are added afterwards. |
|
|
Both of these vectors are concatenated and then passed to the decoder model. |
|
|
|
|
|
    The text tokens help to incorporate the "text information" and the log-mel spectrogram is used to specify the
    "voice characteristics" into the generated mel tokens.
|
|
""" |
|
|
|
|
|
def __init__(self, config: ClvpConfig): |
|
|
super().__init__() |
|
|
|
|
|
self.text_config = config.text_config |
|
|
self.decoder_config = config.decoder_config |
|
|
|
|
|
self.text_token_embedding = nn.Embedding(self.text_config.vocab_size, self.decoder_config.hidden_size) |
|
|
self.text_position_embedding = nn.Embedding( |
|
|
self.decoder_config.max_text_tokens, self.decoder_config.hidden_size |
|
|
) |
|
|
|
|
|
self.mel_conv = nn.Conv1d(self.decoder_config.feature_size, self.decoder_config.hidden_size, kernel_size=1) |
|
|
|
|
|
|
|
|
num_groups = self.compute_groupnorm_groups(self.decoder_config.hidden_size) |
|
|
self.group_norms = nn.ModuleList( |
|
|
[ |
|
|
nn.GroupNorm(num_groups, self.decoder_config.hidden_size, eps=1e-5, affine=True) |
|
|
for _ in range(self.decoder_config.num_mel_attn_blocks) |
|
|
] |
|
|
) |
|
|
|
|
|
|
|
|
self.mel_attn_blocks = nn.ModuleList( |
|
|
[ClvpSelfAttention(self.decoder_config) for _ in range(self.decoder_config.num_mel_attn_blocks)] |
|
|
) |
|
|
|
|
|
self.gradient_checkpointing = False |
|
|
|
|
|
def compute_groupnorm_groups(self, channels: int, groups: int = 32): |
|
|
""" |
|
|
        Calculates the value of `num_groups` for `nn.GroupNorm`. This logic is taken from the official tortoise
        repository:
        https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/models/arch_util.py#L26
|
|
""" |
|
|
if channels <= 16: |
|
|
groups = 8 |
|
|
elif channels <= 64: |
|
|
groups = 16 |
|
|
while channels % groups != 0: |
|
|
groups = int(groups / 2) |
|
|
|
|
|
if groups <= 2: |
|
|
raise ValueError( |
|
|
f"Number of groups for the GroupNorm must be greater than 2, but it is {groups}." |
|
|
f"Please consider using a different `hidden_size`" |
|
|
) |
|
|
|
|
|
return groups |
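
    # Worked example for `compute_groupnorm_groups` (hypothetical channel counts):
    # channels=1024 keeps the default groups=32 since 1024 % 32 == 0, while
    # channels=80 halves 32 -> 16 (80 % 16 == 0) before returning.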
|
|
|
|
|
def forward( |
|
|
self, |
|
|
input_features: torch.FloatTensor, |
|
|
input_ids: Optional[torch.LongTensor] = None, |
|
|
inputs_embeds: Optional[torch.FloatTensor] = None, |
|
|
attention_mask: Optional[torch.LongTensor] = None, |
|
|
): |
|
|
|
|
|
if input_ids is not None and inputs_embeds is not None: |
|
|
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") |
|
|
elif input_ids is not None: |
|
|
batch_size, seq_length = input_ids.size() |
|
|
elif inputs_embeds is not None: |
|
|
batch_size, seq_length = inputs_embeds.size()[:-1] |
|
|
else: |
|
|
raise ValueError("You have to specify either input_ids or inputs_embeds") |
|
|
|
|
|
|
|
|
if attention_mask is None: |
|
|
            attention_mask = torch.ones(
                [batch_size, seq_length],
                dtype=torch.long,
                device=input_ids.device if input_ids is not None else inputs_embeds.device,
            )
|
|
|
|
|
|
|
|
|
|
|
input_ids, attention_mask = _pad_extra_bos_eos_tokens( |
|
|
input_ids, |
|
|
attention_mask, |
|
|
bos_token_id=self.text_config.bos_token_id, |
|
|
eos_token_id=self.text_config.eos_token_id, |
|
|
) |
|
|
|
|
|
inputs_embeds = self.text_token_embedding(input_ids) |
|
|
position_ids = attention_mask.cumsum(-1) - 1 |
|
|
position_embeds = self.text_position_embedding(position_ids) |
|
|
text_embeds = inputs_embeds + position_embeds |
|
|
|
|
|
if self.gradient_checkpointing and self.training: |
|
|
|
|
|
mel_spec = torch.utils.checkpoint.checkpoint(self.mel_conv, input_features) |
|
|
|
|
|
for i, mel_attn_block in enumerate(self.mel_attn_blocks): |
|
|
residual_mel_spec = mel_spec.transpose(1, 2) |
|
|
|
|
|
mel_spec = torch.utils.checkpoint.checkpoint(self.group_norms[i], mel_spec).transpose(1, 2) |
|
|
mel_spec = torch.utils.checkpoint.checkpoint(mel_attn_block, mel_spec)[0] + residual_mel_spec |
|
|
mel_spec = mel_spec.transpose(1, 2) |
|
|
|
|
|
else: |
|
|
|
|
|
mel_spec = self.mel_conv(input_features) |
|
|
|
|
|
for i, mel_attn_block in enumerate(self.mel_attn_blocks): |
|
|
residual_mel_spec = mel_spec.transpose(1, 2) |
|
|
|
|
|
mel_spec = self.group_norms[i](mel_spec).transpose(1, 2) |
|
|
mel_spec = mel_attn_block(mel_spec)[0] + residual_mel_spec |
|
|
mel_spec = mel_spec.transpose(1, 2) |
|
|
|
|
|
mel_spec = mel_spec[:, :, 0] |
|
|
mel_spec = mel_spec.unsqueeze(1) |
|
|
|
|
|
|
|
|
if text_embeds.shape[0] == 1 and mel_spec.shape[0] != 1: |
|
|
text_embeds = text_embeds.repeat(mel_spec.shape[0], 1, 1) |
|
|
elif text_embeds.shape[0] != 1 and mel_spec.shape[0] == 1: |
|
|
mel_spec = mel_spec.repeat(text_embeds.shape[0], 1, 1) |
|
|
|
|
|
elif text_embeds.shape[0] != mel_spec.shape[0]: |
|
|
raise ValueError( |
|
|
f"The number of texts and number of audios must be same. " |
|
|
f"Found {text_embeds.shape[0]} texts vs {mel_spec.shape[0]} audios" |
|
|
) |
|
|
|
|
|
return torch.concat([mel_spec, text_embeds], dim=1) |
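

# Output-shape sketch for the conditioning encoder (comments only): each log-mel
# spectrogram collapses to a single (batch, 1, hidden_size) vector, and the text
# side contributes (batch, text_len + 2, hidden_size) after the extra bos/eos
# tokens, so the concatenated conditioning sequence has text_len + 3 positions.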
|
|
|
|
|
|
|
|
@auto_docstring |
|
|
class ClvpPreTrainedModel(PreTrainedModel): |
|
|
config: ClvpConfig |
|
|
base_model_prefix = "clvp" |
|
|
supports_gradient_checkpointing = True |
|
|
_skip_keys_device_placement = "past_key_values" |
|
|
|
|
|
def _init_weights(self, module: nn.Module): |
|
|
"""Initialize the weights""" |
|
|
factor = self.config.initializer_factor |
|
|
if isinstance(module, nn.Embedding): |
|
|
module.weight.data.normal_(mean=0.0, std=factor * 0.02) |
|
|
elif isinstance(module, (nn.Linear, Conv1D, nn.Conv1d)): |
|
|
module.weight.data.normal_(mean=0.0, std=factor * 0.02) |
|
|
if module.bias is not None: |
|
|
module.bias.data.zero_() |
|
|
elif isinstance(module, ClvpRMSNorm): |
|
|
module.weight.data.fill_(1.0) |
|
|
elif isinstance(module, ClvpEncoderMLP): |
|
|
in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor |
|
|
fc_std = (2 * module.config.hidden_size) ** -0.5 * factor |
|
|
nn.init.normal_(module.fc1.proj.weight if getattr(module.fc1, "proj") else module.fc1.weight, std=fc_std) |
|
|
nn.init.normal_(module.fc2.weight, std=in_proj_std) |
|
|
elif isinstance(module, ClvpEncoder): |
|
|
config = self.config.get_text_config() |
|
|
factor = config.initializer_factor |
|
|
module.projection.weight.data.normal_(mean=0.0, std=factor * (config.hidden_size**-0.5)) |
|
|
elif isinstance(module, ClvpConditioningEncoder): |
|
|
module.mel_conv.weight.data.normal_(mean=0.0, std=factor) |
|
|
module.mel_conv.bias.data.zero_() |
|
|
elif isinstance(module, ClvpForCausalLM): |
|
|
for name, p in module.named_parameters(): |
|
|
if name == "c_proj.weight": |
|
|
p.data.normal_( |
|
|
mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.num_hidden_layers)) |
|
|
) |
|
|
elif isinstance(module, ClvpModelForConditionalGeneration): |
|
|
module.logit_scale.data.fill_(self.config.logit_scale_init_value) |
|
|
|
|
|
if isinstance(module, (nn.LayerNorm, nn.GroupNorm)): |
|
|
module.bias.data.zero_() |
|
|
module.weight.data.fill_(1.0) |
|
|
|
|
|
|
|
|
class ClvpEncoder(ClvpPreTrainedModel): |
|
|
""" |
|
|
    Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
    [`ClvpEncoderLayer`].
|
|
|
|
|
Args: |
|
|
config: ClvpConfig |
|
|
""" |
|
|
|
|
|
def __init__(self, config: ClvpConfig): |
|
|
super().__init__(config) |
|
|
|
|
|
self.config = config |
|
|
self.token_embedding = nn.Embedding(config.vocab_size, config.hidden_size) |
|
|
self.rotary_pos_emb = ClvpRotaryPositionalEmbedding(config) if config.use_rotary_embedding else None |
|
|
self.layers = nn.ModuleList([ClvpEncoderLayer(config) for _ in range(config.num_hidden_layers)]) |
|
|
|
|
|
self.sequence_summary = ClvpSequenceSummary(config) |
|
|
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) |
|
|
|
|
|
self.projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False) |
|
|
|
|
|
self.gradient_checkpointing = False |
|
|
|
|
|
self.post_init() |
|
|
|
|
|
def get_input_embeddings(self): |
|
|
return self.token_embedding |
|
|
|
|
|
def set_input_embeddings(self, value): |
|
|
self.token_embedding = value |
|
|
|
|
|
def forward( |
|
|
self, |
|
|
input_ids: Optional[torch.LongTensor] = None, |
|
|
inputs_embeds: Optional[torch.LongTensor] = None, |
|
|
attention_mask: Optional[torch.LongTensor] = None, |
|
|
position_ids: Optional[torch.LongTensor] = None, |
|
|
output_attentions: Optional[bool] = None, |
|
|
output_hidden_states: Optional[bool] = None, |
|
|
return_dict: Optional[bool] = None, |
|
|
) -> Union[tuple, BaseModelOutput]: |
|
|
r""" |
|
|
Args: |
|
|
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): |
|
|
Indices of input sequence tokens in the vocabulary. |
|
|
|
|
|
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
|
|
[`PreTrainedTokenizer.__call__`] for details. |
|
|
|
|
|
[What are input IDs?](../glossary#input-ids) |
|
|
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): |
|
|
input embeddings for the model. This bypasses the model's internal embedding lookup matrix. |
|
|
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
|
|
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
|
|
|
|
|
- 1 for tokens that are **not masked**, |
|
|
- 0 for tokens that are **masked**. |
|
|
|
|
|
[What are attention masks?](../glossary#attention-mask) |
|
|
position_ids (`torch.LongTensor`, *optional*): |
|
|
Denotes the position ids of `input_ids`. |
|
|
output_attentions (`bool`, *optional*): |
|
|
Whether or not to return the attentions tensors of all attention layers. See `attentions` under |
|
|
returned tensors for more detail. |
|
|
output_hidden_states (`bool`, *optional*): |
|
|
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors |
|
|
for more detail. |
|
|
return_dict (`bool`, *optional*): |
|
|
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
|
|
""" |
|
|
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
|
|
output_hidden_states = ( |
|
|
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
|
|
) |
|
|
return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
|
|
|
if input_ids is not None and inputs_embeds is not None: |
|
|
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") |
|
|
elif input_ids is not None: |
|
|
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) |
|
|
input_shape = input_ids.size() |
|
|
input_ids = input_ids.view(-1, input_shape[-1]) |
|
|
inputs_embeds = self.token_embedding(input_ids) |
|
|
elif inputs_embeds is not None: |
|
|
input_shape = inputs_embeds.size()[:-1] |
|
|
else: |
|
|
raise ValueError("You have to specify either input_ids or inputs_embeds") |
|
|
|
|
|
|
|
|
if attention_mask is not None: |
|
|
|
|
|
attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype) |
|
|
|
|
|
if position_ids is None: |
|
|
device = input_ids.device if input_ids is not None else inputs_embeds.device |
|
|
position_ids = torch.arange(input_shape[1], dtype=torch.long, device=device) |
|
|
position_ids = position_ids.unsqueeze(0) |
|
|
|
|
|
encoder_states = () if output_hidden_states else None |
|
|
all_attentions = () if output_attentions else None |
|
|
|
|
|
rotary_pos_emb = self.rotary_pos_emb(inputs_embeds) if self.rotary_pos_emb is not None else None |
|
|
|
|
|
hidden_states = inputs_embeds |
|
|
for idx, encoder_layer in enumerate(self.layers): |
|
|
if output_hidden_states: |
|
|
encoder_states = encoder_states + (hidden_states,) |
|
|
if self.gradient_checkpointing and self.training: |
|
|
layer_outputs = torch.utils.checkpoint.checkpoint( |
|
|
encoder_layer.__call__, |
|
|
hidden_states, |
|
|
rotary_pos_emb, |
|
|
attention_mask, |
|
|
position_ids, |
|
|
) |
|
|
else: |
|
|
layer_outputs = encoder_layer( |
|
|
hidden_states, |
|
|
rotary_pos_emb, |
|
|
attention_mask, |
|
|
position_ids, |
|
|
output_attentions=output_attentions, |
|
|
) |
|
|
|
|
|
hidden_states = layer_outputs[0] |
|
|
|
|
|
if output_attentions: |
|
|
all_attentions = all_attentions + (layer_outputs[1],) |
|
|
|
|
|
if output_hidden_states: |
|
|
encoder_states = encoder_states + (hidden_states,) |
|
|
|
|
|
last_hidden_state = hidden_states |
|
|
last_hidden_state = self.final_layer_norm(last_hidden_state) |
|
|
|
|
|
|
|
|
pooled_output = self.sequence_summary(last_hidden_state) |
|
|
|
|
|
|
|
|
embeds = self.projection(pooled_output) |
|
|
|
|
|
if not return_dict: |
|
|
return tuple( |
|
|
v for v in [embeds, last_hidden_state, pooled_output, encoder_states, all_attentions] if v is not None |
|
|
) |
|
|
|
|
|
return ClvpEncoderOutput( |
|
|
embeds=embeds, |
|
|
last_hidden_state=last_hidden_state, |
|
|
pooler_output=pooled_output, |
|
|
hidden_states=encoder_states, |
|
|
attentions=all_attentions, |
|
|
) |
|
|
|
|
|
|
|
|
class ClvpDecoder(ClvpPreTrainedModel): |
|
|
""" |
|
|
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`ClvpDecoderLayer`] |
|
|
""" |
|
|
|
|
|
def __init__(self, config): |
|
|
super().__init__(config) |
|
|
|
|
|
self.config = config |
|
|
|
|
|
self.input_embeds_layer = nn.Embedding(self.config.vocab_size, self.config.hidden_size) |
|
|
self.position_embeds_layer = nn.Embedding(self.config.max_position_embeddings, self.config.hidden_size) |
|
|
|
|
|
self.drop = nn.Dropout(self.config.embd_pdrop) |
|
|
self.layers = nn.ModuleList( |
|
|
[ClvpDecoderLayer(self.config, layer_idx=i) for i in range(self.config.num_hidden_layers)] |
|
|
) |
|
|
self.layer_norm = nn.LayerNorm(self.config.hidden_size, eps=self.config.layer_norm_epsilon) |
|
|
|
|
|
self.gradient_checkpointing = False |
|
|
|
|
|
|
|
|
self.post_init() |
|
|
|
|
|
def get_input_embeddings(self): |
|
|
return self.input_embeds_layer |
|
|
|
|
|
def set_input_embeddings(self, new_embeddings): |
|
|
self.input_embeds_layer = new_embeddings |
|
|
|
|
|
def _prune_heads(self, heads_to_prune): |
|
|
""" |
|
|
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} |
|
|
""" |
|
|
for layer, heads in heads_to_prune.items(): |
|
|
self.layers[layer].attn.prune_heads(heads) |
|
|
|
|
|
@auto_docstring |
|
|
def forward( |
|
|
self, |
|
|
input_ids: Optional[torch.LongTensor] = None, |
|
|
attention_mask: Optional[torch.FloatTensor] = None, |
|
|
token_type_ids: Optional[torch.LongTensor] = None, |
|
|
position_ids: Optional[torch.LongTensor] = None, |
|
|
head_mask: Optional[torch.FloatTensor] = None, |
|
|
past_key_values: Optional[Cache] = None, |
|
|
inputs_embeds: Optional[torch.FloatTensor] = None, |
|
|
use_cache: Optional[bool] = None, |
|
|
output_attentions: Optional[bool] = None, |
|
|
output_hidden_states: Optional[bool] = None, |
|
|
return_dict: Optional[bool] = None, |
|
|
cache_position: Optional[torch.Tensor] = None, |
|
|
) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: |
|
|
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
|
|
output_hidden_states = ( |
|
|
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
|
|
) |
|
|
use_cache = use_cache if use_cache is not None else self.config.use_cache |
|
|
return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
|
|
|
if input_ids is not None and inputs_embeds is not None: |
|
|
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") |
|
|
elif input_ids is not None: |
|
|
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) |
|
|
input_shape = input_ids.size() |
|
|
input_ids = input_ids.view(-1, input_shape[-1]) |
|
|
|
|
elif inputs_embeds is not None: |
|
|
input_shape = inputs_embeds.size()[:-1] |
|
|
|
|
else: |
|
|
raise ValueError("You have to specify either input_ids or inputs_embeds") |
|
|
|
|
|
device = input_ids.device if input_ids is not None else inputs_embeds.device |
|
|
|
|
|
if token_type_ids is not None: |
|
|
token_type_ids = token_type_ids.view(-1, input_shape[-1]) |
|
|
|
|
|
if self.gradient_checkpointing and self.training: |
|
|
if use_cache: |
|
|
logger.warning_once( |
|
|
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." |
|
|
) |
|
|
use_cache = False |
|
|
|
|
|
if use_cache and past_key_values is None: |
|
|
past_key_values = DynamicCache(config=self.config) |
|
|
if use_cache and isinstance(past_key_values, tuple): |
|
|
logger.warning_once( |
|
|
"Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. " |
|
|
"You should pass an instance of `DynamicCache` instead, e.g. " |
|
|
"`past_key_values=DynamicCache.from_legacy_cache(past_key_values)`." |
|
|
) |
|
|
past_key_values = DynamicCache.from_legacy_cache(past_key_values) |
|
|
|
|
|
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 |
|
|
if position_ids is None: |
|
|
position_ids = torch.arange( |
|
|
past_key_values_length, input_shape[-1] + past_key_values_length, dtype=torch.long, device=device |
|
|
) |
|
|
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) |
|
|
|
|
|
if inputs_embeds is None: |
|
|
inputs_embeds = self.input_embeds_layer(input_ids) |
|
|
position_embeds = self.position_embeds_layer(position_ids) |
|
|
inputs_embeds = inputs_embeds + position_embeds |
|
|
|
|
|
attention_mask = _prepare_4d_causal_attention_mask( |
|
|
attention_mask, input_shape, inputs_embeds, past_key_values_length |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) |
|
|
|
|
|
hidden_states = inputs_embeds |
|
|
|
|
|
if token_type_ids is not None: |
|
|
token_type_embeds = self.input_embeds_layer(token_type_ids) |
|
|
hidden_states = hidden_states + token_type_embeds |
|
|
|
|
|
hidden_states = self.drop(hidden_states) |
|
|
|
|
|
output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),) |
|
|
|
|
|
all_self_attentions = () if output_attentions else None |
|
|
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None |
|
|
all_hidden_states = () if output_hidden_states else None |
|
|
for i, block in enumerate(self.layers): |
|
|
if output_hidden_states: |
|
|
all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
|
|
|
if self.gradient_checkpointing and self.training: |
|
|
                outputs = torch.utils.checkpoint.checkpoint(
                    block.__call__,
                    hidden_states,
                    None,
                    attention_mask,
                    position_ids,
                    head_mask[i],
                    use_cache,
                    output_attentions,
                    cache_position,
                )
|
|
else: |
|
|
outputs = block( |
|
|
hidden_states, |
|
|
past_key_values=past_key_values, |
|
|
attention_mask=attention_mask, |
|
|
position_ids=position_ids, |
|
|
head_mask=head_mask[i], |
|
|
use_cache=use_cache, |
|
|
output_attentions=output_attentions, |
|
|
cache_position=cache_position, |
|
|
) |
|
|
|
|
|
hidden_states = outputs[0] |
|
|
|
|
|
if output_attentions: |
|
|
all_self_attentions = all_self_attentions + (outputs[1],) |
|
|
if self.config.add_cross_attention: |
|
|
all_cross_attentions = all_cross_attentions + (outputs[2],) |
|
|
|
|
|
hidden_states = self.layer_norm(hidden_states) |
|
|
|
|
|
hidden_states = hidden_states.view(output_shape) |
|
|
|
|
|
|
|
|
if output_hidden_states: |
|
|
all_hidden_states = all_hidden_states + (hidden_states,) |
|
|
|
|
|
if not return_dict: |
|
|
return tuple( |
|
|
v |
|
|
for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions, all_cross_attentions] |
|
|
if v is not None |
|
|
) |
|
|
|
|
|
return BaseModelOutputWithPastAndCrossAttentions( |
|
|
last_hidden_state=hidden_states, |
|
|
past_key_values=past_key_values, |
|
|
hidden_states=all_hidden_states, |
|
|
attentions=all_self_attentions, |
|
|
cross_attentions=all_cross_attentions, |
|
|
) |
|
|
|
|
|
|
|
|
@auto_docstring |
|
|
class ClvpModel(ClvpPreTrainedModel): |
|
|
def __init__(self, config: ClvpDecoderConfig): |
|
|
super().__init__(config) |
|
|
self.config = config |
|
|
self.decoder = ClvpDecoder(self.config) |
|
|
|
|
|
|
|
|
self.post_init() |
|
|
|
|
|
def get_input_embeddings(self): |
|
|
return self.decoder.input_embeds_layer |
|
|
|
|
|
def set_input_embeddings(self, value): |
|
|
self.decoder.input_embeds_layer = value |
|
|
|
|
|
@auto_docstring |
|
|
def forward( |
|
|
self, |
|
|
input_ids: Optional[torch.LongTensor] = None, |
|
|
attention_mask: Optional[torch.FloatTensor] = None, |
|
|
token_type_ids: Optional[torch.LongTensor] = None, |
|
|
position_ids: Optional[torch.LongTensor] = None, |
|
|
head_mask: Optional[torch.FloatTensor] = None, |
|
|
past_key_values: Optional[Cache] = None, |
|
|
inputs_embeds: Optional[torch.FloatTensor] = None, |
|
|
use_cache: Optional[bool] = None, |
|
|
output_attentions: Optional[bool] = None, |
|
|
output_hidden_states: Optional[bool] = None, |
|
|
return_dict: Optional[bool] = None, |
|
|
cache_position: Optional[torch.Tensor] = None, |
|
|
) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: |
|
|
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
|
|
output_hidden_states = ( |
|
|
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
|
|
) |
|
|
use_cache = use_cache if use_cache is not None else self.config.use_cache |
|
|
return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
|
|
|
|
|
|
decoder_outputs = self.decoder( |
|
|
input_ids=input_ids, |
|
|
attention_mask=attention_mask, |
|
|
token_type_ids=token_type_ids, |
|
|
position_ids=position_ids, |
|
|
head_mask=head_mask, |
|
|
past_key_values=past_key_values, |
|
|
inputs_embeds=inputs_embeds, |
|
|
use_cache=use_cache, |
|
|
output_attentions=output_attentions, |
|
|
output_hidden_states=output_hidden_states, |
|
|
return_dict=return_dict, |
|
|
cache_position=cache_position, |
|
|
) |
|
|
|
|
|
if not return_dict: |
|
|
return decoder_outputs |
|
|
|
|
|
return BaseModelOutputWithPastAndCrossAttentions( |
|
|
last_hidden_state=decoder_outputs.last_hidden_state, |
|
|
past_key_values=decoder_outputs.past_key_values, |
|
|
hidden_states=decoder_outputs.hidden_states, |
|
|
attentions=decoder_outputs.attentions, |
|
|
cross_attentions=decoder_outputs.cross_attentions, |
|
|
) |
|
|
|
|
|
|
|
|
@auto_docstring( |
|
|
custom_intro=""" |
|
|
The CLVP decoder model with a language modelling head on top. |
|
|
""" |
|
|
) |
|
|
class ClvpForCausalLM(ClvpPreTrainedModel, GenerationMixin): |
|
|
def __init__(self, config): |
|
|
super().__init__(config) |
|
|
|
|
|
self.config = config |
|
|
self.model = ClvpModel(self.config) |
|
|
|
|
|
self.final_norm = nn.LayerNorm(self.config.hidden_size) |
|
|
self.lm_head = nn.Linear(self.config.hidden_size, self.config.vocab_size, bias=True) |
|
|
|
|
|
|
|
|
self.post_init() |
|
|
|
|
|
def get_output_embeddings(self): |
|
|
return None |
|
|
|
|
|
def get_input_embeddings(self): |
|
|
return self.model.decoder.input_embeds_layer |
|
|
|
|
|
def set_input_embeddings(self, new_embeddings): |
|
|
self.model.decoder.input_embeds_layer = new_embeddings |
|
|
|
|
|
def _prepare_model_inputs( |
|
|
self, |
|
|
inputs: Optional[torch.Tensor] = None, |
|
|
bos_token_id: Optional[int] = None, |
|
|
model_kwargs: Optional[dict[str, torch.Tensor]] = None, |
|
|
) -> tuple[torch.Tensor, Optional[str], dict[str, torch.Tensor]]: |
|
|
""" |
|
|
This function extracts the model-specific `inputs` for generation. |
|
|
""" |
|
|
input_name = self.main_input_name |
|
|
|
|
|
model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None} |
|
|
|
|
|
inputs_kwarg = model_kwargs.pop(input_name, None) |
|
|
if inputs_kwarg is not None and inputs is not None: |
|
|
raise ValueError( |
|
|
f"`inputs`: {inputs}` were passed alongside {input_name} which is not allowed." |
|
|
f"Make sure to either pass {inputs} or {input_name}=..." |
|
|
) |
|
|
elif inputs_kwarg is not None: |
|
|
inputs = inputs_kwarg |
|
|
|
|
|
if input_name == "input_ids" and "inputs_embeds" in model_kwargs: |
|
|
model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation( |
|
|
inputs, bos_token_id, model_kwargs=model_kwargs |
|
|
) |
|
|
inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds" |
|
|
|
|
|
|
|
|
|
|
|
conditioning_embeds = model_kwargs.get("conditioning_embeds") |
|
|
|
|
|
if conditioning_embeds is not None: |
|
|
mel_start_token_embedding = self.model.decoder.input_embeds_layer( |
|
|
torch.full( |
|
|
(conditioning_embeds.shape[0], 1), |
|
|
fill_value=self.config.bos_token_id, |
|
|
device=conditioning_embeds.device, |
|
|
) |
|
|
) |
|
|
mel_start_token_embedding += self.model.decoder.position_embeds_layer( |
|
|
torch.full((conditioning_embeds.shape[0], 1), fill_value=0, device=conditioning_embeds.device) |
|
|
) |
|
|
conditioning_embeds = torch.concat([conditioning_embeds, mel_start_token_embedding], dim=1) |
|
|
|
|
|
|
|
|
if hasattr(model_kwargs, "attention_mask"): |
|
|
position_ids = model_kwargs["attention_mask"].long().cumsum(-1) - 1 |
|
|
else: |
|
|
position_ids = torch.arange( |
|
|
0, conditioning_embeds.shape[1], dtype=torch.long, device=conditioning_embeds.device |
|
|
) |
|
|
position_ids = position_ids.unsqueeze(0).repeat(conditioning_embeds.shape[0], 1) |
|
|
|
|
|
model_kwargs["inputs_embeds"] = conditioning_embeds - self.model.decoder.position_embeds_layer( |
|
|
position_ids |
|
|
) |
|
|
model_kwargs["input_ids"] = ( |
|
|
torch.ones((model_kwargs["inputs_embeds"].shape[0], 1), dtype=torch.long, device=self.device) |
|
|
* self.config.bos_token_id |
|
|
) |
|
|
|
|
|
return model_kwargs["inputs_embeds"], "inputs_embeds", model_kwargs |
|
|
|
|
|
inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs) |
|
|
return inputs, input_name, model_kwargs |
|
|
|
|
|
def prepare_inputs_for_generation( |
|
|
self, |
|
|
input_ids, |
|
|
past_key_values=None, |
|
|
inputs_embeds=None, |
|
|
conditioning_embeds=None, |
|
|
cache_position=None, |
|
|
**kwargs, |
|
|
): |
|
|
|
|
|
|
|
|
input_ids_length = input_ids.shape[-1] |
|
|
|
|
|
model_inputs = super().prepare_inputs_for_generation( |
|
|
input_ids, |
|
|
past_key_values=past_key_values, |
|
|
inputs_embeds=inputs_embeds, |
|
|
cache_position=cache_position, |
|
|
**kwargs, |
|
|
) |
|
|
if conditioning_embeds is not None and cache_position[0] != 0: |
|
|
model_inputs["position_ids"] = torch.tensor([input_ids_length], dtype=torch.long, device=input_ids.device) |
|
|
|
|
|
return model_inputs |
|
|
|
|
|
@auto_docstring |
|
|
def forward( |
|
|
self, |
|
|
input_ids: Optional[torch.LongTensor] = None, |
|
|
past_key_values: Optional[Cache] = None, |
|
|
attention_mask: Optional[torch.FloatTensor] = None, |
|
|
token_type_ids: Optional[torch.LongTensor] = None, |
|
|
position_ids: Optional[torch.LongTensor] = None, |
|
|
head_mask: Optional[torch.FloatTensor] = None, |
|
|
inputs_embeds: Optional[torch.FloatTensor] = None, |
|
|
labels: Optional[torch.LongTensor] = None, |
|
|
use_cache: Optional[bool] = None, |
|
|
output_attentions: Optional[bool] = None, |
|
|
output_hidden_states: Optional[bool] = None, |
|
|
return_dict: Optional[bool] = None, |
|
|
cache_position: Optional[torch.Tensor] = None, |
|
|
) -> Union[tuple, CausalLMOutputWithCrossAttentions]: |
|
|
r""" |
|
|
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
|
|
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to
            `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
|
|
""" |
|
|
|
|
|
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions |
|
|
output_hidden_states = ( |
|
|
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states |
|
|
) |
|
|
use_cache = use_cache if use_cache is not None else self.config.use_cache |
|
|
return_dict = return_dict if return_dict is not None else self.config.use_return_dict |
|
|
|
|
|
outputs = self.model( |
|
|
input_ids=input_ids, |
|
|
past_key_values=past_key_values, |
|
|
attention_mask=attention_mask, |
|
|
token_type_ids=token_type_ids, |
|
|
position_ids=position_ids, |
|
|
head_mask=head_mask, |
|
|
inputs_embeds=inputs_embeds, |
|
|
use_cache=use_cache, |
|
|
output_attentions=output_attentions, |
|
|
output_hidden_states=output_hidden_states, |
|
|
return_dict=return_dict, |
|
|
cache_position=cache_position, |
|
|
) |
|
|
|
|
|
hidden_states = outputs[0] |
|
|
|
|
|
lm_logits = self.final_norm(hidden_states) |
|
|
lm_logits = self.lm_head(lm_logits) |
|
|
|
|
|
loss = None |
|
|
if labels is not None: |
|
|
labels = labels.to(lm_logits.device) |
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )


@auto_docstring(
    custom_intro="""
    The composite CLVP model with a text encoder, speech encoder and speech decoder model.
    """
)
class ClvpModelForConditionalGeneration(ClvpPreTrainedModel, GenerationMixin):
    config: ClvpConfig

    def __init__(self, config: ClvpConfig):
        super().__init__(config)

        if not isinstance(config.text_config, ClvpEncoderConfig):
            raise TypeError(
                "config.text_config is expected to be of type `ClvpEncoderConfig` but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.speech_config, ClvpEncoderConfig):
            raise TypeError(
                "config.speech_config is expected to be of type `ClvpEncoderConfig` but is of type"
                f" {type(config.speech_config)}."
            )

        if not isinstance(config.decoder_config, ClvpDecoderConfig):
            raise TypeError(
                "config.decoder_config is expected to be of type `ClvpDecoderConfig` but is of type"
                f" {type(config.decoder_config)}."
            )

        self.conditioning_encoder = ClvpConditioningEncoder(config)

        self.speech_decoder_model = ClvpForCausalLM(config.decoder_config)

        self.text_encoder_model = ClvpEncoder(config.text_config)
        self.speech_encoder_model = ClvpEncoder(config.speech_config)

        self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))

        # Initialize weights and apply final processing
        self.post_init()

    def fix_speech_decoder_output(self, speech_ids: torch.LongTensor) -> torch.LongTensor:
        """
        This method modifies the output of the decoder model, such as replacing the `eos_token_id` and changing the
        last few tokens of each sequence.

        Args:
            speech_ids (`torch.LongTensor`):
                This refers to the output of the decoder model.
        """
        decoder_fixing_codes = self.config.decoder_config.decoder_fixing_codes
        speech_ids = speech_ids[:, 1:]

        stop_token_indices = torch.where(speech_ids == self.speech_decoder_model.config.eos_token_id, 1, 0)
        speech_ids = torch.masked_fill(speech_ids, mask=stop_token_indices.bool(), value=decoder_fixing_codes[0])

        for i, each_seq_stop_token_index in enumerate(stop_token_indices):
            # if no stop token was found, the sequence was still being generated, so there is nothing to pad;
            # just skip to the next sequence of tokens
            if each_seq_stop_token_index.sum() == 0:
                continue

            stm = each_seq_stop_token_index.argmax()
            speech_ids[i, stm:] = decoder_fixing_codes[0]
            if stm - 3 < speech_ids.shape[1]:
                # overwrite the last 3 tokens with the remaining fixing codes
                speech_ids[i, -3:] = torch.tensor(
                    [decoder_fixing_codes[1:]], device=speech_ids.device, dtype=torch.long
                )

        return speech_ids

    def get_text_features(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        text_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
    ) -> torch.FloatTensor:
        r"""
        This method can be used to extract text_embeds from a text. The text embeddings are obtained by applying the
        projection layer to the pooled output of the CLVP text encoder model.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                [What are input IDs?](../glossary#input-ids)
            text_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
                inputs_embeds for the text encoder model passed in place of `input_ids`.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)

        Returns:
            `torch.FloatTensor` of shape `(batch_size, output_dim)`:
                The text embeddings obtained by applying the projection layer to the pooled output of the CLVP Text
                Model.

        Examples:

        ```python
        >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration

        >>> # Define the Text
        >>> text = "This is an example text."

        >>> # Define processor and model
        >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
        >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")

        >>> # Generate processor output and text embeds
        >>> processor_output = processor(text=text, return_tensors="pt")
        >>> text_embeds = model.get_text_features(input_ids=processor_output["input_ids"])
        ```
        """
        outputs = self.text_encoder_model(
            input_ids=input_ids,
            inputs_embeds=text_encoder_inputs_embeds,
            attention_mask=attention_mask,
        )

        return outputs[0]

    def get_speech_features(
        self,
        speech_ids: Optional[torch.LongTensor] = None,
        input_ids: Optional[torch.LongTensor] = None,
        input_features: Optional[torch.FloatTensor] = None,
        conditioning_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        **kwargs,
    ) -> torch.FloatTensor:
        r"""
        This method can be used to extract speech_embeds. The speech embeddings are obtained by applying the speech
        model on speech_ids. If speech_ids is not present but both input_ids and input_features are given, then the
        decoder model is first used to generate the speech_ids, and the speech model is then applied on them.

        Args:
            speech_ids (`torch.LongTensor` of shape `(batch_size, num_speech_ids)`, *optional*):
                Speech Tokens. Padding will be ignored by default should you provide it. If speech_ids are provided
                then input_ids and input_features will be automatically ignored.
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Input text Tokens. Processed from the [`ClvpTokenizer`]. If speech_ids is not provided, then input_ids
                and input_features will be used.
            input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, time_dim)`, *optional*):
                Log mel-spectrogram representations of the audio, as returned by [`ClvpFeatureExtractor`]. Used
                together with input_ids when speech_ids is not provided.
            conditioning_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
                inputs_embeds for `ClvpConditioningEncoder`. Can be used in place of `input_ids`.
            attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding speech token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            generation_config (`GenerationConfig`, *optional*):
                generation config to control the generation of speech_ids if they are not provided.

        Returns:
            `torch.FloatTensor` of shape `(batch_size, output_dim)`:
                The speech embeddings obtained by applying the projection layer to the pooled output of the CLVP Speech
                Model.

        Examples:

        ```python
        >>> import datasets
        >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration

        >>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library)
        >>> text = "This is an example text."
        >>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
        >>> audio = ds.sort("id")["audio"][0]
        >>> audio_sample, sr = audio["array"], audio["sampling_rate"]

        >>> # Define processor and model
        >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
        >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")

        >>> # Generate processor output and model output
        >>> processor_output = processor(raw_speech=audio_sample, sampling_rate=sr, text=text, return_tensors="pt")
        >>> speech_embeds = model.get_speech_features(
        ...     input_ids=processor_output["input_ids"], input_features=processor_output["input_features"]
        ... )
        ```
        """
        if speech_ids is None:
            if (input_ids is None and conditioning_encoder_inputs_embeds is None) or input_features is None:
                raise ValueError(
                    "Either speech_ids or input_ids/conditioning_encoder_inputs_embeds and input_features must be provided."
                )

            if generation_config is None:
                generation_config = self.generation_config
            generation_config.update(**kwargs)

            conditioning_embeds = self.conditioning_encoder(
                input_features=input_features,
                input_ids=input_ids,
                inputs_embeds=conditioning_encoder_inputs_embeds,
                attention_mask=attention_mask,
            )

            speech_ids = self.speech_decoder_model.generate(
                conditioning_embeds=conditioning_embeds,
                generation_config=generation_config,
            )

            speech_ids = self.fix_speech_decoder_output(speech_ids[0])

        outputs = self.speech_encoder_model(
            input_ids=speech_ids,
            attention_mask=attention_mask,
        )

        return outputs[0]

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        input_features: Optional[torch.FloatTensor] = None,
        conditioning_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        text_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        return_loss: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.Tensor] = None,
    ) -> Union[tuple, ClvpOutput]:
        r"""
        conditioning_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
            inputs_embeds for `ClvpConditioningEncoder`. Can be used in place of `input_ids`.
        text_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
            inputs_embeds for the text encoder model passed in place of `input_ids`.
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.

        Examples:

        ```python
        >>> import datasets
        >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration

        >>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library)
        >>> text = "This is an example text."

        >>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
        >>> audio = ds.sort("id")["audio"][0]
        >>> audio_sample, sr = audio["array"], audio["sampling_rate"]

        >>> # Define processor and model
        >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
        >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")

        >>> # processor outputs and model outputs
        >>> processor_output = processor(raw_speech=audio_sample, sampling_rate=sr, text=text, return_tensors="pt")
        >>> outputs = model(
        ...     input_ids=processor_output["input_ids"],
        ...     input_features=processor_output["input_features"],
        ...     return_dict=True,
        ... )
        ```
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        conditioning_embeds = self.conditioning_encoder(
            input_features=input_features,
            input_ids=input_ids,
            inputs_embeds=conditioning_encoder_inputs_embeds,
            attention_mask=attention_mask,
        )

        decoder_outputs = self.speech_decoder_model(
            inputs_embeds=conditioning_embeds,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        speech_ids = decoder_outputs[0]
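
        # during the forward pass the decoder returns embeddings of shape `(batch_size, seq_len, embedding_dim)`,
        # so they must be converted to tokens to make them compatible with `speech_encoder_model`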
        if speech_ids.ndim == 3:
            speech_ids = speech_ids.argmax(2)
        speech_ids = self.fix_speech_decoder_output(speech_ids)

        speech_outputs = self.speech_encoder_model(
            input_ids=speech_ids,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        text_outputs = self.text_encoder_model(
            input_ids=input_ids,
            inputs_embeds=text_encoder_inputs_embeds,
            attention_mask=attention_mask,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        speech_embeds = speech_outputs[0]
        text_embeds = text_outputs[0]
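
        # normalized features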
        speech_embeds = speech_embeds / speech_embeds.norm(p=2, dim=-1, keepdim=True)
        text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
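
        # cosine similarity as logits, scaled by the learned temperature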
        logit_scale = self.logit_scale.exp()
        logits_per_text = torch.matmul(text_embeds, speech_embeds.t()) * logit_scale
        logits_per_speech = logits_per_text.t()

        loss = None
        if return_loss:
            loss = clvp_loss(logits_per_text)

        if not return_dict:
            output = (
                logits_per_speech,
                logits_per_text,
                text_embeds,
                speech_embeds,
                text_outputs[2],
                speech_outputs[2],
            )
            if output_hidden_states:
                output += (
                    decoder_outputs[-1],
                    text_outputs[-1],
                    speech_outputs[-1],
                )

            return ((loss,) + output) if loss is not None else output

        return ClvpOutput(
            loss=loss,
            logits_per_speech=logits_per_speech,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            speech_embeds=speech_embeds,
            text_model_output=text_outputs[2],
            speech_model_output=speech_outputs[2],
            decoder_hidden_states=decoder_outputs.hidden_states,
            text_encoder_hidden_states=text_outputs.hidden_states,
            speech_encoder_hidden_states=speech_outputs.hidden_states,
        )

    @torch.no_grad()
    def generate(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        input_features: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        pad_to_max_mel_tokens: Optional[int] = None,
        output_hidden_states: Optional[bool] = None,
        **kwargs,
    ):
        """
        Generate method for `ClvpModelForConditionalGeneration`. This method calls the `generate` method of
        `ClvpForCausalLM` and then uses those generated `speech_ids` to process `text_embeds` and `speech_embeds` using
        `ClvpEncoder`.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Input text Tokens. Processed from the [`ClvpTokenizer`].
            input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, time_dim)`, *optional*):
                Log mel-spectrogram representations of the audio, as returned by [`ClvpFeatureExtractor`].
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding text token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            generation_config (`~generation.GenerationConfig`, *optional*):
                The generation configuration to be used as base parametrization for the generation call. `**kwargs`
                passed to generate matching the attributes of `generation_config` will override them. If
                `generation_config` is not provided, the default will be used, which has the following loading
                priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
                configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
                default values, whose documentation should be checked to parameterize generation.
            pad_to_max_mel_tokens (`int`, *optional*):
                Pads generated speech_ids to the specified value. This replicates the logic of the official repo,
                link: https://github.com/neonbjb/tortoise-tts/blob/80f89987a5abda5e2b082618cd74f9c7411141dc/tortoise/api.py#L430
                and ensures the logits are the same.
                This does not affect generation quality, so please avoid using it since it is less efficient.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of the decoder model, text encoder and speech encoder models.

        Returns:
            `ClvpOutput` or tuple: A `ClvpOutput` (if `return_dict_in_generate=True` or when
            `config.return_dict_in_generate=True`) or a tuple.
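
        Example (a minimal sketch mirroring the other usage examples in this file, with the same dev checkpoint):

        ```python
        >>> import datasets
        >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration

        >>> # Define the Text and Load the Audio
        >>> text = "This is an example text."
        >>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
        >>> audio = ds.sort("id")["audio"][0]
        >>> audio_sample, sr = audio["array"], audio["sampling_rate"]

        >>> # Define processor and model
        >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
        >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")

        >>> # Generate the speech ids and contrastive outputs
        >>> processor_output = processor(raw_speech=audio_sample, sampling_rate=sr, text=text, return_tensors="pt")
        >>> generated_output = model.generate(
        ...     input_ids=processor_output["input_ids"],
        ...     input_features=processor_output["input_features"],
        ... )
        ```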
        """

        # `input_ids` must be at most `decoder_config.max_text_tokens - 3` tokens long, since extra bos and eos
        # tokens are appended to them inside `ClvpConditioningEncoder` before sampling
        sequence_length = input_ids.shape[-1]
        if sequence_length > (self.config.decoder_config.max_text_tokens - 3):
            raise ValueError(
                f"Maximum sequence length reached! Found input_ids of length {sequence_length}. "
                f"Please make sure that the maximum length of input_ids is {self.config.decoder_config.max_text_tokens - 3}."
            )

        if generation_config is None:
            generation_config = self.generation_config

        generation_config = copy.deepcopy(generation_config)
        model_kwargs = generation_config.update(**kwargs)
        generation_config.validate()
        self._validate_model_kwargs(model_kwargs.copy())

        # pad `input_ids` with extra bos and eos tokens, mirroring the behavior of the official tortoise-tts repo
        input_ids, attention_mask = _pad_extra_bos_eos_tokens(
            input_ids,
            attention_mask,
            add_bos_token=False,
            bos_token_id=self.config.text_config.bos_token_id,
            eos_token_id=self.config.text_config.eos_token_id,
        )

        conditioning_embeds = self.conditioning_encoder(
            input_features=input_features,
            input_ids=input_ids,
            attention_mask=attention_mask,
        )

        decoder_outputs = self.speech_decoder_model.generate(
            conditioning_embeds=conditioning_embeds,
            generation_config=generation_config,
            output_hidden_states=output_hidden_states,
            return_dict=generation_config.return_dict_in_generate,
        )
        # `generate` returns a `ModelOutput` only when `return_dict_in_generate=True`; otherwise it returns the
        # generated ids directly, so fall back to them to avoid leaving `speech_ids` undefined
        speech_ids = decoder_outputs.sequences if isinstance(decoder_outputs, ModelOutput) else decoder_outputs

        # pad to `pad_to_max_mel_tokens` if given, to replicate the logic of the original repo
        if pad_to_max_mel_tokens is not None:
            padding_needed = pad_to_max_mel_tokens - speech_ids.shape[-1]
            speech_ids = torch.nn.functional.pad(
                speech_ids, (0, padding_needed), value=self.generation_config.eos_token_id
            )

        speech_ids = self.fix_speech_decoder_output(speech_ids)

        speech_outputs = self.speech_encoder_model(
            input_ids=speech_ids,
            output_hidden_states=output_hidden_states,
            return_dict=generation_config.return_dict_in_generate,
        )
        text_outputs = self.text_encoder_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_hidden_states=output_hidden_states,
            return_dict=generation_config.return_dict_in_generate,
        )

        speech_embeds = speech_outputs[0]
        text_embeds = text_outputs[0]
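
        # normalized features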
        speech_embeds = speech_embeds / speech_embeds.norm(p=2, dim=-1, keepdim=True)
        text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
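
        # cosine similarity as logits, scaled by the learned temperature, as in `forward`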
        logit_scale = self.logit_scale.exp()
        logits_per_text = torch.matmul(text_embeds, speech_embeds.t()) * logit_scale
        logits_per_speech = logits_per_text.t()

        if not generation_config.return_dict_in_generate:
            output = (
                speech_ids,
                logits_per_speech,
                logits_per_text,
                text_embeds,
                speech_embeds,
                text_outputs[2],
                speech_outputs[2],
            )
            if output_hidden_states:
                output += (
                    decoder_outputs[-1],
                    text_outputs[-1],
                    speech_outputs[-1],
                )

            return output

        return ClvpOutput(
            speech_ids=speech_ids,
            logits_per_speech=logits_per_speech,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            speech_embeds=speech_embeds,
            text_model_output=text_outputs[2],
            speech_model_output=speech_outputs[2],
            decoder_hidden_states=decoder_outputs.hidden_states,
            text_encoder_hidden_states=text_outputs.hidden_states,
            speech_encoder_hidden_states=speech_outputs.hidden_states,
        )


__all__ = [
    "ClvpModelForConditionalGeneration",
    "ClvpForCausalLM",
    "ClvpModel",
    "ClvpPreTrainedModel",
    "ClvpEncoder",
    "ClvpDecoder",
]