Add files using upload-large-folder tool
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/transformer/decoder.py +396 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/transformer/decoder_layer.py +132 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/transformer/embedding.py +293 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/transformer/encoder.py +567 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/transformer/encoder_layer.py +236 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/transformer/label_smoothing_loss.py +96 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/transformer/positionwise_feed_forward.py +115 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/transformer/subsampling.py +383 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/__init__.py +0 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/block_mask_util.py +34 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/class_utils.py +72 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/common.py +103 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/executor.py +132 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/file_utils.py +53 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/frontend_utils.py +125 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/mask.py +227 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/scheduler.py +739 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/train_utils.py +289 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/speech_tokenizer/__init__.py +0 -0
- r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/speech_tokenizer/configuration_whisper.py +37 -0
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/transformer/decoder.py
ADDED
@@ -0,0 +1,396 @@
# Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang, Di Wu)
#               2024 Alibaba Inc (Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from ESPnet(https://github.com/espnet/espnet)
"""Decoder definition."""
from typing import Tuple, List, Optional

import torch
import torch.utils.checkpoint as ckpt
import logging

from cosyvoice.transformer.decoder_layer import DecoderLayer
from cosyvoice.transformer.positionwise_feed_forward import PositionwiseFeedForward
from cosyvoice.utils.class_utils import (
    COSYVOICE_EMB_CLASSES,
    COSYVOICE_ATTENTION_CLASSES,
    COSYVOICE_ACTIVATION_CLASSES,
)
from cosyvoice.utils.mask import (subsequent_mask, make_pad_mask)


class TransformerDecoder(torch.nn.Module):
    """Base class of Transformer decoder module.
    Args:
        vocab_size: output dim
        encoder_output_size: dimension of attention
        attention_heads: the number of heads of multi head attention
        linear_units: the hidden units number of position-wise feedforward
        num_blocks: the number of decoder blocks
        dropout_rate: dropout rate
        self_attention_dropout_rate: dropout rate for attention
        input_layer: input layer type
        use_output_layer: whether to use output layer
        pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
        normalize_before:
            True: use layer_norm before each sub-block of a layer.
            False: use layer_norm after each sub-block of a layer.
        src_attention: if false, encoder-decoder cross attention is not
            applied, such as CIF model
        key_bias: whether to use bias in attention.linear_k, False for whisper models.
        gradient_checkpointing: rerunning a forward-pass segment for each
            checkpointed segment during backward.
        tie_word_embedding: Tie or clone module weights depending on whether we are
            using TorchScript or not
    """

    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        self_attention_dropout_rate: float = 0.0,
        src_attention_dropout_rate: float = 0.0,
        input_layer: str = "embed",
        use_output_layer: bool = True,
        normalize_before: bool = True,
        src_attention: bool = True,
        key_bias: bool = True,
        activation_type: str = "relu",
        gradient_checkpointing: bool = False,
        tie_word_embedding: bool = False,
    ):
        super().__init__()
        attention_dim = encoder_output_size
        activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]()

        self.embed = torch.nn.Sequential(
            torch.nn.Identity() if input_layer == "no_pos" else
            torch.nn.Embedding(vocab_size, attention_dim),
            COSYVOICE_EMB_CLASSES[input_layer](attention_dim,
                                               positional_dropout_rate),
        )

        self.normalize_before = normalize_before
        self.after_norm = torch.nn.LayerNorm(attention_dim, eps=1e-5)
        self.use_output_layer = use_output_layer
        if use_output_layer:
            self.output_layer = torch.nn.Linear(attention_dim, vocab_size)
        else:
            self.output_layer = torch.nn.Identity()
        self.num_blocks = num_blocks
        self.decoders = torch.nn.ModuleList([
            DecoderLayer(
                attention_dim,
                COSYVOICE_ATTENTION_CLASSES["selfattn"](
                    attention_heads, attention_dim,
                    self_attention_dropout_rate, key_bias),
                COSYVOICE_ATTENTION_CLASSES["selfattn"](
                    attention_heads, attention_dim, src_attention_dropout_rate,
                    key_bias) if src_attention else None,
                PositionwiseFeedForward(attention_dim, linear_units,
                                        dropout_rate, activation),
                dropout_rate,
                normalize_before,
            ) for _ in range(self.num_blocks)
        ])

        self.gradient_checkpointing = gradient_checkpointing
        self.tie_word_embedding = tie_word_embedding

    def forward(
        self,
        memory: torch.Tensor,
        memory_mask: torch.Tensor,
        ys_in_pad: torch.Tensor,
        ys_in_lens: torch.Tensor,
        r_ys_in_pad: torch.Tensor = torch.empty(0),
        reverse_weight: float = 0.0,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Forward decoder.
        Args:
            memory: encoded memory, float32 (batch, maxlen_in, feat)
            memory_mask: encoder memory mask, (batch, 1, maxlen_in)
            ys_in_pad: padded input token ids, int64 (batch, maxlen_out)
            ys_in_lens: input lengths of this batch (batch)
            r_ys_in_pad: not used in transformer decoder, in order to unify api
                with bidirectional decoder
            reverse_weight: not used in transformer decoder, in order to unify
                api with bidirectional decoder
        Returns:
            (tuple): tuple containing:
                x: decoded token score before softmax (batch, maxlen_out,
                    vocab_size) if use_output_layer is True,
                torch.tensor(0.0), in order to unify api with bidirectional decoder
                olens: (batch, )
        NOTE(xcsong):
            We pass the `__call__` method of the modules instead of `forward` to the
            checkpointing API because `__call__` attaches all the hooks of the module.
            https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2
        """
        tgt = ys_in_pad
        maxlen = tgt.size(1)
        # tgt_mask: (B, 1, L)
        tgt_mask = ~make_pad_mask(ys_in_lens, maxlen).unsqueeze(1)
        tgt_mask = tgt_mask.to(tgt.device)
        # m: (1, L, L)
        m = subsequent_mask(tgt_mask.size(-1),
                            device=tgt_mask.device).unsqueeze(0)
        # tgt_mask: (B, L, L)
        tgt_mask = tgt_mask & m
        x, _ = self.embed(tgt)
        if self.gradient_checkpointing and self.training:
            x = self.forward_layers_checkpointed(x, tgt_mask, memory,
                                                 memory_mask)
        else:
            x = self.forward_layers(x, tgt_mask, memory, memory_mask)
        if self.normalize_before:
            x = self.after_norm(x)
        if self.use_output_layer:
            x = self.output_layer(x)
        olens = tgt_mask.sum(1)
        return x, torch.tensor(0.0), olens

    def forward_layers(self, x: torch.Tensor, tgt_mask: torch.Tensor,
                       memory: torch.Tensor,
                       memory_mask: torch.Tensor) -> torch.Tensor:
        for layer in self.decoders:
            x, tgt_mask, memory, memory_mask = layer(x, tgt_mask, memory,
                                                     memory_mask)
        return x

    @torch.jit.ignore(drop=True)
    def forward_layers_checkpointed(self, x: torch.Tensor,
                                    tgt_mask: torch.Tensor,
                                    memory: torch.Tensor,
                                    memory_mask: torch.Tensor) -> torch.Tensor:
        for layer in self.decoders:
            x, tgt_mask, memory, memory_mask = ckpt.checkpoint(
                layer.__call__, x, tgt_mask, memory, memory_mask)
        return x

    def forward_one_step(
        self,
        memory: torch.Tensor,
        memory_mask: torch.Tensor,
        tgt: torch.Tensor,
        tgt_mask: torch.Tensor,
        cache: Optional[List[torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        """Forward one step.
        This is only used for decoding.
        Args:
            memory: encoded memory, float32 (batch, maxlen_in, feat)
            memory_mask: encoded memory mask, (batch, 1, maxlen_in)
            tgt: input token ids, int64 (batch, maxlen_out)
            tgt_mask: input token mask, (batch, maxlen_out)
                dtype=torch.uint8 in PyTorch 1.2-
                dtype=torch.bool in PyTorch 1.2+ (include 1.2)
            cache: cached output list of (batch, max_time_out-1, size)
        Returns:
            y, cache: NN output value and cache per `self.decoders`.
            `y.shape` is (batch, maxlen_out, token)
        """
        x, _ = self.embed(tgt)
        new_cache = []
        for i, decoder in enumerate(self.decoders):
            if cache is None:
                c = None
            else:
                c = cache[i]
            x, tgt_mask, memory, memory_mask = decoder(x,
                                                       tgt_mask,
                                                       memory,
                                                       memory_mask,
                                                       cache=c)
            new_cache.append(x)
        if self.normalize_before:
            y = self.after_norm(x[:, -1])
        else:
            y = x[:, -1]
        if self.use_output_layer:
            y = torch.log_softmax(self.output_layer(y), dim=-1)
        return y, new_cache

    def tie_or_clone_weights(self, jit_mode: bool = True):
        """Tie or clone module weights (between word_emb and output_layer)
        depending on whether we are using TorchScript or not"""
        if not self.use_output_layer:
            return
        if jit_mode:
            logging.info("clone emb.weight to output.weight")
            self.output_layer.weight = torch.nn.Parameter(
                self.embed[0].weight.clone())
        else:
            logging.info("tie emb.weight with output.weight")
            self.output_layer.weight = self.embed[0].weight

        if getattr(self.output_layer, "bias", None) is not None:
            self.output_layer.bias.data = torch.nn.functional.pad(
                self.output_layer.bias.data,
                (
                    0,
                    self.output_layer.weight.shape[0] -
                    self.output_layer.bias.shape[0],
                ),
                "constant",
                0,
            )


class BiTransformerDecoder(torch.nn.Module):
    """Base class of Transformer decoder module.
    Args:
        vocab_size: output dim
        encoder_output_size: dimension of attention
        attention_heads: the number of heads of multi head attention
        linear_units: the hidden units number of position-wise feedforward
        num_blocks: the number of decoder blocks
        r_num_blocks: the number of right to left decoder blocks
        dropout_rate: dropout rate
        self_attention_dropout_rate: dropout rate for attention
        input_layer: input layer type
        use_output_layer: whether to use output layer
        pos_enc_class: PositionalEncoding or ScaledPositionalEncoding
        normalize_before:
            True: use layer_norm before each sub-block of a layer.
            False: use layer_norm after each sub-block of a layer.
        key_bias: whether to use bias in attention.linear_k, False for whisper models.
    """

    def __init__(
        self,
        vocab_size: int,
        encoder_output_size: int,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        r_num_blocks: int = 0,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        self_attention_dropout_rate: float = 0.0,
        src_attention_dropout_rate: float = 0.0,
        input_layer: str = "embed",
        use_output_layer: bool = True,
        normalize_before: bool = True,
        key_bias: bool = True,
        gradient_checkpointing: bool = False,
        tie_word_embedding: bool = False,
    ):
        super().__init__()
        self.tie_word_embedding = tie_word_embedding
        self.left_decoder = TransformerDecoder(
            vocab_size,
            encoder_output_size,
            attention_heads,
            linear_units,
            num_blocks,
            dropout_rate,
            positional_dropout_rate,
            self_attention_dropout_rate,
            src_attention_dropout_rate,
            input_layer,
            use_output_layer,
            normalize_before,
            key_bias=key_bias,
            gradient_checkpointing=gradient_checkpointing,
            tie_word_embedding=tie_word_embedding)

        self.right_decoder = TransformerDecoder(
            vocab_size,
            encoder_output_size,
            attention_heads,
            linear_units,
            r_num_blocks,
            dropout_rate,
            positional_dropout_rate,
            self_attention_dropout_rate,
            src_attention_dropout_rate,
            input_layer,
            use_output_layer,
            normalize_before,
            key_bias=key_bias,
            gradient_checkpointing=gradient_checkpointing,
            tie_word_embedding=tie_word_embedding)

    def forward(
        self,
        memory: torch.Tensor,
        memory_mask: torch.Tensor,
        ys_in_pad: torch.Tensor,
        ys_in_lens: torch.Tensor,
        r_ys_in_pad: torch.Tensor,
        reverse_weight: float = 0.0,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Forward decoder.
        Args:
            memory: encoded memory, float32 (batch, maxlen_in, feat)
            memory_mask: encoder memory mask, (batch, 1, maxlen_in)
            ys_in_pad: padded input token ids, int64 (batch, maxlen_out)
            ys_in_lens: input lengths of this batch (batch)
            r_ys_in_pad: padded input token ids, int64 (batch, maxlen_out),
                used for right to left decoder
            reverse_weight: used for right to left decoder
        Returns:
            (tuple): tuple containing:
                x: decoded token score before softmax (batch, maxlen_out,
                    vocab_size) if use_output_layer is True,
                r_x: decoded token score (right to left decoder)
                    before softmax (batch, maxlen_out, vocab_size)
                    if use_output_layer is True,
                olens: (batch, )
        """
        l_x, _, olens = self.left_decoder(memory, memory_mask, ys_in_pad,
                                          ys_in_lens)
        r_x = torch.tensor(0.0)
        if reverse_weight > 0.0:
            r_x, _, olens = self.right_decoder(memory, memory_mask,
                                               r_ys_in_pad, ys_in_lens)
        return l_x, r_x, olens

    def forward_one_step(
        self,
        memory: torch.Tensor,
        memory_mask: torch.Tensor,
        tgt: torch.Tensor,
        tgt_mask: torch.Tensor,
        cache: Optional[List[torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        """Forward one step.
        This is only used for decoding.
        Args:
            memory: encoded memory, float32 (batch, maxlen_in, feat)
            memory_mask: encoded memory mask, (batch, 1, maxlen_in)
            tgt: input token ids, int64 (batch, maxlen_out)
            tgt_mask: input token mask, (batch, maxlen_out)
                dtype=torch.uint8 in PyTorch 1.2-
                dtype=torch.bool in PyTorch 1.2+ (include 1.2)
            cache: cached output list of (batch, max_time_out-1, size)
        Returns:
            y, cache: NN output value and cache per `self.decoders`.
            `y.shape` is (batch, maxlen_out, token)
        """
        return self.left_decoder.forward_one_step(memory, memory_mask, tgt,
                                                  tgt_mask, cache)

    def tie_or_clone_weights(self, jit_mode: bool = True):
        """Tie or clone module weights (between word_emb and output_layer)
        depending on whether we are using TorchScript or not"""
        self.left_decoder.tie_or_clone_weights(jit_mode)
        self.right_decoder.tie_or_clone_weights(jit_mode)
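
For orientation, a minimal usage sketch of TransformerDecoder (not part of the committed file): it assumes the cosyvoice package from this upload is importable, and all sizes are arbitrary illustrative values.

import torch
from cosyvoice.transformer.decoder import TransformerDecoder

decoder = TransformerDecoder(vocab_size=100, encoder_output_size=256)
decoder.eval()

memory = torch.randn(2, 50, 256)                      # (batch, maxlen_in, feat)
memory_mask = torch.ones(2, 1, 50, dtype=torch.bool)  # (batch, 1, maxlen_in)
ys_in_pad = torch.randint(0, 100, (2, 10))            # (batch, maxlen_out), int64
ys_in_lens = torch.tensor([10, 8])                    # per-utterance lengths

with torch.no_grad():
    logits, _, olens = decoder(memory, memory_mask, ys_in_pad, ys_in_lens)
print(logits.shape)  # torch.Size([2, 10, 100]): pre-softmax token scores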
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/transformer/decoder_layer.py
ADDED
@@ -0,0 +1,132 @@
# Copyright (c) 2019 Shigeki Karita
#               2020 Mobvoi Inc (Binbin Zhang)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoder self-attention layer definition."""
from typing import Optional, Tuple

import torch
from torch import nn


class DecoderLayer(nn.Module):
    """Single decoder layer module.

    Args:
        size (int): Input dimension.
        self_attn (torch.nn.Module): Self-attention module instance.
            `MultiHeadedAttention` instance can be used as the argument.
        src_attn (torch.nn.Module): Inter-attention module instance.
            `MultiHeadedAttention` instance can be used as the argument.
            If `None` is passed, inter-attention is not used, as in
            CIF, GPT, and other decoder-only models.
        feed_forward (torch.nn.Module): Feed-forward module instance.
            `PositionwiseFeedForward` instance can be used as the argument.
        dropout_rate (float): Dropout rate.
        normalize_before (bool):
            True: use layer_norm before each sub-block.
            False: use layer_norm after each sub-block.
    """

    def __init__(
        self,
        size: int,
        self_attn: nn.Module,
        src_attn: Optional[nn.Module],
        feed_forward: nn.Module,
        dropout_rate: float,
        normalize_before: bool = True,
    ):
        """Construct a DecoderLayer object."""
        super().__init__()
        self.size = size
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        self.norm1 = nn.LayerNorm(size, eps=1e-5)
        self.norm2 = nn.LayerNorm(size, eps=1e-5)
        self.norm3 = nn.LayerNorm(size, eps=1e-5)
        self.dropout = nn.Dropout(dropout_rate)
        self.normalize_before = normalize_before

    def forward(
        self,
        tgt: torch.Tensor,
        tgt_mask: torch.Tensor,
        memory: torch.Tensor,
        memory_mask: torch.Tensor,
        cache: Optional[torch.Tensor] = None
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute decoded features.

        Args:
            tgt (torch.Tensor): Input tensor (#batch, maxlen_out, size).
            tgt_mask (torch.Tensor): Mask for input tensor
                (#batch, maxlen_out).
            memory (torch.Tensor): Encoded memory
                (#batch, maxlen_in, size).
            memory_mask (torch.Tensor): Encoded memory mask
                (#batch, maxlen_in).
            cache (torch.Tensor): cached tensors.
                (#batch, maxlen_out - 1, size).

        Returns:
            torch.Tensor: Output tensor (#batch, maxlen_out, size).
            torch.Tensor: Mask for output tensor (#batch, maxlen_out).
            torch.Tensor: Encoded memory (#batch, maxlen_in, size).
            torch.Tensor: Encoded memory mask (#batch, maxlen_in).

        """
        residual = tgt
        if self.normalize_before:
            tgt = self.norm1(tgt)

        if cache is None:
            tgt_q = tgt
            tgt_q_mask = tgt_mask
        else:
            # compute only the last frame query keeping dim: max_time_out -> 1
            assert cache.shape == (
                tgt.shape[0],
                tgt.shape[1] - 1,
                self.size,
            ), f"{cache.shape} == {(tgt.shape[0], tgt.shape[1] - 1, self.size)}"
            tgt_q = tgt[:, -1:, :]
            residual = residual[:, -1:, :]
            tgt_q_mask = tgt_mask[:, -1:, :]

        x = residual + self.dropout(
            self.self_attn(tgt_q, tgt, tgt, tgt_q_mask)[0])
        if not self.normalize_before:
            x = self.norm1(x)

        if self.src_attn is not None:
            residual = x
            if self.normalize_before:
                x = self.norm2(x)
            x = residual + self.dropout(
                self.src_attn(x, memory, memory, memory_mask)[0])
            if not self.normalize_before:
                x = self.norm2(x)

        residual = x
        if self.normalize_before:
            x = self.norm3(x)
        x = residual + self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm3(x)

        if cache is not None:
            x = torch.cat([cache, x], dim=1)

        return x, tgt_mask, memory, memory_mask
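
A hedged sketch (not part of the diff) of the cache contract above: on an incremental step, the caller passes the full target so far plus this layer's previous outputs as `cache`, and the layer computes only the last frame before re-attaching the cache. It assumes the sibling cosyvoice.transformer.attention module from this upload; all sizes are illustrative.

import torch
from cosyvoice.transformer.decoder_layer import DecoderLayer
from cosyvoice.transformer.attention import MultiHeadedAttention
from cosyvoice.transformer.positionwise_feed_forward import PositionwiseFeedForward

size, heads = 64, 4
layer = DecoderLayer(
    size,
    MultiHeadedAttention(heads, size, 0.0),   # self-attention
    MultiHeadedAttention(heads, size, 0.0),   # cross-attention
    PositionwiseFeedForward(size, 256, 0.0),
    dropout_rate=0.0,
)
layer.eval()

tgt = torch.randn(1, 5, size)                               # 5 decoded steps so far
tgt_mask = torch.tril(torch.ones(1, 5, 5, dtype=torch.bool))  # causal (B, L, L)
memory = torch.randn(1, 20, size)
memory_mask = torch.ones(1, 1, 20, dtype=torch.bool)

# Full pass, then an incremental pass reusing the first 4 output frames as cache.
full, *_ = layer(tgt, tgt_mask, memory, memory_mask)
inc, *_ = layer(tgt, tgt_mask, memory, memory_mask, cache=full[:, :-1, :])
# `inc` is the cache plus the freshly computed last frame: shape (1, 5, size),
# and in eval mode inc[:, -1] matches full[:, -1].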
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/transformer/embedding.py
ADDED
@@ -0,0 +1,293 @@
# Copyright (c) 2020 Mobvoi Inc. (authors: Binbin Zhang, Di Wu)
#               2024 Alibaba Inc (Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from ESPnet(https://github.com/espnet/espnet)
"""Positional Encoding Module."""

import math
from typing import Tuple, Union

import torch
import torch.nn.functional as F
import numpy as np


class PositionalEncoding(torch.nn.Module):
    """Positional encoding.

    :param int d_model: embedding dim
    :param float dropout_rate: dropout rate
    :param int max_len: maximum input length

    PE(pos, 2i)   = sin(pos/(10000^(2i/dmodel)))
    PE(pos, 2i+1) = cos(pos/(10000^(2i/dmodel)))
    """

    def __init__(self,
                 d_model: int,
                 dropout_rate: float,
                 max_len: int = 5000,
                 reverse: bool = False):
        """Construct a PositionalEncoding object."""
        super().__init__()
        self.d_model = d_model
        self.xscale = math.sqrt(self.d_model)
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.max_len = max_len

        self.pe = torch.zeros(self.max_len, self.d_model)
        position = torch.arange(0, self.max_len,
                                dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32) *
            -(math.log(10000.0) / self.d_model))
        self.pe[:, 0::2] = torch.sin(position * div_term)
        self.pe[:, 1::2] = torch.cos(position * div_term)
        self.pe = self.pe.unsqueeze(0)

    def forward(self,
                x: torch.Tensor,
                offset: Union[int, torch.Tensor] = 0) \
            -> Tuple[torch.Tensor, torch.Tensor]:
        """Add positional encoding.

        Args:
            x (torch.Tensor): Input. Its shape is (batch, time, ...)
            offset (int, torch.tensor): position offset

        Returns:
            torch.Tensor: Encoded tensor. Its shape is (batch, time, ...)
            torch.Tensor: for compatibility to RelPositionalEncoding
        """

        self.pe = self.pe.to(x.device)
        pos_emb = self.position_encoding(offset, x.size(1), False)
        x = x * self.xscale + pos_emb
        return self.dropout(x), self.dropout(pos_emb)

    def position_encoding(self,
                          offset: Union[int, torch.Tensor],
                          size: int,
                          apply_dropout: bool = True) -> torch.Tensor:
        """ For getting encoding in a streaming fashion

        Attention!!!!!
        we apply dropout only once at the whole utterance level in a
        non-streaming way, but will call this function several times with
        increasing input size in a streaming scenario, so the dropout will
        be applied several times.

        Args:
            offset (int or torch.tensor): start offset
            size (int): required size of position encoding

        Returns:
            torch.Tensor: Corresponding encoding
        """
        # How to subscript a Union type:
        #   https://github.com/pytorch/pytorch/issues/69434
        if isinstance(offset, int):
            assert offset + size <= self.max_len
            pos_emb = self.pe[:, offset:offset + size]
        elif isinstance(offset, torch.Tensor) and offset.dim() == 0:  # scalar
            assert offset + size <= self.max_len
            pos_emb = self.pe[:, offset:offset + size]
        else:  # for batched streaming decoding on GPU
            assert torch.max(offset) + size <= self.max_len
            index = offset.unsqueeze(1) + \
                torch.arange(0, size).to(offset.device)  # B X T
            flag = index > 0
            # remove negative offset
            index = index * flag
            pos_emb = F.embedding(index, self.pe[0])  # B X T X d_model

        if apply_dropout:
            pos_emb = self.dropout(pos_emb)
        return pos_emb


class RelPositionalEncoding(PositionalEncoding):
    """Relative positional encoding module.
    See : Appendix B in https://arxiv.org/abs/1901.02860
    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.
    """

    def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):
        """Initialize class."""
        super().__init__(d_model, dropout_rate, max_len, reverse=True)

    def forward(self,
                x: torch.Tensor,
                offset: Union[int, torch.Tensor] = 0) \
            -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute positional encoding.
        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).
        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).
            torch.Tensor: Positional embedding tensor (1, time, `*`).
        """
        self.pe = self.pe.to(x.device)
        x = x * self.xscale
        pos_emb = self.position_encoding(offset, x.size(1), False)
        return self.dropout(x), self.dropout(pos_emb)


class WhisperPositionalEncoding(PositionalEncoding):
    """ Sinusoids position encoding used in openai-whisper.encoder
    """

    def __init__(self, d_model: int, dropout_rate: float, max_len: int = 1500):
        super().__init__(d_model, dropout_rate, max_len)
        self.xscale = 1.0
        log_timescale_increment = np.log(10000) / (d_model // 2 - 1)
        inv_timescales = torch.exp(-log_timescale_increment *
                                   torch.arange(d_model // 2))
        scaled_time = torch.arange(max_len)[:, np.newaxis] * \
            inv_timescales[np.newaxis, :]
        pe = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)
        delattr(self, "pe")
        self.register_buffer("pe", pe.unsqueeze(0))


class LearnablePositionalEncoding(PositionalEncoding):
    """ Learnable position encoding used in openai-whisper.decoder
    """

    def __init__(self, d_model: int, dropout_rate: float, max_len: int = 448):
        super().__init__(d_model, dropout_rate, max_len)
        # NOTE(xcsong): overwrite self.pe & self.xscale
        self.pe = torch.nn.Parameter(torch.empty(1, max_len, d_model))
        self.xscale = 1.0


class NoPositionalEncoding(torch.nn.Module):
    """ No position encoding
    """

    def __init__(self, d_model: int, dropout_rate: float):
        super().__init__()
        self.d_model = d_model
        self.dropout = torch.nn.Dropout(p=dropout_rate)

    def forward(self,
                x: torch.Tensor,
                offset: Union[int, torch.Tensor] = 0) \
            -> Tuple[torch.Tensor, torch.Tensor]:
        """ Just return zero vector for interface compatibility
        """
        pos_emb = torch.zeros(1, x.size(1), self.d_model).to(x.device)
        return self.dropout(x), pos_emb

    def position_encoding(self, offset: Union[int, torch.Tensor],
                          size: int) -> torch.Tensor:
        return torch.zeros(1, size, self.d_model)


class EspnetRelPositionalEncoding(torch.nn.Module):
    """Relative positional encoding module (new implementation).

    Details can be found in https://github.com/espnet/espnet/pull/2816.

    See : Appendix B in https://arxiv.org/abs/1901.02860

    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.

    """

    def __init__(self, d_model, dropout_rate, max_len=5000):
        """Construct a PositionalEncoding object."""
        super(EspnetRelPositionalEncoding, self).__init__()
        self.d_model = d_model
        self.xscale = math.sqrt(self.d_model)
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.pe = None
        self.extend_pe(torch.tensor(0.0).expand(1, max_len))

    def extend_pe(self, x):
        """Reset the positional encodings."""
        if self.pe is not None:
            # self.pe contains both positive and negative parts
            # the length of self.pe is 2 * input_len - 1
            if self.pe.size(1) >= x.size(1) * 2 - 1:
                if self.pe.dtype != x.dtype or self.pe.device != x.device:
                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
                return
        # Suppose `i` is the position of the query vector and `j` is the
        # position of the key vector. We use positive relative positions when
        # keys are to the left (i>j) and negative relative positions
        # otherwise (i<j).
        pe_positive = torch.zeros(x.size(1), self.d_model)
        pe_negative = torch.zeros(x.size(1), self.d_model)
        position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32)
            * -(math.log(10000.0) / self.d_model)
        )
        pe_positive[:, 0::2] = torch.sin(position * div_term)
        pe_positive[:, 1::2] = torch.cos(position * div_term)
        pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
        pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)

        # Reverse the order of positive indices and concat both positive and
        # negative indices. This is used to support the shifting trick
        # as in https://arxiv.org/abs/1901.02860
        pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
        pe_negative = pe_negative[1:].unsqueeze(0)
        pe = torch.cat([pe_positive, pe_negative], dim=1)
        self.pe = pe.to(device=x.device, dtype=x.dtype)

    def forward(self, x: torch.Tensor, offset: Union[int, torch.Tensor] = 0):
        """Add positional encoding.

        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).

        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).

        """
        self.extend_pe(x)
        x = x * self.xscale
        pos_emb = self.position_encoding(size=x.size(1), offset=offset)
        return self.dropout(x), self.dropout(pos_emb)

    def position_encoding(self,
                          offset: Union[int, torch.Tensor],
                          size: int) -> torch.Tensor:
        """ For getting encoding in a streaming fashion

        Attention!!!!!
        we apply dropout only once at the whole utterance level in a
        non-streaming way, but will call this function several times with
        increasing input size in a streaming scenario, so the dropout will
        be applied several times.

        Args:
            offset (int or torch.tensor): start offset
            size (int): required size of position encoding

        Returns:
            torch.Tensor: Corresponding encoding
        """
        pos_emb = self.pe[
            :,
            self.pe.size(1) // 2 - size + 1 : self.pe.size(1) // 2 + size,
        ]
        return pos_emb
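
A small sketch (not part of the committed file; dimensions are arbitrary) showing how the streaming `position_encoding` slice agrees with the embedding returned by a full forward pass:

import torch
from cosyvoice.transformer.embedding import PositionalEncoding

pe = PositionalEncoding(d_model=8, dropout_rate=0.0, max_len=100)
pe.eval()

x = torch.zeros(1, 10, 8)
_, emb_full = pe(x)  # positional embedding for positions 0..9
# Streaming request for positions 6..9 only, e.g. a later decode step.
emb_tail = pe.position_encoding(offset=6, size=4, apply_dropout=False)
print(torch.allclose(emb_full[:, 6:10], emb_tail))  # True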
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/transformer/encoder.py
ADDED
@@ -0,0 +1,567 @@
| 1 |
+
# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
|
| 2 |
+
# 2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn)
|
| 3 |
+
# 2024 Alibaba Inc (Xiang Lyu)
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
# Modified from ESPnet(https://github.com/espnet/espnet)
|
| 17 |
+
"""Encoder definition."""
|
| 18 |
+
from typing import Tuple
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
import torch.utils.checkpoint as ckpt
|
| 22 |
+
|
| 23 |
+
from cosyvoice.transformer.convolution import ConvolutionModule
|
| 24 |
+
from cosyvoice.transformer.encoder_layer import TransformerEncoderLayer
|
| 25 |
+
from cosyvoice.transformer.encoder_layer import ConformerEncoderLayer
|
| 26 |
+
from cosyvoice.transformer.positionwise_feed_forward import PositionwiseFeedForward
|
| 27 |
+
from cosyvoice.utils.class_utils import (
|
| 28 |
+
COSYVOICE_EMB_CLASSES,
|
| 29 |
+
COSYVOICE_SUBSAMPLE_CLASSES,
|
| 30 |
+
COSYVOICE_ATTENTION_CLASSES,
|
| 31 |
+
COSYVOICE_ACTIVATION_CLASSES,
|
| 32 |
+
)
|
| 33 |
+
from cosyvoice.utils.mask import make_pad_mask
|
| 34 |
+
from cosyvoice.utils.mask import add_optional_chunk_mask
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class BaseEncoder(torch.nn.Module):
|
| 38 |
+
|
| 39 |
+
def __init__(
|
| 40 |
+
self,
|
| 41 |
+
input_size: int,
|
| 42 |
+
output_size: int = 256,
|
| 43 |
+
attention_heads: int = 4,
|
| 44 |
+
linear_units: int = 2048,
|
| 45 |
+
num_blocks: int = 6,
|
| 46 |
+
dropout_rate: float = 0.1,
|
| 47 |
+
positional_dropout_rate: float = 0.1,
|
| 48 |
+
attention_dropout_rate: float = 0.0,
|
| 49 |
+
input_layer: str = "conv2d",
|
| 50 |
+
pos_enc_layer_type: str = "abs_pos",
|
| 51 |
+
normalize_before: bool = True,
|
| 52 |
+
static_chunk_size: int = 0,
|
| 53 |
+
use_dynamic_chunk: bool = False,
|
| 54 |
+
global_cmvn: torch.nn.Module = None,
|
| 55 |
+
use_dynamic_left_chunk: bool = False,
|
| 56 |
+
gradient_checkpointing: bool = False,
|
| 57 |
+
):
|
| 58 |
+
"""
|
| 59 |
+
Args:
|
| 60 |
+
input_size (int): input dim
|
| 61 |
+
output_size (int): dimension of attention
|
| 62 |
+
attention_heads (int): the number of heads of multi head attention
|
| 63 |
+
linear_units (int): the hidden units number of position-wise feed
|
| 64 |
+
forward
|
| 65 |
+
num_blocks (int): the number of decoder blocks
|
| 66 |
+
dropout_rate (float): dropout rate
|
| 67 |
+
attention_dropout_rate (float): dropout rate in attention
|
| 68 |
+
positional_dropout_rate (float): dropout rate after adding
|
| 69 |
+
positional encoding
|
| 70 |
+
input_layer (str): input layer type.
|
| 71 |
+
optional [linear, conv2d, conv2d6, conv2d8]
|
| 72 |
+
pos_enc_layer_type (str): Encoder positional encoding layer type.
|
| 73 |
+
opitonal [abs_pos, scaled_abs_pos, rel_pos, no_pos]
|
| 74 |
+
normalize_before (bool):
|
| 75 |
+
True: use layer_norm before each sub-block of a layer.
|
| 76 |
+
False: use layer_norm after each sub-block of a layer.
|
| 77 |
+
static_chunk_size (int): chunk size for static chunk training and
|
| 78 |
+
decoding
|
| 79 |
+
use_dynamic_chunk (bool): whether use dynamic chunk size for
|
| 80 |
+
training or not, You can only use fixed chunk(chunk_size > 0)
|
| 81 |
+
or dyanmic chunk size(use_dynamic_chunk = True)
|
| 82 |
+
global_cmvn (Optional[torch.nn.Module]): Optional GlobalCMVN module
|
| 83 |
+
use_dynamic_left_chunk (bool): whether use dynamic left chunk in
|
| 84 |
+
dynamic chunk training
|
| 85 |
+
key_bias: whether use bias in attention.linear_k, False for whisper models.
|
| 86 |
+
gradient_checkpointing: rerunning a forward-pass segment for each
|
| 87 |
+
checkpointed segment during backward.
|
| 88 |
+
"""
|
| 89 |
+
super().__init__()
|
| 90 |
+
self._output_size = output_size
|
| 91 |
+
|
| 92 |
+
self.global_cmvn = global_cmvn
|
| 93 |
+
self.embed = COSYVOICE_SUBSAMPLE_CLASSES[input_layer](
|
| 94 |
+
input_size,
|
| 95 |
+
output_size,
|
| 96 |
+
dropout_rate,
|
| 97 |
+
COSYVOICE_EMB_CLASSES[pos_enc_layer_type](output_size,
|
| 98 |
+
positional_dropout_rate),
|
| 99 |
+
)
|
| 100 |
+
|
| 101 |
+
self.normalize_before = normalize_before
|
| 102 |
+
self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5)
|
| 103 |
+
self.static_chunk_size = static_chunk_size
|
| 104 |
+
self.use_dynamic_chunk = use_dynamic_chunk
|
| 105 |
+
self.use_dynamic_left_chunk = use_dynamic_left_chunk
|
| 106 |
+
self.gradient_checkpointing = gradient_checkpointing
|
| 107 |
+
|
| 108 |
+
def output_size(self) -> int:
|
| 109 |
+
return self._output_size
|
| 110 |
+
|
| 111 |
+
def forward(
|
| 112 |
+
self,
|
| 113 |
+
xs: torch.Tensor,
|
| 114 |
+
xs_lens: torch.Tensor,
|
| 115 |
+
decoding_chunk_size: int = 0,
|
| 116 |
+
num_decoding_left_chunks: int = -1,
|
| 117 |
+
) -> Tuple[torch.Tensor, torch.Tensor]:
|
| 118 |
+
"""Embed positions in tensor.
|
| 119 |
+
|
| 120 |
+
Args:
|
| 121 |
+
xs: padded input tensor (B, T, D)
|
| 122 |
+
xs_lens: input length (B)
|
| 123 |
+
decoding_chunk_size: decoding chunk size for dynamic chunk
|
| 124 |
+
0: default for training, use random dynamic chunk.
|
| 125 |
+
<0: for decoding, use full chunk.
|
| 126 |
+
>0: for decoding, use fixed chunk size as set.
|
| 127 |
+
num_decoding_left_chunks: number of left chunks, this is for decoding,
|
| 128 |
+
the chunk size is decoding_chunk_size.
|
| 129 |
+
>=0: use num_decoding_left_chunks
|
| 130 |
+
<0: use all left chunks
|
| 131 |
+
Returns:
|
| 132 |
+
encoder output tensor xs, and subsampled masks
|
| 133 |
+
xs: padded output tensor (B, T' ~= T/subsample_rate, D)
|
| 134 |
+
masks: torch.Tensor batch padding mask after subsample
|
| 135 |
+
(B, 1, T' ~= T/subsample_rate)
|
| 136 |
+
NOTE(xcsong):
|
| 137 |
+
We pass the `__call__` method of the modules instead of `forward` to the
|
| 138 |
+
checkpointing API because `__call__` attaches all the hooks of the module.
|
| 139 |
+
https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2
|
| 140 |
+
"""
|
| 141 |
+
T = xs.size(1)
|
| 142 |
+
masks = ~make_pad_mask(xs_lens, T).unsqueeze(1) # (B, 1, T)
|
| 143 |
+
if self.global_cmvn is not None:
|
| 144 |
+
xs = self.global_cmvn(xs)
|
| 145 |
+
xs, pos_emb, masks = self.embed(xs, masks)
|
| 146 |
+
mask_pad = masks # (B, 1, T/subsample_rate)
|
| 147 |
+
chunk_masks = add_optional_chunk_mask(xs, masks,
|
| 148 |
+
self.use_dynamic_chunk,
|
| 149 |
+
self.use_dynamic_left_chunk,
|
| 150 |
+
decoding_chunk_size,
|
| 151 |
+
self.static_chunk_size,
|
| 152 |
+
num_decoding_left_chunks)
|
| 153 |
+
if self.gradient_checkpointing and self.training:
|
            xs = self.forward_layers_checkpointed(xs, chunk_masks, pos_emb,
                                                  mask_pad)
        else:
            xs = self.forward_layers(xs, chunk_masks, pos_emb, mask_pad)
        if self.normalize_before:
            xs = self.after_norm(xs)
        # Here we assume the mask is not changed in encoder layers, so just
        # return the masks before encoder layers, and the masks will be used
        # for cross attention with decoder later
        return xs, masks

    def forward_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor,
                       pos_emb: torch.Tensor,
                       mask_pad: torch.Tensor) -> torch.Tensor:
        for layer in self.encoders:
            xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad)
        return xs

    @torch.jit.ignore(drop=True)
    def forward_layers_checkpointed(self, xs: torch.Tensor,
                                    chunk_masks: torch.Tensor,
                                    pos_emb: torch.Tensor,
                                    mask_pad: torch.Tensor) -> torch.Tensor:
        for layer in self.encoders:
            xs, chunk_masks, _, _ = ckpt.checkpoint(layer.__call__, xs,
                                                    chunk_masks, pos_emb,
                                                    mask_pad)
        return xs

    def forward_chunk(
        self,
        xs: torch.Tensor,
        offset: int,
        required_cache_size: int,
        att_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
        cnn_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
        att_mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """ Forward just one chunk

        Args:
            xs (torch.Tensor): chunk input, with shape (b=1, time, mel-dim),
                where `time == (chunk_size - 1) * subsample_rate + \
                        subsample.right_context + 1`
            offset (int): current offset in encoder output timestamp
            required_cache_size (int): cache size required for next chunk
                computation
                >=0: actual cache size
                <0: means all history cache is required
            att_cache (torch.Tensor): cache tensor for KEY & VALUE in
                transformer/conformer attention, with shape
                (elayers, head, cache_t1, d_k * 2), where
                `head * d_k == hidden-dim` and
                `cache_t1 == chunk_size * num_decoding_left_chunks`.
            cnn_cache (torch.Tensor): cache tensor for cnn_module in conformer,
                (elayers, b=1, hidden-dim, cache_t2), where
                `cache_t2 == cnn.lorder - 1`

        Returns:
            torch.Tensor: output of current input xs,
                with shape (b=1, chunk_size, hidden-dim).
            torch.Tensor: new attention cache required for next chunk, with
                dynamic shape (elayers, head, ?, d_k * 2)
                depending on required_cache_size.
            torch.Tensor: new conformer cnn cache required for next chunk, with
                same shape as the original cnn_cache.

        """
        assert xs.size(0) == 1
        # tmp_masks is just for interface compatibility
        tmp_masks = torch.ones(1,
                               xs.size(1),
                               device=xs.device,
                               dtype=torch.bool)
        tmp_masks = tmp_masks.unsqueeze(1)
        if self.global_cmvn is not None:
            xs = self.global_cmvn(xs)
        # NOTE(xcsong): Before embed, shape(xs) is (b=1, time, mel-dim)
        xs, pos_emb, _ = self.embed(xs, tmp_masks, offset)
        # NOTE(xcsong): After embed, shape(xs) is (b=1, chunk_size, hidden-dim)
        elayers, cache_t1 = att_cache.size(0), att_cache.size(2)
        chunk_size = xs.size(1)
        attention_key_size = cache_t1 + chunk_size
        pos_emb = self.embed.position_encoding(offset=offset - cache_t1,
                                               size=attention_key_size)
        if required_cache_size < 0:
            next_cache_start = 0
        elif required_cache_size == 0:
            next_cache_start = attention_key_size
        else:
            next_cache_start = max(attention_key_size - required_cache_size, 0)
        r_att_cache = []
        r_cnn_cache = []
        for i, layer in enumerate(self.encoders):
            # NOTE(xcsong): Before layer.forward
            #   shape(att_cache[i:i + 1]) is (1, head, cache_t1, d_k * 2),
            #   shape(cnn_cache[i])       is (b=1, hidden-dim, cache_t2)
            xs, _, new_att_cache, new_cnn_cache = layer(
                xs,
                att_mask,
                pos_emb,
                att_cache=att_cache[i:i + 1] if elayers > 0 else att_cache,
                cnn_cache=cnn_cache[i] if cnn_cache.size(0) > 0 else cnn_cache)
            # NOTE(xcsong): After layer.forward
            #   shape(new_att_cache) is (1, head, attention_key_size, d_k * 2),
            #   shape(new_cnn_cache) is (b=1, hidden-dim, cache_t2)
            r_att_cache.append(new_att_cache[:, :, next_cache_start:, :])
            r_cnn_cache.append(new_cnn_cache.unsqueeze(0))
        if self.normalize_before:
            xs = self.after_norm(xs)

        # NOTE(xcsong): shape(r_att_cache) is (elayers, head, ?, d_k * 2),
        #   ? may be larger than cache_t1, it depends on required_cache_size
        r_att_cache = torch.cat(r_att_cache, dim=0)
        # NOTE(xcsong): shape(r_cnn_cache) is (e, b=1, hidden-dim, cache_t2)
        r_cnn_cache = torch.cat(r_cnn_cache, dim=0)

        return (xs, r_att_cache, r_cnn_cache)

    def forward_chunk_by_chunk(
        self,
        xs: torch.Tensor,
        decoding_chunk_size: int,
        num_decoding_left_chunks: int = -1,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """ Forward input chunk by chunk with chunk_size in a streaming
            fashion

        Here we should pay special attention to the computation cache in this
        streaming-style, chunk-by-chunk forward. Three things have to be
        taken into account in the current network:
            1. transformer/conformer encoder layers output cache
            2. convolution in conformer
            3. convolution in subsampling

        However, we don't implement a subsampling cache because:
            1. We can make the subsampling module output the right result by
               overlapping the input instead of caching left context, even
               though it wastes some computation; subsampling only takes a
               very small fraction of the computation in the whole model.
            2. Typically, there are several convolution layers with
               subsampling in the subsampling module, so it is tricky and
               complicated to cache across convolution layers that have
               different subsampling rates.
            3. Currently, nn.Sequential is used to stack all the convolution
               layers in subsampling; we would have to rewrite it to make it
               work with a cache, which is not preferred.
        Args:
            xs (torch.Tensor): (1, max_len, dim)
            decoding_chunk_size (int): decoding chunk size
        """
        assert decoding_chunk_size > 0
        # The model is trained with static or dynamic chunks
        assert self.static_chunk_size > 0 or self.use_dynamic_chunk
        subsampling = self.embed.subsampling_rate
        context = self.embed.right_context + 1  # Add current frame
        stride = subsampling * decoding_chunk_size
        decoding_window = (decoding_chunk_size - 1) * subsampling + context
        num_frames = xs.size(1)
        att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device)
        cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device)
        outputs = []
        offset = 0
        required_cache_size = decoding_chunk_size * num_decoding_left_chunks

        # Feed forward overlapping input step by step
        for cur in range(0, num_frames - context + 1, stride):
            end = min(cur + decoding_window, num_frames)
            chunk_xs = xs[:, cur:end, :]
            (y, att_cache,
             cnn_cache) = self.forward_chunk(chunk_xs, offset,
                                             required_cache_size, att_cache,
                                             cnn_cache)
            outputs.append(y)
            offset += y.size(1)
        ys = torch.cat(outputs, 1)
        masks = torch.ones((1, 1, ys.size(1)),
                           device=ys.device,
                           dtype=torch.bool)
        return ys, masks
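
The chunk loop above is easiest to follow with a short driver. Below is a minimal usage sketch, not code from this repo: `encoder` stands for an already-constructed, chunk-trained encoder from this file and `feats` for a feature tensor; both names are placeholders.

    import torch

    # Hedged sketch: stream features through the encoder chunk by chunk.
    with torch.no_grad():
        ys, masks = encoder.forward_chunk_by_chunk(
            feats,                        # (1, num_frames, feat_dim)
            decoding_chunk_size=16,       # chunk size after subsampling
            num_decoding_left_chunks=-1)  # -1 keeps the full attention cache
    # ys: (1, out_len, output_size); masks: (1, 1, out_len), all True.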


class TransformerEncoder(BaseEncoder):
    """Transformer encoder module."""

    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: str = "conv2d",
        pos_enc_layer_type: str = "abs_pos",
        normalize_before: bool = True,
        static_chunk_size: int = 0,
        use_dynamic_chunk: bool = False,
        global_cmvn: torch.nn.Module = None,
        use_dynamic_left_chunk: bool = False,
        key_bias: bool = True,
        selfattention_layer_type: str = "selfattn",
        activation_type: str = "relu",
        gradient_checkpointing: bool = False,
    ):
        """ Construct TransformerEncoder

        See Encoder for the meaning of each parameter.
        """
        super().__init__(input_size, output_size, attention_heads,
                         linear_units, num_blocks, dropout_rate,
                         positional_dropout_rate, attention_dropout_rate,
                         input_layer, pos_enc_layer_type, normalize_before,
                         static_chunk_size, use_dynamic_chunk, global_cmvn,
                         use_dynamic_left_chunk, gradient_checkpointing)
        activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]()
        self.encoders = torch.nn.ModuleList([
            TransformerEncoderLayer(
                output_size,
                COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](
                    attention_heads, output_size, attention_dropout_rate,
                    key_bias),
                PositionwiseFeedForward(output_size, linear_units,
                                        dropout_rate, activation),
                dropout_rate, normalize_before) for _ in range(num_blocks)
        ])


class ConformerEncoder(BaseEncoder):
    """Conformer encoder module."""

    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: str = "conv2d",
        pos_enc_layer_type: str = "rel_pos",
        normalize_before: bool = True,
        static_chunk_size: int = 0,
        use_dynamic_chunk: bool = False,
        global_cmvn: torch.nn.Module = None,
        use_dynamic_left_chunk: bool = False,
        positionwise_conv_kernel_size: int = 1,
        macaron_style: bool = True,
        selfattention_layer_type: str = "rel_selfattn",
        activation_type: str = "swish",
        use_cnn_module: bool = True,
        cnn_module_kernel: int = 15,
        causal: bool = False,
        cnn_module_norm: str = "batch_norm",
        key_bias: bool = True,
        gradient_checkpointing: bool = False,
    ):
        """Construct ConformerEncoder

        Args:
            input_size to use_dynamic_chunk, see in BaseEncoder
            positionwise_conv_kernel_size (int): Kernel size of positionwise
                conv1d layer.
            macaron_style (bool): Whether to use macaron style for
                positionwise layer.
            selfattention_layer_type (str): Encoder attention layer type,
                the parameter has no effect now, it's just kept for config
                compatibility.
            activation_type (str): Encoder activation function type.
            use_cnn_module (bool): Whether to use convolution module.
            cnn_module_kernel (int): Kernel size of convolution module.
            causal (bool): whether to use causal convolution or not.
            key_bias: whether to use bias in attention.linear_k, False for
                whisper models.
        """
        super().__init__(input_size, output_size, attention_heads,
                         linear_units, num_blocks, dropout_rate,
                         positional_dropout_rate, attention_dropout_rate,
                         input_layer, pos_enc_layer_type, normalize_before,
                         static_chunk_size, use_dynamic_chunk, global_cmvn,
                         use_dynamic_left_chunk, gradient_checkpointing)
        activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]()

        # self-attention module definition
        encoder_selfattn_layer_args = (
            attention_heads,
            output_size,
            attention_dropout_rate,
            key_bias,
        )
        # feed-forward module definition
        positionwise_layer_args = (
            output_size,
            linear_units,
            dropout_rate,
            activation,
        )
        # convolution module definition
        convolution_layer_args = (output_size, cnn_module_kernel, activation,
                                  cnn_module_norm, causal)

        self.encoders = torch.nn.ModuleList([
            ConformerEncoderLayer(
                output_size,
                COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](
                    *encoder_selfattn_layer_args),
                PositionwiseFeedForward(*positionwise_layer_args),
                PositionwiseFeedForward(
                    *positionwise_layer_args) if macaron_style else None,
                ConvolutionModule(
                    *convolution_layer_args) if use_cnn_module else None,
                dropout_rate,
                normalize_before,
            ) for _ in range(num_blocks)
        ])
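
For reference, a minimal construction-and-forward sketch for the encoder above. This is not repo code: the import path assumes the package root is importable as `cosyvoice`, the base class is assumed to keep the usual WeNet-style `(xs, xs_lens)` forward signature, and all unlisted arguments keep their defaults.

    import torch
    from cosyvoice.transformer.encoder import ConformerEncoder  # assumed import path

    # Small encoder: 80-dim features in, 256-dim hidden, 2 conformer blocks.
    encoder = ConformerEncoder(input_size=80, output_size=256, num_blocks=2,
                               input_layer="linear")
    xs = torch.randn(2, 100, 80)        # (batch, time, mel-dim)
    xs_lens = torch.tensor([100, 80])   # valid frames per utterance
    ys, masks = encoder(xs, xs_lens)
    print(ys.shape)  # torch.Size([2, 100, 256]); "linear" keeps the time axis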


class BlockConformerEncoder(BaseEncoder):
    """Block conformer encoder module."""

    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: str = "conv2d",
        pos_enc_layer_type: str = "rel_pos",
        normalize_before: bool = True,
        static_chunk_size: int = 0,
        use_dynamic_chunk: bool = False,
        global_cmvn: torch.nn.Module = None,
        use_dynamic_left_chunk: bool = False,
        positionwise_conv_kernel_size: int = 1,
        macaron_style: bool = True,
        selfattention_layer_type: str = "rel_selfattn",
        activation_type: str = "swish",
        use_cnn_module: bool = True,
        cnn_module_kernel: int = 15,
        causal: bool = False,
        cnn_module_norm: str = "batch_norm",
        key_bias: bool = True,
        gradient_checkpointing: bool = False,
        block_size=25,
    ):
        """Construct BlockConformerEncoder

        Args:
            input_size to use_dynamic_chunk, see in BaseEncoder
            positionwise_conv_kernel_size (int): Kernel size of positionwise
                conv1d layer.
            macaron_style (bool): Whether to use macaron style for
                positionwise layer.
            selfattention_layer_type (str): Encoder attention layer type,
                the parameter has no effect now, it's just kept for config
                compatibility.
            activation_type (str): Encoder activation function type.
            use_cnn_module (bool): Whether to use convolution module.
            cnn_module_kernel (int): Kernel size of convolution module.
            causal (bool): whether to use causal convolution or not.
            key_bias: whether to use bias in attention.linear_k, False for
                whisper models.
        """
        super().__init__(input_size, output_size, attention_heads,
                         linear_units, num_blocks, dropout_rate,
                         positional_dropout_rate, attention_dropout_rate,
                         input_layer, pos_enc_layer_type, normalize_before,
                         static_chunk_size, use_dynamic_chunk, global_cmvn,
                         use_dynamic_left_chunk, gradient_checkpointing)
        activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]()

        # self-attention module definition
        encoder_selfattn_layer_args = (
            attention_heads,
            output_size,
            attention_dropout_rate,
            key_bias,
            block_size,
        )
        # feed-forward module definition
        positionwise_layer_args = (
            output_size,
            linear_units,
            dropout_rate,
            activation,
        )
        # convolution module definition
        convolution_layer_args = (output_size, cnn_module_kernel, activation,
                                  cnn_module_norm, causal)

        self.encoders = torch.nn.ModuleList([
            ConformerEncoderLayer(
                output_size,
                COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](
                    *encoder_selfattn_layer_args),
                PositionwiseFeedForward(*positionwise_layer_args),
                PositionwiseFeedForward(
                    *positionwise_layer_args) if macaron_style else None,
                ConvolutionModule(
                    *convolution_layer_args) if use_cnn_module else None,
                dropout_rate,
                normalize_before,
            ) for _ in range(num_blocks)
        ])
        self.block_size = block_size
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/transformer/encoder_layer.py
ADDED
@@ -0,0 +1,236 @@
# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
#               2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from ESPnet(https://github.com/espnet/espnet)
"""Encoder self-attention layer definition."""

from typing import Optional, Tuple

import torch
from torch import nn


class TransformerEncoderLayer(nn.Module):
    """Encoder layer module.

    Args:
        size (int): Input dimension.
        self_attn (torch.nn.Module): Self-attention module instance.
            `MultiHeadedAttention` or `RelPositionMultiHeadedAttention`
            instance can be used as the argument.
        feed_forward (torch.nn.Module): Feed-forward module instance.
            `PositionwiseFeedForward` instance can be used as the argument.
        dropout_rate (float): Dropout rate.
        normalize_before (bool):
            True: use layer_norm before each sub-block.
            False: use layer_norm after each sub-block.
    """

    def __init__(
        self,
        size: int,
        self_attn: torch.nn.Module,
        feed_forward: torch.nn.Module,
        dropout_rate: float,
        normalize_before: bool = True,
    ):
        """Construct an EncoderLayer object."""
        super().__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.norm1 = nn.LayerNorm(size, eps=1e-5)
        self.norm2 = nn.LayerNorm(size, eps=1e-5)
        self.dropout = nn.Dropout(dropout_rate)
        self.size = size
        self.normalize_before = normalize_before

    def forward(
        self,
        x: torch.Tensor,
        mask: torch.Tensor,
        pos_emb: torch.Tensor,
        mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
        att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
        cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute encoded features.

        Args:
            x (torch.Tensor): (#batch, time, size)
            mask (torch.Tensor): Mask tensor for the input (#batch, time, time),
                (0, 0, 0) means fake mask.
            pos_emb (torch.Tensor): just for interface compatibility
                with ConformerEncoderLayer
            mask_pad (torch.Tensor): not used in the transformer layer,
                just for a unified API with the conformer layer.
            att_cache (torch.Tensor): Cache tensor of the KEY & VALUE
                (#batch=1, head, cache_t1, d_k * 2), head * d_k == size.
            cnn_cache (torch.Tensor): Convolution cache in the conformer layer
                (#batch=1, size, cache_t2), not used here, it's for interface
                compatibility with ConformerEncoderLayer.
        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, time, time).
            torch.Tensor: att_cache tensor,
                (#batch=1, head, cache_t1 + time, d_k * 2).
            torch.Tensor: cnn_cache tensor (#batch=1, size, cache_t2).

        """
        residual = x
        if self.normalize_before:
            x = self.norm1(x)
        x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb=pos_emb,
                                              cache=att_cache)
        x = residual + self.dropout(x_att)
        if not self.normalize_before:
            x = self.norm1(x)

        residual = x
        if self.normalize_before:
            x = self.norm2(x)
        x = residual + self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm2(x)

        fake_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
        return x, mask, new_att_cache, fake_cnn_cache


class ConformerEncoderLayer(nn.Module):
    """Encoder layer module.
    Args:
        size (int): Input dimension.
        self_attn (torch.nn.Module): Self-attention module instance.
            `MultiHeadedAttention` or `RelPositionMultiHeadedAttention`
            instance can be used as the argument.
        feed_forward (torch.nn.Module): Feed-forward module instance.
            `PositionwiseFeedForward` instance can be used as the argument.
        feed_forward_macaron (torch.nn.Module): Additional feed-forward module
            instance.
            `PositionwiseFeedForward` instance can be used as the argument.
        conv_module (torch.nn.Module): Convolution module instance.
            `ConvolutionModule` instance can be used as the argument.
        dropout_rate (float): Dropout rate.
        normalize_before (bool):
            True: use layer_norm before each sub-block.
            False: use layer_norm after each sub-block.
    """

    def __init__(
        self,
        size: int,
        self_attn: torch.nn.Module,
        feed_forward: Optional[nn.Module] = None,
        feed_forward_macaron: Optional[nn.Module] = None,
        conv_module: Optional[nn.Module] = None,
        dropout_rate: float = 0.1,
        normalize_before: bool = True,
    ):
        """Construct an EncoderLayer object."""
        super().__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.feed_forward_macaron = feed_forward_macaron
        self.conv_module = conv_module
        self.norm_ff = nn.LayerNorm(size, eps=1e-5)  # for the FNN module
        self.norm_mha = nn.LayerNorm(size, eps=1e-5)  # for the MHA module
        if feed_forward_macaron is not None:
            self.norm_ff_macaron = nn.LayerNorm(size, eps=1e-5)
            self.ff_scale = 0.5
        else:
            self.ff_scale = 1.0
        if self.conv_module is not None:
            self.norm_conv = nn.LayerNorm(size, eps=1e-5)  # for the CNN module
            self.norm_final = nn.LayerNorm(
                size, eps=1e-5)  # for the final output of the block
        self.dropout = nn.Dropout(dropout_rate)
        self.size = size
        self.normalize_before = normalize_before

    def forward(
        self,
        x: torch.Tensor,
        mask: torch.Tensor,
        pos_emb: torch.Tensor,
        mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
        att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
        cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute encoded features.

        Args:
            x (torch.Tensor): (#batch, time, size)
            mask (torch.Tensor): Mask tensor for the input (#batch, time, time),
                (0, 0, 0) means fake mask.
            pos_emb (torch.Tensor): positional encoding, must not be None
                for ConformerEncoderLayer.
            mask_pad (torch.Tensor): batch padding mask used for the conv
                module, (#batch, 1, time), (0, 0, 0) means fake mask.
            att_cache (torch.Tensor): Cache tensor of the KEY & VALUE
                (#batch=1, head, cache_t1, d_k * 2), head * d_k == size.
            cnn_cache (torch.Tensor): Convolution cache in the conformer layer
                (#batch=1, size, cache_t2)
        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, time, time).
            torch.Tensor: att_cache tensor,
                (#batch=1, head, cache_t1 + time, d_k * 2).
            torch.Tensor: cnn_cache tensor (#batch, size, cache_t2).
        """

        # whether to use macaron style
        if self.feed_forward_macaron is not None:
            residual = x
            if self.normalize_before:
                x = self.norm_ff_macaron(x)
            x = residual + self.ff_scale * self.dropout(
                self.feed_forward_macaron(x))
            if not self.normalize_before:
                x = self.norm_ff_macaron(x)

        # multi-headed self-attention module
        residual = x
        if self.normalize_before:
            x = self.norm_mha(x)
        x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb,
                                              att_cache)
        x = residual + self.dropout(x_att)
        if not self.normalize_before:
            x = self.norm_mha(x)

        # convolution module
        # Fake new cnn cache here, and then change it in conv_module
        new_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
        if self.conv_module is not None:
            residual = x
            if self.normalize_before:
                x = self.norm_conv(x)
            x, new_cnn_cache = self.conv_module(x, mask_pad, cnn_cache)
            x = residual + self.dropout(x)

            if not self.normalize_before:
                x = self.norm_conv(x)

        # feed forward module
        residual = x
        if self.normalize_before:
            x = self.norm_ff(x)

        x = residual + self.ff_scale * self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm_ff(x)

        if self.conv_module is not None:
            x = self.norm_final(x)

        return x, mask, new_att_cache, new_cnn_cache
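
A single-layer smoke test helps make the cache plumbing above concrete. This is a hedged sketch, not repo code: the import paths assume the package root is `cosyvoice`, and the attention/FFN constructor signatures are assumed to match how the encoders in encoder.py instantiate them.

    import torch
    from cosyvoice.transformer.attention import MultiHeadedAttention  # assumed path
    from cosyvoice.transformer.encoder_layer import TransformerEncoderLayer
    from cosyvoice.transformer.positionwise_feed_forward import PositionwiseFeedForward

    size, heads = 256, 4
    layer = TransformerEncoderLayer(size,
                                    MultiHeadedAttention(heads, size, 0.1),
                                    PositionwiseFeedForward(size, 2048, 0.1),
                                    dropout_rate=0.1)
    x = torch.randn(1, 50, size)
    mask = torch.ones(1, 50, 50, dtype=torch.bool)  # full self-attention
    pos_emb = torch.empty(0)  # ignored by the plain transformer layer
    x_out, mask_out, att_cache, cnn_cache = layer(x, mask, pos_emb)
    print(x_out.shape)      # torch.Size([1, 50, 256])
    print(cnn_cache.shape)  # torch.Size([0, 0, 0]): fake cache, no conv module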
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/transformer/label_smoothing_loss.py
ADDED
@@ -0,0 +1,96 @@
# Copyright (c) 2019 Shigeki Karita
#               2020 Mobvoi Inc (Binbin Zhang)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Label smoothing module."""

import torch
from torch import nn


class LabelSmoothingLoss(nn.Module):
    """Label-smoothing loss.

    In a standard CE loss, the label's data distribution is:
        [0,1,2] ->
        [
            [1.0, 0.0, 0.0],
            [0.0, 1.0, 0.0],
            [0.0, 0.0, 1.0],
        ]

    In the smoothed version of CE loss, some probability mass
    is taken from the true label prob (1.0) and divided
    among the other labels.

    e.g.
        smoothing=0.1
        [0,1,2] ->
        [
            [0.9, 0.05, 0.05],
            [0.05, 0.9, 0.05],
            [0.05, 0.05, 0.9],
        ]

    Args:
        size (int): the number of classes
        padding_idx (int): padding class id which will be ignored for loss
        smoothing (float): smoothing rate (0.0 means the conventional CE)
        normalize_length (bool):
            normalize loss by sequence length if True
            normalize loss by batch size if False
    """

    def __init__(self,
                 size: int,
                 padding_idx: int,
                 smoothing: float,
                 normalize_length: bool = False):
        """Construct a LabelSmoothingLoss object."""
        super(LabelSmoothingLoss, self).__init__()
        self.criterion = nn.KLDivLoss(reduction="none")
        self.padding_idx = padding_idx
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.size = size
        self.normalize_length = normalize_length

    def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """Compute loss between x and target.

        The model output and data label tensors are flattened to
        (batch*seqlen, class) shape and a mask is applied to the
        padding part, which should not contribute to the loss.

        Args:
            x (torch.Tensor): prediction (batch, seqlen, class)
            target (torch.Tensor):
                target signal masked with self.padding_idx (batch, seqlen)
        Returns:
            loss (torch.Tensor): The KL loss, scalar float value
        """
        assert x.size(2) == self.size
        batch_size = x.size(0)
        x = x.view(-1, self.size)
        target = target.view(-1)
        # use zeros_like instead of torch.no_grad() for true_dist,
        # since no_grad() can not be exported by JIT
        true_dist = torch.zeros_like(x)
        true_dist.fill_(self.smoothing / (self.size - 1))
        ignore = target == self.padding_idx  # (B,)
        total = len(target) - ignore.sum().item()
        target = target.masked_fill(ignore, 0)  # avoid -1 index
        true_dist.scatter_(1, target.unsqueeze(1), self.confidence)
        kl = self.criterion(torch.log_softmax(x, dim=1), true_dist)
        denom = total if self.normalize_length else batch_size
        return kl.masked_fill(ignore.unsqueeze(1), 0).sum() / denom
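
A hedged usage sketch for the loss above; the class count, padding id, and shapes are illustrative, not from the repo:

    import torch

    criterion = LabelSmoothingLoss(size=5, padding_idx=-1, smoothing=0.1)
    logits = torch.randn(2, 7, 5)          # (batch, seqlen, class)
    target = torch.randint(0, 5, (2, 7))   # (batch, seqlen)
    target[0, 5:] = -1                     # mark trailing padding in utterance 0
    loss = criterion(logits, target)       # scalar, normalized by batch size
    print(loss.item())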
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/transformer/positionwise_feed_forward.py
ADDED
@@ -0,0 +1,115 @@
# Copyright (c) 2019 Shigeki Karita
#               2020 Mobvoi Inc (Binbin Zhang)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Positionwise feed forward layer definition."""

import torch


class PositionwiseFeedForward(torch.nn.Module):
    """Positionwise feed forward layer.

    The feed forward transform is applied at each position of the sequence.
    The output dim is the same as the input dim.

    Args:
        idim (int): Input dimension.
        hidden_units (int): The number of hidden units.
        dropout_rate (float): Dropout rate.
        activation (torch.nn.Module): Activation function
    """

    def __init__(
        self,
        idim: int,
        hidden_units: int,
        dropout_rate: float,
        activation: torch.nn.Module = torch.nn.ReLU(),
    ):
        """Construct a PositionwiseFeedForward object."""
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = torch.nn.Linear(idim, hidden_units)
        self.activation = activation
        self.dropout = torch.nn.Dropout(dropout_rate)
        self.w_2 = torch.nn.Linear(hidden_units, idim)

    def forward(self, xs: torch.Tensor) -> torch.Tensor:
        """Forward function.

        Args:
            xs: input tensor (B, L, D)
        Returns:
            output tensor, (B, L, D)
        """
        return self.w_2(self.dropout(self.activation(self.w_1(xs))))


class MoEFFNLayer(torch.nn.Module):
    """
    Mixture of experts with positionwise feed forward layers
    See also figure 1 in https://arxiv.org/pdf/2305.15663.pdf
    The output dim is the same as the input dim.

    Modified from https://github.com/Lightning-AI/lit-gpt/pull/823
    https://github.com/mistralai/mistral-src/blob/b46d6/moe_one_file_ref.py#L203-L219
    Args:
        n_expert: number of experts.
        n_expert_per_token: The actual number of experts used for each frame
        idim (int): Input dimension.
        hidden_units (int): The number of hidden units.
        dropout_rate (float): Dropout rate.
        activation (torch.nn.Module): Activation function
    """

    def __init__(
        self,
        n_expert: int,
        n_expert_per_token: int,
        idim: int,
        hidden_units: int,
        dropout_rate: float,
        activation: torch.nn.Module = torch.nn.ReLU(),
    ):
        super(MoEFFNLayer, self).__init__()
        self.gate = torch.nn.Linear(idim, n_expert, bias=False)
        self.experts = torch.nn.ModuleList(
            PositionwiseFeedForward(idim, hidden_units, dropout_rate,
                                    activation) for _ in range(n_expert))
        self.n_expert_per_token = n_expert_per_token

    def forward(self, xs: torch.Tensor) -> torch.Tensor:
        """Forward function.
        Args:
            xs: input tensor (B, L, D)
        Returns:
            output tensor, (B, L, D)

        """
        B, L, D = xs.size(
        )  # batch size, sequence length, embedding dimension (idim)
        xs = xs.view(-1, D)  # (B*L, D)
        router = self.gate(xs)  # (B*L, n_expert)
        logits, indices = torch.topk(
            router, self.n_expert_per_token
        )  # logits: (B*L, n_expert_per_token), indices: (B*L, n_expert_per_token)
        weights = torch.nn.functional.softmax(
            logits, dim=1,
            dtype=torch.float).to(dtype=xs.dtype)  # (B*L, n_expert_per_token)
        output = torch.zeros_like(xs)  # (B*L, D)
        for i, expert in enumerate(self.experts):
            mask = indices == i
            batch_idx, ith_expert = torch.where(mask)
            output[batch_idx] += weights[batch_idx, ith_expert, None] * expert(
                xs[batch_idx])
        return output.view(B, L, D)
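
A hedged routing sketch for MoEFFNLayer; the sizes below are illustrative:

    import torch

    # Route each of the B*L frames to its top-2 of 4 experts.
    moe = MoEFFNLayer(n_expert=4, n_expert_per_token=2,
                      idim=256, hidden_units=1024, dropout_rate=0.1)
    xs = torch.randn(2, 10, 256)
    ys = moe(xs)
    print(ys.shape)  # torch.Size([2, 10, 256]): shape-preserving, like the dense FFN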
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/transformer/subsampling.py
ADDED
@@ -0,0 +1,383 @@
# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
#               2024 Alibaba Inc (Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from ESPnet(https://github.com/espnet/espnet)
"""Subsampling layer definition."""

from typing import Tuple, Union

import torch


class BaseSubsampling(torch.nn.Module):

    def __init__(self):
        super().__init__()
        self.right_context = 0
        self.subsampling_rate = 1

    def position_encoding(self, offset: Union[int, torch.Tensor],
                          size: int) -> torch.Tensor:
        return self.pos_enc.position_encoding(offset, size)


class EmbedinigNoSubsampling(BaseSubsampling):
    """Embedding input without subsampling
    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        super().__init__()
        self.embed = torch.nn.Embedding(idim, odim)
        self.pos_enc = pos_enc_class

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Input x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: embedded input tensor (#batch, time', odim),
                where time' = time .
            torch.Tensor: positional encoding
            torch.Tensor: input mask (#batch, 1, time'),
                where time' = time .

        """
        x = self.embed(x)
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask


class LinearNoSubsampling(BaseSubsampling):
    """Linear transform the input without subsampling

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.

    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct a linear object."""
        super().__init__()
        self.out = torch.nn.Sequential(
            torch.nn.Linear(idim, odim),
            torch.nn.LayerNorm(odim, eps=1e-5),
            torch.nn.Dropout(dropout_rate),
        )
        self.pos_enc = pos_enc_class
        self.right_context = 0
        self.subsampling_rate = 1

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Input x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: linear input tensor (#batch, time', odim),
                where time' = time .
            torch.Tensor: positional encoding
            torch.Tensor: linear input mask (#batch, 1, time'),
                where time' = time .

        """
        x = self.out(x)
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask


class Conv1dSubsampling2(BaseSubsampling):
    """Convolutional 1D subsampling (to 1/2 length).
    It is designed for Whisper, ref:
    https://github.com/openai/whisper/blob/main/whisper/model.py

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.

    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct a Conv1dSubsampling2 object."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv1d(idim, odim, kernel_size=3, padding=1),
            torch.nn.GELU(),
            torch.nn.Conv1d(odim, odim, kernel_size=3, stride=2, padding=1),
            torch.nn.GELU(),
        )
        self.pos_enc = pos_enc_class
        # The right context for every conv layer is computed by:
        # (kernel_size - 1) * frame_rate_of_this_layer
        self.subsampling_rate = 2
        # 4 = (3 - 1) * 1 + (3 - 1) * 1
        self.right_context = 4

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Subsample x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 2.
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 2.
            torch.Tensor: positional encoding

        """
        time = x.size(1)
        x = x.transpose(1, 2)  # (b, f, t)
        x = self.conv(x)
        x = x.transpose(1, 2)  # (b, t, f)
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask[:, :, (time + 1) % 2::2]


class Conv2dSubsampling4(BaseSubsampling):
    """Convolutional 2D subsampling (to 1/4 length).

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.

    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct a Conv2dSubsampling4 object."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 2),
            torch.nn.ReLU(),
        )
        self.out = torch.nn.Sequential(
            torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim))
        self.pos_enc = pos_enc_class
        # The right context for every conv layer is computed by:
        # (kernel_size - 1) * frame_rate_of_this_layer
        self.subsampling_rate = 4
        # 6 = (3 - 1) * 1 + (3 - 1) * 2
        self.right_context = 6

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Subsample x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 4.
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 4.
            torch.Tensor: positional encoding

        """
        x = x.unsqueeze(1)  # (b, c=1, t, f)
        x = self.conv(x)
        b, c, t, f = x.size()
        x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2]


class Conv2dSubsampling6(BaseSubsampling):
    """Convolutional 2D subsampling (to 1/6 length).
    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
        pos_enc_class (torch.nn.Module): Custom position encoding layer.
    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct a Conv2dSubsampling6 object."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 5, 3),
            torch.nn.ReLU(),
        )
        self.linear = torch.nn.Linear(odim * (((idim - 1) // 2 - 2) // 3),
                                      odim)
        self.pos_enc = pos_enc_class
        # 10 = (3 - 1) * 1 + (5 - 1) * 2
        self.subsampling_rate = 6
        self.right_context = 10

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Subsample x.
        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 6.
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 6.
            torch.Tensor: positional encoding
        """
        x = x.unsqueeze(1)  # (b, c, t, f)
        x = self.conv(x)
        b, c, t, f = x.size()
        x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f))
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask[:, :, 2::2][:, :, 4::3]


class Conv2dSubsampling8(BaseSubsampling):
    """Convolutional 2D subsampling (to 1/8 length).

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.

    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct a Conv2dSubsampling8 object."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 2),
            torch.nn.ReLU(),
        )
        self.linear = torch.nn.Linear(
            odim * ((((idim - 1) // 2 - 1) // 2 - 1) // 2), odim)
        self.pos_enc = pos_enc_class
        self.subsampling_rate = 8
        # 14 = (3 - 1) * 1 + (3 - 1) * 2 + (3 - 1) * 4
        self.right_context = 14

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Subsample x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 8.
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 8.
            torch.Tensor: positional encoding
        """
        x = x.unsqueeze(1)  # (b, c, t, f)
        x = self.conv(x)
        b, c, t, f = x.size()
        x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f))
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2][:, :, 2::2]


class LegacyLinearNoSubsampling(BaseSubsampling):
    """Linear transform the input without subsampling

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.

    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct a linear object."""
        super().__init__()
        self.out = torch.nn.Sequential(
            torch.nn.Linear(idim, odim),
            torch.nn.LayerNorm(odim, eps=1e-5),
            torch.nn.Dropout(dropout_rate),
            torch.nn.ReLU(),
        )
        self.pos_enc = pos_enc_class
        self.right_context = 0
        self.subsampling_rate = 1

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Input x.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: linear input tensor (#batch, time', odim),
                where time' = time .
            torch.Tensor: positional encoding
            torch.Tensor: linear input mask (#batch, 1, time'),
                where time' = time .

        """
        x = self.out(x)
        x, pos_emb = self.pos_enc(x, offset)
        return x, pos_emb, x_mask
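
A quick shape check makes the subsampling arithmetic above concrete. This is a hedged sketch, not repo code; the import paths assume the package root is `cosyvoice` and that `PositionalEncoding(d_model, dropout_rate)` is the constructor defined in embedding.py.

    import torch
    from cosyvoice.transformer.embedding import PositionalEncoding  # assumed path
    from cosyvoice.transformer.subsampling import Conv2dSubsampling4

    sub = Conv2dSubsampling4(idim=80, odim=256, dropout_rate=0.1,
                             pos_enc_class=PositionalEncoding(256, 0.1))
    x = torch.randn(1, 100, 80)                       # (batch, time, idim)
    x_mask = torch.ones(1, 1, 100, dtype=torch.bool)
    y, pos_emb, y_mask = sub(x, x_mask)
    print(y.shape, y_mask.shape)  # (1, 24, 256), (1, 1, 24): two stride-2 convs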
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/__init__.py
ADDED
File without changes
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/block_mask_util.py
ADDED
@@ -0,0 +1,34 @@
import torch


def create_grid_mask(seq_length, trunck_length, fill_triangle):
    assert seq_length > 0

    # First create a grid mask, without considering seen_length:
    if fill_triangle:
        mask = 1 - torch.triu(torch.ones(seq_length, seq_length), diagonal=1)
        # the lower triangle and the main diagonal are all 1
    else:
        mask = torch.zeros(seq_length, seq_length)

    for i in range(seq_length):
        trunck_idx = i // trunck_length
        trunck_start = trunck_idx * trunck_length
        trunck_end = trunck_length + trunck_start
        mask[i][trunck_start:trunck_end] = 1

    return mask


if __name__ == "__main__":
    mask = create_grid_mask(seq_length=8, trunck_length=3, fill_triangle=True).int()
    print(mask)
    # tensor([[1, 1, 1, 0, 0, 0, 0, 0],
    #         [1, 1, 1, 0, 0, 0, 0, 0],
    #         [1, 1, 1, 0, 0, 0, 0, 0],
    #         [1, 1, 1, 1, 1, 1, 0, 0],
    #         [1, 1, 1, 1, 1, 1, 0, 0],
    #         [1, 1, 1, 1, 1, 1, 0, 0],
    #         [1, 1, 1, 1, 1, 1, 1, 1],
    #         [1, 1, 1, 1, 1, 1, 1, 1]]
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/class_utils.py
ADDED
@@ -0,0 +1,72 @@
| 1 |
+
# Copyright [2023-11-28] <sxc19@mails.tsinghua.edu.cn, Xingchen Song>
|
| 2 |
+
# 2024 Alibaba Inc (authors: Xiang Lyu)
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
import torch
|
| 16 |
+
|
| 17 |
+
from cosyvoice.transformer.activation import Swish
|
| 18 |
+
from cosyvoice.transformer.subsampling import (
|
| 19 |
+
LinearNoSubsampling,
|
| 20 |
+
EmbedinigNoSubsampling,
|
| 21 |
+
Conv1dSubsampling2,
|
| 22 |
+
Conv2dSubsampling4,
|
| 23 |
+
Conv2dSubsampling6,
|
| 24 |
+
Conv2dSubsampling8,
|
| 25 |
+
)
|
| 26 |
+
from cosyvoice.transformer.embedding import (PositionalEncoding,
|
| 27 |
+
RelPositionalEncoding,
|
| 28 |
+
WhisperPositionalEncoding,
|
| 29 |
+
LearnablePositionalEncoding,
|
| 30 |
+
NoPositionalEncoding)
|
| 31 |
+
from cosyvoice.transformer.attention import (MultiHeadedAttention,
|
| 32 |
+
RelPositionMultiHeadedAttention,
|
| 33 |
+
BlockRelPositionMultiHeadedAttention)
|
| 34 |
+
from cosyvoice.transformer.embedding import EspnetRelPositionalEncoding
|
| 35 |
+
from cosyvoice.transformer.subsampling import LegacyLinearNoSubsampling
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
COSYVOICE_ACTIVATION_CLASSES = {
|
| 39 |
+
"hardtanh": torch.nn.Hardtanh,
|
| 40 |
+
"tanh": torch.nn.Tanh,
|
| 41 |
+
"relu": torch.nn.ReLU,
|
| 42 |
+
"selu": torch.nn.SELU,
|
| 43 |
+
"swish": getattr(torch.nn, "SiLU", Swish),
|
| 44 |
+
"gelu": torch.nn.GELU,
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
COSYVOICE_SUBSAMPLE_CLASSES = {
|
| 48 |
+
"linear": LinearNoSubsampling,
|
| 49 |
+
"linear_legacy": LegacyLinearNoSubsampling,
|
| 50 |
+
"embed": EmbedinigNoSubsampling,
|
| 51 |
+
"conv1d2": Conv1dSubsampling2,
|
| 52 |
+
"conv2d": Conv2dSubsampling4,
|
| 53 |
+
"conv2d6": Conv2dSubsampling6,
|
| 54 |
+
"conv2d8": Conv2dSubsampling8,
|
| 55 |
+
'paraformer_dummy': torch.nn.Identity
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
COSYVOICE_EMB_CLASSES = {
|
| 59 |
+
"embed": PositionalEncoding,
|
| 60 |
+
"abs_pos": PositionalEncoding,
|
| 61 |
+
"rel_pos": RelPositionalEncoding,
|
| 62 |
+
"rel_pos_espnet": EspnetRelPositionalEncoding,
|
| 63 |
+
"no_pos": NoPositionalEncoding,
|
| 64 |
+
"abs_pos_whisper": WhisperPositionalEncoding,
|
| 65 |
+
"embed_learnable_pe": LearnablePositionalEncoding,
|
| 66 |
+
}
|
| 67 |
+
|
| 68 |
+
COSYVOICE_ATTENTION_CLASSES = {
|
| 69 |
+
"selfattn": MultiHeadedAttention,
|
| 70 |
+
"rel_selfattn": RelPositionMultiHeadedAttention,
|
| 71 |
+
"block_rel_selfattn": BlockRelPositionMultiHeadedAttention,
|
| 72 |
+
}
|
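# A minimal sketch (assumed usage, not code from the repo) of how these
# registries are typically consumed: a config string selects the class, which
# the caller then instantiates with its own kwargs.
input_layer_cls = COSYVOICE_SUBSAMPLE_CLASSES["linear"]       # -> LinearNoSubsampling
pos_enc_cls = COSYVOICE_EMB_CLASSES["rel_pos_espnet"]         # -> EspnetRelPositionalEncoding
activation = COSYVOICE_ACTIVATION_CLASSES["swish"]()          # activations take no args here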
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/common.py
ADDED
@@ -0,0 +1,103 @@
# Copyright (c) 2020 Mobvoi Inc (Binbin Zhang)
#               2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from ESPnet(https://github.com/espnet/espnet)
"""Utility functions for Transformer."""

from typing import List

import torch

IGNORE_ID = -1


def pad_list(xs: List[torch.Tensor], pad_value: int):
    """Perform padding for the list of tensors.

    Args:
        xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].
        pad_value (float): Value for padding.

    Returns:
        Tensor: Padded tensor (B, Tmax, `*`).

    Examples:
        >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]
        >>> x
        [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]
        >>> pad_list(x, 0)
        tensor([[1., 1., 1., 1.],
                [1., 1., 0., 0.],
                [1., 0., 0., 0.]])

    """
    max_len = max([len(item) for item in xs])
    batchs = len(xs)
    ndim = xs[0].ndim
    if ndim == 1:
        pad_res = torch.zeros(batchs,
                              max_len,
                              dtype=xs[0].dtype,
                              device=xs[0].device)
    elif ndim == 2:
        pad_res = torch.zeros(batchs,
                              max_len,
                              xs[0].shape[1],
                              dtype=xs[0].dtype,
                              device=xs[0].device)
    elif ndim == 3:
        pad_res = torch.zeros(batchs,
                              max_len,
                              xs[0].shape[1],
                              xs[0].shape[2],
                              dtype=xs[0].dtype,
                              device=xs[0].device)
    else:
        raise ValueError(f"Unsupported ndim: {ndim}")
    pad_res.fill_(pad_value)
    for i in range(batchs):
        pad_res[i, :len(xs[i])] = xs[i]
    return pad_res


def th_accuracy(pad_outputs: torch.Tensor, pad_targets: torch.Tensor,
                ignore_label: int) -> torch.Tensor:
    """Calculate accuracy.

    Args:
        pad_outputs (Tensor): Prediction tensors (B * Lmax, D).
        pad_targets (LongTensor): Target label tensors (B, Lmax).
        ignore_label (int): Ignore label id.

    Returns:
        torch.Tensor: Accuracy value (0.0 - 1.0).

    """
    pad_pred = pad_outputs.view(pad_targets.size(0), pad_targets.size(1),
                                pad_outputs.size(1)).argmax(2)
    mask = pad_targets != ignore_label
    numerator = torch.sum(
        pad_pred.masked_select(mask) == pad_targets.masked_select(mask))
    denominator = torch.sum(mask)
    return (numerator / denominator).detach()


def get_padding(kernel_size, dilation=1):
    return int((kernel_size * dilation - dilation) / 2)


def init_weights(m, mean=0.0, std=0.01):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)
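# A short usage sketch (illustrative assumption, not part of the file):
# pad_list collates variable-length sequences, and th_accuracy scores the
# flattened logits against the padded targets while skipping IGNORE_ID slots.
import torch

seqs = [torch.tensor([5, 6, 7]), torch.tensor([8])]
targets = pad_list(seqs, IGNORE_ID)    # shape (2, 3), padded with -1
logits = torch.randn(2 * 3, 10)        # (B * Lmax, vocab)
acc = th_accuracy(logits, targets, ignore_label=IGNORE_ID)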
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/executor.py
ADDED
@@ -0,0 +1,132 @@
# Copyright (c) 2020 Mobvoi Inc (Binbin Zhang)
#               2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from contextlib import nullcontext
import os

import torch
import torch.distributed as dist
import tqdm

from cosyvoice.utils.train_utils import update_parameter_and_lr, log_per_step, log_per_save, batch_forward, batch_backward, save_model, cosyvoice_join


class Executor:

    def __init__(self):
        self.step = 0
        self.epoch = 0
        self.rank = int(os.environ.get('RANK', 0))
        self.device = torch.device('cuda:{}'.format(self.rank))

    def train_one_epoc(self, model, optimizer, scheduler, train_data_loader, cv_data_loader, writer, info_dict, group_join):
        '''Train one epoch.'''

        lr = optimizer.param_groups[0]['lr']
        logging.info('Epoch {} TRAIN info lr {} rank {}'.format(self.epoch, lr, self.rank))
        logging.info('using accumulate grad, new batch size is {} times'
                     ' larger than before'.format(info_dict['accum_grad']))
        # A context manager to be used in conjunction with an instance of
        # torch.nn.parallel.DistributedDataParallel to be able to train
        # with uneven inputs across participating processes.
        model.train()
        model_context = model.join if info_dict['train_engine'] == 'torch_ddp' else nullcontext
        with model_context():
            for batch_idx, batch_dict in tqdm.tqdm(enumerate(train_data_loader)):
                info_dict["tag"] = "TRAIN"
                info_dict["step"] = self.step
                info_dict["epoch"] = self.epoch
                info_dict["batch_idx"] = batch_idx
                if cosyvoice_join(group_join, info_dict):
                    break
                # Disable gradient synchronizations across DDP processes.
                # Within this context, gradients will be accumulated on module
                # variables, which will later be synchronized.
                if info_dict['train_engine'] == 'torch_ddp' and (batch_idx + 1) % info_dict["accum_grad"] != 0:
                    context = model.no_sync
                # Used for single gpu training and DDP gradient synchronization
                # processes.
                else:
                    context = nullcontext

                new_batch_dict = {
                    "speech_token": batch_dict["speech_token"],
                    "speech_token_len": batch_dict["speech_token_len"],
                    "speech_feat": batch_dict["speech_feat"],
                    "speech_feat_len": batch_dict["speech_feat_len"],
                    "embedding": batch_dict["embedding"],
                }

                with context():
                    info_dict = batch_forward(model, new_batch_dict, info_dict)
                    info_dict = batch_backward(model, info_dict)

                info_dict = update_parameter_and_lr(model, optimizer, scheduler, info_dict)
                log_per_step(writer, info_dict)
                # NOTE specify save_per_step in cosyvoice.yaml if you want to enable step save
                if info_dict['save_per_step'] > 0 and (self.step + 1) % info_dict['save_per_step'] == 0 and (batch_idx + 1) % info_dict["accum_grad"] == 0:
                    dist.barrier()
                    self.cv(model, cv_data_loader, writer, info_dict, on_batch_end=False)
                    model.train()
                if (batch_idx + 1) % info_dict["accum_grad"] == 0:
                    self.step += 1
        dist.barrier()
        self.cv(model, cv_data_loader, writer, info_dict, on_batch_end=True)

    @torch.inference_mode()
    def cv(self, model, cv_data_loader, writer, info_dict, on_batch_end=True):
        '''Cross validation.'''
        logging.info('Epoch {} Step {} on_batch_end {} CV rank {}'.format(self.epoch, self.step + 1, on_batch_end, self.rank))
        model.eval()
        total_num_utts, total_loss_dict = 0, {}  # avoid division by 0
        for batch_idx, batch_dict in enumerate(cv_data_loader):
            info_dict["tag"] = "CV"
            info_dict["step"] = self.step
            info_dict["epoch"] = self.epoch
            info_dict["batch_idx"] = batch_idx

            num_utts = batch_dict["speech_token"].size(0)
            total_num_utts += num_utts

            info_dict = batch_forward(model, batch_dict, info_dict)

            for k, v in info_dict['loss_dict'].items():
                if k not in total_loss_dict:
                    total_loss_dict[k] = []
                total_loss_dict[k].append(v.item() * num_utts)
            log_per_step(None, info_dict)
        for k, v in total_loss_dict.items():
            total_loss_dict[k] = sum(v) / total_num_utts
        info_dict['loss_dict'] = total_loss_dict
        log_per_save(writer, info_dict)
        model_name = 'epoch_{}_whole'.format(self.epoch) if on_batch_end else 'epoch_{}_step_{}'.format(self.epoch, self.step + 1)
        save_model(model, model_name, info_dict)
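# A minimal sketch (not from the repo) of the DDP gradient-accumulation
# pattern the executor uses above; `loader`, `ddp_model`, `optimizer` and
# `compute_loss` are assumed names standing in for the real training objects.
from contextlib import nullcontext

accum_grad = 4
for batch_idx, batch in enumerate(loader):
    # suppress the gradient all-reduce on non-boundary micro-batches
    ctx = nullcontext if (batch_idx + 1) % accum_grad == 0 else ddp_model.no_sync
    with ctx():
        loss = compute_loss(ddp_model, batch) / accum_grad
        loss.backward()
    if (batch_idx + 1) % accum_grad == 0:
        optimizer.step()        # gradients were synchronized on this step
        optimizer.zero_grad()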
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/file_utils.py
ADDED
@@ -0,0 +1,53 @@
# Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang)
#               2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import torchaudio


def read_lists(list_file):
    lists = []
    with open(list_file, 'r', encoding='utf8') as fin:
        for line in fin:
            lists.append(line.strip())
    return lists


def read_json_lists(list_file):
    lists = read_lists(list_file)
    results = {}
    for fn in lists:
        with open(fn, 'r', encoding='utf8') as fin:
            results.update(json.load(fin))
    return results


def load_wav(wav, target_sr):
    speech, sample_rate = torchaudio.load(wav)
    speech = speech.mean(dim=0, keepdim=True)
    if sample_rate != target_sr:
        assert sample_rate > target_sr, 'wav sample rate {} must be greater than {}'.format(sample_rate, target_sr)
        speech = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=target_sr)(speech)
    return speech


def speed_change(waveform, sample_rate, speed_factor: str):
    effects = [
        ["tempo", speed_factor],  # speed_factor
        ["rate", f"{sample_rate}"]
    ]
    augmented_waveform, new_sample_rate = torchaudio.sox_effects.apply_effects_tensor(
        waveform,
        sample_rate,
        effects
    )
    return augmented_waveform, new_sample_rate
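# A brief usage sketch (the file name is an illustrative assumption): load a
# waveform, downmixed to mono and resampled to the target rate, then apply a
# sox tempo perturbation.
speech = load_wav("example.wav", target_sr=16000)            # (1, T) tensor
fast, sr = speed_change(speech, 16000, speed_factor="1.1")   # 10% faster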
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/frontend_utils.py
ADDED
@@ -0,0 +1,125 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
chinese_char_pattern = re.compile(r'[\u4e00-\u9fff]+')

# whether the text contains any Chinese character
def contains_chinese(text):
    return bool(chinese_char_pattern.search(text))


# replace special symbols
def replace_corner_mark(text):
    text = text.replace('²', '平方')
    text = text.replace('³', '立方')
    return text


# remove meaningless symbols
def remove_bracket(text):
    text = text.replace('(', '').replace(')', '')
    text = text.replace('【', '').replace('】', '')
    text = text.replace('`', '').replace('`', '')
    text = text.replace("——", " ")
    return text


# spell out Arabic numerals
def spell_out_number(text: str, inflect_parser):
    new_text = []
    st = None
    for i, c in enumerate(text):
        if not c.isdigit():
            if st is not None:
                num_str = inflect_parser.number_to_words(text[st: i])
                new_text.append(num_str)
                st = None
            new_text.append(c)
        else:
            if st is None:
                st = i
    if st is not None and st < len(text):
        num_str = inflect_parser.number_to_words(text[st:])
        new_text.append(num_str)
    return ''.join(new_text)


# split paragraph logic:
# 1. per sentence max len token_max_n, min len token_min_n, merge if last sentence len less than merge_len
# 2. calculate sentence length according to lang
# 3. split sentences according to punctuation
def split_paragraph(text: str, tokenize, lang="zh", token_max_n=80, token_min_n=60, merge_len=20, comma_split=False):
    def calc_utt_length(_text: str):
        if lang == "zh":
            return len(_text)
        else:
            return len(tokenize(_text))

    def should_merge(_text: str):
        if lang == "zh":
            return len(_text) < merge_len
        else:
            return len(tokenize(_text)) < merge_len

    if lang == "zh":
        pounc = ['。', '?', '!', ';', ':', '、', '.', '?', '!', ';']
    else:
        pounc = ['.', '?', '!', ';', ':']
    if comma_split:
        pounc.extend([',', ','])
    st = 0
    utts = []
    for i, c in enumerate(text):
        if c in pounc:
            if len(text[st: i]) > 0:
                utts.append(text[st: i] + c)
            if i + 1 < len(text) and text[i + 1] in ['"', '”']:
                tmp = utts.pop(-1)
                utts.append(tmp + text[i + 1])
                st = i + 2
            else:
                st = i + 1
    if len(utts) == 0:
        if lang == "zh":
            utts.append(text + '。')
        else:
            utts.append(text + '.')
    final_utts = []
    cur_utt = ""
    for utt in utts:
        if calc_utt_length(cur_utt + utt) > token_max_n and calc_utt_length(cur_utt) > token_min_n:
            final_utts.append(cur_utt)
            cur_utt = ""
        cur_utt = cur_utt + utt
    if len(cur_utt) > 0:
        if should_merge(cur_utt) and len(final_utts) != 0:
            final_utts[-1] = final_utts[-1] + cur_utt
        else:
            final_utts.append(cur_utt)

    return final_utts


# remove blanks between Chinese characters
def replace_blank(text: str):
    out_str = []
    for i, c in enumerate(text):
        if c == " ":
            # keep the blank only when it sits between two non-space ascii
            # characters; guarding the indices also avoids an IndexError when
            # the space is the first or last character.
            if (0 < i < len(text) - 1 and
                    text[i + 1].isascii() and text[i + 1] != " " and
                    text[i - 1].isascii() and text[i - 1] != " "):
                out_str.append(c)
        else:
            out_str.append(c)
    return "".join(out_str)
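# A minimal sketch of the splitting helper above (the sample sentence, the
# whitespace `tokenize` lambda and the small length limits are illustrative
# assumptions chosen so the split is visible):
text = "CosyVoice supports text normalization. It splits long paragraphs!"
pieces = split_paragraph(text, tokenize=lambda s: s.split(), lang="en",
                         token_max_n=8, token_min_n=3, merge_len=2)
# pieces now holds two sentence chunks, each ending at its punctuation mark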
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/mask.py
ADDED
@@ -0,0 +1,227 @@
# Copyright (c) 2019 Shigeki Karita
#               2020 Mobvoi Inc (Binbin Zhang)
#               2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
'''
def subsequent_mask(
        size: int,
        device: torch.device = torch.device("cpu"),
) -> torch.Tensor:
    """Create mask for subsequent steps (size, size).

    This mask is used only in decoder which works in an auto-regressive mode.
    This means the current step could only do attention with its left steps.

    In encoder, full attention is used when streaming is not necessary and
    the sequence is not long. In this case, no attention mask is needed.

    When streaming is needed, chunk-based attention is used in encoder. See
    subsequent_chunk_mask for the chunk-based attention mask.

    Args:
        size (int): size of mask
        device (torch.device): "cpu" or "cuda" or torch.Tensor.device

    Returns:
        torch.Tensor: mask

    Examples:
        >>> subsequent_mask(3)
        [[1, 0, 0],
         [1, 1, 0],
         [1, 1, 1]]
    """
    ret = torch.ones(size, size, device=device, dtype=torch.bool)
    return torch.tril(ret)
'''


def subsequent_mask(
        size: int,
        device: torch.device = torch.device("cpu"),
) -> torch.Tensor:
    """Create mask for subsequent steps (size, size).

    This mask is used only in decoder which works in an auto-regressive mode.
    This means the current step could only do attention with its left steps.

    In encoder, full attention is used when streaming is not necessary and
    the sequence is not long. In this case, no attention mask is needed.

    When streaming is needed, chunk-based attention is used in encoder. See
    subsequent_chunk_mask for the chunk-based attention mask.

    Args:
        size (int): size of mask
        device (torch.device): "cpu" or "cuda" or torch.Tensor.device

    Returns:
        torch.Tensor: mask

    Examples:
        >>> subsequent_mask(3)
        [[1, 0, 0],
         [1, 1, 0],
         [1, 1, 1]]
    """
    arange = torch.arange(size, device=device)
    mask = arange.expand(size, size)
    arange = arange.unsqueeze(-1)
    mask = mask <= arange
    return mask


def subsequent_chunk_mask(
        size: int,
        chunk_size: int,
        num_left_chunks: int = -1,
        device: torch.device = torch.device("cpu"),
) -> torch.Tensor:
    """Create mask for subsequent steps (size, size) with chunk size,
       this is for streaming encoder

    Args:
        size (int): size of mask
        chunk_size (int): size of chunk
        num_left_chunks (int): number of left chunks
            <0: use full chunk
            >=0: use num_left_chunks
        device (torch.device): "cpu" or "cuda" or torch.Tensor.device

    Returns:
        torch.Tensor: mask

    Examples:
        >>> subsequent_chunk_mask(4, 2)
        [[1, 1, 0, 0],
         [1, 1, 0, 0],
         [1, 1, 1, 1],
         [1, 1, 1, 1]]
    """
    ret = torch.zeros(size, size, device=device, dtype=torch.bool)
    for i in range(size):
        if num_left_chunks < 0:
            start = 0
        else:
            start = max((i // chunk_size - num_left_chunks) * chunk_size, 0)
        ending = min((i // chunk_size + 1) * chunk_size, size)
        ret[i, start:ending] = True
    return ret


def add_optional_chunk_mask(xs: torch.Tensor,
                            masks: torch.Tensor,
                            use_dynamic_chunk: bool,
                            use_dynamic_left_chunk: bool,
                            decoding_chunk_size: int,
                            static_chunk_size: int,
                            num_decoding_left_chunks: int,
                            enable_full_context: bool = True):
    """ Apply optional mask for encoder.

    Args:
        xs (torch.Tensor): padded input, (B, L, D), L for max length
        masks (torch.Tensor): mask for xs, (B, 1, L)
        use_dynamic_chunk (bool): whether to use dynamic chunk or not
        use_dynamic_left_chunk (bool): whether to use dynamic left chunk for
            training.
        decoding_chunk_size (int): decoding chunk size for dynamic chunk, it's
            0: default for training, use random dynamic chunk.
            <0: for decoding, use full chunk.
            >0: for decoding, use fixed chunk size as set.
        static_chunk_size (int): chunk size for static chunk training/decoding
            if it's greater than 0; if use_dynamic_chunk is true,
            this parameter will be ignored
        num_decoding_left_chunks: number of left chunks, this is for decoding,
            the chunk size is decoding_chunk_size.
            >=0: use num_decoding_left_chunks
            <0: use all left chunks
        enable_full_context (bool):
            True: chunk size is either [1, 25] or full context(max_len)
            False: chunk size ~ U[1, 25]

    Returns:
        torch.Tensor: chunk mask of the input xs.
    """
    # Whether to use chunk mask or not
    if use_dynamic_chunk:
        max_len = xs.size(1)
        if decoding_chunk_size < 0:
            chunk_size = max_len
            num_left_chunks = -1
        elif decoding_chunk_size > 0:
            chunk_size = decoding_chunk_size
            num_left_chunks = num_decoding_left_chunks
        else:
            # chunk size is either [1, 25] or full context(max_len).
            # Since we use 4 times subsampling and allow up to 1s(100 frames)
            # delay, the maximum frame is 100 / 4 = 25.
            chunk_size = torch.randint(1, max_len, (1, )).item()
            num_left_chunks = -1
            if chunk_size > max_len // 2 and enable_full_context:
                chunk_size = max_len
            else:
                chunk_size = chunk_size % 25 + 1
                if use_dynamic_left_chunk:
                    max_left_chunks = (max_len - 1) // chunk_size
                    num_left_chunks = torch.randint(0, max_left_chunks,
                                                    (1, )).item()
        chunk_masks = subsequent_chunk_mask(xs.size(1), chunk_size,
                                            num_left_chunks,
                                            xs.device)  # (L, L)
        chunk_masks = chunk_masks.unsqueeze(0)  # (1, L, L)
        chunk_masks = masks & chunk_masks  # (B, L, L)
    elif static_chunk_size > 0:
        num_left_chunks = num_decoding_left_chunks
        chunk_masks = subsequent_chunk_mask(xs.size(1), static_chunk_size,
                                            num_left_chunks,
                                            xs.device)  # (L, L)
        chunk_masks = chunk_masks.unsqueeze(0)  # (1, L, L)
        chunk_masks = masks & chunk_masks  # (B, L, L)
    else:
        chunk_masks = masks
    return chunk_masks


def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
    """Make mask tensor containing indices of padded part.

    See description of make_non_pad_mask.

    Args:
        lengths (torch.Tensor): Batch of lengths (B,).
    Returns:
        torch.Tensor: Mask tensor containing indices of padded part.

    Examples:
        >>> lengths = [5, 3, 2]
        >>> make_pad_mask(lengths)
        masks = [[0, 0, 0, 0, 0],
                 [0, 0, 0, 1, 1],
                 [0, 0, 1, 1, 1]]
    """
    batch_size = lengths.size(0)
    max_len = max_len if max_len > 0 else lengths.max().item()
    seq_range = torch.arange(0,
                             max_len,
                             dtype=torch.int64,
                             device=lengths.device)
    seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
    seq_length_expand = lengths.unsqueeze(-1)
    mask = seq_range_expand >= seq_length_expand
    return mask
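# A short sketch combining the helpers above (batch and feature sizes are
# illustrative assumptions): build a padding mask from lengths, then overlay
# a static 2-frame streaming chunk mask.
import torch

lengths = torch.tensor([5, 3])
non_pad = ~make_pad_mask(lengths)        # (B, T), True on real frames
masks = non_pad.unsqueeze(1)             # (B, 1, T)
chunk = add_optional_chunk_mask(torch.zeros(2, 5, 8), masks,
                                use_dynamic_chunk=False,
                                use_dynamic_left_chunk=False,
                                decoding_chunk_size=0,
                                static_chunk_size=2,
                                num_decoding_left_chunks=-1)
# chunk: (B, T, T) boolean attention mask with 2-frame streaming chunks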
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/scheduler.py
ADDED
|
@@ -0,0 +1,739 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2020 Mobvoi Inc (Binbin Zhang)
|
| 2 |
+
# 2022 Ximalaya Inc (Yuguang Yang)
|
| 3 |
+
# 2024 Alibaba Inc (authors: Xiang Lyu)
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
# Modified from ESPnet(https://github.com/espnet/espnet)
|
| 17 |
+
# NeMo(https://github.com/NVIDIA/NeMo)
|
| 18 |
+
|
| 19 |
+
from typing import Union
|
| 20 |
+
|
| 21 |
+
import math
|
| 22 |
+
import warnings
|
| 23 |
+
import torch
|
| 24 |
+
from torch.optim.lr_scheduler import _LRScheduler
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class WarmupLR(_LRScheduler):
|
| 28 |
+
"""The WarmupLR scheduler
|
| 29 |
+
|
| 30 |
+
This scheduler is almost same as NoamLR Scheduler except for following
|
| 31 |
+
difference:
|
| 32 |
+
|
| 33 |
+
NoamLR:
|
| 34 |
+
lr = optimizer.lr * model_size ** -0.5
|
| 35 |
+
* min(step ** -0.5, step * warmup_step ** -1.5)
|
| 36 |
+
WarmupLR:
|
| 37 |
+
lr = optimizer.lr * warmup_step ** 0.5
|
| 38 |
+
* min(step ** -0.5, step * warmup_step ** -1.5)
|
| 39 |
+
|
| 40 |
+
Note that the maximum lr equals to optimizer.lr in this scheduler.
|
| 41 |
+
|
| 42 |
+
"""
|
| 43 |
+
|
| 44 |
+
def __init__(
|
| 45 |
+
self,
|
| 46 |
+
optimizer: torch.optim.Optimizer,
|
| 47 |
+
warmup_steps: Union[int, float] = 25000,
|
| 48 |
+
last_epoch: int = -1,
|
| 49 |
+
):
|
| 50 |
+
self.warmup_steps = warmup_steps
|
| 51 |
+
|
| 52 |
+
# __init__() must be invoked before setting field
|
| 53 |
+
# because step() is also invoked in __init__()
|
| 54 |
+
super().__init__(optimizer, last_epoch)
|
| 55 |
+
|
| 56 |
+
def __repr__(self):
|
| 57 |
+
return f"{self.__class__.__name__}(warmup_steps={self.warmup_steps})"
|
| 58 |
+
|
| 59 |
+
def get_lr(self):
|
| 60 |
+
step_num = self.last_epoch + 1
|
| 61 |
+
if self.warmup_steps == 0:
|
| 62 |
+
return [lr * step_num**-0.5 for lr in self.base_lrs]
|
| 63 |
+
else:
|
| 64 |
+
return [
|
| 65 |
+
lr * self.warmup_steps**0.5 *
|
| 66 |
+
min(step_num**-0.5, step_num * self.warmup_steps**-1.5)
|
| 67 |
+
for lr in self.base_lrs
|
| 68 |
+
]
|
| 69 |
+
|
| 70 |
+
def set_step(self, step: int):
|
| 71 |
+
self.last_epoch = step
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class WarmupPolicy(_LRScheduler):
|
| 75 |
+
"""Adds warmup kwargs and warmup logic to lr policy.
|
| 76 |
+
All arguments should be passed as kwargs for clarity,
|
| 77 |
+
Args:
|
| 78 |
+
warmup_steps: Number of training steps in warmup stage
|
| 79 |
+
warmup_ratio: Ratio of warmup steps to total steps
|
| 80 |
+
max_steps: Total number of steps while training or `None` for
|
| 81 |
+
infinite training
|
| 82 |
+
"""
|
| 83 |
+
|
| 84 |
+
def __init__(self,
|
| 85 |
+
optimizer,
|
| 86 |
+
*,
|
| 87 |
+
warmup_steps=None,
|
| 88 |
+
warmup_ratio=None,
|
| 89 |
+
max_steps=None,
|
| 90 |
+
min_lr=0.0,
|
| 91 |
+
last_epoch=-1):
|
| 92 |
+
assert not (warmup_steps is not None and warmup_ratio is not None),\
|
| 93 |
+
"Either use particular number of step or ratio"
|
| 94 |
+
assert warmup_ratio is None or max_steps is not None, \
|
| 95 |
+
"If there is a ratio, there should be a total steps"
|
| 96 |
+
|
| 97 |
+
# It is necessary to assign all attributes *before* __init__,
|
| 98 |
+
# as class is wrapped by an inner class.
|
| 99 |
+
self.max_steps = max_steps
|
| 100 |
+
if warmup_steps is not None:
|
| 101 |
+
self.warmup_steps = warmup_steps
|
| 102 |
+
elif warmup_ratio is not None:
|
| 103 |
+
self.warmup_steps = int(warmup_ratio * max_steps)
|
| 104 |
+
else:
|
| 105 |
+
self.warmup_steps = 0
|
| 106 |
+
|
| 107 |
+
self.min_lr = min_lr
|
| 108 |
+
super().__init__(optimizer, last_epoch)
|
| 109 |
+
|
| 110 |
+
def get_lr(self):
|
| 111 |
+
if not self._get_lr_called_within_step:
|
| 112 |
+
warnings.warn(
|
| 113 |
+
"To get the last learning rate computed "
|
| 114 |
+
"by the scheduler, please use `get_last_lr()`.",
|
| 115 |
+
UserWarning,
|
| 116 |
+
stacklevel=2)
|
| 117 |
+
|
| 118 |
+
step = self.last_epoch
|
| 119 |
+
|
| 120 |
+
if step <= self.warmup_steps and self.warmup_steps > 0:
|
| 121 |
+
return self._get_warmup_lr(step)
|
| 122 |
+
|
| 123 |
+
if step > self.max_steps:
|
| 124 |
+
return [self.min_lr for _ in self.base_lrs]
|
| 125 |
+
|
| 126 |
+
return self._get_lr(step)
|
| 127 |
+
|
| 128 |
+
def _get_warmup_lr(self, step):
|
| 129 |
+
lr_val = (step + 1) / (self.warmup_steps + 1)
|
| 130 |
+
return [initial_lr * lr_val for initial_lr in self.base_lrs]
|
| 131 |
+
|
| 132 |
+
def _get_lr(self, step):
|
| 133 |
+
"""Simple const lr policy"""
|
| 134 |
+
return self.base_lrs
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
class SquareRootConstantPolicy(_LRScheduler):
|
| 138 |
+
"""Adds warmup kwargs and warmup logic to lr policy.
|
| 139 |
+
All arguments should be passed as kwargs for clarity,
|
| 140 |
+
Args:
|
| 141 |
+
warmup_steps: Number of training steps in warmup stage
|
| 142 |
+
warmup_ratio: Ratio of warmup steps to total steps
|
| 143 |
+
max_steps: Total number of steps while training or `None` for
|
| 144 |
+
infinite training
|
| 145 |
+
"""
|
| 146 |
+
|
| 147 |
+
def __init__(self,
|
| 148 |
+
optimizer,
|
| 149 |
+
*,
|
| 150 |
+
constant_steps=None,
|
| 151 |
+
constant_ratio=None,
|
| 152 |
+
max_steps=None,
|
| 153 |
+
min_lr=0.0,
|
| 154 |
+
last_epoch=-1):
|
| 155 |
+
assert not (constant_steps is not None
|
| 156 |
+
and constant_ratio is not None), \
|
| 157 |
+
"Either use particular number of step or ratio"
|
| 158 |
+
assert constant_ratio is None or max_steps is not None, \
|
| 159 |
+
"If there is a ratio, there should be a total steps"
|
| 160 |
+
|
| 161 |
+
# It is necessary to assign all attributes *before* __init__,
|
| 162 |
+
# as class is wrapped by an inner class.
|
| 163 |
+
self.max_steps = max_steps
|
| 164 |
+
if constant_steps is not None:
|
| 165 |
+
self.constant_steps = constant_steps
|
| 166 |
+
elif constant_ratio is not None:
|
| 167 |
+
self.constant_steps = int(constant_ratio * max_steps)
|
| 168 |
+
else:
|
| 169 |
+
self.constant_steps = 0
|
| 170 |
+
|
| 171 |
+
self.constant_lr = 1 / (constant_steps**0.5)
|
| 172 |
+
self.min_lr = min_lr
|
| 173 |
+
super().__init__(optimizer, last_epoch)
|
| 174 |
+
|
| 175 |
+
def get_lr(self):
|
| 176 |
+
if not self._get_lr_called_within_step:
|
| 177 |
+
warnings.warn(
|
| 178 |
+
"To get the last learning rate computed "
|
| 179 |
+
"by the scheduler, please use `get_last_lr()`.",
|
| 180 |
+
UserWarning,
|
| 181 |
+
stacklevel=2)
|
| 182 |
+
|
| 183 |
+
step = self.last_epoch
|
| 184 |
+
|
| 185 |
+
if step <= self.constant_steps:
|
| 186 |
+
return [self.constant_lr for _ in self.base_lrs]
|
| 187 |
+
|
| 188 |
+
if step > self.max_steps:
|
| 189 |
+
return [self.min_lr for _ in self.base_lrs]
|
| 190 |
+
|
| 191 |
+
return self._get_lr(step)
|
| 192 |
+
|
| 193 |
+
def _get_lr(self, step):
|
| 194 |
+
"""Simple const lr policy"""
|
| 195 |
+
return self.base_lrs
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
class WarmupHoldPolicy(WarmupPolicy):
|
| 199 |
+
"""Variant of WarmupPolicy which maintains high
|
| 200 |
+
learning rate for a defined number of steps.
|
| 201 |
+
All arguments should be passed as kwargs for clarity,
|
| 202 |
+
Args:
|
| 203 |
+
warmup_steps: Number of training steps in warmup stage
|
| 204 |
+
warmup_ratio: Ratio of warmup steps to total steps
|
| 205 |
+
hold_steps: Number of training steps to
|
| 206 |
+
hold the learning rate after warm up
|
| 207 |
+
hold_ratio: Ratio of hold steps to total steps
|
| 208 |
+
max_steps: Total number of steps while training or `None` for
|
| 209 |
+
infinite training
|
| 210 |
+
"""
|
| 211 |
+
|
| 212 |
+
def __init__(
|
| 213 |
+
self,
|
| 214 |
+
optimizer,
|
| 215 |
+
*,
|
| 216 |
+
warmup_steps=None,
|
| 217 |
+
warmup_ratio=None,
|
| 218 |
+
hold_steps=None,
|
| 219 |
+
hold_ratio=None,
|
| 220 |
+
max_steps=None,
|
| 221 |
+
min_lr=0.0,
|
| 222 |
+
last_epoch=-1,
|
| 223 |
+
):
|
| 224 |
+
assert not (hold_steps is not None and hold_ratio is not None), \
|
| 225 |
+
"Either use particular number of step or ratio"
|
| 226 |
+
assert hold_ratio is None or max_steps is not None, \
|
| 227 |
+
"If there is a ratio, there should be a total steps"
|
| 228 |
+
|
| 229 |
+
self.min_lr = min_lr
|
| 230 |
+
self._last_warmup_lr = 0.0
|
| 231 |
+
|
| 232 |
+
# Necessary to duplicate as class attributes are hidden in inner class
|
| 233 |
+
self.max_steps = max_steps
|
| 234 |
+
if warmup_steps is not None:
|
| 235 |
+
self.warmup_steps = warmup_steps
|
| 236 |
+
elif warmup_ratio is not None:
|
| 237 |
+
self.warmup_steps = int(warmup_ratio * max_steps)
|
| 238 |
+
else:
|
| 239 |
+
self.warmup_steps = 0
|
| 240 |
+
|
| 241 |
+
if hold_steps is not None:
|
| 242 |
+
self.hold_steps = hold_steps + self.warmup_steps
|
| 243 |
+
elif hold_ratio is not None:
|
| 244 |
+
self.hold_steps = int(hold_ratio * max_steps) + self.warmup_steps
|
| 245 |
+
else:
|
| 246 |
+
self.hold_steps = 0
|
| 247 |
+
|
| 248 |
+
super().__init__(
|
| 249 |
+
optimizer,
|
| 250 |
+
warmup_steps=warmup_steps,
|
| 251 |
+
warmup_ratio=warmup_ratio,
|
| 252 |
+
max_steps=max_steps,
|
| 253 |
+
last_epoch=last_epoch,
|
| 254 |
+
min_lr=min_lr,
|
| 255 |
+
)
|
| 256 |
+
|
| 257 |
+
def get_lr(self):
|
| 258 |
+
if not self._get_lr_called_within_step:
|
| 259 |
+
warnings.warn(
|
| 260 |
+
"To get the last learning rate computed by the scheduler,"
|
| 261 |
+
" "
|
| 262 |
+
"please use `get_last_lr()`.",
|
| 263 |
+
UserWarning,
|
| 264 |
+
stacklevel=2)
|
| 265 |
+
|
| 266 |
+
step = self.last_epoch
|
| 267 |
+
|
| 268 |
+
# Warmup phase
|
| 269 |
+
if step <= self.warmup_steps and self.warmup_steps > 0:
|
| 270 |
+
return self._get_warmup_lr(step)
|
| 271 |
+
|
| 272 |
+
# Hold phase
|
| 273 |
+
if (step >= self.warmup_steps) and (step < self.hold_steps):
|
| 274 |
+
return self.base_lrs
|
| 275 |
+
|
| 276 |
+
if step > self.max_steps:
|
| 277 |
+
return [self.min_lr for _ in self.base_lrs]
|
| 278 |
+
|
| 279 |
+
return self._get_lr(step)
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
class WarmupAnnealHoldPolicy(_LRScheduler):
|
| 283 |
+
"""Adds warmup kwargs and warmup logic to lr policy.
|
| 284 |
+
All arguments should be passed as kwargs for clarity,
|
| 285 |
+
Args:
|
| 286 |
+
warmup_steps: Number of training steps in warmup stage
|
| 287 |
+
warmup_ratio: Ratio of warmup steps to total steps
|
| 288 |
+
max_steps: Total number of steps while training or `None` for
|
| 289 |
+
infinite training
|
| 290 |
+
min_lr: Minimum lr to hold the learning rate after decay at.
|
| 291 |
+
constant_steps: Number of steps to keep lr constant at.
|
| 292 |
+
constant_ratio: Ratio of steps to keep lr constant.
|
| 293 |
+
"""
|
| 294 |
+
|
| 295 |
+
def __init__(
|
| 296 |
+
self,
|
| 297 |
+
optimizer,
|
| 298 |
+
*,
|
| 299 |
+
warmup_steps=None,
|
| 300 |
+
warmup_ratio=None,
|
| 301 |
+
constant_steps=None,
|
| 302 |
+
constant_ratio=None,
|
| 303 |
+
max_steps=None,
|
| 304 |
+
min_lr=0.0,
|
| 305 |
+
last_epoch=-1,
|
| 306 |
+
):
|
| 307 |
+
assert not (warmup_steps is not None
|
| 308 |
+
and warmup_ratio is not None), \
|
| 309 |
+
"Either use particular number of step or ratio"
|
| 310 |
+
assert not (constant_steps is not None
|
| 311 |
+
and constant_ratio is not None), \
|
| 312 |
+
"Either use constant_steps or constant_ratio"
|
| 313 |
+
assert warmup_ratio is None or max_steps is not None, \
|
| 314 |
+
"If there is a ratio, there should be a total steps"
|
| 315 |
+
|
| 316 |
+
# It is necessary to assign all attributes *before* __init__,
|
| 317 |
+
# as class is wrapped by an inner class.
|
| 318 |
+
self.max_steps = max_steps
|
| 319 |
+
|
| 320 |
+
if warmup_steps is not None:
|
| 321 |
+
self.warmup_steps = warmup_steps
|
| 322 |
+
elif warmup_ratio is not None:
|
| 323 |
+
self.warmup_steps = int(warmup_ratio * max_steps)
|
| 324 |
+
else:
|
| 325 |
+
self.warmup_steps = 0
|
| 326 |
+
|
| 327 |
+
if constant_steps is not None:
|
| 328 |
+
self.constant_steps = constant_steps
|
| 329 |
+
elif constant_ratio is not None:
|
| 330 |
+
self.constant_steps = int(constant_ratio * max_steps)
|
| 331 |
+
else:
|
| 332 |
+
self.constant_steps = 0
|
| 333 |
+
|
| 334 |
+
self.decay_steps = max_steps - (self.constant_steps +
|
| 335 |
+
self.warmup_steps)
|
| 336 |
+
|
| 337 |
+
self.min_lr = min_lr
|
| 338 |
+
super().__init__(optimizer, last_epoch)
|
| 339 |
+
|
| 340 |
+
def get_lr(self):
|
| 341 |
+
if not self._get_lr_called_within_step:
|
| 342 |
+
warnings.warn(
|
| 343 |
+
"To get the last learning rate computed "
|
| 344 |
+
"by the scheduler, please use `get_last_lr()`.",
|
| 345 |
+
UserWarning,
|
| 346 |
+
stacklevel=2)
|
| 347 |
+
|
| 348 |
+
step = self.last_epoch
|
| 349 |
+
|
| 350 |
+
# Warmup steps
|
| 351 |
+
if self.warmup_steps > 0 and step <= self.warmup_steps:
|
| 352 |
+
return self._get_warmup_lr(step)
|
| 353 |
+
|
| 354 |
+
# Constant steps after warmup and decay
|
| 355 |
+
if self.constant_steps > 0 and (
|
| 356 |
+
self.warmup_steps + self.decay_steps) < step <= self.max_steps:
|
| 357 |
+
return self._get_constant_lr(step)
|
| 358 |
+
|
| 359 |
+
# Min lr after max steps of updates
|
| 360 |
+
if step > self.max_steps:
|
| 361 |
+
return [self.min_lr for _ in self.base_lrs]
|
| 362 |
+
|
| 363 |
+
return self._get_lr(step)
|
| 364 |
+
|
| 365 |
+
def _get_warmup_lr(self, step):
|
| 366 |
+
lr_val = (step + 1) / (self.warmup_steps + 1)
|
| 367 |
+
return [initial_lr * lr_val for initial_lr in self.base_lrs]
|
| 368 |
+
|
| 369 |
+
def _get_constant_lr(self, step):
|
| 370 |
+
return [self.min_lr for _ in self.base_lrs]
|
| 371 |
+
|
| 372 |
+
def _get_lr(self, step):
|
| 373 |
+
"""Simple const lr policy"""
|
| 374 |
+
return self.base_lrs
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
def _squareroot_annealing(initial_lr, step, max_steps, min_lr):
|
| 378 |
+
mult = ((max_steps - step) / max_steps)**0.5
|
| 379 |
+
out_lr = initial_lr * mult
|
| 380 |
+
out_lr = max(out_lr, min_lr)
|
| 381 |
+
return out_lr
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
def _square_annealing(initial_lr, step, max_steps, min_lr):
|
| 385 |
+
mult = ((max_steps - step) / max_steps)**2
|
| 386 |
+
out_lr = initial_lr * mult
|
| 387 |
+
out_lr = max(out_lr, min_lr)
|
| 388 |
+
return out_lr
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
def _cosine_annealing(initial_lr, step, max_steps, min_lr):
|
| 392 |
+
mult = 0.5 * (1 + math.cos(math.pi * step / max_steps))
|
| 393 |
+
out_lr = (initial_lr - min_lr) * mult + min_lr
|
| 394 |
+
return out_lr
|
| 395 |
+
|
| 396 |
+
|
| 397 |
+
def _linear_warmup_with_cosine_annealing(max_lr, warmup_steps, step,
|
| 398 |
+
decay_steps, min_lr):
|
| 399 |
+
assert max_lr > min_lr
|
| 400 |
+
# Use linear warmup for the initial part.
|
| 401 |
+
if warmup_steps > 0 and step <= warmup_steps:
|
| 402 |
+
return max_lr * float(step) / float(warmup_steps)
|
| 403 |
+
|
| 404 |
+
# For any steps larger than `decay_steps`, use `min_lr`.
|
| 405 |
+
if step > warmup_steps + decay_steps:
|
| 406 |
+
return min_lr
|
| 407 |
+
|
| 408 |
+
# If we are done with the warmup period, use the decay style.
|
| 409 |
+
num_steps_ = step - warmup_steps
|
| 410 |
+
decay_steps_ = decay_steps
|
| 411 |
+
decay_ratio = float(num_steps_) / float(decay_steps_)
|
| 412 |
+
assert decay_ratio >= 0.0
|
| 413 |
+
assert decay_ratio <= 1.0
|
| 414 |
+
delta_lr = max_lr - min_lr
|
| 415 |
+
|
| 416 |
+
coeff = 0.5 * (math.cos(math.pi * decay_ratio) + 1.0)
|
| 417 |
+
|
| 418 |
+
return min_lr + coeff * delta_lr
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
def _poly_decay(initial_lr, step, decay_steps, power, min_lr, cycle):
|
| 422 |
+
if cycle:
|
| 423 |
+
multiplier = 1.0 if step == 0 else math.ceil(step / decay_steps)
|
| 424 |
+
decay_steps *= multiplier
|
| 425 |
+
else:
|
| 426 |
+
step = min(step, decay_steps)
|
| 427 |
+
p = step / decay_steps
|
| 428 |
+
lr = (initial_lr - min_lr) * math.pow(1.0 - p, power)
|
| 429 |
+
lr += min_lr
|
| 430 |
+
return lr
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
def _noam_hold_annealing(initial_lr, step, warmup_steps, hold_steps,
|
| 434 |
+
decay_rate, min_lr):
|
| 435 |
+
# hold_steps = total number of steps
|
| 436 |
+
# to hold the LR, not the warmup + hold steps.
|
| 437 |
+
T_warmup_decay = max(1, warmup_steps**decay_rate)
|
| 438 |
+
T_hold_decay = max(1, (step - hold_steps)**decay_rate)
|
| 439 |
+
lr = (initial_lr * T_warmup_decay) / T_hold_decay
|
| 440 |
+
lr = max(lr, min_lr)
|
| 441 |
+
return lr
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
class SquareAnnealing(WarmupPolicy):
|
| 445 |
+
|
| 446 |
+
def __init__(self,
|
| 447 |
+
optimizer,
|
| 448 |
+
*,
|
| 449 |
+
max_steps,
|
| 450 |
+
min_lr=1e-5,
|
| 451 |
+
last_epoch=-1,
|
| 452 |
+
**kwargs):
|
| 453 |
+
super().__init__(optimizer=optimizer,
|
| 454 |
+
max_steps=max_steps,
|
| 455 |
+
last_epoch=last_epoch,
|
| 456 |
+
min_lr=min_lr,
|
| 457 |
+
**kwargs)
|
| 458 |
+
|
| 459 |
+
def _get_lr(self, step):
|
| 460 |
+
new_lrs = [
|
| 461 |
+
_square_annealing(
|
| 462 |
+
initial_lr=initial_lr,
|
| 463 |
+
step=step - self.warmup_steps,
|
| 464 |
+
max_steps=self.max_steps - self.warmup_steps,
|
| 465 |
+
min_lr=self.min_lr,
|
| 466 |
+
) for initial_lr in self.base_lrs
|
| 467 |
+
]
|
| 468 |
+
return new_lrs
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
class SquareRootAnnealing(WarmupPolicy):
|
| 472 |
+
|
| 473 |
+
def __init__(self,
|
| 474 |
+
optimizer,
|
| 475 |
+
*,
|
| 476 |
+
max_steps,
|
| 477 |
+
min_lr=0,
|
| 478 |
+
last_epoch=-1,
|
| 479 |
+
**kwargs):
|
| 480 |
+
super().__init__(optimizer=optimizer,
|
| 481 |
+
max_steps=max_steps,
|
| 482 |
+
last_epoch=last_epoch,
|
| 483 |
+
min_lr=min_lr,
|
| 484 |
+
**kwargs)
|
| 485 |
+
|
| 486 |
+
def _get_lr(self, step):
|
| 487 |
+
new_lrs = [
|
| 488 |
+
_squareroot_annealing(initial_lr=initial_lr,
|
| 489 |
+
step=step,
|
| 490 |
+
max_steps=self.max_steps,
|
| 491 |
+
min_lr=self.min_lr)
|
| 492 |
+
for initial_lr in self.base_lrs
|
| 493 |
+
]
|
| 494 |
+
return new_lrs
|


class CosineAnnealing(WarmupAnnealHoldPolicy):

    def __init__(self,
                 optimizer,
                 *,
                 max_steps,
                 min_lr=0,
                 last_epoch=-1,
                 **kwargs):
        super().__init__(optimizer=optimizer,
                         max_steps=max_steps,
                         last_epoch=last_epoch,
                         min_lr=min_lr,
                         **kwargs)

    def _get_lr(self, step):
        for initial_lr in self.base_lrs:
            if initial_lr < self.min_lr:
                raise ValueError(
                    f"{self} received an initial learning rate "
                    f"that was lower than the minimum learning rate.")

        if self.constant_steps is None or self.constant_steps == 0:
            new_lrs = [
                _cosine_annealing(
                    initial_lr=initial_lr,
                    step=step - self.warmup_steps,
                    max_steps=self.max_steps - self.warmup_steps,
                    min_lr=self.min_lr,
                ) for initial_lr in self.base_lrs
            ]
        else:
            new_lrs = self._get_linear_warmup_with_cosine_annealing_lr(step)
        return new_lrs

    def _get_warmup_lr(self, step):
        if self.constant_steps is None or self.constant_steps == 0:
            return super()._get_warmup_lr(step)
        else:
            # Use linear warmup for the initial part.
            return self._get_linear_warmup_with_cosine_annealing_lr(step)

    def _get_constant_lr(self, step):
        # Only called when `constant_steps` > 0.
        return self._get_linear_warmup_with_cosine_annealing_lr(step)

    def _get_linear_warmup_with_cosine_annealing_lr(self, step):
        # Cosine schedule for Megatron LM:
        # slightly different warmup schedule + constant LR at the end.
        new_lrs = [
            _linear_warmup_with_cosine_annealing(
                max_lr=self.base_lrs[0],
                warmup_steps=self.warmup_steps,
                step=step,
                decay_steps=self.decay_steps,
                min_lr=self.min_lr,
            ) for _ in self.base_lrs
        ]
        return new_lrs


class NoamAnnealing(_LRScheduler):

    def __init__(self,
                 optimizer,
                 *,
                 d_model,
                 warmup_steps=None,
                 warmup_ratio=None,
                 max_steps=None,
                 min_lr=0.0,
                 last_epoch=-1):
        self._normalize = d_model**(-0.5)
        assert not (warmup_steps is not None
                    and warmup_ratio is not None), \
            "Use either a fixed number of warmup steps or a warmup ratio, not both"
        assert warmup_ratio is None or max_steps is not None, \
            "If a warmup ratio is given, the total number of steps must be given too"

        # It is necessary to assign all attributes *before* __init__,
        # as the class is wrapped by an inner class.
        self.max_steps = max_steps
        if warmup_steps is not None:
            self.warmup_steps = warmup_steps
        elif warmup_ratio is not None:
            self.warmup_steps = int(warmup_ratio * max_steps)
        else:
            self.warmup_steps = 0

        self.min_lr = min_lr
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        if not self._get_lr_called_within_step:
            warnings.warn(
                "To get the last learning rate computed "
                "by the scheduler, please use `get_last_lr()`.",
                UserWarning,
                stacklevel=2)

        step = max(1, self.last_epoch)

        for initial_lr in self.base_lrs:
            if initial_lr < self.min_lr:
                raise ValueError(
                    f"{self} received an initial learning rate "
                    f"that was lower than the minimum learning rate.")

        new_lrs = [
            self._noam_annealing(initial_lr=initial_lr, step=step)
            for initial_lr in self.base_lrs
        ]
        return new_lrs

    def _noam_annealing(self, initial_lr, step):
        if self.warmup_steps > 0:
            mult = self._normalize * min(step**(-0.5),
                                         step * (self.warmup_steps**(-1.5)))
        else:
            mult = self._normalize * step**(-0.5)

        out_lr = initial_lr * mult
        if step > self.warmup_steps:
            out_lr = max(out_lr, self.min_lr)
        return out_lr
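    # Note (added for clarity): after warmup this is the standard Noam
    # schedule, lr = initial_lr * d_model**-0.5 * min(step**-0.5,
    # step * warmup_steps**-1.5), which peaks at step == warmup_steps.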


class NoamHoldAnnealing(WarmupHoldPolicy):

    def __init__(self,
                 optimizer,
                 *,
                 max_steps,
                 decay_rate=0.5,
                 min_lr=0.0,
                 last_epoch=-1,
                 **kwargs):
        """
        From NeMo:
        Implementation of the Noam Hold Annealing policy
        from the Squeezeformer paper.

        Unlike NoamAnnealing, the peak learning rate
        can be explicitly set for this scheduler.
        The schedule first performs linear warmup,
        then holds the peak LR, then decays with some schedule for
        the remainder of the steps.
        Therefore, the minimum LR is still dependent
        on the hyperparameters selected.

        Its schedule is determined by three factors:

        Warmup Steps: Initial stage, where linear warmup
        occurs until the peak LR is reached. Unlike NoamAnnealing,
        the peak LR is explicitly stated here instead of a scaling factor.

        Hold Steps: Intermediate stage, where the peak LR
        is maintained for some number of steps. In this region,
        the high peak LR allows the model to converge faster
        if training is stable. However, the high LR
        may also cause instability during training.
        Should usually be a significant fraction of training
        steps (around 30-40% of the entire training steps).

        Decay Steps: Final stage, where the LR rapidly decays
        with some scaling rate (set by the decay rate).
        To attain Noam decay, use 0.5;
        for the Squeezeformer recommended decay, use 1.0.
        The fast decay after the prolonged high LR during the
        hold phase allows for rapid convergence.

        References:
            - [Squeezeformer:
            An Efficient Transformer for Automatic Speech Recognition]
            (https://arxiv.org/abs/2206.00888)

        Args:
            optimizer: Pytorch compatible Optimizer object.
            warmup_steps: Number of training steps in the warmup stage
            warmup_ratio: Ratio of warmup steps to total steps
            hold_steps: Number of training steps to
                hold the learning rate after warmup
            hold_ratio: Ratio of hold steps to total steps
            max_steps: Total number of steps while training or `None` for
                infinite training
            decay_rate: Float value describing the polynomial decay
                after the hold period. The default value
                of 0.5 corresponds to Noam decay.
            min_lr: Minimum learning rate.
        """
        self.decay_rate = decay_rate
        super().__init__(optimizer=optimizer,
                         max_steps=max_steps,
                         last_epoch=last_epoch,
                         min_lr=min_lr,
                         **kwargs)

    def _get_lr(self, step):
        if self.warmup_steps is None or self.warmup_steps == 0:
            raise ValueError(
                "Noam scheduler cannot be used without warmup steps")

        if self.hold_steps > 0:
            hold_steps = self.hold_steps - self.warmup_steps
        else:
            hold_steps = 0

        new_lrs = [
            _noam_hold_annealing(
                initial_lr,
                step=step,
                warmup_steps=self.warmup_steps,
                hold_steps=hold_steps,
                decay_rate=self.decay_rate,
                min_lr=self.min_lr,
            ) for initial_lr in self.base_lrs
        ]
        return new_lrs

    def set_step(self, step: int):
        self.last_epoch = step
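
    # Minimal usage sketch (illustrative, not part of the original file;
    # warmup_steps/hold_steps are forwarded to WarmupHoldPolicy via **kwargs):
    #
    #     optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    #     scheduler = NoamHoldAnnealing(optimizer, max_steps=100000,
    #                                   warmup_steps=5000, hold_steps=30000,
    #                                   decay_rate=0.5, min_lr=1e-5)
    #     for batch in train_loader:
    #         ...            # forward / backward
    #         optimizer.step()
    #         scheduler.step()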


class ConstantLR(_LRScheduler):
    """The ConstantLR scheduler.

    This scheduler keeps the learning rate constant.
    """

    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
    ):
        # super().__init__() must be invoked before any extra fields are set,
        # because step() is also invoked inside __init__().
        super().__init__(optimizer)

    def get_lr(self):
        return self.base_lrs

    def set_step(self, step: int):
        self.last_epoch = step
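    # Resume sketch (illustrative): after loading a checkpoint saved at global
    # step N, calling scheduler.set_step(N) fast-forwards the schedule so the
    # LR curve continues from where training left off.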
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/cosyvoice/utils/train_utils.py
ADDED
@@ -0,0 +1,289 @@
# Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang)
#               2023 Horizon Inc. (authors: Xingchen Song)
#               2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from contextlib import nullcontext
import logging
import os
import torch
import json
import re
import datetime
import yaml

# import deepspeed
import torch.optim as optim
import torch.distributed as dist

from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from torch.nn.utils import clip_grad_norm_

# from deepspeed.runtime.zero.stage_1_and_2 import estimate_zero2_model_states_mem_needs_all_live

from cosyvoice.dataset.dataset import Dataset
from cosyvoice.utils.scheduler import WarmupLR, NoamHoldAnnealing, ConstantLR

def init_distributed(args):
    world_size = int(os.environ.get('WORLD_SIZE', 1))
    local_rank = int(os.environ.get('LOCAL_RANK', 0))
    rank = int(os.environ.get('RANK', 0))
    logging.info('training on multiple gpus, this gpu {}'.format(local_rank) +
                 ', rank {}, world_size {}'.format(rank, world_size))
    if args.train_engine == 'torch_ddp':
        torch.cuda.set_device(local_rank)
        dist.init_process_group(args.dist_backend)
    else:
        # deepspeed is imported lazily because the top-level import is
        # commented out; without this the name would be undefined here.
        import deepspeed
        deepspeed.init_distributed(dist_backend=args.dist_backend)
    return world_size, local_rank, rank
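
# Note (added): WORLD_SIZE, RANK and LOCAL_RANK are the environment variables
# set by torchrun / torch.distributed.launch, e.g.
#     torchrun --nnodes=1 --nproc_per_node=8 train.py --train_engine torch_ddp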


def init_dataset_and_dataloader(args, configs):
    train_dataset = Dataset(args.train_data, data_pipeline=configs['data_pipeline'], mode='train', shuffle=True, partition=True)
    cv_dataset = Dataset(args.cv_data, data_pipeline=configs['data_pipeline'], mode='train', shuffle=False, partition=False)

    # do not use persistent_workers=True, as the whisper tokenizer reopens its tiktoken file each time the for loop starts
    train_data_loader = DataLoader(train_dataset,
                                   batch_size=None,
                                   pin_memory=args.pin_memory,
                                   num_workers=args.num_workers,
                                   prefetch_factor=args.prefetch)
    cv_data_loader = DataLoader(cv_dataset,
                                batch_size=None,
                                pin_memory=args.pin_memory,
                                num_workers=args.num_workers,
                                prefetch_factor=args.prefetch)
    return train_dataset, cv_dataset, train_data_loader, cv_data_loader


def check_modify_and_save_config(args, configs):
    if args.train_engine == "torch_ddp":
        configs['train_conf']["dtype"] = 'fp32'
    else:
        with open(args.deepspeed_config, 'r') as fin:
            ds_configs = json.load(fin)
        if "fp16" in ds_configs and ds_configs["fp16"]["enabled"]:
            configs['train_conf']["dtype"] = "fp16"
        elif "bf16" in ds_configs and ds_configs["bf16"]["enabled"]:
            configs['train_conf']["dtype"] = "bf16"
        else:
            configs['train_conf']["dtype"] = "fp32"
        assert ds_configs["train_micro_batch_size_per_gpu"] == 1
        # if deepspeed is used, override the ddp config
        configs['train_conf']['save_per_step'] = int(configs['train_conf']['save_per_step'] *
                                                     configs['train_conf']['accum_grad'] /
                                                     ds_configs["gradient_accumulation_steps"])
        configs['train_conf']['accum_grad'] = ds_configs["gradient_accumulation_steps"]
        configs['train_conf']['grad_clip'] = ds_configs["gradient_clipping"]
        configs['train_conf']['log_interval'] = ds_configs["steps_per_print"]
    return configs


def wrap_cuda_model(args, model):
    local_world_size = int(os.environ.get('LOCAL_WORLD_SIZE', 1))
    world_size = int(os.environ.get('WORLD_SIZE', 1))
    if args.train_engine == "torch_ddp":  # native pytorch ddp
        assert torch.cuda.is_available()
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
    else:
        if int(os.environ.get('RANK', 0)) == 0:
            logging.info("Estimating model states memory needs (zero2)...")
            # imported lazily because the top-level import is commented out
            from deepspeed.runtime.zero.stage_1_and_2 import estimate_zero2_model_states_mem_needs_all_live
            estimate_zero2_model_states_mem_needs_all_live(
                model,
                num_gpus_per_node=local_world_size,
                num_nodes=world_size // local_world_size)
    return model


def init_optimizer_and_scheduler(args, configs, model):
    if configs['train_conf']['optim'] == 'adam':
        optimizer = optim.Adam(model.parameters(), **configs['train_conf']['optim_conf'])
    elif configs['train_conf']['optim'] == 'adamw':
        optimizer = optim.AdamW(model.parameters(), **configs['train_conf']['optim_conf'])
    else:
        raise ValueError("unknown optimizer: " + configs['train_conf']['optim'])

    if configs['train_conf']['scheduler'] == 'warmuplr':
        scheduler_type = WarmupLR
        scheduler = WarmupLR(optimizer, **configs['train_conf']['scheduler_conf'])
    elif configs['train_conf']['scheduler'] == 'NoamHoldAnnealing':
        scheduler_type = NoamHoldAnnealing
        scheduler = NoamHoldAnnealing(optimizer, **configs['train_conf']['scheduler_conf'])
    elif configs['train_conf']['scheduler'] == 'constantlr':
        scheduler_type = ConstantLR
        scheduler = ConstantLR(optimizer)
    else:
        raise ValueError("unknown scheduler: " + configs['train_conf']['scheduler'])

    # use deepspeed optimizer for speedup
    if args.train_engine == "deepspeed":
        import deepspeed  # lazy import; the top-level import is commented out

        # deepspeed accepts a callable that builds the scheduler from the
        # (possibly deepspeed-managed) optimizer, so rebind the name here.
        def scheduler(opt):
            return scheduler_type(opt, **configs['train_conf']['scheduler_conf'])
        model, optimizer, _, scheduler = deepspeed.initialize(
            args=args,
            model=model,
            optimizer=None,
            lr_scheduler=scheduler,
            model_parameters=model.parameters())

    return model, optimizer, scheduler
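
# Illustrative scheduler config (assumed shape, mirroring the keys read above):
#
#     train_conf:
#       optim: adamw
#       optim_conf:
#         lr: 0.001
#       scheduler: NoamHoldAnnealing
#       scheduler_conf:
#         max_steps: 100000
#         warmup_steps: 5000
#         hold_steps: 30000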


def init_summarywriter(args):
    writer = None
    if int(os.environ.get('RANK', 0)) == 0:
        os.makedirs(args.model_dir, exist_ok=True)
        writer = SummaryWriter(args.tensorboard_dir)
    return writer


def save_model(model, model_name, info_dict):
    rank = int(os.environ.get('RANK', 0))
    model_dir = info_dict["model_dir"]
    save_model_path = os.path.join(model_dir, '{}.pt'.format(model_name))

    if info_dict["train_engine"] == "torch_ddp":
        if rank == 0:
            torch.save(model.module.state_dict(), save_model_path)
    else:
        with torch.no_grad():
            model.save_checkpoint(save_dir=model_dir,
                                  tag=model_name,
                                  client_state=info_dict)
    if rank == 0:
        info_path = re.sub(r'\.pt$', '.yaml', save_model_path)
        info_dict['save_time'] = datetime.datetime.now().strftime('%d/%m/%Y %H:%M:%S')
        with open(info_path, 'w') as fout:
            data = yaml.dump(info_dict)
            fout.write(data)
    logging.info('[Rank {}] Checkpoint: save to checkpoint {}'.format(rank, save_model_path))


def cosyvoice_join(group_join, info_dict):
    world_size = int(os.environ.get('WORLD_SIZE', 1))
    local_rank = int(os.environ.get('LOCAL_RANK', 0))
    rank = int(os.environ.get('RANK', 0))

    if info_dict["batch_idx"] != 0:
        # we try to join all ranks in both ddp and deepspeed mode, in case different ranks have different lr
        try:
            dist.monitored_barrier(group=group_join,
                                   timeout=group_join.options._timeout)
            return False
        except RuntimeError as e:
            logging.info("Detected uneven workload distribution: {}\n".format(e) +
                         "Break current worker to manually join all workers, " +
                         "world_size {}, current rank {}, current local_rank {}\n".format(
                             world_size, rank, local_rank))
            return True
    else:
        return False


def batch_forward(model, batch, info_dict):
    device = int(os.environ.get('LOCAL_RANK', 0))

    dtype = info_dict["dtype"]
    if dtype == "fp16":
        dtype = torch.float16
    elif dtype == "bf16":
        dtype = torch.bfloat16
    else:  # fp32
        dtype = torch.float32

    if info_dict['train_engine'] == 'torch_ddp':
        autocast = nullcontext()
    else:
        autocast = torch.cuda.amp.autocast(enabled=True, dtype=dtype, cache_enabled=False)

    with autocast:
        info_dict['loss_dict'] = model(batch, device)
    return info_dict


def batch_backward(model, info_dict):
    if info_dict["train_engine"] == "deepspeed":
        scaled_loss = model.backward(info_dict['loss_dict']['loss'])
    else:
        scaled_loss = info_dict['loss_dict']['loss'] / info_dict['accum_grad']
        scaled_loss.backward()

    info_dict['loss_dict']['loss'] = scaled_loss
    return info_dict
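
# Note (added): dividing the loss by accum_grad means that after accum_grad
# micro-batches the summed gradients equal the gradient of the averaged loss;
# e.g. with accum_grad=4 each backward() contributes loss/4 to the accumulated grads.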


def update_parameter_and_lr(model, optimizer, scheduler, info_dict):
    grad_norm = 0.0
    if info_dict['train_engine'] == "deepspeed":
        # deepspeed handles accumulation, clipping and the optimizer step internally
        info_dict["is_gradient_accumulation_boundary"] = model.is_gradient_accumulation_boundary()
        model.step()
        grad_norm = model.get_global_grad_norm()
    elif (info_dict['batch_idx'] + 1) % info_dict["accum_grad"] == 0:
        grad_norm = clip_grad_norm_(model.parameters(), info_dict['grad_clip'])
        if torch.isfinite(grad_norm):
            optimizer.step()
        optimizer.zero_grad()
        scheduler.step()
    info_dict["lr"] = optimizer.param_groups[0]['lr']
    info_dict["grad_norm"] = grad_norm
    return info_dict
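
# Per-batch loop order (added sketch, illustrative):
#
#     info_dict = batch_forward(model, batch, info_dict)
#     info_dict = batch_backward(model, info_dict)
#     info_dict = update_parameter_and_lr(model, optimizer, scheduler, info_dict)
#     log_per_step(writer, info_dict)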


def log_per_step(writer, info_dict):
    tag = info_dict["tag"]
    epoch = info_dict.get('epoch', 0)
    step = info_dict["step"]
    batch_idx = info_dict["batch_idx"]
    loss_dict = info_dict['loss_dict']
    rank = int(os.environ.get('RANK', 0))

    # only rank 0 writes to tensorboard to avoid multi-process writes
    if writer is not None:
        if (info_dict['train_engine'] == 'deepspeed' and info_dict['is_gradient_accumulation_boundary'] is True) or \
           (info_dict['train_engine'] == 'torch_ddp' and (info_dict['batch_idx'] + 1) % info_dict['accum_grad'] == 0):
            for k in ['epoch', 'lr', 'grad_norm']:
                writer.add_scalar('{}/{}'.format(tag, k), info_dict[k], step + 1)
            for k, v in loss_dict.items():
                writer.add_scalar('{}/{}'.format(tag, k), v, step + 1)

    # TRAIN & CV, shell log (stdout)
    if (info_dict['batch_idx'] + 1) % info_dict['log_interval'] == 0:
        log_str = '{} Batch {}/{} '.format(tag, epoch, batch_idx + 1)
        for name, value in loss_dict.items():
            log_str += '{} {:.6f} '.format(name, value)
        if tag == "TRAIN":
            log_str += 'lr {:.8f} grad_norm {:.6f}'.format(
                info_dict["lr"], info_dict['grad_norm'])
        log_str += ' rank {}'.format(rank)
        logging.debug(log_str)


def log_per_save(writer, info_dict):
    tag = info_dict["tag"]
    epoch = info_dict["epoch"]
    step = info_dict["step"]
    loss_dict = info_dict["loss_dict"]
    lr = info_dict['lr']
    rank = int(os.environ.get('RANK', 0))
    logging.info(
        'Epoch {} Step {} CV info lr {} {} rank {}'.format(
            epoch, step + 1, lr,
            ' '.join(['{}_{}'.format(k, v) for k, v in loss_dict.items()]), rank))

    if writer is not None:
        for k in ['epoch', 'lr']:
            writer.add_scalar('{}/{}'.format(tag, k), info_dict[k], step + 1)
        for k, v in loss_dict.items():
            writer.add_scalar('{}/{}'.format(tag, k), v, step + 1)
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/speech_tokenizer/__init__.py
ADDED
File without changes
r1-a/response_generation/Kimi-Audio/kimia_infer/models/tokenizer/glm4/speech_tokenizer/configuration_whisper.py
ADDED
@@ -0,0 +1,37 @@
from transformers import WhisperConfig


class WhisperVQConfig(WhisperConfig):
    def __init__(self,
                 pooling_kernel_size=None,
                 pooling_type="max",
                 pooling_position=0,
                 quantize_vocab_size=None,
                 quantize_position=16,
                 quantize_commit_coefficient=0.25,
                 quantize_loss_scale=1.0,
                 quantize_ema_decay=None,
                 quantize_restart_interval=None,
                 quantize_encoder_only=False,
                 quantize_causal_encoder=False,
                 quantize_causal_block_size=None,
                 skip_language_detection=False,
                 encoder_causal_attention=False,
                 encoder_causal_convolution=False,
                 **kwargs):
        self.pooling_kernel_size = pooling_kernel_size
        self.pooling_type = pooling_type
        self.pooling_position = pooling_position
        self.quantize_vocab_size = quantize_vocab_size
        self.quantize_position = quantize_position
        self.quantize_commit_coefficient = quantize_commit_coefficient
        self.quantize_loss_scale = quantize_loss_scale
        self.quantize_ema_decay = quantize_ema_decay
        self.quantize_restart_interval = quantize_restart_interval
        self.quantize_encoder_only = quantize_encoder_only
        self.quantize_causal_encoder = quantize_causal_encoder
        self.quantize_causal_block_size = quantize_causal_block_size
        self.skip_language_detection = skip_language_detection
        self.encoder_causal_attention = encoder_causal_attention
        self.encoder_causal_convolution = encoder_causal_convolution
        super().__init__(**kwargs)
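
# Minimal usage sketch (illustrative; the keyword values are assumptions):
#
#     config = WhisperVQConfig(
#         pooling_kernel_size=4,
#         quantize_vocab_size=16384,
#         quantize_position=16,
#         quantize_causal_block_size=200,
#     )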