# v2s/fish_speech/models/v2s_unit/load_pretrain_model.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
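"""Utilities for building a pretrained AV-HuBERT encoder for the V2S unit model."""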
import sys, logging
from argparse import Namespace
from typing import Dict, List, Optional, Tuple, Any
import os
import torch
import torch.nn as nn
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from einops import repeat
from dataclasses import dataclass, field, replace
from fairseq import checkpoint_utils, tasks, utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.models import BaseFairseqModel, FairseqEncoder, register_model
from omegaconf import II, MISSING
# sys.path.insert(0,'/apdcephfs_nj7/share_303172353/ggyzhang/projects/v2s')
from fish_speech.models.v2s_unit.modules.hubert_pretraining import AVHubertPretrainingTask
from fish_speech.models.v2s_unit.modules.hubert import AVHubertModel, AVHubertConfig
logger = logging.getLogger(__name__)
MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(
["static", "uniform", "normal", "poisson"]
)
@dataclass
class VSPLLMConfig(FairseqDataclass):
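    """Fine-tuning configuration for the AV-HuBERT encoder.

    The masking/dropout fields mirror the options of the pretrained
    AV-HuBERT checkpoint; a subset of them is forwarded as ``arg_overrides``
    when the checkpoint is loaded in ``build_avhubert_encoder`` below.
    """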
w2v_path: str = field(
default=MISSING, metadata={"help": "path to hubert model"}
)
llm_ckpt_path: str = field(
default=MISSING, metadata={"help": "path to llama model"}
)
no_pretrained_weights: bool = field(
default=False,
metadata={"help": "if true, does not load pretrained weights"},
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
final_dropout: float = field(
default=0.0,
metadata={
"help": "dropout after transformer and before final projection"
},
)
dropout: float = field(
default=0.0,
metadata={"help": "dropout probability inside hubert model"},
)
attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights "
"inside hubert model"
},
)
activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN "
"inside hubert model"
},
)
# masking
apply_mask: bool = field(
default=False, metadata={"help": "apply masking during fine-tuning"}
)
mask_length: int = field(
default=10, metadata={"help": "repeat the mask indices multiple times"}
)
mask_prob: float = field(
default=0.5,
metadata={
"help": "probability of replacing a token with mask "
"(normalized by length)"
},
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose masks"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
# channel masking
mask_channel_length: int = field(
default=10,
metadata={"help": "length of the mask for features (channels)"},
)
mask_channel_prob: float = field(
default=0.0,
metadata={"help": "probability of replacing a feature with 0"},
)
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_channel_overlap: bool = field(
default=False,
metadata={"help": "whether to allow channel masks to overlap"},
)
masking_updates: int = field(
default=0,
metadata={"help": "dont finetune hubert for this many updates"},
)
feature_grad_mult: float = field(
default=0.0,
metadata={"help": "reset feature grad mult in hubert to this"},
)
layerdrop: float = field(
default=0.0,
metadata={"help": "probability of dropping a layer in hubert"},
)
normalize: bool = II("task.normalize")
data: str = II("task.data")
# this holds the loaded hubert args
w2v_args: Any = None
encoder_embed_dim: int = field(
default=1024, metadata={"help": "encoder embedding dimension"}
)
decoder_embed_dim: int = field(
default=4096, metadata={"help": "decoder embedding dimension"}
)
freeze_finetune_updates: int = field(
default=0,
metadata={"help": "dont finetune hubert for this many updates"},
)
class HubertEncoderWrapper(FairseqEncoder):
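    """Thin ``FairseqEncoder`` wrapper around a pretrained AV-HuBERT model."""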
def __init__(self, w2v_model):
super().__init__(None)
self.w2v_model = w2v_model
def forward_(self, source, padding_mask, **kwargs):
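        # Video-only path: AV-HuBERT consumes a dict with separate "video"
        # and "audio" streams, so the audio stream is left empty here.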
        src = {"video": source, "audio": None}
w2v_args = {
"source": src,
"padding_mask": padding_mask,
}
x, padding_mask = self.w2v_model.extract_finetune(**w2v_args)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
return {
"encoder_out": x, # T x B x C
"encoder_padding_mask": padding_mask, # B x T
"padding_mask": padding_mask
}
def forward(self, source, padding_mask, **kwargs):
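        # Unlike ``forward_``, the caller is expected to pass ``source``
        # already in the layout that ``extract_finetune`` consumes
        # (e.g. a dict with "video"/"audio" entries).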
w2v_args = {
"source": source,
"padding_mask": padding_mask,
}
x, padding_mask = self.w2v_model.extract_finetune(**w2v_args)
return {
"encoder_out": x, # T x B x C
"encoder_padding_mask": padding_mask, # B x T
"padding_mask": padding_mask
}
def reorder_encoder_out(self, encoder_out, new_order):
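        # Assumes ``encoder_out`` is laid out T x B x C (as produced by
        # ``forward_``): the batch dimension is dim 1 for the features and
        # dim 0 for the padding masks.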
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = encoder_out[
"encoder_out"
].index_select(1, new_order)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(0, new_order)
if encoder_out["padding_mask"] is not None:
encoder_out["padding_mask"] = encoder_out[
"padding_mask"
].index_select(0, new_order)
return encoder_out
def build_avhubert_encoder(ckpt_path):
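    """Build a ``HubertEncoderWrapper`` around an AV-HuBERT checkpoint.

    Loads the checkpoint at ``ckpt_path`` to CPU, rebuilds the pretraining
    task and model config stored inside it, instantiates the AV-HuBERT
    model, loads the pretrained weights (unless ``no_pretrained_weights``
    is set), and strips the pretraining-only modules.
    """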
cfg = VSPLLMConfig(
        w2v_path=ckpt_path,
        apply_mask=False,
mask_selection="static",
mask_length=10,
mask_other=0,
mask_prob=0.75,
mask_channel_selection='static',
mask_channel_length=64,
mask_channel_other=0,
mask_channel_prob=0.5,
layerdrop=0.1,
dropout=0.0,
activation_dropout=0.1,
attention_dropout=0.0,
feature_grad_mult=1.0,
encoder_embed_dim=1024,
decoder_embed_dim=4096,
freeze_finetune_updates=40000,
normalize=True,
)
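    # Override the dropout/masking hyper-parameters stored in the checkpoint
    # with the fine-tuning values configured above.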
arg_overrides = {
"dropout": cfg.dropout,
"activation_dropout": cfg.activation_dropout,
"dropout_input": cfg.dropout_input,
"attention_dropout": cfg.attention_dropout,
"mask_length": cfg.mask_length,
"mask_prob": cfg.mask_prob,
"mask_selection": cfg.mask_selection,
"mask_other": cfg.mask_other,
"no_mask_overlap": cfg.no_mask_overlap,
"mask_channel_length": cfg.mask_channel_length,
"mask_channel_prob": cfg.mask_channel_prob,
"mask_channel_selection": cfg.mask_channel_selection,
"mask_channel_other": cfg.mask_channel_other,
"no_mask_channel_overlap": cfg.no_mask_channel_overlap,
"encoder_layerdrop": cfg.layerdrop,
"feature_grad_mult": cfg.feature_grad_mult,
}
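    # Load the checkpoint on CPU and recover the config it was trained with.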
w2v_path = ckpt_path
state = checkpoint_utils.load_checkpoint_to_cpu(w2v_path, arg_overrides)
w2v_args = state.get("cfg", None)
assert cfg.normalize == w2v_args.task.normalize, (
"Fine-tuning works best when data normalization is the same. "
"Please check that --normalize is set or unset for "
"both pre-training and here"
)
w2v_args.task.data = cfg.data
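    # Rebuild the pretraining task so the model can be constructed with the
    # same dictionaries/label setup as during pretraining.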
task_pretrain = AVHubertPretrainingTask.setup_task(w2v_args.task)
if state is not None:
task_pretrain.load_state_dict(state['task_state'])
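    # Re-create the AV-HuBERT model from the config saved in the checkpoint.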
model_cfg = AVHubertConfig()
model_cfg = replace(model_cfg, **(w2v_args.model))
    encoder_ = AVHubertModel.build_model(model_cfg, task_pretrain)
encoder = HubertEncoderWrapper(encoder_)
if state is not None and not cfg.no_pretrained_weights:
# set strict=False because we omit some modules
del state['model']['mask_emb']
encoder.w2v_model.load_state_dict(state["model"], strict=False)
encoder.w2v_model.remove_pretraining_modules()
return encoder
# checkpoint_path = '/apdcephfs_nj7/share_303172353/ggyzhang/projects/v2s/checkpoints/large_vox_iter5.pt'
# encoder = build_avhubert_encoder(checkpoint_path)
# print(encoder)
# # print('*'*50)
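# Minimal usage sketch (the checkpoint path below is a placeholder, not a file
# shipped with this repo):
#
# if __name__ == "__main__":
#     ckpt = "/path/to/large_vox_iter5.pt"  # hypothetical local AV-HuBERT checkpoint
#     encoder = build_avhubert_encoder(ckpt)
#     encoder.eval()
#     n_params = sum(p.numel() for p in encoder.parameters())
#     print(f"loaded AV-HuBERT encoder with {n_params} parameters")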