File size: 3,171 Bytes
7375975
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
import math

import torch
from torch import nn

from fish_speech.models.vqgan.modules.fsq import DownsampleFiniteScalarQuantize
from fish_speech.models.vqgan.modules.wavenet import WaveNet
from fish_speech.models.vqgan.utils import sequence_mask
from fish_speech.utils.spectrogram import LogMelSpectrogram


class VQEncoder(nn.Module):
    """Inference-only audio encoder: WaveNet features + FSQ quantization.

    Converts raw waveforms into discrete VQ codes (and decodes indices
    back into latent features) using pretrained weights loaded at
    construction time. The module is placed in eval mode and all public
    methods run under ``torch.no_grad()``.
    """

    def __init__(
        self,
        checkpoint_path: str = "checkpoints/vq-gan-group-fsq-2x1024.pth",
    ):
        """Build the encoder stack and load pretrained weights.

        Args:
            checkpoint_path: Path to the pretrained state dict. Defaults
                to the original hard-coded location, so existing callers
                are unaffected.
        """
        super().__init__()

        self.encoder = WaveNet(
            input_channels=128,
            residual_channels=768,
            residual_layers=20,
            dilation_cycle=4,
        )

        self.quantizer = DownsampleFiniteScalarQuantize(
            input_dim=768, n_codebooks=1, n_groups=2, levels=[8, 5, 5, 5]
        )

        self.spec = LogMelSpectrogram(
            sample_rate=44100,
            n_fft=2048,
            win_length=2048,
            hop_length=512,
            n_mels=128,
            f_min=0.0,
            f_max=8000.0,
        )

        # Inference-only module: switch to eval mode before loading.
        self.eval()
        e = self.load_state_dict(
            torch.load(checkpoint_path, map_location="cpu"),
            strict=False,
        )

        # The checkpoint also carries decoder / quality-projection /
        # discriminator weights this encoder-only module deliberately
        # ignores; any other unexpected key (or any missing key) is a
        # real error.
        assert len(e.missing_keys) == 0, e.missing_keys
        assert all(
            k.startswith("decoder.")
            or k.startswith("quality_projection.")
            or k.startswith("discriminator.")
            for k in e.unexpected_keys
        ), e.unexpected_keys

    @torch.no_grad()
    def forward(self, audios, audio_lengths, sr=None):
        """Encode audio into masked, quantized latent features.

        Args:
            audios: Batched waveforms — assumed (batch, samples); confirm
                against LogMelSpectrogram's expected input.
            audio_lengths: Per-item valid sample counts at the input rate.
            sr: Source sample rate; when given, lengths are rescaled to
                the model's native 44100 Hz.

        Returns:
            Quantized feature tensor with padded frames zeroed out.
        """
        mel_spec = self.spec(audios, sample_rate=sr)

        if sr is not None:
            # Lengths were measured at `sr`; express them at 44100 Hz.
            audio_lengths = audio_lengths * 44100 // sr

        mel_lengths = audio_lengths // self.spec.hop_length
        mel_masks = (
            torch.arange(mel_spec.shape[2], device=mel_spec.device)
            < mel_lengths[:, None]
        )
        mel_masks_float_conv = mel_masks[:, None, :].float()
        mels = mel_spec * mel_masks_float_conv

        # Mask after every stage so padded frames never contaminate
        # valid positions.
        encoded_features = self.encoder(mels) * mel_masks_float_conv
        encoded_features = self.quantizer(encoded_features).z * mel_masks_float_conv

        return encoded_features

    @torch.no_grad()
    def indicies_to_vq_features(  # (sic) misspelled name kept: public API
        self,
        indices,
        feature_lengths,
    ):
        """Decode codebook indices back into masked latent features.

        Args:
            indices: Codebook index tensor; dim 2 is the downsampled
                time axis.
            feature_lengths: Valid lengths on the downsampled time axis.

        Returns:
            Decoded latent tensor with padded frames zeroed out.
        """
        # The quantizer downsamples time; scale lengths back up to the
        # mel-frame resolution of the decoded output.
        factor = math.prod(self.quantizer.downsample_factor)
        mel_masks = sequence_mask(feature_lengths * factor, indices.shape[2] * factor)
        mel_masks_float_conv = mel_masks[:, None, :].float()
        z = self.quantizer.decode(indices) * mel_masks_float_conv

        return z

    @torch.no_grad()
    def encode(self, audios, audio_lengths, sr=None):
        """Encode audio into codebook indices plus their valid lengths.

        Args:
            audios: Batched waveforms (cast to float32 internally).
            audio_lengths: Per-item valid sample counts at the input rate.
            sr: Source sample rate; when given, lengths are rescaled to
                44100 Hz, matching ``forward``.

        Returns:
            Tuple of (codebook indices, feature lengths on the
            downsampled time axis).
        """
        audios = audios.float()

        mels = self.spec(audios, sample_rate=sr)

        # BUGFIX: mirror `forward` — lengths measured at `sr` must be
        # converted to 44100 Hz before deriving mel lengths. Previously
        # a non-None `sr` here produced wrong masks and feature lengths.
        if sr is not None:
            audio_lengths = audio_lengths * 44100 // sr

        mel_lengths = audio_lengths // self.spec.hop_length
        mel_masks = sequence_mask(mel_lengths, mels.shape[2])
        mel_masks_float_conv = mel_masks[:, None, :].float()
        mels = mels * mel_masks_float_conv

        # Encode, then downscale lengths to the quantizer's time axis.
        encoded_features = self.encoder(mels) * mel_masks_float_conv
        feature_lengths = mel_lengths // math.prod(self.quantizer.downsample_factor)

        return self.quantizer.encode(encoded_features), feature_lengths