Elesh Vaishnav committed
Commit 1816524 · verified · 1 Parent(s): 12c964f

Upload 64 files

This view is limited to 50 files because the commit contains too many changes.

Files changed (50)
  1. rvc/configs/32000.json +42 -0
  2. rvc/configs/40000.json +42 -0
  3. rvc/configs/48000.json +42 -0
  4. rvc/configs/config.py +98 -0
  5. rvc/infer/infer.py +496 -0
  6. rvc/infer/pipeline.py +566 -0
  7. rvc/lib/algorithm/__init__.py +0 -0
  8. rvc/lib/algorithm/attentions.py +243 -0
  9. rvc/lib/algorithm/commons.py +138 -0
  10. rvc/lib/algorithm/discriminators.py +149 -0
  11. rvc/lib/algorithm/encoders.py +209 -0
  12. rvc/lib/algorithm/generators/__init__.py +0 -0
  13. rvc/lib/algorithm/generators/hifigan.py +228 -0
  14. rvc/lib/algorithm/generators/hifigan_mrf.py +374 -0
  15. rvc/lib/algorithm/generators/hifigan_nsf.py +235 -0
  16. rvc/lib/algorithm/generators/refinegan.py +451 -0
  17. rvc/lib/algorithm/modules.py +117 -0
  18. rvc/lib/algorithm/normalization.py +26 -0
  19. rvc/lib/algorithm/residuals.py +261 -0
  20. rvc/lib/algorithm/synthesizers.py +243 -0
  21. rvc/lib/predictors/F0Extractor.py +105 -0
  22. rvc/lib/predictors/FCPE.py +920 -0
  23. rvc/lib/predictors/RMVPE.py +564 -0
  24. rvc/lib/predictors/f0.py +118 -0
  25. rvc/lib/tools/__pycache__/prerequisites_download.cpython-310.pyc +0 -0
  26. rvc/lib/tools/analyzer.py +76 -0
  27. rvc/lib/tools/gdown.py +285 -0
  28. rvc/lib/tools/launch_tensorboard.py +21 -0
  29. rvc/lib/tools/model_download.py +226 -0
  30. rvc/lib/tools/prerequisites_download.py +153 -0
  31. rvc/lib/tools/pretrained_selector.py +13 -0
  32. rvc/lib/tools/split_audio.py +79 -0
  33. rvc/lib/tools/tts.py +29 -0
  34. rvc/lib/tools/tts_voices.json +0 -0
  35. rvc/lib/utils.py +156 -0
  36. rvc/lib/zluda.py +76 -0
  37. rvc/models/embedders/.gitkeep +1 -0
  38. rvc/models/embedders/embedders_custom/.gitkeep +1 -0
  39. rvc/models/formant/.gitkeep +1 -0
  40. rvc/models/predictors/.gitkeep +0 -0
  41. rvc/models/pretraineds/.gitkeep +0 -0
  42. rvc/models/pretraineds/custom/.gitkeep +1 -0
  43. rvc/models/pretraineds/hifi-gan/.gitkeep +0 -0
  44. rvc/realtime/audio.py +349 -0
  45. rvc/realtime/callbacks.py +114 -0
  46. rvc/realtime/core.py +376 -0
  47. rvc/realtime/pipeline.py +414 -0
  48. rvc/realtime/utils/torch.py +8 -0
  49. rvc/realtime/utils/vad.py +85 -0
  50. rvc/train/anyprecision_optimizer.py +182 -0
rvc/configs/32000.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "seed": 1234,
+     "learning_rate": 1e-4,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "lr_decay": 0.999875,
+     "segment_size": 12800,
+     "c_mel": 45,
+     "c_kl": 1.0
+   },
+   "data": {
+     "max_wav_value": 32768.0,
+     "sample_rate": 32000,
+     "filter_length": 1024,
+     "hop_length": 320,
+     "win_length": 1024,
+     "n_mel_channels": 80,
+     "mel_fmin": 0.0,
+     "mel_fmax": null
+   },
+   "model": {
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "text_enc_hidden_dim": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3,7,11],
+     "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
+     "upsample_rates": [10,8,2,2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [20,16,4,4],
+     "use_spectral_norm": false,
+     "gin_channels": 256,
+     "spk_embed_dim": 109
+   }
+ }
rvc/configs/40000.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "seed": 1234,
+     "learning_rate": 1e-4,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "lr_decay": 0.999875,
+     "segment_size": 12800,
+     "c_mel": 45,
+     "c_kl": 1.0
+   },
+   "data": {
+     "max_wav_value": 32768.0,
+     "sample_rate": 40000,
+     "filter_length": 2048,
+     "hop_length": 400,
+     "win_length": 2048,
+     "n_mel_channels": 125,
+     "mel_fmin": 0.0,
+     "mel_fmax": null
+   },
+   "model": {
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "text_enc_hidden_dim": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3,7,11],
+     "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
+     "upsample_rates": [10,10,2,2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16,16,4,4],
+     "use_spectral_norm": false,
+     "gin_channels": 256,
+     "spk_embed_dim": 109
+   }
+ }
rvc/configs/48000.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "seed": 1234,
+     "learning_rate": 1e-4,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "lr_decay": 0.999875,
+     "segment_size": 17280,
+     "c_mel": 45,
+     "c_kl": 1.0
+   },
+   "data": {
+     "max_wav_value": 32768.0,
+     "sample_rate": 48000,
+     "filter_length": 2048,
+     "hop_length": 480,
+     "win_length": 2048,
+     "n_mel_channels": 128,
+     "mel_fmin": 0.0,
+     "mel_fmax": null
+   },
+   "model": {
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "text_enc_hidden_dim": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3,7,11],
+     "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
+     "upsample_rates": [12,10,2,2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [24,20,4,4],
+     "use_spectral_norm": false,
+     "gin_channels": 256,
+     "spk_embed_dim": 109
+   }
+ }
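A note on how the three configs above fit together: in each one the product of the model's upsample_rates equals the data hop_length (10·8·2·2 = 320, 10·10·2·2 = 400, 12·10·2·2 = 480), so the generator expands one feature frame back into exactly one hop of audio samples at the target sample_rate. A minimal sanity check (plain Python, not part of the upload):

    import json, math

    for path in ("rvc/configs/32000.json", "rvc/configs/40000.json", "rvc/configs/48000.json"):
        with open(path) as f:
            cfg = json.load(f)
        # Each upsampling stage multiplies the frame rate, so the product must equal hop_length.
        assert math.prod(cfg["model"]["upsample_rates"]) == cfg["data"]["hop_length"]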
rvc/configs/config.py ADDED
@@ -0,0 +1,98 @@
+ import torch
+ import json
+ import os
+
+ version_config_paths = [
+     os.path.join("48000.json"),
+     os.path.join("40000.json"),
+     os.path.join("32000.json"),
+ ]
+
+
+ def singleton(cls):
+     instances = {}
+
+     def get_instance(*args, **kwargs):
+         if cls not in instances:
+             instances[cls] = cls(*args, **kwargs)
+         return instances[cls]
+
+     return get_instance
+
+
+ @singleton
+ class Config:
+     def __init__(self):
+         self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
+         self.gpu_name = (
+             torch.cuda.get_device_name(int(self.device.split(":")[-1]))
+             if self.device.startswith("cuda")
+             else None
+         )
+         self.json_config = self.load_config_json()
+         self.gpu_mem = None
+         self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
+
+     def load_config_json(self):
+         configs = {}
+         for config_file in version_config_paths:
+             config_path = os.path.join("rvc", "configs", config_file)
+             with open(config_path, "r") as f:
+                 configs[config_file] = json.load(f)
+         return configs
+
+     def device_config(self):
+         if self.device.startswith("cuda"):
+             self.set_cuda_config()
+         else:
+             self.device = "cpu"
+
+         # Configuration for 6GB GPU memory
+         x_pad, x_query, x_center, x_max = (1, 6, 38, 41)
+         if self.gpu_mem is not None and self.gpu_mem <= 4:
+             # Configuration for 5GB GPU memory
+             x_pad, x_query, x_center, x_max = (1, 5, 30, 32)
+
+         return x_pad, x_query, x_center, x_max
+
+     def set_cuda_config(self):
+         i_device = int(self.device.split(":")[-1])
+         self.gpu_name = torch.cuda.get_device_name(i_device)
+         self.gpu_mem = torch.cuda.get_device_properties(i_device).total_memory // (
+             1024**3
+         )
+
+
+ def max_vram_gpu(gpu):
+     if torch.cuda.is_available():
+         gpu_properties = torch.cuda.get_device_properties(gpu)
+         total_memory_gb = round(gpu_properties.total_memory / 1024 / 1024 / 1024)
+         return total_memory_gb
+     else:
+         return "8"
+
+
+ def get_gpu_info():
+     ngpu = torch.cuda.device_count()
+     gpu_infos = []
+     if torch.cuda.is_available() or ngpu != 0:
+         for i in range(ngpu):
+             gpu_name = torch.cuda.get_device_name(i)
+             mem = int(
+                 torch.cuda.get_device_properties(i).total_memory / 1024 / 1024 / 1024
+                 + 0.4
+             )
+             gpu_infos.append(f"{i}: {gpu_name} ({mem} GB)")
+     if len(gpu_infos) > 0:
+         gpu_info = "\n".join(gpu_infos)
+     else:
+         gpu_info = "Unfortunately, there is no compatible GPU available to support your training."
+     return gpu_info
+
+
+ def get_number_of_gpus():
+     if torch.cuda.is_available():
+         num_gpus = torch.cuda.device_count()
+         return "-".join(map(str, range(num_gpus)))
+     else:
+         return "-"
rvc/infer/infer.py ADDED
@@ -0,0 +1,496 @@
1
+ import os
2
+ import sys
3
+ import soxr
4
+ import time
5
+ import torch
6
+ import librosa
7
+ import logging
8
+ import traceback
9
+ import numpy as np
10
+ import soundfile as sf
11
+ import noisereduce as nr
12
+ from pedalboard import (
13
+ Pedalboard,
14
+ Chorus,
15
+ Distortion,
16
+ Reverb,
17
+ PitchShift,
18
+ Limiter,
19
+ Gain,
20
+ Bitcrush,
21
+ Clipping,
22
+ Compressor,
23
+ Delay,
24
+ )
25
+
26
+ now_dir = os.getcwd()
27
+ sys.path.append(now_dir)
28
+
29
+ from rvc.infer.pipeline import Pipeline as VC
30
+ from rvc.lib.utils import load_audio_infer, load_embedding
31
+ from rvc.lib.tools.split_audio import process_audio, merge_audio
32
+ from rvc.lib.algorithm.synthesizers import Synthesizer
33
+ from rvc.configs.config import Config
34
+
35
+ logging.getLogger("httpx").setLevel(logging.WARNING)
36
+ logging.getLogger("httpcore").setLevel(logging.WARNING)
37
+ logging.getLogger("faiss").setLevel(logging.WARNING)
38
+ logging.getLogger("faiss.loader").setLevel(logging.WARNING)
39
+
40
+
41
+ class VoiceConverter:
42
+ """
43
+ A class for performing voice conversion using the Retrieval-Based Voice Conversion (RVC) method.
44
+ """
45
+
46
+ def __init__(self):
47
+ """
48
+ Initializes the VoiceConverter with default configuration, and sets up models and parameters.
49
+ """
50
+ self.config = Config() # Load configuration
51
+ self.hubert_model = (
52
+ None # Initialize the Hubert model (for embedding extraction)
53
+ )
54
+ self.last_embedder_model = None # Last used embedder model
55
+ self.tgt_sr = None # Target sampling rate for the output audio
56
+ self.net_g = None # Generator network for voice conversion
57
+ self.vc = None # Voice conversion pipeline instance
58
+ self.cpt = None # Checkpoint for loading model weights
59
+ self.version = None # Model version
60
+ self.n_spk = None # Number of speakers in the model
61
+ self.use_f0 = None # Whether the model uses F0
62
+ self.loaded_model = None
63
+
64
+ def load_hubert(self, embedder_model: str, embedder_model_custom: str = None):
65
+ """
66
+ Loads the HuBERT model for speaker embedding extraction.
67
+
68
+ Args:
69
+ embedder_model (str): Path to the pre-trained HuBERT model.
70
+ embedder_model_custom (str): Path to the custom HuBERT model.
71
+ """
72
+ self.hubert_model = load_embedding(embedder_model, embedder_model_custom)
73
+ self.hubert_model = self.hubert_model.to(self.config.device).float()
74
+ self.hubert_model.eval()
75
+
76
+ @staticmethod
77
+ def remove_audio_noise(data, sr, reduction_strength=0.7):
78
+ """
79
+ Removes noise from an audio file using the NoiseReduce library.
80
+
81
+ Args:
82
+ data (numpy.ndarray): The audio data as a NumPy array.
83
+ sr (int): The sample rate of the audio data.
84
+ reduction_strength (float): Strength of the noise reduction. Default is 0.7.
85
+ """
86
+ try:
87
+ reduced_noise = nr.reduce_noise(
88
+ y=data, sr=sr, prop_decrease=reduction_strength
89
+ )
90
+ return reduced_noise
91
+ except Exception as error:
92
+ print(f"An error occurred removing audio noise: {error}")
93
+ return None
94
+
95
+ @staticmethod
96
+ def convert_audio_format(input_path, output_path, output_format):
97
+ """
98
+ Converts an audio file to a specified output format.
99
+
100
+ Args:
101
+ input_path (str): Path to the input audio file.
102
+ output_path (str): Path to the output audio file.
103
+ output_format (str): Desired audio format (e.g., "WAV", "MP3").
104
+ """
105
+ try:
106
+ if output_format != "WAV":
107
+ print(f"Saving audio as {output_format}...")
108
+ audio, sample_rate = librosa.load(input_path, sr=None)
109
+ common_sample_rates = [
110
+ 8000,
111
+ 11025,
112
+ 12000,
113
+ 16000,
114
+ 22050,
115
+ 24000,
116
+ 32000,
117
+ 44100,
118
+ 48000,
119
+ ]
120
+ target_sr = min(common_sample_rates, key=lambda x: abs(x - sample_rate))
121
+ audio = librosa.resample(
122
+ audio, orig_sr=sample_rate, target_sr=target_sr, res_type="soxr_vhq"
123
+ )
124
+ sf.write(output_path, audio, target_sr, format=output_format.lower())
125
+ return output_path
126
+ except Exception as error:
127
+ print(f"An error occurred converting the audio format: {error}")
128
+
129
+ @staticmethod
130
+ def post_process_audio(
131
+ audio_input,
132
+ sample_rate,
133
+ **kwargs,
134
+ ):
135
+ board = Pedalboard()
136
+ if kwargs.get("reverb", False):
137
+ reverb = Reverb(
138
+ room_size=kwargs.get("reverb_room_size", 0.5),
139
+ damping=kwargs.get("reverb_damping", 0.5),
140
+ wet_level=kwargs.get("reverb_wet_level", 0.33),
141
+ dry_level=kwargs.get("reverb_dry_level", 0.4),
142
+ width=kwargs.get("reverb_width", 1.0),
143
+ freeze_mode=kwargs.get("reverb_freeze_mode", 0),
144
+ )
145
+ board.append(reverb)
146
+ if kwargs.get("pitch_shift", False):
147
+ pitch_shift = PitchShift(semitones=kwargs.get("pitch_shift_semitones", 0))
148
+ board.append(pitch_shift)
149
+ if kwargs.get("limiter", False):
150
+ limiter = Limiter(
151
+ threshold_db=kwargs.get("limiter_threshold", -6),
152
+ release_ms=kwargs.get("limiter_release", 0.05),
153
+ )
154
+ board.append(limiter)
155
+ if kwargs.get("gain", False):
156
+ gain = Gain(gain_db=kwargs.get("gain_db", 0))
157
+ board.append(gain)
158
+ if kwargs.get("distortion", False):
159
+ distortion = Distortion(drive_db=kwargs.get("distortion_gain", 25))
160
+ board.append(distortion)
161
+ if kwargs.get("chorus", False):
162
+ chorus = Chorus(
163
+ rate_hz=kwargs.get("chorus_rate", 1.0),
164
+ depth=kwargs.get("chorus_depth", 0.25),
165
+ centre_delay_ms=kwargs.get("chorus_delay", 7),
166
+ feedback=kwargs.get("chorus_feedback", 0.0),
167
+ mix=kwargs.get("chorus_mix", 0.5),
168
+ )
169
+ board.append(chorus)
170
+ if kwargs.get("bitcrush", False):
171
+ bitcrush = Bitcrush(bit_depth=kwargs.get("bitcrush_bit_depth", 8))
172
+ board.append(bitcrush)
173
+ if kwargs.get("clipping", False):
174
+ clipping = Clipping(threshold_db=kwargs.get("clipping_threshold", 0))
175
+ board.append(clipping)
176
+ if kwargs.get("compressor", False):
177
+ compressor = Compressor(
178
+ threshold_db=kwargs.get("compressor_threshold", 0),
179
+ ratio=kwargs.get("compressor_ratio", 1),
180
+ attack_ms=kwargs.get("compressor_attack", 1.0),
181
+ release_ms=kwargs.get("compressor_release", 100),
182
+ )
183
+ board.append(compressor)
184
+ if kwargs.get("delay", False):
185
+ delay = Delay(
186
+ delay_seconds=kwargs.get("delay_seconds", 0.5),
187
+ feedback=kwargs.get("delay_feedback", 0.0),
188
+ mix=kwargs.get("delay_mix", 0.5),
189
+ )
190
+ board.append(delay)
191
+ return board(audio_input, sample_rate)
192
+
193
+ def convert_audio(
194
+ self,
195
+ audio_input_path: str,
196
+ audio_output_path: str,
197
+ model_path: str,
198
+ index_path: str,
199
+ pitch: int = 0,
200
+ f0_method: str = "rmvpe",
201
+ index_rate: float = 0.75,
202
+ volume_envelope: float = 1.0,
203
+ protect: float = 0.5,
204
+ hop_length: int = 128,
205
+ split_audio: bool = False,
206
+ f0_autotune: bool = False,
207
+ f0_autotune_strength: float = 1,
208
+ embedder_model: str = "contentvec",
209
+ embedder_model_custom: str = None,
210
+ clean_audio: bool = False,
211
+ clean_strength: float = 0.5,
212
+ export_format: str = "WAV",
213
+ post_process: bool = False,
214
+ resample_sr: int = 0,
215
+ sid: int = 0,
216
+ proposed_pitch: bool = False,
217
+ proposed_pitch_threshold: float = 155.0,
218
+ **kwargs,
219
+ ):
220
+ """
221
+ Performs voice conversion on the input audio.
222
+
223
+ Args:
224
+ pitch (int): Key for F0 up-sampling.
225
+ index_rate (float): Rate for index matching.
226
+ volume_envelope (int): RMS mix rate.
227
+ protect (float): Protection rate for certain audio segments.
228
+ hop_length (int): Hop length for audio processing.
229
+ f0_method (str): Method for F0 extraction.
230
+ audio_input_path (str): Path to the input audio file.
231
+ audio_output_path (str): Path to the output audio file.
232
+ model_path (str): Path to the voice conversion model.
233
+ index_path (str): Path to the index file.
234
+ split_audio (bool): Whether to split the audio for processing.
235
+ f0_autotune (bool): Whether to use F0 autotune.
236
+ clean_audio (bool): Whether to clean the audio.
237
+ clean_strength (float): Strength of the audio cleaning.
238
+ export_format (str): Format for exporting the audio.
239
+ f0_file (str): Path to the F0 file.
240
+ embedder_model (str): Path to the embedder model.
241
+ embedder_model_custom (str): Path to the custom embedder model.
242
+ resample_sr (int, optional): Resample sampling rate. Default is 0.
243
+ sid (int, optional): Speaker ID. Default is 0.
244
+ **kwargs: Additional keyword arguments.
245
+ """
246
+ if not model_path:
247
+ print("No model path provided. Aborting conversion.")
248
+ return
249
+
250
+ self.get_vc(model_path, sid)
251
+
252
+ try:
253
+ start_time = time.time()
254
+ print(f"Converting audio '{audio_input_path}'...")
255
+
256
+ audio = load_audio_infer(
257
+ audio_input_path,
258
+ 16000,
259
+ **kwargs,
260
+ )
261
+ audio_max = np.abs(audio).max() / 0.95
262
+
263
+ if audio_max > 1:
264
+ audio /= audio_max
265
+
266
+ if not self.hubert_model or embedder_model != self.last_embedder_model:
267
+ self.load_hubert(embedder_model, embedder_model_custom)
268
+ self.last_embedder_model = embedder_model
269
+
270
+ file_index = (
271
+ index_path.strip()
272
+ .strip('"')
273
+ .strip("\n")
274
+ .strip('"')
275
+ .strip()
276
+ .replace("trained", "added")
277
+ )
278
+
279
+ if self.tgt_sr != resample_sr >= 16000:
280
+ self.tgt_sr = resample_sr
281
+
282
+ if split_audio:
283
+ chunks, intervals = process_audio(audio, 16000)
284
+ print(f"Audio split into {len(chunks)} chunks for processing.")
285
+ else:
286
+ chunks = []
287
+ chunks.append(audio)
288
+
289
+ converted_chunks = []
290
+ for c in chunks:
291
+ audio_opt = self.vc.pipeline(
292
+ model=self.hubert_model,
293
+ net_g=self.net_g,
294
+ sid=sid,
295
+ audio=c,
296
+ pitch=pitch,
297
+ f0_method=f0_method,
298
+ file_index=file_index,
299
+ index_rate=index_rate,
300
+ pitch_guidance=self.use_f0,
301
+ volume_envelope=volume_envelope,
302
+ version=self.version,
303
+ protect=protect,
304
+ f0_autotune=f0_autotune,
305
+ f0_autotune_strength=f0_autotune_strength,
306
+ proposed_pitch=proposed_pitch,
307
+ proposed_pitch_threshold=proposed_pitch_threshold,
308
+ )
309
+ converted_chunks.append(audio_opt)
310
+ if split_audio:
311
+ print(f"Converted audio chunk {len(converted_chunks)}")
312
+
313
+ if split_audio:
314
+ audio_opt = merge_audio(
315
+ chunks, converted_chunks, intervals, 16000, self.tgt_sr
316
+ )
317
+ else:
318
+ audio_opt = converted_chunks[0]
319
+
320
+ if clean_audio:
321
+ cleaned_audio = self.remove_audio_noise(
322
+ audio_opt, self.tgt_sr, clean_strength
323
+ )
324
+ if cleaned_audio is not None:
325
+ audio_opt = cleaned_audio
326
+
327
+ if post_process:
328
+ audio_opt = self.post_process_audio(
329
+ audio_input=audio_opt,
330
+ sample_rate=self.tgt_sr,
331
+ **kwargs,
332
+ )
333
+
334
+ sf.write(audio_output_path, audio_opt, self.tgt_sr, format="WAV")
335
+ output_path_format = audio_output_path.replace(
336
+ ".wav", f".{export_format.lower()}"
337
+ )
338
+ audio_output_path = self.convert_audio_format(
339
+ audio_output_path, output_path_format, export_format
340
+ )
341
+
342
+ elapsed_time = time.time() - start_time
343
+ print(
344
+ f"Conversion completed at '{audio_output_path}' in {elapsed_time:.2f} seconds."
345
+ )
346
+ except Exception as error:
347
+ print(f"An error occurred during audio conversion: {error}")
348
+ print(traceback.format_exc())
349
+
350
+ def convert_audio_batch(
351
+ self,
352
+ audio_input_paths: str,
353
+ audio_output_path: str,
354
+ **kwargs,
355
+ ):
356
+ """
357
+ Performs voice conversion on a batch of input audio files.
358
+
359
+ Args:
360
+ audio_input_paths (str): List of paths to the input audio files.
361
+ audio_output_path (str): Path to the output audio file.
362
+ resample_sr (int, optional): Resample sampling rate. Default is 0.
363
+ sid (int, optional): Speaker ID. Default is 0.
364
+ **kwargs: Additional keyword arguments.
365
+ """
366
+ pid = os.getpid()
367
+ try:
368
+ with open(
369
+ os.path.join(now_dir, "assets", "infer_pid.txt"), "w"
370
+ ) as pid_file:
371
+ pid_file.write(str(pid))
372
+ start_time = time.time()
373
+ print(f"Converting audio batch '{audio_input_paths}'...")
374
+ audio_files = [
375
+ f
376
+ for f in os.listdir(audio_input_paths)
377
+ if f.lower().endswith(
378
+ (
379
+ "wav",
380
+ "mp3",
381
+ "flac",
382
+ "ogg",
383
+ "opus",
384
+ "m4a",
385
+ "mp4",
386
+ "aac",
387
+ "alac",
388
+ "wma",
389
+ "aiff",
390
+ "webm",
391
+ "ac3",
392
+ )
393
+ )
394
+ ]
395
+ print(f"Detected {len(audio_files)} audio files for inference.")
396
+ for a in audio_files:
397
+ new_input = os.path.join(audio_input_paths, a)
398
+ new_output = os.path.splitext(a)[0] + "_output.wav"
399
+ new_output = os.path.join(audio_output_path, new_output)
400
+ if os.path.exists(new_output):
401
+ continue
402
+ self.convert_audio(
403
+ audio_input_path=new_input,
404
+ audio_output_path=new_output,
405
+ **kwargs,
406
+ )
407
+ print(f"Conversion completed at '{audio_input_paths}'.")
408
+ elapsed_time = time.time() - start_time
409
+ print(f"Batch conversion completed in {elapsed_time:.2f} seconds.")
410
+ except Exception as error:
411
+ print(f"An error occurred during audio batch conversion: {error}")
412
+ print(traceback.format_exc())
413
+ finally:
414
+ os.remove(os.path.join(now_dir, "assets", "infer_pid.txt"))
415
+
416
+ def get_vc(self, weight_root, sid):
417
+ """
418
+ Loads the voice conversion model and sets up the pipeline.
419
+
420
+ Args:
421
+ weight_root (str): Path to the model weights.
422
+ sid (int): Speaker ID.
423
+ """
424
+ if sid == "" or sid == []:
425
+ self.cleanup_model()
426
+ if torch.cuda.is_available():
427
+ torch.cuda.empty_cache()
428
+
429
+ if not self.loaded_model or self.loaded_model != weight_root:
430
+ self.load_model(weight_root)
431
+ if self.cpt is not None:
432
+ self.setup_network()
433
+ self.setup_vc_instance()
434
+ self.loaded_model = weight_root
435
+ else:
436
+ self.vc = None
437
+ self.loaded_model = None
438
+
439
+ def cleanup_model(self):
440
+ """
441
+ Cleans up the model and releases resources.
442
+ """
443
+ if self.hubert_model is not None:
444
+ del self.net_g, self.n_spk, self.vc, self.hubert_model, self.tgt_sr
445
+ self.hubert_model = self.net_g = self.n_spk = self.vc = self.tgt_sr = None
446
+ if torch.cuda.is_available():
447
+ torch.cuda.empty_cache()
448
+
449
+ del self.net_g, self.cpt
450
+ if torch.cuda.is_available():
451
+ torch.cuda.empty_cache()
452
+ self.cpt = None
453
+
454
+ def load_model(self, weight_root):
455
+ """
456
+ Loads the model weights from the specified path.
457
+
458
+ Args:
459
+ weight_root (str): Path to the model weights.
460
+ """
461
+ self.cpt = (
462
+ torch.load(weight_root, map_location="cpu", weights_only=True)
463
+ if os.path.isfile(weight_root)
464
+ else None
465
+ )
466
+
467
+ def setup_network(self):
468
+ """
469
+ Sets up the network configuration based on the loaded checkpoint.
470
+ """
471
+ if self.cpt is not None:
472
+ self.tgt_sr = self.cpt["config"][-1]
473
+ self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0]
474
+ self.use_f0 = self.cpt.get("f0", 1)
475
+
476
+ self.version = self.cpt.get("version", "v1")
477
+ self.text_enc_hidden_dim = 768 if self.version == "v2" else 256
478
+ self.vocoder = self.cpt.get("vocoder", "HiFi-GAN")
479
+ self.net_g = Synthesizer(
480
+ *self.cpt["config"],
481
+ use_f0=self.use_f0,
482
+ text_enc_hidden_dim=self.text_enc_hidden_dim,
483
+ vocoder=self.vocoder,
484
+ )
485
+ del self.net_g.enc_q
486
+ self.net_g.load_state_dict(self.cpt["weight"], strict=False)
487
+ self.net_g = self.net_g.to(self.config.device).float()
488
+ self.net_g.eval()
489
+
490
+ def setup_vc_instance(self):
491
+ """
492
+ Sets up the voice conversion pipeline instance based on the target sampling rate and configuration.
493
+ """
494
+ if self.cpt is not None:
495
+ self.vc = VC(self.tgt_sr, self.config)
496
+ self.n_spk = self.cpt["config"][-3]
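For reference, a minimal single-file conversion with the VoiceConverter defined above might look like the following sketch; the .wav/.pth/.index paths are placeholders, and the keyword arguments shown are simply the defaults from convert_audio:

    from rvc.infer.infer import VoiceConverter

    converter = VoiceConverter()
    converter.convert_audio(
        audio_input_path="input.wav",       # placeholder input file
        audio_output_path="output.wav",     # written as WAV, then re-encoded if export_format differs
        model_path="logs/model.pth",        # placeholder: trained RVC checkpoint
        index_path="logs/model.index",      # placeholder: FAISS index ("" skips retrieval)
        pitch=0,
        f0_method="rmvpe",
        index_rate=0.75,
        protect=0.5,
    )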
rvc/infer/pipeline.py ADDED
@@ -0,0 +1,566 @@
1
+ import os
2
+ import gc
3
+ import sys
4
+ import torch
5
+ import torch.nn.functional as F
6
+ import torchcrepe
7
+ import faiss
8
+ import librosa
9
+ import numpy as np
10
+ from scipy import signal
11
+ from torch import Tensor
12
+
13
+ now_dir = os.getcwd()
14
+ sys.path.append(now_dir)
15
+
16
+ from rvc.lib.predictors.f0 import CREPE, FCPE, RMVPE, SWIFT
17
+
18
+ import logging
19
+
20
+ logging.getLogger("faiss").setLevel(logging.WARNING)
21
+
22
+ FILTER_ORDER = 5
23
+ CUTOFF_FREQUENCY = 48 # Hz
24
+ SAMPLE_RATE = 16000 # Hz
25
+ bh, ah = signal.butter(
26
+ N=FILTER_ORDER, Wn=CUTOFF_FREQUENCY, btype="high", fs=SAMPLE_RATE
27
+ )
28
+
29
+
30
+ class AudioProcessor:
31
+ """
32
+ A class for processing audio signals, specifically for adjusting RMS levels.
33
+ """
34
+
35
+ def change_rms(
36
+ source_audio: np.ndarray,
37
+ source_rate: int,
38
+ target_audio: np.ndarray,
39
+ target_rate: int,
40
+ rate: float,
41
+ ):
42
+ """
43
+ Adjust the RMS level of target_audio to match the RMS of source_audio, with a given blending rate.
44
+
45
+ Args:
46
+ source_audio: The source audio signal as a NumPy array.
47
+ source_rate: The sampling rate of the source audio.
48
+ target_audio: The target audio signal to adjust.
49
+ target_rate: The sampling rate of the target audio.
50
+ rate: The blending rate between the source and target RMS levels.
51
+ """
52
+ # Calculate RMS of both audio data
53
+ rms1 = librosa.feature.rms(
54
+ y=source_audio,
55
+ frame_length=source_rate // 2 * 2,
56
+ hop_length=source_rate // 2,
57
+ )
58
+ rms2 = librosa.feature.rms(
59
+ y=target_audio,
60
+ frame_length=target_rate // 2 * 2,
61
+ hop_length=target_rate // 2,
62
+ )
63
+
64
+ # Interpolate RMS to match target audio length
65
+ rms1 = F.interpolate(
66
+ torch.from_numpy(rms1).float().unsqueeze(0),
67
+ size=target_audio.shape[0],
68
+ mode="linear",
69
+ ).squeeze()
70
+ rms2 = F.interpolate(
71
+ torch.from_numpy(rms2).float().unsqueeze(0),
72
+ size=target_audio.shape[0],
73
+ mode="linear",
74
+ ).squeeze()
75
+ rms2 = torch.maximum(rms2, torch.zeros_like(rms2) + 1e-6)
76
+
77
+ # Adjust target audio RMS based on the source audio RMS
78
+ adjusted_audio = (
79
+ target_audio
80
+ * (torch.pow(rms1, 1 - rate) * torch.pow(rms2, rate - 1)).numpy()
81
+ )
82
+ return adjusted_audio
83
+
84
+
85
+ class Autotune:
86
+ """
87
+ A class for applying autotune to a given fundamental frequency (F0) contour.
88
+ """
89
+
90
+ def __init__(self):
91
+ """
92
+ Initializes the Autotune class with a set of reference frequencies.
93
+ """
94
+ self.note_dict = [
95
+ 49.00, # G1
96
+ 51.91, # G#1 / Ab1
97
+ 55.00, # A1
98
+ 58.27, # A#1 / Bb1
99
+ 61.74, # B1
100
+ 65.41, # C2
101
+ 69.30, # C#2 / Db2
102
+ 73.42, # D2
103
+ 77.78, # D#2 / Eb2
104
+ 82.41, # E2
105
+ 87.31, # F2
106
+ 92.50, # F#2 / Gb2
107
+ 98.00, # G2
108
+ 103.83, # G#2 / Ab2
109
+ 110.00, # A2
110
+ 116.54, # A#2 / Bb2
111
+ 123.47, # B2
112
+ 130.81, # C3
113
+ 138.59, # C#3 / Db3
114
+ 146.83, # D3
115
+ 155.56, # D#3 / Eb3
116
+ 164.81, # E3
117
+ 174.61, # F3
118
+ 185.00, # F#3 / Gb3
119
+ 196.00, # G3
120
+ 207.65, # G#3 / Ab3
121
+ 220.00, # A3
122
+ 233.08, # A#3 / Bb3
123
+ 246.94, # B3
124
+ 261.63, # C4
125
+ 277.18, # C#4 / Db4
126
+ 293.66, # D4
127
+ 311.13, # D#4 / Eb4
128
+ 329.63, # E4
129
+ 349.23, # F4
130
+ 369.99, # F#4 / Gb4
131
+ 392.00, # G4
132
+ 415.30, # G#4 / Ab4
133
+ 440.00, # A4
134
+ 466.16, # A#4 / Bb4
135
+ 493.88, # B4
136
+ 523.25, # C5
137
+ 554.37, # C#5 / Db5
138
+ 587.33, # D5
139
+ 622.25, # D#5 / Eb5
140
+ 659.25, # E5
141
+ 698.46, # F5
142
+ 739.99, # F#5 / Gb5
143
+ 783.99, # G5
144
+ 830.61, # G#5 / Ab5
145
+ 880.00, # A5
146
+ 932.33, # A#5 / Bb5
147
+ 987.77, # B5
148
+ 1046.50, # C6
149
+ ]
150
+
151
+ def autotune_f0(self, f0, f0_autotune_strength):
152
+ """
153
+ Autotunes a given F0 contour by snapping each frequency to the closest reference frequency.
154
+
155
+ Args:
156
+ f0: The input F0 contour as a NumPy array.
157
+ """
158
+ autotuned_f0 = np.zeros_like(f0)
159
+ for i, freq in enumerate(f0):
160
+ closest_note = min(self.note_dict, key=lambda x: abs(x - freq))
161
+ autotuned_f0[i] = freq + (closest_note - freq) * f0_autotune_strength
162
+ return autotuned_f0
163
+
164
+
165
+ class Pipeline:
166
+ """
167
+ The main pipeline class for performing voice conversion, including preprocessing, F0 estimation,
168
+ voice conversion using a model, and post-processing.
169
+ """
170
+
171
+ def __init__(self, tgt_sr, config):
172
+ """
173
+ Initializes the Pipeline class with target sampling rate and configuration parameters.
174
+
175
+ Args:
176
+ tgt_sr: The target sampling rate for the output audio.
177
+ config: A configuration object containing various parameters for the pipeline.
178
+ """
179
+ self.x_pad = config.x_pad
180
+ self.x_query = config.x_query
181
+ self.x_center = config.x_center
182
+ self.x_max = config.x_max
183
+ self.sample_rate = 16000
184
+ self.tgt_sr = tgt_sr
185
+ self.window = 160
186
+ self.t_pad = self.sample_rate * self.x_pad
187
+ self.t_pad_tgt = tgt_sr * self.x_pad
188
+ self.t_pad2 = self.t_pad * 2
189
+ self.t_query = self.sample_rate * self.x_query
190
+ self.t_center = self.sample_rate * self.x_center
191
+ self.t_max = self.sample_rate * self.x_max
192
+ self.time_step = self.window / self.sample_rate * 1000
193
+ self.f0_min = 50
194
+ self.f0_max = 1100
195
+ self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
196
+ self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
197
+ self.device = config.device
198
+ self.autotune = Autotune()
199
+
200
+ def get_f0(
201
+ self,
202
+ x,
203
+ p_len,
204
+ f0_method: str = "rmvpe",
205
+ pitch: int = 0,
206
+ f0_autotune: bool = False,
207
+ f0_autotune_strength: float = 1.0,
208
+ proposed_pitch: bool = False,
209
+ proposed_pitch_threshold: float = 155.0,
210
+ ):
211
+ """
212
+ Estimates the fundamental frequency (F0) of a given audio signal using various methods.
213
+
214
+ Args:
215
+ x: The input audio signal as a NumPy array.
216
+ p_len: Desired length of the F0 output.
217
+ pitch: Key to adjust the pitch of the F0 contour.
218
+ f0_method: Method to use for F0 estimation (e.g., "crepe").
219
+ f0_autotune: Whether to apply autotune to the F0 contour.
220
+ proposed_pitch: whether to apply proposed pitch adjustment
221
+ proposed_pitch_threshold: target frequency, 155.0 for male, 255.0 for female
222
+ """
223
+ if f0_method == "crepe":
224
+ model = CREPE(
225
+ device=self.device, sample_rate=self.sample_rate, hop_size=self.window
226
+ )
227
+ f0 = model.get_f0(x, self.f0_min, self.f0_max, p_len, "full")
228
+ del model
229
+ elif f0_method == "crepe-tiny":
230
+ model = CREPE(
231
+ device=self.device, sample_rate=self.sample_rate, hop_size=self.window
232
+ )
233
+ f0 = model.get_f0(x, self.f0_min, self.f0_max, p_len, "tiny")
234
+ del model
235
+ elif f0_method == "rmvpe":
236
+ model = RMVPE(
237
+ device=self.device, sample_rate=self.sample_rate, hop_size=self.window
238
+ )
239
+ f0 = model.get_f0(x, filter_radius=0.03)
240
+ del model
241
+ elif f0_method == "fcpe":
242
+ model = FCPE(
243
+ device=self.device, sample_rate=self.sample_rate, hop_size=self.window
244
+ )
245
+ f0 = model.get_f0(x, p_len, filter_radius=0.006)
246
+ del model
247
+ elif f0_method == "swift":
248
+ model = SWIFT(
249
+ device=self.device, sample_rate=self.sample_rate, hop_size=self.window
250
+ )
251
+ f0 = model.get_f0(
252
+ x, self.f0_min, self.f0_max, p_len, confidence_threshold=0.887
253
+ )
254
+ del model
255
+
256
+ # f0 adjustments
257
+ if f0_autotune is True:
258
+ f0 = self.autotune.autotune_f0(f0, f0_autotune_strength)
259
+ elif proposed_pitch is True:
260
+ limit = 12
261
+ # calculate median f0 of the audio
262
+ valid_f0 = np.where(f0 > 0)[0]
263
+ if len(valid_f0) < 2:
264
+ # no valid f0 detected
265
+ up_key = 0
266
+ else:
267
+ median_f0 = float(
268
+ np.median(np.interp(np.arange(len(f0)), valid_f0, f0[valid_f0]))
269
+ )
270
+ if median_f0 <= 0 or np.isnan(median_f0):
271
+ up_key = 0
272
+ else:
273
+ # calculate proposed shift
274
+ up_key = max(
275
+ -limit,
276
+ min(
277
+ limit,
278
+ int(
279
+ np.round(
280
+ 12 * np.log2(proposed_pitch_threshold / median_f0)
281
+ )
282
+ ),
283
+ ),
284
+ )
285
+ print("calculated pitch offset:", up_key)
286
+ f0 *= pow(2, (pitch + up_key) / 12)
287
+ else:
288
+ f0 *= pow(2, pitch / 12)
289
+ # quantizing f0 to 255 buckets to make coarse f0
290
+ f0bak = f0.copy()
291
+ f0_mel = 1127 * np.log(1 + f0 / 700)
292
+ f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - self.f0_mel_min) * 254 / (
293
+ self.f0_mel_max - self.f0_mel_min
294
+ ) + 1
295
+ f0_mel[f0_mel <= 1] = 1
296
+ f0_mel[f0_mel > 255] = 255
297
+ f0_coarse = np.rint(f0_mel).astype(int)
298
+
299
+ return f0_coarse, f0bak
300
+
301
+ def voice_conversion(
302
+ self,
303
+ model,
304
+ net_g,
305
+ sid,
306
+ audio0,
307
+ pitch,
308
+ pitchf,
309
+ index,
310
+ big_npy,
311
+ index_rate,
312
+ version,
313
+ protect,
314
+ ):
315
+ """
316
+ Performs voice conversion on a given audio segment.
317
+
318
+ Args:
319
+ model: The feature extractor model.
320
+ net_g: The generative model for synthesizing speech.
321
+ sid: Speaker ID for the target voice.
322
+ audio0: The input audio segment.
323
+ pitch: Quantized F0 contour for pitch guidance.
324
+ pitchf: Original F0 contour for pitch guidance.
325
+ index: FAISS index for speaker embedding retrieval.
326
+ big_npy: Speaker embeddings stored in a NumPy array.
327
+ index_rate: Blending rate for speaker embedding retrieval.
328
+ version: Model version (Keep to support old models).
329
+ protect: Protection level for preserving the original pitch.
330
+ """
331
+ with torch.no_grad():
332
+ pitch_guidance = pitch != None and pitchf != None
333
+ # prepare source audio
334
+ feats = torch.from_numpy(audio0).float()
335
+ feats = feats.mean(-1) if feats.dim() == 2 else feats
336
+ assert feats.dim() == 1, feats.dim()
337
+ feats = feats.view(1, -1).to(self.device)
338
+ # extract features
339
+ feats = model(feats)["last_hidden_state"]
340
+ feats = (
341
+ model.final_proj(feats[0]).unsqueeze(0) if version == "v1" else feats
342
+ )
343
+ # make a copy for pitch guidance and protection
344
+ feats0 = feats.clone() if pitch_guidance else None
345
+ if (
346
+ index
347
+ ): # set by parent function, only true if index is available, loaded, and index rate > 0
348
+ feats = self._retrieve_speaker_embeddings(
349
+ feats, index, big_npy, index_rate
350
+ )
351
+ # feature upsampling
352
+ feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(
353
+ 0, 2, 1
354
+ )
355
+ # adjust the length if the audio is short
356
+ p_len = min(audio0.shape[0] // self.window, feats.shape[1])
357
+ if pitch_guidance:
358
+ feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(
359
+ 0, 2, 1
360
+ )
361
+ pitch, pitchf = pitch[:, :p_len], pitchf[:, :p_len]
362
+ # Pitch protection blending
363
+ if protect < 0.5:
364
+ pitchff = pitchf.clone()
365
+ pitchff[pitchf > 0] = 1
366
+ pitchff[pitchf < 1] = protect
367
+ feats = feats * pitchff.unsqueeze(-1) + feats0 * (
368
+ 1 - pitchff.unsqueeze(-1)
369
+ )
370
+ feats = feats.to(feats0.dtype)
371
+ else:
372
+ pitch, pitchf = None, None
373
+ p_len = torch.tensor([p_len], device=self.device).long()
374
+ audio1 = (
375
+ (net_g.infer(feats.float(), p_len, pitch, pitchf.float(), sid)[0][0, 0])
376
+ .data.cpu()
377
+ .float()
378
+ .numpy()
379
+ )
380
+ # clean up
381
+ del feats, feats0, p_len
382
+ if torch.cuda.is_available():
383
+ torch.cuda.empty_cache()
384
+ return audio1
385
+
386
+ def _retrieve_speaker_embeddings(self, feats, index, big_npy, index_rate):
387
+ npy = feats[0].cpu().numpy()
388
+ score, ix = index.search(npy, k=8)
389
+ weight = np.square(1 / score)
390
+ weight /= weight.sum(axis=1, keepdims=True)
391
+ npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
392
+ feats = (
393
+ torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
394
+ + (1 - index_rate) * feats
395
+ )
396
+ return feats
397
+
398
+ def pipeline(
399
+ self,
400
+ model,
401
+ net_g,
402
+ sid,
403
+ audio,
404
+ pitch,
405
+ f0_method,
406
+ file_index,
407
+ index_rate,
408
+ pitch_guidance,
409
+ volume_envelope,
410
+ version,
411
+ protect,
412
+ f0_autotune,
413
+ f0_autotune_strength,
414
+ proposed_pitch,
415
+ proposed_pitch_threshold,
416
+ ):
417
+ """
418
+ The main pipeline function for performing voice conversion.
419
+
420
+ Args:
421
+ model: The feature extractor model.
422
+ net_g: The generative model for synthesizing speech.
423
+ sid: Speaker ID for the target voice.
424
+ audio: The input audio signal.
425
+ input_audio_path: Path to the input audio file.
426
+ pitch: Key to adjust the pitch of the F0 contour.
427
+ f0_method: Method to use for F0 estimation.
428
+ file_index: Path to the FAISS index file for speaker embedding retrieval.
429
+ index_rate: Blending rate for speaker embedding retrieval.
430
+ pitch_guidance: Whether to use pitch guidance during voice conversion.
431
+ tgt_sr: Target sampling rate for the output audio.
432
+ resample_sr: Resampling rate for the output audio.
433
+ version: Model version.
434
+ protect: Protection level for preserving the original pitch.
435
+ hop_length: Hop length for F0 estimation methods.
436
+ f0_autotune: Whether to apply autotune to the F0 contour.
437
+ """
438
+ if file_index != "" and os.path.exists(file_index) and index_rate > 0:
439
+ try:
440
+ index = faiss.read_index(file_index)
441
+ big_npy = index.reconstruct_n(0, index.ntotal)
442
+ except Exception as error:
443
+ print(f"An error occurred reading the FAISS index: {error}")
444
+ index = big_npy = None
445
+ else:
446
+ index = big_npy = None
447
+ audio = signal.filtfilt(bh, ah, audio)
448
+ audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
449
+ opt_ts = []
450
+ if audio_pad.shape[0] > self.t_max:
451
+ audio_sum = np.zeros_like(audio)
452
+ for i in range(self.window):
453
+ audio_sum += audio_pad[i : i - self.window]
454
+ for t in range(self.t_center, audio.shape[0], self.t_center):
455
+ opt_ts.append(
456
+ t
457
+ - self.t_query
458
+ + np.where(
459
+ np.abs(audio_sum[t - self.t_query : t + self.t_query])
460
+ == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
461
+ )[0][0]
462
+ )
463
+ s = 0
464
+ audio_opt = []
465
+ t = None
466
+ audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
467
+ p_len = audio_pad.shape[0] // self.window
468
+ sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
469
+ if pitch_guidance:
470
+ pitch, pitchf = self.get_f0(
471
+ audio_pad,
472
+ p_len,
473
+ f0_method,
474
+ pitch,
475
+ f0_autotune,
476
+ f0_autotune_strength,
477
+ proposed_pitch,
478
+ proposed_pitch_threshold,
479
+ )
480
+ pitch = pitch[:p_len]
481
+ pitchf = pitchf[:p_len]
482
+ if self.device == "mps":
483
+ pitchf = pitchf.astype(np.float32)
484
+ pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
485
+ pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
486
+ for t in opt_ts:
487
+ t = t // self.window * self.window
488
+ if pitch_guidance:
489
+ audio_opt.append(
490
+ self.voice_conversion(
491
+ model,
492
+ net_g,
493
+ sid,
494
+ audio_pad[s : t + self.t_pad2 + self.window],
495
+ pitch[:, s // self.window : (t + self.t_pad2) // self.window],
496
+ pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
497
+ index,
498
+ big_npy,
499
+ index_rate,
500
+ version,
501
+ protect,
502
+ )[self.t_pad_tgt : -self.t_pad_tgt]
503
+ )
504
+ else:
505
+ audio_opt.append(
506
+ self.voice_conversion(
507
+ model,
508
+ net_g,
509
+ sid,
510
+ audio_pad[s : t + self.t_pad2 + self.window],
511
+ None,
512
+ None,
513
+ index,
514
+ big_npy,
515
+ index_rate,
516
+ version,
517
+ protect,
518
+ )[self.t_pad_tgt : -self.t_pad_tgt]
519
+ )
520
+ s = t
521
+ if pitch_guidance:
522
+ audio_opt.append(
523
+ self.voice_conversion(
524
+ model,
525
+ net_g,
526
+ sid,
527
+ audio_pad[t:],
528
+ pitch[:, t // self.window :] if t is not None else pitch,
529
+ pitchf[:, t // self.window :] if t is not None else pitchf,
530
+ index,
531
+ big_npy,
532
+ index_rate,
533
+ version,
534
+ protect,
535
+ )[self.t_pad_tgt : -self.t_pad_tgt]
536
+ )
537
+ else:
538
+ audio_opt.append(
539
+ self.voice_conversion(
540
+ model,
541
+ net_g,
542
+ sid,
543
+ audio_pad[t:],
544
+ None,
545
+ None,
546
+ index,
547
+ big_npy,
548
+ index_rate,
549
+ version,
550
+ protect,
551
+ )[self.t_pad_tgt : -self.t_pad_tgt]
552
+ )
553
+ audio_opt = np.concatenate(audio_opt)
554
+ if volume_envelope != 1:
555
+ audio_opt = AudioProcessor.change_rms(
556
+ audio, self.sample_rate, audio_opt, self.tgt_sr, volume_envelope
557
+ )
558
+ audio_max = np.abs(audio_opt).max() / 0.99
559
+ if audio_max > 1:
560
+ audio_opt /= audio_max
561
+ if pitch_guidance:
562
+ del pitch, pitchf
563
+ del sid
564
+ if torch.cuda.is_available():
565
+ torch.cuda.empty_cache()
566
+ return audio_opt
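The coarse-F0 step at the end of get_f0 above maps Hz to the mel scale with 1127·ln(1 + f0/700) and then linearly rescales the [f0_min, f0_max] = [50, 1100] Hz range into integer buckets 1–255 (unvoiced frames, 0 Hz, stay in bucket 1). Restated as a standalone helper for clarity, this is only a re-expression of the code above, not a new function in the upload:

    import numpy as np

    def coarse_f0(f0, f0_min=50.0, f0_max=1100.0):
        # Same mapping as Pipeline.get_f0: Hz -> mel -> 255 integer buckets.
        mel_min = 1127 * np.log(1 + f0_min / 700)
        mel_max = 1127 * np.log(1 + f0_max / 700)
        f0_mel = 1127 * np.log(1 + np.asarray(f0, dtype=np.float64) / 700)
        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - mel_min) * 254 / (mel_max - mel_min) + 1
        f0_mel = np.clip(f0_mel, 1, 255)
        return np.rint(f0_mel).astype(int)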
rvc/lib/algorithm/__init__.py ADDED
File without changes
rvc/lib/algorithm/attentions.py ADDED
@@ -0,0 +1,243 @@
+ import math
+ import torch
+ from rvc.lib.algorithm.commons import convert_pad_shape
+
+
+ class MultiHeadAttention(torch.nn.Module):
+     """
+     Multi-head attention module with optional relative positional encoding and proximal bias.
+
+     Args:
+         channels (int): Number of input channels.
+         out_channels (int): Number of output channels.
+         n_heads (int): Number of attention heads.
+         p_dropout (float, optional): Dropout probability. Defaults to 0.0.
+         window_size (int, optional): Window size for relative positional encoding. Defaults to None.
+         heads_share (bool, optional): Whether to share relative positional embeddings across heads. Defaults to True.
+         block_length (int, optional): Block length for local attention. Defaults to None.
+         proximal_bias (bool, optional): Whether to use proximal bias in self-attention. Defaults to False.
+         proximal_init (bool, optional): Whether to initialize the key projection weights the same as query projection weights. Defaults to False.
+     """
+
+     def __init__(
+         self,
+         channels: int,
+         out_channels: int,
+         n_heads: int,
+         p_dropout: float = 0.0,
+         window_size: int = None,
+         heads_share: bool = True,
+         block_length: int = None,
+         proximal_bias: bool = False,
+         proximal_init: bool = False,
+     ):
+         super().__init__()
+         assert (
+             channels % n_heads == 0
+         ), "Channels must be divisible by the number of heads."
+
+         self.channels = channels
+         self.out_channels = out_channels
+         self.n_heads = n_heads
+         self.k_channels = channels // n_heads
+         self.window_size = window_size
+         self.block_length = block_length
+         self.proximal_bias = proximal_bias
+
+         # Define projections
+         self.conv_q = torch.nn.Conv1d(channels, channels, 1)
+         self.conv_k = torch.nn.Conv1d(channels, channels, 1)
+         self.conv_v = torch.nn.Conv1d(channels, channels, 1)
+         self.conv_o = torch.nn.Conv1d(channels, out_channels, 1)
+
+         self.drop = torch.nn.Dropout(p_dropout)
+
+         # Relative positional encodings
+         if window_size:
+             n_heads_rel = 1 if heads_share else n_heads
+             rel_stddev = self.k_channels**-0.5
+             self.emb_rel_k = torch.nn.Parameter(
+                 torch.randn(n_heads_rel, 2 * window_size + 1, self.k_channels)
+                 * rel_stddev
+             )
+             self.emb_rel_v = torch.nn.Parameter(
+                 torch.randn(n_heads_rel, 2 * window_size + 1, self.k_channels)
+                 * rel_stddev
+             )
+
+         # Initialize weights
+         torch.nn.init.xavier_uniform_(self.conv_q.weight)
+         torch.nn.init.xavier_uniform_(self.conv_k.weight)
+         torch.nn.init.xavier_uniform_(self.conv_v.weight)
+         torch.nn.init.xavier_uniform_(self.conv_o.weight)
+
+         if proximal_init:
+             with torch.no_grad():
+                 self.conv_k.weight.copy_(self.conv_q.weight)
+                 self.conv_k.bias.copy_(self.conv_q.bias)
+
+     def forward(self, x, c, attn_mask=None):
+         # Compute query, key, value projections
+         q, k, v = self.conv_q(x), self.conv_k(c), self.conv_v(c)
+
+         # Compute attention
+         x, self.attn = self.attention(q, k, v, mask=attn_mask)
+
+         # Final output projection
+         return self.conv_o(x)
+
+     def attention(self, query, key, value, mask=None):
+         # Reshape and compute scaled dot-product attention
+         b, d, t_s, t_t = (*key.size(), query.size(2))
+         query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
+         key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+         value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+
+         scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
+
+         if self.window_size:
+             assert t_s == t_t, "Relative attention only supports self-attention."
+             scores += self._compute_relative_scores(query, t_s)
+
+         if self.proximal_bias:
+             assert t_s == t_t, "Proximal bias only supports self-attention."
+             scores += self._attention_bias_proximal(t_s).to(scores.device, scores.dtype)
+
+         if mask is not None:
+             scores = scores.masked_fill(mask == 0, -1e4)
+             if self.block_length:
+                 block_mask = (
+                     torch.ones_like(scores)
+                     .triu(-self.block_length)
+                     .tril(self.block_length)
+                 )
+                 scores = scores.masked_fill(block_mask == 0, -1e4)
+
+         # Apply softmax and dropout
+         p_attn = self.drop(torch.nn.functional.softmax(scores, dim=-1))
+
+         # Compute attention output
+         output = torch.matmul(p_attn, value)
+
+         if self.window_size:
+             output += self._apply_relative_values(p_attn, t_s)
+
+         return output.transpose(2, 3).contiguous().view(b, d, t_t), p_attn
+
+     def _compute_relative_scores(self, query, length):
+         rel_emb = self._get_relative_embeddings(self.emb_rel_k, length)
+         rel_logits = self._matmul_with_relative_keys(
+             query / math.sqrt(self.k_channels), rel_emb
+         )
+         return self._relative_position_to_absolute_position(rel_logits)
+
+     def _apply_relative_values(self, p_attn, length):
+         rel_weights = self._absolute_position_to_relative_position(p_attn)
+         rel_emb = self._get_relative_embeddings(self.emb_rel_v, length)
+         return self._matmul_with_relative_values(rel_weights, rel_emb)
+
+     # Helper methods
+     def _matmul_with_relative_values(self, x, y):
+         return torch.matmul(x, y.unsqueeze(0))
+
+     def _matmul_with_relative_keys(self, x, y):
+         return torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
+
+     def _get_relative_embeddings(self, embeddings, length):
+         pad_length = max(length - (self.window_size + 1), 0)
+         start = max((self.window_size + 1) - length, 0)
+         end = start + 2 * length - 1
+
+         if pad_length > 0:
+             embeddings = torch.nn.functional.pad(
+                 embeddings,
+                 convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
+             )
+         return embeddings[:, start:end]
+
+     def _relative_position_to_absolute_position(self, x):
+         batch, heads, length, _ = x.size()
+         x = torch.nn.functional.pad(
+             x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])
+         )
+         x_flat = x.view(batch, heads, length * 2 * length)
+         x_flat = torch.nn.functional.pad(
+             x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
+         )
+         return x_flat.view(batch, heads, length + 1, 2 * length - 1)[
+             :, :, :length, length - 1 :
+         ]
+
+     def _absolute_position_to_relative_position(self, x):
+         batch, heads, length, _ = x.size()
+         x = torch.nn.functional.pad(
+             x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
+         )
+         x_flat = x.view(batch, heads, length**2 + length * (length - 1))
+         x_flat = torch.nn.functional.pad(
+             x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])
+         )
+         return x_flat.view(batch, heads, length, 2 * length)[:, :, :, 1:]
+
+     def _attention_bias_proximal(self, length):
+         r = torch.arange(length, dtype=torch.float32)
+         diff = r.unsqueeze(0) - r.unsqueeze(1)
+         return -torch.log1p(torch.abs(diff)).unsqueeze(0).unsqueeze(0)
+
+
+ class FFN(torch.nn.Module):
+     """
+     Feed-forward network module.
+
+     Args:
+         in_channels (int): Number of input channels.
+         out_channels (int): Number of output channels.
+         filter_channels (int): Number of filter channels in the convolution layers.
+         kernel_size (int): Kernel size of the convolution layers.
+         p_dropout (float, optional): Dropout probability. Defaults to 0.0.
+         activation (str, optional): Activation function to use. Defaults to None.
+         causal (bool, optional): Whether to use causal padding in the convolution layers. Defaults to False.
+     """
+
+     def __init__(
+         self,
+         in_channels: int,
+         out_channels: int,
+         filter_channels: int,
+         kernel_size: int,
+         p_dropout: float = 0.0,
+         activation: str = None,
+         causal: bool = False,
+     ):
+         super().__init__()
+         self.padding_fn = self._causal_padding if causal else self._same_padding
+
+         self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size)
+         self.conv_2 = torch.nn.Conv1d(filter_channels, out_channels, kernel_size)
+         self.drop = torch.nn.Dropout(p_dropout)
+
+         self.activation = activation
+
+     def forward(self, x, x_mask):
+         x = self.conv_1(self.padding_fn(x * x_mask))
+         x = self._apply_activation(x)
+         x = self.drop(x)
+         x = self.conv_2(self.padding_fn(x * x_mask))
+         return x * x_mask
+
+     def _apply_activation(self, x):
+         if self.activation == "gelu":
+             return x * torch.sigmoid(1.702 * x)
+         return torch.relu(x)
+
+     def _causal_padding(self, x):
+         pad_l, pad_r = self.conv_1.kernel_size[0] - 1, 0
+         return torch.nn.functional.pad(
+             x, convert_pad_shape([[0, 0], [0, 0], [pad_l, pad_r]])
+         )
+
+     def _same_padding(self, x):
+         pad = (self.conv_1.kernel_size[0] - 1) // 2
+         return torch.nn.functional.pad(
+             x, convert_pad_shape([[0, 0], [0, 0], [pad, pad]])
+         )
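One detail worth noting in FFN above: when activation == "gelu", the code computes x * sigmoid(1.702 * x), the standard sigmoid approximation of GELU, rather than calling torch.nn.functional.gelu directly. A quick comparison sketch (not part of the upload) showing how close the two are:

    import torch

    x = torch.linspace(-4, 4, steps=9)
    approx = x * torch.sigmoid(1.702 * x)   # what FFN._apply_activation computes
    exact = torch.nn.functional.gelu(x)     # reference GELU
    print((approx - exact).abs().max())     # small, on the order of 1e-2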
rvc/lib/algorithm/commons.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from typing import Optional
3
+
4
+
5
+ def init_weights(m, mean=0.0, std=0.01):
6
+ """
7
+ Initialize the weights of a module.
8
+
9
+ Args:
10
+ m: The module to initialize.
11
+ mean: The mean of the normal distribution.
12
+ std: The standard deviation of the normal distribution.
13
+ """
14
+ classname = m.__class__.__name__
15
+ if classname.find("Conv") != -1:
16
+ m.weight.data.normal_(mean, std)
17
+
18
+
19
+ def get_padding(kernel_size, dilation=1):
20
+ """
21
+ Calculate the padding needed for a convolution.
22
+
23
+ Args:
24
+ kernel_size: The size of the kernel.
25
+ dilation: The dilation of the convolution.
26
+ """
27
+ return int((kernel_size * dilation - dilation) / 2)
28
+
29
+
30
+ def convert_pad_shape(pad_shape):
31
+ """
32
+ Convert the pad shape to a list of integers.
33
+
34
+ Args:
35
+ pad_shape: The pad shape..
36
+ """
37
+ l = pad_shape[::-1]
38
+ pad_shape = [item for sublist in l for item in sublist]
39
+ return pad_shape
40
+
41
+
42
+ def slice_segments(
43
+ x: torch.Tensor, ids_str: torch.Tensor, segment_size: int = 4, dim: int = 2
44
+ ):
45
+ """
46
+ Slice segments from a tensor, handling tensors with different numbers of dimensions.
47
+
48
+ Args:
49
+ x (torch.Tensor): The tensor to slice.
50
+ ids_str (torch.Tensor): The starting indices of the segments.
51
+ segment_size (int, optional): The size of each segment. Defaults to 4.
52
+ dim (int, optional): The dimension to slice across (2D or 3D tensors). Defaults to 2.
53
+ """
54
+ if dim == 2:
55
+ ret = torch.zeros_like(x[:, :segment_size])
56
+ elif dim == 3:
57
+ ret = torch.zeros_like(x[:, :, :segment_size])
58
+
59
+ for i in range(x.size(0)):
60
+ idx_str = ids_str[i].item()
61
+ idx_end = idx_str + segment_size
62
+ if dim == 2:
63
+ ret[i] = x[i, idx_str:idx_end]
64
+ else:
65
+ ret[i] = x[i, :, idx_str:idx_end]
66
+
67
+ return ret
68
+
69
+
70
+ def rand_slice_segments(x, x_lengths=None, segment_size=4):
71
+ """
72
+ Randomly slice segments from a tensor.
73
+
74
+ Args:
75
+ x: The tensor to slice.
76
+ x_lengths: The lengths of the sequences.
77
+ segment_size: The size of each segment.
78
+ """
79
+ b, d, t = x.size()
80
+ if x_lengths is None:
81
+ x_lengths = t
82
+ ids_str_max = x_lengths - segment_size + 1
83
+ ids_str = (torch.rand([b], device=x.device) * ids_str_max).to(dtype=torch.long)
84
+ ret = slice_segments(x, ids_str, segment_size, dim=3)
85
+ return ret, ids_str
86
+
87
+
88
+ @torch.jit.script
89
+ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
90
+ """
91
+ Fused add tanh sigmoid multiply operation.
92
+
93
+ Args:
94
+ input_a: The first input tensor.
95
+ input_b: The second input tensor.
96
+ n_channels: The number of channels.
97
+ """
98
+ n_channels_int = n_channels[0]
99
+ in_act = input_a + input_b
100
+ t_act = torch.tanh(in_act[:, :n_channels_int, :])
101
+ s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
102
+ acts = t_act * s_act
103
+ return acts
104
+
105
+
106
+ def sequence_mask(length: torch.Tensor, max_length: Optional[int] = None):
107
+ """
108
+ Generate a sequence mask.
109
+
110
+ Args:
111
+ length: The lengths of the sequences.
112
+ max_length: The maximum length of the sequences.
113
+ """
114
+ if max_length is None:
115
+ max_length = length.max()
116
+ x = torch.arange(max_length, dtype=length.dtype, device=length.device)
117
+ return x.unsqueeze(0) < length.unsqueeze(1)
118
+
119
+
120
+ def grad_norm(parameters, norm_type: float = 2.0):
121
+ """
122
+ Calculates the norm of the parameter gradients.
123
+
124
+ Args:
125
+ parameters: The parameters whose gradient norm is computed.
126
+ norm_type: The type of norm to use.
127
+ """
128
+ if isinstance(parameters, torch.Tensor):
129
+ parameters = [parameters]
130
+
131
+ parameters = [p for p in parameters if p.grad is not None]
132
+
133
+ if not parameters:
134
+ return 0.0
135
+
136
+ return torch.linalg.vector_norm(
137
+ torch.stack([p.grad.norm(norm_type) for p in parameters]), ord=norm_type
138
+ ).item()
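A short usage sketch for the masking and slicing helpers above (shapes are illustrative):

    import torch
    from rvc.lib.algorithm.commons import sequence_mask, rand_slice_segments

    lengths = torch.tensor([7, 10])
    mask = sequence_mask(lengths)              # (2, 10) boolean, True for valid frames
    feats = torch.randn(2, 192, 10)            # (batch, channels, frames)
    segments, ids = rand_slice_segments(feats, lengths, segment_size=4)
    # segments: (2, 192, 4); ids: starting frame index of each random slice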
rvc/lib/algorithm/discriminators.py ADDED
@@ -0,0 +1,149 @@
1
+ import torch
2
+ from torch.utils.checkpoint import checkpoint
3
+ from torch.nn.utils.parametrizations import spectral_norm, weight_norm
4
+
5
+ from rvc.lib.algorithm.commons import get_padding
6
+ from rvc.lib.algorithm.residuals import LRELU_SLOPE
7
+
8
+
9
+ class MultiPeriodDiscriminator(torch.nn.Module):
10
+ """
11
+ Multi-period discriminator.
12
+
13
+ This class implements a multi-period discriminator, which is used to
14
+ discriminate between real and fake audio signals. The discriminator
15
+ is composed of a series of convolutional layers that are applied to
16
+ the input signal at different periods.
17
+
18
+ Args:
19
+ use_spectral_norm (bool): Whether to use spectral normalization.
20
+ Defaults to False.
+ checkpointing (bool): Whether to use gradient checkpointing in the forward pass. Defaults to False.
21
+ """
22
+
23
+ def __init__(self, use_spectral_norm: bool = False, checkpointing: bool = False):
24
+ super().__init__()
25
+ periods = [2, 3, 5, 7, 11, 17, 23, 37]
26
+ self.checkpointing = checkpointing
27
+ self.discriminators = torch.nn.ModuleList(
28
+ [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
29
+ + [DiscriminatorP(p, use_spectral_norm=use_spectral_norm) for p in periods]
30
+ )
31
+
32
+ def forward(self, y, y_hat):
33
+ y_d_rs, y_d_gs, fmap_rs, fmap_gs = [], [], [], []
34
+ for d in self.discriminators:
35
+ if self.training and self.checkpointing:
36
+ y_d_r, fmap_r = checkpoint(d, y, use_reentrant=False)
37
+ y_d_g, fmap_g = checkpoint(d, y_hat, use_reentrant=False)
38
+ else:
39
+ y_d_r, fmap_r = d(y)
40
+ y_d_g, fmap_g = d(y_hat)
41
+ y_d_rs.append(y_d_r)
42
+ y_d_gs.append(y_d_g)
43
+ fmap_rs.append(fmap_r)
44
+ fmap_gs.append(fmap_g)
45
+
46
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
47
+
48
+
49
+ class DiscriminatorS(torch.nn.Module):
50
+ """
51
+ Discriminator for the short-term component.
52
+
53
+ This class implements a discriminator for the short-term component
54
+ of the audio signal. The discriminator is composed of a series of
55
+ convolutional layers that are applied to the input signal.
56
+ """
57
+
58
+ def __init__(self, use_spectral_norm: bool = False):
59
+ super().__init__()
60
+
61
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
62
+ self.convs = torch.nn.ModuleList(
63
+ [
64
+ norm_f(torch.nn.Conv1d(1, 16, 15, 1, padding=7)),
65
+ norm_f(torch.nn.Conv1d(16, 64, 41, 4, groups=4, padding=20)),
66
+ norm_f(torch.nn.Conv1d(64, 256, 41, 4, groups=16, padding=20)),
67
+ norm_f(torch.nn.Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
68
+ norm_f(torch.nn.Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
69
+ norm_f(torch.nn.Conv1d(1024, 1024, 5, 1, padding=2)),
70
+ ]
71
+ )
72
+ self.conv_post = norm_f(torch.nn.Conv1d(1024, 1, 3, 1, padding=1))
73
+ self.lrelu = torch.nn.LeakyReLU(LRELU_SLOPE)
74
+
75
+ def forward(self, x):
76
+ fmap = []
77
+ for conv in self.convs:
78
+ x = self.lrelu(conv(x))
79
+ fmap.append(x)
80
+ x = self.conv_post(x)
81
+ fmap.append(x)
82
+ x = torch.flatten(x, 1, -1)
83
+ return x, fmap
84
+
85
+
86
+ class DiscriminatorP(torch.nn.Module):
87
+ """
88
+ Period-based discriminator.
89
+
90
+ This class implements a period-based discriminator that reshapes the
91
+ input signal according to a given period and applies a stack of 2D
92
+ convolutional layers to the reshaped signal.
94
+
95
+ Args:
96
+ period (int): Period of the discriminator.
97
+ kernel_size (int): Kernel size of the convolutional layers. Defaults to 5.
98
+ stride (int): Stride of the convolutional layers. Defaults to 3.
99
+ use_spectral_norm (bool): Whether to use spectral normalization. Defaults to False.
100
+ """
101
+
102
+ def __init__(
103
+ self,
104
+ period: int,
105
+ kernel_size: int = 5,
106
+ stride: int = 3,
107
+ use_spectral_norm: bool = False,
108
+ ):
109
+ super().__init__()
110
+ self.period = period
111
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
112
+
113
+ in_channels = [1, 32, 128, 512, 1024]
114
+ out_channels = [32, 128, 512, 1024, 1024]
115
+ strides = [3, 3, 3, 3, 1]
116
+
117
+ self.convs = torch.nn.ModuleList(
118
+ [
119
+ norm_f(
120
+ torch.nn.Conv2d(
121
+ in_ch,
122
+ out_ch,
123
+ (kernel_size, 1),
124
+ (s, 1),
125
+ padding=(get_padding(kernel_size, 1), 0),
126
+ )
127
+ )
128
+ for in_ch, out_ch, s in zip(in_channels, out_channels, strides)
129
+ ]
130
+ )
131
+
132
+ self.conv_post = norm_f(torch.nn.Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
133
+ self.lrelu = torch.nn.LeakyReLU(LRELU_SLOPE)
134
+
135
+ def forward(self, x):
136
+ fmap = []
137
+ b, c, t = x.shape
138
+ if t % self.period != 0:
139
+ n_pad = self.period - (t % self.period)
140
+ x = torch.nn.functional.pad(x, (0, n_pad), "reflect")
141
+ x = x.view(b, c, -1, self.period)
142
+
143
+ for conv in self.convs:
144
+ x = self.lrelu(conv(x))
145
+ fmap.append(x)
146
+ x = self.conv_post(x)
147
+ fmap.append(x)
148
+ x = torch.flatten(x, 1, -1)
149
+ return x, fmap
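A minimal sketch of how the discriminator stack above is typically driven during adversarial training (tensors are illustrative):

    import torch
    from rvc.lib.algorithm.discriminators import MultiPeriodDiscriminator

    mpd = MultiPeriodDiscriminator()
    real = torch.randn(1, 1, 8192)     # (batch, 1, samples) waveform slice
    fake = torch.randn(1, 1, 8192)     # generator output of the same length
    y_d_rs, y_d_gs, fmap_rs, fmap_gs = mpd(real, fake)
    # one logit tensor and one feature-map list per sub-discriminator (1 scale + 8 periods)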
rvc/lib/algorithm/encoders.py ADDED
@@ -0,0 +1,209 @@
1
+ import math
2
+ import torch
3
+ from typing import Optional
4
+
5
+ from rvc.lib.algorithm.commons import sequence_mask
6
+ from rvc.lib.algorithm.modules import WaveNet
7
+ from rvc.lib.algorithm.normalization import LayerNorm
8
+ from rvc.lib.algorithm.attentions import FFN, MultiHeadAttention
9
+
10
+
11
+ class Encoder(torch.nn.Module):
12
+ """
13
+ Encoder module for the Transformer model.
14
+
15
+ Args:
16
+ hidden_channels (int): Number of hidden channels in the encoder.
17
+ filter_channels (int): Number of filter channels in the feed-forward network.
18
+ n_heads (int): Number of attention heads.
19
+ n_layers (int): Number of encoder layers.
20
+ kernel_size (int, optional): Kernel size of the convolution layers in the feed-forward network. Defaults to 1.
21
+ p_dropout (float, optional): Dropout probability. Defaults to 0.0.
22
+ window_size (int, optional): Window size for relative positional encoding. Defaults to 10.
23
+ """
24
+
25
+ def __init__(
26
+ self,
27
+ hidden_channels: int,
28
+ filter_channels: int,
29
+ n_heads: int,
30
+ n_layers: int,
31
+ kernel_size: int = 1,
32
+ p_dropout: float = 0.0,
33
+ window_size: int = 10,
34
+ ):
35
+ super().__init__()
36
+
37
+ self.hidden_channels = hidden_channels
38
+ self.n_layers = n_layers
39
+ self.drop = torch.nn.Dropout(p_dropout)
40
+
41
+ self.attn_layers = torch.nn.ModuleList(
42
+ [
43
+ MultiHeadAttention(
44
+ hidden_channels,
45
+ hidden_channels,
46
+ n_heads,
47
+ p_dropout=p_dropout,
48
+ window_size=window_size,
49
+ )
50
+ for _ in range(n_layers)
51
+ ]
52
+ )
53
+ self.norm_layers_1 = torch.nn.ModuleList(
54
+ [LayerNorm(hidden_channels) for _ in range(n_layers)]
55
+ )
56
+ self.ffn_layers = torch.nn.ModuleList(
57
+ [
58
+ FFN(
59
+ hidden_channels,
60
+ hidden_channels,
61
+ filter_channels,
62
+ kernel_size,
63
+ p_dropout=p_dropout,
64
+ )
65
+ for _ in range(n_layers)
66
+ ]
67
+ )
68
+ self.norm_layers_2 = torch.nn.ModuleList(
69
+ [LayerNorm(hidden_channels) for _ in range(n_layers)]
70
+ )
71
+
72
+ def forward(self, x, x_mask):
73
+ attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
74
+ x = x * x_mask
75
+
76
+ for i in range(self.n_layers):
77
+ y = self.attn_layers[i](x, x, attn_mask)
78
+ y = self.drop(y)
79
+ x = self.norm_layers_1[i](x + y)
80
+
81
+ y = self.ffn_layers[i](x, x_mask)
82
+ y = self.drop(y)
83
+ x = self.norm_layers_2[i](x + y)
84
+
85
+ return x * x_mask
86
+
87
+
88
+ class TextEncoder(torch.nn.Module):
89
+ """
90
+ Text Encoder with configurable embedding dimension.
91
+
92
+ Args:
93
+ out_channels (int): Output channels of the encoder.
94
+ hidden_channels (int): Hidden channels of the encoder.
95
+ filter_channels (int): Filter channels of the encoder.
96
+ n_heads (int): Number of attention heads.
97
+ n_layers (int): Number of encoder layers.
98
+ kernel_size (int): Kernel size of the convolutional layers.
99
+ p_dropout (float): Dropout probability.
100
+ embedding_dim (int): Embedding dimension for phone embeddings (v1 = 256, v2 = 768).
101
+ f0 (bool, optional): Whether to use F0 embedding. Defaults to True.
102
+ """
103
+
104
+ def __init__(
105
+ self,
106
+ out_channels: int,
107
+ hidden_channels: int,
108
+ filter_channels: int,
109
+ n_heads: int,
110
+ n_layers: int,
111
+ kernel_size: int,
112
+ p_dropout: float,
113
+ embedding_dim: int,
114
+ f0: bool = True,
115
+ ):
116
+ super().__init__()
117
+ self.hidden_channels = hidden_channels
118
+ self.out_channels = out_channels
119
+ self.emb_phone = torch.nn.Linear(embedding_dim, hidden_channels)
120
+ self.lrelu = torch.nn.LeakyReLU(0.1, inplace=True)
121
+ self.emb_pitch = torch.nn.Embedding(256, hidden_channels) if f0 else None
122
+
123
+ self.encoder = Encoder(
124
+ hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
125
+ )
126
+ self.proj = torch.nn.Conv1d(hidden_channels, out_channels * 2, 1)
127
+
128
+ def forward(
129
+ self, phone: torch.Tensor, pitch: Optional[torch.Tensor], lengths: torch.Tensor
130
+ ):
131
+ x = self.emb_phone(phone)
132
+ if pitch is not None and self.emb_pitch:
133
+ x += self.emb_pitch(pitch)
134
+
135
+ x *= math.sqrt(self.hidden_channels)
136
+ x = self.lrelu(x)
137
+ x = x.transpose(1, -1) # [B, H, T]
138
+
139
+ x_mask = sequence_mask(lengths, x.size(2)).unsqueeze(1).to(x.dtype)
140
+ x = self.encoder(x, x_mask)
141
+ stats = self.proj(x) * x_mask
142
+
143
+ m, logs = torch.split(stats, self.out_channels, dim=1)
144
+ return m, logs, x_mask
145
+
146
+
147
+ class PosteriorEncoder(torch.nn.Module):
148
+ """
149
+ Posterior Encoder for inferring latent representation.
150
+
151
+ Args:
152
+ in_channels (int): Number of channels in the input.
153
+ out_channels (int): Number of channels in the output.
154
+ hidden_channels (int): Number of hidden channels in the encoder.
155
+ kernel_size (int): Kernel size of the convolutional layers.
156
+ dilation_rate (int): Dilation rate of the convolutional layers.
157
+ n_layers (int): Number of layers in the encoder.
158
+ gin_channels (int, optional): Number of channels for the global conditioning input. Defaults to 0.
159
+ """
160
+
161
+ def __init__(
162
+ self,
163
+ in_channels: int,
164
+ out_channels: int,
165
+ hidden_channels: int,
166
+ kernel_size: int,
167
+ dilation_rate: int,
168
+ n_layers: int,
169
+ gin_channels: int = 0,
170
+ ):
171
+ super().__init__()
172
+ self.out_channels = out_channels
173
+ self.pre = torch.nn.Conv1d(in_channels, hidden_channels, 1)
174
+ self.enc = WaveNet(
175
+ hidden_channels,
176
+ kernel_size,
177
+ dilation_rate,
178
+ n_layers,
179
+ gin_channels=gin_channels,
180
+ )
181
+ self.proj = torch.nn.Conv1d(hidden_channels, out_channels * 2, 1)
182
+
183
+ def forward(
184
+ self, x: torch.Tensor, x_lengths: torch.Tensor, g: Optional[torch.Tensor] = None
185
+ ):
186
+ x_mask = sequence_mask(x_lengths, x.size(2)).unsqueeze(1).to(x.dtype)
187
+
188
+ x = self.pre(x) * x_mask
189
+ x = self.enc(x, x_mask, g=g)
190
+
191
+ stats = self.proj(x) * x_mask
192
+ m, logs = torch.split(stats, self.out_channels, dim=1)
193
+
194
+ z = m + torch.randn_like(m) * torch.exp(logs)
195
+ z *= x_mask
196
+
197
+ return z, m, logs, x_mask
198
+
199
+ def remove_weight_norm(self):
200
+ self.enc.remove_weight_norm()
201
+
202
+ def __prepare_scriptable__(self):
203
+ for hook in self.enc._forward_pre_hooks.values():
204
+ if (
205
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
206
+ and hook.__class__.__name__ == "WeightNorm"
207
+ ):
208
+ torch.nn.utils.remove_weight_norm(self.enc)
209
+ return self
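A shape sketch for the text encoder above (hyperparameters are illustrative, not taken from a specific config):

    import torch
    from rvc.lib.algorithm.encoders import TextEncoder

    enc = TextEncoder(
        out_channels=192, hidden_channels=192, filter_channels=768,
        n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.0, embedding_dim=768,
    )
    phone = torch.randn(1, 100, 768)           # (batch, frames, embedding_dim) content features
    pitch = torch.randint(0, 256, (1, 100))    # coarse F0 bins
    lengths = torch.tensor([100])
    m, logs, x_mask = enc(phone, pitch, lengths)
    # m, logs: (1, 192, 100); x_mask: (1, 1, 100)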
rvc/lib/algorithm/generators/__init__.py ADDED
File without changes
rvc/lib/algorithm/generators/hifigan.py ADDED
@@ -0,0 +1,228 @@
1
+ import torch
2
+ import numpy as np
3
+ from torch.nn.utils import remove_weight_norm
4
+ from torch.nn.utils.parametrizations import weight_norm
5
+ from typing import Optional
6
+
7
+ from rvc.lib.algorithm.residuals import LRELU_SLOPE, ResBlock
8
+ from rvc.lib.algorithm.commons import init_weights
9
+
10
+
11
+ class HiFiGANGenerator(torch.nn.Module):
12
+ """
13
+ HiFi-GAN Generator module for audio synthesis.
14
+
15
+ This module implements the generator part of the HiFi-GAN architecture,
16
+ which uses transposed convolutions for upsampling and residual blocks for
17
+ refining the audio output. It can also incorporate global conditioning.
18
+
19
+ Args:
20
+ initial_channel (int): Number of input channels to the initial convolutional layer.
21
+ resblock_kernel_sizes (list): List of kernel sizes for the residual blocks.
22
+ resblock_dilation_sizes (list): List of lists of dilation rates for the residual blocks, corresponding to each kernel size.
23
+ upsample_rates (list): List of upsampling factors for each upsampling layer.
24
+ upsample_initial_channel (int): Number of output channels from the initial convolutional layer, which is also the input to the first upsampling layer.
25
+ upsample_kernel_sizes (list): List of kernel sizes for the transposed convolutional layers used for upsampling.
26
+ gin_channels (int, optional): Number of input channels for the global conditioning. If 0, no global conditioning is used. Defaults to 0.
27
+ """
28
+
29
+ def __init__(
30
+ self,
31
+ initial_channel: int,
32
+ resblock_kernel_sizes: list,
33
+ resblock_dilation_sizes: list,
34
+ upsample_rates: list,
35
+ upsample_initial_channel: int,
36
+ upsample_kernel_sizes: list,
37
+ gin_channels: int = 0,
38
+ ):
39
+ super(HiFiGANGenerator, self).__init__()
40
+ self.num_kernels = len(resblock_kernel_sizes)
41
+ self.num_upsamples = len(upsample_rates)
42
+ self.conv_pre = torch.nn.Conv1d(
43
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
44
+ )
45
+
46
+ self.ups = torch.nn.ModuleList()
47
+ self.resblocks = torch.nn.ModuleList()
48
+
49
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
50
+ self.ups.append(
51
+ weight_norm(
52
+ torch.nn.ConvTranspose1d(
53
+ upsample_initial_channel // (2**i),
54
+ upsample_initial_channel // (2 ** (i + 1)),
55
+ k,
56
+ u,
57
+ padding=(k - u) // 2,
58
+ )
59
+ )
60
+ )
61
+ ch = upsample_initial_channel // (2 ** (i + 1))
62
+ for j, (k, d) in enumerate(
63
+ zip(resblock_kernel_sizes, resblock_dilation_sizes)
64
+ ):
65
+ self.resblocks.append(ResBlock(ch, k, d))
66
+
67
+ self.conv_post = torch.nn.Conv1d(ch, 1, 7, 1, padding=3, bias=False)
68
+ self.ups.apply(init_weights)
69
+
70
+ if gin_channels != 0:
71
+ self.cond = torch.nn.Conv1d(gin_channels, upsample_initial_channel, 1)
72
+
73
+ def forward(self, x: torch.Tensor, g: Optional[torch.Tensor] = None):
74
+ # new tensor
75
+ x = self.conv_pre(x)
76
+
77
+ if g is not None:
78
+ x = x + self.cond(g)
79
+
80
+ for i in range(self.num_upsamples):
81
+ x = torch.nn.functional.leaky_relu(x, LRELU_SLOPE)
82
+ x = self.ups[i](x)
83
+ xs = None
84
+ for j in range(self.num_kernels):
85
+ if xs is None:
86
+ xs = self.resblocks[i * self.num_kernels + j](x)
87
+ else:
88
+ xs += self.resblocks[i * self.num_kernels + j](x)
89
+ x = xs / self.num_kernels
90
+ # in-place call
91
+ x = torch.nn.functional.leaky_relu(x)
92
+ x = self.conv_post(x)
93
+ # in-place call
94
+ x = torch.tanh(x)
95
+
96
+ return x
97
+
98
+ def __prepare_scriptable__(self):
99
+ for l in [*self.ups, *self.resblocks]: # self.ups_and_resblocks is not defined on this class
100
+ for hook in l._forward_pre_hooks.values():
101
+ if (
102
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
103
+ and hook.__class__.__name__ == "WeightNorm"
104
+ ):
105
+ torch.nn.utils.remove_weight_norm(l)
106
+ return self
107
+
108
+ def remove_weight_norm(self):
109
+ for l in self.ups:
110
+ remove_weight_norm(l)
111
+ for l in self.resblocks:
112
+ l.remove_weight_norm()
113
+
114
+
115
+ class SineGenerator(torch.nn.Module):
116
+ """
117
+ Sine wave generator with optional harmonic overtones and noise.
118
+
119
+ This module generates sine waves for a fundamental frequency and its harmonics.
120
+ It can also add Gaussian noise and apply a voiced/unvoiced mask.
121
+
122
+ Args:
123
+ sampling_rate (int): The sampling rate of the audio in Hz.
124
+ num_harmonics (int, optional): The number of harmonic overtones to generate. Defaults to 0.
125
+ sine_amplitude (float, optional): The amplitude of the sine wave components. Defaults to 0.1.
126
+ noise_stddev (float, optional): The standard deviation of the additive Gaussian noise. Defaults to 0.003.
127
+ voiced_threshold (float, optional): The threshold for the fundamental frequency (F0) to determine if a frame is voiced. Defaults to 0.0.
128
+ """
129
+
130
+ def __init__(
131
+ self,
132
+ sampling_rate: int,
133
+ num_harmonics: int = 0,
134
+ sine_amplitude: float = 0.1,
135
+ noise_stddev: float = 0.003,
136
+ voiced_threshold: float = 0.0,
137
+ ):
138
+ super(SineGenerator, self).__init__()
139
+ self.sampling_rate = sampling_rate
140
+ self.num_harmonics = num_harmonics
141
+ self.sine_amplitude = sine_amplitude
142
+ self.noise_stddev = noise_stddev
143
+ self.voiced_threshold = voiced_threshold
144
+ self.waveform_dim = self.num_harmonics + 1 # fundamental + harmonics
145
+
146
+ def _compute_voiced_unvoiced(self, f0: torch.Tensor):
147
+ """
148
+ Generates a binary mask indicating voiced/unvoiced frames based on the fundamental frequency.
149
+
150
+ Args:
151
+ f0 (torch.Tensor): Fundamental frequency tensor of shape (batch_size, length).
152
+ """
153
+ uv_mask = (f0 > self.voiced_threshold).float()
154
+ return uv_mask
155
+
156
+ def _generate_sine_wave(self, f0: torch.Tensor, upsampling_factor: int):
157
+ """
158
+ Generates sine waves for the fundamental frequency and its harmonics.
159
+
160
+ Args:
161
+ f0 (torch.Tensor): Fundamental frequency tensor of shape (batch_size, length, 1).
162
+ upsampling_factor (int): The factor by which to upsample the sine wave.
163
+ """
164
+ batch_size, length, _ = f0.shape
165
+
166
+ # Create an upsampling grid
167
+ upsampling_grid = torch.arange(
168
+ 1, upsampling_factor + 1, dtype=f0.dtype, device=f0.device
169
+ )
170
+
171
+ # Calculate phase increments
172
+ phase_increments = (f0 / self.sampling_rate) * upsampling_grid
173
+ phase_remainder = torch.fmod(phase_increments[:, :-1, -1:] + 0.5, 1.0) - 0.5
174
+ cumulative_phase = phase_remainder.cumsum(dim=1).fmod(1.0).to(f0.dtype)
175
+ phase_increments += torch.nn.functional.pad(
176
+ cumulative_phase, (0, 0, 1, 0), mode="constant"
177
+ )
178
+
179
+ # Reshape to match the sine wave shape
180
+ phase_increments = phase_increments.reshape(batch_size, -1, 1)
181
+
182
+ # Scale for harmonics
183
+ harmonic_scale = torch.arange(
184
+ 1, self.waveform_dim + 1, dtype=f0.dtype, device=f0.device
185
+ ).reshape(1, 1, -1)
186
+ phase_increments *= harmonic_scale
187
+
188
+ # Add random phase offset (except for the fundamental)
189
+ random_phase = torch.rand(1, 1, self.waveform_dim, device=f0.device)
190
+ random_phase[..., 0] = 0 # Fundamental frequency has no random offset
191
+ phase_increments += random_phase
192
+
193
+ # Generate sine waves
194
+ sine_waves = torch.sin(2 * np.pi * phase_increments)
195
+ return sine_waves
196
+
197
+ def forward(self, f0: torch.Tensor, upsampling_factor: int):
198
+ with torch.no_grad():
199
+ # Expand `f0` to include waveform dimensions
200
+ f0 = f0.unsqueeze(-1)
201
+
202
+ # Generate sine waves
203
+ sine_waves = (
204
+ self._generate_sine_wave(f0, upsampling_factor) * self.sine_amplitude
205
+ )
206
+
207
+ # Compute voiced/unvoiced mask
208
+ voiced_mask = self._compute_voiced_unvoiced(f0)
209
+
210
+ # Upsample voiced/unvoiced mask
211
+ voiced_mask = torch.nn.functional.interpolate(
212
+ voiced_mask.transpose(2, 1),
213
+ scale_factor=float(upsampling_factor),
214
+ mode="nearest",
215
+ ).transpose(2, 1)
216
+
217
+ # Compute noise amplitude
218
+ noise_amplitude = voiced_mask * self.noise_stddev + (1 - voiced_mask) * (
219
+ self.sine_amplitude / 3
220
+ )
221
+
222
+ # Add Gaussian noise
223
+ noise = noise_amplitude * torch.randn_like(sine_waves)
224
+
225
+ # Combine sine waves and noise
226
+ sine_waveforms = sine_waves * voiced_mask + noise
227
+
228
+ return sine_waveforms, voiced_mask, noise
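A small sketch of driving the sine excitation generator above (values are illustrative):

    import torch
    from rvc.lib.algorithm.generators.hifigan import SineGenerator

    sine_gen = SineGenerator(sampling_rate=32000, num_harmonics=8)
    f0 = torch.full((1, 50), 220.0)            # per-frame F0 in Hz
    sines, voiced_mask, noise = sine_gen(f0, upsampling_factor=320)
    # sines: (1, 50 * 320, 9), the fundamental plus 8 harmonics at audio rate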
rvc/lib/algorithm/generators/hifigan_mrf.py ADDED
@@ -0,0 +1,374 @@
1
+ import math
2
+ from typing import Optional
3
+
4
+ import numpy as np
5
+ import torch
6
+ from torch.nn.utils import remove_weight_norm
7
+ from torch.nn.utils.parametrizations import weight_norm
8
+ from torch.utils.checkpoint import checkpoint
9
+
10
+ LRELU_SLOPE = 0.1
11
+
12
+
13
+ class MRFLayer(torch.nn.Module):
14
+ """
15
+ A single layer of the Multi-Receptive Field (MRF) block.
16
+
17
+ This layer consists of two 1D convolutional layers with weight normalization
18
+ and Leaky ReLU activation in between. The first convolution has a dilation,
19
+ while the second has a dilation of 1. A skip connection is added from the input
20
+ to the output.
21
+
22
+ Args:
23
+ channels (int): The number of input and output channels.
24
+ kernel_size (int): The kernel size of the convolutional layers.
25
+ dilation (int): The dilation rate for the first convolutional layer.
26
+ """
27
+
28
+ def __init__(self, channels, kernel_size, dilation):
29
+ super().__init__()
30
+ self.conv1 = weight_norm(
31
+ torch.nn.Conv1d(
32
+ channels,
33
+ channels,
34
+ kernel_size,
35
+ padding=(kernel_size * dilation - dilation) // 2,
36
+ dilation=dilation,
37
+ )
38
+ )
39
+ self.conv2 = weight_norm(
40
+ torch.nn.Conv1d(
41
+ channels, channels, kernel_size, padding=kernel_size // 2, dilation=1
42
+ )
43
+ )
44
+
45
+ def forward(self, x: torch.Tensor):
46
+ y = torch.nn.functional.leaky_relu(x, LRELU_SLOPE)
47
+ y = self.conv1(y)
48
+ y = torch.nn.functional.leaky_relu(y, LRELU_SLOPE)
49
+ y = self.conv2(y)
50
+ return x + y
51
+
52
+ def remove_weight_norm(self):
53
+ remove_weight_norm(self.conv1)
54
+ remove_weight_norm(self.conv2)
55
+
56
+
57
+ class MRFBlock(torch.nn.Module):
58
+ """
59
+ A Multi-Receptive Field (MRF) block.
60
+
61
+ This block consists of multiple MRFLayers with different dilation rates.
62
+ It applies each layer sequentially to the input.
63
+
64
+ Args:
65
+ channels (int): The number of input and output channels for the MRFLayers.
66
+ kernel_size (int): The kernel size for the convolutional layers in the MRFLayers.
67
+ dilations (list[int]): A list of dilation rates for the MRFLayers.
68
+ """
69
+
70
+ def __init__(self, channels, kernel_size, dilations):
71
+ super().__init__()
72
+ self.layers = torch.nn.ModuleList()
73
+ for dilation in dilations:
74
+ self.layers.append(MRFLayer(channels, kernel_size, dilation))
75
+
76
+ def forward(self, x: torch.Tensor):
77
+ for layer in self.layers:
78
+ x = layer(x)
79
+ return x
80
+
81
+ def remove_weight_norm(self):
82
+ for layer in self.layers:
83
+ layer.remove_weight_norm()
84
+
85
+
86
+ class SineGenerator(torch.nn.Module):
87
+ """
88
+ Definition of sine generator
89
+
90
+ Generates sine waveforms with optional harmonics and additive noise.
91
+ Can be used to create harmonic noise source for neural vocoders.
92
+
93
+ Args:
94
+ samp_rate (int): Sampling rate in Hz.
95
+ harmonic_num (int): Number of harmonic overtones (default 0).
96
+ sine_amp (float): Amplitude of sine-waveform (default 0.1).
97
+ noise_std (float): Standard deviation of Gaussian noise (default 0.003).
98
+ voiced_threshold (float): F0 threshold for voiced/unvoiced classification (default 0).
99
+ """
100
+
101
+ def __init__(
102
+ self,
103
+ samp_rate: int,
104
+ harmonic_num: int = 0,
105
+ sine_amp: float = 0.1,
106
+ noise_std: float = 0.003,
107
+ voiced_threshold: float = 0,
108
+ ):
109
+ super(SineGenerator, self).__init__()
110
+ self.sine_amp = sine_amp
111
+ self.noise_std = noise_std
112
+ self.harmonic_num = harmonic_num
113
+ self.dim = self.harmonic_num + 1
114
+ self.sampling_rate = samp_rate
115
+ self.voiced_threshold = voiced_threshold
116
+
117
+ def _f02uv(self, f0: torch.Tensor):
118
+ """
119
+ Generates voiced/unvoiced (UV) signal based on the fundamental frequency (F0).
120
+
121
+ Args:
122
+ f0 (torch.Tensor): Fundamental frequency tensor of shape (batch_size, length, 1).
123
+ """
124
+ # generate uv signal
125
+ uv = torch.ones_like(f0)
126
+ uv = uv * (f0 > self.voiced_threshold)
127
+ return uv
128
+
129
+ def _f02sine(self, f0_values: torch.Tensor):
130
+ """
131
+ Generates sine waveforms based on the fundamental frequency (F0) and its harmonics.
132
+
133
+ Args:
134
+ f0_values (torch.Tensor): Tensor of fundamental frequency and its harmonics,
135
+ shape (batch_size, length, dim), where dim indicates
136
+ the fundamental tone and overtones.
137
+ """
138
+ # convert to F0 in rad. The integer part n can be ignored
139
+ # because 2 * np.pi * n doesn't affect phase
140
+ rad_values = (f0_values / self.sampling_rate) % 1
141
+
142
+ # initial phase noise (no noise for fundamental component)
143
+ rand_ini = torch.rand(
144
+ f0_values.shape[0], f0_values.shape[2], device=f0_values.device
145
+ )
146
+ rand_ini[:, 0] = 0
147
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
148
+
149
+ # instantaneous phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad)
150
+ tmp_over_one = torch.cumsum(rad_values, 1) % 1
151
+ tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
152
+ cumsum_shift = torch.zeros_like(rad_values)
153
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
154
+
155
+ sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi)
156
+
157
+ return sines
158
+
159
+ def forward(self, f0: torch.Tensor):
160
+ with torch.no_grad():
161
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
162
+ # fundamental component
163
+ f0_buf[:, :, 0] = f0[:, :, 0]
164
+ for idx in np.arange(self.harmonic_num):
165
+ f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
166
+
167
+ sine_waves = self._f02sine(f0_buf) * self.sine_amp
168
+
169
+ uv = self._f02uv(f0)
170
+
171
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
172
+ noise = noise_amp * torch.randn_like(sine_waves)
173
+
174
+ sine_waves = sine_waves * uv + noise
175
+ return sine_waves, uv, noise
176
+
177
+
178
+ class SourceModuleHnNSF(torch.nn.Module):
179
+ """
180
+ Generates harmonic and noise source features.
181
+
182
+ This module uses the SineGenerator to create harmonic signals based on the
183
+ fundamental frequency (F0) and merges them into a single excitation signal.
184
+
185
+ Args:
186
+ sampling_rate (int): Sampling rate in Hz.
187
+ harmonic_num (int, optional): Number of harmonics above F0. Defaults to 0.
188
+ sine_amp (float, optional): Amplitude of sine source signal. Defaults to 0.1.
189
+ add_noise_std (float, optional): Standard deviation of additive Gaussian noise. Defaults to 0.003.
190
+ voiced_threshold (float, optional): Threshold to set voiced/unvoiced given F0. Defaults to 0.
191
+ """
192
+
193
+ def __init__(
194
+ self,
195
+ sampling_rate: int,
196
+ harmonic_num: int = 0,
197
+ sine_amp: float = 0.1,
198
+ add_noise_std: float = 0.003,
199
+ voiced_threshold: float = 0,
200
+ ):
201
+ super(SourceModuleHnNSF, self).__init__()
202
+
203
+ self.sine_amp = sine_amp
204
+ self.noise_std = add_noise_std
205
+
206
+ # to produce sine waveforms
207
+ self.l_sin_gen = SineGenerator(
208
+ sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshold
209
+ )
210
+
211
+ # to merge source harmonics into a single excitation
212
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
213
+ self.l_tanh = torch.nn.Tanh()
214
+
215
+ def forward(self, x: torch.Tensor):
216
+ sine_wavs, uv, _ = self.l_sin_gen(x)
217
+ sine_wavs = sine_wavs.to(dtype=self.l_linear.weight.dtype)
218
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
219
+
220
+ return sine_merge, None, None
221
+
222
+
223
+ class HiFiGANMRFGenerator(torch.nn.Module):
224
+ """
225
+ HiFi-GAN generator with Multi-Receptive Field (MRF) blocks.
226
+
227
+ This generator takes an input feature sequence and fundamental frequency (F0)
228
+ as input and generates an audio waveform. It utilizes transposed convolutions
229
+ for upsampling and MRF blocks for feature refinement. It can also condition
230
+ on global conditioning features.
231
+
232
+ Args:
233
+ in_channel (int): Number of input channels.
234
+ upsample_initial_channel (int): Number of channels after the initial convolution.
235
+ upsample_rates (list[int]): List of upsampling rates for the transposed convolutions.
236
+ upsample_kernel_sizes (list[int]): List of kernel sizes for the transposed convolutions.
237
+ resblock_kernel_sizes (list[int]): List of kernel sizes for the convolutional layers in the MRF blocks.
238
+ resblock_dilations (list[list[int]]): List of lists of dilation rates for the MRF blocks.
239
+ gin_channels (int): Number of global conditioning input channels (0 if no global conditioning).
240
+ sample_rate (int): Sampling rate of the audio.
241
+ harmonic_num (int): Number of harmonics to generate.
242
+ checkpointing (bool): Whether to use checkpointing to save memory during training (default: False).
243
+ """
244
+
245
+ def __init__(
246
+ self,
247
+ in_channel: int,
248
+ upsample_initial_channel: int,
249
+ upsample_rates: list[int],
250
+ upsample_kernel_sizes: list[int],
251
+ resblock_kernel_sizes: list[int],
252
+ resblock_dilations: list[list[int]],
253
+ gin_channels: int,
254
+ sample_rate: int,
255
+ harmonic_num: int,
256
+ checkpointing: bool = False,
257
+ ):
258
+ super().__init__()
259
+ self.num_kernels = len(resblock_kernel_sizes)
260
+ self.checkpointing = checkpointing
261
+
262
+ self.f0_upsample = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
263
+ self.m_source = SourceModuleHnNSF(sample_rate, harmonic_num)
264
+
265
+ self.conv_pre = weight_norm(
266
+ torch.nn.Conv1d(
267
+ in_channel, upsample_initial_channel, kernel_size=7, stride=1, padding=3
268
+ )
269
+ )
270
+ self.upsamples = torch.nn.ModuleList()
271
+ self.noise_convs = torch.nn.ModuleList()
272
+
273
+ stride_f0s = [
274
+ math.prod(upsample_rates[i + 1 :]) if i + 1 < len(upsample_rates) else 1
275
+ for i in range(len(upsample_rates))
276
+ ]
277
+
278
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
279
+ # handling odd upsampling rates
280
+ if u % 2 == 0:
281
+ # old method
282
+ padding = (k - u) // 2
283
+ else:
284
+ padding = u // 2 + u % 2
285
+
286
+ self.upsamples.append(
287
+ weight_norm(
288
+ torch.nn.ConvTranspose1d(
289
+ upsample_initial_channel // (2**i),
290
+ upsample_initial_channel // (2 ** (i + 1)),
291
+ kernel_size=k,
292
+ stride=u,
293
+ padding=padding,
294
+ output_padding=u % 2,
295
+ )
296
+ )
297
+ )
298
+ """ handling odd upsampling rates
299
+ # s k p
300
+ # 40 80 20
301
+ # 32 64 16
302
+ # 4 8 2
303
+ # 2 3 1
304
+ # 63 125 31
305
+ # 9 17 4
306
+ # 3 5 1
307
+ # 1 1 0
308
+ """
309
+ stride = stride_f0s[i]
310
+ kernel = 1 if stride == 1 else stride * 2 - stride % 2
311
+ padding = 0 if stride == 1 else (kernel - stride) // 2
312
+
313
+ self.noise_convs.append(
314
+ torch.nn.Conv1d(
315
+ 1,
316
+ upsample_initial_channel // (2 ** (i + 1)),
317
+ kernel_size=kernel,
318
+ stride=stride,
319
+ padding=padding,
320
+ )
321
+ )
322
+ self.mrfs = torch.nn.ModuleList()
323
+ for i in range(len(self.upsamples)):
324
+ channel = upsample_initial_channel // (2 ** (i + 1))
325
+ self.mrfs.append(
326
+ torch.nn.ModuleList(
327
+ [
328
+ MRFBlock(channel, kernel_size=k, dilations=d)
329
+ for k, d in zip(resblock_kernel_sizes, resblock_dilations)
330
+ ]
331
+ )
332
+ )
333
+ self.conv_post = weight_norm(
334
+ torch.nn.Conv1d(channel, 1, kernel_size=7, stride=1, padding=3)
335
+ )
336
+ if gin_channels != 0:
337
+ self.cond = torch.nn.Conv1d(gin_channels, upsample_initial_channel, 1)
338
+
339
+ def forward(
340
+ self, x: torch.Tensor, f0: torch.Tensor, g: Optional[torch.Tensor] = None
341
+ ):
342
+ f0 = self.f0_upsample(f0[:, None, :]).transpose(-1, -2)
343
+ har_source, _, _ = self.m_source(f0)
344
+ har_source = har_source.transpose(-1, -2)
345
+ x = self.conv_pre(x)
346
+
347
+ if g is not None:
348
+ x = x + self.cond(g)
349
+
350
+ for ups, mrf, noise_conv in zip(self.upsamples, self.mrfs, self.noise_convs):
351
+ x = torch.nn.functional.leaky_relu(x, LRELU_SLOPE)
352
+
353
+ if self.training and self.checkpointing:
354
+ x = checkpoint(ups, x, use_reentrant=False)
355
+ x = x + noise_conv(har_source)
356
+ xs = sum([checkpoint(layer, x, use_reentrant=False) for layer in mrf])
357
+ else:
358
+ x = ups(x)
359
+ x = x + noise_conv(har_source)
360
+ xs = sum([layer(x) for layer in mrf])
361
+ x = xs / self.num_kernels
362
+
363
+ x = torch.nn.functional.leaky_relu(x)
364
+ x = torch.tanh(self.conv_post(x))
365
+
366
+ return x
367
+
368
+ def remove_weight_norm(self):
369
+ remove_weight_norm(self.conv_pre)
370
+ for up in self.upsamples:
371
+ remove_weight_norm(up)
372
+ for mrf in self.mrfs:
373
+ mrf.remove_weight_norm()
374
+ remove_weight_norm(self.conv_post)
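A minimal end-to-end sketch for the MRF generator above (all hyperparameters are illustrative, not taken from a specific config):

    import torch
    from rvc.lib.algorithm.generators.hifigan_mrf import HiFiGANMRFGenerator

    gen = HiFiGANMRFGenerator(
        in_channel=192, upsample_initial_channel=512,
        upsample_rates=[10, 8, 2, 2], upsample_kernel_sizes=[20, 16, 4, 4],
        resblock_kernel_sizes=[3, 7, 11],
        resblock_dilations=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        gin_channels=256, sample_rate=32000, harmonic_num=8,
    )
    z = torch.randn(1, 192, 50)        # latent frames
    f0 = torch.full((1, 50), 220.0)    # per-frame F0 in Hz
    g = torch.randn(1, 256, 1)         # speaker embedding
    wav = gen(z, f0, g)                # (1, 1, 50 * 320) waveform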
rvc/lib/algorithm/generators/hifigan_nsf.py ADDED
@@ -0,0 +1,235 @@
1
+ import math
2
+ from typing import Optional
3
+
4
+ import torch
5
+ from torch.nn.utils import remove_weight_norm
6
+ from torch.nn.utils.parametrizations import weight_norm
7
+ from torch.utils.checkpoint import checkpoint
8
+
9
+ from rvc.lib.algorithm.commons import init_weights
10
+ from rvc.lib.algorithm.generators.hifigan import SineGenerator
11
+ from rvc.lib.algorithm.residuals import LRELU_SLOPE, ResBlock
12
+
13
+
14
+ class SourceModuleHnNSF(torch.nn.Module):
15
+ """
16
+ Source Module for generating harmonic and noise components for audio synthesis.
17
+
18
+ This module generates a harmonic source signal using sine waves and adds
19
+ optional noise. It's often used in neural vocoders as a source of excitation.
20
+
21
+ Args:
22
+ sample_rate (int): Sampling rate of the audio in Hz.
23
+ harmonic_num (int, optional): Number of harmonic overtones to generate above the fundamental frequency (F0). Defaults to 0.
24
+ sine_amp (float, optional): Amplitude of the sine wave components. Defaults to 0.1.
25
+ add_noise_std (float, optional): Standard deviation of the additive white Gaussian noise. Defaults to 0.003.
26
+ voiced_threshod (float, optional): Threshold for the fundamental frequency (F0) to determine if a frame is voiced. If F0 is below this threshold, it's considered unvoiced. Defaults to 0.
27
+ """
28
+
29
+ def __init__(
30
+ self,
31
+ sample_rate: int,
32
+ harmonic_num: int = 0,
33
+ sine_amp: float = 0.1,
34
+ add_noise_std: float = 0.003,
35
+ voiced_threshod: float = 0,
36
+ ):
37
+ super(SourceModuleHnNSF, self).__init__()
38
+
39
+ self.sine_amp = sine_amp
40
+ self.noise_std = add_noise_std
41
+
42
+ self.l_sin_gen = SineGenerator(
43
+ sample_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
44
+ )
45
+ self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
46
+ self.l_tanh = torch.nn.Tanh()
47
+
48
+ def forward(self, x: torch.Tensor, upsample_factor: int = 1):
49
+ sine_wavs, uv, _ = self.l_sin_gen(x, upsample_factor)
50
+ sine_wavs = sine_wavs.to(dtype=self.l_linear.weight.dtype)
51
+ sine_merge = self.l_tanh(self.l_linear(sine_wavs))
52
+ return sine_merge, None, None
53
+
54
+
55
+ class HiFiGANNSFGenerator(torch.nn.Module):
56
+ """
57
+ Generator module based on the Neural Source Filter (NSF) architecture.
58
+
59
+ This generator synthesizes audio by first generating a source excitation signal
60
+ (harmonic and noise) and then filtering it through a series of upsampling and
61
+ residual blocks. Global conditioning can be applied to influence the generation.
62
+
63
+ Args:
64
+ initial_channel (int): Number of input channels to the initial convolutional layer.
65
+ resblock_kernel_sizes (list): List of kernel sizes for the residual blocks.
66
+ resblock_dilation_sizes (list): List of lists of dilation rates for the residual blocks, corresponding to each kernel size.
67
+ upsample_rates (list): List of upsampling factors for each upsampling layer.
68
+ upsample_initial_channel (int): Number of output channels from the initial convolutional layer, which is also the input to the first upsampling layer.
69
+ upsample_kernel_sizes (list): List of kernel sizes for the transposed convolutional layers used for upsampling.
70
+ gin_channels (int): Number of input channels for the global conditioning. If 0, no global conditioning is used.
71
+ sr (int): Sampling rate of the audio.
72
+ checkpointing (bool, optional): Whether to use gradient checkpointing to save memory during training. Defaults to False.
73
+ """
74
+
75
+ def __init__(
76
+ self,
77
+ initial_channel: int,
78
+ resblock_kernel_sizes: list,
79
+ resblock_dilation_sizes: list,
80
+ upsample_rates: list,
81
+ upsample_initial_channel: int,
82
+ upsample_kernel_sizes: list,
83
+ gin_channels: int,
84
+ sr: int,
85
+ checkpointing: bool = False,
86
+ ):
87
+ super(HiFiGANNSFGenerator, self).__init__()
88
+
89
+ self.num_kernels = len(resblock_kernel_sizes)
90
+ self.num_upsamples = len(upsample_rates)
91
+ self.checkpointing = checkpointing
92
+ self.f0_upsamp = torch.nn.Upsample(scale_factor=math.prod(upsample_rates))
93
+ self.m_source = SourceModuleHnNSF(sample_rate=sr, harmonic_num=0)
94
+
95
+ self.conv_pre = torch.nn.Conv1d(
96
+ initial_channel, upsample_initial_channel, 7, 1, padding=3
97
+ )
98
+
99
+ self.ups = torch.nn.ModuleList()
100
+ self.noise_convs = torch.nn.ModuleList()
101
+
102
+ channels = [
103
+ upsample_initial_channel // (2 ** (i + 1))
104
+ for i in range(len(upsample_rates))
105
+ ]
106
+ stride_f0s = [
107
+ math.prod(upsample_rates[i + 1 :]) if i + 1 < len(upsample_rates) else 1
108
+ for i in range(len(upsample_rates))
109
+ ]
110
+
111
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
112
+ # handling odd upsampling rates
113
+ if u % 2 == 0:
114
+ # old method
115
+ padding = (k - u) // 2
116
+ else:
117
+ padding = u // 2 + u % 2
118
+
119
+ self.ups.append(
120
+ weight_norm(
121
+ torch.nn.ConvTranspose1d(
122
+ upsample_initial_channel // (2**i),
123
+ channels[i],
124
+ k,
125
+ u,
126
+ padding=padding,
127
+ output_padding=u % 2,
128
+ )
129
+ )
130
+ )
131
+ """ handling odd upsampling rates
132
+ # s k p
133
+ # 40 80 20
134
+ # 32 64 16
135
+ # 4 8 2
136
+ # 2 3 1
137
+ # 63 125 31
138
+ # 9 17 4
139
+ # 3 5 1
140
+ # 1 1 0
141
+ """
142
+ stride = stride_f0s[i]
143
+ kernel = 1 if stride == 1 else stride * 2 - stride % 2
144
+ padding = 0 if stride == 1 else (kernel - stride) // 2
145
+
146
+ self.noise_convs.append(
147
+ torch.nn.Conv1d(
148
+ 1,
149
+ channels[i],
150
+ kernel_size=kernel,
151
+ stride=stride,
152
+ padding=padding,
153
+ )
154
+ )
155
+
156
+ self.resblocks = torch.nn.ModuleList(
157
+ [
158
+ ResBlock(channels[i], k, d)
159
+ for i in range(len(self.ups))
160
+ for k, d in zip(resblock_kernel_sizes, resblock_dilation_sizes)
161
+ ]
162
+ )
163
+
164
+ self.conv_post = torch.nn.Conv1d(channels[-1], 1, 7, 1, padding=3, bias=False)
165
+ self.ups.apply(init_weights)
166
+
167
+ if gin_channels != 0:
168
+ self.cond = torch.nn.Conv1d(gin_channels, upsample_initial_channel, 1)
169
+
170
+ self.upp = math.prod(upsample_rates)
171
+ self.lrelu_slope = LRELU_SLOPE
172
+
173
+ def forward(
174
+ self, x: torch.Tensor, f0: torch.Tensor, g: Optional[torch.Tensor] = None
175
+ ):
176
+ har_source, _, _ = self.m_source(f0, self.upp)
177
+ har_source = har_source.transpose(1, 2)
178
+ # new tensor
179
+ x = self.conv_pre(x)
180
+
181
+ if g is not None:
182
+ x = x + self.cond(g)
183
+
184
+ for i, (ups, noise_convs) in enumerate(zip(self.ups, self.noise_convs)):
185
+ x = torch.nn.functional.leaky_relu(x, self.lrelu_slope)
186
+ # Apply upsampling layer
187
+ if self.training and self.checkpointing:
188
+ x = checkpoint(ups, x, use_reentrant=False)
189
+ x = x + noise_convs(har_source)
190
+ xs = sum(
191
+ [
192
+ checkpoint(resblock, x, use_reentrant=False)
193
+ for j, resblock in enumerate(self.resblocks)
194
+ if j in range(i * self.num_kernels, (i + 1) * self.num_kernels)
195
+ ]
196
+ )
197
+ else:
198
+ x = ups(x)
199
+ x = x + noise_convs(har_source)
200
+ xs = sum(
201
+ [
202
+ resblock(x)
203
+ for j, resblock in enumerate(self.resblocks)
204
+ if j in range(i * self.num_kernels, (i + 1) * self.num_kernels)
205
+ ]
206
+ )
207
+ x = xs / self.num_kernels
208
+
209
+ x = torch.nn.functional.leaky_relu(x)
210
+ x = torch.tanh(self.conv_post(x))
211
+
212
+ return x
213
+
214
+ def remove_weight_norm(self):
215
+ for l in self.ups:
216
+ remove_weight_norm(l)
217
+ for l in self.resblocks:
218
+ l.remove_weight_norm()
219
+
220
+ def __prepare_scriptable__(self):
221
+ for l in self.ups:
222
+ for hook in l._forward_pre_hooks.values():
223
+ if (
224
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
225
+ and hook.__class__.__name__ == "WeightNorm"
226
+ ):
227
+ remove_weight_norm(l)
228
+ for l in self.resblocks:
229
+ for hook in l._forward_pre_hooks.values():
230
+ if (
231
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
232
+ and hook.__class__.__name__ == "WeightNorm"
233
+ ):
234
+ remove_weight_norm(l)
235
+ return self
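A companion sketch for the NSF generator above, mirroring the MRF example (hyperparameters are illustrative):

    import torch
    from rvc.lib.algorithm.generators.hifigan_nsf import HiFiGANNSFGenerator

    gen = HiFiGANNSFGenerator(
        initial_channel=192,
        resblock_kernel_sizes=[3, 7, 11],
        resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        upsample_rates=[10, 8, 2, 2],
        upsample_initial_channel=512,
        upsample_kernel_sizes=[20, 16, 4, 4],
        gin_channels=256, sr=32000,
    )
    z = torch.randn(1, 192, 50)        # latent frames
    f0 = torch.full((1, 50), 220.0)    # per-frame F0 in Hz, upsampled internally by prod(upsample_rates)
    wav = gen(z, f0)                   # (1, 1, 50 * 320) waveform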
rvc/lib/algorithm/generators/refinegan.py ADDED
@@ -0,0 +1,451 @@
1
+ import numpy as np
2
+ import torch
3
+ import torchaudio
4
+ from torch import nn
5
+ from torch.nn import functional as F
6
+ from torch.nn.utils.parametrizations import weight_norm
7
+ from torch.nn.utils import remove_weight_norm
8
+ from torch.utils.checkpoint import checkpoint
9
+
10
+ from rvc.lib.algorithm.commons import init_weights, get_padding
11
+
12
+
13
+ class ResBlock(nn.Module):
14
+ """
15
+ Residual block with multiple dilated convolutions.
16
+
17
+ This block applies a sequence of dilated convolutional layers with Leaky ReLU activation.
18
+ It's designed to capture information at different scales due to the varying dilation rates.
19
+
20
+ Args:
21
+ channels (int): Number of input and output channels.
23
+ kernel_size (int, optional): Kernel size for the convolutional layers. Defaults to 7.
24
+ dilation (tuple[int], optional): Tuple of dilation rates for the convolutional layers. Defaults to (1, 3, 5).
25
+ leaky_relu_slope (float, optional): Slope for the Leaky ReLU activation. Defaults to 0.2.
26
+ """
27
+
28
+ def __init__(
29
+ self,
30
+ channels: int,
31
+ kernel_size: int = 7,
32
+ dilation: tuple[int] = (1, 3, 5),
33
+ leaky_relu_slope: float = 0.2,
34
+ ):
35
+ super().__init__()
36
+
37
+ self.leaky_relu_slope = leaky_relu_slope
38
+
39
+ self.convs1 = nn.ModuleList(
40
+ [
41
+ weight_norm(
42
+ nn.Conv1d(
43
+ channels,
44
+ channels,
45
+ kernel_size,
46
+ stride=1,
47
+ dilation=d,
48
+ padding=get_padding(kernel_size, d),
49
+ )
50
+ )
51
+ for d in dilation
52
+ ]
53
+ )
54
+ self.convs1.apply(init_weights)
55
+
56
+ self.convs2 = nn.ModuleList(
57
+ [
58
+ weight_norm(
59
+ nn.Conv1d(
60
+ channels,
61
+ channels,
62
+ kernel_size,
63
+ stride=1,
64
+ dilation=1,
65
+ padding=get_padding(kernel_size, 1),
66
+ )
67
+ )
68
+ for d in dilation
69
+ ]
70
+ )
71
+ self.convs2.apply(init_weights)
72
+
73
+ def forward(self, x: torch.Tensor):
74
+ for c1, c2 in zip(self.convs1, self.convs2):
75
+ xt = F.leaky_relu(x, self.leaky_relu_slope)
76
+ xt = c1(xt)
77
+ xt = F.leaky_relu(xt, self.leaky_relu_slope)
78
+ xt = c2(xt)
79
+ x = xt + x
80
+
81
+ return x
82
+
83
+ def remove_weight_norm(self):
84
+ for c1, c2 in zip(self.convs1, self.convs2):
85
+ remove_weight_norm(c1)
86
+ remove_weight_norm(c2)
87
+
88
+
89
+ class AdaIN(nn.Module):
90
+ """
91
+ Adaptive Instance Normalization layer.
92
+
93
+ This layer applies a scaling factor to the input based on a learnable weight.
94
+
95
+ Args:
96
+ channels (int): Number of input channels.
97
+ leaky_relu_slope (float, optional): Slope for the Leaky ReLU activation applied after scaling. Defaults to 0.2.
98
+ """
99
+
100
+ def __init__(
101
+ self,
102
+ *,
103
+ channels: int,
104
+ leaky_relu_slope: float = 0.2,
105
+ ):
106
+ super().__init__()
107
+
108
+ self.weight = nn.Parameter(torch.ones(channels) * 1e-4)
109
+ # safe to use in-place as it is used on a new x+gaussian tensor
110
+ self.activation = nn.LeakyReLU(leaky_relu_slope)
111
+
112
+ def forward(self, x: torch.Tensor):
113
+ gaussian = torch.randn_like(x) * self.weight[None, :, None]
114
+
115
+ return self.activation(x + gaussian)
116
+
117
+
118
+ class ParallelResBlock(nn.Module):
119
+ """
120
+ Parallel residual block that applies multiple residual blocks with different kernel sizes in parallel.
121
+
122
+ Args:
123
+ in_channels (int): Number of input channels.
124
+ out_channels (int): Number of output channels.
125
+ kernel_sizes (tuple[int], optional): Tuple of kernel sizes for the parallel residual blocks. Defaults to (3, 7, 11).
126
+ dilation (tuple[int], optional): Tuple of dilation rates for the convolutional layers within the residual blocks. Defaults to (1, 3, 5).
127
+ leaky_relu_slope (float, optional): Slope for the Leaky ReLU activation. Defaults to 0.2.
128
+ """
129
+
130
+ def __init__(
131
+ self,
132
+ *,
133
+ in_channels: int,
134
+ out_channels: int,
135
+ kernel_sizes: tuple[int] = (3, 7, 11),
136
+ dilation: tuple[int] = (1, 3, 5),
137
+ leaky_relu_slope: float = 0.2,
138
+ ):
139
+ super().__init__()
140
+
141
+ self.in_channels = in_channels
142
+ self.out_channels = out_channels
143
+
144
+ self.input_conv = nn.Conv1d(
145
+ in_channels=in_channels,
146
+ out_channels=out_channels,
147
+ kernel_size=7,
148
+ stride=1,
149
+ padding=3,
150
+ )
151
+
152
+ self.input_conv.apply(init_weights)
153
+
154
+ self.blocks = nn.ModuleList(
155
+ [
156
+ nn.Sequential(
157
+ AdaIN(channels=out_channels),
158
+ ResBlock(
159
+ out_channels,
160
+ kernel_size=kernel_size,
161
+ dilation=dilation,
162
+ leaky_relu_slope=leaky_relu_slope,
163
+ ),
164
+ AdaIN(channels=out_channels),
165
+ )
166
+ for kernel_size in kernel_sizes
167
+ ]
168
+ )
169
+
170
+ def forward(self, x: torch.Tensor):
171
+ x = self.input_conv(x)
172
+ return torch.stack([block(x) for block in self.blocks], dim=0).mean(dim=0)
173
+
174
+ def remove_weight_norm(self):
175
+ remove_weight_norm(self.input_conv)
176
+ for block in self.blocks:
177
+ block[1].remove_weight_norm()
178
+
179
+
180
+ class SineGenerator(nn.Module):
181
+ """
182
+ Definition of sine generator
183
+
184
+ Generates sine waveforms with optional harmonics and additive noise.
185
+ Can be used to create harmonic noise source for neural vocoders.
186
+
187
+ Args:
188
+ samp_rate (int): Sampling rate in Hz.
189
+ harmonic_num (int): Number of harmonic overtones (default 0).
190
+ sine_amp (float): Amplitude of sine-waveform (default 0.1).
191
+ noise_std (float): Standard deviation of Gaussian noise (default 0.003).
192
+ voiced_threshold (float): F0 threshold for voiced/unvoiced classification (default 0).
193
+ """
194
+
195
+ def __init__(
196
+ self,
197
+ samp_rate,
198
+ harmonic_num=0,
199
+ sine_amp=0.1,
200
+ noise_std=0.003,
201
+ voiced_threshold=0,
202
+ ):
203
+ super(SineGenerator, self).__init__()
204
+ self.sine_amp = sine_amp
205
+ self.noise_std = noise_std
206
+ self.harmonic_num = harmonic_num
207
+ self.dim = self.harmonic_num + 1
208
+ self.sampling_rate = samp_rate
209
+ self.voiced_threshold = voiced_threshold
210
+
211
+ self.merge = nn.Sequential(
212
+ nn.Linear(self.dim, 1, bias=False),
213
+ nn.Tanh(),
214
+ )
215
+
216
+ def _f02uv(self, f0):
217
+ # generate uv signal
218
+ uv = torch.ones_like(f0)
219
+ uv = uv * (f0 > self.voiced_threshold)
220
+ return uv
221
+
222
+ def _f02sine(self, f0_values):
223
+ """f0_values: (batchsize, length, dim)
224
+ where dim indicates fundamental tone and overtones
225
+ """
226
+ # convert to F0 in rad. The integer part n can be ignored
227
+ # because 2 * np.pi * n doesn't affect phase
228
+ rad_values = (f0_values / self.sampling_rate) % 1
229
+
230
+ # initial phase noise (no noise for fundamental component)
231
+ rand_ini = torch.rand(
232
+ f0_values.shape[0], f0_values.shape[2], device=f0_values.device
233
+ )
234
+ rand_ini[:, 0] = 0
235
+ rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
236
+
237
+ # instantaneous phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad)
238
+ tmp_over_one = torch.cumsum(rad_values, 1) % 1
239
+ tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
240
+ cumsum_shift = torch.zeros_like(rad_values)
241
+ cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
242
+
243
+ sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi)
244
+
245
+ return sines
246
+
247
+ def forward(self, f0):
248
+ with torch.no_grad():
249
+ f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
250
+ # fundamental component
251
+ f0_buf[:, :, 0] = f0[:, :, 0]
252
+ for idx in np.arange(self.harmonic_num):
253
+ f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
254
+
255
+ sine_waves = self._f02sine(f0_buf) * self.sine_amp
256
+
257
+ uv = self._f02uv(f0)
258
+
259
+ noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
260
+ noise = noise_amp * torch.randn_like(sine_waves)
261
+
262
+ sine_waves = sine_waves * uv + noise
263
+
264
+ # merge with grad
265
+ return self.merge(sine_waves)
266
+
267
+
268
+ class RefineGANGenerator(nn.Module):
269
+ """
270
+ RefineGAN generator for audio synthesis.
271
+
272
+ This generator uses a combination of downsampling, residual blocks, and parallel residual blocks
273
+ to refine an input mel-spectrogram and fundamental frequency (F0) into an audio waveform.
274
+ It can also incorporate global conditioning.
275
+
276
+ Args:
277
+ sample_rate (int, optional): Sampling rate of the audio. Defaults to 44100.
278
+ downsample_rates (tuple[int], optional): Downsampling rates for the downsampling blocks. Defaults to (2, 2, 8, 8).
279
+ upsample_rates (tuple[int], optional): Upsampling rates for the upsampling blocks. Defaults to (8, 8, 2, 2).
280
+ leaky_relu_slope (float, optional): Slope for the Leaky ReLU activation. Defaults to 0.2.
281
+ num_mels (int, optional): Number of mel-frequency bins in the input mel-spectrogram. Defaults to 128.
282
+ start_channels (int, optional): Number of channels in the initial convolutional layer. Defaults to 16.
283
+ gin_channels (int, optional): Number of channels for the global conditioning input. Defaults to 256.
284
+ checkpointing (bool, optional): Whether to use checkpointing for memory efficiency. Defaults to False.
285
+ """
286
+
287
+ def __init__(
288
+ self,
289
+ *,
290
+ sample_rate: int = 44100,
291
+ downsample_rates: tuple[int] = (2, 2, 8, 8), # unused
292
+ upsample_rates: tuple[int] = (8, 8, 2, 2),
293
+ leaky_relu_slope: float = 0.2,
294
+ num_mels: int = 128,
295
+ start_channels: int = 16, # unused
296
+ gin_channels: int = 256,
297
+ checkpointing: bool = False,
298
+ upsample_initial_channel=512,
299
+ ):
300
+ super().__init__()
301
+ self.upsample_rates = upsample_rates
302
+ self.leaky_relu_slope = leaky_relu_slope
303
+ self.checkpointing = checkpointing
304
+
305
+ self.upp = np.prod(upsample_rates)
306
+ self.m_source = SineGenerator(sample_rate)
307
+
308
+ # expanded f0 sinegen -> match mel_conv
309
+ # (8, 1, 17280) -> (8, 16, 17280)
310
+ self.pre_conv = weight_norm(
311
+ nn.Conv1d(
312
+ 1,
313
+ 16,
314
+ 7,
315
+ 1,
316
+ padding=3,
317
+ )
318
+ )
319
+
320
+ # (8, 16, 17280) = 4th upscale
321
+ # (8, 32, 8640) = 3rd upscale
322
+ # (8, 64, 4320) = 2nd upscale
323
+ # (8, 128, 432) = 1st upscale
324
+ # (8, 256, 36) merged to mel
325
+
326
+ # f0 downsampling and upchanneling
327
+ channels = start_channels
328
+ size = self.upp
329
+ self.downsample_blocks = nn.ModuleList([])
330
+ self.df0 = []
331
+ for i, u in enumerate(upsample_rates):
332
+
333
+ new_size = int(size / upsample_rates[-i - 1])
334
+ # T dimension factors for torchaudio.functional.resample
335
+ self.df0.append([size, new_size])
336
+ size = new_size
337
+
338
+ new_channels = channels * 2
339
+ self.downsample_blocks.append(
340
+ weight_norm(nn.Conv1d(channels, new_channels, 7, 1, padding=3))
341
+ )
342
+ channels = new_channels
343
+
344
+ # mel handling
345
+ channels = upsample_initial_channel
346
+
347
+ self.mel_conv = weight_norm(
348
+ nn.Conv1d(
349
+ num_mels,
350
+ channels // 2,
351
+ 7,
352
+ 1,
353
+ padding=3,
354
+ )
355
+ )
356
+
357
+ self.mel_conv.apply(init_weights)
358
+
359
+ if gin_channels != 0:
360
+ self.cond = nn.Conv1d(256, channels // 2, 1)
361
+
362
+ self.upsample_blocks = nn.ModuleList([])
363
+ self.upsample_conv_blocks = nn.ModuleList([])
364
+
365
+ for rate in upsample_rates:
366
+ new_channels = channels // 2
367
+
368
+ self.upsample_blocks.append(nn.Upsample(scale_factor=rate, mode="linear"))
369
+
370
+ self.upsample_conv_blocks.append(
371
+ ParallelResBlock(
372
+ in_channels=channels + channels // 4,
373
+ out_channels=new_channels,
374
+ kernel_sizes=(3, 7, 11),
375
+ dilation=(1, 3, 5),
376
+ leaky_relu_slope=leaky_relu_slope,
377
+ )
378
+ )
379
+
380
+ channels = new_channels
381
+
382
+ self.conv_post = weight_norm(
383
+ nn.Conv1d(channels, 1, 7, 1, padding=3, bias=False)
384
+ )
385
+ self.conv_post.apply(init_weights)
386
+
387
+ def forward(self, mel: torch.Tensor, f0: torch.Tensor, g: torch.Tensor = None):
388
+ f0_size = mel.shape[-1]
389
+ # upsample f0 to the full waveform length (mel frames * prod(upsample_rates))
390
+ f0 = F.interpolate(f0.unsqueeze(1), size=f0_size * self.upp, mode="linear")
391
+ # convert f0 into a stack of sine harmonics
392
+ har_source = self.m_source(f0.transpose(1, 2)).transpose(1, 2)
393
+ # prepare for fusion to mel
394
+ x = self.pre_conv(har_source)
395
+ # downsampled/upchanneled versions for each upscale
396
+ downs = []
397
+ for block, (old_size, new_size) in zip(self.downsample_blocks, self.df0):
398
+ x = F.leaky_relu(x, self.leaky_relu_slope)
399
+ downs.append(x)
400
+ # attempt to cancel spectral aliasing
401
+ x = torchaudio.functional.resample(
402
+ x.contiguous(),
403
+ orig_freq=int(f0_size * old_size),
404
+ new_freq=int(f0_size * new_size),
405
+ lowpass_filter_width=64,
406
+ rolloff=0.9475937167399596,
407
+ resampling_method="sinc_interp_kaiser",
408
+ beta=14.769656459379492,
409
+ )
410
+ x = block(x)
411
+
412
+ # project the mel spectrogram (num_mels) up to upsample_initial_channel // 2 channels
413
+ mel = self.mel_conv(mel)
414
+ if g is not None:
415
+ # adding expanded speaker embedding
416
+ mel = mel + self.cond(g)
417
+
418
+ x = torch.cat([mel, x], dim=1)
419
+
420
+ for ups, res, down in zip(
421
+ self.upsample_blocks,
422
+ self.upsample_conv_blocks,
423
+ reversed(downs),
424
+ ):
425
+ x = F.leaky_relu(x, self.leaky_relu_slope)
426
+
427
+ if self.training and self.checkpointing:
428
+ x = checkpoint(ups, x, use_reentrant=False)
429
+ x = torch.cat([x, down], dim=1)
430
+ x = checkpoint(res, x, use_reentrant=False)
431
+ else:
432
+ x = ups(x)
433
+ x = torch.cat([x, down], dim=1)
434
+ x = res(x)
435
+
436
+ x = F.leaky_relu(x, self.leaky_relu_slope)
437
+ x = self.conv_post(x)
438
+ x = torch.tanh(x)
439
+
440
+ return x
441
+
442
+ def remove_weight_norm(self):
443
+ remove_weight_norm(self.pre_conv)
444
+ remove_weight_norm(self.mel_conv)
445
+ remove_weight_norm(self.conv_post)
446
+
447
+ for block in self.downsample_blocks:
448
+ block.remove_weight_norm()
449
+
450
+ for block in self.upsample_conv_blocks:
451
+ block.remove_weight_norm()
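
A usage sketch for the generator above. The import path mirrors the one used by the synthesizer later in this upload; the batch size, frame count and channel counts are illustrative assumptions (num_mels is set to the model's inter_channels, 192, with a 40 kHz upsample schedule):

import torch
from rvc.lib.algorithm.generators.refinegan import RefineGANGenerator

gen = RefineGANGenerator(
    sample_rate=40000,
    upsample_rates=(10, 8, 2, 2),
    num_mels=192,
    gin_channels=256,
    upsample_initial_channel=512,
)
mel = torch.randn(2, 192, 36)        # (batch, inter_channels, frames)
f0 = torch.full((2, 36), 220.0)      # one F0 value per frame, in Hz
g = torch.randn(2, 256, 1)           # speaker embedding, e.g. from emb_g
with torch.no_grad():
    audio = gen(mel, f0, g=g)
print(audio.shape)                    # (2, 1, 36 * prod(upsample_rates)) = (2, 1, 11520)
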
rvc/lib/algorithm/modules.py ADDED
@@ -0,0 +1,117 @@
1
+ import torch
2
+ from rvc.lib.algorithm.commons import fused_add_tanh_sigmoid_multiply
3
+
4
+
5
+ class WaveNet(torch.nn.Module):
6
+ """
7
+ WaveNet residual blocks as used in WaveGlow.
8
+
9
+ Args:
10
+ hidden_channels (int): Number of hidden channels.
11
+ kernel_size (int): Size of the convolutional kernel.
12
+ dilation_rate (int): Dilation rate of the convolution.
13
+ n_layers (int): Number of convolutional layers.
14
+ gin_channels (int, optional): Number of conditioning channels. Defaults to 0.
15
+ p_dropout (float, optional): Dropout probability. Defaults to 0.
16
+ """
17
+
18
+ def __init__(
19
+ self,
20
+ hidden_channels: int,
21
+ kernel_size: int,
22
+ dilation_rate,
23
+ n_layers: int,
24
+ gin_channels: int = 0,
25
+ p_dropout: int = 0,
26
+ ):
27
+ super().__init__()
28
+ assert kernel_size % 2 == 1, "Kernel size must be odd for proper padding."
29
+
30
+ self.hidden_channels = hidden_channels
31
+ self.kernel_size = (kernel_size,)
32
+ self.dilation_rate = dilation_rate
33
+ self.n_layers = n_layers
34
+ self.gin_channels = gin_channels
35
+ self.p_dropout = p_dropout
36
+ self.n_channels_tensor = torch.IntTensor([hidden_channels]) # Static tensor
37
+
38
+ self.in_layers = torch.nn.ModuleList()
39
+ self.res_skip_layers = torch.nn.ModuleList()
40
+ self.drop = torch.nn.Dropout(p_dropout)
41
+
42
+ # Conditional layer for global conditioning
43
+ if gin_channels:
44
+ self.cond_layer = torch.nn.utils.parametrizations.weight_norm(
45
+ torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1),
46
+ name="weight",
47
+ )
48
+
49
+ # Precompute dilations and paddings
50
+ dilations = [dilation_rate**i for i in range(n_layers)]
51
+ paddings = [(kernel_size * d - d) // 2 for d in dilations]
52
+
53
+ # Initialize layers
54
+ for i in range(n_layers):
55
+ self.in_layers.append(
56
+ torch.nn.utils.parametrizations.weight_norm(
57
+ torch.nn.Conv1d(
58
+ hidden_channels,
59
+ 2 * hidden_channels,
60
+ kernel_size,
61
+ dilation=dilations[i],
62
+ padding=paddings[i],
63
+ ),
64
+ name="weight",
65
+ )
66
+ )
67
+
68
+ res_skip_channels = (
69
+ hidden_channels if i == n_layers - 1 else 2 * hidden_channels
70
+ )
71
+ self.res_skip_layers.append(
72
+ torch.nn.utils.parametrizations.weight_norm(
73
+ torch.nn.Conv1d(hidden_channels, res_skip_channels, 1),
74
+ name="weight",
75
+ )
76
+ )
77
+
78
+ def forward(self, x, x_mask, g=None):
79
+ output = x.clone().zero_()
80
+
81
+ # Apply conditional layer if global conditioning is provided
82
+ g = self.cond_layer(g) if g is not None else None
83
+
84
+ for i in range(self.n_layers):
85
+ x_in = self.in_layers[i](x)
86
+ g_l = (
87
+ g[
88
+ :,
89
+ i * 2 * self.hidden_channels : (i + 1) * 2 * self.hidden_channels,
90
+ :,
91
+ ]
92
+ if g is not None
93
+ else 0
94
+ )
95
+
96
+ # Activation with fused Tanh-Sigmoid
97
+ acts = fused_add_tanh_sigmoid_multiply(x_in, g_l, self.n_channels_tensor)
98
+ acts = self.drop(acts)
99
+
100
+ # Residual and skip connections
101
+ res_skip_acts = self.res_skip_layers[i](acts)
102
+ if i < self.n_layers - 1:
103
+ res_acts = res_skip_acts[:, : self.hidden_channels, :]
104
+ x = (x + res_acts) * x_mask
105
+ output = output + res_skip_acts[:, self.hidden_channels :, :]
106
+ else:
107
+ output = output + res_skip_acts
108
+
109
+ return output * x_mask
110
+
111
+ def remove_weight_norm(self):
112
+ if self.gin_channels:
113
+ torch.nn.utils.remove_weight_norm(self.cond_layer)
114
+ for layer in self.in_layers:
115
+ torch.nn.utils.remove_weight_norm(layer)
116
+ for layer in self.res_skip_layers:
117
+ torch.nn.utils.remove_weight_norm(layer)
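
The gating inside the loop above comes from fused_add_tanh_sigmoid_multiply in rvc/lib/algorithm/commons.py. A standalone sketch of that computation (not the library function itself) to make the channel split explicit:

import torch

def gated_activation(x_in, g_l, n_channels):
    # WaveNet-style gate: the first half of the channels goes through tanh,
    # the second half through sigmoid, and the two halves are multiplied.
    in_act = x_in + g_l
    t_act = torch.tanh(in_act[:, :n_channels, :])
    s_act = torch.sigmoid(in_act[:, n_channels:, :])
    return t_act * s_act

x_in = torch.randn(1, 2 * 192, 100)   # output of one in_layer: 2 * hidden_channels
g_l = torch.zeros_like(x_in)          # conditioning slice, or 0 when g is None
print(gated_activation(x_in, g_l, 192).shape)  # torch.Size([1, 192, 100])
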
rvc/lib/algorithm/normalization.py ADDED
@@ -0,0 +1,26 @@
1
+ import torch
2
+
3
+
4
+ class LayerNorm(torch.nn.Module):
5
+ """
6
+ Layer normalization module.
7
+
8
+ Args:
9
+ channels (int): Number of channels.
10
+ eps (float, optional): Epsilon value for numerical stability. Defaults to 1e-5.
11
+ """
12
+
13
+ def __init__(self, channels: int, eps: float = 1e-5):
14
+ super().__init__()
15
+ self.eps = eps
16
+ self.gamma = torch.nn.Parameter(torch.ones(channels))
17
+ self.beta = torch.nn.Parameter(torch.zeros(channels))
18
+
19
+ def forward(self, x):
20
+ # Transpose to (batch_size, time_steps, channels) for layer_norm
21
+ x = x.transpose(1, -1)
22
+ x = torch.nn.functional.layer_norm(
23
+ x, (x.size(-1),), self.gamma, self.beta, self.eps
24
+ )
25
+ # Transpose back to (batch_size, channels, time_steps)
26
+ return x.transpose(1, -1)
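
This channel-first LayerNorm matches torch.nn.LayerNorm applied over the channel axis of a (batch, channels, time) tensor; the transposes only move channels to the last position and back. A quick equivalence check, assuming the import path used elsewhere in this upload:

import torch
from rvc.lib.algorithm.normalization import LayerNorm

x = torch.randn(4, 192, 50)                     # (batch, channels, time)
out = LayerNorm(192)(x)
ref = torch.nn.LayerNorm(192)(x.transpose(1, -1)).transpose(1, -1)
print(torch.allclose(out, ref, atol=1e-6))      # True: same normalization, channel-first API
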
rvc/lib/algorithm/residuals.py ADDED
@@ -0,0 +1,261 @@
1
+ import torch
2
+ from itertools import chain
3
+ from typing import Optional, Tuple
4
+ from torch.nn.utils import remove_weight_norm
5
+ from torch.nn.utils.parametrizations import weight_norm
6
+
7
+ from rvc.lib.algorithm.modules import WaveNet
8
+ from rvc.lib.algorithm.commons import get_padding, init_weights
9
+
10
+ LRELU_SLOPE = 0.1
11
+
12
+
13
+ def create_conv1d_layer(channels, kernel_size, dilation):
14
+ return weight_norm(
15
+ torch.nn.Conv1d(
16
+ channels,
17
+ channels,
18
+ kernel_size,
19
+ 1,
20
+ dilation=dilation,
21
+ padding=get_padding(kernel_size, dilation),
22
+ )
23
+ )
24
+
25
+
26
+ def apply_mask(tensor: torch.Tensor, mask: Optional[torch.Tensor]):
+ # truth-testing a multi-element tensor raises an error, so check for None explicitly
+ return tensor * mask if mask is not None else tensor
+
+ def apply_mask_(tensor: torch.Tensor, mask: Optional[torch.Tensor]):
+ return tensor.mul_(mask) if mask is not None else tensor
31
+ return tensor.mul_(mask) if mask else tensor
32
+
33
+
34
+ class ResBlock(torch.nn.Module):
35
+ """
36
+ A residual block module that applies a series of 1D convolutional layers with residual connections.
37
+ """
38
+
39
+ def __init__(
40
+ self, channels: int, kernel_size: int = 3, dilations: Tuple[int] = (1, 3, 5)
41
+ ):
42
+ """
43
+ Initializes the ResBlock.
44
+
45
+ Args:
46
+ channels (int): Number of input and output channels for the convolution layers.
47
+ kernel_size (int): Size of the convolution kernel. Defaults to 3.
48
+ dilations (Tuple[int]): Tuple of dilation rates for the convolution layers in the first set.
49
+ """
50
+ super().__init__()
51
+ # Create convolutional layers with specified dilations and initialize weights
52
+ self.convs1 = self._create_convs(channels, kernel_size, dilations)
53
+ self.convs2 = self._create_convs(channels, kernel_size, [1] * len(dilations))
54
+
55
+ @staticmethod
56
+ def _create_convs(channels: int, kernel_size: int, dilations: Tuple[int]):
57
+ """
58
+ Creates a list of 1D convolutional layers with specified dilations.
59
+
60
+ Args:
61
+ channels (int): Number of input and output channels for the convolution layers.
62
+ kernel_size (int): Size of the convolution kernel.
63
+ dilations (Tuple[int]): Tuple of dilation rates for each convolution layer.
64
+ """
65
+ layers = torch.nn.ModuleList(
66
+ [create_conv1d_layer(channels, kernel_size, d) for d in dilations]
67
+ )
68
+ layers.apply(init_weights)
69
+ return layers
70
+
71
+ def forward(self, x: torch.Tensor, x_mask: torch.Tensor = None):
72
+ for conv1, conv2 in zip(self.convs1, self.convs2):
73
+ x_residual = x
74
+ x = torch.nn.functional.leaky_relu(x, LRELU_SLOPE)
75
+ x = apply_mask(x, x_mask)
76
+ x = torch.nn.functional.leaky_relu(conv1(x), LRELU_SLOPE)
77
+ x = apply_mask(x, x_mask)
78
+ x = conv2(x)
79
+ x = x + x_residual
80
+ return apply_mask(x, x_mask)
81
+
82
+ def remove_weight_norm(self):
83
+ for conv in chain(self.convs1, self.convs2):
84
+ remove_weight_norm(conv)
85
+
86
+
87
+ class Flip(torch.nn.Module):
88
+ """
89
+ Flip module for flow-based models.
90
+
91
+ This module flips the input along the time dimension.
92
+ """
93
+
94
+ def forward(self, x, *args, reverse=False, **kwargs):
95
+ x = torch.flip(x, [1])
96
+ if not reverse:
97
+ logdet = torch.zeros(x.size(0), dtype=x.dtype, device=x.device)
98
+ return x, logdet
99
+ else:
100
+ return x
101
+
102
+
103
+ class ResidualCouplingBlock(torch.nn.Module):
104
+ """
105
+ Residual Coupling Block for normalizing flow.
106
+
107
+ Args:
108
+ channels (int): Number of channels in the input.
109
+ hidden_channels (int): Number of hidden channels in the coupling layer.
110
+ kernel_size (int): Kernel size of the convolutional layers.
111
+ dilation_rate (int): Dilation rate of the convolutional layers.
112
+ n_layers (int): Number of layers in the coupling layer.
113
+ n_flows (int, optional): Number of coupling layers in the block. Defaults to 4.
114
+ gin_channels (int, optional): Number of channels for the global conditioning input. Defaults to 0.
115
+ """
116
+
117
+ def __init__(
118
+ self,
119
+ channels: int,
120
+ hidden_channels: int,
121
+ kernel_size: int,
122
+ dilation_rate: int,
123
+ n_layers: int,
124
+ n_flows: int = 4,
125
+ gin_channels: int = 0,
126
+ ):
127
+ super(ResidualCouplingBlock, self).__init__()
128
+ self.channels = channels
129
+ self.hidden_channels = hidden_channels
130
+ self.kernel_size = kernel_size
131
+ self.dilation_rate = dilation_rate
132
+ self.n_layers = n_layers
133
+ self.n_flows = n_flows
134
+ self.gin_channels = gin_channels
135
+
136
+ self.flows = torch.nn.ModuleList()
137
+ for _ in range(n_flows):
138
+ self.flows.append(
139
+ ResidualCouplingLayer(
140
+ channels,
141
+ hidden_channels,
142
+ kernel_size,
143
+ dilation_rate,
144
+ n_layers,
145
+ gin_channels=gin_channels,
146
+ mean_only=True,
147
+ )
148
+ )
149
+ self.flows.append(Flip())
150
+
151
+ def forward(
152
+ self,
153
+ x: torch.Tensor,
154
+ x_mask: torch.Tensor,
155
+ g: Optional[torch.Tensor] = None,
156
+ reverse: bool = False,
157
+ ):
158
+ if not reverse:
159
+ for flow in self.flows:
160
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
161
+ else:
162
+ for flow in reversed(self.flows):
163
+ x = flow.forward(x, x_mask, g=g, reverse=reverse)
164
+ return x
165
+
166
+ def remove_weight_norm(self):
167
+ for i in range(self.n_flows):
168
+ self.flows[i * 2].remove_weight_norm()
169
+
170
+ def __prepare_scriptable__(self):
171
+ for i in range(self.n_flows):
172
+ for hook in self.flows[i * 2]._forward_pre_hooks.values():
173
+ if (
174
+ hook.__module__ == "torch.nn.utils.parametrizations.weight_norm"
175
+ and hook.__class__.__name__ == "WeightNorm"
176
+ ):
177
+ torch.nn.utils.remove_weight_norm(self.flows[i * 2])
178
+
179
+ return self
180
+
181
+
182
+ class ResidualCouplingLayer(torch.nn.Module):
183
+ """
184
+ Residual coupling layer for flow-based models.
185
+
186
+ Args:
187
+ channels (int): Number of channels.
188
+ hidden_channels (int): Number of hidden channels.
189
+ kernel_size (int): Size of the convolutional kernel.
190
+ dilation_rate (int): Dilation rate of the convolution.
191
+ n_layers (int): Number of convolutional layers.
192
+ p_dropout (float, optional): Dropout probability. Defaults to 0.
193
+ gin_channels (int, optional): Number of conditioning channels. Defaults to 0.
194
+ mean_only (bool, optional): Whether to use mean-only coupling. Defaults to False.
195
+ """
196
+
197
+ def __init__(
198
+ self,
199
+ channels: int,
200
+ hidden_channels: int,
201
+ kernel_size: int,
202
+ dilation_rate: int,
203
+ n_layers: int,
204
+ p_dropout: float = 0,
205
+ gin_channels: int = 0,
206
+ mean_only: bool = False,
207
+ ):
208
+ assert channels % 2 == 0, "channels should be divisible by 2"
209
+ super().__init__()
210
+ self.channels = channels
211
+ self.hidden_channels = hidden_channels
212
+ self.kernel_size = kernel_size
213
+ self.dilation_rate = dilation_rate
214
+ self.n_layers = n_layers
215
+ self.half_channels = channels // 2
216
+ self.mean_only = mean_only
217
+
218
+ self.pre = torch.nn.Conv1d(self.half_channels, hidden_channels, 1)
219
+ self.enc = WaveNet(
220
+ hidden_channels,
221
+ kernel_size,
222
+ dilation_rate,
223
+ n_layers,
224
+ p_dropout=p_dropout,
225
+ gin_channels=gin_channels,
226
+ )
227
+ self.post = torch.nn.Conv1d(
228
+ hidden_channels, self.half_channels * (2 - mean_only), 1
229
+ )
230
+ self.post.weight.data.zero_()
231
+ self.post.bias.data.zero_()
232
+
233
+ def forward(
234
+ self,
235
+ x: torch.Tensor,
236
+ x_mask: torch.Tensor,
237
+ g: Optional[torch.Tensor] = None,
238
+ reverse: bool = False,
239
+ ):
240
+ x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
241
+ h = self.pre(x0) * x_mask
242
+ h = self.enc(h, x_mask, g=g)
243
+ stats = self.post(h) * x_mask
244
+ if not self.mean_only:
245
+ m, logs = torch.split(stats, [self.half_channels] * 2, 1)
246
+ else:
247
+ m = stats
248
+ logs = torch.zeros_like(m)
249
+
250
+ if not reverse:
251
+ x1 = m + x1 * torch.exp(logs) * x_mask
252
+ x = torch.cat([x0, x1], 1)
253
+ logdet = torch.sum(logs, [1, 2])
254
+ return x, logdet
255
+ else:
256
+ x1 = (x1 - m) * torch.exp(-logs) * x_mask
257
+ x = torch.cat([x0, x1], 1)
258
+ return x
259
+
260
+ def remove_weight_norm(self):
261
+ self.enc.remove_weight_norm()
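
The coupling layer above is invertible by construction: the untouched half x0 drives the WaveNet, so running forward and then reverse with the same mask and conditioning recovers the input up to floating-point error. A quick round-trip check (channel sizes are illustrative):

import torch
from rvc.lib.algorithm.residuals import ResidualCouplingLayer

layer = ResidualCouplingLayer(192, 192, 5, 1, 3, mean_only=True)
x = torch.randn(1, 192, 40)
x_mask = torch.ones(1, 1, 40)
with torch.no_grad():
    y, _ = layer(x, x_mask)                  # forward pass returns (x, logdet)
    x_rec = layer(y, x_mask, reverse=True)   # reverse pass undoes the coupling
print(torch.allclose(x, x_rec, atol=1e-5))   # True
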
rvc/lib/algorithm/synthesizers.py ADDED
@@ -0,0 +1,243 @@
1
+ import torch
2
+ from typing import Optional
3
+ from rvc.lib.algorithm.generators.hifigan_mrf import HiFiGANMRFGenerator
4
+ from rvc.lib.algorithm.generators.hifigan_nsf import HiFiGANNSFGenerator
5
+ from rvc.lib.algorithm.generators.hifigan import HiFiGANGenerator
6
+ from rvc.lib.algorithm.generators.refinegan import RefineGANGenerator
7
+ from rvc.lib.algorithm.commons import slice_segments, rand_slice_segments
8
+ from rvc.lib.algorithm.residuals import ResidualCouplingBlock
9
+ from rvc.lib.algorithm.encoders import TextEncoder, PosteriorEncoder
10
+
11
+
12
+ class Synthesizer(torch.nn.Module):
13
+ """
14
+ Base Synthesizer model.
15
+
16
+ Args:
17
+ spec_channels (int): Number of channels in the spectrogram.
18
+ segment_size (int): Size of the audio segment.
19
+ inter_channels (int): Number of channels in the intermediate layers.
20
+ hidden_channels (int): Number of channels in the hidden layers.
21
+ filter_channels (int): Number of channels in the filter layers.
22
+ n_heads (int): Number of attention heads.
23
+ n_layers (int): Number of layers in the encoder.
24
+ kernel_size (int): Size of the convolution kernel.
25
+ p_dropout (float): Dropout probability.
26
+ resblock (str): Type of residual block.
27
+ resblock_kernel_sizes (list): Kernel sizes for the residual blocks.
28
+ resblock_dilation_sizes (list): Dilation sizes for the residual blocks.
29
+ upsample_rates (list): Upsampling rates for the decoder.
30
+ upsample_initial_channel (int): Number of channels in the initial upsampling layer.
31
+ upsample_kernel_sizes (list): Kernel sizes for the upsampling layers.
32
+ spk_embed_dim (int): Dimension of the speaker embedding.
33
+ gin_channels (int): Number of channels in the global conditioning vector.
34
+ sr (int): Sampling rate of the audio.
35
+ use_f0 (bool): Whether to use F0 information.
36
+ text_enc_hidden_dim (int): Hidden dimension for the text encoder.
37
+ kwargs: Additional keyword arguments.
38
+ """
39
+
40
+ def __init__(
41
+ self,
42
+ spec_channels: int,
43
+ segment_size: int,
44
+ inter_channels: int,
45
+ hidden_channels: int,
46
+ filter_channels: int,
47
+ n_heads: int,
48
+ n_layers: int,
49
+ kernel_size: int,
50
+ p_dropout: float,
51
+ resblock: str,
52
+ resblock_kernel_sizes: list,
53
+ resblock_dilation_sizes: list,
54
+ upsample_rates: list,
55
+ upsample_initial_channel: int,
56
+ upsample_kernel_sizes: list,
57
+ spk_embed_dim: int,
58
+ gin_channels: int,
59
+ sr: int,
60
+ use_f0: bool,
61
+ text_enc_hidden_dim: int = 768,
62
+ vocoder: str = "HiFi-GAN",
63
+ randomized: bool = True,
64
+ checkpointing: bool = False,
65
+ **kwargs,
66
+ ):
67
+ super().__init__()
68
+ self.segment_size = segment_size
69
+ self.use_f0 = use_f0
70
+ self.randomized = randomized
71
+
72
+ self.enc_p = TextEncoder(
73
+ inter_channels,
74
+ hidden_channels,
75
+ filter_channels,
76
+ n_heads,
77
+ n_layers,
78
+ kernel_size,
79
+ p_dropout,
80
+ text_enc_hidden_dim,
81
+ f0=use_f0,
82
+ )
83
+ print(f"Using {vocoder} vocoder")
84
+ if use_f0:
85
+ if vocoder == "MRF HiFi-GAN":
86
+ self.dec = HiFiGANMRFGenerator(
87
+ in_channel=inter_channels,
88
+ upsample_initial_channel=upsample_initial_channel,
89
+ upsample_rates=upsample_rates,
90
+ upsample_kernel_sizes=upsample_kernel_sizes,
91
+ resblock_kernel_sizes=resblock_kernel_sizes,
92
+ resblock_dilations=resblock_dilation_sizes,
93
+ gin_channels=gin_channels,
94
+ sample_rate=sr,
95
+ harmonic_num=8,
96
+ checkpointing=checkpointing,
97
+ )
98
+ elif vocoder == "RefineGAN":
99
+ self.dec = RefineGANGenerator(
100
+ sample_rate=sr,
101
+ downsample_rates=upsample_rates[::-1],
102
+ upsample_rates=upsample_rates,
103
+ start_channels=16,
104
+ num_mels=inter_channels,
105
+ checkpointing=checkpointing,
106
+ )
107
+ else:
108
+ self.dec = HiFiGANNSFGenerator(
109
+ inter_channels,
110
+ resblock_kernel_sizes,
111
+ resblock_dilation_sizes,
112
+ upsample_rates,
113
+ upsample_initial_channel,
114
+ upsample_kernel_sizes,
115
+ gin_channels=gin_channels,
116
+ sr=sr,
117
+ checkpointing=checkpointing,
118
+ )
119
+ else:
120
+ if vocoder == "MRF HiFi-GAN":
121
+ print("MRF HiFi-GAN does not support training without pitch guidance.")
122
+ self.dec = None
123
+ elif vocoder == "RefineGAN":
124
+ print("RefineGAN does not support training without pitch guidance.")
125
+ self.dec = None
126
+ else:
127
+ self.dec = HiFiGANGenerator(
128
+ inter_channels,
129
+ resblock_kernel_sizes,
130
+ resblock_dilation_sizes,
131
+ upsample_rates,
132
+ upsample_initial_channel,
133
+ upsample_kernel_sizes,
134
+ gin_channels=gin_channels,
135
+ )
136
+ self.enc_q = PosteriorEncoder(
137
+ spec_channels,
138
+ inter_channels,
139
+ hidden_channels,
140
+ 5,
141
+ 1,
142
+ 16,
143
+ gin_channels=gin_channels,
144
+ )
145
+ self.flow = ResidualCouplingBlock(
146
+ inter_channels,
147
+ hidden_channels,
148
+ 5,
149
+ 1,
150
+ 3,
151
+ gin_channels=gin_channels,
152
+ )
153
+ self.emb_g = torch.nn.Embedding(spk_embed_dim, gin_channels)
154
+
155
+ def _remove_weight_norm_from(self, module):
156
+ for hook in module._forward_pre_hooks.values():
157
+ if getattr(hook, "__class__", None).__name__ == "WeightNorm":
158
+ torch.nn.utils.remove_weight_norm(module)
159
+
160
+ def remove_weight_norm(self):
161
+ for module in [self.dec, self.flow, self.enc_q]:
162
+ self._remove_weight_norm_from(module)
163
+
164
+ def __prepare_scriptable__(self):
165
+ self.remove_weight_norm()
166
+ return self
167
+
168
+ def forward(
169
+ self,
170
+ phone: torch.Tensor,
171
+ phone_lengths: torch.Tensor,
172
+ pitch: Optional[torch.Tensor] = None,
173
+ pitchf: Optional[torch.Tensor] = None,
174
+ y: Optional[torch.Tensor] = None,
175
+ y_lengths: Optional[torch.Tensor] = None,
176
+ ds: Optional[torch.Tensor] = None,
177
+ ):
178
+ g = self.emb_g(ds).unsqueeze(-1)
179
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
180
+
181
+ if y is not None:
182
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
183
+ z_p = self.flow(z, y_mask, g=g)
184
+ # regular old training method using random slices
185
+ if self.randomized:
186
+ z_slice, ids_slice = rand_slice_segments(
187
+ z, y_lengths, self.segment_size
188
+ )
189
+ if self.use_f0:
190
+ pitchf = slice_segments(pitchf, ids_slice, self.segment_size, 2)
191
+ o = self.dec(z_slice, pitchf, g=g)
192
+ else:
193
+ o = self.dec(z_slice, g=g)
194
+ return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
195
+ # future use for finetuning using the entire dataset each pass
196
+ else:
197
+ if self.use_f0:
198
+ o = self.dec(z, pitchf, g=g)
199
+ else:
200
+ o = self.dec(z, g=g)
201
+ return o, None, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
202
+ else:
203
+ return None, None, x_mask, None, (None, None, m_p, logs_p, None, None)
204
+
205
+ @torch.jit.export
206
+ def infer(
207
+ self,
208
+ phone: torch.Tensor,
209
+ phone_lengths: torch.Tensor,
210
+ pitch: Optional[torch.Tensor] = None,
211
+ nsff0: Optional[torch.Tensor] = None,
212
+ sid: torch.Tensor = None,
213
+ rate: Optional[torch.Tensor] = None,
214
+ ):
215
+ """
216
+ Inference of the model.
217
+
218
+ Args:
219
+ phone (torch.Tensor): Phoneme sequence.
220
+ phone_lengths (torch.Tensor): Lengths of the phoneme sequences.
221
+ pitch (torch.Tensor, optional): Pitch sequence.
222
+ nsff0 (torch.Tensor, optional): Fine-grained pitch sequence.
223
+ sid (torch.Tensor): Speaker embedding.
224
+ rate (torch.Tensor, optional): Rate for time-stretching.
225
+ """
226
+ g = self.emb_g(sid).unsqueeze(-1)
227
+ m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
228
+ z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
229
+
230
+ if rate is not None:
231
+ head = int(z_p.shape[2] * (1.0 - rate.item()))
232
+ z_p, x_mask = z_p[:, :, head:], x_mask[:, :, head:]
233
+ if self.use_f0 and nsff0 is not None:
234
+ nsff0 = nsff0[:, head:]
235
+
236
+ z = self.flow(z_p, x_mask, g=g, reverse=True)
237
+ o = (
238
+ self.dec(z * x_mask, nsff0, g=g)
239
+ if self.use_f0
240
+ else self.dec(z * x_mask, g=g)
241
+ )
242
+
243
+ return o, x_mask, (z, z_p, m_p, logs_p)
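
At inference time the prior is sampled with a fixed noise scale of 0.66666 and, when rate is given, only the trailing rate fraction of latent frames is decoded. A toy illustration of that trimming with plain tensors (names mirror infer(), values are illustrative):

import torch

m_p = torch.zeros(1, 192, 100)
logs_p = torch.zeros(1, 192, 100)
x_mask = torch.ones(1, 1, 100)

z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask

rate = torch.tensor(0.25)                        # keep only the last 25% of frames
head = int(z_p.shape[2] * (1.0 - rate.item()))   # drop the first 75 frames
z_p, x_mask = z_p[:, :, head:], x_mask[:, :, head:]
print(z_p.shape)                                  # torch.Size([1, 192, 25])
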
rvc/lib/predictors/F0Extractor.py ADDED
@@ -0,0 +1,105 @@
1
+ import dataclasses
2
+ import pathlib
3
+ import librosa
4
+ import numpy as np
5
+ import resampy
6
+ import torch
7
+ import torchcrepe
8
+ import torchfcpe
9
+ import os
10
+
11
+ # from tools.anyf0.rmvpe import RMVPE
12
+ from rvc.lib.predictors.RMVPE import RMVPE0Predictor
13
+ from rvc.configs.config import Config
14
+
15
+ config = Config()
16
+
17
+
18
+ @dataclasses.dataclass
19
+ class F0Extractor:
20
+ wav_path: pathlib.Path
21
+ sample_rate: int = 44100
22
+ hop_length: int = 512
23
+ f0_min: int = 50
24
+ f0_max: int = 1600
25
+ method: str = "rmvpe"
26
+ x: np.ndarray = dataclasses.field(init=False)
27
+
28
+ def __post_init__(self):
29
+ self.x, self.sample_rate = librosa.load(self.wav_path, sr=self.sample_rate)
30
+
31
+ @property
32
+ def hop_size(self):
33
+ return self.hop_length / self.sample_rate
34
+
35
+ @property
36
+ def wav16k(self):
37
+ return resampy.resample(self.x, self.sample_rate, 16000)
38
+
39
+ def extract_f0(self):
40
+ f0 = None
41
+ method = self.method
42
+ if method == "crepe":
43
+ wav16k_torch = torch.FloatTensor(self.wav16k).unsqueeze(0).to(config.device)
44
+ f0 = torchcrepe.predict(
45
+ wav16k_torch,
46
+ sample_rate=16000,
47
+ hop_length=160,
48
+ batch_size=512,
49
+ fmin=self.f0_min,
50
+ fmax=self.f0_max,
51
+ device=config.device,
52
+ )
53
+ f0 = f0[0].cpu().numpy()
54
+ elif method == "fcpe":
55
+ audio = librosa.to_mono(self.x)
56
+ audio_length = len(audio)
57
+ f0_target_length = (audio_length // self.hop_length) + 1
58
+ audio = (
59
+ torch.from_numpy(audio)
60
+ .float()
61
+ .unsqueeze(0)
62
+ .unsqueeze(-1)
63
+ .to(config.device)
64
+ )
65
+ model = torchfcpe.spawn_bundled_infer_model(device=config.device)
66
+
67
+ f0 = model.infer(
68
+ audio,
69
+ sr=self.sample_rate,
70
+ decoder_mode="local_argmax",
71
+ threshold=0.006,
72
+ f0_min=self.f0_min,
73
+ f0_max=self.f0_max,
74
+ interp_uv=False,
75
+ output_interp_target_length=f0_target_length,
76
+ )
77
+ f0 = f0.squeeze().cpu().numpy()
78
+ elif method == "rmvpe":
79
+ model_rmvpe = RMVPE0Predictor(
80
+ os.path.join("rvc", "models", "predictors", "rmvpe.pt"),
81
+ device=config.device,
82
+ # hop_length=80
83
+ )
84
+ f0 = model_rmvpe.infer_from_audio(self.wav16k, thred=0.03)
85
+
86
+ else:
87
+ raise ValueError(f"Unknown method: {self.method}")
88
+ return self.hz_to_cents(f0, librosa.midi_to_hz(0))
89
+
90
+ def plot_f0(self, f0):
91
+ from matplotlib import pyplot as plt
92
+
93
+ plt.figure(figsize=(10, 4))
94
+ plt.plot(f0)
95
+ plt.title(self.method)
96
+ plt.xlabel("Time (frames)")
97
+ plt.ylabel("F0 (cents)")
98
+ plt.show()
99
+
100
+ @staticmethod
101
+ def hz_to_cents(F, F_ref=55.0):
102
+ F_temp = np.array(F).astype(float)
103
+ F_temp[F_temp == 0] = np.nan
104
+ F_cents = 1200 * np.log2(F_temp / F_ref)
105
+ return F_cents
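
hz_to_cents above is the standard 1200 * log2(F / F_ref) mapping, with unvoiced (zero) frames mapped to NaN so they drop out of plots. With a 55 Hz reference, 110 Hz sits exactly one octave, i.e. 1200 cents, higher; a small standalone check:

import numpy as np

def hz_to_cents(F, F_ref=55.0):
    F_temp = np.array(F).astype(float)
    F_temp[F_temp == 0] = np.nan          # unvoiced frames stay undefined
    return 1200 * np.log2(F_temp / F_ref)

print(hz_to_cents([55.0, 110.0, 220.0, 0.0]))   # [   0. 1200. 2400.   nan]
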
rvc/lib/predictors/FCPE.py ADDED
@@ -0,0 +1,920 @@
1
+ from typing import Union
2
+
3
+ import torch.nn.functional as F
4
+ import numpy as np
5
+ import torch
6
+ import torch.nn as nn
7
+ from torch.nn.utils.parametrizations import weight_norm
8
+ from torchaudio.transforms import Resample
9
+ import os
10
+ import librosa
11
+ import soundfile as sf
12
+ import torch.utils.data
13
+ from librosa.filters import mel as librosa_mel_fn
14
+ import math
15
+ from functools import partial
16
+
17
+ from einops import rearrange, repeat
18
+ from local_attention import LocalAttention
19
+ from torch import nn
20
+
21
+ os.environ["LRU_CACHE_CAPACITY"] = "3"
22
+
23
+
24
+ def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False):
25
+ """Loads wav file to torch tensor."""
26
+ try:
27
+ data, sample_rate = sf.read(full_path, always_2d=True)
28
+ except Exception as error:
29
+ print(f"An error occurred loading {full_path}: {error}")
30
+ if return_empty_on_exception:
31
+ return [], sample_rate or target_sr or 48000
32
+ else:
33
+ raise
34
+
35
+ data = data[:, 0] if len(data.shape) > 1 else data
36
+ assert len(data) > 2
37
+
38
+ # Normalize data
39
+ max_mag = (
40
+ -np.iinfo(data.dtype).min
41
+ if np.issubdtype(data.dtype, np.integer)
42
+ else max(np.amax(data), -np.amin(data))
43
+ )
44
+ max_mag = (
45
+ (2**31) + 1 if max_mag > (2**15) else ((2**15) + 1 if max_mag > 1.01 else 1.0)
46
+ )
47
+ data = torch.FloatTensor(data.astype(np.float32)) / max_mag
48
+
49
+ # Handle exceptions and resample
50
+ if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:
51
+ return [], sample_rate or target_sr or 48000
52
+ if target_sr is not None and sample_rate != target_sr:
53
+ data = torch.from_numpy(
54
+ librosa.core.resample(
55
+ data.numpy(), orig_sr=sample_rate, target_sr=target_sr
56
+ )
57
+ )
58
+ sample_rate = target_sr
59
+
60
+ return data, sample_rate
61
+
62
+
63
+ def dynamic_range_compression(x, C=1, clip_val=1e-5):
64
+ return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
65
+
66
+
67
+ def dynamic_range_decompression(x, C=1):
68
+ return np.exp(x) / C
69
+
70
+
71
+ def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
72
+ return torch.log(torch.clamp(x, min=clip_val) * C)
73
+
74
+
75
+ def dynamic_range_decompression_torch(x, C=1):
76
+ return torch.exp(x) / C
77
+
78
+
79
+ class STFT:
80
+ def __init__(
81
+ self,
82
+ sr=22050,
83
+ n_mels=80,
84
+ n_fft=1024,
85
+ win_size=1024,
86
+ hop_length=256,
87
+ fmin=20,
88
+ fmax=11025,
89
+ clip_val=1e-5,
90
+ ):
91
+ self.target_sr = sr
92
+ self.n_mels = n_mels
93
+ self.n_fft = n_fft
94
+ self.win_size = win_size
95
+ self.hop_length = hop_length
96
+ self.fmin = fmin
97
+ self.fmax = fmax
98
+ self.clip_val = clip_val
99
+ self.mel_basis = {}
100
+ self.hann_window = {}
101
+
102
+ def get_mel(self, y, keyshift=0, speed=1, center=False, train=False):
103
+ sample_rate = self.target_sr
104
+ n_mels = self.n_mels
105
+ n_fft = self.n_fft
106
+ win_size = self.win_size
107
+ hop_length = self.hop_length
108
+ fmin = self.fmin
109
+ fmax = self.fmax
110
+ clip_val = self.clip_val
111
+
112
+ factor = 2 ** (keyshift / 12)
113
+ n_fft_new = int(np.round(n_fft * factor))
114
+ win_size_new = int(np.round(win_size * factor))
115
+ hop_length_new = int(np.round(hop_length * speed))
116
+
117
+ # Optimize mel_basis and hann_window caching
118
+ mel_basis = self.mel_basis if not train else {}
119
+ hann_window = self.hann_window if not train else {}
120
+
121
+ mel_basis_key = str(fmax) + "_" + str(y.device)
122
+ if mel_basis_key not in mel_basis:
123
+ mel = librosa_mel_fn(
124
+ sr=sample_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax
125
+ )
126
+ mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device)
127
+
128
+ keyshift_key = str(keyshift) + "_" + str(y.device)
129
+ if keyshift_key not in hann_window:
130
+ hann_window[keyshift_key] = torch.hann_window(win_size_new).to(y.device)
131
+
132
+ # Padding and STFT
133
+ pad_left = (win_size_new - hop_length_new) // 2
134
+ pad_right = max(
135
+ (win_size_new - hop_length_new + 1) // 2,
136
+ win_size_new - y.size(-1) - pad_left,
137
+ )
138
+ mode = "reflect" if pad_right < y.size(-1) else "constant"
139
+ y = torch.nn.functional.pad(y.unsqueeze(1), (pad_left, pad_right), mode=mode)
140
+ y = y.squeeze(1)
141
+
142
+ spec = torch.stft(
143
+ y,
144
+ n_fft=n_fft_new,
145
+ hop_length=hop_length_new,
146
+ win_length=win_size_new,
147
+ window=hann_window[keyshift_key],
148
+ center=center,
149
+ pad_mode="reflect",
150
+ normalized=False,
151
+ onesided=True,
152
+ return_complex=True,
153
+ )
154
+ spec = torch.sqrt(spec.real.pow(2) + spec.imag.pow(2) + (1e-9))
155
+
156
+ # Handle keyshift and mel conversion
157
+ if keyshift != 0:
158
+ size = n_fft // 2 + 1
159
+ resize = spec.size(1)
160
+ spec = (
161
+ F.pad(spec, (0, 0, 0, size - resize))
162
+ if resize < size
163
+ else spec[:, :size, :]
164
+ )
165
+ spec = spec * win_size / win_size_new
166
+ spec = torch.matmul(mel_basis[mel_basis_key], spec)
167
+ spec = dynamic_range_compression_torch(spec, clip_val=clip_val)
168
+ return spec
169
+
170
+ def __call__(self, audiopath):
171
+ audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr)
172
+ spect = self.get_mel(audio.unsqueeze(0)).squeeze(0)
173
+ return spect
174
+
175
+
176
+ stft = STFT()
177
+
178
+
179
+ def softmax_kernel(
180
+ data, *, projection_matrix, is_query, normalize_data=True, eps=1e-4, device=None
181
+ ):
182
+ b, h, *_ = data.shape
183
+
184
+ # Normalize data
185
+ data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.0
186
+
187
+ # Project data
188
+ ratio = projection_matrix.shape[0] ** -0.5
189
+ projection = repeat(projection_matrix, "j d -> b h j d", b=b, h=h)
190
+ projection = projection.type_as(data)
191
+ data_dash = torch.einsum("...id,...jd->...ij", (data_normalizer * data), projection)
192
+
193
+ # Calculate diagonal data
194
+ diag_data = data**2
195
+ diag_data = torch.sum(diag_data, dim=-1)
196
+ diag_data = (diag_data / 2.0) * (data_normalizer**2)
197
+ diag_data = diag_data.unsqueeze(dim=-1)
198
+
199
+ # Apply softmax
200
+ if is_query:
201
+ data_dash = ratio * (
202
+ torch.exp(
203
+ data_dash
204
+ - diag_data
205
+ - torch.max(data_dash, dim=-1, keepdim=True).values
206
+ )
207
+ + eps
208
+ )
209
+ else:
210
+ data_dash = ratio * (torch.exp(data_dash - diag_data + eps))
211
+
212
+ return data_dash.type_as(data)
213
+
214
+
215
+ def orthogonal_matrix_chunk(cols, qr_uniform_q=False, device=None):
216
+ unstructured_block = torch.randn((cols, cols), device=device)
217
+ q, r = torch.linalg.qr(unstructured_block.cpu(), mode="reduced")
218
+ q, r = map(lambda t: t.to(device), (q, r))
219
+
220
+ if qr_uniform_q:
221
+ d = torch.diag(r, 0)
222
+ q *= d.sign()
223
+ return q.t()
224
+
225
+
226
+ def exists(val):
227
+ return val is not None
228
+
229
+
230
+ def empty(tensor):
231
+ return tensor.numel() == 0
232
+
233
+
234
+ def default(val, d):
235
+ return val if exists(val) else d
236
+
237
+
238
+ def cast_tuple(val):
239
+ return (val,) if not isinstance(val, tuple) else val
240
+
241
+
242
+ class PCmer(nn.Module):
243
+ def __init__(
244
+ self,
245
+ num_layers,
246
+ num_heads,
247
+ dim_model,
248
+ dim_keys,
249
+ dim_values,
250
+ residual_dropout,
251
+ attention_dropout,
252
+ ):
253
+ super().__init__()
254
+ self.num_layers = num_layers
255
+ self.num_heads = num_heads
256
+ self.dim_model = dim_model
257
+ self.dim_values = dim_values
258
+ self.dim_keys = dim_keys
259
+ self.residual_dropout = residual_dropout
260
+ self.attention_dropout = attention_dropout
261
+
262
+ self._layers = nn.ModuleList([_EncoderLayer(self) for _ in range(num_layers)])
263
+
264
+ def forward(self, phone, mask=None):
265
+ for layer in self._layers:
266
+ phone = layer(phone, mask)
267
+ return phone
268
+
269
+
270
+ class _EncoderLayer(nn.Module):
271
+ def __init__(self, parent: PCmer):
272
+ super().__init__()
273
+ self.conformer = ConformerConvModule(parent.dim_model)
274
+ self.norm = nn.LayerNorm(parent.dim_model)
275
+ self.dropout = nn.Dropout(parent.residual_dropout)
276
+ self.attn = SelfAttention(
277
+ dim=parent.dim_model, heads=parent.num_heads, causal=False
278
+ )
279
+
280
+ def forward(self, phone, mask=None):
281
+ phone = phone + (self.attn(self.norm(phone), mask=mask))
282
+ phone = phone + (self.conformer(phone))
283
+ return phone
284
+
285
+
286
+ def calc_same_padding(kernel_size):
287
+ pad = kernel_size // 2
288
+ return (pad, pad - (kernel_size + 1) % 2)
289
+
290
+
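
An aside on calc_same_padding above: it returns an asymmetric (left, right) pair so that the depthwise convolution in ConformerConvModule preserves the sequence length for both odd and even kernel sizes. For the default kernel_size of 31 the padding comes out symmetric:

def calc_same_padding(kernel_size):
    pad = kernel_size // 2
    return (pad, pad - (kernel_size + 1) % 2)

print(calc_same_padding(31))   # (15, 15) -> odd kernel, symmetric padding
print(calc_same_padding(4))    # (2, 1)   -> even kernel, one less on the right
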
291
+ class Swish(nn.Module):
292
+ def forward(self, x):
293
+ return x * x.sigmoid()
294
+
295
+
296
+ class Transpose(nn.Module):
297
+ def __init__(self, dims):
298
+ super().__init__()
299
+ assert len(dims) == 2, "dims must be a tuple of two dimensions"
300
+ self.dims = dims
301
+
302
+ def forward(self, x):
303
+ return x.transpose(*self.dims)
304
+
305
+
306
+ class GLU(nn.Module):
307
+ def __init__(self, dim):
308
+ super().__init__()
309
+ self.dim = dim
310
+
311
+ def forward(self, x):
312
+ out, gate = x.chunk(2, dim=self.dim)
313
+ return out * gate.sigmoid()
314
+
315
+
316
+ class DepthWiseConv1d(nn.Module):
317
+ def __init__(self, chan_in, chan_out, kernel_size, padding):
318
+ super().__init__()
319
+ self.padding = padding
320
+ self.conv = nn.Conv1d(chan_in, chan_out, kernel_size, groups=chan_in)
321
+
322
+ def forward(self, x):
323
+ x = F.pad(x, self.padding)
324
+ return self.conv(x)
325
+
326
+
327
+ class ConformerConvModule(nn.Module):
328
+ def __init__(
329
+ self, dim, causal=False, expansion_factor=2, kernel_size=31, dropout=0.0
330
+ ):
331
+ super().__init__()
332
+
333
+ inner_dim = dim * expansion_factor
334
+ padding = calc_same_padding(kernel_size) if not causal else (kernel_size - 1, 0)
335
+
336
+ self.net = nn.Sequential(
337
+ nn.LayerNorm(dim),
338
+ Transpose((1, 2)),
339
+ nn.Conv1d(dim, inner_dim * 2, 1),
340
+ GLU(dim=1),
341
+ DepthWiseConv1d(
342
+ inner_dim, inner_dim, kernel_size=kernel_size, padding=padding
343
+ ),
344
+ Swish(),
345
+ nn.Conv1d(inner_dim, dim, 1),
346
+ Transpose((1, 2)),
347
+ nn.Dropout(dropout),
348
+ )
349
+
350
+ def forward(self, x):
351
+ return self.net(x)
352
+
353
+
354
+ def linear_attention(q, k, v):
355
+ if v is None:
356
+ out = torch.einsum("...ed,...nd->...ne", k, q)
357
+ return out
358
+ else:
359
+ k_cumsum = k.sum(dim=-2)
360
+ D_inv = 1.0 / (torch.einsum("...nd,...d->...n", q, k_cumsum.type_as(q)) + 1e-8)
361
+ context = torch.einsum("...nd,...ne->...de", k, v)
362
+ out = torch.einsum("...de,...nd,...n->...ne", context, q, D_inv)
363
+ return out
364
+
365
+
366
+ def gaussian_orthogonal_random_matrix(
367
+ nb_rows, nb_columns, scaling=0, qr_uniform_q=False, device=None
368
+ ):
369
+ nb_full_blocks = int(nb_rows / nb_columns)
370
+ block_list = []
371
+
372
+ for _ in range(nb_full_blocks):
373
+ q = orthogonal_matrix_chunk(
374
+ nb_columns, qr_uniform_q=qr_uniform_q, device=device
375
+ )
376
+ block_list.append(q)
377
+
378
+ remaining_rows = nb_rows - nb_full_blocks * nb_columns
379
+ if remaining_rows > 0:
380
+ q = orthogonal_matrix_chunk(
381
+ nb_columns, qr_uniform_q=qr_uniform_q, device=device
382
+ )
383
+ block_list.append(q[:remaining_rows])
384
+
385
+ final_matrix = torch.cat(block_list)
386
+
387
+ if scaling == 0:
388
+ multiplier = torch.randn((nb_rows, nb_columns), device=device).norm(dim=1)
389
+ elif scaling == 1:
390
+ multiplier = math.sqrt((float(nb_columns))) * torch.ones(
391
+ (nb_rows,), device=device
392
+ )
393
+ else:
394
+ raise ValueError(f"Invalid scaling {scaling}")
395
+
396
+ return torch.diag(multiplier) @ final_matrix
397
+
398
+
399
+ class FastAttention(nn.Module):
400
+ def __init__(
401
+ self,
402
+ dim_heads,
403
+ nb_features=None,
404
+ ortho_scaling=0,
405
+ causal=False,
406
+ generalized_attention=False,
407
+ kernel_fn=nn.ReLU(),
408
+ qr_uniform_q=False,
409
+ no_projection=False,
410
+ ):
411
+ super().__init__()
412
+ nb_features = default(nb_features, int(dim_heads * math.log(dim_heads)))
413
+
414
+ self.dim_heads = dim_heads
415
+ self.nb_features = nb_features
416
+ self.ortho_scaling = ortho_scaling
417
+
418
+ self.create_projection = partial(
419
+ gaussian_orthogonal_random_matrix,
420
+ nb_rows=self.nb_features,
421
+ nb_columns=dim_heads,
422
+ scaling=ortho_scaling,
423
+ qr_uniform_q=qr_uniform_q,
424
+ )
425
+ projection_matrix = self.create_projection()
426
+ self.register_buffer("projection_matrix", projection_matrix)
427
+
428
+ self.generalized_attention = generalized_attention
429
+ self.kernel_fn = kernel_fn
430
+ self.no_projection = no_projection
431
+ self.causal = causal
432
+
433
+ @torch.no_grad()
434
+ def redraw_projection_matrix(self):
435
+ projections = self.create_projection()
436
+ self.projection_matrix.copy_(projections)
437
+ del projections
438
+
439
+ def forward(self, q, k, v):
440
+ device = q.device
441
+
442
+ if self.no_projection:
443
+ q = q.softmax(dim=-1)
444
+ k = torch.exp(k) if self.causal else k.softmax(dim=-2)
445
+ else:
446
+ create_kernel = partial(
447
+ softmax_kernel, projection_matrix=self.projection_matrix, device=device
448
+ )
449
+ q = create_kernel(q, is_query=True)
450
+ k = create_kernel(k, is_query=False)
451
+
452
+ attn_fn = linear_attention if not self.causal else self.causal_linear_fn
453
+
454
+ if v is None:
455
+ out = attn_fn(q, k, None)
456
+ return out
457
+ else:
458
+ out = attn_fn(q, k, v)
459
+ return out
460
+
461
+
462
+ class SelfAttention(nn.Module):
463
+ def __init__(
464
+ self,
465
+ dim,
466
+ causal=False,
467
+ heads=8,
468
+ dim_head=64,
469
+ local_heads=0,
470
+ local_window_size=256,
471
+ nb_features=None,
472
+ feature_redraw_interval=1000,
473
+ generalized_attention=False,
474
+ kernel_fn=nn.ReLU(),
475
+ qr_uniform_q=False,
476
+ dropout=0.0,
477
+ no_projection=False,
478
+ ):
479
+ super().__init__()
480
+ assert dim % heads == 0, "dimension must be divisible by number of heads"
481
+ dim_head = default(dim_head, dim // heads)
482
+ inner_dim = dim_head * heads
483
+ self.fast_attention = FastAttention(
484
+ dim_head,
485
+ nb_features,
486
+ causal=causal,
487
+ generalized_attention=generalized_attention,
488
+ kernel_fn=kernel_fn,
489
+ qr_uniform_q=qr_uniform_q,
490
+ no_projection=no_projection,
491
+ )
492
+
493
+ self.heads = heads
494
+ self.global_heads = heads - local_heads
495
+ self.local_attn = (
496
+ LocalAttention(
497
+ window_size=local_window_size,
498
+ causal=causal,
499
+ autopad=True,
500
+ dropout=dropout,
501
+ look_forward=int(not causal),
502
+ rel_pos_emb_config=(dim_head, local_heads),
503
+ )
504
+ if local_heads > 0
505
+ else None
506
+ )
507
+
508
+ self.to_q = nn.Linear(dim, inner_dim)
509
+ self.to_k = nn.Linear(dim, inner_dim)
510
+ self.to_v = nn.Linear(dim, inner_dim)
511
+ self.to_out = nn.Linear(inner_dim, dim)
512
+ self.dropout = nn.Dropout(dropout)
513
+
514
+ @torch.no_grad()
515
+ def redraw_projection_matrix(self):
516
+ self.fast_attention.redraw_projection_matrix()
517
+
518
+ def forward(
519
+ self,
520
+ x,
521
+ context=None,
522
+ mask=None,
523
+ context_mask=None,
524
+ name=None,
525
+ inference=False,
526
+ **kwargs,
527
+ ):
528
+ _, _, _, h, gh = *x.shape, self.heads, self.global_heads
529
+
530
+ cross_attend = exists(context)
531
+ context = default(context, x)
532
+ context_mask = default(context_mask, mask) if not cross_attend else context_mask
533
+ q, k, v = self.to_q(x), self.to_k(context), self.to_v(context)
534
+
535
+ q, k, v = map(lambda t: rearrange(t, "b n (h d) -> b h n d", h=h), (q, k, v))
536
+ (q, lq), (k, lk), (v, lv) = map(lambda t: (t[:, :gh], t[:, gh:]), (q, k, v))
537
+
538
+ attn_outs = []
539
+ if not empty(q):
540
+ if exists(context_mask):
541
+ global_mask = context_mask[:, None, :, None]
542
+ v.masked_fill_(~global_mask, 0.0)
543
+ if cross_attend:
544
+ pass # TODO: Implement cross-attention
545
+ else:
546
+ out = self.fast_attention(q, k, v)
547
+ attn_outs.append(out)
548
+
549
+ if not empty(lq):
550
+ assert (
551
+ not cross_attend
552
+ ), "local attention is not compatible with cross attention"
553
+ out = self.local_attn(lq, lk, lv, input_mask=mask)
554
+ attn_outs.append(out)
555
+
556
+ out = torch.cat(attn_outs, dim=1)
557
+ out = rearrange(out, "b h n d -> b n (h d)")
558
+ out = self.to_out(out)
559
+ return self.dropout(out)
560
+
561
+
562
+ def l2_regularization(model, l2_alpha):
563
+ l2_loss = []
564
+ for module in model.modules():
565
+ if type(module) is nn.Conv2d:
566
+ l2_loss.append((module.weight**2).sum() / 2.0)
567
+ return l2_alpha * sum(l2_loss)
568
+
569
+
570
+ class FCPE(nn.Module):
571
+ def __init__(
572
+ self,
573
+ input_channel=128,
574
+ out_dims=360,
575
+ n_layers=12,
576
+ n_chans=512,
577
+ use_siren=False,
578
+ use_full=False,
579
+ loss_mse_scale=10,
580
+ loss_l2_regularization=False,
581
+ loss_l2_regularization_scale=1,
582
+ loss_grad1_mse=False,
583
+ loss_grad1_mse_scale=1,
584
+ f0_max=1975.5,
585
+ f0_min=32.70,
586
+ confidence=False,
587
+ threshold=0.05,
588
+ use_input_conv=True,
589
+ ):
590
+ super().__init__()
591
+ if use_siren is True:
592
+ raise ValueError("Siren is not supported yet.")
593
+ if use_full is True:
594
+ raise ValueError("Full model is not supported yet.")
595
+
596
+ self.loss_mse_scale = loss_mse_scale if (loss_mse_scale is not None) else 10
597
+ self.loss_l2_regularization = (
598
+ loss_l2_regularization if (loss_l2_regularization is not None) else False
599
+ )
600
+ self.loss_l2_regularization_scale = (
601
+ loss_l2_regularization_scale
602
+ if (loss_l2_regularization_scale is not None)
603
+ else 1
604
+ )
605
+ self.loss_grad1_mse = loss_grad1_mse if (loss_grad1_mse is not None) else False
606
+ self.loss_grad1_mse_scale = (
607
+ loss_grad1_mse_scale if (loss_grad1_mse_scale is not None) else 1
608
+ )
609
+ self.f0_max = f0_max if (f0_max is not None) else 1975.5
610
+ self.f0_min = f0_min if (f0_min is not None) else 32.70
611
+ self.confidence = confidence if (confidence is not None) else False
612
+ self.threshold = threshold if (threshold is not None) else 0.05
613
+ self.use_input_conv = use_input_conv if (use_input_conv is not None) else True
614
+
615
+ self.cent_table_b = torch.Tensor(
616
+ np.linspace(
617
+ self.f0_to_cent(torch.Tensor([f0_min]))[0],
618
+ self.f0_to_cent(torch.Tensor([f0_max]))[0],
619
+ out_dims,
620
+ )
621
+ )
622
+ self.register_buffer("cent_table", self.cent_table_b)
623
+
624
+ # conv in stack
625
+ _leaky = nn.LeakyReLU()
626
+ self.stack = nn.Sequential(
627
+ nn.Conv1d(input_channel, n_chans, 3, 1, 1),
628
+ nn.GroupNorm(4, n_chans),
629
+ _leaky,
630
+ nn.Conv1d(n_chans, n_chans, 3, 1, 1),
631
+ )
632
+
633
+ # transformer
634
+ self.decoder = PCmer(
635
+ num_layers=n_layers,
636
+ num_heads=8,
637
+ dim_model=n_chans,
638
+ dim_keys=n_chans,
639
+ dim_values=n_chans,
640
+ residual_dropout=0.1,
641
+ attention_dropout=0.1,
642
+ )
643
+ self.norm = nn.LayerNorm(n_chans)
644
+
645
+ # out
646
+ self.n_out = out_dims
647
+ self.dense_out = weight_norm(nn.Linear(n_chans, self.n_out))
648
+
649
+ def forward(
650
+ self, mel, infer=True, gt_f0=None, return_hz_f0=False, cdecoder="local_argmax"
651
+ ):
652
+ if cdecoder == "argmax":
653
+ self.cdecoder = self.cents_decoder
654
+ elif cdecoder == "local_argmax":
655
+ self.cdecoder = self.cents_local_decoder
656
+
657
+ x = (
658
+ self.stack(mel.transpose(1, 2)).transpose(1, 2)
659
+ if self.use_input_conv
660
+ else mel
661
+ )
662
+ x = self.decoder(x)
663
+ x = self.norm(x)
664
+ x = self.dense_out(x)
665
+ x = torch.sigmoid(x)
666
+
667
+ if not infer:
668
+ gt_cent_f0 = self.f0_to_cent(gt_f0)
669
+ gt_cent_f0 = self.gaussian_blurred_cent(gt_cent_f0)
670
+ loss_all = self.loss_mse_scale * F.binary_cross_entropy(x, gt_cent_f0)
671
+ if self.loss_l2_regularization:
672
+ loss_all = loss_all + l2_regularization(
673
+ model=self, l2_alpha=self.loss_l2_regularization_scale
674
+ )
675
+ x = loss_all
676
+ if infer:
677
+ x = self.cdecoder(x)
678
+ x = self.cent_to_f0(x)
679
+ x = (1 + x / 700).log() if not return_hz_f0 else x
680
+
681
+ return x
682
+
683
+ def cents_decoder(self, y, mask=True):
684
+ B, N, _ = y.size()
685
+ ci = self.cent_table[None, None, :].expand(B, N, -1)
686
+ rtn = torch.sum(ci * y, dim=-1, keepdim=True) / torch.sum(
687
+ y, dim=-1, keepdim=True
688
+ )
689
+ if mask:
690
+ confident = torch.max(y, dim=-1, keepdim=True)[0]
691
+ confident_mask = torch.ones_like(confident)
692
+ confident_mask[confident <= self.threshold] = float("-INF")
693
+ rtn = rtn * confident_mask
694
+ return (rtn, confident) if self.confidence else rtn
695
+
696
+ def cents_local_decoder(self, y, mask=True):
697
+ B, N, _ = y.size()
698
+ ci = self.cent_table[None, None, :].expand(B, N, -1)
699
+ confident, max_index = torch.max(y, dim=-1, keepdim=True)
700
+ local_argmax_index = torch.arange(0, 9).to(max_index.device) + (max_index - 4)
701
+ local_argmax_index = torch.clamp(local_argmax_index, 0, self.n_out - 1)
702
+ ci_l = torch.gather(ci, -1, local_argmax_index)
703
+ y_l = torch.gather(y, -1, local_argmax_index)
704
+ rtn = torch.sum(ci_l * y_l, dim=-1, keepdim=True) / torch.sum(
705
+ y_l, dim=-1, keepdim=True
706
+ )
707
+ if mask:
708
+ confident_mask = torch.ones_like(confident)
709
+ confident_mask[confident <= self.threshold] = float("-INF")
710
+ rtn = rtn * confident_mask
711
+ return (rtn, confident) if self.confidence else rtn
712
+
713
+ def cent_to_f0(self, cent):
714
+ return 10.0 * 2 ** (cent / 1200.0)
715
+
716
+ def f0_to_cent(self, f0):
717
+ return 1200.0 * torch.log2(f0 / 10.0)
718
+
719
+ def gaussian_blurred_cent(self, cents):
720
+ mask = (cents > 0.1) & (cents < (1200.0 * np.log2(self.f0_max / 10.0)))
721
+ B, N, _ = cents.size()
722
+ ci = self.cent_table[None, None, :].expand(B, N, -1)
723
+ return torch.exp(-torch.square(ci - cents) / 1250) * mask.float()
724
+
725
+
726
+ class FCPEInfer:
727
+ def __init__(self, model_path, device=None, dtype=torch.float32):
728
+ if device is None:
729
+ device = "cuda" if torch.cuda.is_available() else "cpu"
730
+ self.device = device
731
+ ckpt = torch.load(
732
+ model_path, map_location=torch.device(self.device), weights_only=True
733
+ )
734
+ self.args = DotDict(ckpt["config"])
735
+ self.dtype = dtype
736
+ model = FCPE(
737
+ input_channel=self.args.model.input_channel,
738
+ out_dims=self.args.model.out_dims,
739
+ n_layers=self.args.model.n_layers,
740
+ n_chans=self.args.model.n_chans,
741
+ use_siren=self.args.model.use_siren,
742
+ use_full=self.args.model.use_full,
743
+ loss_mse_scale=self.args.loss.loss_mse_scale,
744
+ loss_l2_regularization=self.args.loss.loss_l2_regularization,
745
+ loss_l2_regularization_scale=self.args.loss.loss_l2_regularization_scale,
746
+ loss_grad1_mse=self.args.loss.loss_grad1_mse,
747
+ loss_grad1_mse_scale=self.args.loss.loss_grad1_mse_scale,
748
+ f0_max=self.args.model.f0_max,
749
+ f0_min=self.args.model.f0_min,
750
+ confidence=self.args.model.confidence,
751
+ )
752
+ model.to(self.device).to(self.dtype)
753
+ model.load_state_dict(ckpt["model"])
754
+ model.eval()
755
+ self.model = model
756
+ self.wav2mel = Wav2Mel(self.args, dtype=self.dtype, device=self.device)
757
+
758
+ @torch.no_grad()
759
+ def __call__(self, audio, sr, threshold=0.05):
760
+ self.model.threshold = threshold
761
+ audio = audio[None, :]
762
+ mel = self.wav2mel(audio=audio, sample_rate=sr).to(self.dtype)
763
+ f0 = self.model(mel=mel, infer=True, return_hz_f0=True)
764
+ return f0
765
+
766
+
767
+ class Wav2Mel:
768
+ def __init__(self, args, device=None, dtype=torch.float32):
769
+ self.sample_rate = args.mel.sampling_rate
770
+ self.hop_size = args.mel.hop_size
771
+ if device is None:
772
+ device = "cuda" if torch.cuda.is_available() else "cpu"
773
+ self.device = device
774
+ self.dtype = dtype
775
+ self.stft = STFT(
776
+ args.mel.sampling_rate,
777
+ args.mel.num_mels,
778
+ args.mel.n_fft,
779
+ args.mel.win_size,
780
+ args.mel.hop_size,
781
+ args.mel.fmin,
782
+ args.mel.fmax,
783
+ )
784
+ self.resample_kernel = {}
785
+
786
+ def extract_nvstft(self, audio, keyshift=0, train=False):
787
+ mel = self.stft.get_mel(audio, keyshift=keyshift, train=train).transpose(1, 2)
788
+ return mel
789
+
790
+ def extract_mel(self, audio, sample_rate, keyshift=0, train=False):
791
+ audio = audio.to(self.dtype).to(self.device)
792
+ if sample_rate == self.sample_rate:
793
+ audio_res = audio
794
+ else:
795
+ key_str = str(sample_rate)
796
+ if key_str not in self.resample_kernel:
797
+ self.resample_kernel[key_str] = Resample(
798
+ sample_rate, self.sample_rate, lowpass_filter_width=128
799
+ )
800
+ self.resample_kernel[key_str] = (
801
+ self.resample_kernel[key_str].to(self.dtype).to(self.device)
802
+ )
803
+ audio_res = self.resample_kernel[key_str](audio)
804
+
805
+ mel = self.extract_nvstft(
806
+ audio_res, keyshift=keyshift, train=train
807
+ ) # B, n_frames, bins
808
+ n_frames = int(audio.shape[1] // self.hop_size) + 1
809
+ mel = (
810
+ torch.cat((mel, mel[:, -1:, :]), 1) if n_frames > int(mel.shape[1]) else mel
811
+ )
812
+ mel = mel[:, :n_frames, :] if n_frames < int(mel.shape[1]) else mel
813
+ return mel
814
+
815
+ def __call__(self, audio, sample_rate, keyshift=0, train=False):
816
+ return self.extract_mel(audio, sample_rate, keyshift=keyshift, train=train)
817
+
818
+
819
+ class DotDict(dict):
820
+ def __getattr__(*args):
821
+ val = dict.get(*args)
822
+ return DotDict(val) if type(val) is dict else val
823
+
824
+ __setattr__ = dict.__setitem__
825
+ __delattr__ = dict.__delitem__
826
+
827
+
828
+ class F0Predictor(object):
829
+ def compute_f0(self, wav, p_len):
830
+ pass
831
+
832
+ def compute_f0_uv(self, wav, p_len):
833
+ pass
834
+
835
+
836
+ class FCPEF0Predictor(F0Predictor):
837
+ def __init__(
838
+ self,
839
+ model_path,
840
+ hop_length=512,
841
+ f0_min=50,
842
+ f0_max=1100,
843
+ dtype=torch.float32,
844
+ device=None,
845
+ sample_rate=44100,
846
+ threshold=0.05,
847
+ ):
848
+ self.fcpe = FCPEInfer(model_path, device=device, dtype=dtype)
849
+ self.hop_length = hop_length
850
+ self.f0_min = f0_min
851
+ self.f0_max = f0_max
852
+ self.device = device or ("cuda" if torch.cuda.is_available() else "cpu")
853
+ self.threshold = threshold
854
+ self.sample_rate = sample_rate
855
+ self.dtype = dtype
856
+ self.name = "fcpe"
857
+
858
+ def repeat_expand(
859
+ self,
860
+ content: Union[torch.Tensor, np.ndarray],
861
+ target_len: int,
862
+ mode: str = "nearest",
863
+ ):
864
+ ndim = content.ndim
865
+ content = (
866
+ content[None, None]
867
+ if ndim == 1
868
+ else content[None] if ndim == 2 else content
869
+ )
870
+ assert content.ndim == 3
871
+ is_np = isinstance(content, np.ndarray)
872
+ content = torch.from_numpy(content) if is_np else content
873
+ results = torch.nn.functional.interpolate(content, size=target_len, mode=mode)
874
+ results = results.numpy() if is_np else results
875
+ return results[0, 0] if ndim == 1 else results[0] if ndim == 2 else results
876
+
877
+ def post_process(self, x, sample_rate, f0, pad_to):
878
+ f0 = (
879
+ torch.from_numpy(f0).float().to(x.device)
880
+ if isinstance(f0, np.ndarray)
881
+ else f0
882
+ )
883
+ f0 = self.repeat_expand(f0, pad_to) if pad_to is not None else f0
884
+
885
+ vuv_vector = torch.zeros_like(f0)
886
+ vuv_vector[f0 > 0.0] = 1.0
887
+ vuv_vector[f0 <= 0.0] = 0.0
888
+
889
+ nzindex = torch.nonzero(f0).squeeze()
890
+ f0 = torch.index_select(f0, dim=0, index=nzindex).cpu().numpy()
891
+ time_org = self.hop_length / sample_rate * nzindex.cpu().numpy()
892
+ time_frame = np.arange(pad_to) * self.hop_length / sample_rate
893
+
894
+ vuv_vector = F.interpolate(vuv_vector[None, None, :], size=pad_to)[0][0]
895
+
896
+ if f0.shape[0] <= 0:
897
+ return np.zeros(pad_to), vuv_vector.cpu().numpy()
898
+ if f0.shape[0] == 1:
899
+ return np.ones(pad_to) * f0[0], vuv_vector.cpu().numpy()
900
+
901
+ f0 = np.interp(time_frame, time_org, f0, left=f0[0], right=f0[-1])
902
+ return f0, vuv_vector.cpu().numpy()
903
+
904
+ def compute_f0(self, wav, p_len=None):
905
+ x = torch.FloatTensor(wav).to(self.dtype).to(self.device)
906
+ p_len = x.shape[0] // self.hop_length if p_len is None else p_len
907
+ f0 = self.fcpe(x, sr=self.sample_rate, threshold=self.threshold)[0, :, 0]
908
+ if torch.all(f0 == 0):
909
+ return f0.cpu().numpy() if p_len is None else np.zeros(p_len)
910
+ return self.post_process(x, self.sample_rate, f0, p_len)[0]
911
+
912
+ def compute_f0_uv(self, wav, p_len=None):
913
+ x = torch.FloatTensor(wav).to(self.dtype).to(self.device)
914
+ p_len = x.shape[0] // self.hop_length if p_len is None else p_len
915
+ f0 = self.fcpe(x, sr=self.sample_rate, threshold=self.threshold)[0, :, 0]
916
+ if torch.all(f0 == 0):
917
+ return f0.cpu().numpy() if p_len is None else np.zeros(p_len), (
918
+ f0.cpu().numpy() if p_len is None else np.zeros(p_len)
919
+ )
920
+ return self.post_process(x, self.sample_rate, f0, p_len)
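Usage sketch for the FCPE predictor defined above (the checkpoint path, hop length, and input waveform are illustrative placeholders, not part of this commit):
import numpy as np
import torch

predictor = FCPEF0Predictor(
    "rvc/models/predictors/fcpe.pt",  # hypothetical checkpoint location
    hop_length=160,
    sample_rate=16000,
    dtype=torch.float32,
    device="cuda" if torch.cuda.is_available() else "cpu",
)
wav = np.zeros(16000, dtype=np.float32)  # one second of silence as a stand-in input
f0 = predictor.compute_f0(wav, p_len=wav.shape[0] // 160)  # one F0 value per hop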
rvc/lib/predictors/RMVPE.py ADDED
@@ -0,0 +1,564 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ import numpy as np
5
+
6
+ from librosa.filters import mel
7
+ from typing import List
8
+
9
+ N_MELS = 128
10
+ N_CLASS = 360
11
+
12
+
13
+ class ConvBlockRes(nn.Module):
14
+ """
15
+ A convolutional block with residual connection.
16
+
17
+ Args:
18
+ in_channels (int): Number of input channels.
19
+ out_channels (int): Number of output channels.
20
+ momentum (float): Momentum for batch normalization.
21
+ """
22
+
23
+ def __init__(self, in_channels, out_channels, momentum=0.01):
24
+ super(ConvBlockRes, self).__init__()
25
+ self.conv = nn.Sequential(
26
+ nn.Conv2d(
27
+ in_channels=in_channels,
28
+ out_channels=out_channels,
29
+ kernel_size=(3, 3),
30
+ stride=(1, 1),
31
+ padding=(1, 1),
32
+ bias=False,
33
+ ),
34
+ nn.BatchNorm2d(out_channels, momentum=momentum),
35
+ nn.ReLU(),
36
+ nn.Conv2d(
37
+ in_channels=out_channels,
38
+ out_channels=out_channels,
39
+ kernel_size=(3, 3),
40
+ stride=(1, 1),
41
+ padding=(1, 1),
42
+ bias=False,
43
+ ),
44
+ nn.BatchNorm2d(out_channels, momentum=momentum),
45
+ nn.ReLU(),
46
+ )
47
+ if in_channels != out_channels:
48
+ self.shortcut = nn.Conv2d(in_channels, out_channels, (1, 1))
49
+ self.is_shortcut = True
50
+ else:
51
+ self.is_shortcut = False
52
+
53
+ def forward(self, x):
54
+ if self.is_shortcut:
55
+ return self.conv(x) + self.shortcut(x)
56
+ else:
57
+ return self.conv(x) + x
58
+
59
+
60
+ class ResEncoderBlock(nn.Module):
61
+ """
62
+ A residual encoder block.
63
+
64
+ Args:
65
+ in_channels (int): Number of input channels.
66
+ out_channels (int): Number of output channels.
67
+ kernel_size (tuple): Size of the average pooling kernel.
68
+ n_blocks (int): Number of convolutional blocks in the block.
69
+ momentum (float): Momentum for batch normalization.
70
+ """
71
+
72
+ def __init__(
73
+ self, in_channels, out_channels, kernel_size, n_blocks=1, momentum=0.01
74
+ ):
75
+ super(ResEncoderBlock, self).__init__()
76
+ self.n_blocks = n_blocks
77
+ self.conv = nn.ModuleList()
78
+ self.conv.append(ConvBlockRes(in_channels, out_channels, momentum))
79
+ for _ in range(n_blocks - 1):
80
+ self.conv.append(ConvBlockRes(out_channels, out_channels, momentum))
81
+ self.kernel_size = kernel_size
82
+ if self.kernel_size is not None:
83
+ self.pool = nn.AvgPool2d(kernel_size=kernel_size)
84
+
85
+ def forward(self, x):
86
+ for i in range(self.n_blocks):
87
+ x = self.conv[i](x)
88
+ if self.kernel_size is not None:
89
+ return x, self.pool(x)
90
+ else:
91
+ return x
92
+
93
+
94
+ class Encoder(nn.Module):
95
+ """
96
+ The encoder part of the DeepUnet.
97
+
98
+ Args:
99
+ in_channels (int): Number of input channels.
100
+ in_size (int): Size of the input tensor.
101
+ n_encoders (int): Number of encoder blocks.
102
+ kernel_size (tuple): Size of the average pooling kernel.
103
+ n_blocks (int): Number of convolutional blocks in each encoder block.
104
+ out_channels (int): Number of output channels for the first encoder block.
105
+ momentum (float): Momentum for batch normalization.
106
+ """
107
+
108
+ def __init__(
109
+ self,
110
+ in_channels,
111
+ in_size,
112
+ n_encoders,
113
+ kernel_size,
114
+ n_blocks,
115
+ out_channels=16,
116
+ momentum=0.01,
117
+ ):
118
+ super(Encoder, self).__init__()
119
+ self.n_encoders = n_encoders
120
+ self.bn = nn.BatchNorm2d(in_channels, momentum=momentum)
121
+ self.layers = nn.ModuleList()
122
+ self.latent_channels = []
123
+ for i in range(self.n_encoders):
124
+ self.layers.append(
125
+ ResEncoderBlock(
126
+ in_channels, out_channels, kernel_size, n_blocks, momentum=momentum
127
+ )
128
+ )
129
+ self.latent_channels.append([out_channels, in_size])
130
+ in_channels = out_channels
131
+ out_channels *= 2
132
+ in_size //= 2
133
+ self.out_size = in_size
134
+ self.out_channel = out_channels
135
+
136
+ def forward(self, x: torch.Tensor):
137
+ concat_tensors: List[torch.Tensor] = []
138
+ x = self.bn(x)
139
+ for i in range(self.n_encoders):
140
+ t, x = self.layers[i](x)
141
+ concat_tensors.append(t)
142
+ return x, concat_tensors
143
+
144
+
145
+ class Intermediate(nn.Module):
146
+ """
147
+ The intermediate layer of the DeepUnet.
148
+
149
+ Args:
150
+ in_channels (int): Number of input channels.
151
+ out_channels (int): Number of output channels.
152
+ n_inters (int): Number of convolutional blocks in the intermediate layer.
153
+ n_blocks (int): Number of convolutional blocks in each intermediate block.
154
+ momentum (float): Momentum for batch normalization.
155
+ """
156
+
157
+ def __init__(self, in_channels, out_channels, n_inters, n_blocks, momentum=0.01):
158
+ super(Intermediate, self).__init__()
159
+ self.n_inters = n_inters
160
+ self.layers = nn.ModuleList()
161
+ self.layers.append(
162
+ ResEncoderBlock(in_channels, out_channels, None, n_blocks, momentum)
163
+ )
164
+ for _ in range(self.n_inters - 1):
165
+ self.layers.append(
166
+ ResEncoderBlock(out_channels, out_channels, None, n_blocks, momentum)
167
+ )
168
+
169
+ def forward(self, x):
170
+ for i in range(self.n_inters):
171
+ x = self.layers[i](x)
172
+ return x
173
+
174
+
175
+ class ResDecoderBlock(nn.Module):
176
+ """
177
+ A residual decoder block.
178
+
179
+ Args:
180
+ in_channels (int): Number of input channels.
181
+ out_channels (int): Number of output channels.
182
+ stride (tuple): Stride for transposed convolution.
183
+ n_blocks (int): Number of convolutional blocks in the block.
184
+ momentum (float): Momentum for batch normalization.
185
+ """
186
+
187
+ def __init__(self, in_channels, out_channels, stride, n_blocks=1, momentum=0.01):
188
+ super(ResDecoderBlock, self).__init__()
189
+ out_padding = (0, 1) if stride == (1, 2) else (1, 1)
190
+ self.n_blocks = n_blocks
191
+ self.conv1 = nn.Sequential(
192
+ nn.ConvTranspose2d(
193
+ in_channels=in_channels,
194
+ out_channels=out_channels,
195
+ kernel_size=(3, 3),
196
+ stride=stride,
197
+ padding=(1, 1),
198
+ output_padding=out_padding,
199
+ bias=False,
200
+ ),
201
+ nn.BatchNorm2d(out_channels, momentum=momentum),
202
+ nn.ReLU(),
203
+ )
204
+ self.conv2 = nn.ModuleList()
205
+ self.conv2.append(ConvBlockRes(out_channels * 2, out_channels, momentum))
206
+ for _ in range(n_blocks - 1):
207
+ self.conv2.append(ConvBlockRes(out_channels, out_channels, momentum))
208
+
209
+ def forward(self, x, concat_tensor):
210
+ x = self.conv1(x)
211
+ x = torch.cat((x, concat_tensor), dim=1)
212
+ for i in range(self.n_blocks):
213
+ x = self.conv2[i](x)
214
+ return x
215
+
216
+
217
+ class Decoder(nn.Module):
218
+ """
219
+ The decoder part of the DeepUnet.
220
+
221
+ Args:
222
+ in_channels (int): Number of input channels.
223
+ n_decoders (int): Number of decoder blocks.
224
+ stride (tuple): Stride for transposed convolution.
225
+ n_blocks (int): Number of convolutional blocks in each decoder block.
226
+ momentum (float): Momentum for batch normalization.
227
+ """
228
+
229
+ def __init__(self, in_channels, n_decoders, stride, n_blocks, momentum=0.01):
230
+ super(Decoder, self).__init__()
231
+ self.layers = nn.ModuleList()
232
+ self.n_decoders = n_decoders
233
+ for _ in range(self.n_decoders):
234
+ out_channels = in_channels // 2
235
+ self.layers.append(
236
+ ResDecoderBlock(in_channels, out_channels, stride, n_blocks, momentum)
237
+ )
238
+ in_channels = out_channels
239
+
240
+ def forward(self, x, concat_tensors):
241
+ for i in range(self.n_decoders):
242
+ x = self.layers[i](x, concat_tensors[-1 - i])
243
+ return x
244
+
245
+
246
+ class DeepUnet(nn.Module):
247
+ """
248
+ The DeepUnet architecture.
249
+
250
+ Args:
251
+ kernel_size (tuple): Size of the average pooling kernel.
252
+ n_blocks (int): Number of convolutional blocks in each encoder/decoder block.
253
+ en_de_layers (int): Number of encoder/decoder layers.
254
+ inter_layers (int): Number of convolutional blocks in the intermediate layer.
255
+ in_channels (int): Number of input channels.
256
+ en_out_channels (int): Number of output channels for the first encoder block.
257
+ """
258
+
259
+ def __init__(
260
+ self,
261
+ kernel_size,
262
+ n_blocks,
263
+ en_de_layers=5,
264
+ inter_layers=4,
265
+ in_channels=1,
266
+ en_out_channels=16,
267
+ ):
268
+ super(DeepUnet, self).__init__()
269
+ self.encoder = Encoder(
270
+ in_channels, 128, en_de_layers, kernel_size, n_blocks, en_out_channels
271
+ )
272
+ self.intermediate = Intermediate(
273
+ self.encoder.out_channel // 2,
274
+ self.encoder.out_channel,
275
+ inter_layers,
276
+ n_blocks,
277
+ )
278
+ self.decoder = Decoder(
279
+ self.encoder.out_channel, en_de_layers, kernel_size, n_blocks
280
+ )
281
+
282
+ def forward(self, x):
283
+ x, concat_tensors = self.encoder(x)
284
+ x = self.intermediate(x)
285
+ x = self.decoder(x, concat_tensors)
286
+ return x
287
+
288
+
289
+ class E2E(nn.Module):
290
+ """
291
+ The end-to-end model.
292
+
293
+ Args:
294
+ n_blocks (int): Number of convolutional blocks in each encoder/decoder block.
295
+ n_gru (int): Number of GRU layers.
296
+ kernel_size (tuple): Size of the average pooling kernel.
297
+ en_de_layers (int): Number of encoder/decoder layers.
298
+ inter_layers (int): Number of convolutional blocks in the intermediate layer.
299
+ in_channels (int): Number of input channels.
300
+ en_out_channels (int): Number of output channels for the first encoder block.
301
+ """
302
+
303
+ def __init__(
304
+ self,
305
+ n_blocks,
306
+ n_gru,
307
+ kernel_size,
308
+ en_de_layers=5,
309
+ inter_layers=4,
310
+ in_channels=1,
311
+ en_out_channels=16,
312
+ ):
313
+ super(E2E, self).__init__()
314
+ self.unet = DeepUnet(
315
+ kernel_size,
316
+ n_blocks,
317
+ en_de_layers,
318
+ inter_layers,
319
+ in_channels,
320
+ en_out_channels,
321
+ )
322
+ self.cnn = nn.Conv2d(en_out_channels, 3, (3, 3), padding=(1, 1))
323
+ if n_gru:
324
+ self.fc = nn.Sequential(
325
+ BiGRU(3 * 128, 256, n_gru),
326
+ nn.Linear(512, N_CLASS),
327
+ nn.Dropout(0.25),
328
+ nn.Sigmoid(),
329
+ )
330
+ else:
331
+ self.fc = nn.Sequential(
332
+ nn.Linear(3 * N_MELS, N_CLASS), nn.Dropout(0.25), nn.Sigmoid()
333
+ )
334
+
335
+ def forward(self, mel):
336
+ mel = mel.transpose(-1, -2).unsqueeze(1)
337
+ x = self.cnn(self.unet(mel)).transpose(1, 2).flatten(-2)
338
+ x = self.fc(x)
339
+ return x
340
+
341
+
342
+ class MelSpectrogram(torch.nn.Module):
343
+ """
344
+ Extracts Mel-spectrogram features from audio.
345
+
346
+ Args:
347
+ n_mel_channels (int): Number of Mel-frequency bands.
348
+ sample_rate (int): Sampling rate of the audio.
349
+ win_length (int): Length of the window function in samples.
350
+ hop_length (int): Hop size between frames in samples.
351
+ n_fft (int, optional): Length of the FFT window. Defaults to None, which uses win_length.
352
+ mel_fmin (int, optional): Minimum frequency for the Mel filter bank. Defaults to 0.
353
+ mel_fmax (int, optional): Maximum frequency for the Mel filter bank. Defaults to None.
354
+ clamp (float, optional): Minimum value for clamping the Mel-spectrogram. Defaults to 1e-5.
355
+ """
356
+
357
+ def __init__(
358
+ self,
359
+ n_mel_channels,
360
+ sample_rate,
361
+ win_length,
362
+ hop_length,
363
+ n_fft=None,
364
+ mel_fmin=0,
365
+ mel_fmax=None,
366
+ clamp=1e-5,
367
+ ):
368
+ super().__init__()
369
+ n_fft = win_length if n_fft is None else n_fft
370
+ self.hann_window = {}
371
+ mel_basis = mel(
372
+ sr=sample_rate,
373
+ n_fft=n_fft,
374
+ n_mels=n_mel_channels,
375
+ fmin=mel_fmin,
376
+ fmax=mel_fmax,
377
+ htk=True,
378
+ )
379
+ mel_basis = torch.from_numpy(mel_basis).float()
380
+ self.register_buffer("mel_basis", mel_basis)
381
+ self.n_fft = win_length if n_fft is None else n_fft
382
+ self.hop_length = hop_length
383
+ self.win_length = win_length
384
+ self.sample_rate = sample_rate
385
+ self.n_mel_channels = n_mel_channels
386
+ self.clamp = clamp
387
+
388
+ def forward(self, audio, keyshift=0, speed=1, center=True):
389
+ factor = 2 ** (keyshift / 12)
390
+ n_fft_new = int(np.round(self.n_fft * factor))
391
+ win_length_new = int(np.round(self.win_length * factor))
392
+ hop_length_new = int(np.round(self.hop_length * speed))
393
+ keyshift_key = str(keyshift) + "_" + str(audio.device)
394
+ if keyshift_key not in self.hann_window:
395
+ self.hann_window[keyshift_key] = torch.hann_window(win_length_new).to(
396
+ audio.device
397
+ )
398
+ fft = torch.stft(
399
+ audio,
400
+ n_fft=n_fft_new,
401
+ hop_length=hop_length_new,
402
+ win_length=win_length_new,
403
+ window=self.hann_window[keyshift_key],
404
+ center=center,
405
+ return_complex=True,
406
+ )
407
+
408
+ magnitude = torch.sqrt(fft.real.pow(2) + fft.imag.pow(2))
409
+ if keyshift != 0:
410
+ size = self.n_fft // 2 + 1
411
+ resize = magnitude.size(1)
412
+ if resize < size:
413
+ magnitude = F.pad(magnitude, (0, 0, 0, size - resize))
414
+ magnitude = magnitude[:, :size, :] * self.win_length / win_length_new
415
+ mel_output = torch.matmul(self.mel_basis, magnitude)
416
+ log_mel_spec = torch.log(torch.clamp(mel_output, min=self.clamp))
417
+ return log_mel_spec
418
+
419
+
420
+ class RMVPE0Predictor:
421
+ """
422
+ A predictor for fundamental frequency (F0) based on the RMVPE0 model.
423
+
424
+ Args:
425
+ model_path (str): Path to the RMVPE0 model file.
426
+ device (str, optional): Device to use for computation. Defaults to None, which uses CUDA if available.
427
+ """
428
+
429
+ def __init__(self, model_path, device=None):
430
+ self.resample_kernel = {}
431
+ model = E2E(4, 1, (2, 2))
432
+ ckpt = torch.load(model_path, map_location="cpu", weights_only=True)
433
+ model.load_state_dict(ckpt)
434
+ model.eval()
435
+ self.model = model
436
+ self.resample_kernel = {}
437
+ self.device = device
438
+ self.mel_extractor = MelSpectrogram(
439
+ N_MELS, 16000, 1024, 160, None, 30, 8000
440
+ ).to(device)
441
+ self.model = self.model.to(device)
442
+ cents_mapping = 20 * np.arange(N_CLASS) + 1997.3794084376191
443
+ self.cents_mapping = np.pad(cents_mapping, (4, 4))
444
+
445
+ def mel2hidden(self, mel, chunk_size=32000):
446
+ """
447
+ Converts Mel-spectrogram features to hidden representation.
448
+
449
+ Args:
450
+ mel (torch.Tensor): Mel-spectrogram features.
451
+ """
452
+ with torch.no_grad():
453
+ n_frames = mel.shape[-1]
454
+ # print('n_frames', n_frames)
455
+ # print('mel shape before padding', mel.shape)
456
+ mel = F.pad(
457
+ mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="reflect"
458
+ )
459
+ # print('mel shape after padding', mel.shape)
460
+
461
+ output_chunks = []
462
+ pad_frames = mel.shape[-1]
463
+ for start in range(0, pad_frames, chunk_size):
464
+ # print('chunk @', start)
465
+ end = min(start + chunk_size, pad_frames)
466
+ mel_chunk = mel[..., start:end]
467
+ assert (
468
+ mel_chunk.shape[-1] % 32 == 0
469
+ ), "chunk_size must be divisible by 32"
470
+ # print(' before padding', mel_chunk.shape)
471
+ # mel_chunk = F.pad(mel_chunk, (320, 320), mode="reflect")
472
+ # print(' after padding', mel_chunk.shape)
473
+
474
+ out_chunk = self.model(mel_chunk)
475
+ # print(' result chunk', out_chunk.shape)
476
+ # out_chunk = out_chunk[:, 320:-320, :]
477
+ # print(' trimmed chunk', out_chunk.shape)
478
+ output_chunks.append(out_chunk)
479
+
480
+ hidden = torch.cat(output_chunks, dim=1)
481
+ # print('output', hidden[:, :n_frames].shape)
482
+ return hidden[:, :n_frames]
483
+
484
+ def decode(self, hidden, thred=0.03):
485
+ """
486
+ Decodes hidden representation to F0.
487
+
488
+ Args:
489
+ hidden (np.ndarray): Hidden representation.
490
+ thred (float, optional): Threshold for salience. Defaults to 0.03.
491
+ """
492
+ cents_pred = self.to_local_average_cents(hidden, thred=thred)
493
+ f0 = 10 * (2 ** (cents_pred / 1200))
494
+ f0[f0 == 10] = 0
495
+ return f0
496
+
497
+ def infer_from_audio(self, audio, thred=0.03):
498
+ """
499
+ Infers F0 from audio.
500
+
501
+ Args:
502
+ audio (np.ndarray): Audio signal.
503
+ thred (float, optional): Threshold for salience. Defaults to 0.03.
504
+ """
505
+ audio = torch.from_numpy(audio).float().to(self.device).unsqueeze(0)
506
+ mel = self.mel_extractor(audio, center=True)
507
+ del audio
508
+ with torch.no_grad():
509
+ torch.cuda.empty_cache()
510
+ hidden = self.mel2hidden(mel)
511
+ hidden = hidden.squeeze(0).cpu().numpy()
512
+ f0 = self.decode(hidden, thred=thred)
513
+ return f0
514
+
515
+ def to_local_average_cents(self, salience, thred=0.05):
516
+ """
517
+ Converts salience to local average cents.
518
+
519
+ Args:
520
+ salience (np.ndarray): Salience values.
521
+ thred (float, optional): Threshold for salience. Defaults to 0.05.
522
+ """
523
+ center = np.argmax(salience, axis=1)
524
+ salience = np.pad(salience, ((0, 0), (4, 4)))
525
+ center += 4
526
+ todo_salience = []
527
+ todo_cents_mapping = []
528
+ starts = center - 4
529
+ ends = center + 5
530
+ for idx in range(salience.shape[0]):
531
+ todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
532
+ todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
533
+ todo_salience = np.array(todo_salience)
534
+ todo_cents_mapping = np.array(todo_cents_mapping)
535
+ product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
536
+ weight_sum = np.sum(todo_salience, 1)
537
+ divided = product_sum / weight_sum
538
+ maxx = np.max(salience, axis=1)
539
+ divided[maxx <= thred] = 0
540
+ return divided
541
+
542
+
543
+ class BiGRU(nn.Module):
544
+ """
545
+ A bidirectional GRU layer.
546
+
547
+ Args:
548
+ input_features (int): Number of input features.
549
+ hidden_features (int): Number of hidden features.
550
+ num_layers (int): Number of GRU layers.
551
+ """
552
+
553
+ def __init__(self, input_features, hidden_features, num_layers):
554
+ super(BiGRU, self).__init__()
555
+ self.gru = nn.GRU(
556
+ input_features,
557
+ hidden_features,
558
+ num_layers=num_layers,
559
+ batch_first=True,
560
+ bidirectional=True,
561
+ )
562
+
563
+ def forward(self, x):
564
+ return self.gru(x)[0]
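A minimal usage sketch for RMVPE0Predictor (the rmvpe.pt path and the placeholder 16 kHz buffer are assumptions; infer_from_audio expects a mono float array at the Mel extractor's 16 kHz rate):
import numpy as np
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
rmvpe = RMVPE0Predictor("rvc/models/predictors/rmvpe.pt", device=device)  # hypothetical path
audio_16k = np.random.randn(16000).astype(np.float32)  # placeholder mono signal
f0 = rmvpe.infer_from_audio(audio_16k, thred=0.03)  # one F0 value per 10 ms frame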
rvc/lib/predictors/f0.py ADDED
@@ -0,0 +1,118 @@
1
+ import os
2
+ import torch
3
+
4
+ from rvc.lib.predictors.RMVPE import RMVPE0Predictor
5
+ from torchfcpe import spawn_bundled_infer_model
6
+ import torchcrepe
7
+ from swift_f0 import SwiftF0
8
+ import numpy as np
9
+
10
+
11
+ class RMVPE:
12
+ def __init__(self, device, model_name="rmvpe.pt", sample_rate=16000, hop_size=160):
13
+ self.device = device
14
+ self.sample_rate = sample_rate
15
+ self.hop_size = hop_size
16
+ self.model = RMVPE0Predictor(
17
+ os.path.join("rvc", "models", "predictors", model_name),
18
+ device=self.device,
19
+ )
20
+
21
+ def get_f0(self, x, filter_radius=0.03):
22
+ f0 = self.model.infer_from_audio(x, thred=filter_radius)
23
+ return f0
24
+
25
+
26
+ class CREPE:
27
+ def __init__(self, device, sample_rate=16000, hop_size=160):
28
+ self.device = device
29
+ self.sample_rate = sample_rate
30
+ self.hop_size = hop_size
31
+
32
+ def get_f0(self, x, f0_min=50, f0_max=1100, p_len=None, model="full"):
33
+ if p_len is None:
34
+ p_len = x.shape[0] // self.hop_size
35
+
36
+ if not torch.is_tensor(x):
37
+ x = torch.from_numpy(x)
38
+
39
+ batch_size = 512
40
+
41
+ f0, pd = torchcrepe.predict(
42
+ x.float().to(self.device).unsqueeze(dim=0),
43
+ self.sample_rate,
44
+ self.hop_size,
45
+ f0_min,
46
+ f0_max,
47
+ model=model,
48
+ batch_size=batch_size,
49
+ device=self.device,
50
+ return_periodicity=True,
51
+ )
52
+ pd = torchcrepe.filter.median(pd, 3)
53
+ f0 = torchcrepe.filter.mean(f0, 3)
54
+ f0[pd < 0.1] = 0
55
+ f0 = f0[0].cpu().numpy()
56
+
57
+ return f0
58
+
59
+
60
+ class FCPE:
61
+ def __init__(self, device, sample_rate=16000, hop_size=160):
62
+ self.device = device
63
+ self.sample_rate = sample_rate
64
+ self.hop_size = hop_size
65
+ self.model = spawn_bundled_infer_model(self.device)
66
+
67
+ def get_f0(self, x, p_len=None, filter_radius=0.006):
68
+ if p_len is None:
69
+ p_len = x.shape[0] // self.hop_size
70
+
71
+ if not torch.is_tensor(x):
72
+ x = torch.from_numpy(x)
73
+
74
+ f0 = (
75
+ self.model.infer(
76
+ x.float().to(self.device).unsqueeze(0),
77
+ sr=self.sample_rate,
78
+ decoder_mode="local_argmax",
79
+ threshold=filter_radius,
80
+ )
81
+ .squeeze()
82
+ .cpu()
83
+ .numpy()
84
+ )
85
+
86
+ return f0
87
+
88
+
89
+ class SWIFT:
90
+ def __init__(self, device, sample_rate=16000, hop_size=160):
91
+ self.device = "cpu"
92
+ self.sample_rate = sample_rate
93
+ self.hop_size = hop_size
94
+
95
+ def get_f0(self, x, f0_min=50, f0_max=1100, p_len=None, confidence_threshold=0.9):
96
+ if torch.is_tensor(x):
97
+ x = x.cpu().numpy()
98
+
99
+ if p_len is None:
100
+ p_len = x.shape[0] // self.hop_size
101
+
102
+ f0_min = max(f0_min, 46.875)
103
+ f0_max = min(f0_max, 2093.75)
104
+
105
+ detector = SwiftF0(
106
+ fmin=f0_min, fmax=f0_max, confidence_threshold=confidence_threshold
107
+ )
108
+ result = detector.detect_from_array(x, self.sample_rate)
109
+ if len(result.timestamps) == 0:
110
+ return np.zeros(p_len)
111
+ target_time = (
112
+ np.arange(p_len) * self.hop_size + self.hop_size / 2
113
+ ) / self.sample_rate
114
+ pitch = np.nan_to_num(result.pitch_hz, nan=0.0)
115
+ pitch[~result.voicing] = 0.0
116
+ f0 = np.interp(target_time, result.timestamps, pitch, left=0.0, right=0.0)
117
+
118
+ return f0
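The four wrappers above expose a common get_f0 entry point, so extractors can be swapped behind one call; a sketch (device string and the silent 16 kHz buffer are placeholders):
import numpy as np

audio = np.zeros(32000, dtype=np.float32)  # two seconds at 16 kHz, stand-in input
extractor = FCPE(device="cpu", sample_rate=16000, hop_size=160)  # or RMVPE / CREPE / SWIFT
f0 = extractor.get_f0(audio)
print(f0.shape)  # roughly len(audio) // hop_size frames, depending on the backend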
rvc/lib/tools/__pycache__/prerequisites_download.cpython-310.pyc ADDED
Binary file (4.16 kB). View file
 
rvc/lib/tools/analyzer.py ADDED
@@ -0,0 +1,76 @@
1
+ import numpy as np
2
+ import matplotlib.pyplot as plt
3
+ import librosa.display
4
+ import librosa
5
+
6
+
7
+ def calculate_features(y, sr):
8
+ stft = np.abs(librosa.stft(y))
9
+ duration = librosa.get_duration(y=y, sr=sr)
10
+ cent = librosa.feature.spectral_centroid(S=stft, sr=sr)[0]
11
+ bw = librosa.feature.spectral_bandwidth(S=stft, sr=sr)[0]
12
+ rolloff = librosa.feature.spectral_rolloff(S=stft, sr=sr)[0]
13
+ return stft, duration, cent, bw, rolloff
14
+
15
+
16
+ def plot_title(title):
17
+ plt.suptitle(title, fontsize=16, fontweight="bold")
18
+
19
+
20
+ def plot_spectrogram(y, sr, stft, duration, cmap="inferno"):
21
+ plt.subplot(3, 1, 1)
22
+ plt.imshow(
23
+ librosa.amplitude_to_db(stft, ref=np.max),
24
+ origin="lower",
25
+ extent=[0, duration, 0, sr / 1000],
26
+ aspect="auto",
27
+ cmap=cmap, # Change the colormap here
28
+ )
29
+ plt.colorbar(format="%+2.0f dB")
30
+ plt.xlabel("Time (s)")
31
+ plt.ylabel("Frequency (kHz)")
32
+ plt.title("Spectrogram")
33
+
34
+
35
+ def plot_waveform(y, sr, duration):
36
+ plt.subplot(3, 1, 2)
37
+ librosa.display.waveshow(y, sr=sr)
38
+ plt.xlabel("Time (s)")
39
+ plt.ylabel("Amplitude")
40
+ plt.title("Waveform")
41
+
42
+
43
+ def plot_features(times, cent, bw, rolloff, duration):
44
+ plt.subplot(3, 1, 3)
45
+ plt.plot(times, cent, label="Spectral Centroid (kHz)", color="b")
46
+ plt.plot(times, bw, label="Spectral Bandwidth (kHz)", color="g")
47
+ plt.plot(times, rolloff, label="Spectral Rolloff (kHz)", color="r")
48
+ plt.xlabel("Time (s)")
49
+ plt.title("Spectral Features")
50
+ plt.legend()
51
+
52
+
53
+ def analyze_audio(audio_file, save_plot_path="logs/audio_analysis.png"):
54
+ y, sr = librosa.load(audio_file)
55
+ stft, duration, cent, bw, rolloff = calculate_features(y, sr)
56
+
57
+ plt.figure(figsize=(12, 10))
58
+
59
+ plot_title("Audio Analysis" + " - " + audio_file.split("/")[-1])
60
+ plot_spectrogram(y, sr, stft, duration)
61
+ plot_waveform(y, sr, duration)
62
+ plot_features(librosa.times_like(cent), cent, bw, rolloff, duration)
63
+
64
+ plt.tight_layout()
65
+
66
+ if save_plot_path:
67
+ plt.savefig(save_plot_path, bbox_inches="tight", dpi=300)
68
+ plt.close()
69
+
70
+ audio_info = f"""Sample Rate: {sr}\nDuration: {(
71
+ str(round(duration, 2)) + " seconds"
72
+ if duration < 60
73
+ else str(round(duration / 60, 2)) + " minutes"
74
+ )}\nNumber of Samples: {len(y)}\nNative Sample Rate: {librosa.get_samplerate(audio_file)}\nChannels: {"Mono (1)" if y.ndim == 1 else "Stereo (2)"}"""
75
+
76
+ return audio_info, save_plot_path
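A quick usage sketch for analyze_audio (the input path is illustrative, and the default plot path assumes an existing "logs" directory):
info, plot_path = analyze_audio("assets/example.wav", save_plot_path="logs/audio_analysis.png")
print(info)  # human-readable summary; plot_path points at the saved figure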
rvc/lib/tools/gdown.py ADDED
@@ -0,0 +1,285 @@
1
+ import os
2
+ import re
3
+ import sys
4
+ import json
5
+ import time
6
+ import shutil
7
+ import tempfile
8
+ import warnings
9
+ from typing import Optional, Union, IO
10
+ import requests
11
+ from urllib.parse import urlparse, unquote
12
+ from tqdm import tqdm
13
+
14
+ CHUNK_SIZE = 512 * 1024
15
+ HOME = os.path.expanduser("~")
16
+
17
+
18
+ def indent(text: str, prefix: str):
19
+ """Indent each non-empty line of text with the given prefix."""
20
+ return "".join(
21
+ (prefix + line if line.strip() else line) for line in text.splitlines(True)
22
+ )
23
+
24
+
25
+ class FileURLRetrievalError(Exception):
26
+ """Custom exception for issues retrieving file URLs."""
27
+
28
+
29
+ def _extract_download_url_from_confirmation(contents: str, url_origin: str):
30
+ """Extract the download URL from a Google Drive confirmation page."""
31
+ patterns = [
32
+ r'href="(\/uc\?export=download[^"]+)',
33
+ r'href="/open\?id=([^"]+)"',
34
+ r'"downloadUrl":"([^"]+)',
35
+ ]
36
+ for pattern in patterns:
37
+ match = re.search(pattern, contents)
38
+ if match:
39
+ url = match.group(1)
40
+ if pattern == r'href="/open\?id=([^"]+)"':
41
+ uuid_match = re.search(
42
+ r'<input\s+type="hidden"\s+name="uuid"\s+value="([^"]+)"',
43
+ contents,
44
+ )
45
+ if uuid_match:
46
+ uuid = uuid_match.group(1)
47
+ return (
48
+ "https://drive.usercontent.google.com/download?id="
49
+ + url
50
+ + "&confirm=t&uuid="
51
+ + uuid
52
+ )
53
+ raise FileURLRetrievalError(
54
+ f"Could not find UUID for download from {url_origin}"
55
+ )
56
+ elif pattern == r'"downloadUrl":"([^"]+)':
57
+ return url.replace("\\u003d", "=").replace("\\u0026", "&")
58
+ else:
59
+ return "https://docs.google.com" + url.replace("&", "&")
60
+
61
+ error_match = re.search(r'<p class="uc-error-subcaption">(.*)</p>', contents)
62
+ if error_match:
63
+ error = error_match.group(1)
64
+ raise FileURLRetrievalError(error)
65
+
66
+ raise FileURLRetrievalError(
67
+ "Cannot retrieve the public link of the file. "
68
+ "You may need to change the permission to "
69
+ "'Anyone with the link', or have had many accesses."
70
+ )
71
+
72
+
73
+ def _create_session(
74
+ proxy: Optional[str] = None,
75
+ use_cookies: bool = True,
76
+ return_cookies_file: bool = False,
77
+ ):
78
+ """Create a requests session with optional proxy and cookie handling."""
79
+ sess = requests.session()
80
+ sess.headers.update(
81
+ {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6)"}
82
+ )
83
+
84
+ if proxy:
85
+ sess.proxies = {"http": proxy, "https": proxy}
86
+
87
+ cookies_file = os.path.join(HOME, ".cache/gdown/cookies.json")
88
+ if os.path.exists(cookies_file) and use_cookies:
89
+ try:
90
+ with open(cookies_file) as f:
91
+ cookies = json.load(f)
92
+ for k, v in cookies:
93
+ sess.cookies[k] = v
94
+ except json.JSONDecodeError:
95
+ warnings.warn("Corrupted Cookies file")
96
+
97
+ return (sess, cookies_file) if return_cookies_file else sess
98
+
99
+
100
+ def download(
101
+ output: Optional[str] = None,
102
+ quiet: bool = False,
103
+ proxy: Optional[str] = None,
104
+ speed: Optional[float] = None,
105
+ use_cookies: bool = True,
106
+ verify: Union[bool, str] = True,
107
+ id: Optional[str] = None,
108
+ fuzzy: bool = True,
109
+ resume: bool = False,
110
+ format: Optional[str] = None,
111
+ url: Optional[str] = None,
112
+ ):
113
+ """Download a file from a URL, supporting Google Drive links.
114
+
115
+ Args:
116
+ output: Output filepath. Default is basename of URL.
117
+ quiet: Suppress terminal output.
118
+ proxy: HTTP/HTTPS proxy.
119
+ speed: Download speed limit (bytes per second).
120
+ use_cookies: Flag to use cookies.
121
+ verify: Verify TLS certificates.
122
+ id: Google Drive's file ID.
123
+ fuzzy: Fuzzy Google Drive ID extraction.
124
+ resume: Resume download from a tmp file.
125
+ format: Format for Google Docs/Sheets/Slides.
126
+ url: URL to download from.
127
+
128
+ Returns:
129
+ Output filename, or None on error.
130
+ """
131
+ if not (id is None) ^ (url is None):
132
+ raise ValueError("Either url or id has to be specified")
133
+
134
+ if id is not None:
135
+ url = f"https://drive.google.com/uc?id={id}"
136
+
137
+ url_origin = url
138
+ sess, cookies_file = _create_session(
139
+ proxy=proxy, use_cookies=use_cookies, return_cookies_file=True
140
+ )
141
+
142
+ while True:
143
+ res = sess.get(url, stream=True, verify=verify)
144
+ res.raise_for_status()
145
+
146
+ if url == url_origin and res.status_code == 500:
147
+ url = f"https://drive.google.com/open?id={id}"
148
+ continue
149
+
150
+ if res.headers.get("Content-Type", "").startswith("text/html"):
151
+ title_match = re.search("<title>(.+)</title>", res.text)
152
+ if title_match:
153
+ title = title_match.group(1)
154
+ if title.endswith(" - Google Docs"):
155
+ url = f"https://docs.google.com/document/d/{id}/export?format={'docx' if format is None else format}"
156
+ continue
157
+ if title.endswith(" - Google Sheets"):
158
+ url = f"https://docs.google.com/spreadsheets/d/{id}/export?format={'xlsx' if format is None else format}"
159
+ continue
160
+ if title.endswith(" - Google Slides"):
161
+ url = f"https://docs.google.com/presentation/d/{id}/export?format={'pptx' if format is None else format}"
162
+ continue
163
+ if (
164
+ "Content-Disposition" in res.headers
165
+ and res.headers["Content-Disposition"].endswith("pptx")
166
+ and format not in (None, "pptx")
167
+ ):
168
+ url = f"https://docs.google.com/presentation/d/{id}/export?format={'pptx' if format is None else format}"
169
+ continue
170
+
171
+ if use_cookies:
172
+ os.makedirs(os.path.dirname(cookies_file), exist_ok=True)
173
+ cookies = [
174
+ (k, v)
175
+ for k, v in sess.cookies.items()
176
+ if not k.startswith("download_warning_")
177
+ ]
178
+ with open(cookies_file, "w") as f:
179
+ json.dump(cookies, f, indent=2)
180
+
181
+ if "Content-Disposition" in res.headers:
182
+ break
183
+
184
+ parsed_url = urlparse(url)
185
+ is_gdrive = parsed_url.hostname in ("drive.google.com", "docs.google.com")
186
+ is_download_link = parsed_url.path.endswith("/uc")
187
+
188
+ if not (is_gdrive and is_download_link and fuzzy):
189
+ break
190
+
191
+ try:
192
+ url = _extract_download_url_from_confirmation(res.text, url_origin)
193
+ except FileURLRetrievalError as e:
194
+ raise FileURLRetrievalError(e)
195
+
196
+ content_disposition = res.headers.get("Content-Disposition", "")
197
+ filename_match = re.search(
198
+ r"filename\*=UTF-8''(.*)", content_disposition
199
+ ) or re.search(r'filename=["\']?(.*?)["\']?$', content_disposition)
200
+ filename_from_url = (
201
+ unquote(filename_match.group(1)) if filename_match else os.path.basename(url)
202
+ )
203
+ download_path = output or filename_from_url
204
+
205
+ if isinstance(download_path, str) and download_path.endswith(os.path.sep):
206
+ os.makedirs(download_path, exist_ok=True)
207
+ download_path = os.path.join(download_path, filename_from_url)
208
+
209
+ temp_dir = os.path.dirname(download_path) or "."
210
+ prefix = os.path.basename(download_path)
211
+
212
+ if isinstance(download_path, str):
213
+ existing_tmp_files = [
214
+ os.path.join(temp_dir, file)
215
+ for file in os.listdir(temp_dir)
216
+ if file.startswith(prefix)
217
+ ]
218
+ if resume and existing_tmp_files:
219
+ if len(existing_tmp_files) > 1:
220
+ print(
221
+ "There are multiple temporary files to resume:",
222
+ file=sys.stderr,
223
+ )
224
+ for file in existing_tmp_files:
225
+ print(f"\t{file}", file=sys.stderr)
226
+ print(
227
+ "Please remove them except one to resume downloading.",
228
+ file=sys.stderr,
229
+ )
230
+ return None
231
+ temp_file_path = existing_tmp_files[0]
232
+ else:
233
+ resume = False
234
+ temp_file_path = tempfile.mktemp(
235
+ suffix=tempfile.template, prefix=prefix, dir=temp_dir
236
+ )
237
+
238
+ try:
239
+ file_obj: IO = open(temp_file_path, "ab")
240
+ except Exception as e:
241
+ print(
242
+ f"Could not open the temporary file {temp_file_path}: {e}",
243
+ file=sys.stderr,
244
+ )
245
+ return None
246
+ else:
247
+ temp_file_path = None
248
+ file_obj = download_path
249
+
250
+ if temp_file_path is not None and file_obj.tell() != 0:
251
+ headers = {"Range": f"bytes={file_obj.tell()}-"}
252
+ res = sess.get(url, headers=headers, stream=True, verify=verify)
253
+ res.raise_for_status()
254
+
255
+ try:
256
+ total = int(res.headers.get("Content-Length", 0))
257
+ if total > 0:
258
+ if not quiet:
259
+ pbar = tqdm(
260
+ total=total, unit="B", unit_scale=True, desc=filename_from_url
261
+ )
262
+ else:
263
+ if not quiet:
264
+ pbar = tqdm(unit="B", unit_scale=True, desc=filename_from_url)
265
+
266
+ t_start = time.time()
267
+ for chunk in res.iter_content(chunk_size=CHUNK_SIZE):
268
+ file_obj.write(chunk)
269
+ if not quiet:
270
+ pbar.update(len(chunk))
271
+ if speed is not None:
272
+ elapsed_time_expected = 1.0 * pbar.n / speed
273
+ elapsed_time = time.time() - t_start
274
+ if elapsed_time < elapsed_time_expected:
275
+ time.sleep(elapsed_time_expected - elapsed_time)
276
+ if not quiet:
277
+ pbar.close()
278
+
279
+ if temp_file_path:
280
+ file_obj.close()
281
+ shutil.move(temp_file_path, download_path)
282
+ finally:
283
+ sess.close()
284
+
285
+ return download_path
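A usage sketch for the download helper above (the Drive file id and output name are placeholders; exactly one of url or id must be supplied):
path = download(id="1AbCdEfGhIjKlMnOpQrStUvWxYz", output="model.zip", quiet=False)
print(path)  # the final local filename, or None if the download could not start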
rvc/lib/tools/launch_tensorboard.py ADDED
@@ -0,0 +1,21 @@
1
+ import time
2
+ import logging
3
+ from tensorboard import program
4
+
5
+ log_path = "logs"
6
+
7
+
8
+ def launch_tensorboard_pipeline():
9
+ logging.getLogger("root").setLevel(logging.WARNING)
10
+ logging.getLogger("tensorboard").setLevel(logging.WARNING)
11
+
12
+ tb = program.TensorBoard()
13
+ tb.configure(argv=[None, "--logdir", log_path])
14
+ url = tb.launch()
15
+
16
+ print(
17
+ f"Access the tensorboard using the following link:\n{url}?pinnedCards=%5B%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fg%2Ftotal%22%7D%2C%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fd%2Ftotal%22%7D%2C%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fg%2Fkl%22%7D%2C%7B%22plugin%22%3A%22scalars%22%2C%22tag%22%3A%22loss%2Fg%2Fmel%22%7D%5D"
18
+ )
19
+
20
+ while True:
21
+ time.sleep(600)
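Because launch_tensorboard_pipeline blocks in its sleep loop, callers would normally run it off the main thread; a sketch (the daemon thread is an assumption, not part of this module):
import threading

threading.Thread(target=launch_tensorboard_pipeline, daemon=True).start()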
rvc/lib/tools/model_download.py ADDED
@@ -0,0 +1,226 @@
1
+ import os
2
+ import re
3
+ import sys
4
+ import shutil
5
+ import zipfile
6
+ import requests
7
+ from bs4 import BeautifulSoup
8
+ from urllib.parse import unquote
9
+ from tqdm import tqdm
10
+
11
+ now_dir = os.getcwd()
12
+ sys.path.append(now_dir)
13
+
14
+ from rvc.lib.utils import format_title
15
+ from rvc.lib.tools import gdown
16
+
17
+
18
+ file_path = os.path.join(now_dir, "logs")
19
+ zips_path = os.path.join(file_path, "zips")
20
+ os.makedirs(zips_path, exist_ok=True)
21
+
22
+
23
+ def search_pth_index(folder):
24
+ pth_paths = [
25
+ os.path.join(folder, file)
26
+ for file in os.listdir(folder)
27
+ if os.path.isfile(os.path.join(folder, file)) and file.endswith(".pth")
28
+ ]
29
+ index_paths = [
30
+ os.path.join(folder, file)
31
+ for file in os.listdir(folder)
32
+ if os.path.isfile(os.path.join(folder, file)) and file.endswith(".index")
33
+ ]
34
+ return pth_paths, index_paths
35
+
36
+
37
+ def download_from_url(url):
38
+ os.chdir(zips_path)
39
+
40
+ try:
41
+ if "drive.google.com" in url:
42
+ file_id = extract_google_drive_id(url)
43
+ if file_id:
44
+ gdown.download(
45
+ url=f"https://drive.google.com/uc?id={file_id}",
46
+ quiet=False,
47
+ fuzzy=True,
48
+ )
49
+ elif "/blob/" in url or "/resolve/" in url:
50
+ download_blob_or_resolve(url)
51
+ elif "/tree/main" in url:
52
+ download_from_huggingface(url)
53
+ else:
54
+ download_file(url)
55
+
56
+ rename_downloaded_files()
57
+ return "downloaded"
58
+ except Exception as error:
59
+ print(f"An error occurred downloading the file: {error}")
60
+ return None
61
+ finally:
62
+ os.chdir(now_dir)
63
+
64
+
65
+ def extract_google_drive_id(url):
66
+ if "file/d/" in url:
67
+ return url.split("file/d/")[1].split("/")[0]
68
+ if "id=" in url:
69
+ return url.split("id=")[1].split("&")[0]
70
+ return None
71
+
72
+
73
+ def download_blob_or_resolve(url):
74
+ if "/blob/" in url:
75
+ url = url.replace("/blob/", "/resolve/")
76
+ response = requests.get(url, stream=True)
77
+ if response.status_code == 200:
78
+ save_response_content(response)
79
+ else:
80
+ raise ValueError(
81
+ "Download failed with status code: " + str(response.status_code)
82
+ )
83
+
84
+
85
+ def save_response_content(response):
86
+ content_disposition = unquote(response.headers.get("Content-Disposition", ""))
87
+ file_name = (
88
+ re.search(r'filename="([^"]+)"', content_disposition)
89
+ .groups()[0]
90
+ .replace(os.path.sep, "_")
91
+ if content_disposition
92
+ else "downloaded_file"
93
+ )
94
+
95
+ total_size = int(response.headers.get("Content-Length", 0))
96
+ chunk_size = 1024
97
+
98
+ with open(os.path.join(zips_path, file_name), "wb") as file, tqdm(
99
+ total=total_size, unit="B", unit_scale=True, desc=file_name
100
+ ) as progress_bar:
101
+ for data in response.iter_content(chunk_size):
102
+ file.write(data)
103
+ progress_bar.update(len(data))
104
+
105
+
106
+ def download_from_huggingface(url):
107
+ response = requests.get(url)
108
+ soup = BeautifulSoup(response.content, "html.parser")
109
+ temp_url = next(
110
+ (
111
+ link["href"]
112
+ for link in soup.find_all("a", href=True)
113
+ if link["href"].endswith(".zip")
114
+ ),
115
+ None,
116
+ )
117
+ if temp_url:
118
+ url = temp_url.replace("blob", "resolve")
119
+ if "huggingface.co" not in url:
120
+ url = "https://huggingface.co" + url
121
+ download_file(url)
122
+ else:
123
+ raise ValueError("No zip file found in Huggingface URL")
124
+
125
+
126
+ def download_file(url):
127
+ response = requests.get(url, stream=True)
128
+ if response.status_code == 200:
129
+ save_response_content(response)
130
+ else:
131
+ raise ValueError(
132
+ "Download failed with status code: " + str(response.status_code)
133
+ )
134
+
135
+
136
+ def rename_downloaded_files():
137
+ for currentPath, _, zipFiles in os.walk(zips_path):
138
+ for file in zipFiles:
139
+ file_name, extension = os.path.splitext(file)
140
+ real_path = os.path.join(currentPath, file)
141
+ os.rename(real_path, file_name.replace(os.path.sep, "_") + extension)
142
+
143
+
144
+ def extract(zipfile_path, unzips_path):
145
+ try:
146
+ with zipfile.ZipFile(zipfile_path, "r") as zip_ref:
147
+ zip_ref.extractall(unzips_path)
148
+ os.remove(zipfile_path)
149
+ return True
150
+ except Exception as error:
151
+ print(f"An error occurred extracting the zip file: {error}")
152
+ return False
153
+
154
+
155
+ def unzip_file(zip_path, zip_file_name):
156
+ zip_file_path = os.path.join(zip_path, zip_file_name + ".zip")
157
+ extract_path = os.path.join(file_path, zip_file_name)
158
+ with zipfile.ZipFile(zip_file_path, "r") as zip_ref:
159
+ zip_ref.extractall(extract_path)
160
+ os.remove(zip_file_path)
161
+
162
+
163
+ def model_download_pipeline(url: str):
164
+ try:
165
+ result = download_from_url(url)
166
+ if result == "downloaded":
167
+ return handle_extraction_process()
168
+ else:
169
+ return "Error"
170
+ except Exception as error:
171
+ print(f"An unexpected error occurred: {error}")
172
+ return "Error"
173
+
174
+
175
+ def handle_extraction_process():
176
+ extract_folder_path = ""
177
+ for filename in os.listdir(zips_path):
178
+ if filename.endswith(".zip"):
179
+ zipfile_path = os.path.join(zips_path, filename)
180
+ model_name = format_title(os.path.basename(zipfile_path).split(".zip")[0])
181
+ extract_folder_path = os.path.join("logs", os.path.normpath(model_name))
182
+ success = extract(zipfile_path, extract_folder_path)
183
+ clean_extracted_files(extract_folder_path, model_name)
184
+
185
+ if success:
186
+ print(f"Model {model_name} downloaded!")
187
+ else:
188
+ print(f"Error downloading {model_name}")
189
+ return "Error"
190
+ if not extract_folder_path:
191
+ print("Zip file was not found.")
192
+ return "Error"
193
+ return search_pth_index(extract_folder_path)
194
+
195
+
196
+ def clean_extracted_files(extract_folder_path, model_name):
197
+ macosx_path = os.path.join(extract_folder_path, "__MACOSX")
198
+ if os.path.exists(macosx_path):
199
+ shutil.rmtree(macosx_path)
200
+
201
+ subfolders = [
202
+ f
203
+ for f in os.listdir(extract_folder_path)
204
+ if os.path.isdir(os.path.join(extract_folder_path, f))
205
+ ]
206
+ if len(subfolders) == 1:
207
+ subfolder_path = os.path.join(extract_folder_path, subfolders[0])
208
+ for item in os.listdir(subfolder_path):
209
+ shutil.move(
210
+ os.path.join(subfolder_path, item),
211
+ os.path.join(extract_folder_path, item),
212
+ )
213
+ os.rmdir(subfolder_path)
214
+
215
+ for item in os.listdir(extract_folder_path):
216
+ source_path = os.path.join(extract_folder_path, item)
217
+ if ".pth" in item:
218
+ new_file_name = model_name + ".pth"
219
+ elif ".index" in item:
220
+ new_file_name = model_name + ".index"
221
+ else:
222
+ continue
223
+
224
+ destination_path = os.path.join(extract_folder_path, new_file_name)
225
+ if not os.path.exists(destination_path):
226
+ os.rename(source_path, destination_path)
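A usage sketch for the pipeline above (the URL is illustrative; a successful run returns the (pth_paths, index_paths) tuple from search_pth_index):
result = model_download_pipeline("https://huggingface.co/user/model/resolve/main/MyVoice.zip")
if result != "Error":
    pth_paths, index_paths = result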
rvc/lib/tools/prerequisites_download.py ADDED
@@ -0,0 +1,153 @@
1
+ import os
2
+ from concurrent.futures import ThreadPoolExecutor
3
+ from tqdm import tqdm
4
+ import requests
5
+
6
+ url_base = "https://huggingface.co/IAHispano/Applio/resolve/main/Resources"
7
+
8
+ pretraineds_hifigan_list = [
9
+ (
10
+ "pretrained_v2/",
11
+ [
12
+ "f0D32k.pth",
13
+ "f0D40k.pth",
14
+ "f0D48k.pth",
15
+ "f0G32k.pth",
16
+ "f0G40k.pth",
17
+ "f0G48k.pth",
18
+ ],
19
+ )
20
+ ]
21
+ models_list = [("predictors/", ["rmvpe.pt", "fcpe.pt"])]
22
+ embedders_list = [("embedders/contentvec/", ["pytorch_model.bin", "config.json"])]
23
+ executables_list = [
24
+ ("", ["ffmpeg.exe", "ffprobe.exe"]),
25
+ ]
26
+
27
+ folder_mapping_list = {
28
+ "pretrained_v2/": "rvc/models/pretraineds/hifi-gan/",
29
+ "embedders/contentvec/": "rvc/models/embedders/contentvec/",
30
+ "predictors/": "rvc/models/predictors/",
31
+ "formant/": "rvc/models/formant/",
32
+ }
33
+
34
+
35
+ def get_file_size_if_missing(file_list):
36
+ """
37
+ Calculate the total size of files to be downloaded only if they do not exist locally.
38
+ """
39
+ total_size = 0
40
+ for remote_folder, files in file_list:
41
+ local_folder = folder_mapping_list.get(remote_folder, "")
42
+ for file in files:
43
+ destination_path = os.path.join(local_folder, file)
44
+ if not os.path.exists(destination_path):
45
+ url = f"{url_base}/{remote_folder}{file}"
46
+ response = requests.head(url)
47
+ total_size += int(response.headers.get("content-length", 0))
48
+ return total_size
49
+
50
+
51
+ def download_file(url, destination_path, global_bar):
52
+ """
53
+ Download a file from the given URL to the specified destination path,
54
+ updating the global progress bar as data is downloaded.
55
+ """
56
+
57
+ dir_name = os.path.dirname(destination_path)
58
+ if dir_name:
59
+ os.makedirs(dir_name, exist_ok=True)
60
+ response = requests.get(url, stream=True)
61
+ block_size = 1024
62
+ with open(destination_path, "wb") as file:
63
+ for data in response.iter_content(block_size):
64
+ file.write(data)
65
+ global_bar.update(len(data))
66
+
67
+
68
+ def download_mapping_files(file_mapping_list, global_bar):
69
+ """
70
+ Download all files in the provided file mapping list using a thread pool executor,
71
+ and update the global progress bar as downloads progress.
72
+ """
73
+ with ThreadPoolExecutor() as executor:
74
+ futures = []
75
+ for remote_folder, file_list in file_mapping_list:
76
+ local_folder = folder_mapping_list.get(remote_folder, "")
77
+ for file in file_list:
78
+ destination_path = os.path.join(local_folder, file)
79
+ if not os.path.exists(destination_path):
80
+ url = f"{url_base}/{remote_folder}{file}"
81
+ futures.append(
82
+ executor.submit(
83
+ download_file, url, destination_path, global_bar
84
+ )
85
+ )
86
+ for future in futures:
87
+ future.result()
88
+
89
+
90
+ def split_pretraineds(pretrained_list):
91
+ f0_list = []
92
+ non_f0_list = []
93
+ for folder, files in pretrained_list:
94
+ f0_files = [f for f in files if f.startswith("f0")]
95
+ non_f0_files = [f for f in files if not f.startswith("f0")]
96
+ if f0_files:
97
+ f0_list.append((folder, f0_files))
98
+ if non_f0_files:
99
+ non_f0_list.append((folder, non_f0_files))
100
+ return f0_list, non_f0_list
101
+
102
+
103
+ pretraineds_hifigan_list, _ = split_pretraineds(pretraineds_hifigan_list)
104
+
105
+
106
+ def calculate_total_size(
107
+ pretraineds_hifigan,
108
+ models,
109
+ exe,
110
+ ):
111
+ """
112
+ Calculate the total size of all files to be downloaded based on selected categories.
113
+ """
114
+ total_size = 0
115
+ if models:
116
+ total_size += get_file_size_if_missing(models_list)
117
+ total_size += get_file_size_if_missing(embedders_list)
118
+ if exe and os.name == "nt":
119
+ total_size += get_file_size_if_missing(executables_list)
120
+ total_size += get_file_size_if_missing(pretraineds_hifigan)
121
+ return total_size
122
+
123
+
124
+ def prequisites_download_pipeline(
125
+ pretraineds_hifigan,
126
+ models,
127
+ exe,
128
+ ):
129
+ """
130
+ Manage the download pipeline for different categories of files.
131
+ """
132
+ total_size = calculate_total_size(
133
+ pretraineds_hifigan_list if pretraineds_hifigan else [],
134
+ models,
135
+ exe,
136
+ )
137
+
138
+ if total_size > 0:
139
+ with tqdm(
140
+ total=total_size, unit="iB", unit_scale=True, desc="Downloading all files"
141
+ ) as global_bar:
142
+ if models:
143
+ download_mapping_files(models_list, global_bar)
144
+ download_mapping_files(embedders_list, global_bar)
145
+ if exe:
146
+ if os.name == "nt":
147
+ download_mapping_files(executables_list, global_bar)
148
+ else:
149
+ print("No executables needed")
150
+ if pretraineds_hifigan:
151
+ download_mapping_files(pretraineds_hifigan_list, global_bar)
152
+ else:
153
+ pass
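Typical invocation of the pipeline above, which downloads the rmvpe/fcpe predictors, the contentvec embedder, the HiFi-GAN pretrains, and (on Windows only) the ffmpeg executables, skipping anything already present:
prequisites_download_pipeline(pretraineds_hifigan=True, models=True, exe=True)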
rvc/lib/tools/pretrained_selector.py ADDED
@@ -0,0 +1,13 @@
1
+ import os
2
+
3
+
4
+ def pretrained_selector(vocoder, sample_rate):
5
+ base_path = os.path.join("rvc", "models", "pretraineds", f"{vocoder.lower()}")
6
+
7
+ path_g = os.path.join(base_path, f"f0G{str(sample_rate)[:2]}k.pth")
8
+ path_d = os.path.join(base_path, f"f0D{str(sample_rate)[:2]}k.pth")
9
+
10
+ if os.path.exists(path_g) and os.path.exists(path_d):
11
+ return path_g, path_d
12
+ else:
13
+ return "", ""
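A sketch of the selector's fallback behavior (paths only resolve if the matching pretrained pair has already been downloaded into rvc/models/pretraineds):
path_g, path_d = pretrained_selector("HiFi-GAN", 40000)
if not path_g:
    print("No matching pretrained generator/discriminator found; training would start from scratch.")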
rvc/lib/tools/split_audio.py ADDED
@@ -0,0 +1,79 @@
1
+ import numpy as np
2
+ import librosa
3
+
4
+
5
+ def process_audio(audio, sr=16000, silence_thresh=-60, min_silence_len=250):
6
+ """
7
+ Splits an audio signal into segments using a fixed frame size and hop size.
8
+
9
+ Parameters:
10
+ - audio (np.ndarray): The audio signal to split.
11
+ - sr (int): The sample rate of the input audio (default is 16000).
12
+ - silence_thresh (int): Silence threshold (default =-60dB)
13
+ - min_silence_len (int): Minimum silence duration (default 250ms).
14
+
15
+ Returns:
16
+ - list of np.ndarray: A list of audio segments.
17
+ - np.ndarray: The intervals where the audio was split.
18
+ """
19
+ frame_length = int(min_silence_len / 1000 * sr)
20
+ hop_length = frame_length // 2
21
+ intervals = librosa.effects.split(
22
+ audio, top_db=-silence_thresh, frame_length=frame_length, hop_length=hop_length
23
+ )
24
+ audio_segments = [audio[start:end] for start, end in intervals]
25
+
26
+ return audio_segments, intervals
27
+
28
+
29
+ def merge_audio(audio_segments_org, audio_segments_new, intervals, sr_orig, sr_new):
30
+ """
31
+ Merges audio segments back into a single audio signal, filling gaps with silence.
32
+ Assumes audio segments are already at sr_new.
33
+
34
+ Parameters:
35
+ - audio_segments_org (list of np.ndarray): The non-silent audio segments (at sr_orig).
36
+ - audio_segments_new (list of np.ndarray): The non-silent audio segments (at sr_new).
37
+ - intervals (np.ndarray): The intervals used for splitting the original audio.
38
+ - sr_orig (int): The sample rate of the original audio
39
+ - sr_new (int): The sample rate of the model
40
+ Returns:
41
+ - np.ndarray: The merged audio signal with silent gaps restored.
42
+ """
43
+ merged_audio = np.array([], dtype=audio_segments_new[0].dtype)
44
+ sr_ratio = sr_new / sr_orig
45
+
46
+ for i, (start, end) in enumerate(intervals):
47
+
48
+ start_new = int(start * sr_ratio)
49
+ end_new = int(end * sr_ratio)
50
+
51
+ original_duration = len(audio_segments_org[i]) / sr_orig
52
+ new_duration = len(audio_segments_new[i]) / sr_new
53
+ duration_diff = new_duration - original_duration
54
+
55
+ silence_samples = int(abs(duration_diff) * sr_new)
56
+ silence_compensation = np.zeros(
57
+ silence_samples, dtype=audio_segments_new[0].dtype
58
+ )
59
+
60
+ if i == 0 and start_new > 0:
61
+ initial_silence = np.zeros(start_new, dtype=audio_segments_new[0].dtype)
62
+ merged_audio = np.concatenate((merged_audio, initial_silence))
63
+
64
+ if duration_diff > 0:
65
+ merged_audio = np.concatenate((merged_audio, silence_compensation))
66
+
67
+ merged_audio = np.concatenate((merged_audio, audio_segments_new[i]))
68
+
69
+ if duration_diff < 0:
70
+ merged_audio = np.concatenate((merged_audio, silence_compensation))
71
+
72
+ if i < len(intervals) - 1:
73
+ next_start_new = int(intervals[i + 1][0] * sr_ratio)
74
+ silence_duration = next_start_new - end_new
75
+ if silence_duration > 0:
76
+ silence = np.zeros(silence_duration, dtype=audio_segments_new[0].dtype)
77
+ merged_audio = np.concatenate((merged_audio, silence))
78
+
79
+ return merged_audio
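A round-trip sketch for process_audio and merge_audio (the input buffer and the zero-filled "converted" segments stand in for real per-segment inference output at the model rate):
import numpy as np

sr_orig, sr_new = 16000, 40000
audio = np.random.randn(sr_orig * 3).astype(np.float32)  # placeholder 3-second input
segments, intervals = process_audio(audio, sr=sr_orig)
# stand-in for each segment converted at the model sample rate (e.g. RVC inference output)
converted = [np.zeros(int(len(seg) * sr_new / sr_orig), dtype=np.float32) for seg in segments]
merged = merge_audio(segments, converted, intervals, sr_orig, sr_new)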
rvc/lib/tools/tts.py ADDED
@@ -0,0 +1,29 @@
1
+ import sys
2
+ import asyncio
3
+ import edge_tts
4
+ import os
5
+
6
+
7
+ async def main():
8
+ # Parse command line arguments
9
+ tts_file = str(sys.argv[1])
10
+ text = str(sys.argv[2])
11
+ voice = str(sys.argv[3])
12
+ rate = int(sys.argv[4])
13
+ output_file = str(sys.argv[5])
14
+
15
+ rates = f"+{rate}%" if rate >= 0 else f"{rate}%"
16
+ if tts_file and os.path.exists(tts_file):
17
+ text = ""
18
+ try:
19
+ with open(tts_file, "r", encoding="utf-8") as file:
20
+ text = file.read()
21
+ except UnicodeDecodeError:
22
+ with open(tts_file, "r") as file:
23
+ text = file.read()
24
+ await edge_tts.Communicate(text, voice, rate=rates).save(output_file)
25
+ # print(f"TTS with {voice} completed. Output TTS file: '{output_file}'")
26
+
27
+
28
+ if __name__ == "__main__":
29
+ asyncio.run(main())
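The script reads its five arguments positionally, so it is normally launched as a subprocess; an illustrative call (text, voice, and paths are examples, not values used by this commit):
import subprocess

subprocess.run([
    "python", "rvc/lib/tools/tts.py",
    "",                     # tts_file: empty, so the text argument below is synthesized
    "Hello from Applio.",   # text
    "en-US-AriaNeural",     # edge-tts voice name
    "0",                    # rate adjustment in percent
    "logs/tts_output.wav",  # output path
])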
rvc/lib/tools/tts_voices.json ADDED
The diff for this file is too large to render. See raw diff
 
rvc/lib/utils.py ADDED
@@ -0,0 +1,156 @@
1
+ import os
2
+ import sys
3
+ import soxr
4
+ import librosa
5
+ import soundfile as sf
6
+ import numpy as np
7
+ import re
8
+ import unicodedata
9
+ import wget
10
+ from torch import nn
11
+
12
+ import logging
13
+ from transformers import HubertModel
14
+ import warnings
15
+
16
+ # Remove this to see warnings about transformers models
17
+ warnings.filterwarnings("ignore")
18
+
19
+ logging.getLogger("fairseq").setLevel(logging.ERROR)
20
+ logging.getLogger("faiss.loader").setLevel(logging.ERROR)
21
+ logging.getLogger("transformers").setLevel(logging.ERROR)
22
+ logging.getLogger("torch").setLevel(logging.ERROR)
23
+
24
+ now_dir = os.getcwd()
25
+ sys.path.append(now_dir)
26
+
27
+ base_path = os.path.join(now_dir, "rvc", "models", "formant", "stftpitchshift")
28
+ stft = base_path + ".exe" if sys.platform == "win32" else base_path
29
+
30
+
31
+ class HubertModelWithFinalProj(HubertModel):
32
+ def __init__(self, config):
33
+ super().__init__(config)
34
+ self.final_proj = nn.Linear(config.hidden_size, config.classifier_proj_size)
35
+
36
+
37
+ def load_audio_16k(file):
38
+ # this is used by f0 and feature extractions that load preprocessed 16k files, so there's no need to resample
39
+ try:
40
+ audio, sr = librosa.load(file, sr=16000)
41
+ except Exception as error:
42
+ raise RuntimeError(f"An error occurred loading the audio: {error}")
43
+
44
+ return audio.flatten()
45
+
46
+
47
+ def load_audio(file, sample_rate):
48
+ try:
49
+ file = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
50
+ audio, sr = sf.read(file)
51
+ if len(audio.shape) > 1:
52
+ audio = librosa.to_mono(audio.T)
53
+ if sr != sample_rate:
54
+ audio = librosa.resample(
55
+ audio, orig_sr=sr, target_sr=sample_rate, res_type="soxr_vhq"
56
+ )
57
+ except Exception as error:
58
+ raise RuntimeError(f"An error occurred loading the audio: {error}")
59
+
60
+ return audio.flatten()
61
+
62
+
63
+ def load_audio_infer(
64
+ file,
65
+ sample_rate,
66
+ **kwargs,
67
+ ):
68
+ formant_shifting = kwargs.get("formant_shifting", False)
69
+ try:
70
+ file = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
71
+ if not os.path.isfile(file):
72
+ raise FileNotFoundError(f"File not found: {file}")
73
+ audio, sr = sf.read(file)
74
+ if len(audio.shape) > 1:
75
+ audio = librosa.to_mono(audio.T)
76
+ if sr != sample_rate:
77
+ audio = librosa.resample(
78
+ audio, orig_sr=sr, target_sr=sample_rate, res_type="soxr_vhq"
79
+ )
80
+ if formant_shifting:
81
+ formant_qfrency = kwargs.get("formant_qfrency", 0.8)
82
+ formant_timbre = kwargs.get("formant_timbre", 0.8)
83
+
84
+ from stftpitchshift import StftPitchShift
85
+
86
+ pitchshifter = StftPitchShift(1024, 32, sample_rate)
87
+ audio = pitchshifter.shiftpitch(
88
+ audio,
89
+ factors=1,
90
+ quefrency=formant_qfrency * 1e-3,
91
+ distortion=formant_timbre,
92
+ )
93
+ except Exception as error:
94
+ raise RuntimeError(f"An error occurred loading the audio: {error}")
95
+ return np.array(audio).flatten()
96
+
97
+
98
+ def format_title(title):
99
+ formatted_title = unicodedata.normalize("NFC", title)
100
+ formatted_title = re.sub(r"[\u2500-\u257F]+", "", formatted_title)
101
+ formatted_title = re.sub(r"[^\w\s.-]", "", formatted_title, flags=re.UNICODE)
102
+ formatted_title = re.sub(r"\s+", "_", formatted_title)
103
+ return formatted_title
104
+
105
+
106
+ def load_embedding(embedder_model, custom_embedder=None):
107
+ embedder_root = os.path.join(now_dir, "rvc", "models", "embedders")
108
+ embedding_list = {
109
+ "contentvec": os.path.join(embedder_root, "contentvec"),
110
+ "spin": os.path.join(embedder_root, "spin"),
111
+ "spin-v2": os.path.join(embedder_root, "spin-v2"),
112
+ "chinese-hubert-base": os.path.join(embedder_root, "chinese_hubert_base"),
113
+ "japanese-hubert-base": os.path.join(embedder_root, "japanese_hubert_base"),
114
+ "korean-hubert-base": os.path.join(embedder_root, "korean_hubert_base"),
115
+ }
116
+
117
+ online_embedders = {
118
+ "contentvec": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/contentvec/pytorch_model.bin",
119
+ "spin": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/spin/pytorch_model.bin",
120
+ "spin-v2": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/spin-v2/pytorch_model.bin",
121
+ "chinese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/chinese_hubert_base/pytorch_model.bin",
122
+ "japanese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/japanese_hubert_base/pytorch_model.bin",
123
+ "korean-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/korean_hubert_base/pytorch_model.bin",
124
+ }
125
+
126
+ config_files = {
127
+ "contentvec": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/contentvec/config.json",
128
+ "spin": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/spin/config.json",
129
+ "spin-v2": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/spin-v2/config.json",
130
+ "chinese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/chinese_hubert_base/config.json",
131
+ "japanese-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/japanese_hubert_base/config.json",
132
+ "korean-hubert-base": "https://huggingface.co/IAHispano/Applio/resolve/main/Resources/embedders/korean_hubert_base/config.json",
133
+ }
134
+
135
+ if embedder_model == "custom":
136
+ if os.path.exists(custom_embedder):
137
+ model_path = custom_embedder
138
+ else:
139
+ print(f"Custom embedder not found: {custom_embedder}, using contentvec")
140
+ model_path = embedding_list["contentvec"]
141
+ else:
142
+ model_path = embedding_list[embedder_model]
143
+ bin_file = os.path.join(model_path, "pytorch_model.bin")
144
+ json_file = os.path.join(model_path, "config.json")
145
+ os.makedirs(model_path, exist_ok=True)
146
+ if not os.path.exists(bin_file):
147
+ url = online_embedders[embedder_model]
148
+ print(f"Downloading {url} to {model_path}...")
149
+ wget.download(url, out=bin_file)
150
+ if not os.path.exists(json_file):
151
+ url = config_files[embedder_model]
152
+ print(f"Downloading {url} to {model_path}...")
153
+ wget.download(url, out=json_file)
154
+
155
+ models = HubertModelWithFinalProj.from_pretrained(model_path)
156
+ return models
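Reviewer note: load_embedding resolves the embedder directory under rvc/models/embedders, downloads pytorch_model.bin and config.json on first use, and returns a HubertModelWithFinalProj; a minimal usage sketch:
hubert = load_embedding("contentvec")      # downloads the weights on the first call
hubert = hubert.to("cpu").float().eval()   # caller decides device/dtype
# the realtime pipeline then calls hubert(wav_16k_tensor)["last_hidden_state"] to extract features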
rvc/lib/zluda.py ADDED
@@ -0,0 +1,76 @@
1
+ import torch
2
+
3
+ if torch.cuda.is_available() and torch.cuda.get_device_name().endswith("[ZLUDA]"):
4
+
5
+ class STFT:
6
+ def __init__(self):
7
+ self.device = "cuda"
8
+ self.fourier_bases = {} # Cache for Fourier bases
9
+
10
+ def _get_fourier_basis(self, n_fft):
11
+ # Check if the basis for this n_fft is already cached
12
+ if n_fft in self.fourier_bases:
13
+ return self.fourier_bases[n_fft]
14
+ fourier_basis = torch.fft.fft(torch.eye(n_fft, device="cpu")).to(
15
+ self.device
16
+ )
17
+ # stack separated real and imaginary components and convert to torch tensor
18
+ cutoff = n_fft // 2 + 1
19
+ fourier_basis = torch.cat(
20
+ [fourier_basis.real[:cutoff], fourier_basis.imag[:cutoff]], dim=0
21
+ )
22
+ # cache the tensor and return
23
+ self.fourier_bases[n_fft] = fourier_basis
24
+ return fourier_basis
25
+
26
+ def transform(self, input, n_fft, hop_length, window):
27
+ # fetch cached Fourier basis
28
+ fourier_basis = self._get_fourier_basis(n_fft)
29
+ # apply hann window to Fourier basis
30
+ fourier_basis = fourier_basis * window
31
+ # pad input to center with reflect
32
+ pad_amount = n_fft // 2
33
+ input = torch.nn.functional.pad(
34
+ input, (pad_amount, pad_amount), mode="reflect"
35
+ )
36
+ # separate input into n_fft-sized frames
37
+ input_frames = input.unfold(1, n_fft, hop_length).permute(0, 2, 1)
38
+ # apply fft to each frame
39
+ fourier_transform = torch.matmul(fourier_basis, input_frames)
40
+ cutoff = n_fft // 2 + 1
41
+ return torch.complex(
42
+ fourier_transform[:, :cutoff, :], fourier_transform[:, cutoff:, :]
43
+ )
44
+
45
+ stft = STFT()
46
+ _torch_stft = torch.stft
47
+
48
+ def z_stft(input: torch.Tensor, window: torch.Tensor, *args, **kwargs):
49
+ # only optimizing a specific call from rvc.train.mel_processing.MultiScaleMelSpectrogramLoss
50
+ if (
51
+ kwargs.get("win_length") is None
52
+ and kwargs.get("center") is None
53
+ and kwargs.get("return_complex") is True
54
+ ):
55
+ # use GPU accelerated calculation
56
+ return stft.transform(
57
+ input, kwargs.get("n_fft"), kwargs.get("hop_length"), window
58
+ )
59
+ else:
60
+ # simply do the operation on CPU
61
+ return _torch_stft(
62
+ input=input.cpu(), window=window.cpu(), *args, **kwargs
63
+ ).to(input.device)
64
+
65
+ def z_jit(f, *_, **__):
66
+ f.graph = torch._C.Graph()
67
+ return f
68
+
69
+ # hijacks
70
+ torch.stft = z_stft
71
+ torch.jit.script = z_jit
72
+ # disabling unsupported cudnn
73
+ torch.backends.cudnn.enabled = False
74
+ torch.backends.cuda.enable_flash_sdp(False)
75
+ torch.backends.cuda.enable_math_sdp(True)
76
+ torch.backends.cuda.enable_mem_efficient_sdp(False)
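Reviewer note: the hijacked torch.stft only takes the GPU path when the caller passes n_fft, hop_length and return_complex=True as keywords and leaves win_length/center unset, which matches the call made by the multi-scale mel loss; everything else falls back to CPU. A sketch of a call that would hit the fast path on a ZLUDA device:
import torch

x = torch.randn(1, 48000, device="cuda")
win = torch.hann_window(1024, device="cuda")
spec = torch.stft(x, n_fft=1024, hop_length=256, window=win, return_complex=True)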
rvc/models/embedders/.gitkeep ADDED
@@ -0,0 +1 @@
1
+
rvc/models/embedders/embedders_custom/.gitkeep ADDED
@@ -0,0 +1 @@
1
+
rvc/models/formant/.gitkeep ADDED
@@ -0,0 +1 @@
1
+
rvc/models/predictors/.gitkeep ADDED
File without changes
rvc/models/pretraineds/.gitkeep ADDED
File without changes
rvc/models/pretraineds/custom/.gitkeep ADDED
@@ -0,0 +1 @@
1
+
rvc/models/pretraineds/hifi-gan/.gitkeep ADDED
File without changes
rvc/realtime/audio.py ADDED
@@ -0,0 +1,349 @@
1
+ import os
2
+ import sys
3
+ import librosa
4
+ import traceback
5
+ import numpy as np
6
+ import sounddevice as sd
7
+ from queue import Queue
8
+ from dataclasses import dataclass
9
+
10
+ now_dir = os.getcwd()
11
+ sys.path.append(now_dir)
12
+
13
+ from rvc.realtime.core import AUDIO_SAMPLE_RATE
14
+
15
+
16
+ @dataclass
17
+ class ServerAudioDevice:
18
+ index: int = 0
19
+ name: str = ""
20
+ host_api: str = ""
21
+ max_input_channels: int = 0
22
+ max_output_channels: int = 0
23
+ default_samplerate: int = 0
24
+
25
+
26
+ def check_the_device(device, type: str = "input"):
27
+ stream_cls = sd.InputStream if type == "input" else sd.OutputStream
28
+ try:
29
+ with stream_cls(
30
+ device=device["index"],
31
+ dtype=np.float32,
32
+ samplerate=device["default_samplerate"],
33
+ ):
34
+ return True
35
+ except Exception:
36
+ return False
37
+
38
+
39
+ def list_audio_device():
40
+ """
41
+ Function to query audio devices and host api.
42
+ """
43
+ try:
44
+ audio_device_list = sd.query_devices()
45
+ except Exception as e:
46
+ print("An error occurred while querying the audio device:", e)
47
+ audio_device_list = []
48
+ except OSError as e:
49
+ # This error can occur when the libportaudio2 library is missing.
50
+ print("An error occurred while querying the audio device:", e)
51
+ audio_device_list = []
52
+
53
+ input_audio_device_list = [
54
+ d
55
+ for d in audio_device_list
56
+ if d["max_input_channels"] > 0 and check_the_device(d, "input")
57
+ ]
58
+ output_audio_device_list = [
59
+ d
60
+ for d in audio_device_list
61
+ if d["max_output_channels"] > 0 and check_the_device(d, "output")
62
+ ]
63
+
64
+ try:
65
+ hostapis = sd.query_hostapis()
66
+ except Exception as e:
67
+ print("An error occurred while querying the host api:", e)
68
+ hostapis = []
69
+ except OSError as e:
70
+ # This error can occur when the libportaudio2 library is missing.
71
+ print("An error occurred while querying the host api:", e)
72
+ hostapis = []
73
+
74
+ audio_input_device = []
75
+ audio_output_device = []
76
+
77
+ for d in input_audio_device_list:
78
+ input_audio_device = ServerAudioDevice(
79
+ index=d["index"],
80
+ name=d["name"],
81
+ host_api=hostapis[d["hostapi"]]["name"],
82
+ max_input_channels=d["max_input_channels"],
83
+ max_output_channels=d["max_output_channels"],
84
+ default_samplerate=d["default_samplerate"],
85
+ )
86
+ audio_input_device.append(input_audio_device)
87
+
88
+ for d in output_audio_device_list:
89
+ output_audio_device = ServerAudioDevice(
90
+ index=d["index"],
91
+ name=d["name"],
92
+ host_api=hostapis[d["hostapi"]]["name"],
93
+ max_input_channels=d["max_input_channels"],
94
+ max_output_channels=d["max_output_channels"],
95
+ default_samplerate=d["default_samplerate"],
96
+ )
97
+ audio_output_device.append(output_audio_device)
98
+
99
+ return audio_input_device, audio_output_device
100
+
101
+
102
+ class Audio:
103
+ def __init__(
104
+ self,
105
+ callbacks,
106
+ f0_up_key: int = 0,
107
+ index_rate: float = 0.5,
108
+ protect: float = 0.5,
109
+ volume_envelope: float = 1,
110
+ f0_autotune: bool = False,
111
+ f0_autotune_strength: float = 1,
112
+ proposed_pitch=False,
113
+ proposed_pitch_threshold: float = 155.0,
114
+ input_audio_gain: float = 1.0,
115
+ output_audio_gain: float = 1.0,
116
+ monitor_audio_gain: float = 1.0,
117
+ monitor: bool = False,
118
+ ):
119
+ self.callbacks = callbacks
120
+ self.mon_queue = Queue()
121
+ self.stream = None
122
+ self.monitor = None
123
+ self.running = False
124
+ self.input_audio_gain = input_audio_gain
125
+ self.output_audio_gain = output_audio_gain
126
+ self.monitor_audio_gain = monitor_audio_gain
127
+ self.use_monitor = monitor
128
+ self.f0_up_key = f0_up_key
129
+ self.index_rate = index_rate
130
+ self.protect = protect
131
+ self.volume_envelope = volume_envelope
132
+ self.f0_autotune = f0_autotune
133
+ self.f0_autotune_strength = f0_autotune_strength
134
+ self.proposed_pitch = proposed_pitch
135
+ self.proposed_pitch_threshold = proposed_pitch_threshold
136
+
137
+ def get_input_audio_device(self, index: int):
138
+ audioinput, _ = list_audio_device()
139
+ serverAudioDevice = [x for x in audioinput if x.index == index]
140
+
141
+ return serverAudioDevice[0] if len(serverAudioDevice) > 0 else None
142
+
143
+ def get_output_audio_device(self, index: int):
144
+ _, audiooutput = list_audio_device()
145
+ serverAudioDevice = [x for x in audiooutput if x.index == index]
146
+
147
+ return serverAudioDevice[0] if len(serverAudioDevice) > 0 else None
148
+
149
+ def process_data(self, indata: np.ndarray):
150
+ indata = indata * self.input_audio_gain
151
+ unpacked_data = librosa.to_mono(indata.T)
152
+
153
+ return self.callbacks.change_voice(
154
+ unpacked_data,
155
+ self.f0_up_key,
156
+ self.index_rate,
157
+ self.protect,
158
+ self.volume_envelope,
159
+ self.f0_autotune,
160
+ self.f0_autotune_strength,
161
+ self.proposed_pitch,
162
+ self.proposed_pitch_threshold,
163
+ )
164
+
165
+ def process_data_with_time(self, indata: np.ndarray):
166
+ out_wav, _, perf, _ = self.process_data(indata)
167
+ performance_ms = perf[1]
168
+ # print(f"real-time voice conversion performance: {performance_ms:.2f} ms")
169
+ self.latency = performance_ms # latency to display on the application interface
170
+
171
+ return out_wav
172
+
173
+ def audio_stream_callback(
174
+ self, indata: np.ndarray, outdata: np.ndarray, frames, times, status
175
+ ):
176
+ try:
177
+ out_wav = self.process_data_with_time(indata)
178
+
179
+ output_channels = outdata.shape[1]
180
+ if self.use_monitor:
181
+ self.mon_queue.put(out_wav)
182
+
183
+ outdata[:] = (
184
+ np.repeat(out_wav, output_channels).reshape(-1, output_channels)
185
+ * self.output_audio_gain
186
+ )
187
+ except Exception as error:
188
+ print(f"An error occurred while running the audio stream: {error}")
189
+ print(traceback.format_exc())
190
+
191
+ def audio_queue(self, outdata: np.ndarray, frames, times, status):
192
+ try:
193
+ mon_wav = self.mon_queue.get()
194
+
195
+ while self.mon_queue.qsize() > 0:
196
+ self.mon_queue.get()
197
+
198
+ output_channels = outdata.shape[1]
199
+ outdata[:] = (
200
+ np.repeat(mon_wav, output_channels).reshape(-1, output_channels)
201
+ * self.monitor_audio_gain
202
+ )
203
+ except Exception as error:
204
+ print(f"An error occurred while running the audio queue: {error}")
205
+ print(traceback.format_exc())
206
+
207
+ def run_audio_stream(
208
+ self,
209
+ block_frame: int,
210
+ input_device_id: int,
211
+ output_device_id: int,
212
+ output_monitor_id: int,
213
+ input_max_channel: int,
214
+ output_max_channel: int,
215
+ output_monitor_max_channel: int,
216
+ input_extra_setting,
217
+ output_extra_setting,
218
+ output_monitor_extra_setting,
219
+ ):
220
+ self.stream = sd.Stream(
221
+ callback=self.audio_stream_callback,
222
+ latency="low",
223
+ dtype=np.float32,
224
+ device=(input_device_id, output_device_id),
225
+ blocksize=block_frame,
226
+ samplerate=AUDIO_SAMPLE_RATE,
227
+ channels=(input_max_channel, output_max_channel),
228
+ extra_settings=(input_extra_setting, output_extra_setting),
229
+ )
230
+ self.stream.start()
231
+
232
+ if self.use_monitor:
233
+ self.monitor = sd.OutputStream(
234
+ callback=self.audio_queue,
235
+ dtype=np.float32,
236
+ device=output_monitor_id,
237
+ blocksize=block_frame,
238
+ samplerate=AUDIO_SAMPLE_RATE,
239
+ channels=output_monitor_max_channel,
240
+ extra_settings=output_monitor_extra_setting,
241
+ )
242
+ self.monitor.start()
243
+
244
+ def stop(self):
245
+ self.running = False
246
+
247
+ if self.stream is not None:
248
+ self.stream.close()
249
+ self.stream = None
250
+
251
+ if self.monitor is not None:
252
+ self.monitor.close()
253
+ self.monitor = None
254
+
255
+ def start(
256
+ self,
257
+ input_device_id: int,
258
+ output_device_id: int,
259
+ output_monitor_id: int = None,
260
+ exclusive_mode: bool = False,
261
+ asio_input_channel: int = -1,
262
+ asio_output_channel: int = -1,
263
+ asio_output_monitor_channel: int = -1,
264
+ read_chunk_size: int = 192,
265
+ ):
266
+ self.stop()
267
+
268
+ input_audio_device, output_audio_device = self.get_input_audio_device(
269
+ input_device_id
270
+ ), self.get_output_audio_device(output_device_id)
271
+ input_channels, output_channels = (
272
+ input_audio_device.max_input_channels,
273
+ output_audio_device.max_output_channels,
274
+ )
275
+
276
+ (
277
+ input_extra_setting,
278
+ output_extra_setting,
279
+ output_monitor_extra_setting,
280
+ monitor_channels,
281
+ ) = (None, None, None, None)
282
+ wasapi_exclusive_mode = bool(exclusive_mode)
283
+
284
+ if input_audio_device and "WASAPI" in input_audio_device.host_api:
285
+ input_extra_setting = sd.WasapiSettings(
286
+ exclusive=wasapi_exclusive_mode, auto_convert=not wasapi_exclusive_mode
287
+ )
288
+ elif (
289
+ input_audio_device
290
+ and "ASIO" in input_audio_device.host_api
291
+ and asio_input_channel != -1
292
+ ):
293
+ input_extra_setting = sd.AsioSettings(
294
+ channel_selectors=[asio_input_channel]
295
+ )
296
+ input_channels = 1
297
+
298
+ if output_audio_device and "WASAPI" in output_audio_device.host_api:
299
+ output_extra_setting = sd.WasapiSettings(
300
+ exclusive=wasapi_exclusive_mode, auto_convert=not wasapi_exclusive_mode
301
+ )
302
+ elif (
303
+ input_audio_device
304
+ and "ASIO" in input_audio_device.host_api
305
+ and asio_output_channel != -1
306
+ ):
307
+ output_extra_setting = sd.AsioSettings(
308
+ channel_selectors=[asio_output_channel]
309
+ )
310
+ output_channels = 1
311
+
312
+ if self.use_monitor:
313
+ output_monitor_device = self.get_output_audio_device(output_monitor_id)
314
+ monitor_channels = output_monitor_device.max_output_channels
315
+
316
+ if output_monitor_device and "WASAPI" in output_monitor_device.host_api:
317
+ output_monitor_extra_setting = sd.WasapiSettings(
318
+ exclusive=wasapi_exclusive_mode,
319
+ auto_convert=not wasapi_exclusive_mode,
320
+ )
321
+ elif (
322
+ output_monitor_device
323
+ and "ASIO" in output_monitor_device.host_api
324
+ and asio_output_monitor_channel != -1
325
+ ):
326
+ output_monitor_extra_setting = sd.AsioSettings(
327
+ channel_selectors=[asio_output_monitor_channel]
328
+ )
329
+ monitor_channels = 1
330
+
331
+ block_frame = int((read_chunk_size * 128 / 48000) * AUDIO_SAMPLE_RATE)
332
+
333
+ try:
334
+ self.run_audio_stream(
335
+ block_frame,
336
+ input_device_id,
337
+ output_device_id,
338
+ output_monitor_id,
339
+ input_channels,
340
+ output_channels,
341
+ monitor_channels,
342
+ input_extra_setting,
343
+ output_extra_setting,
344
+ output_monitor_extra_setting,
345
+ )
346
+ self.running = True
347
+ except Exception as error:
348
+ print(f"An error occurred while streaming audio: {error}")
349
+ print(traceback.format_exc())
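Reviewer note on the block size computed in start(): with the default read_chunk_size of 192 and AUDIO_SAMPLE_RATE of 48000, block_frame = int((192 * 128 / 48000) * 48000) = 24576 samples, i.e. each stream callback processes a 512 ms block.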
rvc/realtime/callbacks.py ADDED
@@ -0,0 +1,114 @@
1
+ import os
2
+ import sys
3
+ import threading
4
+ import numpy as np
5
+
6
+ sys.path.append(os.getcwd())
7
+
8
+ from rvc.realtime.audio import Audio
9
+ from rvc.realtime.core import VoiceChanger
10
+
11
+
12
+ class AudioCallbacks:
13
+ def __init__(
14
+ self,
15
+ pass_through: bool = False,
16
+ read_chunk_size: int = 192,
17
+ cross_fade_overlap_size: float = 0.1,
18
+ extra_convert_size: float = 0.5,
19
+ model_path: str = None,
20
+ index_path: str = None,
21
+ f0_method: str = "rmvpe",
22
+ embedder_model: str = None,
23
+ embedder_model_custom: str = None,
24
+ silent_threshold: int = -90,
25
+ f0_up_key: int = 0,
26
+ index_rate: float = 0.5,
27
+ protect: float = 0.5,
28
+ volume_envelope: float = 1,
29
+ f0_autotune: bool = False,
30
+ f0_autotune_strength: float = 1,
31
+ proposed_pitch: bool = False,
32
+ proposed_pitch_threshold: float = 155.0,
33
+ input_audio_gain: float = 1.0,
34
+ output_audio_gain: float = 1.0,
35
+ monitor_audio_gain: float = 1.0,
36
+ monitor: bool = False,
37
+ vad_enabled: bool = False,
38
+ vad_sensitivity: int = 3,
39
+ vad_frame_ms: int = 30,
40
+ sid: int = 0,
41
+ # device: str = "cuda",
42
+ ):
43
+ self.pass_through = pass_through
44
+ self.lock = threading.Lock()
45
+ self.vc = VoiceChanger(
46
+ read_chunk_size,
47
+ cross_fade_overlap_size,
48
+ extra_convert_size,
49
+ model_path,
50
+ index_path,
51
+ f0_method,
52
+ embedder_model,
53
+ embedder_model_custom,
54
+ silent_threshold,
55
+ vad_enabled,
56
+ vad_sensitivity,
57
+ vad_frame_ms,
58
+ sid,
59
+ # device,
60
+ )
61
+ self.audio = Audio(
62
+ self,
63
+ f0_up_key,
64
+ index_rate,
65
+ protect,
66
+ volume_envelope,
67
+ f0_autotune,
68
+ f0_autotune_strength,
69
+ proposed_pitch,
70
+ proposed_pitch_threshold,
71
+ input_audio_gain,
72
+ output_audio_gain,
73
+ monitor_audio_gain,
74
+ monitor,
75
+ )
76
+
77
+ def change_voice(
78
+ self,
79
+ received_data: np.ndarray,
80
+ f0_up_key: int = 0,
81
+ index_rate: float = 0.5,
82
+ protect: float = 0.5,
83
+ volume_envelope: float = 1,
84
+ f0_autotune: bool = False,
85
+ f0_autotune_strength: float = 1,
86
+ proposed_pitch: bool = False,
87
+ proposed_pitch_threshold: float = 155.0,
88
+ ):
89
+ if self.pass_through: # through
90
+ vol = float(np.sqrt(np.square(received_data).mean(dtype=np.float32)))
91
+ return received_data, vol, [0, 0, 0], None
92
+
93
+ try:
94
+ with self.lock:
95
+ audio, vol, perf = self.vc.on_request(
96
+ received_data,
97
+ f0_up_key,
98
+ index_rate,
99
+ protect,
100
+ volume_envelope,
101
+ f0_autotune,
102
+ f0_autotune_strength,
103
+ proposed_pitch,
104
+ proposed_pitch_threshold,
105
+ )
106
+
107
+ return audio, vol, perf, None
108
+ except RuntimeError as error:
109
+ import traceback
110
+
111
+ print(f"An error occurred during real-time voice conversion: {error}")
112
+ print(traceback.format_exc())
113
+
114
+ return np.zeros(1, dtype=np.float32), 0, [0, 0, 0], None
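Reviewer note: a rough wiring sketch of the realtime entry point; model/index paths and device indices are placeholders, device ids come from list_audio_device():
callbacks = AudioCallbacks(
    model_path="logs/my_model/my_model.pth",
    index_path="logs/my_model/added_index.index",
    f0_method="rmvpe",
    embedder_model="contentvec",
)
callbacks.audio.start(input_device_id=1, output_device_id=3)
# ... stream runs in the background until ...
callbacks.audio.stop()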
rvc/realtime/core.py ADDED
@@ -0,0 +1,376 @@
1
+ import os
2
+ import sys
3
+ import time
4
+ import torch
5
+ import torch.nn.functional as F
6
+ import torchaudio.transforms as tat
7
+ import numpy as np
8
+
9
+ now_dir = os.getcwd()
10
+ sys.path.append(now_dir)
11
+
12
+ from rvc.realtime.utils.torch import circular_write
13
+ from rvc.realtime.utils.vad import VADProcessor
14
+ from rvc.realtime.pipeline import create_pipeline
15
+
16
+ SAMPLE_RATE = 16000
17
+ AUDIO_SAMPLE_RATE = 48000
18
+
19
+
20
+ class Realtime:
21
+ def __init__(
22
+ self,
23
+ model_path: str = None,
24
+ index_path: str = None,
25
+ f0_method: str = "rmvpe",
26
+ embedder_model: str = None,
27
+ embedder_model_custom: str = None,
28
+ silent_threshold: int = 0,
29
+ vad_enabled: bool = False,
30
+ vad_sensitivity: int = 3,
31
+ vad_frame_ms: int = 30,
32
+ sid: int = 0,
33
+ # device: str = "cuda",
34
+ ):
35
+ self.sample_rate = SAMPLE_RATE
36
+ self.convert_buffer = None
37
+ self.pitch_buffer = None
38
+ self.pitchf_buffer = None
39
+ self.return_length = 0
40
+ self.skip_head = 0
41
+ self.silence_front = 0
42
+ # Convert dB to RMS
43
+ self.input_sensitivity = 10 ** (silent_threshold / 20)
44
+ self.window_size = self.sample_rate // 100
45
+ self.dtype = torch.float32 # torch.float16 if config.is_half else torch.float32
46
+
47
+ self.vad = (
48
+ VADProcessor(
49
+ sensitivity_mode=vad_sensitivity,
50
+ sample_rate=self.sample_rate,
51
+ frame_duration_ms=vad_frame_ms,
52
+ )
53
+ if vad_enabled
54
+ else None
55
+ )
56
+ # Create conversion pipelines
57
+ self.pipeline = create_pipeline(
58
+ model_path,
59
+ index_path,
60
+ f0_method,
61
+ embedder_model,
62
+ embedder_model_custom,
63
+ # device,
64
+ sid,
65
+ )
66
+ self.device = self.pipeline.device
67
+ # Resampling of inputs and outputs.
68
+ self.resample_in = tat.Resample(
69
+ orig_freq=AUDIO_SAMPLE_RATE, new_freq=self.sample_rate, dtype=torch.float32
70
+ ).to(self.device)
71
+ self.resample_out = tat.Resample(
72
+ orig_freq=self.pipeline.tgt_sr,
73
+ new_freq=AUDIO_SAMPLE_RATE,
74
+ dtype=torch.float32,
75
+ ).to(self.device)
76
+
77
+ def realloc(
78
+ self,
79
+ block_frame: int,
80
+ extra_frame: int,
81
+ crossfade_frame: int,
82
+ sola_search_frame: int,
83
+ ):
84
+ # Calculate frame sizes based on the device sample rate (e.g., 48000 Hz) and convert them to 16000 Hz
85
+ block_frame_16k = int(block_frame / AUDIO_SAMPLE_RATE * self.sample_rate)
86
+ crossfade_frame_16k = int(
87
+ crossfade_frame / AUDIO_SAMPLE_RATE * self.sample_rate
88
+ )
89
+ sola_search_frame_16k = int(
90
+ sola_search_frame / AUDIO_SAMPLE_RATE * self.sample_rate
91
+ )
92
+ extra_frame_16k = int(extra_frame / AUDIO_SAMPLE_RATE * self.sample_rate)
93
+
94
+ convert_size_16k = (
95
+ block_frame_16k
96
+ + sola_search_frame_16k
97
+ + extra_frame_16k
98
+ + crossfade_frame_16k
99
+ )
100
+ if (
101
+ modulo := convert_size_16k % self.window_size
102
+ ) != 0: # Compensate for truncation due to hop size in model output.
103
+ convert_size_16k = convert_size_16k + (self.window_size - modulo)
104
+ self.convert_feature_size_16k = convert_size_16k // self.window_size
105
+
106
+ self.skip_head = extra_frame_16k // self.window_size
107
+ self.return_length = self.convert_feature_size_16k - self.skip_head
108
+ self.silence_front = (
109
+ extra_frame_16k - (self.window_size * 5) if self.silence_front else 0
110
+ )
111
+ # Audio buffer to measure volume between chunks
112
+ audio_buffer_size = block_frame_16k + crossfade_frame_16k
113
+ self.audio_buffer = torch.zeros(
114
+ audio_buffer_size, dtype=self.dtype, device=self.device
115
+ )
116
+ # Audio buffer for conversion without silence
117
+ self.convert_buffer = torch.zeros(
118
+ convert_size_16k, dtype=self.dtype, device=self.device
119
+ )
120
+ # Additional +1 is to compensate for pitch extraction algorithm
121
+ # that can output additional feature.
122
+ self.pitch_buffer = torch.zeros(
123
+ self.convert_feature_size_16k + 1, dtype=torch.int64, device=self.device
124
+ )
125
+ self.pitchf_buffer = torch.zeros(
126
+ self.convert_feature_size_16k + 1, dtype=self.dtype, device=self.device
127
+ )
128
+
129
+ def inference(
130
+ self,
131
+ audio_input: np.ndarray,
132
+ f0_up_key: int = 0,
133
+ index_rate: float = 0.5,
134
+ protect: float = 0.5,
135
+ volume_envelope: float = 1,
136
+ f0_autotune: bool = False,
137
+ f0_autotune_strength: float = 1,
138
+ proposed_pitch: bool = False,
139
+ proposed_pitch_threshold: float = 155.0,
140
+ ):
141
+ if self.pipeline is None:
142
+ raise RuntimeError("Pipeline is not initialized.")
143
+
144
+ # Input audio is always float32
145
+ audio_input_16k = self.resample_in(
146
+ torch.as_tensor(audio_input, dtype=torch.float32, device=self.device)
147
+ ).to(self.dtype)
148
+ circular_write(audio_input_16k, self.audio_buffer)
149
+
150
+ vol_t = torch.sqrt(torch.square(self.audio_buffer).mean())
151
+ vol = max(vol_t.item(), 0)
152
+
153
+ if self.vad is not None:
154
+ is_speech = self.vad.is_speech(audio_input_16k.cpu().numpy().copy())
155
+ if not is_speech:
156
+ # Busy wait to keep power manager happy and clocks stable. Running pipeline on-demand seems to lag when the delay between
157
+ # voice changer activation is too high.
158
+ # https://forums.developer.nvidia.com/t/why-kernel-calculate-speed-got-slower-after-waiting-for-a-while/221059/9
159
+ self.pipeline.voice_conversion(
160
+ self.convert_buffer,
161
+ self.pitch_buffer,
162
+ self.pitchf_buffer,
163
+ f0_up_key,
164
+ index_rate,
165
+ self.convert_feature_size_16k,
166
+ self.silence_front,
167
+ self.skip_head,
168
+ self.return_length,
169
+ protect,
170
+ volume_envelope,
171
+ f0_autotune,
172
+ f0_autotune_strength,
173
+ proposed_pitch,
174
+ proposed_pitch_threshold,
175
+ )
176
+ return None, vol
177
+
178
+ if vol < self.input_sensitivity:
179
+ # Busy wait to keep power manager happy and clocks stable. Running pipeline on-demand seems to lag when the delay between
180
+ # voice changer activation is too high.
181
+ # https://forums.developer.nvidia.com/t/why-kernel-calculate-speed-got-slower-after-waiting-for-a-while/221059/9
182
+ self.pipeline.voice_conversion(
183
+ self.convert_buffer,
184
+ self.pitch_buffer,
185
+ self.pitchf_buffer,
186
+ f0_up_key,
187
+ index_rate,
188
+ self.convert_feature_size_16k,
189
+ self.silence_front,
190
+ self.skip_head,
191
+ self.return_length,
192
+ protect,
193
+ volume_envelope,
194
+ f0_autotune,
195
+ f0_autotune_strength,
196
+ proposed_pitch,
197
+ proposed_pitch_threshold,
198
+ )
199
+
200
+ return None, vol
201
+
202
+ circular_write(audio_input_16k, self.convert_buffer)
203
+
204
+ audio_model = self.pipeline.voice_conversion(
205
+ self.convert_buffer,
206
+ self.pitch_buffer,
207
+ self.pitchf_buffer,
208
+ f0_up_key,
209
+ index_rate,
210
+ self.convert_feature_size_16k,
211
+ self.silence_front,
212
+ self.skip_head,
213
+ self.return_length,
214
+ protect,
215
+ volume_envelope,
216
+ f0_autotune,
217
+ f0_autotune_strength,
218
+ proposed_pitch,
219
+ proposed_pitch_threshold,
220
+ )
221
+
222
+ audio_out: torch.Tensor = self.resample_out(audio_model * torch.sqrt(vol_t))
223
+ return audio_out, vol
224
+
225
+ def __del__(self):
226
+ del self.pipeline
227
+
228
+
229
+ class VoiceChanger:
230
+ def __init__(
231
+ self,
232
+ read_chunk_size: int,
233
+ cross_fade_overlap_size: float,
234
+ extra_convert_size: float,
235
+ model_path: str = None,
236
+ index_path: str = None,
237
+ f0_method: str = "rmvpe",
238
+ embedder_model: str = None,
239
+ embedder_model_custom: str = None,
240
+ silent_threshold: int = 0,
241
+ vad_enabled: bool = False,
242
+ vad_sensitivity: int = 3,
243
+ vad_frame_ms: int = 30,
244
+ sid: int = 0,
245
+ # device: str = "cuda",
246
+ ):
247
+ self.block_frame = read_chunk_size * 128
248
+ self.crossfade_frame = int(cross_fade_overlap_size * AUDIO_SAMPLE_RATE)
249
+ self.extra_frame = int(extra_convert_size * AUDIO_SAMPLE_RATE)
250
+ self.sola_search_frame = AUDIO_SAMPLE_RATE // 100
251
+ self.sola_buffer = None
252
+ self.vc_model = Realtime(
253
+ model_path,
254
+ index_path,
255
+ f0_method,
256
+ embedder_model,
257
+ embedder_model_custom,
258
+ silent_threshold,
259
+ vad_enabled,
260
+ vad_sensitivity,
261
+ vad_frame_ms,
262
+ sid,
263
+ # device
264
+ )
265
+ self.device = self.vc_model.device
266
+ self.vc_model.realloc(
267
+ self.block_frame,
268
+ self.extra_frame,
269
+ self.crossfade_frame,
270
+ self.sola_search_frame,
271
+ )
272
+ self.generate_strength()
273
+
274
+ def generate_strength(self):
275
+ self.fade_in_window: torch.Tensor = (
276
+ torch.sin(
277
+ 0.5
278
+ * np.pi
279
+ * torch.linspace(
280
+ 0.0,
281
+ 1.0,
282
+ steps=self.crossfade_frame,
283
+ device=self.device,
284
+ dtype=torch.float32,
285
+ )
286
+ )
287
+ ** 2
288
+ )
289
+
290
+ self.fade_out_window: torch.Tensor = 1 - self.fade_in_window
291
+ # The crossfade size may differ from the previous run, so the SOLA buffer is reset.
292
+ self.sola_buffer = torch.zeros(
293
+ self.crossfade_frame, device=self.device, dtype=torch.float32
294
+ )
295
+
296
+ def process_audio(
297
+ self,
298
+ audio_input: np.ndarray,
299
+ f0_up_key: int = 0,
300
+ index_rate: float = 0.5,
301
+ protect: float = 0.5,
302
+ volume_envelope: float = 1,
303
+ f0_autotune: bool = False,
304
+ f0_autotune_strength: float = 1,
305
+ proposed_pitch: bool = False,
306
+ proposed_pitch_threshold: float = 155.0,
307
+ ):
308
+ block_size = audio_input.shape[0]
309
+
310
+ audio, vol = self.vc_model.inference(
311
+ audio_input,
312
+ f0_up_key,
313
+ index_rate,
314
+ protect,
315
+ volume_envelope,
316
+ f0_autotune,
317
+ f0_autotune_strength,
318
+ proposed_pitch,
319
+ proposed_pitch_threshold,
320
+ )
321
+
322
+ if audio is None:
323
+ # In case there's an actual silence - send full block with zeros
324
+ return np.zeros(block_size, dtype=np.float32), vol
325
+
326
+ conv_input = audio[None, None, : self.crossfade_frame + self.sola_search_frame]
327
+ cor_nom = F.conv1d(conv_input, self.sola_buffer[None, None, :])
328
+ cor_den = torch.sqrt(
329
+ F.conv1d(
330
+ conv_input**2,
331
+ torch.ones(1, 1, self.crossfade_frame, device=self.device),
332
+ )
333
+ + 1e-8
334
+ )
335
+ sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0])
336
+
337
+ audio = audio[sola_offset:]
338
+ audio[: self.crossfade_frame] *= self.fade_in_window
339
+ audio[: self.crossfade_frame] += self.sola_buffer * self.fade_out_window
340
+
341
+ self.sola_buffer[:] = audio[block_size : block_size + self.crossfade_frame]
342
+ return audio[:block_size].detach().cpu().numpy(), vol
343
+
344
+ @torch.no_grad()
345
+ def on_request(
346
+ self,
347
+ audio_input: np.ndarray,
348
+ f0_up_key: int = 0,
349
+ index_rate: float = 0.5,
350
+ protect: float = 0.5,
351
+ volume_envelope: float = 1,
352
+ f0_autotune: bool = False,
353
+ f0_autotune_strength: float = 1,
354
+ proposed_pitch: bool = False,
355
+ proposed_pitch_threshold: float = 155.0,
356
+ ):
357
+ if self.vc_model is None:
358
+ raise RuntimeError("Voice Changer is not selected.")
359
+
360
+ start = (
361
+ time.perf_counter()
362
+ ) # Using perf_counter to measure real-time voice conversion latency.
363
+ result, vol = self.process_audio(
364
+ audio_input,
365
+ f0_up_key,
366
+ index_rate,
367
+ protect,
368
+ volume_envelope,
369
+ f0_autotune,
370
+ f0_autotune_strength,
371
+ proposed_pitch,
372
+ proposed_pitch_threshold,
373
+ )
374
+ end = time.perf_counter()
375
+
376
+ return result, vol, [0, (end - start) * 1000, 0]
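Reviewer note: the SOLA step in process_audio picks the offset that maximizes the normalized cross-correlation between the freshly converted head and the previous block's tail before crossfading; the same math in isolation (sizes here are arbitrary):
import torch
import torch.nn.functional as F

crossfade, search = 480, 480
new_head = torch.randn(crossfade + search)   # start of the newly converted block
prev_tail = torch.randn(crossfade)           # sola_buffer carried over from the previous block
num = F.conv1d(new_head[None, None, :], prev_tail[None, None, :])
den = torch.sqrt(F.conv1d(new_head[None, None, :] ** 2,
                          torch.ones(1, 1, crossfade)) + 1e-8)
sola_offset = int(torch.argmax(num[0, 0] / den[0, 0]))   # best alignment within [0, search]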
rvc/realtime/pipeline.py ADDED
@@ -0,0 +1,414 @@
1
+ import os
2
+ import sys
3
+ import faiss
4
+ import numpy as np
5
+ import torch
6
+ import torch.nn.utils.parametrize
7
+ import torch.nn.functional as F
8
+ import torchaudio.transforms as tat
9
+ from torch import Tensor
10
+
11
+ now_dir = os.getcwd()
12
+ sys.path.append(now_dir)
13
+
14
+ from rvc.realtime.utils.torch import circular_write
15
+ from rvc.configs.config import Config
16
+ from rvc.infer.pipeline import Autotune, AudioProcessor
17
+ from rvc.lib.algorithm.synthesizers import Synthesizer
18
+ from rvc.lib.predictors.f0 import FCPE, RMVPE, SWIFT
19
+ from rvc.lib.utils import load_embedding, HubertModelWithFinalProj
20
+
21
+
22
+ class RealtimeVoiceConverter:
23
+ """
24
+ A class for performing realtime voice conversion using the Retrieval-Based Voice Conversion (RVC) method.
25
+ """
26
+
27
+ def __init__(self, weight_root):
28
+ """
29
+ Initializes the RealtimeVoiceConverter with default configuration, and sets up models and parameters.
30
+ """
31
+ self.config = Config() # Load configuration
32
+ self.tgt_sr = None # Target sampling rate for the output audio
33
+ self.net_g = None # Generator network for voice conversion
34
+ self.cpt = None # Checkpoint for loading model weights
35
+ self.version = None # Model version
36
+ self.use_f0 = None # Whether the model uses F0
37
+ # load weights and setup model network.
38
+ self.load_model(weight_root)
39
+ self.setup_network()
40
+
41
+ def load_model(self, weight_root):
42
+ """
43
+ Loads the model weights from the specified path.
44
+
45
+ Args:
46
+ weight_root (str): Path to the model weights.
47
+ """
48
+ self.cpt = (
49
+ torch.load(weight_root, map_location="cpu", weights_only=True)
50
+ if os.path.isfile(weight_root)
51
+ else None
52
+ )
53
+
54
+ def setup_network(self):
55
+ """
56
+ Sets up the network configuration based on the loaded checkpoint.
57
+ """
58
+ if self.cpt is not None:
59
+ self.tgt_sr = self.cpt["config"][-1]
60
+ self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0]
61
+ self.use_f0 = self.cpt.get("f0", 1)
62
+
63
+ self.version = self.cpt.get("version", "v1")
64
+ self.text_enc_hidden_dim = 768 if self.version == "v2" else 256
65
+ self.vocoder = self.cpt.get("vocoder", "HiFi-GAN")
66
+ self.net_g = Synthesizer(
67
+ *self.cpt["config"],
68
+ use_f0=self.use_f0,
69
+ text_enc_hidden_dim=self.text_enc_hidden_dim,
70
+ vocoder=self.vocoder,
71
+ )
72
+
73
+ self.net_g.load_state_dict(self.cpt["weight"], strict=False)
74
+ strip_parametrizations(self.net_g)
75
+ self.net_g = self.net_g.to(self.config.device).float()
76
+ self.net_g.eval()
77
+ # self.net_g.remove_weight_norm()
78
+
79
+ def inference(
80
+ self,
81
+ feats: Tensor,
82
+ p_len: Tensor,
83
+ sid: Tensor,
84
+ pitch: Tensor,
85
+ pitchf: Tensor,
86
+ ):
87
+ output = self.net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]
88
+
89
+ return torch.clip(output, -1.0, 1.0, out=output)
90
+
91
+
92
+ class Realtime_Pipeline:
93
+ def __init__(
94
+ self,
95
+ vc: RealtimeVoiceConverter,
96
+ hubert_model: HubertModelWithFinalProj = None,
97
+ index=None,
98
+ big_npy=None,
99
+ f0_method: str = "rmvpe",
100
+ sid: int = 0,
101
+ ):
102
+ self.vc = vc
103
+ self.hubert_model = hubert_model
104
+ self.index = index
105
+ self.big_npy = big_npy
106
+ self.use_f0 = vc.use_f0
107
+ self.version = vc.version
108
+ self.f0_method = f0_method
109
+ self.sample_rate = 16000
110
+ self.tgt_sr = vc.tgt_sr
111
+ self.window = 160
112
+ self.model_window = self.tgt_sr // 100
113
+ self.f0_min = 50.0
114
+ self.f0_max = 1100.0
115
+ self.device = vc.config.device
116
+ self.sid = torch.tensor([sid], device=self.device, dtype=torch.int64)
117
+ self.autotune = Autotune()
118
+ self.resamplers = {}
119
+ self.f0_model = None
120
+
121
+ def get_f0(
122
+ self,
123
+ x: Tensor,
124
+ pitch: Tensor = None,
125
+ pitchf: Tensor = None,
126
+ f0_up_key: int = 0,
127
+ f0_autotune: bool = False,
128
+ f0_autotune_strength: float = 1.0,
129
+ proposed_pitch: bool = False,
130
+ proposed_pitch_threshold: float = 155.0,
131
+ ):
132
+ """
133
+ Estimates the fundamental frequency (F0) of a given audio signal using various methods.
134
+ """
135
+
136
+ if torch.is_tensor(x):
137
+ # If the input is a tensor, it will need to be converted to numpy array to calculate with RMVPE and FCPE.
138
+ x = x.cpu().numpy()
139
+
140
+ if self.f0_method == "rmvpe":
141
+ if self.f0_model is None:
142
+ self.f0_model = RMVPE(
143
+ device=self.device,
144
+ sample_rate=self.sample_rate,
145
+ hop_size=self.window,
146
+ )
147
+ f0 = self.f0_model.get_f0(x, filter_radius=0.03)
148
+ elif self.f0_method == "fcpe":
149
+ if self.f0_model is None:
150
+ self.f0_model = FCPE(
151
+ device=self.device,
152
+ sample_rate=self.sample_rate,
153
+ hop_size=self.window,
154
+ )
155
+ f0 = self.f0_model.get_f0(x, x.shape[0] // self.window, filter_radius=0.006)
156
+ elif self.f0_method == "swift":
157
+ if self.f0_model is None:
158
+ self.f0_model = SWIFT(
159
+ device=self.device,
160
+ sample_rate=self.sample_rate,
161
+ hop_size=self.window,
162
+ )
163
+ f0 = self.f0_model.get_f0(
164
+ x,
165
+ self.f0_min,
166
+ self.f0_max,
167
+ x.shape[0] // self.window,
168
+ confidence_threshold=0.887,
169
+ )
170
+
171
+ # f0 adjustments
172
+ if f0_autotune is True:
173
+ f0 = self.autotune.autotune_f0(f0, f0_autotune_strength)
174
+ elif proposed_pitch is True:
175
+ limit = 12
176
+ # calculate median f0 of the audio
177
+ valid_f0 = np.where(f0 > 0)[0]
178
+ if len(valid_f0) < 2:
179
+ # no valid f0 detected
180
+ up_key = 0
181
+ else:
182
+ median_f0 = float(
183
+ np.median(np.interp(np.arange(len(f0)), valid_f0, f0[valid_f0]))
184
+ )
185
+ if median_f0 <= 0 or np.isnan(median_f0):
186
+ up_key = 0
187
+ else:
188
+ # calculate proposed shift
189
+ up_key = max(
190
+ -limit,
191
+ min(
192
+ limit,
193
+ int(
194
+ np.round(
195
+ 12 * np.log2(proposed_pitch_threshold / median_f0)
196
+ )
197
+ ),
198
+ ),
199
+ )
200
+ print(
201
+ "calculated pitch offset:", up_key
202
+ ) # Might need to hide so terminal output doesn't become a mess
203
+ f0 *= pow(2, (f0_up_key + up_key) / 12)
204
+ else:
205
+ f0 *= pow(2, f0_up_key / 12)
206
+
207
+ # Convert to Tensor for computational use
208
+ f0 = torch.from_numpy(f0).to(self.device).float()
209
+
210
+ # quantizing f0 to 255 buckets to make coarse f0
211
+ f0_mel = 1127.0 * torch.log(1.0 + f0 / 700.0)
212
+ f0_mel = torch.clip(
213
+ (f0_mel - self.f0_min) * 254 / (self.f0_max - self.f0_min) + 1,
214
+ 1,
215
+ 255,
216
+ out=f0_mel,
217
+ )
218
+ f0_coarse = torch.round(f0_mel, out=f0_mel).long()
219
+
220
+ if pitch is not None and pitchf is not None:
221
+ circular_write(f0_coarse, pitch)
222
+ circular_write(f0, pitchf)
223
+ else:
224
+ pitch = f0_coarse
225
+ pitchf = f0
226
+
227
+ return pitch.unsqueeze(0), pitchf.unsqueeze(0)
228
+
229
+ def voice_conversion(
230
+ self,
231
+ audio: Tensor,
232
+ pitch: Tensor = None,
233
+ pitchf: Tensor = None,
234
+ f0_up_key: int = 0,
235
+ index_rate: float = 0.5,
236
+ p_len: int = 0,
237
+ silence_front: int = 0,
238
+ skip_head: int = None,
239
+ return_length: int = None,
240
+ protect: float = 0.5,
241
+ volume_envelope: float = 1,
242
+ f0_autotune: bool = False,
243
+ f0_autotune_strength: float = 1,
244
+ proposed_pitch: bool = False,
245
+ proposed_pitch_threshold: float = 155.0,
246
+ ):
247
+ """
248
+ Performs realtime voice conversion on a given audio segment.
249
+ """
250
+ assert audio.dim() == 1, audio.dim()
251
+ feats = audio.view(1, -1).to(self.device)
252
+
253
+ formant_length = int(np.ceil(return_length * 1.0))
254
+
255
+ pitch, pitchf = (
256
+ self.get_f0(
257
+ audio[silence_front:],
258
+ pitch,
259
+ pitchf,
260
+ f0_up_key,
261
+ f0_autotune,
262
+ f0_autotune_strength,
263
+ proposed_pitch,
264
+ proposed_pitch_threshold,
265
+ )
266
+ if self.use_f0
267
+ else (None, None)
268
+ )
269
+
270
+ # extract features
271
+ feats = self.hubert_model(feats)["last_hidden_state"]
272
+ feats = (
273
+ self.hubert_model.final_proj(feats[0]).unsqueeze(0)
274
+ if self.version == "v1"
275
+ else feats
276
+ )
277
+
278
+ feats = torch.cat((feats, feats[:, -1:, :]), 1)
279
+ # make a copy for pitch guidance and protection
280
+ feats0 = feats.detach().clone() if self.use_f0 else None
281
+
282
+ if (
283
+ self.index
284
+ ): # set by parent function, only true if index is available, loaded, and index rate > 0
285
+ feats = self._retrieve_speaker_embeddings(
286
+ skip_head, feats, self.index, self.big_npy, index_rate
287
+ )
288
+ # feature upsampling
289
+ feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)[
290
+ :, :p_len, :
291
+ ]
292
+
293
+ if self.use_f0:
294
+ feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(
295
+ 0, 2, 1
296
+ )[:, :p_len, :]
297
+ pitch, pitchf = pitch[:, -p_len:], pitchf[:, -p_len:] * (
298
+ formant_length / return_length
299
+ )
300
+
301
+ # Pitch protection blending
302
+ if protect < 0.5:
303
+ pitchff = pitchf.detach().clone()
304
+ pitchff[pitchf > 0] = 1
305
+ pitchff[pitchf < 1] = protect
306
+ feats = feats * pitchff.unsqueeze(-1) + feats0 * (
307
+ 1 - pitchff.unsqueeze(-1)
308
+ )
309
+ feats = feats.to(feats0.dtype)
310
+ else:
311
+ pitch, pitchf = None, None
312
+
313
+ p_len = torch.tensor([p_len], device=self.device, dtype=torch.int64)
314
+ out_audio = self.vc.inference(feats, p_len, self.sid, pitch, pitchf).float()
315
+ if volume_envelope != 1:
316
+ out_audio = AudioProcessor.change_rms(
317
+ audio, self.sample_rate, out_audio, self.tgt_sr, volume_envelope
318
+ )
319
+
320
+ scaled_window = int(np.floor(1.0 * self.model_window))
321
+
322
+ if scaled_window != self.model_window:
323
+ if scaled_window not in self.resamplers:
324
+ self.resamplers[scaled_window] = tat.Resample(
325
+ orig_freq=scaled_window,
326
+ new_freq=self.model_window,
327
+ dtype=torch.float32,
328
+ ).to(self.device)
329
+ out_audio = self.resamplers[scaled_window](
330
+ out_audio[: return_length * scaled_window]
331
+ )
332
+
333
+ return out_audio
334
+
335
+ def _retrieve_speaker_embeddings(
336
+ self, skip_head, feats, index, big_npy, index_rate
337
+ ):
338
+ skip_offset = skip_head // 2
339
+ npy = feats[0][skip_offset:].cpu().numpy()
340
+ score, ix = index.search(npy, k=8)
341
+ weight = np.square(1 / score)
342
+ weight /= weight.sum(axis=1, keepdims=True)
343
+ npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
344
+ feats[0][skip_offset:] = (
345
+ torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
346
+ + (1 - index_rate) * feats[0][skip_offset:]
347
+ )
348
+ return feats
349
+
350
+
351
+ def load_faiss_index(file_index):
352
+ if file_index != "" and os.path.exists(file_index):
353
+ try:
354
+ index = faiss.read_index(file_index)
355
+ big_npy = index.reconstruct_n(0, index.ntotal)
356
+ except Exception as error:
357
+ print(f"An error occurred reading the FAISS index: {error}")
358
+ index = big_npy = None
359
+ else:
360
+ index = big_npy = None
361
+
362
+ return index, big_npy
363
+
364
+
365
+ def create_pipeline(
366
+ model_path: str = None,
367
+ index_path: str = None,
368
+ f0_method: str = "rmvpe",
369
+ embedder_model: str = None,
370
+ embedder_model_custom: str = None,
371
+ # device: str = "cuda",
372
+ sid: int = 0,
373
+ ):
374
+ """
375
+ Initialize real-time voice conversion pipeline.
376
+ """
377
+
378
+ vc = RealtimeVoiceConverter(model_path)
379
+ index, big_npy = load_faiss_index(
380
+ index_path.strip()
381
+ .strip('"')
382
+ .strip("\n")
383
+ .strip('"')
384
+ .strip()
385
+ .replace("trained", "added")
386
+ )
387
+
388
+ hubert_model = load_embedding(embedder_model, embedder_model_custom)
389
+ hubert_model = hubert_model.to(vc.config.device).float()
390
+ hubert_model.eval()
391
+
392
+ pipeline = Realtime_Pipeline(
393
+ vc,
394
+ hubert_model,
395
+ index,
396
+ big_npy,
397
+ f0_method,
398
+ sid,
399
+ )
400
+
401
+ return pipeline
402
+
403
+
404
+ def strip_parametrizations(module: torch.nn.Module):
405
+ """
406
+ Remove all parametrizations (e.g., weight norm) from a module and log each removal.
407
+ """
408
+ for name, submodule in module.named_modules():
409
+ if hasattr(submodule, "parametrizations"):
410
+ for pname, plist in list(submodule.parametrizations.items()):
411
+ # print(f"Removing parametrizations from {name}.{pname}: {[p.__class__.__name__ for p in plist]}")
412
+ torch.nn.utils.parametrize.remove_parametrizations(
413
+ submodule, pname, leave_parametrized=True
414
+ )
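Reviewer note: create_pipeline is the factory used by the Realtime class in rvc/realtime/core.py; called standalone it looks like this (paths are placeholders):
pipeline = create_pipeline(
    model_path="logs/my_model/my_model.pth",
    index_path="logs/my_model/added_index.index",
    f0_method="rmvpe",
    embedder_model="contentvec",
)
print(pipeline.tgt_sr, pipeline.device)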
rvc/realtime/utils/torch.py ADDED
@@ -0,0 +1,8 @@
1
+ import torch
2
+
3
+
4
+ def circular_write(new_data: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
5
+ offset = new_data.shape[0]
6
+ target[:-offset] = target[offset:].detach().clone()
7
+ target[-offset:] = new_data
8
+ return target
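Reviewer note: circular_write shifts the buffer left by the length of the new chunk and writes the chunk at the end, i.e. a fixed-size FIFO; for example:
import torch

buf = torch.tensor([1.0, 2.0, 3.0, 4.0])
circular_write(torch.tensor([9.0, 9.0]), buf)
print(buf)   # tensor([3., 4., 9., 9.])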
rvc/realtime/utils/vad.py ADDED
@@ -0,0 +1,85 @@
1
+ import webrtcvad
2
+ import numpy as np
3
+
4
+
5
+ class VADProcessor:
6
+ def __init__(self, sensitivity_mode=3, sample_rate=16000, frame_duration_ms=30):
7
+ """
8
+ Initializes the VADProcessor.
9
+
10
+ Args:
11
+ sensitivity_mode (int): VAD sensitivity (0-3). 3 is most aggressive.
12
+ sample_rate (int): Sample rate of the audio. Must be 8000, 16000, 32000, or 48000 Hz.
13
+ WebRTC VAD internally works best with 16000 Hz.
14
+ frame_duration_ms (int): Duration of each audio frame in ms. Must be 10, 20, or 30.
15
+ """
16
+
17
+ if sample_rate not in [8000, 16000, 32000, 48000]:
18
+ raise ValueError("VAD sample rate must be 8000, 16000, 32000, or 48000 Hz")
19
+ if frame_duration_ms not in [10, 20, 30]:
20
+ raise ValueError("VAD frame duration must be 10, 20, or 30 ms")
21
+
22
+ self.vad = webrtcvad.Vad(sensitivity_mode)
23
+ self.sample_rate = sample_rate
24
+ self.frame_length = int(sample_rate * (frame_duration_ms / 1000.0))
25
+ # print(f"VAD Initialized: SR={sample_rate}, Frame Duration={frame_duration_ms}ms, Frame Length={self.frame_length} samples")
26
+
27
+ def is_speech(self, audio_chunk_float32):
28
+ """
29
+ Detects if the given audio chunk contains speech.
30
+
31
+ Args:
32
+ audio_chunk_float32 (np.ndarray): A chunk of audio data in float32 format, mono.
33
+ The sample rate must match the one VAD was initialized with.
34
+
35
+ Returns:
36
+ bool: True if speech is detected in the chunk, False otherwise.
37
+ """
38
+
39
+ if audio_chunk_float32.ndim > 1 and audio_chunk_float32.shape[1] == 1:
40
+ audio_chunk_float32 = audio_chunk_float32.flatten()
41
+ elif audio_chunk_float32.ndim > 1:
42
+ # If stereo, average to mono. This is a simple approach.
43
+ # For better results, ensure mono input from the source.
44
+ print("VAD Warning: Received stereo audio, averaging to mono.")
45
+ audio_chunk_float32 = np.mean(audio_chunk_float32, axis=1)
46
+
47
+ # Convert float32 audio to int16 PCM
48
+ # WebRTC VAD expects 16-bit linear PCM audio.
49
+ if np.max(np.abs(audio_chunk_float32)) > 1.0:
50
+ # print(
51
+ # f"VAD Warning: Input audio chunk has values outside [-1.0, 1.0]: min={np.min(audio_chunk_float32)}, max={np.max(audio_chunk_float32)}. Clipping."
52
+ # )
53
+ audio_chunk_float32 = np.clip(audio_chunk_float32, -1.0, 1.0)
54
+
55
+ audio_chunk_int16 = (audio_chunk_float32 * 32767).astype(np.int16)
56
+
57
+ num_frames = len(audio_chunk_int16) // self.frame_length
58
+ if num_frames == 0 and len(audio_chunk_int16) > 0:
59
+ # If the chunk is smaller than one frame, pad it for VAD analysis
60
+ # This might not be ideal but handles small initial chunks
61
+ padding = np.zeros(
62
+ self.frame_length - len(audio_chunk_int16), dtype=np.int16
63
+ )
64
+ audio_chunk_int16 = np.concatenate((audio_chunk_int16, padding))
65
+ num_frames = 1
66
+ elif num_frames == 0 and len(audio_chunk_int16) == 0:
67
+ return False # Empty chunk
68
+
69
+ try:
70
+ for i in range(num_frames):
71
+ start = i * self.frame_length
72
+ end = start + self.frame_length
73
+ frame = audio_chunk_int16[start:end]
74
+ # The VAD expects bytes, not a NumPy array.
75
+ if self.vad.is_speech(frame.tobytes(), self.sample_rate):
76
+ return True # Speech detected in at least one frame
77
+ return False # No speech detected in any frame
78
+ except Exception as e:
79
+ # webrtcvad can sometimes throw "Error talking to VAD" or similar
80
+ # if frame length is not perfect.
81
+ print(
82
+ f"VAD processing error: {e}. Chunk length: {len(audio_chunk_int16)}, Frame length: {self.frame_length}"
83
+ )
84
+ # Fallback: assume no speech on error to avoid processing noise
85
+ return False
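Reviewer note: minimal usage sketch with 30 ms frames at 16 kHz (a 480-sample chunk is exactly one frame):
import numpy as np

vad = VADProcessor(sensitivity_mode=3, sample_rate=16000, frame_duration_ms=30)
chunk = np.zeros(480, dtype=np.float32)   # one frame of silence
print(vad.is_speech(chunk))               # expected: False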
rvc/train/anyprecision_optimizer.py ADDED
@@ -0,0 +1,182 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # AnyPrecisionAdamW: a flexible precision AdamW optimizer
8
+ # with optional Kahan summation for high precision weight updates.
9
+ # Allows direct control over momentum, variance and auxiliary compensation
10
+ # buffer dtypes.
11
+ # Optional Kahan summation is used to offset precision reduction for
12
+ # the weight updates. This allows full training in BFloat16 (equal or
13
+ # better than FP32 results in many cases) due to high precision weight upates.
14
+
15
+ import torch
16
+ from torch.optim.optimizer import Optimizer
17
+
18
+
19
+ class AnyPrecisionAdamW(Optimizer):
20
+ def __init__(
21
+ self,
22
+ params,
23
+ lr=1e-3,
24
+ betas=(0.9, 0.999),
25
+ eps=1e-8,
26
+ weight_decay=0.0,
27
+ use_kahan_summation=True,
28
+ momentum_dtype=torch.bfloat16,
29
+ variance_dtype=torch.bfloat16,
30
+ compensation_buffer_dtype=torch.bfloat16,
31
+ ):
32
+ """
33
+ Args:
34
+ params (iterable): iterable of parameters to optimize or dicts defining
35
+ parameter groups
36
+ lr (float, optional): learning rate (default: 1e-3)
37
+ betas (Tuple[float, float], optional): coefficients used for computing
38
+ running averages of gradient and its square (default: (0.9, 0.999))
39
+ eps (float, optional): term added to the denominator to improve
40
+ numerical stability (default: 1e-8)
41
+ weight_decay (float, optional): weight decay coefficient (default: 1e-2)
42
+
43
+ # Any Precision specific
44
+ use_kahan_summation = creates auxiliary buffer to ensure high precision
45
+ model param updates (default: False)
46
+ momentum_dtype = dtype for momentum (default: BFloat32)
47
+ variance_dtype = dtype for uncentered variance (default: BFloat16)
48
+ compensation_buffer_dtype = dtype for Kahan summation
49
+ buffer (default: BFloat16). Only used if
50
+ ``use_kahan_summation=True``.
51
+
52
+ # Usage
53
+ This optimizer implements optimizer states, and Kahan summation
54
+ for high precision updates, all in user controlled dtypes.
55
+ Defaults are variance in BF16, Momentum in FP32.
56
+ This can be run in FSDP mixed precision, amp, or full precision,
57
+ depending on what training pipeline you wish to work with.
58
+
59
+ Setting to use_kahan_summation = False, and changing momentum and
60
+ variance dtypes to FP32, reverts this to a standard AdamW optimizer.
61
+ """
+         defaults = dict(
+             lr=lr,
+             betas=betas,
+             eps=eps,
+             weight_decay=weight_decay,
+             use_kahan_summation=use_kahan_summation,
+             momentum_dtype=momentum_dtype,
+             variance_dtype=variance_dtype,
+             compensation_buffer_dtype=compensation_buffer_dtype,
+         )
+
+         super().__init__(params, defaults)
+
+     @torch.no_grad()
+     def step(self, closure=None):
+         """Performs a single optimization step.
+         Args:
+             closure (callable, optional): A closure that reevaluates the model
+                 and returns the loss.
+         """
+
+         if closure is not None:
+             with torch.enable_grad():
+                 # to satisfy the linter, we do not keep the returned loss for now.
+                 closure()
+
+         for group in self.param_groups:
+
+             beta1, beta2 = group["betas"]
+             lr = group["lr"]
+             weight_decay = group["weight_decay"]
+             eps = group["eps"]
+             use_kahan_summation = group["use_kahan_summation"]
+
+             momentum_dtype = group["momentum_dtype"]
+             variance_dtype = group["variance_dtype"]
+             compensation_buffer_dtype = group["compensation_buffer_dtype"]
+
+             for p in group["params"]:
+                 if p.grad is None:
+                     continue
+
+                 if p.grad.is_sparse:
+                     raise RuntimeError(
+                         "AnyPrecisionAdamW does not support sparse gradients"
+                     )
+
+                 state = self.state[p]
+
+                 # State initialization
+                 if len(state) == 0:
+
+                     state["step"] = torch.tensor(0.0)
+
+                     # momentum - EMA of gradient values
+                     state["exp_avg"] = torch.zeros_like(
+                         p,
+                         dtype=momentum_dtype,
+                     )
+
+                     # uncentered variance - EMA of squared gradient values
+                     state["exp_avg_sq"] = torch.zeros_like(
+                         p,
+                         dtype=variance_dtype,
+                     )
+
+                     # optional Kahan summation - accumulated error tracker
+                     if use_kahan_summation:
+                         state["compensation"] = torch.zeros_like(
+                             p,
+                             dtype=compensation_buffer_dtype,
+                         )
+
+                 # main processing -------------------------
+
+                 # update the step count for this parameter
+                 state["step"] += 1
+                 step = state["step"]
+
+                 exp_avg = state["exp_avg"]
+                 exp_avg_sq = state["exp_avg_sq"]
+
+                 grad = p.grad
+
+                 # weight decay, AdamW style (decoupled from the gradient)
+                 if weight_decay:
+                     p.data.mul_(1 - lr * weight_decay)
+
+                 # update momentum
+                 exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
+
+                 # update uncentered variance
+                 exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
+
+                 # bias correction for the first moment
+                 bias_correction1 = 1 - beta1**step
+
+                 step_size = lr / bias_correction1
+
+                 # bias correction for the second moment
+                 denom_correction = (1 - beta2**step) ** 0.5  # avoids math import
+
+                 # bias-corrected denominator: sqrt of the uncentered variance plus eps
+                 centered_variance = (exp_avg_sq.sqrt() / denom_correction).add_(
+                     eps, alpha=1
+                 )
+
+                 # lr update to compensation
+                 if use_kahan_summation:
+                     compensation = state["compensation"]
+
+                     compensation.addcdiv_(exp_avg, centered_variance, value=-step_size)
+
+                     # update weights with compensation (Kahan summation)
+                     # save error back to compensation for next iteration
+                     temp_buffer = p.detach().clone()
+                     p.data.add_(compensation)
+                     compensation.add_(temp_buffer.sub_(p.data))
+
+                 else:
+                     # usual AdamW updates
+                     p.data.addcdiv_(exp_avg, centered_variance, value=-step_size)
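To make the Kahan-summation idea behind the compensation buffer concrete, here is a standalone numeric sketch (not part of this commit; the 1.0 starting weight and the 1e-4 step are arbitrary). It follows the same three-step pattern as step(): fold the update into the compensation buffer, add the buffer to the weight, then store back whatever the low-precision add dropped.

import torch

p = torch.tensor(1.0, dtype=torch.bfloat16)      # weight stored in BF16
comp = torch.tensor(0.0, dtype=torch.bfloat16)   # Kahan compensation buffer
update = -1e-4                                   # step far below the BF16 spacing near 1.0 (~0.004)

naive = p.clone()
for _ in range(100):
    naive += update                              # plain BF16 add: the step rounds away entirely

    comp += update                               # fold the update into the compensation buffer
    old = p.clone()
    p = p + comp                                 # apply the compensated step
    comp += old - p                              # keep the part the BF16 add dropped

print("naive BF16:", naive.item())               # stays at 1.0
print("Kahan BF16:", p.item())                   # ends near 0.99, i.e. 100 * -1e-4 was applied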
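And a minimal usage sketch (assumptions: a toy nn.Linear model, placeholder hyperparameters, and an import path derived from the file location added in this commit with the repo root on sys.path):

import torch
import torch.nn as nn

from rvc.train.anyprecision_optimizer import AnyPrecisionAdamW

model = nn.Linear(16, 1)                         # toy stand-in for a real network
optimizer = AnyPrecisionAdamW(
    model.parameters(),
    lr=1e-4,                                     # placeholder hyperparameters
    betas=(0.9, 0.999),
    use_kahan_summation=True,                    # default; False + FP32 dtypes gives plain AdamW
)

x, y = torch.randn(8, 16), torch.randn(8, 1)
loss = nn.functional.mse_loss(model(x), y)
loss.backward()
optimizer.step()                                 # BF16 states, Kahan-compensated weight update
optimizer.zero_grad()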