dr87 committed
Commit 885c4af · verified · 1 Parent(s): 873c8f7

Upload 2 files

Files changed (2):
  1. extract.py +365 -0
  2. penn-python.zip +3 -0
extract.py ADDED
@@ -0,0 +1,365 @@
+ import os
+ import sys
+ import glob
+ import time
+ import tqdm
+ import torch
+ import numpy as np
+ import concurrent.futures
+ import multiprocessing as mp
+ import json
+ import shutil
+ import argparse
+ import torchcrepe
+ import resampy
+ import penn
+
+ now_dir = os.getcwd()
+ sys.path.append(os.path.join(now_dir))
+
+ # Zluda hijack
+ import rvc.lib.zluda
+
+ from rvc.lib.utils import load_audio, load_embedding
+ from rvc.train.extract.preparing_files import generate_config, generate_filelist
+ from rvc.lib.predictors.RMVPE import RMVPE0Predictor
+ from rvc.configs.config import Config
+
+ # Load config
+ config = Config()
+
+ mp.set_start_method("spawn", force=True)
+
+
+ class FeatureInput:
+     """Class for F0 extraction."""
+
+     def __init__(self, sample_rate=16000, hop_size=160, device="cpu"):
+         self.fs = sample_rate
+         self.hop = hop_size
+         self.f0_bin = 256
+         self.f0_max = 1100.0
+         self.f0_min = 50.0
+         self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
+         self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
+         self.device = device
+         self.model_rmvpe = None
+
+     def compute_f0(self, np_arr, f0_method, hop_length):
+         """Extract F0 using the specified method."""
+         if f0_method == "crepe":
+             return self.get_crepe(np_arr, hop_length)
+         elif f0_method == "rmvpe":
+             # Ensure model is loaded if needed (handled in process_files)
+             if self.model_rmvpe is None:
+                 raise RuntimeError("RMVPE model not initialized. Call process_files first.")
+             return self.model_rmvpe.infer_from_audio(np_arr, thred=0.03)
+         elif f0_method == "fcnf0":
+             return self.get_fcnf0(np_arr)
+         else:
+             raise ValueError(f"Unknown F0 method: {f0_method}")
+
+     def get_crepe(self, x, hop_length):
+         """Extract F0 using CREPE."""
+         audio = torch.from_numpy(x.astype(np.float32)).to(self.device)
+         audio /= torch.quantile(torch.abs(audio), 0.999)
+         audio = audio.unsqueeze(0)
+         pitch = torchcrepe.predict(
+             audio,
+             self.fs,
+             hop_length,
+             self.f0_min,
+             self.f0_max,
+             "full",
+             batch_size=hop_length * 2,
+             device=audio.device,
+             pad=True,
+         )
+         source = pitch.squeeze(0).cpu().float().numpy()
+         source[source < 0.001] = np.nan
+         target = np.interp(
+             np.arange(0, len(source) * (x.size // self.hop), len(source))
+             / (x.size // self.hop),
+             np.arange(0, len(source)),
+             source,
+         )
+         return np.nan_to_num(target)
+
+     def get_fcnf0(self, x):
+         """Extract F0 using FCNF0++."""
+         device_obj = torch.device(self.device)
+
+         # FCNF0++ uses an 8 kHz sample rate, per the paper, for increased accuracy
+         audio_8k = resampy.resample(x, self.fs, 8000, filter='kaiser_best')
+         audio_tensor = torch.from_numpy(audio_8k.astype(np.float32)).to(device_obj)
+         audio_tensor = audio_tensor.unsqueeze(0)
+
+         gpu_index = device_obj.index if device_obj.type == 'cuda' else None
+
+         # These settings follow both the paper and the authors' examples
+         pitch, periodicity = penn.from_audio(
+             audio=audio_tensor,
+             sample_rate=8000,
+             hopsize=0.01,  # 10 ms
+             fmin=30,
+             fmax=1600,
+             checkpoint=None,  # defaults to the stock FCNF0++ checkpoint
+             batch_size=2048,
+             center='half-hop',
+             interp_unvoiced_at=0.065,
+             gpu=gpu_index
+         )
+
+         source = pitch.squeeze().cpu().float().numpy()
+
+         # Resample the 10 ms FCNF0++ pitch track onto the script's own hop grid
+         time_original = np.arange(x.size // self.hop) * (self.hop / self.fs)
+         time_fcnf0 = np.arange(len(source)) * 0.01  # time points for the penn output
+
+         # Handle the edge case where source is empty or has only one value
+         if len(source) < 2:
+             # If empty or single-valued, return a constant array of that value (or NaN)
+             fill_value = source[0] if len(source) == 1 else np.nan
+             target = np.full(x.size // self.hop, fill_value)
+         else:
+             target = np.interp(time_original, time_fcnf0, source, left=source[0], right=source[-1])
+
+         return np.nan_to_num(target)
+
+     def coarse_f0(self, f0):
+         """Convert F0 to coarse F0."""
+         f0_mel = 1127 * np.log(1 + f0 / 700)
+         f0_mel = np.clip(
+             (f0_mel - self.f0_mel_min)
+             * (self.f0_bin - 2)
+             / (self.f0_mel_max - self.f0_mel_min)
+             + 1,
+             1,
+             self.f0_bin - 1,
+         )
+         return np.rint(f0_mel).astype(int)
+
+     def process_file(self, file_info, f0_method, hop_length):
+         """Process a single audio file for F0 extraction."""
+         inp_path, opt_path1, opt_path2, _ = file_info
+
+         if os.path.exists(opt_path1) and os.path.exists(opt_path2):
+             return
+
+         try:
+             np_arr = load_audio(inp_path, 16000)
+             feature_pit = self.compute_f0(np_arr, f0_method, hop_length)
+             np.save(opt_path2, feature_pit, allow_pickle=False)
+             coarse_pit = self.coarse_f0(feature_pit)
+             np.save(opt_path1, coarse_pit, allow_pickle=False)
+         except Exception as error:
+             print(
+                 f"An error occurred extracting file {inp_path} on {self.device}: {error}"
+             )
+
+     def process_files(
+         self, files, f0_method, hop_length, device_num, device, n_threads
+     ):
+         """Process multiple files."""
+         self.device = device
+         if f0_method == "rmvpe":
+             self.model_rmvpe = RMVPE0Predictor(
+                 os.path.join("rvc", "models", "predictors", "rmvpe.pt"),
+                 is_half=False,
+                 device=device,
+             )
+         elif f0_method == "fcnf0":
+             # Penn lib handles it
+             pass
+         else:
+             n_threads = 1
+
+         n_threads = 1 if n_threads == 0 else n_threads
+
+         def process_file_wrapper(file_info):
+             self.process_file(file_info, f0_method, hop_length)
+
+         with tqdm.tqdm(total=len(files), leave=True, position=device_num) as pbar:
+             # using multi-threading
+             with concurrent.futures.ThreadPoolExecutor(
+                 max_workers=n_threads
+             ) as executor:
+                 futures = [
+                     executor.submit(process_file_wrapper, file_info)
+                     for file_info in files
+                 ]
+                 for future in concurrent.futures.as_completed(futures):
+                     pbar.update(1)
+
+
+ def run_pitch_extraction(files, devices, f0_method, hop_length, num_processes):
+     devices_str = ", ".join(devices)
+     print(
+         f"Starting pitch extraction with {num_processes} cores on {devices_str} using {f0_method}..."
+     )
+     start_time = time.time()
+     fe = FeatureInput()
+     ps = []
+     num_devices = len(devices)
+     for i, device in enumerate(devices):
+         p = mp.Process(
+             target=fe.process_files,
+             args=(
+                 files[i::num_devices],
+                 f0_method,
+                 hop_length,
+                 i,
+                 device,
+                 num_processes // num_devices,
+             ),
+         )
+         ps.append(p)
+         p.start()
+     for i, device in enumerate(devices):
+         ps[i].join()
+
+     elapsed_time = time.time() - start_time
+     print(f"Pitch extraction completed in {elapsed_time:.2f} seconds.")
+
+
+ def process_file_embedding(
+     files, version, embedder_model, embedder_model_custom, device_num, device, n_threads
+ ):
+     dtype = torch.float32
+     model = load_embedding(embedder_model, embedder_model_custom).to(dtype).to(device)
+     n_threads = 1 if n_threads == 0 else n_threads
+
+     def process_file_embedding_wrapper(file_info):
+         wav_file_path, _, _, out_file_path = file_info
+         if os.path.exists(out_file_path):
+             return
+         feats = torch.from_numpy(load_audio(wav_file_path, 16000)).to(dtype).to(device)
+         feats = feats.view(1, -1)
+         with torch.no_grad():
+             feats = model(feats)["last_hidden_state"]
+             feats = (
+                 model.final_proj(feats[0]).unsqueeze(0) if version == "v1" else feats
+             )
+         feats = feats.squeeze(0).float().cpu().numpy()
+         if not np.isnan(feats).any():
+             np.save(out_file_path, feats, allow_pickle=False)
+         else:
+             print(f"{wav_file_path} contains NaN values and will be skipped.")
+
+     with tqdm.tqdm(total=len(files), leave=True, position=device_num) as pbar:
+         with concurrent.futures.ThreadPoolExecutor(max_workers=n_threads) as executor:
+             futures = [
+                 executor.submit(process_file_embedding_wrapper, file_info)
+                 for file_info in files
+             ]
+             for future in concurrent.futures.as_completed(futures):
+                 pbar.update(1)
+
+
+ def run_embedding_extraction(
+     files, devices, version, embedder_model, embedder_model_custom, num_processes
+ ):
+     start_time = time.time()
+     devices_str = ", ".join(devices)
+
+     print(
+         f"Starting embedding extraction with {num_processes} cores on {devices_str}..."
+     )
+     ps = []
+     num_devices = len(devices)
+     for i, device in enumerate(devices):
+         p = mp.Process(
+             target=process_file_embedding,
+             args=(
+                 files[i::num_devices],
+                 version,
+                 embedder_model,
+                 embedder_model_custom,
+                 i,
+                 device,
+                 num_processes // num_devices,
+             ),
+         )
+         ps.append(p)
+         p.start()
+     for i, device in enumerate(devices):
+         ps[i].join()
+     elapsed_time = time.time() - start_time
+     print(f"Embedding extraction completed in {elapsed_time:.2f} seconds.")
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(description="Extract features for RVC training.")
+     parser.add_argument("exp_dir", type=str, help="Experiment directory (e.g., logs/my_model).")
+     parser.add_argument("f0_method", type=str, choices=["crepe", "rmvpe", "fcnf0"], help="F0 extraction method.")
+     parser.add_argument("hop_length", type=int, help="Hop length for F0 extraction.")
+     parser.add_argument("num_processes", type=int, help="Number of parallel processes.")
+     parser.add_argument("gpus", type=str, help="GPU IDs to use, separated by '-', or '-' for CPU.")
+     parser.add_argument("version", type=str, choices=["v1", "v2"], help="RVC model version.")
+     parser.add_argument("sample_rate", type=str, choices=["32000", "40000", "48000"], help="Target sample rate.")
+     parser.add_argument("embedder_model", type=str, help="Pretrained embedder model name or 'custom'.")
+     parser.add_argument("embedder_model_custom", type=str, nargs='?', default=None, help="Path to custom embedder model (if embedder_model is 'custom').")
+     parser.add_argument("--val", action="store_true", help="Generate filelist for validation (skips adding mute files).")
+
+     args = parser.parse_args()
+
+     exp_dir = args.exp_dir
+     f0_method = args.f0_method
+     hop_length = args.hop_length
+     num_processes = args.num_processes
+     gpus = args.gpus
+     version = args.version
+     sample_rate = args.sample_rate
+     embedder_model = args.embedder_model
+     embedder_model_custom = args.embedder_model_custom
+     is_validation = args.val
+
+     wav_path = os.path.join(exp_dir, "sliced_audios_16k")
+     os.makedirs(os.path.join(exp_dir, "f0"), exist_ok=True)
+     os.makedirs(os.path.join(exp_dir, "f0_voiced"), exist_ok=True)
+     os.makedirs(os.path.join(exp_dir, version + "_extracted"), exist_ok=True)
+
+     chosen_embedder_model = (
+         embedder_model_custom if embedder_model == "custom" else embedder_model
+     )
+
+     file_path = os.path.join(exp_dir, "model_info.json")
+     if os.path.exists(file_path):
+         with open(file_path, "r") as f:
+             data = json.load(f)
+     else:
+         data = {}
+     data.update(
+         {
+             "embedder_model": chosen_embedder_model,
+         }
+     )
+     with open(file_path, "w") as f:
+         json.dump(data, f, indent=4)
+
+     files = []
+     for file in glob.glob(os.path.join(wav_path, "*.wav")):
+         file_name = os.path.basename(file)
+         file_info = [
+             file,  # full path to sliced 16k wav
+             os.path.join(exp_dir, "f0", file_name + ".npy"),
+             os.path.join(exp_dir, "f0_voiced", file_name + ".npy"),
+             os.path.join(
+                 exp_dir, version + "_extracted", file_name.replace("wav", "npy")
+             ),
+         ]
+         files.append(file_info)
+
+     devices = ["cpu"] if gpus == "-" else [f"cuda:{idx}" for idx in gpus.split("-")]
+
+     run_pitch_extraction(files, devices, f0_method, hop_length, num_processes)
+
+     run_embedding_extraction(
+         files, devices, version, embedder_model, embedder_model_custom, num_processes
+     )
+
+     generate_config(version, sample_rate, exp_dir)
+     generate_filelist(exp_dir, version, sample_rate, is_validation_set=is_validation)
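
Usage note (editorial, not part of the commit): given the positional arguments defined in the argparse block above, a minimal sketch of how the uploaded extract.py might be driven from Python is shown below. The experiment directory, GPU id, and embedder name are illustrative placeholders, not values taken from this upload.

# Hypothetical invocation of extract.py; adjust paths and options for your setup.
import subprocess

subprocess.run(
    [
        "python", "extract.py",
        "logs/my_model",  # exp_dir: expects a sliced_audios_16k/ subfolder
        "fcnf0",          # f0_method: crepe | rmvpe | fcnf0
        "128",            # hop_length
        "4",              # num_processes
        "0",              # gpus: "-" for CPU, "0-1" for two GPUs
        "v2",             # version
        "40000",          # sample_rate
        "contentvec",     # embedder_model (or "custom" plus a path argument)
    ],
    check=True,
)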
penn-python.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9cb1f37a70c45d34c4860f60e15da2819ebdec46a4f4db4cb179869f03e1f614
+ size 115396