nevreal committed on
Commit 780f01d · verified · 1 Parent(s): aaced9c

Create rvc.py

Files changed (1)
  1. rvc.py +243 -0
rvc.py ADDED
@@ -0,0 +1,243 @@
+ import os
+ import glob
+ import json
+ import traceback
+ import logging
+ import gradio as gr
+ import numpy as np
+ import librosa
+ import torch
+ import asyncio
+ import edge_tts
+ import yt_dlp
+ import ffmpeg
+ import subprocess
+ import sys
+ import io
+ import wave
+ from datetime import datetime
+ from fairseq import checkpoint_utils
+ from lib.infer_pack.models import (
+     SynthesizerTrnMs256NSFsid,
+     SynthesizerTrnMs256NSFsid_nono,
+     SynthesizerTrnMs768NSFsid,
+     SynthesizerTrnMs768NSFsid_nono,
+ )
+ from vc_infer_pipeline import VC
+ from config import Config
+ config = Config()
+ logging.getLogger("numba").setLevel(logging.WARNING)
+ limitation = os.getenv("SYSTEM") == "spaces"
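+ # On Hugging Face Spaces (SYSTEM=spaces) "limitation" is True: Youtube mode is
+ # disabled, uploads are capped at 6 minutes, and TTS text at 600 characters.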
+ 
+ audio_mode = []
+ f0method_mode = []
+ f0method_info = ""
+ if limitation is True:
+     audio_mode = ["Upload audio", "TTS Audio"]
+     f0method_mode = ["pm", "crepe", "harvest"]
+     f0method_info = "PM is fast, RMVPE is a middle ground, and Crepe or Harvest is good but extremely slow (Default: PM)"
+ else:
+     audio_mode = ["Upload audio", "Youtube", "TTS Audio"]
+     f0method_mode = ["pm", "crepe", "harvest"]
+     f0method_info = "PM is fast, RMVPE is a middle ground, and Crepe or Harvest is good but extremely slow (Default: PM)"
+ 
+ # Offer RMVPE only when its checkpoint is available locally.
+ if os.path.isfile("rmvpe.pt"):
+     f0method_mode.insert(2, "rmvpe")
+ 
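+ # create_vc_fn returns a closure that the Gradio UI can bind as an event
+ # handler. A hypothetical direct call, assuming a model loaded by load_model()
+ # below, would look like:
+ #   vc_fn = create_vc_fn("Some Model", tgt_sr, net_g, vc, if_f0, "v2", "x.index")
+ #   info, (sr, audio) = vc_fn("Upload audio", "", (44100, pcm), None, None,
+ #                             0, "pm", 0.7, 3, 0, 0.25, 0.33)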
+ def create_vc_fn(model_title, tgt_sr, net_g, vc, if_f0, version, file_index):
+     def vc_fn(
+         vc_audio_mode,
+         vc_input,
+         vc_upload,
+         tts_text,
+         tts_voice,
+         f0_up_key,
+         f0_method,
+         index_rate,
+         filter_radius,
+         resample_sr,
+         rms_mix_rate,
+         protect,
+     ):
+         try:
+             if vc_audio_mode in ("Input path", "Youtube") and vc_input != "":
+                 audio, sr = librosa.load(vc_input, sr=16000, mono=True)
+             elif vc_audio_mode == "Upload audio":
+                 if vc_upload is None:
+                     return "You need to upload an audio", None
+                 sampling_rate, audio = vc_upload
+                 duration = audio.shape[0] / sampling_rate
+                 if duration > 360 and limitation:
+                     return "Please upload an audio file that is less than 6 minutes.", None
+                 # Normalize integer PCM to float32, downmix to mono, and
+                 # resample to the 16 kHz rate HuBERT expects.
+                 audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
+                 if len(audio.shape) > 1:
+                     audio = librosa.to_mono(audio.transpose(1, 0))
+                 if sampling_rate != 16000:
+                     audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
+             elif vc_audio_mode == "TTS Audio":
+                 if tts_text is None or tts_voice is None:
+                     return "You need to enter text and select a voice", None
+                 if len(tts_text) > 600 and limitation:
+                     return "Text is too long", None
+                 asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3"))
+                 audio, sr = librosa.load("tts.mp3", sr=16000, mono=True)
+                 vc_input = "tts.mp3"
+             times = [0, 0, 0]
+             f0_up_key = int(f0_up_key)
+             audio_opt = vc.pipeline(
+                 hubert_model,
+                 net_g,
+                 0,
+                 audio,
+                 vc_input,
+                 times,
+                 f0_up_key,
+                 f0_method,
+                 file_index,
+                 # file_big_npy,
+                 index_rate,
+                 if_f0,
+                 filter_radius,
+                 tgt_sr,
+                 resample_sr,
+                 rms_mix_rate,
+                 version,
+                 protect,
+                 f0_file=None,
+             )
+             info = f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}s, f0: {times[1]}s, infer: {times[2]}s"
+             print(f"{model_title} | {info}")
+             return info, (tgt_sr, audio_opt)
+         except Exception:
+             info = traceback.format_exc()
+             print(info)
+             return info, (None, None)
+     return vc_fn
+ 
+ 
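+ # load_model() expects a "weights/" tree described by two JSON files. A minimal
+ # sketch of the layout the code below reads (field names taken from the code;
+ # the concrete values here are hypothetical):
+ #
+ # weights/folder_info.json:
+ #   {"anime": {"enable": true, "title": "Anime", "folder_path": "anime"}}
+ # weights/anime/model_info.json:
+ #   {"some_character": {"enable": true, "title": "Some Character",
+ #    "model_path": "some_character.pth", "author": "...", "cover": "cover.png",
+ #    "feature_retrieval_library": "added.index"}}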
+ def load_model():
+     categories = []
+     with open("weights/folder_info.json", "r", encoding="utf-8") as f:
+         folder_info = json.load(f)
+     for category_name, category_info in folder_info.items():
+         if not category_info['enable']:
+             continue
+         category_title = category_info['title']
+         category_folder = category_info['folder_path']
+         models = []
+         with open(f"weights/{category_folder}/model_info.json", "r", encoding="utf-8") as f:
+             models_info = json.load(f)
+         for character_name, info in models_info.items():
+             if not info['enable']:
+                 continue
+             model_title = info['title']
+             model_name = info['model_path']
+             model_author = info.get("author", None)
+             model_cover = f"weights/{category_folder}/{character_name}/{info['cover']}"
+             model_index = f"weights/{category_folder}/{character_name}/{info['feature_retrieval_library']}"
+             cpt = torch.load(f"weights/{category_folder}/{character_name}/{model_name}", map_location="cpu")
+             tgt_sr = cpt["config"][-1]
+             cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
+             if_f0 = cpt.get("f0", 1)
+             version = cpt.get("version", "v1")
+             if version == "v1":
+                 if if_f0 == 1:
+                     net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
+                 else:
+                     net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+                 model_version = "V1"
+             elif version == "v2":
+                 if if_f0 == 1:
+                     net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
+                 else:
+                     net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+                 model_version = "V2"
+             del net_g.enc_q  # the posterior encoder is only needed for training
+             print(net_g.load_state_dict(cpt["weight"], strict=False))
+             net_g.eval().to(config.device)
+             if config.is_half:
+                 net_g = net_g.half()
+             else:
+                 net_g = net_g.float()
+             vc = VC(tgt_sr, config)
+             print(f"Model loaded: {character_name} / {info['feature_retrieval_library']} | ({model_version})")
+             models.append((character_name, model_title, model_author, model_cover, model_version, create_vc_fn(model_title, tgt_sr, net_g, vc, if_f0, version, model_index)))
+         categories.append([category_title, category_folder, models])
+     return categories
+ 
+ 
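+ # The demucs CLI writes stems to output/<model_name>/<track_name>/, so for the
+ # downloaded "dl_audio/youtube_audio.wav" the vocal/instrumental stems land in
+ # output/htdemucs/youtube_audio/ (or output/mdx_extra_q/youtube_audio/).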
+ def cut_vocal_and_inst(url, audio_provider, split_model):
+     if url != "":
+         if not os.path.exists("dl_audio"):
+             os.mkdir("dl_audio")
+         if audio_provider == "Youtube":
+             ydl_opts = {
+                 'format': 'bestaudio/best',
+                 'postprocessors': [{
+                     'key': 'FFmpegExtractAudio',
+                     'preferredcodec': 'wav',
+                 }],
+                 "outtmpl": 'dl_audio/youtube_audio',
+             }
+             with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+                 ydl.download([url])
+             audio_path = "dl_audio/youtube_audio.wav"
+         else:
+             # Spotify doesn't work; a different solution is needed.
+             '''
+             command = f"spotdl download {url} --output dl_audio/.wav"
+             result = subprocess.run(command.split(), stdout=subprocess.PIPE)
+             print(result.stdout.decode())
+             audio_path = "dl_audio/spotify_audio.wav"
+             '''
+             raise gr.Error("Only Youtube is supported for now!")
+         if split_model == "htdemucs":
+             command = f"demucs --two-stems=vocals {audio_path} -o output"
+             result = subprocess.run(command.split(), stdout=subprocess.PIPE)
+             print(result.stdout.decode())
+             return "output/htdemucs/youtube_audio/vocals.wav", "output/htdemucs/youtube_audio/no_vocals.wav", audio_path, "output/htdemucs/youtube_audio/vocals.wav"
+         else:
+             command = f"demucs --two-stems=vocals -n mdx_extra_q {audio_path} -o output"
+             result = subprocess.run(command.split(), stdout=subprocess.PIPE)
+             print(result.stdout.decode())
+             return "output/mdx_extra_q/youtube_audio/vocals.wav", "output/mdx_extra_q/youtube_audio/no_vocals.wav", audio_path, "output/mdx_extra_q/youtube_audio/vocals.wav"
+     else:
+         raise gr.Error("URL Required!")
+     return None, None, None, None
+ 
+ 
+ def combine_vocal_and_inst(audio_data, audio_volume, split_model):
+     # os.makedirs creates the nested "output/result" path even when "output"
+     # itself does not exist yet.
+     if not os.path.exists("output/result"):
+         os.makedirs("output/result")
+     vocal_path = "output/result/output.wav"
+     output_path = "output/result/combine.mp3"
+     if split_model == "htdemucs":
+         inst_path = "output/htdemucs/youtube_audio/no_vocals.wav"
+     else:
+         inst_path = "output/mdx_extra_q/youtube_audio/no_vocals.wav"
+     # Write the converted vocal as 16-bit mono PCM at the model's sample rate.
+     with wave.open(vocal_path, "w") as wave_file:
+         wave_file.setnchannels(1)
+         wave_file.setsampwidth(2)
+         wave_file.setframerate(audio_data[0])
+         wave_file.writeframes(audio_data[1].tobytes())
+     # Mix the instrumental and the volume-adjusted vocal into one MP3.
+     command = f'ffmpeg -y -i {inst_path} -i {vocal_path} -filter_complex [1:a]volume={audio_volume}dB[v];[0:a][v]amix=inputs=2:duration=longest -b:a 320k -c:a libmp3lame {output_path}'
+     result = subprocess.run(command.split(), stdout=subprocess.PIPE)
+     print(result.stdout.decode())
+     return output_path
+ 
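+ # hubert_base.pt (the HuBERT content encoder used by RVC) is loaded once into
+ # the module-level "hubert_model" global that vc_fn passes to vc.pipeline.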
+ def load_hubert():
+     global hubert_model
+     models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
+         ["hubert_base.pt"],
+         suffix="",
+     )
+     hubert_model = models[0]
+     hubert_model = hubert_model.to(config.device)
+     if config.is_half:
+         hubert_model = hubert_model.half()
+     else:
+         hubert_model = hubert_model.float()
+     hubert_model.eval()
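+ 
+ # A minimal sketch of how these pieces are typically wired together (the Gradio
+ # UI wiring itself is not part of this file):
+ #   load_hubert()
+ #   categories = load_model()
+ #   for category_title, category_folder, models in categories:
+ #       for name, title, author, cover, version, vc_fn in models:
+ #           ...  # bind vc_fn to a UI event handler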