Aitron Emper commited on
Commit
bc5a004
·
1 Parent(s): 37974d9

Delete easy_infer.py

Browse files
Files changed (1) hide show
  1. easy_infer.py +0 -1421
easy_infer.py DELETED
@@ -1,1421 +0,0 @@
1
- import subprocess
2
- import os
3
- import sys
4
- import errno
5
- import shutil
6
- import yt_dlp
7
- from mega import Mega
8
- import datetime
9
- import unicodedata
10
- import torch
11
- import glob
12
- import gradio as gr
13
- import gdown
14
- import zipfile
15
- import traceback
16
- import json
17
- import requests
18
- import wget
19
- import hashlib
20
- now_dir = os.getcwd()
21
- sys.path.append(now_dir)
22
- from unidecode import unidecode
23
- import re
24
- from lib.infer_pack.models_onnx import SynthesizerTrnMsNSFsidM
25
- from vc_infer_pipeline import VC
26
- from MDXNet import MDXNetDereverb
27
- from infer_uvr5 import _audio_pre_, _audio_pre_new
28
- from huggingface_hub import HfApi, list_models
29
- from huggingface_hub import login
30
- from i18n import I18nAuto
31
- i18n = I18nAuto()
32
- from bs4 import BeautifulSoup
33
-
34
# Locations scanned at import time for voice models, feature indexes,
# audio clips and UVR5 separation models.
weight_root = "weights"
weight_uvr5_root = "uvr5_weights"
index_root = "./logs/"
audio_root = "audios"

# Voice-model weight files (*.pth) available for inference.
names = [name for name in os.listdir(weight_root) if name.endswith(".pth")]

index_paths = []

indexes_list = []  # shared, module-level; filled in elsewhere by the UI

audio_paths = []

# Feature-index files, skipping the intermediate "trained" indexes.
# Bug fix: paths were joined with a hard-coded backslash ("%s\\%s"), which
# produces broken paths on non-Windows systems; os.path.join is portable.
for root, dirs, files in os.walk(index_root, topdown=False):
    for name in files:
        if name.endswith(".index") and "trained" not in name:
            index_paths.append(os.path.join(root, name))

for root, dirs, files in os.walk(audio_root, topdown=False):
    for name in files:
        audio_paths.append(os.path.join(root, name))

# UVR5 (vocal separation) model names, with any .pth suffix stripped.
uvr5_names = [
    name.replace(".pth", "")
    for name in os.listdir(weight_uvr5_root)
    if name.endswith(".pth") or "onnx" in name
]
-
62
def calculate_md5(file_path):
    """Return the hexadecimal MD5 digest of the file at *file_path*.

    The file is read in 4 KiB chunks so arbitrarily large files can be
    hashed without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(file_path, "rb") as handle:
        while True:
            block = handle.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
68
-
69
def silentremove(filename):
    """Delete *filename*, treating a missing file as success.

    Any OSError other than "no such file or directory" is re-raised.
    """
    try:
        os.remove(filename)
    except OSError as err:
        if err.errno == errno.ENOENT:
            return  # already gone — nothing to do
        raise
75
def get_md5(temp_folder):
    """Return the MD5 of the first small inference model (.pth) under *temp_folder*.

    Training checkpoints (files starting with ``G_``/``D_`` or containing
    ``_G_``/``_D_``) are skipped.  Returns None when no candidate is found.
    """
    for folder, _subfolders, filenames in os.walk(temp_folder):
        for candidate in filenames:
            if not candidate.endswith(".pth"):
                continue
            if candidate.startswith(("G_", "D_")) or "_G_" in candidate or "_D_" in candidate:
                continue
            return calculate_md5(os.path.join(folder, candidate))

    return None
83
-
84
def find_parent(search_dir, file_name):
    """Return the absolute path of the first directory under *search_dir*
    that contains a file named *file_name*, or None if no such file exists."""
    for current_dir, _subdirs, files in os.walk(search_dir):
        if file_name in files:
            return os.path.abspath(current_dir)
    return None
89
-
90
def find_folder_parent(search_dir, folder_name):
    """Return the absolute path of the first directory under *search_dir*
    that contains a sub-directory named *folder_name*, or None if absent."""
    for current_dir, subdirs, _files in os.walk(search_dir):
        if folder_name in subdirs:
            return os.path.abspath(current_dir)
    return None
95
-
96
def get_drive_folder_id(url):
    """Extract the Google Drive file id from *url*.

    Supports both ``.../file/d/<id>/...`` and ``...?id=<id>&...`` link
    styles.  Returns None for non-Drive URLs or links with no recognizable id.

    Bug fix: the original parsed the id into a local variable but never
    returned it, so every call produced None.
    """
    if "drive.google.com" not in url:
        return None
    if "file/d/" in url:
        return url.split("file/d/")[1].split("/")[0]
    if "id=" in url:
        return url.split("id=")[1].split("&")[0]
    return None
104
-
105
def download_from_url(url):
    """Download *url* into the project-level ``zips`` folder.

    Dispatches on the link type (Google Drive, Hugging Face blob, mega.nz,
    Hugging Face tree listing, plain URL).  Returns "downloaded" on success,
    the sentinel strings "demasiado uso" / "link privado" for Drive quota /
    private-link failures, and None for an empty or unparseable URL.

    NOTE(review): relies on ./zips existing and on the current working
    directory being the project root (it uses os.chdir) — confirm at callers.
    """
    parent_path = find_folder_parent(".", "pretrained_v2")
    zips_path = os.path.join(parent_path, 'zips')

    if url != '':
        print(i18n("下载文件:") + f"{url}")
        if "drive.google.com" in url:
            # Extract the Drive file id from either link style.
            if "file/d/" in url:
                file_id = url.split("file/d/")[1].split("/")[0]
            elif "id=" in url:
                file_id = url.split("id=")[1].split("&")[0]
            else:
                return None

            if file_id:
                os.chdir('./zips')
                result = subprocess.run(["gdown", f"https://drive.google.com/uc?id={file_id}", "--fuzzy"], capture_output=True, text=True, encoding='utf-8')
                # gdown reports quota / privacy failures on stderr.
                if "Too many users have viewed or downloaded this file recently" in str(result.stderr):
                    return "demasiado uso"
                if "Cannot retrieve the public link of the file." in str(result.stderr):
                    return "link privado"
                print(result.stderr)

        elif "/blob/" in url:
            # Hugging Face file page -> direct download link.
            os.chdir('./zips')
            url = url.replace("blob", "resolve")
            # print("Resolved URL:", url)  # Print the resolved URL
            wget.download(url)
        elif "mega.nz" in url:
            if "#!" in url:
                file_id = url.split("#!")[1].split("!")[0]
            elif "file/" in url:
                file_id = url.split("file/")[1].split("/")[0]
            else:
                return None
            if file_id:
                m = Mega()
                m.download_url(url, zips_path)
        elif "/tree/main" in url:
            # Hugging Face folder listing: scrape the first .zip link on the page.
            response = requests.get(url)
            soup = BeautifulSoup(response.content, 'html.parser')
            temp_url = ''
            for link in soup.find_all('a', href=True):
                if link['href'].endswith('.zip'):
                    temp_url = link['href']
                    break
            if temp_url:
                url = temp_url
                # print("Updated URL:", url)  # Print the updated URL
                url = url.replace("blob", "resolve")
                # print("Resolved URL:", url)  # Print the resolved URL

                if "huggingface.co" not in url:
                    url = "https://huggingface.co" + url

                wget.download(url)
            else:
                print("No .zip file found on the page.")
                # Handle the case when no .zip file is found
        else:
            # Anything else: treat as a plain downloadable URL.
            os.chdir('./zips')
            wget.download(url)

        os.chdir(parent_path)
        print(i18n("完整下载"))
        return "downloaded"
    else:
        return None
173
-
174
class error_message(Exception):
    """Exception carrying a user-facing message in its ``mensaje`` attribute."""

    def __init__(self, mensaje):
        super().__init__(mensaje)
        self.mensaje = mensaje
178
-
179
# Only one voice (model) can be active per tab.
def get_vc(sid, to_return_protect0, to_return_protect1):
    """Load (or unload) the voice model *sid* and return gradio update dicts.

    With an empty *sid* the currently loaded model is torn down and GPU
    memory released; otherwise the checkpoint ``weights/<sid>`` is loaded,
    the matching synthesizer (v1/v2, f0/non-f0) is built, and updates for the
    speaker-id slider and the two "protect" sliders are returned.

    NOTE(review): mutates module globals (n_spk, tgt_sr, net_g, vc, cpt,
    version, hubert_model) and depends on a module-level ``config`` object.
    """
    global n_spk, tgt_sr, net_g, vc, cpt, version
    if sid == "" or sid == []:
        global hubert_model
        # Polling may switch from a loaded model to none; free it then.
        if hubert_model is not None:
            print("clean_empty_cache")
            del net_g, n_spk, vc, hubert_model, tgt_sr  # ,cpt
            hubert_model = net_g = n_spk = vc = hubert_model = tgt_sr = None
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            # Rebuilding and deleting the net below is required to fully
            # release the CUDA memory — a plain del is not enough.
            if_f0 = cpt.get("f0", 1)
            version = cpt.get("version", "v1")
            if version == "v1":
                if if_f0 == 1:
                    net_g = SynthesizerTrnMs256NSFsid(
                        *cpt["config"], is_half=config.is_half
                    )
                else:
                    net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
            elif version == "v2":
                if if_f0 == 1:
                    net_g = SynthesizerTrnMs768NSFsid(
                        *cpt["config"], is_half=config.is_half
                    )
                else:
                    net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
            del net_g, cpt
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            cpt = None
        # Hide the speaker slider and both protect sliders.
        return (
            {"visible": False, "__type__": "update"},
            {"visible": False, "__type__": "update"},
            {"visible": False, "__type__": "update"},
        )
    person = "%s/%s" % (weight_root, sid)
    print("loading %s" % person)
    cpt = torch.load(person, map_location="cpu")
    tgt_sr = cpt["config"][-1]
    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
    if_f0 = cpt.get("f0", 1)
    if if_f0 == 0:
        # Non-f0 models have no "protect" concept — hide both sliders.
        to_return_protect0 = to_return_protect1 = {
            "visible": False,
            "value": 0.5,
            "__type__": "update",
        }
    else:
        to_return_protect0 = {
            "visible": True,
            "value": to_return_protect0,
            "__type__": "update",
        }
        to_return_protect1 = {
            "visible": True,
            "value": to_return_protect1,
            "__type__": "update",
        }
    version = cpt.get("version", "v1")
    # Pick the synthesizer class matching model version and f0 capability.
    if version == "v1":
        if if_f0 == 1:
            net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
        else:
            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
    elif version == "v2":
        if if_f0 == 1:
            net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
        else:
            net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
    # enc_q is only needed for training; drop it for inference.
    del net_g.enc_q
    print(net_g.load_state_dict(cpt["weight"], strict=False))
    net_g.eval().to(config.device)
    if config.is_half:
        net_g = net_g.half()
    else:
        net_g = net_g.float()
    vc = VC(tgt_sr, config)
    n_spk = cpt["config"][-3]
    return (
        {"visible": True, "maximum": n_spk, "__type__": "update"},
        to_return_protect0,
        to_return_protect1,
    )
264
-
265
def load_downloaded_model(url):
    """Download a model zip from *url* and install it (generator of status text).

    Unpacks the archive, moves the small inference ``.pth`` into ``weights``
    and any ``added_*.index`` / feature files into ``logs/<model_name>``,
    then removes the temporary ``zips``/``unzips`` scratch folders.  Yields
    accumulated progress messages for the gradio textbox.
    """
    parent_path = find_folder_parent(".", "pretrained_v2")
    try:
        infos = []
        logs_folders = ['0_gt_wavs','1_16k_wavs','2a_f0','2b-f0nsf','3_feature256','3_feature768']
        zips_path = os.path.join(parent_path, 'zips')
        unzips_path = os.path.join(parent_path, 'unzips')
        weights_path = os.path.join(parent_path, 'weights')
        logs_dir = ""

        # Start from clean scratch folders.
        if os.path.exists(zips_path):
            shutil.rmtree(zips_path)
        if os.path.exists(unzips_path):
            shutil.rmtree(unzips_path)

        os.mkdir(zips_path)
        os.mkdir(unzips_path)

        download_file = download_from_url(url)
        if not download_file:
            print(i18n("无法下载模型。"))
            infos.append(i18n("无法下载模型。"))
            yield "\n".join(infos)
        elif download_file == "downloaded":
            print(i18n("模型下载成功。"))
            infos.append(i18n("模型下载成功。"))
            yield "\n".join(infos)
        elif download_file == "demasiado uso":
            raise Exception(i18n("最近查看或下载此文件的用户过多"))
        elif download_file == "link privado":
            raise Exception(i18n("无法从该私人链接获取文件"))

        # Unpack the downloaded archives.
        for filename in os.listdir(zips_path):
            if filename.endswith(".zip"):
                zipfile_path = os.path.join(zips_path,filename)
                print(i18n("继续提取..."))
                infos.append(i18n("继续提取..."))
                shutil.unpack_archive(zipfile_path, unzips_path, 'zip')
                model_name = os.path.basename(zipfile_path)
                logs_dir = os.path.join(parent_path,'logs', os.path.normpath(str(model_name).replace(".zip","")))
                yield "\n".join(infos)
            else:
                print(i18n("解压缩出错。"))
                infos.append(i18n("解压缩出错。"))
                yield "\n".join(infos)

        index_file = False
        model_file = False
        D_file = False
        G_file = False

        # Move the small inference .pth (not a G_/D_ training checkpoint)
        # into weights/ and (re)create the model's logs folder.
        for path, subdirs, files in os.walk(unzips_path):
            for item in files:
                item_path = os.path.join(path, item)
                if not 'G_' in item and not 'D_' in item and item.endswith('.pth'):
                    model_file = True
                    model_name = item.replace(".pth","")
                    logs_dir = os.path.join(parent_path,'logs', model_name)
                    if os.path.exists(logs_dir):
                        shutil.rmtree(logs_dir)
                    os.mkdir(logs_dir)
                    if not os.path.exists(weights_path):
                        os.mkdir(weights_path)
                    if os.path.exists(os.path.join(weights_path, item)):
                        os.remove(os.path.join(weights_path, item))
                    if os.path.exists(item_path):
                        shutil.move(item_path, weights_path)

        if not model_file and not os.path.exists(logs_dir):
            os.mkdir(logs_dir)
        # Move index / feature / event files into the logs folder.
        for path, subdirs, files in os.walk(unzips_path):
            for item in files:
                item_path = os.path.join(path, item)
                if item.startswith('added_') and item.endswith('.index'):
                    index_file = True
                    if os.path.exists(item_path):
                        if os.path.exists(os.path.join(logs_dir, item)):
                            os.remove(os.path.join(logs_dir, item))
                        shutil.move(item_path, logs_dir)
                if item.startswith('total_fea.npy') or item.startswith('events.'):
                    if os.path.exists(item_path):
                        if os.path.exists(os.path.join(logs_dir, item)):
                            os.remove(os.path.join(logs_dir, item))
                        shutil.move(item_path, logs_dir)


        result = ""
        if model_file:
            if index_file:
                print(i18n("该模型可用于推理,并有 .index 文件。"))
                infos.append("\n" + i18n("该模型可用于推理,并有 .index 文件。"))
                yield "\n".join(infos)
            else:
                print(i18n("该模型可用于推理,但没有 .index 文件。"))
                infos.append("\n" + i18n("该模型可用于推理,但没有 .index 文件。"))
                yield "\n".join(infos)

        if not index_file and not model_file:
            print(i18n("未找到可上传的相关文件"))
            infos.append(i18n("未找到可上传的相关文件"))
            yield "\n".join(infos)

        # Clean up the scratch folders.
        if os.path.exists(zips_path):
            shutil.rmtree(zips_path)
        if os.path.exists(unzips_path):
            shutil.rmtree(unzips_path)
        os.chdir(parent_path)
        return result
    except Exception as e:
        os.chdir(parent_path)
        if "demasiado uso" in str(e):
            print(i18n("最近查看或下载此文件的用户过多"))
            yield i18n("最近查看或下载此文件的用户过多")
        elif "link privado" in str(e):
            print(i18n("无法从该私人链接获取文件"))
            yield i18n("无法从该私人链接获取文件")
        else:
            print(e)
            yield i18n("下载模型时发生错误。")
    finally:
        os.chdir(parent_path)
389
-
390
def load_dowloaded_dataset(url):
    """Download a dataset zip from *url* and install it (generator of status text).

    Unpacks the archive and moves every flac/wav file into
    ``datasets/<sanitized zip name>``, then removes the scratch folders.
    (Function name keeps its historical typo — callers reference it as-is.)
    """
    parent_path = find_folder_parent(".", "pretrained_v2")
    infos = []
    try:
        zips_path = os.path.join(parent_path, 'zips')
        unzips_path = os.path.join(parent_path, 'unzips')
        datasets_path = os.path.join(parent_path, 'datasets')
        audio_extenions =["flac","wav"]

        # Start from clean scratch folders.
        if os.path.exists(zips_path):
            shutil.rmtree(zips_path)
        if os.path.exists(unzips_path):
            shutil.rmtree(unzips_path)

        if not os.path.exists(datasets_path):
            os.mkdir(datasets_path)

        os.mkdir(zips_path)
        os.mkdir(unzips_path)

        download_file = download_from_url(url)

        if not download_file:
            print(i18n("下载模型时发生错误。"))
            infos.append(i18n("下载模型时发生错误。"))
            yield "\n".join(infos)
            raise Exception(i18n("下载模型时发生错误。"))
        elif download_file == "downloaded":
            print(i18n("模型下载成功。"))
            infos.append(i18n("模型下载成功。"))
            yield "\n".join(infos)
        elif download_file == "demasiado uso":
            raise Exception(i18n("最近查看或下载此文件的用户过多"))
        elif download_file == "link privado":
            raise Exception(i18n("无法从该私人链接获取文件"))

        zip_path = os.listdir(zips_path)
        foldername = ""
        for file in zip_path:
            if file.endswith('.zip'):
                file_path = os.path.join(zips_path, file)
                print("....")
                # Dataset folder name: zip name without spaces/dashes.
                foldername = file.replace(".zip","").replace(" ","").replace("-","_")
                dataset_path = os.path.join(datasets_path, foldername)
                print(i18n("继续提取..."))
                infos.append(i18n("继续提取..."))
                yield "\n".join(infos)
                shutil.unpack_archive(file_path, unzips_path, 'zip')
                if os.path.exists(dataset_path):
                    shutil.rmtree(dataset_path)

                os.mkdir(dataset_path)

                # Collect every audio file from the unpacked tree.
                for root, subfolders, songs in os.walk(unzips_path):
                    for song in songs:
                        song_path = os.path.join(root, song)
                        if song.endswith(tuple(audio_extenions)):
                            shutil.move(song_path, dataset_path)
            else:
                print(i18n("解压缩出错。"))
                infos.append(i18n("解压缩出错。"))
                yield "\n".join(infos)



        # Clean up the scratch folders.
        if os.path.exists(zips_path):
            shutil.rmtree(zips_path)
        if os.path.exists(unzips_path):
            shutil.rmtree(unzips_path)

        print(i18n("数据集加载成功。"))
        infos.append(i18n("数据集加载成功。"))
        yield "\n".join(infos)
    except Exception as e:
        os.chdir(parent_path)
        if "demasiado uso" in str(e):
            print(i18n("最近查看或下载此文件的用户过多"))
            yield i18n("最近查看或下载此文件的用户过多")
        elif "link privado" in str(e):
            print(i18n("无法从该私人链接获取文件"))
            yield i18n("无法从该私人链接获取文件")
        else:
            print(e)
            yield i18n("下载模型时发生错误。")
    finally:
        os.chdir(parent_path)
476
-
477
def save_model(modelname, save_action):
    """Back up the model *modelname* according to *save_action* (generator).

    Collects the requested files (everything, D/G checkpoints, or just the
    voice) from ``logs/<modelname>`` and ``weights/<modelname>.pth`` into a
    zip, then moves it to ``RVC_Backup`` (or the Colab Drive folder when
    running under /content).  Yields accumulated progress messages.
    """
    parent_path = find_folder_parent(".", "pretrained_v2")
    zips_path = os.path.join(parent_path, 'zips')
    dst = os.path.join(zips_path,modelname)
    logs_path = os.path.join(parent_path, 'logs', modelname)
    weights_path = os.path.join(parent_path, 'weights', f"{modelname}.pth")
    save_folder = parent_path
    infos = []

    try:
        if not os.path.exists(logs_path):
            raise Exception("No model found.")

        # Under Colab ('content' in the path) back up straight to Drive.
        if not 'content' in parent_path:
            save_folder = os.path.join(parent_path, 'RVC_Backup')
        else:
            save_folder = '/content/drive/MyDrive/RVC_Backup'

        infos.append(i18n("保存模型..."))
        yield "\n".join(infos)

        # Create the backup folder tree if it does not exist yet.
        if not os.path.exists(save_folder):
            os.mkdir(save_folder)
        if not os.path.exists(os.path.join(save_folder, 'ManualTrainingBackup')):
            os.mkdir(os.path.join(save_folder, 'ManualTrainingBackup'))
        if not os.path.exists(os.path.join(save_folder, 'Finished')):
            os.mkdir(os.path.join(save_folder, 'Finished'))

        # If the zips folder already exists, clear its contents just in case.
        if os.path.exists(zips_path):
            shutil.rmtree(zips_path)

        os.mkdir(zips_path)
        added_file = glob.glob(os.path.join(logs_path, "added_*.index"))
        d_file = glob.glob(os.path.join(logs_path, "D_*.pth"))
        g_file = glob.glob(os.path.join(logs_path, "G_*.pth"))

        if save_action == i18n("选择模型保存方法"):
            raise Exception("No method choosen.")

        if save_action == i18n("保存所有"):
            print(i18n("保存所有"))
            save_folder = os.path.join(save_folder, 'ManualTrainingBackup')
            shutil.copytree(logs_path, dst)
        else:
            # Create the staging folder the model will be zipped from.
            if not os.path.exists(dst):
                os.mkdir(dst)

        if save_action == i18n("保存 D 和 G"):
            print(i18n("保存 D 和 G"))
            save_folder = os.path.join(save_folder, 'ManualTrainingBackup')
            if len(d_file) > 0:
                shutil.copy(d_file[0], dst)
            if len(g_file) > 0:
                shutil.copy(g_file[0], dst)

            if len(added_file) > 0:
                shutil.copy(added_file[0], dst)
            else:
                infos.append(i18n("保存时未编制索引..."))

        if save_action == i18n("保存声音"):
            print(i18n("保存声音"))
            save_folder = os.path.join(save_folder, 'Finished')
            if len(added_file) > 0:
                shutil.copy(added_file[0], dst)
            else:
                infos.append(i18n("保存时未编制索引..."))
                #raise gr.Error("¡No ha generado el archivo added_*.index!")

        yield "\n".join(infos)
        # Skip the small model file when it has not been generated.
        if not os.path.exists(weights_path):
            infos.append(i18n("无模型保存(PTH)"))
            #raise gr.Error("¡No ha generado el modelo pequeño!")
        else:
            shutil.copy(weights_path, dst)

        yield "\n".join(infos)
        infos.append("\n" + i18n("这可能需要几分钟时间,请稍候..."))
        yield "\n".join(infos)

        shutil.make_archive(os.path.join(zips_path,f"{modelname}"), 'zip', zips_path)
        shutil.move(os.path.join(zips_path,f"{modelname}.zip"), os.path.join(save_folder, f'{modelname}.zip'))

        shutil.rmtree(zips_path)
        #shutil.rmtree(zips_path)

        infos.append("\n" + i18n("正确存储模型"))
        yield "\n".join(infos)

    except Exception as e:
        print(e)
        if "No model found." in str(e):
            infos.append(i18n("您要保存的模型不存在,请确保输入的名称正确。"))
        else:
            infos.append(i18n("保存模型时发生错误"))

        yield "\n".join(infos)
579
-
580
def load_downloaded_backup(url):
    """Download a training backup zip from *url* and unpack it into ``logs``
    (generator of status text).

    Each downloaded zip is extracted into ``logs/<zip name>``; leftover zips
    in the logs folder and the scratch ``zips`` folder are removed afterwards.
    """
    parent_path = find_folder_parent(".", "pretrained_v2")
    try:
        infos = []
        logs_folders = ['0_gt_wavs','1_16k_wavs','2a_f0','2b-f0nsf','3_feature256','3_feature768']
        zips_path = os.path.join(parent_path, 'zips')
        unzips_path = os.path.join(parent_path, 'logs')
        weights_path = os.path.join(parent_path, 'weights')
        logs_dir = ""

        # Start from a clean scratch folder.
        if os.path.exists(zips_path):
            shutil.rmtree(zips_path)

        os.mkdir(zips_path)

        download_file = download_from_url(url)
        if not download_file:
            print(i18n("无法下载模型。"))
            infos.append(i18n("无法下载模型。"))
            yield "\n".join(infos)
        elif download_file == "downloaded":
            print(i18n("模型下载成功。"))
            infos.append(i18n("模型下载成功。"))
            yield "\n".join(infos)
        elif download_file == "demasiado uso":
            raise Exception(i18n("最近查看或下载此文件的用户过多"))
        elif download_file == "link privado":
            raise Exception(i18n("无法从该私人链接获取文件"))

        # Unpack each downloaded archive into logs/<zip name>.
        for filename in os.listdir(zips_path):
            if filename.endswith(".zip"):
                zipfile_path = os.path.join(zips_path,filename)
                zip_dir_name = os.path.splitext(filename)[0]
                unzip_dir = os.path.join(parent_path,'logs', zip_dir_name)
                print(i18n("继续提取..."))
                infos.append(i18n("继续提取..."))
                shutil.unpack_archive(zipfile_path, unzip_dir, 'zip')
                yield "\n".join(infos)
            else:
                print(i18n("解压缩出错。"))
                infos.append(i18n("解压缩出错。"))
                yield "\n".join(infos)

        result = ""

        # Drop any stray zips left in the logs folder.
        for filename in os.listdir(unzips_path):
            if filename.endswith(".zip"):
                silentremove(filename)

        if os.path.exists(zips_path):
            shutil.rmtree(zips_path)
        print(i18n("备份已成功上传。"))
        infos.append("\n" + i18n("备份已成功上传。"))
        yield "\n".join(infos)
        os.chdir(parent_path)
        return result
    except Exception as e:
        os.chdir(parent_path)
        if "demasiado uso" in str(e):
            print(i18n("最近查看或下载此文件的用户过多"))
            yield i18n("最近查看或下载此文件的用户过多")
        elif "link privado" in str(e):
            print(i18n("无法从该私人链接获取文件"))
            yield i18n("无法从该私人链接获取文件")
        else:
            print(e)
            yield i18n("下载模型时发生错误。")
    finally:
        os.chdir(parent_path)
650
-
651
def save_to_wav(record_button):
    """Move a freshly recorded clip into ./audios under a timestamped name.

    Returns the new file name, or None when nothing was recorded.
    """
    if record_button is None:
        return None
    stamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    new_name = stamp + '.wav'
    shutil.move(record_button, './audios/' + new_name)
    return new_name
660
-
661
def save_to_wav2(dropbox):
    """Move an uploaded file (object exposing ``.name``) into ./audios.

    An existing file with the same base name is replaced.  Returns the
    destination path.
    """
    source_path = dropbox.name
    target_path = os.path.join('./audios', os.path.basename(source_path))

    if os.path.exists(target_path):
        os.remove(target_path)

    shutil.move(source_path, target_path)
    return target_path
671
-
672
def change_choices2():
    """Re-scan ./audios and return gradio update dicts for the audio dropdown."""
    known_extensions = ('wav', 'mp3', 'flac', 'ogg', 'opus',
                        'm4a', 'mp4', 'aac', 'alac', 'wma',
                        'aiff', 'webm', 'ac3')
    found = [
        os.path.join('./audios', entry).replace('\\', '/')
        for entry in os.listdir("./audios")
        if entry.endswith(known_extensions)
    ]
    return {"choices": sorted(found), "__type__": "update"}, {"__type__": "update"}
680
-
681
def get_models_by_name(modelname):
    """Query the community model index for *modelname* and build gradio updates.

    Posts the (accent-stripped, lower-cased) search term to a Google Apps
    Script endpoint and maps up to 20 hits onto the fixed grid of result rows
    built by ``search_model``: five updates per slot (name markdown, hidden
    url textbox, load button, separator, row) plus one final status message.
    """
    url = "https://script.google.com/macros/s/AKfycbzyrdLZzUww9qbjxnbnI08budD4yxbmRPHkWbp3UEJ9h3Id5cnNNVg0UtfFAnqqX5Rr/exec"

    response = requests.post(url, json={
        'type': 'search_by_filename',
        'filename': unidecode(modelname.strip().lower())
    })

    response_json = response.json()
    models = response_json['ocurrences']

    result = []
    message = "Busqueda realizada"
    if len(models) == 0:
        message = "No se han encontrado resultados."
    else:
        message = f"Se han encontrado {len(models)} resultados para {modelname}"

    for i in range(20):
        if i < len(models):
            urls = models[i].get('url')
            # NOTE(review): 'url' appears to be a Python-literal list encoded as a
            # string; eval() on remote data is dangerous — ast.literal_eval would
            # be the safe equivalent. Flagged, not changed.
            url = eval(urls)[0]
            name = str(models[i].get('name'))
            # Fall back to the raw filename when no display name was provided.
            filename = str(models[i].get('filename')) if not name or name.strip() == "" else name
            # Name markdown
            result.append(
                {
                    "visible": True,
                    "value": str("### ") + filename,
                    "__type__": "update",
                })
            # Hidden url textbox (input for the load button)
            result.append(
                {
                    "visible": False,
                    "value": url,
                    "__type__": "update",
                })
            # Load button
            result.append({
                "visible": True,
                "__type__": "update",
            })

            # Separator line (hidden after the last hit)
            if i == len(models) - 1:
                result.append({
                    "visible": False,
                    "__type__": "update",
                })
            else:
                result.append({
                    "visible": True,
                    "__type__": "update",
                })

            # Row container
            result.append(
                {
                    "visible": True,
                    "__type__": "update",
                })
        else:
            # Unused slot: hide every component in the row.
            # Name
            result.append(
                {
                    "visible": False,
                    "__type__": "update",
                })
            # Url
            result.append(
                {
                    "visible": False,
                    "value": False,
                    "__type__": "update",
                })
            # Button
            result.append({
                "visible": False,
                "__type__": "update",
            })
            # Separator
            result.append({
                "visible": False,
                "__type__": "update",
            })
            # Row
            result.append(
                {
                    "visible": False,
                    "__type__": "update",
                })
    # Status message shown in the results textbox
    result.append(
        {
            "value": message,
            "__type__": "update",
        }
    )

    return result
782
-
783
def search_model():
    """Build the "search a model" UI: a query box plus 20 hidden result rows.

    Each result slot is five components — name markdown, hidden url textbox,
    load button, separator markdown, and the row container — in exactly the
    order ``get_models_by_name`` emits its update dicts; the search button
    wires the query to those slots plus the status textbox.

    The original hand-wrote the 20 identical slots (l1/b1/mk1/row1 …
    l20/b20/mk20/row20); they are now built in a loop, and the unused
    ``models`` local was dropped.
    """
    gr.Markdown(value="# Buscar un modelo")
    with gr.Row():
        model_name = gr.inputs.Textbox(lines=1, label="Término de búsqueda")
        search_model_button = gr.Button("Buscar modelo")

    results = gr.Textbox(label="Resultado", value="", max_lines=20)

    slot_components = []
    for _ in range(20):
        with gr.Row(visible=False) as row:
            name_md = gr.Markdown(value="", visible=False)
            url_box = gr.Textbox("Label 1", visible=False)
            load_btn = gr.Button("Cargar modelo", visible=False)

        separator = gr.Markdown(value="---", visible=False)
        # Clicking a slot's button installs the model behind its hidden url.
        load_btn.click(fn=load_downloaded_model, inputs=url_box, outputs=results)
        slot_components.extend([name_md, url_box, load_btn, separator, row])

    search_model_button.click(
        fn=get_models_by_name,
        inputs=model_name,
        outputs=slot_components + [results],
    )
974
-
975
-
976
def descargar_desde_drive(url, name, output_file):
    """Download a Google Drive share link to *output_file* via gdown.

    Args:
        url: Google Drive URL (share link; ``fuzzy=True`` lets gdown extract
            the file id from most link formats).
        name: Human-readable model name, used only for log messages.
        output_file: Destination path for the downloaded file.

    Returns:
        The path of the downloaded file, or ``None`` on failure.
    """
    print(f"Descargando {name} de drive")

    try:
        return gdown.download(url, output=output_file, fuzzy=True)
    except Exception:
        # Was a bare `except:`, which would also swallow KeyboardInterrupt
        # and SystemExit; narrowed to Exception so Ctrl-C still works.
        print("El intento de descargar con drive no funcionó")
        return None
986
-
987
def descargar_desde_mega(url, name, output_file=None):
    """Download a mega.nz link with an anonymous Mega session.

    Args:
        url: mega.nz URL; both the legacy ``#!<id>!<key>`` and the newer
            ``file/<id>`` formats are recognized.
        name: Human-readable model name, used only for log messages.
        output_file: Accepted for signature parity with the other downloaders
            (``publish_model_clicked`` passes it), but unused — the Mega client
            chooses the destination file name itself.

    Returns:
        The path of the downloaded file, or ``None`` when the URL is not
        recognized or the download fails.
    """
    try:
        # Extract the file id just to validate the URL shape; the actual
        # download is driven by the full URL below.
        if "#!" in url:
            file_id = url.split("#!")[1].split("!")[0]
        elif "file/" in url:
            file_id = url.split("file/")[1].split("/")[0]
        else:
            file_id = None

        if file_id:
            mega = Mega()
            m = mega.login()  # anonymous session

            print(f"Descargando {name} de mega")
            downloaded_file = m.download_url(url)

            return downloaded_file
        else:
            return None

    except Exception as e:
        print("Error**")
        print(e)
        return None
1014
-
1015
def descargar_desde_url_basica(url, name, output_file):
    """Download a plain HTTP(S) URL to *output_file* via wget.

    Args:
        url: Direct download URL.
        name: Human-readable model name, used only for log messages.
        output_file: Destination path for the downloaded file.

    Returns:
        The downloaded file path, or ``None`` on failure (callers test the
        result for falsiness; previously the error path fell off the end and
        returned ``None`` only implicitly).
    """
    try:
        print(f"Descargando {name} de URL BASICA")
        filename = wget.download(url=url, out=output_file)
        return filename
    except Exception as e:
        print(f"Error al descargar el archivo: {str(e)}")
        return None
1022
-
1023
def is_valid_model(name):
    """Report which inference artifacts an unzipped model folder contains.

    Walks ``unzips/<name>`` and returns a list holding ``".index"`` when an
    ``added_*.index`` file is found and ``".pth"`` when a small inference
    weight (a ``.pth`` that is not a ``G_``/``D_`` training checkpoint) is
    found. An empty list means the archive holds neither.
    """
    parent_path = find_folder_parent(".", "pretrained_v2")
    model_dir = os.path.join(os.path.join(parent_path, 'unzips'), name)

    found_weight = False
    found_index = False

    for base, _subdirs, filenames in os.walk(model_dir):
        for fname in filenames:
            # A usable weight ends in .pth but is not a training checkpoint.
            is_weight = (
                fname.endswith(".pth")
                and not fname.startswith("G_")
                and not fname.startswith("D_")
                and "_G_" not in fname
                and "_D_" not in fname
            )
            if is_weight:
                found_weight = True
            if fname.startswith('added_') and fname.endswith('.index'):
                found_index = True

    contents = []
    if found_index:
        contents.append(".index")
    if found_weight:
        contents.append(".pth")
    return contents
1049
-
1050
-
1051
def create_zip(new_name):
    """Stage a model's essential files and compress them into ``zips/<new_name>.zip``.

    Copies the small inference weight (any ``.pth`` that is not a ``G_``/``D_``
    training checkpoint) and the ``added_*.index`` file from
    ``unzips/<new_name>`` into a clean ``temp_models/<new_name>`` folder, then
    archives that folder with :func:`shutil.make_archive`.
    """
    parent_path = find_folder_parent(".", "pretrained_v2")
    temp_folder_path = os.path.join(parent_path, 'temp_models')
    unzips_path = os.path.join(parent_path, 'unzips')
    zips_path = os.path.join(parent_path, 'zips')

    file_path = os.path.join(unzips_path, new_name)       # source (unzipped model)
    file_name = os.path.join(temp_folder_path, new_name)  # staging folder

    if not os.path.exists(zips_path):
        os.mkdir(zips_path)

    # Start from a clean staging folder for this model.
    if os.path.exists(file_name):
        shutil.rmtree(file_name)
    os.mkdir(file_name)
    # NOTE: os.mkdir is synchronous, so the folder exists here. The original
    # busy-wait (`while not os.path.exists(...): time.sleep(1)`) was dead code
    # and raised NameError anyway because `time` was never imported; removed.

    for root, subfolders, files in os.walk(file_path):
        for file in files:
            current_file_path = os.path.join(root, file)
            # Copy only the small inference weight, never training checkpoints.
            if not file.startswith("G_") and not file.startswith("D_") and file.endswith(".pth") and not "_G_" in file and not "_D_" in file:
                print(f'Copiando {current_file_path} a {os.path.join(temp_folder_path, new_name)}')
                shutil.copy(current_file_path, file_name)
            if file.startswith('added_') and file.endswith('.index'):
                print(f'Copiando {current_file_path} a {os.path.join(temp_folder_path, new_name)}')
                shutil.copy(current_file_path, file_name)

    print("Comprimiendo modelo")
    zip_path = os.path.join(zips_path, new_name)

    print(f"Comprimiendo {file_name} en {zip_path}")
    shutil.make_archive(zip_path, 'zip', file_name)
1087
-
1088
def upload_to_huggingface(file_path, new_filename):
    """Upload *file_path* as *new_filename* to the ``juuxn/RVCModels`` model repo.

    Returns:
        The public ``resolve/main`` download URL for the uploaded file.
    """
    api = HfApi()
    # SECURITY: a write-access Hugging Face token was hardcoded here (a leaked
    # credential — it must be revoked). Read it from the environment instead;
    # set HF_TOKEN before calling this function.
    login(token=os.environ.get("HF_TOKEN"))
    api.upload_file(
        path_or_fileobj=file_path,
        path_in_repo=new_filename,
        repo_id="juuxn/RVCModels",
        repo_type="model",
    )
    return f"https://huggingface.co/juuxn/RVCModels/resolve/main/{new_filename}"
1098
-
1099
-
1100
def publish_model_clicked(model_name, model_url, model_version, model_creator):
    # Publish a community model: download it, validate that it contains a
    # small inference .pth, deduplicate by MD5 against a Google Apps Script
    # spreadsheet backend, mirror the cleaned zip to Hugging Face, and record
    # the metadata row. Returns a status string (or None on early exits).

    web_service_url = "https://script.google.com/macros/s/AKfycbzyrdLZzUww9qbjxnbnI08budD4yxbmRPHkWbp3UEJ9h3Id5cnNNVg0UtfFAnqqX5Rr/exec"
    # Normalize the display name to ASCII and derive a filesystem-safe name.
    name = unidecode(model_name)
    new_name = unidecode(name.strip().replace(" ","_").replace("'",""))

    downloaded_path = ""
    url = model_url
    version = model_version
    creator = model_creator
    parent_path = find_folder_parent(".", "pretrained_v2")
    output_folder = os.path.join(parent_path, 'archivos_descargados')
    output_file = os.path.join(output_folder, f'{new_name}.zip')
    unzips_path = os.path.join(parent_path, 'unzips')
    zips_path = os.path.join(parent_path, 'zips')
    temp_folder_path = os.path.join(parent_path, 'temp_models')

    # Recreate the download folder from scratch.
    if os.path.exists(output_folder):
        shutil.rmtree(output_folder)
    os.mkdir(output_folder)

    # Recreate the staging folder from scratch.
    if os.path.exists(temp_folder_path):
        shutil.rmtree(temp_folder_path)
    os.mkdir(temp_folder_path)


    # Pick a downloader from the URL's host; anything unrecognized is treated
    # as a direct download link.
    if url and 'drive.google.com' in url:
        # Download the item when the URL is a Google Drive link.
        downloaded_path = descargar_desde_drive(url, new_name, output_file)
    elif url and 'mega.nz' in url:
        downloaded_path = descargar_desde_mega(url, new_name, output_file)
    elif url and 'pixeldrain' in url:
        print("No se puede descargar de pixeldrain")
    else:
        downloaded_path = descargar_desde_url_basica(url, new_name, output_file)

    if not downloaded_path:
        print(f"No se pudo descargar: {name}")
    else:
        filename = name.strip().replace(" ","_")
        # NOTE(review): this literal looks scrape-garbled — it should almost
        # certainly interpolate the model name (e.g. f'{new_name}.zip');
        # confirm against the original file before relying on it.
        dst =f'(unknown).zip'
        shutil.unpack_archive(downloaded_path, os.path.join(unzips_path, filename))
        md5_hash = get_md5(os.path.join(unzips_path, filename))

        # get_md5 returning a falsy value means no small inference model was
        # found inside the archive.
        if not md5_hash:
            print("No tiene modelo pequeño")
            return

        # Ask the backend whether this exact model (by MD5) is already listed.
        md5_response_raw = requests.post(web_service_url, json={
            'type': 'check_md5',
            'md5_hash': md5_hash
        })

        md5_response = md5_response_raw.json()
        ok = md5_response["ok"]
        exists = md5_response["exists"]
        message = md5_response["message"]

        is_valid = is_valid_model(filename)

        if md5_hash and exists:
            print(f"El archivo ya se ha publicado en spreadsheet con md5: {md5_hash}")
            return f"El archivo ya se ha publicado con md5: {md5_hash}"

        # Only publish archives that contain a usable .pth and are not already
        # registered.
        if ".pth" in is_valid and not exists:

            create_zip(filename)
            huggingface_url = upload_to_huggingface(os.path.join(zips_path,dst), dst)

            # Record the model row in the spreadsheet backend.
            response = requests.post(web_service_url, json={
                'type': 'save_model',
                'elements': [{
                    'name': name,
                    'filename': filename,
                    'url': [huggingface_url],
                    'version': version,
                    'creator': creator,
                    'md5_hash': md5_hash,
                    'content': is_valid
                }]})

            response_data = response.json()
            ok = response_data["ok"]
            message = response_data["message"]

            print({
                'name': name,
                'filename': filename,
                'url': [huggingface_url],
                'version': version,
                'creator': creator,
                'md5_hash': md5_hash,
                'content': is_valid
            })

            if ok:
                return f"El archivo se ha publicado con md5: {md5_hash}"
            else:
                print(message)
                return message

    # Cleanup below runs only when no return above fired (e.g. download failed
    # or the archive had no .pth).
    # Remove the folder the model zip was downloaded to.
    if os.path.exists(output_folder):
        shutil.rmtree(output_folder)

    # Remove the folder the downloaded model was unzipped into.
    if os.path.exists(unzips_path):
        shutil.rmtree(unzips_path)

    # Remove the folder the model's essential files were staged in.
    if os.path.exists(temp_folder_path):
        shutil.rmtree(temp_folder_path)

    # Remove the folder the model was re-compressed in for the HF upload.
    if os.path.exists(zips_path):
        shutil.rmtree(zips_path)
1216
-
1217
def uvr(input_url, output_path, model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0):
    # Download a YouTube audio track, then run UVR5 vocal/instrumental
    # separation over the input folder. Generator: yields a growing log
    # string after each processed file.
    def format_title(title):
        # Make the video title filesystem-friendly.
        formatted_title = title.replace(" ", "_")
        return formatted_title

    ydl_opts = {
        'no-windows-filenames': True,
        'restrict-filenames': True,
        'extract_audio': True,
        'format': 'bestaudio',
    }

    # First probe the metadata (download=False) to build the output template
    # from the title, then download with a fresh YoutubeDL instance.
    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        info_dict = ydl.extract_info(input_url, download=False)
        formatted_title = format_title(info_dict.get('title', 'default_title'))
        formatted_outtmpl = output_path + '/' + formatted_title + '.wav'
        ydl_opts['outtmpl'] = formatted_outtmpl
        ydl = yt_dlp.YoutubeDL(ydl_opts)
        ydl.download([input_url])

    infos = []
    try:
        # Strip stray quotes/whitespace pasted in from file managers.
        inp_root = inp_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
        save_root_vocal = (
            save_root_vocal.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
        )
        save_root_ins = (
            save_root_ins.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
        )
        # Choose the separation backend: MDXNet for the FoxJoy dereverb model,
        # otherwise one of the UVR5 .pth models (DeEcho variants use the
        # "_new" loader).
        if model_name == "onnx_dereverb_By_FoxJoy":
            pre_fun = MDXNetDereverb(15)
        else:
            func = _audio_pre_ if "DeEcho" not in model_name else _audio_pre_new
            pre_fun = func(
                agg=int(agg),
                model_path=os.path.join(weight_uvr5_root, model_name + ".pth"),
                device=config.device,
                is_half=config.is_half,
            )
        if inp_root != "":
            paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)]
        else:
            paths = [path.name for path in paths]
        for path in paths:
            inp_path = os.path.join(inp_root, path)
            need_reformat = 1
            done = 0
            try:
                # Fast path: stereo 44.1 kHz audio can be fed in directly.
                # NOTE(review): `ffmpeg` is not among the imports visible at
                # the top of this file — presumably ffmpeg-python imported
                # elsewhere; confirm.
                info = ffmpeg.probe(inp_path, cmd="ffprobe")
                if (
                    info["streams"][0]["channels"] == 2
                    and info["streams"][0]["sample_rate"] == "44100"
                ):
                    need_reformat = 0
                    pre_fun._path_audio_(
                        inp_path, save_root_ins, save_root_vocal, format0
                    )
                    done = 1
            except:
                need_reformat = 1
                traceback.print_exc()
            if need_reformat == 1:
                # Re-encode to stereo 44.1 kHz PCM in the temp dir first.
                # NOTE(review): `tmp` and `config` are defined outside this
                # view (module globals) — confirm before reuse.
                tmp_path = "%s/%s.reformatted.wav" % (tmp, os.path.basename(inp_path))
                os.system(
                    "ffmpeg -i %s -vn -acodec pcm_s16le -ac 2 -ar 44100 %s -y"
                    % (inp_path, tmp_path)
                )
                inp_path = tmp_path
            try:
                if done == 0:
                    pre_fun._path_audio_(
                        inp_path, save_root_ins, save_root_vocal, format0
                    )
                infos.append("%s->Success" % (os.path.basename(inp_path)))
                yield "\n".join(infos)
            except:
                infos.append(
                    "%s->%s" % (os.path.basename(inp_path), traceback.format_exc())
                )
                yield "\n".join(infos)
    except:
        infos.append(traceback.format_exc())
        yield "\n".join(infos)
    finally:
        # Free the model weights and GPU memory regardless of outcome.
        try:
            if model_name == "onnx_dereverb_By_FoxJoy":
                del pre_fun.pred.model
                del pre_fun.pred.model_
            else:
                del pre_fun.model
                del pre_fun
        except:
            traceback.print_exc()
        print("clean_empty_cache")
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
    yield "\n".join(infos)
1314
-
1315
-
1316
def publish_models():
    """Build the "publish a model to the community" form (Gradio widgets)."""
    with gr.Column():
        gr.Markdown("# Publicar un modelo en la comunidad")
        gr.Markdown("El modelo se va a verificar antes de publicarse. Importante que contenga el archivo **.pth** del modelo para que no sea rechazado.")

        # Form inputs.
        name_box = gr.inputs.Textbox(lines=1, label="Nombre descriptivo del modelo Ej: (Ben 10 [Latino] - RVC V2 - 250 Epoch)")
        url_box = gr.inputs.Textbox(lines=1, label="Enlace del modelo")
        version_radio = gr.Radio(
            label="Versión",
            choices=["RVC v1", "RVC v2"],
            value="RVC v1",
            interactive=True,
        )
        creator_box = gr.inputs.Textbox(lines=1, label="ID de discord del creador del modelo Ej: <@123455656>")
        submit_btn = gr.Button("Publicar modelo")
        result_box = gr.Textbox(label="Resultado", value="", max_lines=20)

        # Wire the submit button to the publishing pipeline.
        submit_btn.click(fn=publish_model_clicked, inputs=[name_box, url_box, version_radio, creator_box], outputs=result_box)
1334
-
1335
def download_model():
    """Build the inference-model download tab (URL in, status out)."""
    gr.Markdown(value="# " + i18n("下载模型"))
    gr.Markdown(value=i18n("它用于下载您的推理模型。"))
    with gr.Row():
        url_box = gr.Textbox(label=i18n("网址"))
    with gr.Row():
        status_box = gr.Textbox(label=i18n("地位"))
    with gr.Row():
        fetch_btn = gr.Button(i18n("下载"))
    # Download is handled by the shared model loader.
    fetch_btn.click(fn=load_downloaded_model, inputs=[url_box], outputs=[status_box])
1345
-
1346
def download_backup():
    """Build the training-backup download tab (URL in, status out)."""
    gr.Markdown(value="# " + i18n("下载备份"))
    gr.Markdown(value=i18n("它用于下载您的训练备份。"))
    with gr.Row():
        url_box = gr.Textbox(label=i18n("网址"))
    with gr.Row():
        status_box = gr.Textbox(label=i18n("地位"))
    with gr.Row():
        fetch_btn = gr.Button(i18n("下载"))
    # Backups go through their own loader (full checkpoints, not just weights).
    fetch_btn.click(fn=load_downloaded_backup, inputs=[url_box], outputs=[status_box])
1356
-
1357
def update_dataset_list(name):
    """Rescan ./datasets and refresh the dataset dropdown choices.

    The *name* argument is the triggering component's value (unused); Gradio
    change-handlers receive it regardless.
    """
    base = find_folder_parent(".", "pretrained")
    refreshed = [
        os.path.join(base, "datasets", entry)
        for entry in os.listdir("./datasets")
        if "." not in entry  # folders only: skip anything with an extension
    ]
    return gr.Dropdown.update(choices=refreshed)
1363
-
1364
def download_dataset(trainset_dir4):
    """Build the dataset download tab and keep *trainset_dir4* in sync.

    Args:
        trainset_dir4: The training-tab dataset dropdown to refresh once a
            download finishes.
    """
    gr.Markdown(value="# " + i18n("下载数据集"))
    gr.Markdown(value=i18n("下载兼容格式(.wav/.flac)的音频数据集以训练模型。"))
    with gr.Row():
        url_box = gr.Textbox(label=i18n("网址"))
    with gr.Row():
        status_box = gr.Textbox(label=i18n("地位"))
    with gr.Row():
        fetch_btn = gr.Button(i18n("下载"))
    fetch_btn.click(fn=load_dowloaded_dataset, inputs=[url_box], outputs=[status_box])
    # When the status text changes (download finished), rescan ./datasets.
    status_box.change(update_dataset_list, url_box, trainset_dir4)
1375
-
1376
def youtube_separator():
    # Build the "YouTube -> vocal separation" tab: a URL textbox plus a set
    # of hidden, pre-filled widgets feeding fixed arguments into uvr().
    input_url = gr.inputs.Textbox(label="Pega el enlace de youtube")
    # Where the YouTube audio is downloaded to (fixed, hidden from the user).
    output_path = gr.Textbox(
        label=i18n("输入待处理音频文件夹路径(去文件管理器地址栏拷就行了)"),
        value=os.path.abspath(os.getcwd()).replace('\\', '/') + "/yt_downloads/",
        visible=False,
    )
    # Folder uvr() reads its inputs from (same downloads folder, no trailing slash).
    dir_wav_input = gr.Textbox(
        label=i18n("输入待处理音频文件夹路径"),
        value=((os.getcwd()).replace('\\', '/') + "/yt_downloads"),
        visible=False,
    )
    # Separation model path, pinned to HP5_only_main_vocal.
    model_choose = gr.Textbox(
        value=os.path.abspath(os.getcwd()).replace('\\', '/') + "/uvr5_weights/HP5_only_main_vocal",
        visible=False,
    )
    # Vocals go to "audios", instrumentals to "opt".
    opt_vocal_root = gr.Textbox(
        label=i18n("指定输出主人声文件夹"), value="audios",
        visible=False,
    )
    opt_ins_root = gr.Textbox(
        label=i18n("指定输出非主人声文件夹"), value="opt",
        visible=False,
    )
    # Output format, fixed to wav.
    format0 = gr.Radio(
        label=i18n("导出文件格式"),
        choices=["wav", "flac", "mp3", "m4a"],
        value="wav",
        interactive=True,
        visible=False,
    )
    but2 = gr.Button(i18n("转换"))
    vc_output4 = gr.Textbox(label=i18n("输出信息"))
    # NOTE(review): uvr() takes 9 parameters but only 7 inputs are wired here
    # (no `paths` and no `agg`) — Gradio will call it with missing arguments;
    # confirm against the original file.
    but2.click(
        uvr,
        [
            input_url,
            output_path,
            model_choose,
            dir_wav_input,
            opt_vocal_root,
            opt_ins_root,
            format0,
        ],
        [vc_output4],
    )