Asankilp commited on
Commit
5f3874a
·
1 Parent(s): e67b018
Files changed (2) hide show
  1. app.py +1 -1
  2. app.py.bak +20 -29
app.py CHANGED
@@ -48,7 +48,7 @@ if not os.path.exists(os.path.join(DOWNLOAD_DIR,'config.json'):
48
  print('下载配置文件...')
49
  model_configfile = hf_hub_download(repo_id=MODEL_REPO,filename='config.json',local_dir=DOWNLOAD_DIR)
50
  if not os.path.exists(os.path.join('./pretrain/checkpoint_best_legacy_500.pt')):
51
- print('下载预训练模型文件...)
52
  checkpoint_pt = hf_hub_download(repo_id=CHECKPOINT_REPO,filename='checkpoint_best_legacy_500.pt',local_dir='./pretrain')
53
  class HParams():
54
  def __init__(self, **kwargs):
 
48
  print('下载配置文件...')
49
  model_configfile = hf_hub_download(repo_id=MODEL_REPO,filename='config.json',local_dir=DOWNLOAD_DIR)
50
  if not os.path.exists(os.path.join('./pretrain/checkpoint_best_legacy_500.pt')):
51
+ print('下载预训练模型文件...')
52
  checkpoint_pt = hf_hub_download(repo_id=CHECKPOINT_REPO,filename='checkpoint_best_legacy_500.pt',local_dir='./pretrain')
53
  class HParams():
54
  def __init__(self, **kwargs):
app.py.bak CHANGED
@@ -23,6 +23,7 @@ import soundfile
23
  from inference import infer_tool
24
  from inference import slicer
25
  from inference.infer_tool import Svc
 
26
 
27
  logging.getLogger('numba').setLevel(logging.WARNING)
28
  chunks_dict = infer_tool.read_temp("inference/chunks_temp.json")
@@ -36,8 +37,19 @@ logging.getLogger('multipart').setLevel(logging.WARNING)
36
  model = None
37
  spk = None
38
  debug = False
39
-
40
-
 
 
 
 
 
 
 
 
 
 
 
41
  class HParams():
42
  def __init__(self, **kwargs):
43
  for k, v in kwargs.items():
@@ -174,10 +186,11 @@ def vc_fn2(sid, input_audio, vc_transform, auto_f0, cluster_ratio, slice_db, noi
174
  models_info = [
175
  {
176
  "description": """
177
- gochiusa test
 
178
  """,
179
- "model_path": "./G_240000.pth",
180
- "config_path": "./config.json",
181
  }
182
  ]
183
 
@@ -240,11 +253,7 @@ if __name__ == "__main__":
240
  parser.add_argument('-ft', '--f0_filter_threshold', type=float, default=0.05,
241
  help='F0过滤阈值,只有使用crepe时有效. 数值范围从0-1. 降低该值可减少跑调概率,但会增加哑音')
242
  args = parser.parse_args()
243
- categories = ["Blue Archive"]
244
- others = {
245
- "PCR vits-fast-fineturning": "https://huggingface.co/spaces/FrankZxShen/vits-fast-finetuning-pcr",
246
- "Blue Archive vits-fast-fineturning": "https://huggingface.co/spaces/FrankZxShen/vits-fast-fineturning-models-ba",
247
- }
248
  for info in models_info:
249
  config_path = info['config_path']
250
  model_path = info['model_path']
@@ -339,24 +348,6 @@ if __name__ == "__main__":
339
  cl_num, lg_num, lgr_num, f0_predictor, enhancer_adaptive_key, cr_threshold], [vc_output1, vc_output2])
340
  vc_submit2.click(vc_fn2, [sid, vc_input3, vc_transform, auto_f0, cluster_ratio, slice_db, noise_scale, pad_seconds, cl_num,
341
  lg_num, lgr_num, text2tts, tts_rate, tts_voice, f0_predictor, enhancer_adaptive_key, cr_threshold], [vc_output1, vc_output2])
342
- # gr.Examples(
343
- # examples=example,
344
- # inputs=[textbox, char_dropdown, language_dropdown,
345
- # duration_slider, symbol_input],
346
- # outputs=[text_output, audio_output],
347
- # fn=tts_fn
348
- # )
349
- for category, link in others.items():
350
- with gr.TabItem(category):
351
- gr.Markdown(
352
- f'''
353
- <center>
354
- <h2>Click to Go</h2>
355
- <a href="{link}">
356
- <img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-xl-dark.svg"
357
- </a>
358
- </center>
359
- '''
360
- )
361
 
362
  app.queue(concurrency_count=3).launch(show_api=False, share=args.share)
 
23
  from inference import infer_tool
24
  from inference import slicer
25
  from inference.infer_tool import Svc
26
+ from huggingface_hub import hf_hub_download
27
 
28
  logging.getLogger('numba').setLevel(logging.WARNING)
29
  chunks_dict = infer_tool.read_temp("inference/chunks_temp.json")
 
37
  model = None
38
  spk = None
39
  debug = False
40
+ MODEL_REPO = 'Asankilp/gochiusa-sovits4.0'
41
+ MODEL_NAME = 'G_240000.pth'
42
+ DOWNLOAD_DIR = './downloaded'
43
+ CHECKPOINT_REPO = 'WitchHuntTV/checkpoint_best_legacy_500.pt'
44
+ if not os.path.exists(os.path.join(DOWNLOAD_DIR,MODEL_NAME)):
45
+ print('下载模型文件...')
46
+ model_pth = hf_hub_download(repo_id=MODEL_REPO,filename=MODEL_NAME,local_dir=DOWNLOAD_DIR)
47
+ if not os.path.exists(os.path.join(DOWNLOAD_DIR,'config.json')):
48
+ print('下载配置文件...')
49
+ model_configfile = hf_hub_download(repo_id=MODEL_REPO,filename='config.json',local_dir=DOWNLOAD_DIR)
50
+ if not os.path.exists(os.path.join('./pretrain/checkpoint_best_legacy_500.pt')):
51
+ print('下载预训练模型文件...)
52
+ checkpoint_pt = hf_hub_download(repo_id=CHECKPOINT_REPO,filename='checkpoint_best_legacy_500.pt',local_dir='./pretrain')
53
  class HParams():
54
  def __init__(self, **kwargs):
55
  for k, v in kwargs.items():
 
186
  models_info = [
187
  {
188
  "description": """
189
+ 请问你今天要来点so-vits-svc 4.0模型吗?\n\n
190
+ * 模型尚处于半成品(
191
  """,
192
+ "model_path": os.path.join(DOWNLOAD_DIR,MODEL_NAME),
193
+ "config_path": os.path.join(DOWNLOAD_DIR,'config.json'),
194
  }
195
  ]
196
 
 
253
  parser.add_argument('-ft', '--f0_filter_threshold', type=float, default=0.05,
254
  help='F0过滤阈值,只有使用crepe时有效. 数值范围从0-1. 降低该值可减少跑调概率,但会增加哑音')
255
  args = parser.parse_args()
256
+ categories = ['Gochiusa']
 
 
 
 
257
  for info in models_info:
258
  config_path = info['config_path']
259
  model_path = info['model_path']
 
348
  cl_num, lg_num, lgr_num, f0_predictor, enhancer_adaptive_key, cr_threshold], [vc_output1, vc_output2])
349
  vc_submit2.click(vc_fn2, [sid, vc_input3, vc_transform, auto_f0, cluster_ratio, slice_db, noise_scale, pad_seconds, cl_num,
350
  lg_num, lgr_num, text2tts, tts_rate, tts_voice, f0_predictor, enhancer_adaptive_key, cr_threshold], [vc_output1, vc_output2])
351
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
352
 
353
  app.queue(concurrency_count=3).launch(show_api=False, share=args.share)