viewfinder-annn committed
Commit 50ec95a · verified · Parent: 179f253

Update app.py

Download the pretrained model from huggingface

Files changed (1):
  app.py (+22 -25)
app.py CHANGED
@@ -8,20 +8,24 @@ import numpy as np
 import soundfile as sf
 import gradio as gr
 import time
+from huggingface_hub import snapshot_download
+
+repo_id = "amphion/anyaccomp"
+local_dir = "anyaccomp_model"
+
+print(f"Downloading model files from {repo_id}...")
+model_dir = snapshot_download(repo_id=repo_id, local_dir=local_dir)
+print(f"Model files downloaded to: {model_dir}")
+
+CFG_PATH = os.path.join(model_dir, "config/flow_matching.json")
+CHECKPOINT_PATH = os.path.join(model_dir, "pretrained/flow_matching")
+VOCODER_CHECKPOINT_PATH = os.path.join(model_dir, "pretrained/vocoder")
+VOCODER_CFG_PATH = os.path.join(model_dir, "config/vocoder.json")
+
+base_dir = os.path.dirname(os.path.abspath(__file__))
+INFER_DST = os.path.join(base_dir, "output_gradio")
+EXAMPLE_DIR = os.path.join(base_dir, "example/gradio")
 
-base_dir = os.path.dirname(
-    os.path.abspath(__file__)
-)
-
-CFG_PATH = os.path.join(base_dir, "./config/flow_matching.json")
-CHECKPOINT_PATH = os.path.join(
-    base_dir, "./pretrained/flow_matching"
-)
-VOCODER_CHECKPOINT_PATH = os.path.join(
-    base_dir, "./pretrained/vocoder"
-)
-VOCODER_CFG_PATH = os.path.join(base_dir, "./config/vocoder.json")
-INFER_DST = os.path.join(base_dir, "./example/output_gradio")
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
 os.makedirs(INFER_DST, exist_ok=True)
@@ -135,15 +139,10 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         sources=["upload", "microphone"],
     )
 
-    example1_path = os.path.join(
-        base_dir, "./example/gradio/example1.mp3"
-    )
-    example2_path = os.path.join(
-        base_dir, "./example/gradio/example2.wav"
-    )
-    example3_path = os.path.join(
-        base_dir, "./example/gradio/example3.wav"
-    )
+    example1_path = os.path.join(EXAMPLE_DIR, "example1.mp3")
+    example2_path = os.path.join(EXAMPLE_DIR, "example2.wav")
+    example3_path = os.path.join(EXAMPLE_DIR, "example3.wav")
+
     gr.Examples(
         examples=[[example1_path], [example2_path], [example3_path]],
         inputs=[vocal_input],
@@ -185,13 +184,11 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     submit_btn.click(
        fn=sing2song_inference,
        inputs=[vocal_input, n_timesteps_slider, cfg_slider, seed_input],
-       # The function will now update the status text as its third output
        outputs=[accompaniment_output, mixture_output, status_text],
    )
 
    random_seed_btn.click(fn=randomize_seed, inputs=None, outputs=seed_input)
 
-demo.queue()
 
 if __name__ == "__main__":
-    demo.launch(server_name="0.0.0.0", server_port=8091)
+    demo.launch()
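
The change boils down to one snapshot_download call at startup. A minimal standalone sketch of that step, using the same repo id and directory layout as the diff above; the existence check at the end is an illustrative addition, not part of app.py:

import os

from huggingface_hub import snapshot_download

# Fetch a local copy of the model repository; files already present in
# local_dir should be reused rather than downloaded again.
model_dir = snapshot_download(repo_id="amphion/anyaccomp", local_dir="anyaccomp_model")

# Illustrative sanity check (not in the commit): confirm the paths that
# app.py builds from model_dir actually exist before launching the demo.
for rel in (
    "config/flow_matching.json",
    "config/vocoder.json",
    "pretrained/flow_matching",
    "pretrained/vocoder",
):
    path = os.path.join(model_dir, rel)
    print("ok     " if os.path.exists(path) else "MISSING", path)

Dropping the explicit server_name/server_port in demo.launch() leaves the bind address and port to Gradio's defaults; on Hugging Face Spaces these are typically supplied via the GRADIO_SERVER_NAME and GRADIO_SERVER_PORT environment variables, so hard-coding them is unnecessary there.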