lllindsey0615 committed on
Commit
5527c63
·
1 Parent(s): ef3daf0

correct app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -54
app.py CHANGED
@@ -8,6 +8,9 @@ from typing import Dict
8
  from pyharp import *
9
 
10
 
 
 
 
11
 
12
  DEMUX_MODELS = ["mdx_extra_q", "mdx_extra", "htdemucs", "mdx_q"]
13
 
@@ -19,8 +22,10 @@ STEM_CHOICES = {
19
  "Instrumental (No Vocals)": "instrumental"
20
  }
21
 
 
 
 
22
 
23
- # Stem Separation
24
  def separate_stem(audio_file_path: str, model_name: str, stem_choice: str) -> AudioSignal:
25
  model = pretrained.get_model(model_name)
26
  model.to('cuda' if torch.cuda.is_available() else 'cpu')
@@ -53,78 +58,60 @@ def separate_stem(audio_file_path: str, model_name: str, stem_choice: str) -> Au
53
 
54
  return AudioSignal(stem.cpu().numpy().astype('float32'), sample_rate=sr)
55
 
56
- # Label & Metadata Handling
57
- def generate_dummy_metadata(stem_choice: str, username: str) -> Dict:
58
- dummy_label = AudioLabel(
59
- t=0.0,
60
- label=stem_choice,
61
- amplitude=0.7,
62
- description=f"Start of {stem_choice} stem",
63
- color=AudioLabel.hex_color_to_int("#FF5733")
64
- )
65
-
66
- label_list = LabelList(labels=[dummy_label])
67
- label_list.meta["user"] = username
68
-
69
- return {
70
- "meta": label_list.meta,
71
- "labels": [vars(label) for label in label_list.labels]
72
- }
73
-
74
- import json
75
-
76
- def process_fn_stem(audio_file_path, demucs_model, stem_choice, profile=None):
77
- username = profile.username if profile else "anonymous"
78
 
 
79
  stem_signal = separate_stem(audio_file_path, model_name=demucs_model, stem_choice=stem_choice)
80
  stem_filename = f"{stem_choice.lower().replace(' ', '_')}.wav"
81
  stem_path = save_audio(stem_signal, stem_filename)
82
 
83
- metadata = {
84
- "user": username,
85
  "stem": stem_choice,
86
- "amplitude": 0.7,
87
- "description": f"Start of {stem_choice}"
88
  }
89
 
90
- return stem_path, json.dumps(metadata, indent=2)
91
-
 
92
 
 
 
 
 
 
 
93
 
 
 
 
94
 
95
- # Gradio Interface
96
  with gr.Blocks() as demo:
97
  gr.Markdown("# 🎧 Demucs Stem Separator")
98
  gr.Markdown("Log in with your Hugging Face account to separate music into stems.")
99
 
100
  gr.LoginButton()
101
 
102
- with gr.Row():
103
- model_dropdown = gr.Dropdown(
104
- label="Select Demucs Model",
105
- choices=DEMUX_MODELS,
106
- value="mdx_extra_q"
107
- )
108
- stem_dropdown = gr.Dropdown(
109
- label="Select Stem to Separate",
110
- choices=list(STEM_CHOICES.keys()),
111
- value="Vocals"
112
- )
113
-
114
- audio_input = gr.Audio(label="Upload Audio", type="filepath")
115
-
116
- output_file = gr.File(label="Separated Stem (.wav)")
117
- output_text = gr.Textbox(label="Metadata (Debug-safe JSON)", lines=6, max_lines=10)
118
-
119
- run_button = gr.Button("Run Separation")
120
-
121
- run_button.click(
122
- fn=process_fn_stem,
123
- inputs=[audio_input, model_dropdown, stem_dropdown],
124
- outputs=[output_file, output_text]
125
  )
126
 
127
- demo.queue()
 
 
 
 
128
 
 
 
 
 
 
129
 
 
130
  demo.launch(show_error=True)
 
8
  from pyharp import *
9
 
10
 
11
+ # ---------------------------
12
+ # Config
13
+ # ---------------------------
14
 
15
  DEMUX_MODELS = ["mdx_extra_q", "mdx_extra", "htdemucs", "mdx_q"]
16
 
 
22
  "Instrumental (No Vocals)": "instrumental"
23
  }
24
 
25
+ # ---------------------------
26
+ # Stem Separation Logic
27
+ # ---------------------------
28
 
 
29
  def separate_stem(audio_file_path: str, model_name: str, stem_choice: str) -> AudioSignal:
30
  model = pretrained.get_model(model_name)
31
  model.to('cuda' if torch.cuda.is_available() else 'cpu')
 
58
 
59
  return AudioSignal(stem.cpu().numpy().astype('float32'), sample_rate=sr)
60
 
61
+ # ---------------------------
62
+ # Gradio Callback Function
63
+ # ---------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
 
65
def process_fn_stem(audio_file_path: str, demucs_model: str, stem_choice: str) -> Dict:
    """Gradio/HARP callback: separate one stem and persist it to disk.

    Args:
        audio_file_path: Path of the uploaded audio file.
        demucs_model: Name of the Demucs model to load (one of DEMUX_MODELS).
        stem_choice: Human-readable stem label selected in the UI.

    Returns:
        Dict with the saved stem's file path plus display metadata
        (stem name, description, amplitude) for the HARP host.
    """
    signal = separate_stem(audio_file_path, model_name=demucs_model, stem_choice=stem_choice)
    out_name = f"{stem_choice.lower().replace(' ', '_')}.wav"
    out_path = save_audio(signal, out_name)

    result = {
        "file": out_path,
        "stem": stem_choice,
        "description": f"Separated {stem_choice} stem",
        "amplitude": 0.7,
    }
    return result
76
 
77
# ---------------------------
# Model Card (for HARP)
# ---------------------------

# Static metadata shown by the HARP plugin host for this endpoint.
# Authors credit the original Demucs paper authors.
model_card = ModelCard(
    name="Demucs Stem Separator",
    description="Uses Demucs to separate a music track into a selected stem.",
    author="Alexandre Défossez, Nicolas Usunier, Léon Bottou, Francis Bach",
    tags=["demucs", "source-separation", "pyharp", "stems"],
)
87
 
88
# ---------------------------
# Gradio Interface (Blocks)
# ---------------------------

with gr.Blocks() as demo:
    gr.Markdown("# 🎧 Demucs Stem Separator")
    gr.Markdown("Log in with your Hugging Face account to separate music into stems.")

    # Hugging Face OAuth login widget.
    gr.LoginButton()

    # Model selector — choices come from the DEMUX_MODELS constant.
    dropdown_model = gr.Dropdown(
        label="Select Demucs Model",
        choices=DEMUX_MODELS,
        value="mdx_extra_q",
    )

    # Stem selector — human-readable labels from STEM_CHOICES.
    dropdown_stem = gr.Dropdown(
        label="Select Stem to Separate",
        choices=list(STEM_CHOICES.keys()),
        value="Vocals",
    )

    # pyharp wires the audio input/output and the process callback
    # into a HARP-compatible endpoint.
    app = build_endpoint(
        model_card=model_card,
        components=[dropdown_model, dropdown_stem],
        process_fn=process_fn_stem,
    )

demo.queue()
demo.launch(show_error=True)