SaltProphet committed on
Commit
fddc909
·
verified ·
1 Parent(s): 6cf127e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +143 -16
app.py CHANGED
@@ -1,3 +1,7 @@
 
 
 
 
1
  import gradio as gr
2
  import os
3
  import shutil
@@ -11,30 +15,153 @@ import zipfile
11
  import tempfile
12
  import matplotlib.pyplot as plt
13
  import matplotlib
14
- matplotlib.use('Agg')
 
 
15
 
16
  def update_output_visibility(choice):
17
  if "2 Stems" in choice:
18
- return {
19
- vocals_output: gr.update(visible=True),
20
- drums_output: gr.update(visible=False),
21
- bass_output: gr.update(visible=False),
22
- other_output: gr.update(visible=True, label="Instrumental (No Vocals)")
23
- }
24
  elif "4 Stems" in choice:
25
- return {
26
- vocals_output: gr.update(visible=True),
27
- drums_output: gr.update(visible=True),
28
- bass_output: gr.update(visible=True),
29
- other_output: gr.update(visible=True, label="Other")
30
- }
31
 
32
  async def separate_stems(audio_file_path, stem_choice, progress=gr.Progress(track_tqdm=True)):
33
  if audio_file_path is None: raise gr.Error("No audio file uploaded!")
34
- progress(0, desc="Starting...")
35
  try:
36
- progress(0.05, desc="Preparing audio file...")
37
- original_filename_base = os.path.basename(audio_file_path).rsplit('.', 1)[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  stable_input_path = f"stable_input_{original_filename_base}.wav"
39
  shutil.copy(audio_file_path, stable_input_path)
40
 
 
1
# 1. Install all necessary libraries for the full application.
# NOTE: "!pip install ..." is IPython/Jupyter magic and is a SyntaxError in a
# plain .py file (it would prevent this whole module from importing). Install
# the dependencies from a terminal or a requirements.txt instead:
#   pip install gradio "demucs>=4.0.0" librosa soundfile matplotlib
3
+
4
+ # 2. Import libraries
5
  import gradio as gr
6
  import os
7
  import shutil
 
15
  import tempfile
16
  import matplotlib.pyplot as plt
17
  import matplotlib
18
+ matplotlib.use('Agg') # Use a non-interactive backend for plotting
19
+
20
+ # --- Helper/Processing Functions ---
21
 
22
def update_output_visibility(choice):
    """Adjust stem-player visibility and labels for the selected separation mode."""
    if "2 Stems" in choice:
        show_vocals, show_drums, show_bass = True, False, False
        other_label = "Instrumental (No Vocals)"
    elif "4 Stems" in choice:
        show_vocals, show_drums, show_bass = True, True, True
        other_label = "Other"
    else:
        # Unknown choice: emit no updates (matches the original fall-through).
        return None
    return {
        vocals_output: gr.update(visible=show_vocals),
        drums_output: gr.update(visible=show_drums),
        bass_output: gr.update(visible=show_bass),
        other_output: gr.update(visible=True, label=other_label),
    }
 
 
 
 
 
27
 
28
async def separate_stems(audio_file_path, stem_choice, progress=gr.Progress(track_tqdm=True)):
    """Run Demucs on the uploaded track and return stem file paths.

    Args:
        audio_file_path: filepath from the gr.Audio input (None if nothing uploaded).
        stem_choice: the radio selection; "2 Stems" triggers --two-stems=vocals.
        progress: Gradio progress tracker.

    Returns:
        (vocals_path, drums_path, bass_path, other_path) — each None if Demucs
        did not produce that file.

    Raises:
        gr.Error: on missing upload, Demucs failure, or missing output directory.
    """
    if audio_file_path is None:
        raise gr.Error("No audio file uploaded!")
    progress(0, desc="Starting...")
    await asyncio.sleep(0.1)  # yield so the UI can render the progress bar
    stable_input_path = None
    try:
        progress(0.05, desc="Preparing audio file...")
        original_filename_base = os.path.basename(audio_file_path).rsplit('.', 1)[0]
        stable_input_path = f"stable_input_{original_filename_base}.wav"
        shutil.copy(audio_file_path, stable_input_path)

        output_dir = "separated"
        if os.path.exists(output_dir):
            shutil.rmtree(output_dir)

        # Build the command as an argument list and run WITHOUT a shell, so
        # special characters in the uploaded filename cannot be interpreted
        # by a shell (the original interpolated the path into a shell string).
        args = ["python3", "-m", "demucs"]
        if "2 Stems" in stem_choice:
            args.append("--two-stems=vocals")
        args += ["-o", output_dir, stable_input_path]

        progress(0.2, desc="Running Demucs...")
        process = await asyncio.create_subprocess_exec(
            *args,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        _stdout, stderr = await process.communicate()
        if process.returncode != 0:
            # Surface Demucs's own error output instead of a generic message.
            detail = stderr.decode(errors="ignore")[-500:]
            raise gr.Error(f"Demucs failed to run. {detail}")

        progress(0.8, desc="Locating separated stem files...")
        stable_filename_base = os.path.basename(stable_input_path).rsplit('.', 1)[0]
        # Demucs nests output under <output_dir>/<model_name>/<track_name>/.
        model_folder_name = next(os.walk(output_dir))[1][0]
        stems_path = os.path.join(output_dir, model_folder_name, stable_filename_base)
        if not os.path.exists(stems_path):
            raise gr.Error("Demucs finished, but the output directory was not found!")

        def _stem_or_none(filename):
            # Return the stem's path only if Demucs actually wrote it.
            path = os.path.join(stems_path, filename)
            return path if os.path.exists(path) else None

        vocals_path = _stem_or_none("vocals.wav")
        drums_path = _stem_or_none("drums.wav")
        bass_path = _stem_or_none("bass.wav")
        # In 2-stem mode the non-vocal mix is named "no_vocals.wav".
        other_path = _stem_or_none("no_vocals.wav" if "2 Stems" in stem_choice else "other.wav")
        return vocals_path, drums_path, bass_path, other_path
    except Exception as e:
        print(f"An error occurred: {e}")
        raise gr.Error(str(e))
    finally:
        # Always remove the temporary copy — the original deleted it only on
        # the success path, leaking a file per failed run.
        if stable_input_path and os.path.exists(stable_input_path):
            os.remove(stable_input_path)
46
+
47
def visualize_slices(stem_audio_data, progress=gr.Progress(track_tqdm=True)):
    """Plot the stem's waveform with detected transient (onset) markers.

    Returns (matplotlib Figure, onset times in seconds, the input audio) so the
    UI can show the plot and stash the active stem + slice points in gr.State.
    """
    if stem_audio_data is None:
        gr.Warning("This stem is empty. Cannot visualize."); return None, None, None
    # Gradio Audio yields (sample_rate, integer ndarray); convert to float for librosa.
    sample_rate, y_int = stem_audio_data; y = librosa.util.buf_to_float(y_int);
    # Onset detection wants mono; y.T puts channels first for librosa.to_mono.
    # delta=0.05 is the detection sensitivity threshold.
    progress(0.3, desc="Finding transients..."); onset_frames = librosa.onset.onset_detect(y=librosa.to_mono(y.T) if y.ndim > 1 else y, sr=sample_rate, wait=1, pre_avg=1, post_avg=1, post_max=1, delta=0.05)
    onset_times = librosa.frames_to_time(onset_frames, sr=sample_rate)
    progress(0.7, desc="Generating waveform plot...")
    # Dark figure/axes colors to match the app theme.
    fig, ax = plt.subplots(figsize=(10, 3)); fig.patch.set_facecolor('#1f2937'); ax.set_facecolor('#111827')
    # NOTE(review): librosa.display.waveshow requires `import librosa.display`
    # somewhere in this file — `import librosa` alone may not expose the
    # submodule; confirm against the import block.
    librosa.display.waveshow(y, sr=sample_rate, ax=ax, color='#32f6ff', alpha=0.7)
    # Mark each detected transient with a dashed red vertical line.
    for t in onset_times:
        ax.axvline(x=t, color='#ff3b3b', linestyle='--', linewidth=1)
    ax.tick_params(colors='gray'); ax.xaxis.label.set_color('gray'); ax.yaxis.label.set_color('gray'); ax.set_xlabel("Time (s)"); ax.set_ylabel("Amplitude"); ax.set_title("Detected Slices", color='white'); plt.tight_layout()
    progress(1, desc="Done!")
    return fig, onset_times, stem_audio_data
61
+
62
def preview_slice(active_stem_audio, onset_times, evt: gr.SelectData):
    """Return the audio segment between the two transients bracketing the click."""
    if active_stem_audio is None or onset_times is None:
        return None
    sample_rate, samples = active_stem_audio
    clicked_time = evt.index[0]
    # Default to the whole clip, then tighten the bounds by walking the onsets:
    # the last onset at-or-before the click starts the slice, the first onset
    # after the click ends it.
    seg_start = 0
    seg_end = len(samples) / sample_rate
    for onset in onset_times:
        if onset > clicked_time:
            seg_end = onset
            break
        seg_start = onset
    first_sample = librosa.time_to_samples(seg_start, sr=sample_rate)
    last_sample = librosa.time_to_samples(seg_end, sr=sample_rate)
    return (sample_rate, samples[first_sample:last_sample])
74
+
75
+ # --- NEW FUNCTIONS FOR MANAGING THE PACK ---
76
def add_slice_to_pack(current_preview, selection_list):
    """Append the slice currently in the preview player to the pack selection."""
    if current_preview is not None:
        selection_list.append(current_preview)
        gr.Info(f"Slice added! You now have {len(selection_list)} slices in your pack.")
    else:
        gr.Warning("No slice is being previewed to add.")
    return selection_list
83
+
84
def clear_selection():
    """Reset the slice selection to empty and notify the user."""
    gr.Info("Selection cleared.")
    emptied = []
    return emptied
87
+
88
def create_final_pack(selection_list, progress=gr.Progress(track_tqdm=True)):
    """Write every selected slice as a 16-bit WAV and bundle them into a zip.

    Args:
        selection_list: list of (sample_rate, ndarray) tuples from the preview player.
        progress: Gradio progress tracker.

    Returns:
        (zip_path, gr.update(visible=True)) — path of the pack and an update
        that reveals the download widget.

    Raises:
        gr.Error: if no slices were selected.
    """
    if not selection_list:
        raise gr.Error("No slices have been selected to create a pack!")

    progress(0, desc="Preparing final pack...")
    zip_path = "Custom_Loop_Pack.zip"
    temp_dir = tempfile.mkdtemp()
    try:
        with zipfile.ZipFile(zip_path, 'w') as zf:
            for i, audio_data in enumerate(selection_list):
                progress(i / len(selection_list), desc=f"Adding slice {i+1} to pack...")
                sample_rate, y = audio_data
                filename = os.path.join(temp_dir, f"slice_{i+1:03d}.wav")
                sf.write(filename, y, sample_rate, subtype='PCM_16')
                # Store flat (basename only) inside the archive.
                zf.write(filename, os.path.basename(filename))
    finally:
        # Always remove the scratch directory — the original leaked it when a
        # write failed mid-loop.
        shutil.rmtree(temp_dir, ignore_errors=True)

    progress(1, desc="Pack Ready!")
    return zip_path, gr.update(visible=True)
107
+
108
# --- Create the full Gradio Interface ---
with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="red")) as demo:
    gr.Markdown("# 🎵 Loop Architect")

    # Per-session state: detected onset times (seconds), the audio of the stem
    # currently shown in the slice editor, and the slices chosen for the pack.
    onset_times_state = gr.State(value=None)
    active_stem_state = gr.State(value=None)
    selected_slices_state = gr.State(value=[])

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### 1. Separate Stems"); audio_input = gr.Audio(type="filepath", label="Upload a Track")
            stem_options = gr.Radio(["4 Stems (Vocals, Drums, Bass, Other)", "2 Stems (Vocals + Instrumental)"], label="Separation Type", value="4 Stems (Vocals, Drums, Bass, Other)")
            submit_button = gr.Button("Separate Stems")
            with gr.Accordion("Slicing Options", open=True):
                gr.Markdown("These options are for the final pack creation.")
                # NOTE(review): neither of these two controls is wired to any
                # event listener below — confirm whether they should feed
                # visualize_slices (the slider maps naturally to onset_detect's
                # delta parameter).
                loop_options_radio = gr.Radio(["One-Shots (All Transients)", "4 Bar Loops", "8 Bar Loops"], label="Loop Type (for 'Slice All')", value="One-Shots (All Transients)")
                sensitivity_slider = gr.Slider(minimum=0.01, maximum=0.5, value=0.05, step=0.01, label="One-Shot Sensitivity")

        with gr.Column(scale=2):
            # One row per stem: audio player + a button to open it in the editor.
            with gr.Accordion("Separated Stems", open=True):
                with gr.Row(): vocals_output = gr.Audio(label="Vocals", scale=4); slice_vocals_btn = gr.Button("Visualize Slices", scale=1)
                with gr.Row(): drums_output = gr.Audio(label="Drums", scale=4); slice_drums_btn = gr.Button("Visualize Slices", scale=1)
                with gr.Row(): bass_output = gr.Audio(label="Bass", scale=4); slice_bass_btn = gr.Button("Visualize Slices", scale=1)
                with gr.Row(): other_output = gr.Audio(label="Other / Instrumental", scale=4); slice_other_btn = gr.Button("Visualize Slices", scale=1)

            gr.Markdown("### Slice Editor")
            slice_plot = gr.Plot(label="Click a region on the waveform to preview a slice")
            with gr.Row():
                preview_player = gr.Audio(label="Slice Preview", scale=3)
                add_to_pack_btn = gr.Button("Add to Pack", variant="primary", scale=1)

            gr.Markdown("### Your Custom Pack")
            with gr.Row():
                create_pack_btn = gr.Button("Create Pack from Selection", variant="primary")
                clear_selection_btn = gr.Button("Clear Selection")
            # NOTE(review): Gallery expects image-like items, but the state list
            # holds (sample_rate, ndarray) audio tuples — verify this renders
            # rather than erroring when the gallery updates.
            selected_gallery = gr.Gallery(label="Selected Slices", columns=8, object_fit="contain", height="auto")
            download_zip_file = gr.File(label="Download Your Custom Pack", visible=False)

    # --- Define Event Listeners ---
    submit_button.click(fn=separate_stems, inputs=[audio_input, stem_options], outputs=[vocals_output, drums_output, bass_output, other_output])
    stem_options.change(fn=update_output_visibility, inputs=stem_options, outputs=[vocals_output, drums_output, bass_output, other_output])

    # All four "Visualize Slices" buttons share one handler; they differ only
    # in which stem player feeds it.
    slice_vocals_btn.click(fn=visualize_slices, inputs=vocals_output, outputs=[slice_plot, onset_times_state, active_stem_state])
    slice_drums_btn.click(fn=visualize_slices, inputs=drums_output, outputs=[slice_plot, onset_times_state, active_stem_state])
    slice_bass_btn.click(fn=visualize_slices, inputs=bass_output, outputs=[slice_plot, onset_times_state, active_stem_state])
    slice_other_btn.click(fn=visualize_slices, inputs=other_output, outputs=[slice_plot, onset_times_state, active_stem_state])

    # Clicking the waveform previews the slice under the cursor.
    slice_plot.select(fn=preview_slice, inputs=[active_stem_state, onset_times_state], outputs=preview_player)

    add_to_pack_btn.click(fn=add_slice_to_pack, inputs=[preview_player, selected_slices_state], outputs=selected_slices_state)
    selected_slices_state.change(fn=lambda x: x, inputs=selected_slices_state, outputs=selected_gallery) # Update gallery when state changes
    clear_selection_btn.click(fn=clear_selection, outputs=selected_slices_state)
    # NOTE(review): create_final_pack returns (zip_path, visibility update) and
    # both are routed to download_zip_file — confirm this double-binding of the
    # same component behaves as intended in this Gradio version.
    create_pack_btn.click(fn=create_final_pack, inputs=selected_slices_state, outputs=[download_zip_file, download_zip_file])

# --- Launch the UI ---
demo.launch(debug=True)
165
  stable_input_path = f"stable_input_{original_filename_base}.wav"
166
  shutil.copy(audio_file_path, stable_input_path)
167