Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -4,16 +4,14 @@ import numpy as np
|
|
| 4 |
import tempfile
|
| 5 |
import os
|
| 6 |
import noisereduce as nr
|
| 7 |
-
|
| 8 |
-
import subprocess
|
| 9 |
import torch
|
| 10 |
from demucs import pretrained
|
| 11 |
from demucs.apply import apply_model
|
| 12 |
import torchaudio
|
| 13 |
-
import torch
|
| 14 |
from pathlib import Path
|
| 15 |
|
| 16 |
-
# Helper
|
| 17 |
def audiosegment_to_array(audio):
|
| 18 |
return np.array(audio.get_array_of_samples()), audio.frame_rate
|
| 19 |
|
|
@@ -25,7 +23,7 @@ def array_to_audiosegment(samples, frame_rate, channels=1):
|
|
| 25 |
channels=channels
|
| 26 |
)
|
| 27 |
|
| 28 |
-
# Effect Functions
|
| 29 |
def apply_normalize(audio):
|
| 30 |
return audio.normalize()
|
| 31 |
|
|
@@ -71,33 +69,7 @@ def apply_bass_boost(audio, gain=10):
|
|
| 71 |
def apply_treble_boost(audio, gain=10):
|
| 72 |
return audio.high_pass_filter(4000).apply_gain(gain)
|
| 73 |
|
| 74 |
-
# Vocal Isolation
|
| 75 |
-
def apply_vocal_isolation(audio_path):
|
| 76 |
-
model = pretrained.get_model(name='htdemucs')
|
| 77 |
-
wav = load_track_local(audio_path, model.samplerate, channels=2) # stereo
|
| 78 |
-
ref = wav.mean(0)
|
| 79 |
-
wav -= ref[:, None]
|
| 80 |
-
sources = apply_model(model, wav[None])[0]
|
| 81 |
-
wav += ref[:, None]
|
| 82 |
-
|
| 83 |
-
# Get vocals (index 3)
|
| 84 |
-
vocal_track = sources[3].cpu()
|
| 85 |
-
|
| 86 |
-
out_path = os.path.join(tempfile.gettempdir(), "vocals.wav")
|
| 87 |
-
save_track(out_path, vocal_track, model.samplerate)
|
| 88 |
-
return out_path
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
# Local copy of helper functions from demucs
|
| 92 |
-
def load_track(track, sample_rate, mono=True):
|
| 93 |
-
wav, sr = torchaudio.load(str(track))
|
| 94 |
-
if sr != sample_rate:
|
| 95 |
-
wav = torchaudio.functional.resample(wav, sr, sample_rate)
|
| 96 |
-
if mono and wav.shape[0] == 2:
|
| 97 |
-
wav = wav.mean(0)
|
| 98 |
-
return wav
|
| 99 |
-
|
| 100 |
-
|
| 101 |
def load_track_local(path, sample_rate, channels=2):
|
| 102 |
sig, rate = torchaudio.load(path)
|
| 103 |
if rate != sample_rate:
|
|
@@ -106,16 +78,38 @@ def load_track_local(path, sample_rate, channels=2):
|
|
| 106 |
sig = sig.mean(0)
|
| 107 |
return sig
|
| 108 |
|
| 109 |
-
|
| 110 |
def save_track(path, wav, sample_rate):
|
| 111 |
path = Path(path)
|
| 112 |
torchaudio.save(str(path), wav, sample_rate)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 113 |
return out_path
|
| 114 |
|
| 115 |
-
#
|
| 116 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 117 |
audio = AudioSegment.from_file(audio_file)
|
| 118 |
-
original = audio
|
| 119 |
|
| 120 |
effect_map = {
|
| 121 |
"Noise Reduction": apply_noise_reduction,
|
|
@@ -129,7 +123,9 @@ def process_audio(audio_file, effects, isolate_vocals):
|
|
| 129 |
"Normalize": apply_normalize,
|
| 130 |
}
|
| 131 |
|
| 132 |
-
|
|
|
|
|
|
|
| 133 |
if effect_name in effect_map:
|
| 134 |
audio = effect_map[effect_name](audio)
|
| 135 |
|
|
@@ -142,11 +138,12 @@ def process_audio(audio_file, effects, isolate_vocals):
|
|
| 142 |
else:
|
| 143 |
final_audio = audio
|
| 144 |
|
| 145 |
-
|
| 146 |
-
|
|
|
|
| 147 |
|
| 148 |
-
# Gradio Interface
|
| 149 |
-
|
| 150 |
"Noise Reduction",
|
| 151 |
"Compress Dynamic Range",
|
| 152 |
"Add Reverb",
|
|
@@ -158,16 +155,19 @@ effect_choices = [
|
|
| 158 |
"Normalize"
|
| 159 |
]
|
| 160 |
|
|
|
|
|
|
|
| 161 |
interface = gr.Interface(
|
| 162 |
fn=process_audio,
|
| 163 |
inputs=[
|
| 164 |
gr.Audio(label="Upload Audio", type="filepath"),
|
| 165 |
-
gr.CheckboxGroup(choices=
|
| 166 |
-
gr.Checkbox(label="Isolate Vocals After Effects")
|
|
|
|
| 167 |
],
|
| 168 |
-
outputs=gr.Audio(label="Processed Audio", type="filepath"),
|
| 169 |
-
title="
|
| 170 |
-
description="Apply multiple effects
|
| 171 |
allow_flagging="never"
|
| 172 |
)
|
| 173 |
|
|
|
|
| 4 |
import tempfile
|
| 5 |
import os
|
| 6 |
import noisereduce as nr
|
| 7 |
+
import json
|
|
|
|
| 8 |
import torch
|
| 9 |
from demucs import pretrained
|
| 10 |
from demucs.apply import apply_model
|
| 11 |
import torchaudio
|
|
|
|
| 12 |
from pathlib import Path
|
| 13 |
|
| 14 |
+
# === Helper Functions ===
|
| 15 |
def audiosegment_to_array(audio):
    """Convert a pydub AudioSegment into (samples, frame_rate).

    Returns the raw interleaved samples as a numpy array together with
    the segment's frame rate in Hz.
    """
    raw_samples = audio.get_array_of_samples()
    return np.array(raw_samples), audio.frame_rate
|
| 17 |
|
|
|
|
| 23 |
channels=channels
|
| 24 |
)
|
| 25 |
|
| 26 |
+
# === Effect Functions ===
|
| 27 |
def apply_normalize(audio):
    """Return a peak-normalized copy of *audio* (pydub AudioSegment)."""
    normalized = audio.normalize()
    return normalized
|
| 29 |
|
|
|
|
| 69 |
def apply_treble_boost(audio, gain=10):
    """Boost treble: keep content above 4 kHz and raise it by *gain* dB."""
    highs = audio.high_pass_filter(4000)
    return highs.apply_gain(gain)
|
| 71 |
|
| 72 |
+
# === Vocal Isolation Helpers ===
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 73 |
def load_track_local(path, sample_rate, channels=2):
|
| 74 |
sig, rate = torchaudio.load(path)
|
| 75 |
if rate != sample_rate:
|
|
|
|
| 78 |
sig = sig.mean(0)
|
| 79 |
return sig
|
| 80 |
|
|
|
|
| 81 |
def save_track(path, wav, sample_rate):
    """Write the tensor *wav* to *path* at *sample_rate* via torchaudio.

    NOTE(review): torchaudio.save expects a channel-first (channels,
    samples) tensor — callers here pass Demucs output; confirm layout.
    """
    target = Path(path)
    torchaudio.save(str(target), wav, sample_rate)
|
| 84 |
+
|
| 85 |
+
def apply_vocal_isolation(audio_path):
    """Separate the vocal stem from *audio_path* with Demucs (htdemucs).

    Loads the track at the model's sample rate, runs source separation,
    writes the vocals stem to a temporary WAV file and returns its path.
    """
    model = pretrained.get_model(name='htdemucs')
    mix = load_track_local(audio_path, model.samplerate, channels=2)

    # Center the mix before separation and restore it afterwards.
    # NOTE(review): mix appears to be (channels, samples), so
    # ref[:, None] is (samples, 1) — verify this broadcast matches the
    # intended per-channel centering.
    ref = mix.mean(0)
    mix -= ref[:, None]
    stems = apply_model(model, mix[None])[0]
    mix += ref[:, None]

    vocals = stems[3].cpu()  # index 3 = vocals
    destination = os.path.join(tempfile.gettempdir(), "vocals.wav")
    save_track(destination, vocals, model.samplerate)
    return destination
|
| 97 |
|
| 98 |
+
# === Preset Loader ===
|
| 99 |
+
def load_presets(preset_dir="presets"):
    """Load effect presets from JSON files in *preset_dir*.

    Each preset file must contain {"name": ..., "effects": [...]}.
    Returns a mapping of preset name -> list of effect names.

    Returns an empty dict when the directory does not exist: this
    function is called at module import time, so a missing presets
    folder must not crash the whole app.
    """
    if not os.path.isdir(preset_dir):
        return {}
    presets = {}
    for fname in os.listdir(preset_dir):
        if not fname.endswith(".json"):
            continue
        with open(os.path.join(preset_dir, fname)) as infile:
            data = json.load(infile)
        # A preset missing its keys is a packaging error; let the
        # KeyError surface rather than silently dropping the preset.
        presets[data["name"]] = data["effects"]
    return presets
|
| 107 |
+
|
| 108 |
+
preset_choices = load_presets()
|
| 109 |
+
|
| 110 |
+
# === Main Processing Function ===
|
| 111 |
+
def process_audio(audio_file, selected_effects, isolate_vocals, preset_name):
|
| 112 |
audio = AudioSegment.from_file(audio_file)
|
|
|
|
| 113 |
|
| 114 |
effect_map = {
|
| 115 |
"Noise Reduction": apply_noise_reduction,
|
|
|
|
| 123 |
"Normalize": apply_normalize,
|
| 124 |
}
|
| 125 |
|
| 126 |
+
# Apply selected preset or custom effects
|
| 127 |
+
effects_to_apply = preset_choices.get(preset_name, selected_effects)
|
| 128 |
+
for effect_name in effects_to_apply:
|
| 129 |
if effect_name in effect_map:
|
| 130 |
audio = effect_map[effect_name](audio)
|
| 131 |
|
|
|
|
| 138 |
else:
|
| 139 |
final_audio = audio
|
| 140 |
|
| 141 |
+
output_path = f.name
|
| 142 |
+
final_audio.export(output_path, format="mp3")
|
| 143 |
+
return output_path
|
| 144 |
|
| 145 |
+
# === Gradio Interface ===
|
| 146 |
+
effect_options = [
|
| 147 |
"Noise Reduction",
|
| 148 |
"Compress Dynamic Range",
|
| 149 |
"Add Reverb",
|
|
|
|
| 155 |
"Normalize"
|
| 156 |
]
|
| 157 |
|
| 158 |
+
preset_names = list(preset_choices.keys())
|
| 159 |
+
|
| 160 |
# Gradio UI wiring: audio upload, ordered effect picker, vocal-isolation
# toggle and preset dropdown in; processed MP3 out.
interface = gr.Interface(
    fn=process_audio,
    inputs=[
        gr.Audio(label="Upload Audio", type="filepath"),
        gr.CheckboxGroup(choices=effect_options, label="Apply Effects in Order"),
        gr.Checkbox(label="Isolate Vocals After Effects"),
        gr.Dropdown(
            choices=preset_names,
            label="Select Preset",
            # Preselect the first preset when any exist; otherwise none.
            value=preset_names[0] if preset_names else None,
        ),
    ],
    outputs=gr.Audio(label="Processed Audio (MP3)", type="filepath"),
    title="AI Audio Studio - Pro Edition",
    description="Apply multiple effects, isolate vocals, and export polished tracks -- all powered by AI!",
    allow_flagging="never",
)
|
| 173 |
|