Create app.py
app.py
ADDED
@@ -0,0 +1,147 @@
import gradio as gr
import librosa
import numpy as np
import soundfile as sf
import os
import tempfile
import torch

from demucs.pretrained import get_model as get_demucs_model
from demucs.apply import apply_model
import matchering as mg
from so_vits_svc_fork.inference.core import Svc
import whisper

# Optional backends mentioned in the section comments below but not used yet:
# from spleeter.separator import Separator
# import madmom

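# The imports above imply (at least) these pip packages for the Space to build:
# gradio, librosa, numpy, soundfile, torch, demucs, matchering, so-vits-svc-fork,
# openai-whisper (plus spleeter and madmom if the optional imports are enabled).
# No versions are pinned in this commit; treat this list as an assumption derived
# from the imports, not a verified requirements.txt.
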
# --- 1. Audio Separation (Demucs/Spleeter) ---
def separate_audio(audio_path):
    # `audio_path` is the uploaded file's path (the UI uses gr.Audio(type="filepath")).
    # Demucs: load at the model's 44.1 kHz rate, keeping channels.
    model = get_demucs_model('htdemucs')
    wav, sr = librosa.load(audio_path, sr=44100, mono=False)
    if wav.ndim == 1:
        # htdemucs expects a stereo mix; duplicate a mono signal.
        wav = np.stack([wav, wav])
    mix = torch.tensor(wav, dtype=torch.float32).unsqueeze(0)  # (batch, channels, time)
    sources = apply_model(model, mix, device='cpu', split=True)
    out_dir = tempfile.mkdtemp()
    stems = {}
    for i, name in enumerate(model.sources):
        out_path = os.path.join(out_dir, f"{name}.wav")
        sf.write(out_path, sources[0, i].cpu().numpy().T, sr)
        stems[name] = out_path
    return stems

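# Hedged alternative for the Spleeter half of this section (not wired into the UI):
# the same four stems can come from Spleeter's pretrained 4-stem model. The output
# file layout below follows Spleeter's defaults and is an assumption.
def separate_audio_spleeter(audio_path):
    from spleeter.separator import Separator  # lazy import; Spleeter is optional here
    out_dir = tempfile.mkdtemp()
    separator = Separator('spleeter:4stems')  # vocals, drums, bass, other
    separator.separate_to_file(audio_path, out_dir)
    base = os.path.splitext(os.path.basename(audio_path))[0]
    stem_dir = os.path.join(out_dir, base)
    return {name: os.path.join(stem_dir, f"{name}.wav")
            for name in ("vocals", "drums", "bass", "other")}
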
# --- 2. Pattern Extraction & Genre Detection ---
def extract_pattern(audio_path):
    y, sr = librosa.load(audio_path, sr=None)
    tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
    tempo = float(np.atleast_1d(tempo)[0])  # newer librosa returns tempo as an array
    onset_env = librosa.onset.onset_strength(y=y, sr=sr)
    onsets = librosa.onset.onset_detect(onset_envelope=onset_env, sr=sr)
    # Crude genre heuristic (replace with an ML classifier if needed)
    genre = "dj bantengan" if tempo > 120 else "pop"
    return {
        "tempo": tempo,
        "beats": beats.tolist(),
        "onsets": onsets.tolist(),
        "genre": genre,
    }

# --- 3. Genre-Aware Pattern Generator (Magenta/MusicGen style transfer) ---
def generate_pattern(reference_audio, creativity=0.2):
    # TODO: integrate with MusicGen/Magenta for real pattern generation.
    # For now, return the extracted pattern as a placeholder; `creativity` is unused.
    return extract_pattern(reference_audio)

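# Hedged sketch of the MusicGen route hinted at by the TODO above (not wired into
# the UI). It uses Meta's audiocraft package with melody conditioning from the
# reference track; the model name, 8 s duration, and the creativity-to-temperature
# mapping are illustrative assumptions, not settled choices.
def generate_pattern_musicgen(reference_path, creativity=0.2, duration=8):
    from audiocraft.models import MusicGen  # lazy import; audiocraft is optional here
    model = MusicGen.get_pretrained('facebook/musicgen-melody')
    model.set_generation_params(duration=duration, temperature=0.5 + creativity)
    melody, melody_sr = librosa.load(reference_path, sr=None, mono=True)
    wav = model.generate_with_chroma(
        descriptions=["instrumental in the style of the reference"],
        melody_wavs=torch.tensor(melody, dtype=torch.float32)[None, None, :],
        melody_sample_rate=melody_sr,
    )
    out_path = os.path.join(tempfile.mkdtemp(), "generated.wav")
    sf.write(out_path, wav[0].cpu().numpy().T, model.sample_rate)
    return out_path
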
# --- 4. Mixing/Mastering (Matchering) ---
def mix_and_master(input_path, reference_path):
    out_path = os.path.join(tempfile.mkdtemp(), "mastered.wav")
    # Matchering 2.x API: process(target, reference, results=[...])
    mg.process(
        target=input_path,
        reference=reference_path,
        results=[mg.pcm16(out_path)],
    )
    return out_path

# --- 5. Vocal Processing (so-vits-svc, Spleeter) ---
def change_vocal(audio_path, model_path):
    # NOTE: this call is schematic; so-vits-svc-fork's Svc typically also needs a
    # matching config file, and inference takes more parameters (speaker, transpose).
    svc = Svc(model_path)
    out_wav_path = svc.infer(audio_path)
    return out_wav_path

# --- 6. Denoising (RNNoise, Demucs) ---
def denoise_audio(audio_path):
    # TODO: integrate with RNNoise or Demucs for real denoising.
    # For now, pass the input through unchanged.
    return audio_path

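# Hedged sketch for the denoising TODO above (not wired into the UI). Instead of
# RNNoise/Demucs it uses the `noisereduce` package's spectral gating as a simpler
# stand-in; the package choice and its default settings are assumptions here.
def denoise_audio_noisereduce(audio_path):
    import noisereduce as nr  # lazy import; noisereduce is optional here
    y, sr = librosa.load(audio_path, sr=None)
    reduced = nr.reduce_noise(y=y, sr=sr)
    out_path = os.path.join(tempfile.mkdtemp(), "denoised.wav")
    sf.write(out_path, reduced, sr)
    return out_path
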
# --- 7. Multi-vocal Lyric Detection (Whisper) ---
def detect_lyrics(audio_path):
    model = whisper.load_model("base")
    result = model.transcribe(audio_path)
    # For multiple vocal parts, split the vocals first (Spleeter/Demucs), then
    # transcribe each stem separately.
    return {"lyrics": result["text"]}

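# Hedged sketch of the multi-vocal idea in the comment above (not wired into the
# UI): separate the mix with the Demucs helper defined earlier, then run Whisper
# on the vocal stem. Transcribing only "vocals" and the "base" model size are
# assumptions.
def detect_lyrics_from_vocals(audio_path):
    stems = separate_audio(audio_path)          # reuse the Demucs separation above
    model = whisper.load_model("base")
    result = model.transcribe(stems["vocals"])  # htdemucs always yields a "vocals" stem
    return {"vocals": result["text"]}
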
# --- Gradio UI ---
# gr.Audio(type="filepath") hands each handler the uploaded file's path as a string.
with gr.Blocks() as demo:
    gr.Markdown("# DAW AI Ultra-Premium Pipeline (All-in-One, Real Pipeline)")

    with gr.Tab("Separate Audio"):
        audio_in = gr.Audio(type="filepath", label="Input Audio")
        out = gr.JSON(label="Separated Stems (vocals, drums, bass, other)")
        btn = gr.Button("Separate")
        btn.click(separate_audio, inputs=audio_in, outputs=out)

    with gr.Tab("Extract Pattern"):
        audio_in2 = gr.Audio(type="filepath", label="Input Audio")
        out2 = gr.JSON(label="Pattern Info")
        btn2 = gr.Button("Extract")
        btn2.click(extract_pattern, inputs=audio_in2, outputs=out2)

    with gr.Tab("Generate Pattern"):
        ref_audio = gr.Audio(type="filepath", label="Reference Audio")
        creativity = gr.Slider(0, 1, value=0.2, label="Creativity")
        out3 = gr.JSON(label="Generated Pattern")
        btn3 = gr.Button("Generate")
        btn3.click(generate_pattern, inputs=[ref_audio, creativity], outputs=out3)

    with gr.Tab("Mix/Master"):
        audio_in3 = gr.Audio(type="filepath", label="Input Audio")
        ref_audio2 = gr.Audio(type="filepath", label="Reference Audio")
        out4 = gr.Audio(label="Mastered Output")
        btn4 = gr.Button("Master")
        btn4.click(mix_and_master, inputs=[audio_in3, ref_audio2], outputs=out4)

    with gr.Tab("Vocal Change"):
        audio_in4 = gr.Audio(type="filepath", label="Input Vocal Audio")
        model_path = gr.Textbox(label="Voice Model Path")
        out5 = gr.Audio(label="Changed Vocal Output")
        btn5 = gr.Button("Change Vocal")
        btn5.click(change_vocal, inputs=[audio_in4, model_path], outputs=out5)

    with gr.Tab("Denoise"):
        audio_in5 = gr.Audio(type="filepath", label="Input Audio")
        out6 = gr.Audio(label="Denoised Output")
        btn6 = gr.Button("Denoise")
        btn6.click(denoise_audio, inputs=audio_in5, outputs=out6)

    with gr.Tab("Detect Lyrics (Multi-Vocal)"):
        audio_in6 = gr.Audio(type="filepath", label="Input Audio")
        out7 = gr.JSON(label="Detected Lyrics per Vocal")
        btn7 = gr.Button("Detect Lyrics")
        btn7.click(detect_lyrics, inputs=audio_in6, outputs=out7)

demo.launch()