# import os # import gradio as gr # from scipy.io.wavfile import write # import subprocess # import torch # from audio_separator import Separator # Ensure this is correctly implemented # def inference(audio): # os.makedirs("out", exist_ok=True) # audio_path = 'test.wav' # write(audio_path, audio[0], audio[1]) # device = 'cuda' if torch.cuda.is_available() else 'cpu' # if device=='cuda': # use_cuda=True # print(f"Using device: {device}") # else: # use_cuda=False # print(f"Using device: {device}") # try: # # Using subprocess.run for better control # command = f"python3 -m demucs.separate -n htdemucs_6s -d {device} {audio_path} -o out" # process = subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # print("Demucs script output:", process.stdout.decode()) # except subprocess.CalledProcessError as e: # print("Error in Demucs script:", e.stderr.decode()) # return None # try: # # Separating the stems using your custom separator # separator = Separator("./out/htdemucs_6s/test/vocals.wav", model_name='UVR_MDXNET_KARA_2', use_cuda=use_cuda, output_format='mp3') # primary_stem_path, secondary_stem_path = separator.separate() # except Exception as e: # print("Error in custom separation:", str(e)) # return None # # Collecting all file paths # files = [f"./out/htdemucs_6s/test/{stem}.wav" for stem in ["vocals", "bass", "drums", "other", "piano", "guitar"]] # files.extend([secondary_stem_path,primary_stem_path ]) # # Check if files exist # existing_files = [file for file in files if os.path.isfile(file)] # if not existing_files: # print("No files were created.") # return None # return existing_files # # Gradio Interface # title = "Source Separation Demo" # description = "Music Source Separation in the Waveform Domain. To use it, simply upload your audio." 
import os
import subprocess

import gradio as gr
import torch
from scipy.io.wavfile import write

from audio_separator import Separator  # Ensure this is correctly implemented

# Stem order is shared by the checkboxes, the output components, and the
# list returned from inference() — keep all three in sync via this constant.
STEM_LABELS = ["Vocals", "Bass", "Drums", "Other", "Piano", "Guitar",
               "Lead Vocals", "Backing Vocals"]


def inference(audio, vocals, bass, drums, other, piano, guitar,
              lead_vocals, backing_vocals):
    """Separate an uploaded track into stems and return the selected ones.

    Parameters
    ----------
    audio : tuple
        Gradio "numpy" audio input: (sample_rate, samples array).
    vocals, bass, drums, other, piano, guitar, lead_vocals, backing_vocals : bool
        Checkbox flags selecting which stems to return.

    Returns
    -------
    list
        Exactly eight entries (one per output component, in STEM_LABELS
        order): a file path for each selected stem that exists on disk,
        otherwise None.
    """
    os.makedirs("out", exist_ok=True)
    audio_path = 'test.wav'
    # Gradio's numpy audio component yields (sample_rate, data), which is
    # exactly the argument order scipy.io.wavfile.write expects.
    write(audio_path, audio[0], audio[1])

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f"Using device: {device}")

    try:
        # Argv list with shell=False: the audio path cannot be interpreted
        # by a shell (no injection, no quoting issues with spaces).
        process = subprocess.run(
            ["python3", "-m", "demucs.separate", "-n", "htdemucs_6s",
             "-d", device, audio_path, "-o", "out"],
            check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        )
        print("Demucs script output:", process.stdout.decode())
    except subprocess.CalledProcessError as e:
        print("Error in Demucs script:", e.stderr.decode())
        # The interface declares eight outputs, so every return must have
        # eight slots; None leaves a slot empty instead of crashing Gradio.
        return [None] * len(STEM_LABELS)

    try:
        # Split the demucs vocal stem into lead (primary) and backing
        # (secondary) vocals with the karaoke MDX-Net model.
        separator = Separator(
            "./out/htdemucs_6s/test/vocals.wav",
            model_name='UVR_MDXNET_KARA_2',
            use_cuda=(device == 'cuda'),
            output_format='mp3',
        )
        primary_stem_path, secondary_stem_path = separator.separate()
    except Exception as e:
        print("Error in custom separation:", str(e))
        return [None] * len(STEM_LABELS)

    demucs_dir = "./out/htdemucs_6s/test"
    # Candidate files in STEM_LABELS order: six demucs stems followed by
    # the two vocal splits from the custom separator.
    stem_paths = [
        f"{demucs_dir}/vocals.wav",
        f"{demucs_dir}/bass.wav",
        f"{demucs_dir}/drums.wav",
        f"{demucs_dir}/other.wav",
        f"{demucs_dir}/piano.wav",
        f"{demucs_dir}/guitar.wav",
        primary_stem_path,
        secondary_stem_path,
    ]
    selected = [vocals, bass, drums, other, piano, guitar,
                lead_vocals, backing_vocals]
    # Return a path only for stems that were requested AND actually exist;
    # zip keeps paths and flags aligned explicitly instead of relying on
    # parallel indexing.
    return [path if chosen and os.path.isfile(path) else None
            for path, chosen in zip(stem_paths, selected)]


# One checkbox per stem so the user can choose which outputs to display.
checkboxes = [gr.components.Checkbox(label=stem) for stem in STEM_LABELS]

# Gradio Interface
title = "Source Separation Demo"
description = "Music Source Separation in the Waveform Domain. To use it, simply upload your audio and select the stems you want to display."

gr.Interface(
    inference,
    [gr.components.Audio(type="numpy", label="Input")] + checkboxes,
    [gr.components.Audio(type="filepath", label=stem) for stem in STEM_LABELS],
    title=title,
    description=description,
    live=True  # Enable live updates
).launch()