# coding: utf-8
__author__ = 'Roman Solovyev (ZFTurbo): https://github.com/ZFTurbo/'
import time
import librosa
import sys
import os
import glob
import torch
import soundfile as sf
import numpy as np
from tqdm.auto import tqdm
import torch.nn as nn
# Append this script's directory to sys.path so the local utils package can be
# imported even when running under an embedded Python distribution.
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_dir)
from utils.audio_utils import normalize_audio, denormalize_audio, draw_spectrogram
from utils.settings import get_model_from_config, parse_args_inference
from utils.model_utils import demix
from utils.model_utils import prefer_target_instrument, apply_tta, load_start_checkpoint
import warnings
warnings.filterwarnings("ignore")


def disable_rnn_flatten_parameters(model: nn.Module) -> None:
    """
    Disable cuDNN RNN weight flattening to avoid inference-mode in-place updates.
    This is a workaround for RuntimeError about inplace updates to inference tensors
    when running RNNs during inference.
    """
    for module in model.modules():
        if isinstance(module, (nn.RNN, nn.GRU, nn.LSTM)):
            module.flatten_parameters = lambda *args, **kwargs: None
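
# Note: flatten_parameters() only repacks RNN weights into contiguous memory as a
# cuDNN speed optimization, so replacing it with a no-op should leave results
# unchanged, though LSTM/GRU layers may run slightly slower and PyTorch may warn
# about non-contiguous weights.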


def run_folder(model, args, config, device, verbose: bool = False):
    """
    Process a folder of audio files for source separation.

    Parameters
    ----------
    model : torch.nn.Module
        Pre-trained model for source separation.
    args : Namespace
        Arguments containing input folder, output folder, and processing options.
    config : Dict
        Configuration object with audio and inference settings.
    device : torch.device
        Device for model inference (CPU or CUDA).
    verbose : bool, optional
        If True, prints detailed information during processing. Default is False.
    """
    start_time = time.time()
    model.eval()

    mixture_paths = sorted(glob.glob(os.path.join(args.input_folder, '*.*')))
    sample_rate = getattr(config.audio, 'sample_rate', 44100)
    print(f"Total files found: {len(mixture_paths)}. Using sample rate: {sample_rate}")

    instruments = prefer_target_instrument(config)[:]
    os.makedirs(args.store_dir, exist_ok=True)

    if not verbose:
        mixture_paths = tqdm(mixture_paths, desc="Total progress")

    detailed_pbar = not args.disable_detailed_pbar
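    # detailed_pbar is forwarded to demix() below as its pbar argument to show
    # progress within each track; the tqdm above only tracks whole files.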

    for path in mixture_paths:
        print(f"Processing track: {path}")
        try:
            mix, sr = librosa.load(path, sr=sample_rate, mono=False)
        except Exception as e:
            print(f'Cannot read track: {path}')
            print(f'Error message: {str(e)}')
            continue

        # Align channel count with model expectation to avoid bs_roformer stereo assertion.
        model_stereo = getattr(getattr(config, "training", None), "stereo", None)
        if model_stereo is None:
            model_stereo = getattr(getattr(config, "audio", None), "num_channels", mix.shape[0]) == 2

        if len(mix.shape) == 1:
            mix = np.expand_dims(mix, axis=0)
            if model_stereo and mix.shape[0] == 1:
                print('Convert mono track to stereo...')
                mix = np.concatenate([mix, mix], axis=0)
        elif not model_stereo and mix.shape[0] == 2:
            print('Convert stereo track to mono because model is mono...')
            mix = np.mean(mix, axis=0, keepdims=True)

        mix_orig = mix.copy()
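        # Keep an untouched copy of the mix: the optional 'instrumental' stem below is
        # derived by subtracting the vocal estimate from this original audio.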

        if 'normalize' in config.inference:
            if config.inference['normalize'] is True:
                mix, norm_params = normalize_audio(mix)
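        # demix() is expected to return a dict mapping each instrument name to its
        # estimated waveform, shaped like the (possibly normalized) input mix.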
        waveforms_orig = demix(config, model, mix, device, model_type=args.model_type, pbar=detailed_pbar)
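
        # apply_tta performs test-time augmentation: extra inference passes over
        # augmented copies of the mix (e.g. channel-swapped or polarity-inverted),
        # folded back into the estimates for a small quality gain at extra cost.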
        if args.use_tta:
            waveforms_orig = apply_tta(config, model, mix, waveforms_orig, device, args.model_type)

        if args.extract_instrumental:
            instr = 'vocals' if 'vocals' in instruments else instruments[0]
            waveforms_orig['instrumental'] = mix_orig - waveforms_orig[instr]
            if 'instrumental' not in instruments:
                instruments.append('instrumental')

        file_name = os.path.splitext(os.path.basename(path))[0]
        for instr in instruments:
            estimates = waveforms_orig[instr]
            if 'normalize' in config.inference:
                if config.inference['normalize'] is True:
                    estimates = denormalize_audio(estimates, norm_params)

            codec = 'flac' if getattr(args, 'flac_file', False) else 'wav'
            subtype = args.pcm_type
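            # The subtype string is passed straight to soundfile (e.g. 'PCM_16',
            # 'PCM_24', 'FLOAT') and must be one libsndfile supports for the chosen codec.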

            dirnames, fname = format_filename(
                args.filename_template,
                instr=instr,
                start_time=int(start_time),
                file_name=file_name,
                dir_name=os.path.dirname(path),
                model_type=args.model_type,
                model=os.path.splitext(os.path.basename(args.start_check_point))[0]
            )
            output_dir = os.path.join(args.store_dir, *dirnames)
            os.makedirs(output_dir, exist_ok=True)

            # Name the output <originalfile>_<instr>_stem to keep stems tied to their source.
            stem_fname = f"{file_name}_{instr}_stem"
            output_path = os.path.join(output_dir, f"{stem_fname}.{codec}")
            sf.write(output_path, estimates.T, sr, subtype=subtype)
            print("Wrote file:", output_path)

            if args.draw_spectro > 0:
                output_img_path = os.path.join(output_dir, f"{stem_fname}.jpg")
                draw_spectrogram(estimates.T, sr, args.draw_spectro, output_img_path)
                print("Wrote file:", output_img_path)

    print(f"Elapsed time: {time.time() - start_time:.2f} seconds.")


def format_filename(template, **kwargs):
    """
    Format a filename from a template, e.g. "{file_name}/{instr}".
    Slashes ('/') in the template cause subdirectories to be created.
    Returns (dirnames, fname): a list of directory names and a single file name.
    """
    result = template
    for k, v in kwargs.items():
        result = result.replace(f"{{{k}}}", str(v))
    *dirnames, fname = result.split("/")
    return dirnames, fname
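
# Illustrative usage (hypothetical values): format_filename("{model}/{file_name}",
# model="mdx23c", file_name="song", instr="vocals") returns (["mdx23c"], "song").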


def proc_folder(dict_args):
    args = parse_args_inference(dict_args)

    device = "cpu"
    if args.force_cpu:
        device = "cpu"
    elif torch.cuda.is_available():
        print('CUDA is available, use --force_cpu to disable it.')
        device = f'cuda:{args.device_ids[0]}' if isinstance(args.device_ids, list) else f'cuda:{args.device_ids}'
    elif torch.backends.mps.is_available():
        device = "mps"
    print("Using device: ", device)

    model_load_start_time = time.time()
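    # cudnn.benchmark lets cuDNN auto-tune convolution algorithms for the (typically
    # fixed) chunk shapes used at inference; it only has an effect on CUDA devices.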
    torch.backends.cudnn.benchmark = True

    model, config = get_model_from_config(args.model_type, args.config_path)
    if 'model_type' in config.training:
        args.model_type = config.training.model_type

    if args.start_check_point:
        checkpoint = torch.load(args.start_check_point, weights_only=False, map_location='cpu')
        load_start_checkpoint(args, model, checkpoint, type_='inference')

    # Workaround for cuDNN RNN flattening with inference tensors.
    disable_rnn_flatten_parameters(model)

    print("Instruments: {}".format(config.training.instruments))

    # In case multiple CUDA GPUs are used and the --device_ids arg is passed.
    if isinstance(args.device_ids, list) and len(args.device_ids) > 1 and not args.force_cpu:
        model = nn.DataParallel(model, device_ids=args.device_ids)
        # Ensure flattened parameters are disabled on the wrapped model too.
        disable_rnn_flatten_parameters(model)

    model = model.to(device)
    print("Model load time: {:.2f} sec".format(time.time() - model_load_start_time))

    run_folder(model, args, config, device, verbose=True)


if __name__ == "__main__":
    proc_folder(None)
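
# Typical invocation from the command line (flag names are assumed to mirror the
# attributes read above; the authoritative list lives in utils.settings.parse_args_inference):
#   python <this_script>.py --model_type mel_band_roformer --config_path config.yaml \
#       --start_check_point model.ckpt --input_folder ./input_audio --store_dir ./results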