"""AI cover-song generation pipeline.

Downloads (or accepts) a song, separates vocals/instrumentals with MDX-Net,
converts the main vocals with an RVC voice model, applies effects, and mixes
the final cover. Can run as a gradio backend (is_webui) or as a CLI script.
"""
import argparse
import gc
import hashlib
import json
import logging
import os
import shlex
import shutil
import subprocess
import time
from contextlib import suppress
from urllib.parse import urlparse, parse_qs

import torch
import gradio as gr
import librosa
import numpy as np
import soundfile as sf
import sox
import yt_dlp
from pedalboard import Pedalboard, Reverb, Compressor, HighpassFilter
from pedalboard.io import AudioFile
from pydub import AudioSegment
import noisereduce as nr

from mdx import run_mdx
from rvc_voice import rvc_infer

# Silence per-request logging from the HTTP client used by gradio.
logging.getLogger("httpx").setLevel(logging.WARNING)

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Set when running on a HF Spaces ZeroGPU instance; triggers aggressive cleanup.
IS_ZERO_GPU = os.getenv("SPACES_ZERO_GPU")

mdxnet_models_dir = os.path.join(BASE_DIR, 'assets', 'mdxnet_models')
rvc_models_dir = os.path.join(BASE_DIR, 'rvc_models')
output_dir = os.path.join(BASE_DIR, 'song_output')


def clean_old_folders(base_path: str, max_age_seconds: int = 10800):
    """Delete every sub-folder of *base_path* whose mtime is older than *max_age_seconds*."""
    if not os.path.isdir(base_path):
        print(f"Error: {base_path} is not a valid directory.")
        return
    now = time.time()
    for folder_name in os.listdir(base_path):
        folder_path = os.path.join(base_path, folder_name)
        if os.path.isdir(folder_path):
            last_modified = os.path.getmtime(folder_path)
            if now - last_modified > max_age_seconds:
                shutil.rmtree(folder_path)


def get_youtube_video_id(url, ignore_playlist=True):
    """Extract the video (or playlist) id from a YouTube URL.

    Examples:
    http://youtu.be/SA2iWivDJiE
    http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu
    http://www.youtube.com/embed/SA2iWivDJiE
    http://www.youtube.com/v/SA2iWivDJiE?version=3&hl=en_US

    Returns None when the URL is not a recognised YouTube link.
    """
    if "m.youtube.com" in url:
        url = url.replace("m.youtube.com", "www.youtube.com")
    query = urlparse(url)
    if query.hostname == 'youtu.be':
        if query.path[1:] == 'watch':
            # youtu.be/watch?v=...: strip the leading "v=" from the query string
            return query.query[2:]
        return query.path[1:]
    if query.hostname in {'www.youtube.com', 'youtube.com', 'music.youtube.com'}:
        if not ignore_playlist:
            # use case: get playlist id not current video in playlist
            with suppress(KeyError):
                return parse_qs(query.query)['list'][0]
        if query.path == '/watch':
            return parse_qs(query.query)['v'][0]
        if query.path[:7] == '/watch/':
            return query.path.split('/')[2]
        if query.path[:7] == '/embed/':
            return query.path.split('/')[2]
        if query.path[:3] == '/v/':
            return query.path.split('/')[2]
    # returns None for invalid YouTube url
    return None


def yt_download(link):
    """Download the audio of *link* as a 32-bit float WAV; return the file path."""
    if not link.strip():
        gr.Info("You need to provide a download link.")
        return None
    ydl_opts = {
        'format': 'bestaudio',
        'outtmpl': '%(title)s',
        'nocheckcertificate': True,
        'ignoreerrors': True,
        'no_warnings': True,
        'quiet': True,
        'extractaudio': True,
        'postprocessors': [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'wav'}],
        'postprocessor_args': ['-acodec', 'pcm_f32le'],
    }
    with yt_dlp.YoutubeDL(ydl_opts) as ydl:
        result = ydl.extract_info(link, download=True)
        download_path = ydl.prepare_filename(result, outtmpl='%(title)s.wav')
    return download_path


def raise_exception(error_msg, is_webui):
    """Raise gr.Error in webui mode (shown in the UI), else a plain Exception."""
    if is_webui:
        raise gr.Error(error_msg)
    else:
        raise Exception(error_msg)


def get_rvc_model(voice_model, is_webui):
    """Locate the .pth weights and optional .index file of a voice model.

    Searches the model folder and one level of sub-folders (e.g. an extracted
    zip). Returns (model_path, index_path_or_empty_string); raises via
    raise_exception when no .pth file is found.
    """
    rvc_model_filename, rvc_index_filename = None, None
    model_dir = os.path.join(rvc_models_dir, voice_model)
    for file in os.listdir(model_dir):
        file_path = os.path.join(model_dir, file)
        if os.path.isdir(file_path):
            # model files may live one directory level down
            for ff in os.listdir(file_path):
                ext = os.path.splitext(ff)[1]
                if ext == '.pth':
                    rvc_model_filename = os.path.join(file, ff)
                if ext == '.index':
                    rvc_index_filename = os.path.join(file, ff)
        ext = os.path.splitext(file)[1]
        if ext == '.pth':
            rvc_model_filename = file
        if ext == '.index':
            rvc_index_filename = file
    if rvc_model_filename is None:
        error_msg = f'No model file exists in {model_dir}.'
        raise_exception(error_msg, is_webui)
    return os.path.join(model_dir, rvc_model_filename), os.path.join(model_dir, rvc_index_filename) if rvc_index_filename else ''


def get_audio_paths(song_dir):
    """Find cached separation outputs in *song_dir*; missing entries stay None."""
    orig_song_path = None
    instrumentals_path = None
    main_vocals_dereverb_path = None
    backup_vocals_path = None
    for file in os.listdir(song_dir):
        if file.endswith('_Instrumental.wav'):
            instrumentals_path = os.path.join(song_dir, file)
            # original song file shares the stem without the suffix
            orig_song_path = instrumentals_path.replace('_Instrumental', '')
        elif file.endswith('_Vocals_Main_DeReverb.wav'):
            main_vocals_dereverb_path = os.path.join(song_dir, file)
        elif file.endswith('_Vocals_Backup.wav'):
            backup_vocals_path = os.path.join(song_dir, file)
    return orig_song_path, instrumentals_path, main_vocals_dereverb_path, backup_vocals_path


def get_audio_with_suffix(song_dir, suffix="_mysuffix.wav"):
    """Return the first file in *song_dir* whose name ends with *suffix*, or None."""
    for file in os.listdir(song_dir):
        if file.endswith(suffix):
            return os.path.join(song_dir, file)
    return None


def convert_to_stereo(audio_path):
    """Return a stereo version of *audio_path*, converting with ffmpeg if mono."""
    wave, sr = librosa.load(audio_path, mono=False, sr=44100)
    # check if mono (use ndim for robust detection)
    if wave.ndim == 1:
        stereo_path = f'{os.path.splitext(audio_path)[0]}_stereo.wav'
        command = shlex.split(f'ffmpeg -y -loglevel error -i "{audio_path}" -ac 2 -f wav "{stereo_path}"')
        # check=True: fail loudly instead of returning a path that was never written
        subprocess.run(command, check=True)
        return stereo_path
    return audio_path


def pitch_shift(audio_path, pitch_change):
    """Pitch-shift *audio_path* by *pitch_change* semitones (result cached on disk)."""
    output_path = f'{os.path.splitext(audio_path)[0]}_p{pitch_change}.wav'
    if not os.path.exists(output_path):
        try:
            # Try using sox first
            y, sr = sf.read(audio_path)
            tfm = sox.Transformer()
            tfm.pitch(pitch_change)
            y_shifted = tfm.build_array(input_array=y, sample_rate_in=sr)
            sf.write(output_path, y_shifted, sr)
        except Exception:
            # Fallback: use librosa for pitch shifting when SoX binary is not available
            y, sr = librosa.load(audio_path, sr=44100, mono=False)
            y_shifted = librosa.effects.pitch_shift(y, sr=sr, n_steps=pitch_change)
            # librosa returns (channels, samples); soundfile wants (samples, channels)
            sf.write(output_path, y_shifted.T if y_shifted.ndim == 2 else y_shifted, sr)
    return output_path


def get_hash(filepath):
    """Short blake2b digest of a file's contents, used as a song cache key."""
    with open(filepath, 'rb') as f:
        file_hash = hashlib.blake2b()
        while chunk := f.read(8192):
            file_hash.update(chunk)
    return file_hash.hexdigest()[:11]


def display_progress(message, percent, is_webui, progress=None):
    """Report progress to the gradio progress bar in webui mode, else print."""
    if is_webui:
        progress(percent, desc=message)
    else:
        print(message)


def preprocess_song(song_input, mdx_model_params, song_id, is_webui, input_type, progress, keep_orig, orig_song_path):
    """Run the three MDX-Net separation passes on *orig_song_path*.

    Returns (orig_song_path, vocals_path, instrumentals_path, main_vocals_path,
    backup_vocals_path, main_vocals_dereverb_path).
    """
    song_output_dir = os.path.join(output_dir, song_id)
    display_progress('[~] Separating Vocals from Instrumental...', 0.1, is_webui, progress)
    vocals_path, instrumentals_path = run_mdx(
        mdx_model_params, song_output_dir,
        os.path.join(mdxnet_models_dir, 'UVR-MDX-NET-Voc_FT.onnx'),
        orig_song_path, denoise=True, keep_orig=keep_orig)
    display_progress('[~] Separating Main Vocals from Backup Vocals...', 0.2, is_webui, progress)
    backup_vocals_path, main_vocals_path = run_mdx(
        mdx_model_params, song_output_dir,
        os.path.join(mdxnet_models_dir, 'UVR_MDXNET_KARA_2.onnx'),
        vocals_path, suffix='Backup', invert_suffix='Main', denoise=True)
    display_progress('[~] Applying DeReverb to Vocals...', 0.3, is_webui, progress)
    _, main_vocals_dereverb_path = run_mdx(
        mdx_model_params, song_output_dir,
        os.path.join(mdxnet_models_dir, 'Reverb_HQ_By_FoxJoy.onnx'),
        main_vocals_path, invert_suffix='DeReverb', exclude_main=True, denoise=True)
    return orig_song_path, vocals_path, instrumentals_path, main_vocals_path, backup_vocals_path, main_vocals_dereverb_path


def get_audio_file(song_input, is_webui, input_type, progress):
    """Resolve *song_input* to an audio file path (downloading for 'yt' inputs).

    Returns (keep_orig, orig_song_path); keep_orig is True for local files so
    the user's original is never deleted during cleanup.
    """
    keep_orig = False
    if input_type == 'yt':
        display_progress('[~] Downloading song...', 0, is_webui, progress)
        # strip extra query parameters (playlist/index) from the URL
        song_link = song_input.split('&')[0]
        orig_song_path = yt_download(song_link)
    elif input_type == 'local':
        orig_song_path = song_input
        keep_orig = True
    else:
        orig_song_path = None
    return keep_orig, orig_song_path


device = "cuda:0" if torch.cuda.is_available() else "cpu"
compute_half = torch.cuda.is_available()
print(device, "half>>", compute_half)


# @spaces.GPU(enable_queue=True)
def voice_change(voice_model, vocals_path, output_path, pitch_change, f0_method, index_rate,
                 filter_radius, volume_envelope, protect, hop_length, is_webui):
    """Convert *vocals_path* with the given RVC voice model into *output_path*."""
    rvc_model_path, rvc_index_path = get_rvc_model(voice_model, is_webui)
    rvc_infer(
        rvc_model_path,
        rvc_index_path,
        index_rate,
        vocals_path,
        output_path,
        pitch_change,
        f0_method,
        filter_radius,
        volume_envelope,
        protect,
        hop_length,
    )


def add_audio_effects(audio_path, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping):
    """Apply highpass + compression + reverb to *audio_path*; return the new path."""
    output_path = f'{os.path.splitext(audio_path)[0]}_mixed.wav'
    # Initialize audio effects plugins
    board = Pedalboard(
        [
            HighpassFilter(),
            Compressor(ratio=4, threshold_db=-15),
            Reverb(room_size=reverb_rm_size, dry_level=reverb_dry, wet_level=reverb_wet, damping=reverb_damping)
        ]
    )
    with AudioFile(audio_path) as f:
        with AudioFile(output_path, 'w', f.samplerate, f.num_channels) as o:
            # Read one second of audio at a time, until the file is empty:
            while f.tell() < f.frames:
                chunk = f.read(int(f.samplerate))
                effected = board(chunk, f.samplerate, reset=False)
                o.write(effected)
    return output_path


def combine_audio(audio_paths, output_path, main_gain, backup_gain, inst_gain, output_format):
    """Overlay [main vocals, backup vocals, instrumental] with per-track gains.

    Fixed offsets (-4/-6/-7 dB) give sensible defaults before the user gains.
    """
    main_vocal_audio = AudioSegment.from_wav(audio_paths[0]) - 4 + main_gain
    backup_vocal_audio = AudioSegment.from_wav(audio_paths[1]) - 6 + backup_gain
    instrumental_audio = AudioSegment.from_wav(audio_paths[2]) - 7 + inst_gain
    main_vocal_audio.overlay(backup_vocal_audio).overlay(instrumental_audio).export(output_path, format=output_format)


def process_song(
    song_dir, song_input, mdx_model_params, song_id, is_webui, input_type, progress,
    keep_files, pitch_change, pitch_change_all, voice_model, index_rate, filter_radius,
    volume_envelope, protect, f0_method, hop_length, output_format, keep_orig,
    orig_song_path, inference_mode='full'
):
    """
    Process a song through the AI cover pipeline.

    inference_mode controls which stages are executed:
      'full' - MDX separation + RVC voice conversion (default)
      'mdx'  - MDX separation only, skip RVC (outputs separated vocals)
      'rvc'  - RVC voice conversion only, use input audio directly as vocals
    """
    instrumentals_path = None
    backup_vocals_path = None
    vocals_path = None
    main_vocals_path = None
    main_vocals_dereverb_path = None

    # --- MDX Separation Stage ---
    # Run MDX for 'full' and 'mdx' modes; skip for 'rvc' mode
    if inference_mode in ('full', 'mdx'):
        # A missing song_dir signals a first run; an existing one may hold a cache.
        if not os.path.exists(song_dir):
            os.makedirs(song_dir)
            orig_song_path, vocals_path, instrumentals_path, main_vocals_path, backup_vocals_path, main_vocals_dereverb_path = preprocess_song(song_input, mdx_model_params, song_id, is_webui, input_type, progress, keep_orig, orig_song_path)
        else:
            vocals_path, main_vocals_path = None, None
            paths = get_audio_paths(song_dir)
            # if any of the audio files aren't available or keep intermediate files, rerun preprocess
            if any(path is None for path in paths):
                orig_song_path, vocals_path, instrumentals_path, main_vocals_path, backup_vocals_path, main_vocals_dereverb_path = preprocess_song(song_input, mdx_model_params, song_id, is_webui, input_type, progress, keep_orig, orig_song_path)
            else:
                orig_song_path, instrumentals_path, main_vocals_dereverb_path, backup_vocals_path = paths
    else:
        # RVC-only mode: use the original audio as vocals directly
        # No MDX separation — the input audio IS the vocals
        # fix: ensure the output directory exists (previously only the MDX branch created it)
        os.makedirs(song_dir, exist_ok=True)
        main_vocals_dereverb_path = orig_song_path
        # We still need placeholder paths for the downstream pipeline
        vocals_path = orig_song_path
        main_vocals_path = orig_song_path
        # Create a simple instrumental by treating the original as instrumental too
        # (will be ignored in RVC-only output mixing)
        instrumentals_path = orig_song_path
        backup_vocals_path = orig_song_path

    # pitch_change is in octaves for the vocals; pitch_change_all in semitones
    pitch_change = pitch_change * 12 + pitch_change_all
    ai_vocals_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]}_{voice_model}_p{pitch_change}_i{index_rate}_fr{filter_radius}_ve{volume_envelope}_pro{protect}_{f0_method}{"" if "crepe" not in f0_method else f"_{hop_length}"}.wav')
    ai_cover_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]} ({voice_model} Ver).{output_format}')

    # --- RVC Voice Conversion Stage ---
    # Run RVC for 'full' and 'rvc' modes; skip for 'mdx' mode
    if inference_mode in ('full', 'rvc'):
        # The filename encodes all conversion parameters, so an existing file is a valid cache hit.
        if not os.path.exists(ai_vocals_path):
            display_progress('[~] Converting voice using RVC...', 0.5, is_webui, progress)
            voice_change(voice_model, main_vocals_dereverb_path, ai_vocals_path, pitch_change, f0_method, index_rate, filter_radius, volume_envelope, protect, hop_length, is_webui)
    elif inference_mode == 'mdx':
        # MDX-only mode: the "AI vocals" are just the dereverbed main vocals (no RVC)
        ai_vocals_path = main_vocals_dereverb_path
        display_progress('[~] MDX separation complete (RVC skipped).', 0.6, is_webui, progress)

    return ai_vocals_path, ai_cover_path, instrumentals_path, backup_vocals_path, vocals_path, main_vocals_path


def apply_noisereduce(audio_list, type_output="wav"):
    """Run spectral noise reduction over each file; return the new (or original) paths.

    On any per-file failure the original path is kept so the pipeline continues.
    https://github.com/sa-if/Audio-Denoiser
    """
    print("Noise reduce")
    result = []
    for audio_path in audio_list:
        out_path = f"{os.path.splitext(audio_path)[0]}_nr.{type_output}"
        try:
            # Load audio file
            audio = AudioSegment.from_file(audio_path)
            # Convert audio to numpy array (normalize to float [-1, 1])
            samples = np.array(audio.get_array_of_samples()).astype(np.float32)
            samples = samples / (2 ** (8 * audio.sample_width - 1))
            # Reduce noise (noisereduce expects float samples)
            reduced_noise = nr.reduce_noise(y=samples, sr=audio.frame_rate, prop_decrease=0.6)
            # Convert reduced noise signal back to integer samples for AudioSegment
            # NOTE(review): only 16/32-bit widths are handled; 8/24-bit input would
            # get the wrong dtype here — confirm upstream always produces 16-bit.
            reduced_noise_int = (reduced_noise * (2 ** (8 * audio.sample_width - 1))).astype(
                np.int16 if audio.sample_width == 2 else np.int32)
            reduced_audio = AudioSegment(
                reduced_noise_int.tobytes(),
                frame_rate=audio.frame_rate,
                sample_width=audio.sample_width,
                channels=audio.channels
            )
            # Save reduced audio to file
            reduced_audio.export(out_path, format=type_output)
            result.append(out_path)
        except Exception as e:
            print(f"Error noisereduce: {str(e)}")
            result.append(audio_path)
    return result


# @spaces.GPU(duration=140)
def song_cover_pipeline(song_input, voice_model, pitch_change, keep_files,
                        is_webui=0, main_gain=0, backup_gain=0, inst_gain=0, index_rate=0.5,
                        filter_radius=3, volume_envelope=0.25, f0_method='rmvpe', hop_length=128,
                        protect=0.33, pitch_change_all=0, reverb_rm_size=0.15, reverb_wet=0.2,
                        reverb_dry=0.8, reverb_damping=0.7, output_format='mp3',
                        extra_denoise=False, inference_mode='full', progress=gr.Progress()):
    """End-to-end cover generation; returns the path of the final cover file.

    Raises (via raise_exception) on invalid input or any stage failure.
    """
    # Periodic housekeeping: drop stale outputs (and downloaded models on ZeroGPU).
    # Use the BASE_DIR-anchored constants, not CWD-relative paths.
    if not keep_files or IS_ZERO_GPU:
        clean_old_folders(output_dir, 14400)
    if IS_ZERO_GPU:
        clean_old_folders(rvc_models_dir, 10800)
    try:
        if not song_input:
            raise_exception('Ensure that the song input field is filled.', is_webui)
        if inference_mode in ('full', 'rvc') and not voice_model:
            raise_exception('A voice model is required for Full Inference and RVC Only modes.', is_webui)

        display_progress('[~] Starting AI Cover Generation Pipeline...', 0, is_webui, progress)

        with open(os.path.join(mdxnet_models_dir, 'model_data.json')) as infile:
            mdx_model_params = json.load(infile)

        # if youtube url (accept both http and https schemes)
        if urlparse(song_input).scheme in ('http', 'https'):
            input_type = 'yt'
            song_id = get_youtube_video_id(song_input)
            if song_id is None:
                error_msg = 'Invalid YouTube url.'
                raise_exception(error_msg, is_webui)
        # local audio file
        else:
            input_type = 'local'
            song_input = song_input.strip('\"')
            if os.path.exists(song_input):
                song_id = get_hash(song_input)
            else:
                error_msg = f'{song_input} does not exist.'
                song_id = None
                raise_exception(error_msg, is_webui)

        song_dir = os.path.join(output_dir, song_id)

        keep_orig, orig_song_path = get_audio_file(song_input, is_webui, input_type, progress)
        orig_song_path = convert_to_stereo(orig_song_path)

        start = time.time()
        (
            ai_vocals_path, ai_cover_path, instrumentals_path,
            backup_vocals_path, vocals_path, main_vocals_path
        ) = process_song(
            song_dir, song_input, mdx_model_params, song_id, is_webui, input_type, progress,
            keep_files, pitch_change, pitch_change_all, voice_model, index_rate, filter_radius,
            volume_envelope, protect, f0_method, hop_length, output_format, keep_orig,
            orig_song_path, inference_mode,
        )
        end = time.time()
        print(f"Execution time: {end - start:.4f} seconds")

        # Sanity probe: a zero-length/corrupt output usually means RVC failed.
        try:
            with sf.SoundFile(ai_vocals_path) as f:
                duration__ = len(f) / f.samplerate
                print(f"Audio duration: {duration__:.2f} seconds")
        except Exception as sf_err:
            print(f"Warning: Could not read audio duration from {ai_vocals_path}: {sf_err}")
            print("The RVC inference may have produced an invalid output file. Continuing with effects processing...")

        # --- Post-processing: Effects, mixing, and output ---
        # The post-processing differs based on inference_mode
        if inference_mode == 'mdx':
            # MDX-only: output the separated vocals without effects or mixing
            # Just export the dereverbed main vocals as the final output
            display_progress('[~] Exporting separated vocals (MDX Only mode)...', 0.8, is_webui, progress)
            ai_cover_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]} (MDX Separated).{output_format}')
            # Export the separated vocals as the cover
            main_vocal_audio = AudioSegment.from_wav(ai_vocals_path)
            main_vocal_audio.export(ai_cover_path, format=output_format)
            if not keep_files:
                display_progress('[~] Removing intermediate audio files...', 0.95, is_webui, progress)
                intermediate_files = [vocals_path, main_vocals_path]
                for file in intermediate_files:
                    if file and os.path.exists(file):
                        os.remove(file)
            return ai_cover_path

        # For 'full' and 'rvc' modes: apply effects, mix, and output
        display_progress('[~] Applying audio effects to Vocals...', 0.8, is_webui, progress)
        nr_path = ai_vocals_path  # get_audio_with_suffix(song_dir, "_nr.wav")
        if extra_denoise:
            ai_vocals_path = apply_noisereduce([ai_vocals_path])[0]
        ai_vocals_mixed_path = add_audio_effects(ai_vocals_path, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping)

        if inference_mode == 'rvc':
            # RVC-only: output just the converted vocals with effects, no mixing
            display_progress('[~] Exporting RVC vocals (RVC Only mode)...', 0.9, is_webui, progress)
            ai_cover_path = os.path.join(song_dir, f'{os.path.splitext(os.path.basename(orig_song_path))[0]} (RVC Only).{output_format}')
            main_vocal_audio = AudioSegment.from_wav(ai_vocals_mixed_path)
            main_vocal_audio.export(ai_cover_path, format=output_format)
            if not keep_files:
                display_progress('[~] Removing intermediate audio files...', 0.95, is_webui, progress)
                intermediate_files = [ai_vocals_mixed_path, nr_path]
                for file in intermediate_files:
                    if file and os.path.exists(file):
                        os.remove(file)
            return ai_cover_path

        # Full mode: mix AI vocals with instrumentals and backup vocals
        ins_path = get_audio_with_suffix(song_dir, "_Voiceless.wav")
        if not ins_path:
            # Second-pass separation to strip residual vocals from the instrumental
            instrumentals_path, _ = run_mdx(
                mdx_model_params,
                os.path.join(output_dir, song_id),
                os.path.join(mdxnet_models_dir, "UVR-MDX-NET-Inst_HQ_4.onnx"),
                instrumentals_path,
                # exclude_main=False,
                exclude_inversion=True,
                suffix="Voiceless",
                denoise=False,
                keep_orig=True,
                base_device=("" if IS_ZERO_GPU else "cuda")
            )
            # Update ins_path after creating the Voiceless file
            ins_path = get_audio_with_suffix(song_dir, "_Voiceless.wav")

        if pitch_change_all != 0:
            display_progress('[~] Applying overall pitch change', 0.85, is_webui, progress)
            instrumentals_path = pitch_shift(instrumentals_path, pitch_change_all)
            backup_vocals_path = pitch_shift(backup_vocals_path, pitch_change_all)

        display_progress('[~] Combining AI Vocals and Instrumentals...', 0.9, is_webui, progress)
        combine_audio([ai_vocals_mixed_path, backup_vocals_path, instrumentals_path], ai_cover_path, main_gain, backup_gain, inst_gain, output_format)

        if not keep_files:
            display_progress('[~] Removing intermediate audio files...', 0.95, is_webui, progress)
            intermediate_files = [vocals_path, main_vocals_path, ai_vocals_mixed_path, ins_path, nr_path]
            if pitch_change_all != 0:
                intermediate_files += [instrumentals_path, backup_vocals_path]
            for file in intermediate_files:
                if file and os.path.exists(file):
                    os.remove(file)

        return ai_cover_path

    except Exception as e:
        raise_exception(str(e), is_webui)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Generate a AI cover song in the song_output/id directory.', add_help=True)
    parser.add_argument('-i', '--song-input', type=str, required=True,
                        help='Link to a YouTube video or the filepath to a local mp3/wav file to create an AI cover of')
    parser.add_argument('-dir', '--rvc-dirname', type=str, default='',
                        help='Name of the folder in the rvc_models directory containing the RVC model file and optional index file to use. Required for "full" and "rvc" modes.')
    parser.add_argument('-p', '--pitch-change', type=int, required=True,
                        help='Change the pitch of AI Vocals only. Generally, use 1 for male to female and -1 for vice-versa. (Octaves)')
    parser.add_argument('-k', '--keep-files', action=argparse.BooleanOptionalAction,
                        help='Whether to keep all intermediate audio files generated in the song_output/id directory, e.g. Isolated Vocals/Instrumentals')
    parser.add_argument('-ir', '--index-rate', type=float, default=0.5,
                        help='A decimal number e.g. 0.5, used to reduce/resolve the timbre leakage problem. If set to 1, more biased towards the timbre quality of the training dataset')
    parser.add_argument('-fr', '--filter-radius', type=int, default=3,
                        help='A number between 0 and 7. If >=3: apply median filtering to the harvested pitch results. The value represents the filter radius and can reduce breathiness.')
    parser.add_argument('-ve', '--volume-envelope', type=float, default=0.25,
                        help="A decimal number e.g. 0.25. Control how much to use the original vocal's loudness (0) or a fixed loudness (1).")
    parser.add_argument('-palgo', '--pitch-detection-algo', type=str, default='rmvpe',
                        help='Best option is rmvpe (clarity in vocals), then mangio-crepe (smoother vocals).')
    parser.add_argument('-hop', '--hop-length', type=int, default=128,
                        help='Hop length for pitch detection algorithms (crepe, etc.). The higher the value, the faster the conversion and less risk of voice cracks, but there is less pitch accuracy. Recommended: 128.')
    parser.add_argument('-pro', '--protect', type=float, default=0.33,
                        help='A decimal number e.g. 0.33. Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy.')
    parser.add_argument('-mv', '--main-vol', type=int, default=0,
                        help='Volume change for AI main vocals in decibels. Use -3 to decrease by 3 decibels and 3 to increase by 3 decibels')
    parser.add_argument('-bv', '--backup-vol', type=int, default=0,
                        help='Volume change for backup vocals in decibels')
    parser.add_argument('-iv', '--inst-vol', type=int, default=0,
                        help='Volume change for instrumentals in decibels')
    parser.add_argument('-pall', '--pitch-change-all', type=int, default=0,
                        help='Change the pitch/key of vocals and instrumentals. Changing this slightly reduces sound quality')
    parser.add_argument('-rsize', '--reverb-size', type=float, default=0.15,
                        help='Reverb room size between 0 and 1')
    parser.add_argument('-rwet', '--reverb-wetness', type=float, default=0.2,
                        help='Reverb wet level between 0 and 1')
    parser.add_argument('-rdry', '--reverb-dryness', type=float, default=0.8,
                        help='Reverb dry level between 0 and 1')
    parser.add_argument('-rdamp', '--reverb-damping', type=float, default=0.7,
                        help='Reverb damping between 0 and 1')
    parser.add_argument('-oformat', '--output-format', type=str, default='mp3',
                        help='Output format of audio file. mp3 for smaller file size, wav for best quality')
    parser.add_argument('-m', '--mode', type=str, default='full', choices=['full', 'mdx', 'rvc'],
                        help='Inference mode: "full" = MDX separation + RVC voice conversion (default), '
                             '"mdx" = MDX separation only (skip RVC), '
                             '"rvc" = RVC voice conversion only (skip MDX, use input as vocals)')
    args = parser.parse_args()

    rvc_dirname = args.rvc_dirname
    if args.mode in ('full', 'rvc'):
        if not rvc_dirname:
            raise Exception('A voice model directory (-dir) is required for "full" and "rvc" modes.')
        if not os.path.exists(os.path.join(rvc_models_dir, rvc_dirname)):
            raise Exception(f'The folder {os.path.join(rvc_models_dir, rvc_dirname)} does not exist.')

    cover_path = song_cover_pipeline(args.song_input, rvc_dirname, args.pitch_change, args.keep_files,
                                     main_gain=args.main_vol, backup_gain=args.backup_vol,
                                     inst_gain=args.inst_vol, index_rate=args.index_rate,
                                     filter_radius=args.filter_radius, volume_envelope=args.volume_envelope,
                                     f0_method=args.pitch_detection_algo, hop_length=args.hop_length,
                                     protect=args.protect, pitch_change_all=args.pitch_change_all,
                                     reverb_rm_size=args.reverb_size, reverb_wet=args.reverb_wetness,
                                     reverb_dry=args.reverb_dryness, reverb_damping=args.reverb_damping,
                                     output_format=args.output_format, inference_mode=args.mode)
    print(f'[+] Cover generated at {cover_path}')