# AIGCP / src/webui.py
# Commit 5b4a1b3 (R-Kentaren): "Add inference mode selector: Full / MDX Only / RVC Only"
import json
import os
import shutil
import urllib.request
import zipfile
from argparse import ArgumentParser
import gradio as gr
import logging
def configure_logging_libs(debug=False):
    """Quiet noisy third-party loggers and TensorFlow's C++ log output.

    Args:
        debug: when True, keep TensorFlow C++ logs at level "1" (INFO and up);
            otherwise set "3" so only errors are printed.
    """
    noisy_modules = [
        "numba",
        "httpx",
        "markdown_it",
        "fairseq",
        "faiss",
    ]
    for module in noisy_modules:
        logging.getLogger(module).setLevel(logging.WARNING)
    # TF_CPP_MIN_LOG_LEVEL is read by TensorFlow's C++ core at import time.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = "1" if debug else "3"
configure_logging_libs()
from main import song_cover_pipeline, yt_download
# Project root: this file lives in <root>/src/, so walk up one directory.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Set by the platform when running on a Hugging Face ZeroGPU Space;
# used below to disable "keep intermediate files".
IS_ZERO_GPU = os.getenv("SPACES_ZERO_GPU")
# NOTE(review): rvc_assets_dir is never referenced in this file — possibly dead; confirm before removing.
rvc_assets_dir = os.path.join(BASE_DIR, 'assets', 'rvc_models')
# Folder scanned for RVC voice models (downloads and uploads land here).
rvc_models_dir = os.path.join(BASE_DIR, 'rvc_models')
# Destination for generated covers (one subfolder per song id).
output_dir = os.path.join(BASE_DIR, 'song_output')
def get_current_models(models_dir):
    """Return the voice-model entries in *models_dir*, skipping housekeeping files."""
    ignored = {'.gitkeep'}
    return [entry for entry in os.listdir(models_dir) if entry not in ignored]
def update_models_list():
    """Re-scan the RVC models folder and refresh the dropdown choices."""
    return gr.update(choices=get_current_models(rvc_models_dir))
def extract_zip(extraction_folder, zip_name):
    """Extract a voice-model zip into *extraction_folder* and flatten it.

    Creates the folder (raising FileExistsError if it already exists — callers
    use that as a duplicate-name guard), extracts the archive, deletes the zip,
    then moves the last sufficiently large ``.pth`` model (> 40 MiB) and
    ``.index`` file (> 100 KiB) found anywhere in the tree up to the top of
    *extraction_folder*, and removes leftover subdirectories.

    Raises:
        gr.Error: if no plausible ``.pth`` model file is found.
    """
    os.makedirs(extraction_folder)
    with zipfile.ZipFile(zip_name, 'r') as zip_ref:
        # ZipFile extraction sanitizes absolute paths and '..' components, so
        # hostile archive members cannot escape extraction_folder.
        zip_ref.extractall(extraction_folder)
    os.remove(zip_name)

    index_filepath, model_filepath = None, None
    for root, _dirs, files in os.walk(extraction_folder):
        for name in files:
            path = os.path.join(root, name)
            # Size thresholds filter out tiny placeholder / LFS-pointer files.
            if name.endswith('.index') and os.stat(path).st_size > 1024 * 100:
                index_filepath = path
            if name.endswith('.pth') and os.stat(path).st_size > 1024 * 1024 * 40:
                model_filepath = path

    if not model_filepath:
        raise gr.Error(f'No .pth model file was found in the extracted zip. Please check {extraction_folder}.')

    # Hoist the model (and optional index) to the top level; skip the rename
    # when the file was already extracted there, so a path is never renamed
    # onto itself.
    model_dest = os.path.join(extraction_folder, os.path.basename(model_filepath))
    if model_filepath != model_dest:
        os.rename(model_filepath, model_dest)
    if index_filepath:
        index_dest = os.path.join(extraction_folder, os.path.basename(index_filepath))
        if index_filepath != index_dest:
            os.rename(index_filepath, index_dest)

    # Drop now-empty (or irrelevant) subdirectories; loose top-level files stay.
    for entry in os.listdir(extraction_folder):
        entry_path = os.path.join(extraction_folder, entry)
        if os.path.isdir(entry_path):
            shutil.rmtree(entry_path)
def download_online_model(url, dir_name, progress=gr.Progress()):
    """Download a voice model into ``rvc_models_dir/<dir_name>``.

    Two input forms are accepted:
      * a single URL to a zip archive (HuggingFace / Pixeldrain), which is
        downloaded and unpacked via extract_zip();
      * a comma-separated list of direct file URLs (e.g. bare ``.pth`` and
        ``.index`` links), which are saved straight into the model folder —
        no zip extraction step must run in this case.

    Raises:
        gr.Error: if the model folder already exists, or on any download failure.
    """
    try:
        progress(0, desc=f'[~] Downloading voice model with name {dir_name}...')
        zip_name = url.split('/')[-1]
        extraction_folder = os.path.join(rvc_models_dir, dir_name)
        if os.path.exists(extraction_folder):
            raise gr.Error(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')

        # Pixeldrain share links carry the file id as the last path segment;
        # rewrite to the direct-download API endpoint.
        if 'pixeldrain.com' in url:
            url = f'https://pixeldrain.com/api/file/{zip_name}'

        if "," in url:
            # Multiple direct file URLs: fetch each into the model folder as-is.
            urls = [u.strip() for u in url.split(",") if u.strip()]
            os.makedirs(extraction_folder, exist_ok=True)
            for u in urls:
                u = u.replace("?download=true", "")
                file_name = u.split('/')[-1]
                file_path = os.path.join(extraction_folder, file_name)
                if not os.path.exists(file_path):
                    urllib.request.urlretrieve(u, file_path)
        else:
            # Single zip URL: download into the working directory, then unpack.
            # (extract_zip creates extraction_folder itself and deletes the zip.)
            urllib.request.urlretrieve(url, zip_name)
            progress(0.5, desc='[~] Extracting zip...')
            extract_zip(extraction_folder, zip_name)

        return f'[+] {dir_name} Model successfully downloaded!'
    except Exception as e:
        # Surface any failure to the UI as a gr.Error message.
        raise gr.Error(str(e))
def upload_local_model(zip_path, dir_name, progress=gr.Progress()):
    """Install a locally uploaded RVC model zip under ``rvc_models_dir/<dir_name>``.

    Returns a success message, or raises gr.Error (duplicate name, bad zip, ...).
    """
    try:
        target_dir = os.path.join(rvc_models_dir, dir_name)
        if os.path.exists(target_dir):
            raise gr.Error(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')
        # Gradio 6.x with type="filepath" returns a string path, not a file object
        archive = zip_path if isinstance(zip_path, str) else zip_path.name
        progress(0.5, desc='[~] Extracting zip...')
        extract_zip(target_dir, archive)
        return f'[+] {dir_name} Model successfully uploaded!'
    except Exception as e:
        raise gr.Error(str(e))
def pub_dl_autofill(pub_models, event: gr.SelectData):
    """Fill the download form from the clicked row of a public-models table."""
    row = event.index[0]
    url_update = gr.update(value=pub_models.loc[row, 'URL'])
    name_update = gr.update(value=pub_models.loc[row, 'Model Name'])
    return url_update, name_update
def show_hop_slider(pitch_detection_algo):
    """Expose the hop-length slider only for crepe-based pitch algorithms."""
    return gr.update(visible='crepe' in pitch_detection_algo)
def update_voice_model_visibility(mode):
    """Show/hide the voice model dropdown based on inference mode.

    A voice model is required for 'full' and 'rvc' modes; 'mdx' (separation
    only) does not need one, so the dropdown is hidden in that case.
    """
    return gr.update(visible=(mode != 'mdx'))
def update_input_visibility(selected_method):
    """Toggle the three input columns so only the selected method is visible.

    Returns visibility updates for (file upload, YouTube URL, file path) in
    that order; an unknown method hides all three.
    """
    column_index = {
        "File Upload": 0,
        "YouTube URL": 1,
        "File Path": 2,
    }.get(selected_method)
    return tuple(gr.update(visible=(idx == column_index)) for idx in range(3))
def process_file_path(file_path):
    """Validate a typed-in audio path.

    Returns (path or None, status-textbox update): the path passes through
    only when it exists on disk.
    """
    exists = os.path.exists(file_path)
    if exists:
        status = gr.update(value=f"✓ File loaded: {file_path}")
    else:
        status = gr.update(value=f"✗ File not found: {file_path}")
    return (file_path if exists else None), status
# Small CSS tweaks applied to the Blocks layout (title banner / info centering).
css = """
.title { font-size: 3em; align-items: center; text-align: center; }
.info { align-items: center; text-align: center; }
"""
if __name__ == '__main__':
    # CLI flags control only how the web server is exposed, not the pipeline.
    parser = ArgumentParser(description='Generate a AI cover song in the song_output/id directory.', add_help=True)
    parser.add_argument("--share", action="store_true", dest="share_enabled", default=False, help="Enable sharing")
    parser.add_argument("--listen", action="store_true", default=False, help="Make the WebUI reachable from your local network.")
    parser.add_argument('--listen-host', type=str, help='The hostname that the server will use.')
    parser.add_argument('--listen-port', type=int, help='The listening port that the server will use.')
    parser.add_argument('--theme', type=str, default="NoCrypt/miku", help='Set the theme (default: NoCrypt/miku)')
    parser.add_argument("--ssr", action="store_true", help="Enable SSR (Server-Side Rendering)")
    args = parser.parse_args()

    # Snapshot of available voice models at startup; the refresh button re-scans.
    voice_models = get_current_models(rvc_models_dir)

    with gr.Blocks(css=css, title='AICoverGenWebUI', theme=args.theme, fill_width=True, fill_height=True) as app:
        gr.Label('AICGP created with ❤️', show_label=False)

        # Main Generate tab
        with gr.Tab("Generate"):
            with gr.Row(equal_height=True):
                rvc_model = gr.Dropdown(voice_models, label='Voice Models', info='Models folder "AICoverGen --> rvc_models". After new models are added into this folder, click the refresh button')
                ref_btn = gr.Button('Refresh Models 🔁', variant='primary')

            # Inference Mode Selection — passed through to song_cover_pipeline.
            with gr.Row(equal_height=True):
                inference_mode = gr.Radio(
                    choices=['full', 'mdx', 'rvc'],
                    value='full',
                    label='Inference Mode',
                    info='Full: MDX separation + RVC voice conversion | MDX Only: Separate vocals only (no RVC) | RVC Only: Convert voice only (no separation)',
                    interactive=True
                )

            # Input Method Selection
            with gr.Row(equal_height=True):
                input_method = gr.Radio(
                    choices=["File Upload", "YouTube URL", "File Path"],
                    label="Select Input Method",
                    value="File Upload",
                    interactive=True
                )

            with gr.Column():
                # File Upload Section — local_file is the single source of truth
                # fed into the pipeline; the other two methods route into it.
                with gr.Column(visible=True) as file_upload_col:
                    audio_extensions = ['.mp3', '.m4a', '.flac', '.wav', '.aac', '.ogg', '.wma', '.alac', '.aiff', '.opus', '.amr']
                    local_file = gr.File(label='Upload Audio File', interactive=True, type="filepath", file_types=audio_extensions)

                # YouTube URL Section
                with gr.Column(visible=False) as yt_url_col:
                    yt_file = gr.Audio(label='YT OPT', interactive=True)
                    with gr.Column():
                        yt_url = gr.Textbox(label="YouTube URL", placeholder="https://www.youtube.com/watch?v=...", lines=1)
                        process_yt_btn = gr.Button("Process YouTube URL", variant="secondary")
                        yt_status = gr.Textbox(label="Status", interactive=False, visible=False)

                        def process_yt_url(url):
                            """Download YouTube audio; on success feed the file to both
                            the preview player and local_file (the pipeline input).

                            Returns (audio path or None, status update, local_file update).
                            """
                            if url:
                                try:
                                    downloaded_file = yt_download(url)
                                    return downloaded_file, gr.update(visible=True, value="✓ YouTube video processed successfully!"), gr.update(value=downloaded_file)
                                except Exception as e:
                                    return None, gr.update(visible=True, value=f"✗ Error: {str(e)}"), gr.update(value=None)
                            return None, gr.update(visible=True, value="✗ Please enter a valid URL"), gr.update(value=None)

                        process_yt_btn.click(process_yt_url, inputs=[yt_url], outputs=[yt_file, yt_status, local_file])

                # File Path Section
                with gr.Column(visible=False) as file_path_col:
                    file_path_input = gr.Textbox(label="File Path", placeholder="/path/to/your/audio/file.mp3", lines=1)
                    process_path_btn = gr.Button("Load File Path", variant="secondary")
                    path_status = gr.Textbox(label="Status", interactive=False, visible=False)
                    process_path_btn.click(process_file_path, inputs=[file_path_input], outputs=[local_file, path_status])

            # Update visibility based on selection
            input_method.change(update_input_visibility, inputs=[input_method], outputs=[file_upload_col, yt_url_col, file_path_col])

            with gr.Row(equal_height=True):
                pitch = gr.Slider(-3, 3, value=0, step=1, label='Pitch Change (Vocals ONLY)', info='Generally, use 1 for male to female conversions and -1 for vice-versa. (Octaves)')
                pitch_all = gr.Slider(-12, 12, value=0, step=1, label='Overall Pitch Change', info='Changes pitch/key of vocals and instrumentals together. Altering this slightly reduces sound quality. (Semitones)')

            # Voice conversion options
            with gr.Accordion('Settings', open=False):
                with gr.Accordion('Voice conversion options', open=False):
                    with gr.Row(equal_height=True):
                        index_rate = gr.Slider(0, 1, value=0.5, label='Index Rate', info="Controls how much of the AI voice's accent to keep in the vocals")
                        filter_radius = gr.Slider(0, 7, value=3, step=1, label='Filter radius', info='If >=3: apply median filtering to the harvested pitch results. Can reduce breathiness')
                        volume_envelope = gr.Slider(0, 1, value=0.25, label='Volume Envelope', info="Control how much to mimic the original vocal's loudness (0) or a fixed loudness (1)")
                        protect = gr.Slider(0, 0.5, value=0.33, label='Protect rate', info='Protect voiceless consonants and breath sounds. Set to 0.5 to disable.')
                    with gr.Column():
                        f0_method = gr.Dropdown(
                            ['rmvpe', 'rmvpe-legacy', 'mangio-crepe', 'mangio-crepe-tiny', 'mangio-crepe-small',
                             'mangio-crepe-medium', 'mangio-crepe-large', 'mangio-crepe-full',
                             'crepe-tiny', 'crepe-small', 'crepe-medium', 'crepe-large', 'crepe-full',
                             'fcpe', 'fcpe-legacy', 'djcm', 'harvest', 'yin', 'pyin', 'swipe', 'dio', 'pm'],
                            value='rmvpe', label='Pitch detection algorithm',
                            info='Best: rmvpe (clarity), mangio-crepe variants (smoother), fcpe (fast). Legacy versions available for compatibility.'
                        )
                        # Hop length only matters for crepe variants; hidden otherwise.
                        hop_length = gr.Slider(32, 320, value=128, step=1, visible=False, label='Hop Length', info='Lower values leads to longer conversions and higher risk of voice cracks, but better pitch accuracy.')
                    f0_method.change(show_hop_slider, inputs=f0_method, outputs=hop_length)
                    with gr.Row(equal_height=True):
                        extra_denoise = gr.Checkbox(True, label='Denoise', info='Apply an additional noise reduction step to clean up the audio further.')
                        # Keeping intermediates is forced off (and locked) on ZeroGPU Spaces to save disk.
                        keep_files = gr.Checkbox((False if IS_ZERO_GPU else True), label='Keep intermediate files', info='Keep all audio files generated in the song_output/id directory, e.g. Isolated Vocals/Instrumentals. Leave unchecked to save space', interactive=(False if IS_ZERO_GPU else True))

                # Audio mixing options
                with gr.Accordion('Audio mixing options', open=False):
                    gr.Markdown('### Volume Change (decibels)')
                    with gr.Row():
                        main_gain = gr.Slider(-20, 20, value=0, step=1, label='Main Vocals')
                        backup_gain = gr.Slider(-20, 20, value=0, step=1, label='Backup Vocals')
                        inst_gain = gr.Slider(-20, 20, value=0, step=1, label='Music')
                    gr.Markdown('### Reverb Control on AI Vocals')
                    with gr.Row():
                        reverb_rm_size = gr.Slider(0, 1, value=0.15, label='Room size', info='The larger the room, the longer the reverb time')
                        reverb_wet = gr.Slider(0, 1, value=0.2, label='Wetness level', info='Level of AI vocals with reverb')
                        reverb_dry = gr.Slider(0, 1, value=0.8, label='Dryness level', info='Level of AI vocals without reverb')
                        reverb_damping = gr.Slider(0, 1, value=0.7, label='Damping level', info='Absorption of high frequencies in the reverb')
                    gr.Markdown('### Audio Output Format')
                    output_format = gr.Dropdown(['mp3', 'wav'], value='mp3', label='Output file type', info='mp3: small file size, decent quality. wav: Large file size, best quality')

            with gr.Row(equal_height=True):
                clear_btn = gr.ClearButton(value='Clear', components=[local_file, rvc_model, keep_files, yt_url, file_path_input])
                generate_btn = gr.Button("Generate", variant='primary')
            ai_cover = gr.Audio(label='AI Cover')

            ref_btn.click(update_models_list, None, outputs=rvc_model)
            inference_mode.change(update_voice_model_visibility, inputs=inference_mode, outputs=rvc_model)
            # Hidden flag telling the pipeline it is invoked from the WebUI.
            is_webui = gr.Number(value=1, visible=False)
            # NOTE(review): input order must match main.song_cover_pipeline's signature — verify when changing either side.
            generate_btn.click(song_cover_pipeline,
                               inputs=[local_file, rvc_model, pitch, keep_files, is_webui, main_gain, backup_gain,
                                       inst_gain, index_rate, filter_radius, volume_envelope, f0_method, hop_length,
                                       protect, pitch_all, reverb_rm_size, reverb_wet, reverb_dry, reverb_damping,
                                       output_format, extra_denoise, inference_mode],
                               outputs=[ai_cover])
            # Reset every generation control to its declared default value.
            clear_btn.click(lambda: [0, 0, 0, 0, 0.5, 3, 0.25, 0.33, 'rmvpe', 128, 0, 0.15, 0.2, 0.8, 0.7, 'mp3', None, True, 'full'],
                            outputs=[pitch, main_gain, backup_gain, inst_gain, index_rate, filter_radius, volume_envelope,
                                     protect, f0_method, hop_length, pitch_all, reverb_rm_size, reverb_wet,
                                     reverb_dry, reverb_damping, output_format, ai_cover, extra_denoise, inference_mode])

        # Download tab
        with gr.Tab('Download model'):
            with gr.Tab('From HuggingFace/Pixeldrain URL'):
                with gr.Row():
                    model_zip_link = gr.Text(label='Download link to model', info='Should be a zip file containing a .pth model file and an optional .index file.')
                    model_name = gr.Text(label='Name your model', info='Give your new model a unique name from your other voice models.')
                with gr.Row():
                    download_btn = gr.Button('Download 🌐', variant='primary', scale=19)
                    dl_output_message = gr.Text(label='Output Message', interactive=False, scale=20)
                download_btn.click(download_online_model, inputs=[model_zip_link, model_name], outputs=dl_output_message)

                gr.Markdown('## Input Examples')
                # Examples are not cached: each click performs a real download.
                gr.Examples(
                    [
                        ['https://huggingface.co/MrDawg/ToothBrushing/resolve/main/ToothBrushing.zip?download=true', 'ToothBrushing'],
                        ['https://huggingface.co/sail-rvc/Aldeano_Minecraft__RVC_V2_-_500_Epochs_/resolve/main/model.pth?download=true, https://huggingface.co/sail-rvc/Aldeano_Minecraft__RVC_V2_-_500_Epochs_/resolve/main/model.index?download=true', 'Minecraft_Villager'],
                        ['https://huggingface.co/phant0m4r/LiSA/resolve/main/LiSA.zip', 'Lisa'],
                        ['https://pixeldrain.com/u/3tJmABXA', 'Gura'],
                        ['https://huggingface.co/Kit-Lemonfoot/kitlemonfoot_rvc_models/resolve/main/AZKi%20(Hybrid).zip', 'Azki']
                    ],
                    [model_zip_link, model_name],
                    [],
                    download_online_model,
                    cache_examples=False,
                )

        # Upload tab
        with gr.Tab('Upload model'):
            gr.Markdown('## Upload locally trained RVC v2 model and index file')
            gr.Markdown('- Find model file (weights folder) and optional index file (logs/[name] folder)')
            gr.Markdown('- Compress files into zip file')
            gr.Markdown('- Upload zip file and give unique name for voice')
            gr.Markdown('- Click Upload model')
            with gr.Row():
                with gr.Column():
                    zip_file = gr.File(label='Zip file')
                    local_model_name = gr.Text(label='Model name')
            with gr.Row():
                model_upload_button = gr.Button('Upload model', variant='primary', scale=19)
                local_upload_output_message = gr.Text(label='Output Message', interactive=False, scale=20)
            model_upload_button.click(upload_local_model, inputs=[zip_file, local_model_name], outputs=local_upload_output_message)

    app.launch(
        share=args.share_enabled,
        # NOTE(review): debug is tied to --share here — presumably intentional, but confirm.
        debug=args.share_enabled,
        show_error=True,
        server_name=None if not args.listen else (args.listen_host or '0.0.0.0'),
        server_port=args.listen_port,
        ssr_mode=args.ssr
    )