# baseten_vibevoice / training.sh
# Uploaded by Diksha2001 via huggingface_hub (commit 845fb83, verified)
#!/bin/bash
# Environment setup for VibeVoice LoRA fine-tuning.
# Routes all caches onto the persistent /workspace volume, pins training to a
# single GPU, and asserts the required HF_TOKEN secret is present.
set -euxo pipefail  # -o pipefail added: fail the script if any pipeline stage fails

export PIP_NO_CACHE_DIR=1
export CUDA_VISIBLE_DEVICES=0                                   # single-GPU run
export HF_HOME=/workspace/.cache/huggingface
export HF_DATASETS_CACHE=/workspace/.cache/huggingface/datasets
export HF_HUB_DOWNLOAD_TIMEOUT=600                              # large model shards: generous timeout
export HF_HUB_DOWNLOAD_RETRY=10
export TORCH_HOME=/workspace/.cache/torch
export XDG_CACHE_HOME=/workspace/.cache
export WANDB_DIR=/workspace/wandb
export PIP_CACHE_DIR=/workspace/.cache/pip
# Quote the expansion and fail fast with a clear message if HF_TOKEN is unset.
# Briefly disable xtrace so `set -x` does not print the token into the logs.
set +x
export HUGGINGFACE_HUB_TOKEN="${HF_TOKEN:?HF_TOKEN must be set}"
set -x
rm -rf /tmp/*  # intentional: free ephemeral disk before heavy downloads
echo "πŸ“¦ Editable install"
apt-get update && apt-get install -y git
echo "πŸ“¦ Installing Python deps (NO torch)"
pip install --no-cache-dir -r requirements.txt
echo "πŸ“¦ Editable install"
pip install --no-cache-dir -e .
wandb login 4549b8694e56e01e56c6e24350a275ebbe271e6e
export WANDB_PROJECT="vibevoice-finetune-baseten"
echo "πŸš€ Starting VibeVoice fine-tuning"
python src/finetune_vibevoice_lora.py \
--model_name_or_path vibevoice/VibeVoice-7B \
--processor_name_or_path src/vibevoice/processor \
--dataset_name PharynxAI/merged_multilingual_tts_6k_each \
--text_column_name text \
--audio_column_name audio \
--output_dir /workspace/output/lora_hi_only_v1 \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 1 \
--learning_rate 2.5e-5 \
--max_steps 30000 \
--logging_steps 10 \
--save_steps 1000 \
--save_total_limit 3 \
--eval_split_size 0.00 \
--remove_unused_columns False \
--do_train \
--bf16 True \
--gradient_clipping \
--gradient_checkpointing True \
--dataloader_pin_memory True \
--dataloader_num_workers 4 \
--ddpm_batch_mul 1 \
--diffusion_loss_weight 1.0 \
--ce_loss_weight 0.02 \
--voice_prompt_drop_rate 0.2 \
--lora_target_modules q_proj,k_proj,v_proj,o_proj,gate_proj,up_proj,down_proj \
--lr_scheduler_type cosine \
--warmup_ratio 0.03 \
--max_grad_norm 0.8 \
--optim adamw_torch \
--push_to_hub True \
--hub_model_id Diksha2001/vibevoice-multiligual-6k-v1 \
--hub_strategy every_save