#!/bin/bash
# Copyright 2024 Alibaba Inc. All Rights Reserved.
data_url=www.openslr.org/resources/60
data_dir=data
pretrained_model_dir=./pretrained_models/CosyVoice2-0.5B
# if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then
#   echo "Data Download"
#   for part in test-clean; do
#     local/download_and_untar.sh ${data_dir} ${data_url} ${part}
#   done
# fi
# if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
#   echo "Data preparation, prepare wav.scp/text/utt2spk/spk2utt"
#   for x in train-clean-100 train-clean-360 train-other-500 dev-clean dev-other test-clean test-other; do
#     mkdir -p data/$x
#     python local/prepare_data.py --src_dir $data_dir/LibriTTS/$x --des_dir data/$x
#   done
# fi
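# For reference, prepare_data.py is expected to emit Kaldi-style mapping files in
# data/$x, one entry per line (a sketch of the assumed layout, ids are illustrative):
#   wav.scp : <utt_id> <path to wav>
#   text    : <utt_id> <transcript>
#   utt2spk : <utt_id> <spk_id>
#   spk2utt : <spk_id> <utt_id_1> <utt_id_2> ...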
# if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
#   echo "Extract campplus speaker embedding, you will get spk2embedding.pt and utt2embedding.pt in data/$x dir"
#   for x in train-clean-100 train-clean-360 train-other-500 dev-clean dev-other test-clean test-other; do
#     tools/extract_embedding.py --dir data/$x \
#       --onnx_path $pretrained_model_dir/campplus.onnx
#   done
# fi
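# Quick sanity check for the extracted embeddings (a sketch, assuming the .pt files
# are pickled dicts keyed by utterance id as produced by extract_embedding.py):
# python -c "import torch; d = torch.load('data/test-clean/utt2embedding.pt'); \
#   k = next(iter(d)); print(len(d), 'utts, embedding dim', len(d[k]))"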
# if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
#   echo "Extract discrete speech token, you will get utt2speech_token.pt in data/$x dir"
#   for x in train-clean-100 train-clean-360 train-other-500 dev-clean dev-other test-clean test-other; do
#     tools/extract_speech_token.py --dir data/$x \
#       --onnx_path $pretrained_model_dir/speech_tokenizer_v2.onnx
#   done
# fi
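# Similar check for the discrete speech tokens (a sketch, assuming utt2speech_token.pt
# is a dict of token-id sequences keyed by utterance id):
# python -c "import torch; d = torch.load('data/test-clean/utt2speech_token.pt'); \
#   print(len(d), 'utts,', sum(len(v) for v in d.values()) // len(d), 'tokens per utt on average')"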
# if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
#   echo "Prepare required parquet format data, you should have prepared wav.scp/text/utt2spk/spk2utt/utt2embedding.pt/spk2embedding.pt/utt2speech_token.pt"
#   for x in train-clean-100 train-clean-360 train-other-500 dev-clean dev-other test-clean test-other; do
#     mkdir -p data/$x/parquet
#     tools/make_parquet_list.py --num_utts_per_parquet 1000 \
#       --num_processes 10 \
#       --src_dir data/$x \
#       --des_dir data/$x/parquet
#   done
# fi
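# The training step below reads data/data.list. Assuming make_parquet_list.py writes a
# data.list per split under data/$x/parquet, the combined list can be built like this
# (a sketch; split names are illustrative):
# cat data/train-clean-100/parquet/data.list \
#     data/train-clean-360/parquet/data.list \
#     data/train-other-500/parquet/data.list > data/data.list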
# train flow model
export CUDA_VISIBLE_DEVICES="0"
num_gpus=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
job_id=1986
dist_backend="nccl"
num_workers=2
prefetch=100
train_engine=torch_ddp
model=flow
torchrun --nnodes=1 --nproc_per_node=$num_gpus --rdzv_id=$job_id --rdzv_backend="c10d" --rdzv_endpoint="localhost:1234" \
  train.py \
    --train_engine $train_engine \
    --config config.yaml \
    --train_data data/data.list \
    --cv_data data/data.list \
    --qwen_pretrain_path $pretrained_model_dir/CosyVoice-BlankEN \
    --model $model \
    --model_dir /data/checkpoint/$model/ \
    --num_workers ${num_workers} \
    --prefetch ${prefetch} \
    --pin_memory \
    --use_amp \
    --checkpoint /data/checkpoint/flow/epoch_88_step_14001.pt
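# --checkpoint above resumes from a specific flow checkpoint. To resume from the most
# recently written one instead (a sketch, assuming the epoch_*_step_*.pt naming used above):
# latest_ckpt=$(ls -t /data/checkpoint/flow/epoch_*_step_*.pt | head -n 1)
# and pass it as --checkpoint $latest_ckpt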
# average model
# average_num=5
# if [ ${stage} -le 6 ] && [ ${stop_stage} -ge 6 ]; then
#   for model in llm flow hifigan; do
#     decode_checkpoint=$(pwd)/exp/cosyvoice/$model/$train_engine/${model}.pt
#     echo "do model average and final checkpoint is $decode_checkpoint"
#     python cosyvoice/bin/average_model.py \
#       --dst_model $decode_checkpoint \
#       --src_path $(pwd)/exp/cosyvoice/$model/$train_engine \
#       --num ${average_num} \
#       --val_best
#   done
# fi
# if [ ${stage} -le 7 ] && [ ${stop_stage} -ge 7 ]; then
#   echo "Export your model for inference speedup. Remember to copy your llm or flow model to model_dir"
#   python cosyvoice/bin/export_jit.py --model_dir $pretrained_model_dir
#   python cosyvoice/bin/export_onnx.py --model_dir $pretrained_model_dir
# fi