#!/bin/bash
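# Launches DeepSpeed (ZeRO stage 2) training via omni_speech/train/export.py,
# tuning only the speech generator (--tune_speech_generator_only True) of an
# omni model built from a whisper-medium speech encoder and a Qwen2.5-3B
# backbone. The paths below are site-specific; adjust them for your machine.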

MODEL_PATH=/data1/speech/anhnmt2/Speech2Speech/half-streaming-speech-nlp/checkpoints/omni_whisper-medium_Qwen2.5-3B_pretrained-sft-fc_speech_decoder_fixed_all/checkpoint-4000
SPEECH_ENCODER=/data1/speech/anhnmt2/Speech2Speech/LLaMA-Omni/models/speech_encoder/whisper-medium
PROMPT_VERSION=qwen
DATA_PATH=/data1/speech/anhnmt2/dataset/s2s/english/qna/moss/moss_100K_phase3_tgt_units_processed.jsonl
# DEV_PATH=/data1/speech/anhnmt2/dataset/s2s/english/qna/dev_20250103.jsonl
CACHE_DIR="../output/cached_sft_speech_decoder_20250103"
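
# Optional sanity check (a sketch, not part of the original script): fail
# fast if the checkpoint, encoder, or data paths do not exist on this host.
for p in "$MODEL_PATH" "$SPEECH_ENCODER" "$DATA_PATH"; do
    [ -e "$p" ] || { echo "missing path: $p" >&2; exit 1; }
done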

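# zero2.json is the DeepSpeed ZeRO stage-2 config consumed below. A minimal
# sketch that works with the HF Trainer integration (an assumption, not
# necessarily the repo's actual file):
# {
#   "bf16": { "enabled": "auto" },
#   "zero_optimization": { "stage": 2 },
#   "train_micro_batch_size_per_gpu": "auto",
#   "gradient_accumulation_steps": "auto"
# }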
deepspeed --master_port 29501 ../omni_speech/train/export.py \
    --deepspeed zero2.json \
    --model_name_or_path "$MODEL_PATH" \
    --version "$PROMPT_VERSION" \
    --data_path "$DATA_PATH" \
    --cache_dir "$CACHE_DIR" \
    --speech_encoder "$SPEECH_ENCODER" \
    --mel_size 80 \
    --speech_encoder_hidden_size 1024 \
    --speech_encoder_type whisper \
    --tune_speech_generator_only True \
    --bf16 True \
    --output_dir ../checkpoints/tmp \
    --num_train_epochs 8 \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 2 \
    --evaluation_strategy "no" \
    --save_strategy "steps" \
    --save_steps 2000 \
    --save_total_limit 1 \
    --learning_rate 1e-4 \
    --weight_decay 0. \
    --warmup_ratio 0.03 \
    --logging_steps 10 \
    --tf32 True \
    --model_max_length 2048 \
    --gradient_checkpointing True \
    --dataloader_num_workers 8 \
    --has_tgt_units True