#!/usr/bin/env bash
#
# Pretraining launcher for the "korean_1b_fp8" run.
#
# Wraps a multi-GPU `torchrun` invocation of train/pretrain.py: it validates
# that the tokenised train/val data files exist, pins NCCL / CPU-thread
# tuning knobs, prints a run banner, then launches training and tees the
# (warning-filtered) output into the run's log file.
#
# Environment overrides (defaults applied below):
#   RUN_NAME, CONFIG, TRAIN_DATA, VAL_DATA, MASTER_PORT
#
# Usage: <script> [extra args forwarded verbatim to train/pretrain.py]

# Abort on command errors, unset variables, and failures in any pipeline stage.
set -euo pipefail
|
|
| |
# ---------------------------------------------------------------------------
# Run configuration. Every knob may be overridden from the environment, e.g.
#   RUN_NAME=my_run MAX_STEPS=1000 ./run.sh
# (Previously only the first group was overridable; the schedule values are
# now generalized to the same ${VAR:-default} convention, defaults unchanged.)
# ---------------------------------------------------------------------------
RUN_NAME="${RUN_NAME:-korean_1b_fp8_run1}"
CONFIG="${CONFIG:-configs/korean_1b_fp8.yaml}"
TRAIN_DATA="${TRAIN_DATA:-data/korean_train.bin}"
VAL_DATA="${VAL_DATA:-data/korean_val.bin}"
CKPT_DIR="checkpoints/${RUN_NAME}"   # derived from RUN_NAME on purpose
LOG_FILE="${CKPT_DIR}/train.log"
NPROC="${NPROC:-8}"                  # GPUs (ranks) per node
MASTER_PORT="${MASTER_PORT:-29501}"

# Optimisation schedule.
MAX_STEPS="${MAX_STEPS:-34000}"
BATCH_SIZE="${BATCH_SIZE:-8}"        # per-GPU micro-batch
GRAD_ACCUM="${GRAD_ACCUM:-4}"
WARMUP_STEPS="${WARMUP_STEPS:-2000}"
SEED="${SEED:-42}"

# Extra CLI args forwarded verbatim to train/pretrain.py.
# Kept as a single space-joined string because it is deliberately expanded
# unquoted (word-split) at the torchrun call site; individual extra args
# therefore must not contain whitespace. "$*" makes the join explicit
# (assigning "$@" to a scalar is the classic SC2124 mistake).
EXTRA_ARGS="$*"
|
|
| |
| |
# ---------------------------------------------------------------------------
# NCCL / CPU-thread tuning.
# NOTE(review): these look hand-tuned for one specific node — confirm before
# reusing on different hardware or multi-node setups.
# ---------------------------------------------------------------------------

# Disable NCCL's InfiniBand transport (falls back to socket transport);
# presumably this node has no usable IB fabric — TODO confirm.
export NCCL_IB_DISABLE=1
# Pin the collective algorithm instead of letting NCCL auto-tune.
export NCCL_ALGO=Ring
# Pin the low-level protocol (Simple, as opposed to LL/LL128).
export NCCL_PROTO=Simple
# Force exactly 16 communication channels (min == max pins the value).
export NCCL_MIN_NCHANNELS=16
export NCCL_MAX_NCHANNELS=16
# Per-channel buffer size: 67108864 bytes = 64 MiB.
export NCCL_BUFFSIZE=67108864
# Cap CPU threading per rank so NPROC ranks don't oversubscribe host cores.
export OMP_NUM_THREADS=4
export MKL_NUM_THREADS=4
|
|
| |
# Run from the repository root (one level above this script's directory) so
# all relative paths below (configs/, data/, checkpoints/, train/) resolve.
cd "$(dirname "$0")/.."
|
|
| |
# Bail out early, with actionable guidance, if the training shard is missing.
if [[ ! -f "${TRAIN_DATA}" ]]; then
  cat <<EOF
==================================================================
 ERROR: Training data not found: ${TRAIN_DATA}

 You need to run the Korean data pipeline first.
 Example steps:
 1. Download / prepare raw Korean corpus
 2. Tokenise and pack into binary format:
 python data/prepare_korean.py --output data/korean_train.bin
 3. Re-run this script once the file exists.
==================================================================
EOF
  exit 1
fi
|
|
# Same early-exit guard for the validation shard.
if [[ ! -f "${VAL_DATA}" ]]; then
  cat <<EOF
==================================================================
 ERROR: Validation data not found: ${VAL_DATA}

 You need to run the Korean data pipeline first.
 Example steps:
 1. Download / prepare raw Korean corpus
 2. Tokenise and pack into binary format (val split):
 python data/prepare_korean.py --output_val data/korean_val.bin
 3. Re-run this script once the file exists.
==================================================================
EOF
  exit 1
fi
|
|
# Create the checkpoint directory, announce the run configuration, and quiet
# a known-noisy torch.library UserWarning at the Python level.
mkdir -p "${CKPT_DIR}"

cat <<EOF
==================================================================
 Run name    : ${RUN_NAME}
 Config      : ${CONFIG}
 Train data  : ${TRAIN_DATA}
 Val data    : ${VAL_DATA}
 CKPT dir    : ${CKPT_DIR}
 Log file    : ${LOG_FILE}
 Max steps   : ${MAX_STEPS}
 Batch size  : ${BATCH_SIZE} (local) × ${NPROC} GPU × ${GRAD_ACCUM} grad_accum
 Warmup      : ${WARMUP_STEPS} steps
 Master port : ${MASTER_PORT}
 Started     : $(date)
==================================================================
EOF

export PYTHONWARNINGS="ignore::UserWarning:torch.library"
|
|
# Launch distributed training: torchrun spawns NPROC worker processes,
# combined stdout+stderr are filtered to drop known-noisy warning spam,
# then mirrored to the console and appended to the log file.
#
# BUG FIX: under `set -o pipefail`, a `grep -v` stage exits 1 whenever it
# emits no lines (all output filtered, or no output at all), which made a
# *successful* run look like a failure. We now use a single multi-pattern
# grep and propagate torchrun's own exit status via PIPESTATUS instead of
# trusting the pipeline's aggregate status.
set +e
# shellcheck disable=SC2086  # EXTRA_ARGS is intentionally word-split.
torchrun \
  --nproc_per_node="${NPROC}" \
  --master_port="${MASTER_PORT}" \
  train/pretrain.py \
  --config "${CONFIG}" \
  --train_data "${TRAIN_DATA}" \
  --val_data "${VAL_DATA}" \
  --checkpoint_dir "${CKPT_DIR}" \
  --log_file "${LOG_FILE}" \
  --max_steps "${MAX_STEPS}" \
  --batch_size "${BATCH_SIZE}" \
  --grad_accum "${GRAD_ACCUM}" \
  --warmup_steps "${WARMUP_STEPS}" \
  --seed "${SEED}" \
  ${EXTRA_ARGS} \
  2>&1 | grep -v \
    -e "UserWarning" \
    -e "Warning only once" \
    -e "Overriding a previously" \
    -e "dispatch key:" \
    -e "previous kernel:" \
    -e "new kernel:" \
    -e "operator: flash_attn" \
    -e "registered at /usr/local" \
    -e "self.m.impl" \
  | tee -a "${LOG_FILE}"
train_status=${PIPESTATUS[0]}
set -e

if (( train_status != 0 )); then
  echo "torchrun exited with status ${train_status}" >&2
  exit "${train_status}"
fi
|
|
# Closing banner with the wall-clock finish time.
printf '%s\n' "=================================================================="
printf '%s\n' " Done        : $(date)"
printf '%s\n' "=================================================================="
|
|