# (viewer metadata, kept as comments so the file stays valid shell)
# File size: 1,927 Bytes
# commit: e8e3f3d
#!/bin/bash
# =====================================================================
# A40 GPU optimized training script - background launcher.
# Starts main.py under nohup, tees nothing (output goes to a
# timestamped log file), and saves the background PID to a .pid file
# so the run can be monitored or stopped later.
# =====================================================================

# Abort on use of unset variables; we deliberately do NOT use `set -e`
# so optional steps (e.g. the nvidia-smi check) cannot kill the launch.
set -u

# --- Configuration ---------------------------------------------------
EXP_NAME="trajectory_a40_temporal_optimized"
DEVICE_ID=0
SAMPLING_TYPE="ddpm"
LOG_DIR="./training_logs"
readonly EXP_NAME DEVICE_ID SAMPLING_TYPE LOG_DIR

# Create the log directory; the nohup redirection below depends on it,
# so failure here is fatal.
mkdir -p "${LOG_DIR}" || { echo "ERROR: cannot create log dir ${LOG_DIR}" >&2; exit 1; }

# Timestamp used to make each run's log file name unique.
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
LOG_FILE="${LOG_DIR}/training_${EXP_NAME}_${TIMESTAMP}.log"

# --- Startup banner --------------------------------------------------
echo "====================================="
echo "ProDiff A40优化训练启动"
echo "====================================="
echo "实验名称: ${EXP_NAME}"
echo "GPU设备: ${DEVICE_ID}"
echo "采样类型: ${SAMPLING_TYPE}"
echo "日志文件: ${LOG_FILE}"
echo "启动时间: $(date)"
echo "====================================="

# --- GPU status check (best-effort) ----------------------------------
echo "检查GPU状态..."
if command -v nvidia-smi >/dev/null 2>&1; then
  nvidia-smi
else
  echo "WARNING: nvidia-smi not found; skipping GPU check" >&2
fi

# --- Environment tuning for the A40 ----------------------------------
export CUDA_VISIBLE_DEVICES="${DEVICE_ID}"
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:128
export OMP_NUM_THREADS=8

# Sanity check: launching a background job against a missing entry
# point would die instantly with the error buried in the log file.
if [[ ! -f main.py ]]; then
  echo "ERROR: main.py not found in $(pwd); run this script from the project root" >&2
  exit 1
fi

# --- Launch training in the background -------------------------------
# -u: unbuffered Python output so the log file updates in real time.
echo "开始后台训练..."
nohup python -u main.py \
  --exp_name "${EXP_NAME}" \
  --device_id "${DEVICE_ID}" \
  --sampling_type "${SAMPLING_TYPE}" \
  --seed 42 \
  > "${LOG_FILE}" 2>&1 &

# Capture the background PID and persist it for later management.
PID=$!
echo "训练进程PID: ${PID}"
echo "日志文件: ${LOG_FILE}"
echo "${PID}" > "${LOG_DIR}/training_${EXP_NAME}.pid" \
  || echo "WARNING: could not write PID file" >&2

# --- Post-launch hints -----------------------------------------------
echo ""
echo "====================================="
echo "训练已在后台启动!"
echo "====================================="
echo "监控命令:"
echo " 查看日志: tail -f ${LOG_FILE}"
echo " 查看进程: ps aux | grep ${PID}"
echo " 停止训练: kill ${PID}"
echo " 或者: kill \$(cat ${LOG_DIR}/training_${EXP_NAME}.pid)"
echo ""
echo "GPU监控:"
echo " nvidia-smi"
echo " watch -n 1 nvidia-smi"
echo "====================================="