SignVerse-2M/slurm/process_dwpose_array.slurm
#!/usr/bin/env bash
#SBATCH --job-name=dwpose
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=8
#SBATCH --gres=gpu:1
#SBATCH --mem=32G
#SBATCH --time=24:00:00
#SBATCH --output=%x_%A_%a.out
#SBATCH --error=%x_%A_%a.err
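
# Usage sketch (assumes the default VIDEOS_PER_JOB=5; array task ids are 0-based):
#   N=$(wc -l < manifest.txt)
#   sbatch --array=0-$(( (N + 4) / 5 - 1 )) process_dwpose_array.slurm manifest.txt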
set -euo pipefail
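
# Configuration: every path and tuning knob below can be overridden via the environment.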
ROOT_DIR="${ROOT_DIR:-/home/sf895/SignVerse-2M}"
RUNTIME_ROOT="${RUNTIME_ROOT:-/home/sf895/SignVerse-2M-runtime}"
STATE_ROOT="${STATE_ROOT:-/home/sf895/SignVerse-2M-runtime}"
CONDA_SH="${CONDA_SH:-/home/sf895/miniconda3/etc/profile.d/conda.sh}"
CONDA_ENV="${CONDA_ENV:-signx2}"
RAW_VIDEO_DIR="${RAW_VIDEO_DIR:-$RUNTIME_ROOT/raw_video}"
SCRATCH_RAW_VIDEO_DIR="${SCRATCH_RAW_VIDEO_DIR:-/scratch/$USER/SignVerse-2M-runtime/raw_video}"
DATASET_DIR="${DATASET_DIR:-$RUNTIME_ROOT/dataset}"
SCRATCH_DATASET_DIR="${SCRATCH_DATASET_DIR:-/scratch/$USER/SignVerse-2M-runtime/dataset}"
STATS_NPZ="${STATS_NPZ:-$RUNTIME_ROOT/stats.npz}"
STATUS_JOURNAL_PATH="${STATUS_JOURNAL_PATH:-$RUNTIME_ROOT/upload_status_journal.jsonl}"
PIPELINE02="${PIPELINE02:-$ROOT_DIR/scripts/pipeline02_extract_dwpose_from_video.py}"
FPS="${FPS:-24}"
TMP_ROOT="${TMP_ROOT:-${SLURM_TMPDIR:-/tmp}}"
FORCE_PROCESS="${FORCE_PROCESS:-0}"
DELETE_SOURCE_ON_SUCCESS="${DELETE_SOURCE_ON_SUCCESS:-0}"
CLAIM_DIR="${CLAIM_DIR:-$STATE_ROOT/slurm/state/claims}"
RETRY_DIR="${RETRY_DIR:-$STATE_ROOT/slurm/state/gpu_init_retries}"
MAX_GPU_INIT_RETRIES="${MAX_GPU_INIT_RETRIES:-3}"
OPTIMIZED_MODE="${OPTIMIZED_MODE:-1}"
OPTIMIZED_PROVIDER="${OPTIMIZED_PROVIDER:-cuda}"
OPTIMIZED_FRAME_BATCH_SIZE="${OPTIMIZED_FRAME_BATCH_SIZE:-8}"
OPTIMIZED_DETECT_RESOLUTION="${OPTIMIZED_DETECT_RESOLUTION:-512}"
OPTIMIZED_FRAME_STRIDE="${OPTIMIZED_FRAME_STRIDE:-1}"
OPTIMIZED_IO_BINDING="${OPTIMIZED_IO_BINDING:-1}"
OPTIMIZED_GPU_DETECTOR_POSTPROCESS="${OPTIMIZED_GPU_DETECTOR_POSTPROCESS:-1}"
OPTIMIZED_GPU_POSE_PREPROCESS="${OPTIMIZED_GPU_POSE_PREPROCESS:-0}"
VIDEOS_PER_JOB="${VIDEOS_PER_JOB:-5}"
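# Manifest of video ids, one per line; taken from $MANIFEST or the first positional argument.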
MANIFEST="${MANIFEST:-${1:-}}"
if [[ -z "$MANIFEST" ]]; then
echo "MANIFEST is required (env var or first positional arg)." >&2
exit 1
fi
if [[ ! -f "$MANIFEST" ]]; then
echo "Manifest not found: $MANIFEST" >&2
exit 1
fi
if [[ -z "${SLURM_ARRAY_TASK_ID:-}" ]]; then
echo "SLURM_ARRAY_TASK_ID is required." >&2
exit 1
fi
if [[ ! -f "$CONDA_SH" ]]; then
echo "Missing conda init script: $CONDA_SH" >&2
exit 1
fi
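
# Pick this task's slice of the manifest: array task i (0-based) gets manifest
# lines i*VIDEOS_PER_JOB+1 through (i+1)*VIDEOS_PER_JOB (1-based).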
mapfile -t ALL_VIDEO_IDS < <(sed -n "$((SLURM_ARRAY_TASK_ID * VIDEOS_PER_JOB + 1)),$(((SLURM_ARRAY_TASK_ID + 1) * VIDEOS_PER_JOB))p" "$MANIFEST")
if [[ "${#ALL_VIDEO_IDS[@]}" -eq 0 ]]; then
echo "No video ids found for task index ${SLURM_ARRAY_TASK_ID} in manifest $MANIFEST" >&2
exit 1
fi
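
# Claim files (presumably created by the submitting script) mark videos as
# in-flight; release this task's claims on any exit so they can be retried.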
mkdir -p "$CLAIM_DIR" "$RETRY_DIR"
cleanup_claims() {
  local video_id
  for video_id in "${ALL_VIDEO_IDS[@]}"; do
    rm -f "$CLAIM_DIR/${video_id}.claim"
  done
}
trap cleanup_claims EXIT
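
# Keep BLAS/OpenMP thread pools within the CPUs allocated to this task.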
export OMP_NUM_THREADS="${SLURM_CPUS_PER_TASK:-1}"
export MKL_NUM_THREADS="${SLURM_CPUS_PER_TASK:-1}"
echo "[$(date '+%F %T')] job=${SLURM_JOB_ID:-na} task=${SLURM_ARRAY_TASK_ID} host=$(hostname) gpu=${CUDA_VISIBLE_DEVICES:-unset} videos=${#ALL_VIDEO_IDS[@]} first_video=${ALL_VIDEO_IDS[0]}"
# shellcheck disable=SC1090
source "$CONDA_SH"
CONDA_ENV_PREFIX="$(conda env list | awk -v env="$CONDA_ENV" '$1 == env {print $NF}')"
if [[ -z "$CONDA_ENV_PREFIX" ]]; then
echo "Unable to resolve conda env prefix for $CONDA_ENV" >&2
exit 1
fi
export LD_LIBRARY_PATH="$CONDA_ENV_PREFIX/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
export PATH="$CONDA_ENV_PREFIX/bin:$PATH"
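
# Preload the env's libstdc++/libgcc so they win over older system copies
# (a common workaround for GLIBCXX/CXXABI version errors, e.g. from onnxruntime).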
LIBSTDCXX_PATH="$CONDA_ENV_PREFIX/lib/libstdc++.so.6"
LIBGCC_PATH="$CONDA_ENV_PREFIX/lib/libgcc_s.so.1"
LD_PRELOAD_VALUE=""
if [[ -f "$LIBSTDCXX_PATH" ]]; then
LD_PRELOAD_VALUE="$LIBSTDCXX_PATH"
fi
if [[ -f "$LIBGCC_PATH" ]]; then
LD_PRELOAD_VALUE="${LD_PRELOAD_VALUE:+$LD_PRELOAD_VALUE:}$LIBGCC_PATH"
fi
export LD_PRELOAD="${LD_PRELOAD_VALUE}${LD_PRELOAD:+:$LD_PRELOAD}"
echo "Using conda env prefix=$CONDA_ENV_PREFIX"
echo "Using LD_PRELOAD=$LD_PRELOAD"
gpu_init_retry_state() {
  local video_id="$1"
  local attempts="$2"
  local host_name="$3"
  local gpu_id="$4"
  local reason="$5"
  local retry_state_path="$RETRY_DIR/${video_id}.state"
  cat > "$retry_state_path" <<STATE
attempts=$attempts
host=$host_name
gpu=$gpu_id
updated_at=$(date '+%F %T')
reason=$reason
STATE
}
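
# Read the attempt counter from a video's retry state file (0 if none exists).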
get_retry_attempts() {
  local video_id="$1"
  local retry_state_path="$RETRY_DIR/${video_id}.state"
  if [[ -f "$retry_state_path" ]]; then
    awk -F'=' '/^attempts=/{print $2}' "$retry_state_path" | tail -n 1
  else
    echo 0
  fi
}
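
# Record the video as skipped in stats.npz/the status journal and delete its
# raw sources from both home and scratch so it is not processed again.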
mark_retry_exhausted() {
  local video_id="$1"
  local attempts="$2"
  local reason="$3"
  python - <<PY
from pathlib import Path
import sys, time

root = Path(r"$ROOT_DIR")
stats_path = Path(r"$STATS_NPZ")
video_id = r"$video_id"
attempts = int(r"$attempts")
reason = r'''$reason'''
sys.path.insert(0, str(root))
from utils.stats_npz import update_video_stats_best_effort

journal_path = Path(r"$STATUS_JOURNAL_PATH")
update_video_stats_best_effort(
    stats_path,
    journal_path,
    video_id,
    process_status="skipped",
    last_error=f"gpu_init_retry_exhausted after {attempts} attempts: {reason}",
    updated_at=time.strftime("%Y-%m-%d %H:%M:%S"),
)
PY
  rm -f "$RAW_VIDEO_DIR/$video_id.mp4" "$RAW_VIDEO_DIR/$video_id.mkv" "$RAW_VIDEO_DIR/$video_id.webm" "$RAW_VIDEO_DIR/$video_id.mov"
  rm -f "$SCRATCH_RAW_VIDEO_DIR/$video_id.mp4" "$SCRATCH_RAW_VIDEO_DIR/$video_id.mkv" "$SCRATCH_RAW_VIDEO_DIR/$video_id.webm" "$SCRATCH_RAW_VIDEO_DIR/$video_id.mov"
}
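
# Heuristic: treat the failure as a retryable GPU-init problem only when no
# .npz output was produced and the log matches known CUDA/onnxruntime signatures.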
should_retry_gpu_init_failure() {
  local log_path="$1"
  local npz_dir="$2"
  local npz_count="0"
  if [[ -d "$npz_dir" ]]; then
    npz_count="$(find "$npz_dir" -maxdepth 1 -name '*.npz' | wc -l | tr -d '[:space:]')"
  fi
  if [[ "$npz_count" != "0" ]]; then
    return 1
  fi
  grep -Eiq 'CUDA failure 2: out of memory|Failed to create CUDAExecutionProvider|libcudnn\.so\.8|CUDA is not available|CUDAExecutionProvider|onnxruntime' "$log_path"
}
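
# `conda run` re-activates the env and may reset LD_* variables, so pass the
# values computed above through `env` explicitly.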
env_cmd=(env
  "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
  "LD_PRELOAD=$LD_PRELOAD"
  "PATH=$PATH"
  "CONDA_NO_PLUGINS=true"
)
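
# Arguments shared by every invocation; per-video flags are appended in the loop.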
cmd_base=(python -u "$PIPELINE02"
  --raw-video-dir "$RAW_VIDEO_DIR"
  --scratch-raw-video-dir "$SCRATCH_RAW_VIDEO_DIR"
  --dataset-dir "$DATASET_DIR"
  --scratch-dataset-dir "$SCRATCH_DATASET_DIR"
  --stats-npz "$STATS_NPZ"
  --status-journal-path "$STATUS_JOURNAL_PATH"
  --fps "$FPS"
  --workers 1
  --tmp-root "$TMP_ROOT"
)
if [[ "$OPTIMIZED_MODE" == "1" ]]; then
cmd_base+=(--optimized-mode)
cmd_base+=(--optimized-provider "$OPTIMIZED_PROVIDER")
cmd_base+=(--optimized-frame-batch-size "$OPTIMIZED_FRAME_BATCH_SIZE")
cmd_base+=(--optimized-detect-resolution "$OPTIMIZED_DETECT_RESOLUTION")
cmd_base+=(--optimized-frame-stride "$OPTIMIZED_FRAME_STRIDE")
if [[ "$OPTIMIZED_IO_BINDING" == "1" ]]; then
cmd_base+=(--optimized-io-binding)
fi
if [[ "$OPTIMIZED_GPU_DETECTOR_POSTPROCESS" == "1" ]]; then
cmd_base+=(--optimized-gpu-detector-postprocess)
fi
if [[ "$OPTIMIZED_GPU_POSE_PREPROCESS" == "1" ]]; then
cmd_base+=(--optimized-gpu-pose-preprocess)
fi
else
cmd_base+=(--legacy-mode)
fi
if [[ "$FORCE_PROCESS" == "1" ]]; then
cmd_base+=(--force)
fi
if [[ "$DELETE_SOURCE_ON_SUCCESS" == "1" ]]; then
cmd_base+=(--delete-source-on-success)
fi
overall_status=0
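
# Process assigned videos one at a time. GPU-init failures are tracked for
# retry elsewhere; exhausted retries are marked skipped; any other failure
# aborts the task with the pipeline's exit status.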
for VIDEO_ID in "${ALL_VIDEO_IDS[@]}"; do
  [[ -z "$VIDEO_ID" ]] && continue
  CLAIM_PATH="$CLAIM_DIR/${VIDEO_ID}.claim"
  RETRY_STATE_PATH="$RETRY_DIR/${VIDEO_ID}.state"
  echo "[$(date '+%F %T')] job=${SLURM_JOB_ID:-na} task=${SLURM_ARRAY_TASK_ID} host=$(hostname) gpu=${CUDA_VISIBLE_DEVICES:-unset} video_id=$VIDEO_ID"
  TMP_LOG="$(mktemp "${TMP_ROOT%/}/dwpose_${VIDEO_ID}_XXXX.log")"
  cmd=("${cmd_base[@]}" --video-ids="$VIDEO_ID")
  set +e
  "${env_cmd[@]}" conda run -n "$CONDA_ENV" "${cmd[@]}" 2>&1 | tee "$TMP_LOG"
  cmd_status=${PIPESTATUS[0]}
  set -e
  if [[ "$cmd_status" -ne 0 ]]; then
npz_dir="$DATASET_DIR/$VIDEO_ID/npz"
if [[ ! -d "$npz_dir" && -d "$SCRATCH_DATASET_DIR/$VIDEO_ID/npz" ]]; then
npz_dir="$SCRATCH_DATASET_DIR/$VIDEO_ID/npz"
fi
if should_retry_gpu_init_failure "$TMP_LOG" "$npz_dir"; then
attempts="$(get_retry_attempts "$VIDEO_ID")"
attempts="$((attempts + 1))"
last_reason="$(tail -n 80 "$TMP_LOG" | tr '\n' ' ' | sed 's/[[:space:]]\+/ /g' | cut -c1-1200)"
gpu_init_retry_state "$VIDEO_ID" "$attempts" "$(hostname)" "${CUDA_VISIBLE_DEVICES:-unset}" "$last_reason"
if [[ "$attempts" -ge "$MAX_GPU_INIT_RETRIES" ]]; then
echo "GPU init failed on multiple GPUs; marking $VIDEO_ID as skipped after $attempts attempts." >&2
mark_retry_exhausted "$VIDEO_ID" "$attempts" "$last_reason"
rm -f "$CLAIM_PATH"
continue
fi
echo "GPU init failure for $VIDEO_ID on host=$(hostname) gpu=${CUDA_VISIBLE_DEVICES:-unset}; retry attempt $attempts/$MAX_GPU_INIT_RETRIES will be resubmitted later." >&2
overall_status="$cmd_status"
break
fi
overall_status="$cmd_status"
break
fi
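  # Success: clear any stale retry state and release the claim right away.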
rm -f "$RETRY_STATE_PATH" "$CLAIM_PATH"
done
exit "$overall_status"