#!/bin/bash
# Dispatch one SLURM eval job per (modalities_canonical, t_obs, t_fut) tuple,
# all in parallel.
#
# Required env: PULSE_ROOT - project root containing scripts/ and results/
# Optional env: PARTITION  - SLURM partition   (default: gpuA800)
#               GPU_GRES   - GPU gres request  (default: gpu:1)
set -euo pipefail

# Fail early with a clear message rather than set -u's cryptic
# "unbound variable" on first use.
: "${PULSE_ROOT:?PULSE_ROOT must be set to the project root}"

PYTHON=python
EVAL="${PULSE_ROOT}/scripts/eval_subset.py"
PARTITION=${PARTITION:-gpuA800}
GPU_GRES=${GPU_GRES:-gpu:1}
LOG_DIR="${PULSE_ROOT}/results/eval_logs"
mkdir -p "$LOG_DIR"

# 16 distinct subsets enumerated by inspecting all results.json files.
# Each entry: <modalities_csv>|<t_obs>|<t_fut>
SUBSETS=(
  "emg,eyetrack,imu|8.0|2.0"
  "emg,eyetrack,imu,mocap|8.0|2.0"
  "emg,eyetrack,imu,mocap,pressure|8.0|1.0"
  "emg,eyetrack,imu,mocap,pressure|8.0|2.0"
  "emg,eyetrack,imu,mocap,pressure|8.0|5.0"
  "emg,eyetrack,imu,mocap,pressure|8.0|10.0"
  "emg,eyetrack,imu,mocap,pressure|8.0|15.0"
  "emg,eyetrack,imu,pressure|8.0|2.0"
  "emg,eyetrack,mocap,pressure|8.0|2.0"
  "emg,imu|8.0|2.0"
  "emg,imu,mocap|8.0|2.0"
  "emg,imu,mocap,pressure|8.0|2.0"
  "eyetrack,imu,mocap,pressure|8.0|2.0"
  "imu|8.0|2.0"
  "imu,mocap|8.0|2.0"
  "mocap|8.0|2.0"
)

idx=0
for entry in "${SUBSETS[@]}"; do
  IFS='|' read -r mods t_obs t_fut <<< "$entry"
  idx=$((idx + 1))

  # Filesystem/SLURM-safe tag: map commas and dots to underscores.
  # Pure parameter expansion — no fork, and no reliance on GNU tr
  # padding SET2 when it is shorter than SET1 (unspecified in POSIX).
  tag="${mods}_o${t_obs}_f${t_fut}"
  tag=${tag//[,.]/_}

  job_name="evalT10_${idx}_${tag}"
  job_name=${job_name:0:60}   # SLURM job names cap at ~60 chars

  out="${LOG_DIR}/${tag}.out"
  err="${LOG_DIR}/${tag}.err"
  cmd="export PYTHONUNBUFFERED=1; ${PYTHON} ${EVAL} --modalities ${mods} --t_obs ${t_obs} --t_fut ${t_fut}"

  # set -e aborts the whole dispatch if sbatch rejects a submission.
  sbatch -J "${job_name}" -p "${PARTITION}" --gres="${GPU_GRES}" \
    -N 1 -n 1 --cpus-per-task=4 --mem=32G \
    -t 0:20:00 -o "${out}" -e "${err}" \
    --export=ALL --wrap="${cmd}"
  echo "submitted ${job_name}"
done

echo ""
echo "All ${#SUBSETS[@]} dispatched. Logs: ${LOG_DIR}/"