# Hugging Face upload metadata: commit d16e9ef (verified), uploaded by royrin via huggingface_hub
#!/usr/bin/env bash
# Run Gumbel experiments for multiple models.
#
# For each model in MODELS: launch the recording experiment, tee its output to
# a per-model log, then run the two analysis scripts on the newest results dir.
#
# Strict-ish mode: fail on unset variables and surface pipeline failures.
# Deliberately NO `-e`: a failing experiment for one model must not abort the
# whole sweep — per-model failures are checked explicitly in the main loop.
set -u -o pipefail
###############################################################################
# Config
###############################################################################
readonly N_PROMPTS=5000          # prompts per model
readonly MAX_TOKENS=500          # max generated tokens per prompt
readonly GPU_MEM=0.85            # vLLM --gpu-memory-utilization fraction
readonly SIGMAS="0.001,0.01,0.1,1.0"  # comma-separated Gumbel sigma sweep
readonly SUPPORT_SIZE=1000
readonly MAX_THRESHOLDS=500
readonly MAX_MODEL_LEN=8192
# Robust HF downloads (avoid git-xet 500s)
export HF_HUB_ENABLE_GIT_XET=0
export GIT_XET=0
export HF_HUB_ENABLE_HF_TRANSFER=1
export HF_HUB_DISABLE_TELEMETRY=1
# Nice-to-have: make Python fail fast on OOMs
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512
# Timestamped sweep dir so repeated runs never collide
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
readonly TIMESTAMP
readonly SWEEP_DIR="multi_model_sweep_${TIMESTAMP}"
mkdir -p "$SWEEP_DIR"
echo "========================================================================"
echo "Running multi-model Gumbel experiments"
echo "Sweep directory: $SWEEP_DIR"
echo "Sigmas: $SIGMAS"
echo "N_prompts: $N_PROMPTS, Max_tokens: $MAX_TOKENS"
echo "========================================================================"
###############################################################################
# Models
###############################################################################
declare -a MODELS=(
  "Qwen/Qwen3-30B-A3B"
  # "meta-llama/Llama-3.1-70B-Instruct-AWQ" # uncomment for largest-on-1xH100 (quantized)
  # "Qwen/Qwen2.5-72B-Instruct-AWQ" # uncomment for largest-on-1xH100 (quantized)
  "meta-llama/Llama-3.2-3B-Instruct"
  "meta-llama/Llama-3.1-8B-Instruct"
)
###############################################################################
# Helpers
###############################################################################
#######################################
# Run both analysis passes on one model's experiment output.
# Arguments:
#   $1 - per-model output directory
#   $2 - model id (used only in log messages)
# Globals:
#   MAX_THRESHOLDS (read)
# Returns:
#   0 on success; 1 if no results folder exists or either analysis fails.
#######################################
run_analysis() {
  local model_dir="$1"
  local model_id="$2"
  echo "Running analysis..."
  # Prefer newest gumbel_cgs_analysis_results/* subdir; else fall back to /results.
  local results_dir
  results_dir=$(ls -td "${model_dir}/gumbel_cgs_analysis_results/"* 2>/dev/null | head -1)
  if [[ -z "$results_dir" ]]; then
    results_dir="${model_dir}/results"
  fi
  if [[ ! -d "$results_dir" ]]; then
    echo "βœ— No results folder found for ${model_id} (looked in ${model_dir})"
    return 1
  fi
  # `if ! cmd` instead of checking $? afterwards: nothing can clobber the
  # status between the command and the test.
  if ! python adam_analyze_thresholds.py \
      --folder "$results_dir" \
      --max-thresholds "$MAX_THRESHOLDS" \
      --skip-logistic-regression; then
    echo "βœ— Analysis failed for ${model_id}"
    return 1
  fi
  echo "βœ“ Successfully completed analysis for ${model_id}"
  echo "Running two-step classifier analysis..."
  if ! python adam_analyze_two_step_classifier.py \
      --folder "$results_dir" \
      --max-thresholds "$MAX_THRESHOLDS" \
      --rank-threshold 4; then
    echo "βœ— Two-step classifier analysis failed for ${model_id}"
    return 1
  fi
  echo "βœ“ Successfully completed two-step classifier analysis for ${model_id}"
}
###############################################################################
# Main loop
###############################################################################
# Count of failed experiment/analysis steps; used to set the final exit status
# (previously the script always exited 0, even if every model failed).
FAILED=0
for MODEL in "${MODELS[@]}"; do
  echo ""
  echo "========================================================================"
  echo "Processing model: $MODEL"
  echo "========================================================================"
  # Per-model directory & log: '/' in the HF model id becomes '_'.
  # Parameter expansion avoids the echo|tr subshell.
  MODEL_NAME="${MODEL//\//_}"
  MODEL_DIR="${SWEEP_DIR}/${MODEL_NAME}"
  mkdir -p "$MODEL_DIR"
  LOG_FILE="${MODEL_DIR}/run.log"
  # (Optional) prefetch to local HF cache to avoid mid-run pulls
  # huggingface-cli download "$MODEL" --local-dir "${MODEL_DIR}/hf_prefetch" --resume &>/dev/null
  # Run experiment; tee keeps a per-model log while streaming to the console.
  python run_gumbel_and_cgs_recording_adam_fast.py \
    --model "$MODEL" \
    --n-prompts "$N_PROMPTS" \
    --max-tokens "$MAX_TOKENS" \
    --gpu-memory-utilization "$GPU_MEM" \
    --gumbel-sigmas "$SIGMAS" \
    --sweep-dir "$MODEL_DIR" \
    --support-size "$SUPPORT_SIZE" \
    --max-model-len "$MAX_MODEL_LEN" \
    2>&1 | tee "$LOG_FILE"
  # PIPESTATUS[0] is python's exit code, not tee's; must be read immediately
  # after the pipeline.
  if [[ ${PIPESTATUS[0]} -eq 0 ]]; then
    echo "βœ“ Successfully completed experiment for $MODEL"
    # Don't discard run_analysis's status — a failed analysis counts as a failure.
    run_analysis "$MODEL_DIR" "$MODEL" || FAILED=$((FAILED + 1))
  else
    echo "βœ— Experiment failed for $MODEL (see log: $LOG_FILE)"
    FAILED=$((FAILED + 1))
  fi
done
echo ""
echo "========================================================================"
echo "All experiments complete!"
echo "Results saved to: $SWEEP_DIR"
if [[ $FAILED -gt 0 ]]; then
  echo "WARNING: ${FAILED} step(s) failed — check the per-model logs."
fi
echo "========================================================================"
echo ""
echo "To create combined plots, run:"
echo "python plot_multi_model_comparison.py --sweep-dir $SWEEP_DIR"
# Nonzero exit when anything failed, so CI/callers can detect it.
[[ $FAILED -eq 0 ]] || exit 1