Datasets:

ArXiv:
ea-dev-pjlab-results / eval_agent / run_missing_evaluations.sh
shulin16's picture
Upload folder using huggingface_hub
9f3bc09 verified
#!/bin/bash
# Script to run evaluations for missing queries from a text file
# Based on open_eval_template.sh structure
#
# Usage: ./run_missing_evaluations.sh <model_name>
# Reads one query per line from:
#   ./eval_vbench_results/<model_name>/queries_to_evaluate.txt

# Abort on use of unset variables. We intentionally do NOT use `set -e`:
# individual evaluation runs may fail, and the batch loop should continue.
set -u

# Pin all work in this script to a single GPU.
export CUDA_VISIBLE_DEVICES=3

# Check if correct number of arguments provided
if [ "$#" -ne 1 ]; then
  # Usage errors are diagnostics -> stderr, not stdout.
  echo "Usage: $0 <model_name>" >&2
  echo "Example: $0 modelscope" >&2
  exit 1
fi

MODEL_NAME="$1"
QUERIES_FILE="./eval_vbench_results/$MODEL_NAME/queries_to_evaluate.txt"

# Check if queries file exists
if [ ! -f "$QUERIES_FILE" ]; then
  echo "Error: Queries file not found: $QUERIES_FILE" >&2
  exit 1
fi
# Read all queries from file into an array (one query per line, newlines stripped).
mapfile -t queries < "$QUERIES_FILE"

# Define models array (single model in this case)
models=("$MODEL_NAME")

# Define index for supplementary run (using the supp-1 folder).
# NOTE: kept as an array to mirror open_eval_template.sh's loop structure.
indexs=("supp-1")

# One timestamp per invocation; reused in every per-query FOLDER_NAME below.
timestamp=$(date +%Y-%m-%d-%H:%M:%S)

echo "========================================="
echo "Running supplementary evaluations"
echo "Model: $MODEL_NAME"
echo "Total queries: ${#queries[@]}"
# ${indexs[*]} prints ALL indices; a bare $indexs would show only element 0.
echo "Index: ${indexs[*]}"
echo "========================================="
# Loop through indexs, models, and queries (following open_eval_template.sh structure)
for ind in "${indexs[@]}"; do
  for model in "${models[@]}"; do
    # Ensure the per-model log directory exists before tee tries to append,
    # otherwise tee fails with "No such file or directory".
    mkdir -p "./logs/$model"
    for query in "${queries[@]}"; do
      # Skip empty lines
      if [ -z "$query" ]; then
        continue
      fi
      echo "===ind: $ind, model: $model, query: $query===" | tee -a "./logs/$model/$ind.log"
      # Build a filesystem-safe folder name: spaces -> underscores, drop '?'.
      # printf with a quoted arg avoids the word-splitting/globbing that
      # an unquoted `echo $query` would apply.
      export FOLDER_NAME="$ind/$timestamp-$(printf '%s' "$query" | tr ' ' '_' | tr -d '?')"
      # Run the evaluation script (output to both terminal and log)
      python eval_agent_for_vbench_open.py --user_query "$query" --model "$model" --recommend 2>&1 | tee -a "./logs/$model/$ind.log"
      unset FOLDER_NAME
    done
  done
done
# Final status banner pointing at the results directory.
# (printf '%s\n' emits each argument on its own line, same output as the
# equivalent echo-per-line sequence.)
printf '%s\n' \
  "=========================================" \
  "Evaluation complete!" \
  "Results saved in: eval_agent/eval_vbench_results/$MODEL_NAME/$indexs/" \
  "========================================="