Datasets: ArXiv
File size: 1,019 Bytes
Commit: 9f3bc09

#!/bin/bash
set -euo pipefail


# Use the llamafac conda environment's binaries (python, vllm) by prepending its bin/ to PATH.
CONDA_ROOT="/mnt/hwfile/zhangfan.p/miniconda3"
ENV_PATH="$CONDA_ROOT/envs/llamafac"
export PATH="$ENV_PATH/bin:$PATH"

read -p "Enter config (0 for fine-tuned, 1 for base): " CONFIG
export CONFIG=${CONFIG:-0}
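# Config 0 serves the fine-tuned eval-agent checkpoint; config 1 serves the base
# Qwen2.5-3B-Instruct model. Each config gets its own GPU and port so both can run side by side.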

if [ "$CONFIG" -eq 0 ]; then
    export CUDA_VISIBLE_DEVICES=1
    export MODEL_NAME="ea-dev/eval-agent-vbench-base-table"
    export PORT=12333
    export GPU_MEMORY_UTILIZATION=0.7
else
    export CUDA_VISIBLE_DEVICES=2
    export MODEL_NAME="qwen/Qwen2.5-3B-Instruct"
    export PORT=12334
    export GPU_MEMORY_UTILIZATION=0.7
fi

# Launch the vLLM OpenAI-compatible API server (model is passed via --model)

echo "Starting Qwen2.5-3B eval agent server on 0.0.0.0:${PORT}..."
echo "Model: ${MODEL_NAME}"
echo "GPU Memory Utilization: ${GPU_MEMORY_UTILIZATION}"

exec python -u -m vllm.entrypoints.openai.api_server \
    --model "${MODEL_NAME}" \
    --host 0.0.0.0 \
    --port "${PORT}" \
    --gpu-memory-utilization "${GPU_MEMORY_UTILIZATION}" \
    --trust-remote-code \
    --max-model-len 16384 \
    --served-model-name eval-agent
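
# Once the server is up it exposes vLLM's OpenAI-compatible API under the alias set by
# --served-model-name. A minimal smoke test (sketch; assumes config 0, i.e. PORT=12333,
# and that curl is available on this host):
#
#   curl http://localhost:12333/v1/models
#   curl http://localhost:12333/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "eval-agent", "messages": [{"role": "user", "content": "Hello"}]}'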