#!/usr/bin/env bash
# =============================================================================
# setup_eqbench_run.sh — EQ-Bench3 Japanese-edition evaluation setup (local vLLM)
#
# Steps:
#   1. Clone the EQ-Bench3 repository
#   2. Swap in all 12 Japanese data files               [Fix-JA-D1]
#   3. Apply code patches to benchmark.py / conversation.py  [Fix-JA-D2]
#   4. Install dependencies
#   5. Generate the .env file (pointing at local vLLM)
#   6. Install the serve scripts
#   7. Print run instructions
# =============================================================================
set -euo pipefail

WORKSPACE="/workspace/eqbench-run"
# Fail fast with a clear message if the Hugging Face token is missing —
# every download below authenticates with it.
HF_TOKEN="${HF_TOKEN:?HF_TOKEN が未設定です。export HF_TOKEN='hf_xxxx'}"
HF_USERNAME="${HF_USERNAME:-YUGOROU}"
PATCH_REPO="${PATCH_REPO:-${HF_USERNAME}/teememo-eq-bench-ja}"
SCRIPTS_REPO="${SCRIPTS_REPO:-${HF_USERNAME}/Test-2}"
HF_BASE="https://huggingface.co/datasets"

echo "[setup] EQ-Bench3 日本語版 評価環境セットアップ"
echo ""

# ── Dependencies ─────────────────────────────────────────────
# Best-effort installs: errors are tolerated on purpose (the tools may
# already be present, or we may not be on a Debian-based image / have root).
echo "[setup] 依存関係のインストール..."
pip install -q requests python-dotenv tqdm numpy scipy trueskill 2>/dev/null || true
# Refresh package lists first: on a fresh container `apt-get install`
# frequently fails against stale/empty lists without this.
apt-get update -qq 2>/dev/null || true
apt-get install -y tmux git 2>/dev/null || true

# ── Clone EQ-Bench3 ──────────────────────────────────────────
mkdir -p "${WORKSPACE}"
if [ -d "${WORKSPACE}/eqbench3" ]; then
  echo "[setup] EQ-Bench3 既存を更新中..."
  # Fast-forward only; a dirty/diverged checkout is left as-is (|| true).
  git -C "${WORKSPACE}/eqbench3" pull --ff-only || true
else
  echo "[setup] EQ-Bench3 をクローン中..."
  git clone --depth=1 https://github.com/EQ-bench/eqbench3.git "${WORKSPACE}/eqbench3"
fi
echo "[setup] ✅ EQ-Bench3"

# ── [Fix-JA-D1] Replace all 12 Japanese data files ───────────
echo ""
echo "[setup] [Fix-JA-D1] 全12日本語データファイルをDL中..."
JA_COUNT=0
# Each entry is "source-name-on-HF|destination-name-in-repo".
for pair in \
  "scenario_prompts_ja.txt|scenario_prompts.txt" \
  "scenario_notes_ja.txt|scenario_notes.txt" \
  "scenario_master_prompt_ja.txt|scenario_master_prompt.txt" \
  "scenario_master_prompt_message_drafting_ja.txt|scenario_master_prompt_message_drafting.txt" \
  "scenario_master_prompt_analysis_ja.txt|scenario_master_prompt_analysis.txt" \
  "rubric_scoring_prompt_ja.txt|rubric_scoring_prompt.txt" \
  "rubric_scoring_prompt_analysis_ja.txt|rubric_scoring_prompt_analysis.txt" \
  "rubric_scoring_criteria_ja.txt|rubric_scoring_criteria.txt" \
  "rubric_scoring_criteria_analysis_ja.txt|rubric_scoring_criteria_analysis.txt" \
  "pairwise_prompt_eqbench3_ja.txt|pairwise_prompt_eqbench3.txt" \
  "pairwise_prompt_eqbench3_analysis_ja.txt|pairwise_prompt_eqbench3_analysis.txt" \
  "debrief_prompt_ja.txt|debrief_prompt.txt"; do
  SRC="${pair%%|*}"
  DEST="${WORKSPACE}/eqbench3/data/${pair##*|}"
  # Back up the English original once (first run only).
  # BUGFIX: this was a bare `[ ... ] && [ ... ] && cp` AND-list; when a test
  # failed (e.g. the .en.bak already exists on a re-run) the list returned
  # non-zero and `set -e` killed the whole script. An explicit `if` is safe.
  if [ -f "${DEST}" ] && [ ! -f "${DEST}.en.bak" ]; then
    cp "${DEST}" "${DEST}.en.bak"
  fi
  if curl -sfL \
    -H "Authorization: Bearer ${HF_TOKEN}" \
    "${HF_BASE}/${PATCH_REPO}/resolve/main/data/${SRC}" \
    -o "${DEST}"; then
    echo "[setup] ✅ ${pair##*|}"
    JA_COUNT=$((JA_COUNT + 1))
  else
    # Download failed: restore the English backup if we have one.
    # BUGFIX: same `set -e` hazard as above — was `[ -f ... ] && cp ...`,
    # which aborted the script when no backup existed.
    if [ -f "${DEST}.en.bak" ]; then
      cp "${DEST}.en.bak" "${DEST}"
    fi
    echo "[setup] ⚠️ DL失敗 → 英語版: ${pair##*|}"
  fi
done
echo "[setup] 日本語データファイル: ${JA_COUNT}/12"

# ── [Fix-JA-D2] Code patches (benchmark.py / conversation.py) ─
echo ""
echo "[setup] [Fix-JA-D2] コードパッチを適用中..."
PATCH_DIR="${WORKSPACE}/eqbench-patch"
mkdir -p "${PATCH_DIR}"
PATCH_OK=0

# Fetch both patch scripts; count successes so patches are only applied
# when the full set arrived.
for patch_script in patch_benchmark.py patch_conversation.py; do
  if ! curl -sfL \
      -H "Authorization: Bearer ${HF_TOKEN}" \
      "${HF_BASE}/${PATCH_REPO}/resolve/main/patch/${patch_script}" \
      -o "${PATCH_DIR}/${patch_script}"; then
    echo "[setup] ⚠️ DL失敗: ${patch_script}"
  else
    PATCH_OK=$((PATCH_OK + 1))
  fi
done

if [ "${PATCH_OK}" -ne 2 ]; then
  echo "[setup] ❌ パッチ取得失敗。PATCH_REPO=${PATCH_REPO} を確認してください。"
else
  # The patch scripts expect to run from the repo root.
  cd "${WORKSPACE}/eqbench3"
  python3 "${PATCH_DIR}/patch_benchmark.py"
  python3 "${PATCH_DIR}/patch_conversation.py"
  echo "[setup] ✅ コードパッチ完了"
fi
cd "${WORKSPACE}"

# ── Download the serve scripts ───────────────────────────────
echo ""
echo "[setup] serve スクリプトのDL..."
for helper in serve_test.sh serve_judge.sh; do
  if ! curl -fL \
      -H "Authorization: Bearer ${HF_TOKEN}" \
      "${HF_BASE}/${SCRIPTS_REPO}/resolve/main/eqbench-ja-run/${helper}" \
      -o "${WORKSPACE}/${helper}"; then
    echo "[setup] ❌ DL失敗: ${helper}"
  else
    chmod +x "${WORKSPACE}/${helper}"
    echo "[setup] ✅ ${helper}"
  fi
done

# ── Generate the .env file ───────────────────────────────────
echo "[setup] .env ファイルを生成中..."
cat > "${WORKSPACE}/eqbench3/.env" << ENVEOF # EQ-Bench3 ローカルvLLM設定(自動生成) # 受験者: TeenEmo LoRA (port 8000) / 採点者: Qwen3.5-35B-A3B (port 8001) TEST_API_URL=http://localhost:8000/v1/chat/completions TEST_API_KEY=dummy JUDGE_API_URL=http://localhost:8001/v1/chat/completions JUDGE_API_KEY=dummy MAX_RETRIES=3 RETRY_DELAY=5 REQUEST_TIMEOUT=300 ENVEOF echo "[setup] ✅ .env 生成完了" echo "" echo "[setup] ==============================" echo "[setup] ✅ セットアップ完了" echo "[setup] ==============================" echo "" echo "【実行手順】" echo "" echo "# Step 1: TeenEmo サーバー起動(port 8000)" echo "tmux new-session -d -s eq_run" echo "tmux new-window -t eq_run -n test" echo "tmux send-keys -t eq_run:test \"cd ${WORKSPACE} && export HF_TOKEN='\$HF_TOKEN' && ./serve_test.sh\" Enter" echo "" echo "# Step 2: 採点者サーバー起動(port 8001)" echo "tmux new-window -t eq_run -n judge" echo "tmux send-keys -t eq_run:judge \"cd ${WORKSPACE} && ./serve_judge.sh\" Enter" echo "" echo "# Step 3: 起動確認(Application startup complete. が出るまで待つ)" echo "tmux capture-pane -t eq_run:test -p | tail -3" echo "tmux capture-pane -t eq_run:judge -p | tail -3" echo "" echo "# Step 4: EQ-Bench3 実行" echo "cd ${WORKSPACE}/eqbench3" echo "python eqbench3.py \\" echo " --test-model teenemo-dpo \\" echo " --model-name TeenEmo-DPO \\" echo " --judge-model Qwen/Qwen3.5-35B-A3B \\" echo " --no-elo \\" echo " --save-interval 1 \\" echo " --iterations 1" echo "" echo "⚠️ OOMが発生した場合は順次実行モードを使用してください(README参照)"