YUGOROU committed on
Commit
f57f05f
·
verified ·
1 Parent(s): 18a46c9

fix: eqbench-vast/setup_eqbench_vast.sh (JA-D1 + JA-D2 patches)

Browse files
Files changed (1) hide show
  1. eqbench-vast/setup_eqbench_vast.sh +74 -72
eqbench-vast/setup_eqbench_vast.sh CHANGED
@@ -1,18 +1,19 @@
1
  #!/usr/bin/env bash
2
  # =============================================================================
3
- # setup_eqbench_vast.sh — EQ-Bench3 評価セットアップ (Vast.ai + HF Inference Providers)
4
  #
5
  # 構成:
6
  # 受験者: TeenEmo-LFM2.5-1.2B-DPO (LoRA) on Vast.ai vLLM (port 8000)
7
  # 採点者: openai/gpt-oss-120b via HF Inference Providers / novita
8
- # → 別インスタンス不要・OOM不要・$0.05/$0.25 per 1M tokens
9
  #
10
- # 今セッションで判明した問題と対策:
11
- # [Fix-1] max_lora_rank=32: Unsloth LoRAはrank=32のためデフォルト16では失敗
12
- # [Fix-2] max_model_len=32768: EQ-Bench3のmax_tokens=12000に対応
13
- # [Fix-3] HF Inference ProvidersのJudge URL: router.huggingface.co/novita
14
- # [Fix-4] --save-interval 1: 1タスクごとに保存して途中再開を保証
15
- # [Fix-5] MAX_RETRIES=6, REQUEST_TIMEOUT=300: HF APIレート制限・遅延対応
 
 
16
  # =============================================================================
17
 
18
  set -euo pipefail
@@ -23,10 +24,14 @@ WORKSPACE="/workspace/eqbench-run"
23
  TEST_PORT="${TEST_PORT:-8000}"
24
  LORA_NAME="${TEST_LORA_NAME:-teenemo-dpo}"
25
 
26
- echo "[setup] EQ-Bench3 Vast.ai セットアップ"
 
 
 
27
  echo "[setup] 採点者: HF Inference Providers / novita / gpt-oss-120b"
 
28
 
29
- # ── 依存パッケージ ──────────────────────────────────────────
30
  echo "[setup] 依存パッケージのインストール..."
31
  pip install -q requests python-dotenv tqdm numpy scipy trueskill 2>/dev/null || true
32
  apt-get install -y tmux git 2>/dev/null || true
@@ -42,72 +47,80 @@ else
42
  fi
43
  echo "[setup] ✅ EQ-Bench3"
44
 
45
- # ── 日本語版シナリオDL ────────────────────
46
- echo "[setup] 日本語版シナリオをDL中..."
 
47
  JA_COUNT=0
48
  for pair in \
49
- "scenario_prompts_ja.txt|${WORKSPACE}/eqbench3/data/scenario_prompts.txt" \
50
- "scenario_notes_ja.txt|${WORKSPACE}/eqbench3/data/scenario_notes.txt" \
51
- "scenario_master_prompt_message_drafting_ja.txt|${WORKSPACE}/eqbench3/data/scenario_master_prompt_message_drafting.txt" \
52
- "scenario_master_prompt_analysis_ja.txt|${WORKSPACE}/eqbench3/data/scenario_master_prompt_analysis.txt" \
53
- "scenario_master_prompt_ja.txt|${WORKSPACE}/eqbench3/data/scenario_master_prompt.txt" \
54
- "rubric_scoring_prompt_analysis_ja.txt|${WORKSPACE}/eqbench3/data/rubric_scoring_prompt_analysis.txt" \
55
- "rubric_scoring_prompt_ja.txt|${WORKSPACE}/eqbench3/data/rubric_scoring_prompt.txt" \
56
- "rubric_scoring_criteria_analysis_ja.txt|${WORKSPACE}/eqbench3/data/rubric_scoring_criteria_analysis.txt" \
57
- "rubric_scoring_criteria_ja.txt|${WORKSPACE}/eqbench3/data/rubric_scoring_criteria.txt" \
58
- "pairwise_prompt_eqbench3_analysis_ja.txt|${WORKSPACE}/eqbench3/data/pairwise_prompt_eqbench3_analysis.txt" \
59
- "pairwise_prompt_eqbench3_ja.txt|${WORKSPACE}/eqbench3/data/pairwise_prompt_eqbench3.txt" \
60
- "debrief_prompt_ja.txt|${WORKSPACE}/eqbench3/data/debrief_prompt.txt"; do
61
- SRC="${pair%%|*}"; DEST="${pair##*|}"
62
- cp "${DEST}" "${DEST}.en.bak" 2>/dev/null || true
63
- if curl -sfL -H "Authorization: Bearer ${HF_TOKEN}" \
64
- "https://huggingface.co/datasets/${HF_USERNAME}/teememo-eq-bench-ja/resolve/main/data/${SRC}" \
 
 
65
  -o "${DEST}"; then
66
- echo "[setup] 日本語版差し替え: $(basename ${DEST})"
67
  JA_COUNT=$((JA_COUNT + 1))
68
  else
69
- cp "${DEST}.en.bak" "${DEST}" 2>/dev/null || true
70
- echo "[setup] ⚠️ 英語版を使用: $(basename ${DEST})"
71
  fi
72
  done
 
73
 
74
- # 結果サマリ
75
  echo ""
76
- echo "[setup] 日本語版DL完了: ${JA_COUNT}/12 ファイル"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77
 
78
  # ── serve_test_vast.sh DL ────────────────────────────────────
79
- curl -fL -H "Authorization: Bearer ${HF_TOKEN}" \
80
- "https://huggingface.co/datasets/${HF_USERNAME}/Test-2/resolve/main/eqbench-vast/serve_test_vast.sh" \
 
81
  -o "${WORKSPACE}/serve_test_vast.sh"
82
  chmod +x "${WORKSPACE}/serve_test_vast.sh"
83
  echo "[setup] ✅ serve_test_vast.sh"
84
 
85
- # ── .env 生成 ────────────────────────────────────────────────
86
- # [Fix-3] HF Inference Providers の正しいURL
87
- # [Fix-5] タイムアウト・リトライ値を強化
88
- cat > "${WORKSPACE}/eqbench3/.env" <<'ENVEOF'
89
- # ==========================================================
90
- # EQ-Bench3 設定 (Vast.ai + HF Inference Providers)
91
- # 自動生成: $(date -u +%Y-%m-%dT%H:%M:%SZ)
92
- #
93
- # 受験者: TeenEmo LoRA on vLLM (localhost:${TEST_PORT})
94
- # [Fix-1] --max-lora-rank 32 をserve_test_vast.shで設定済み
95
- # [Fix-2] --max-model-len 32768 をserve_test_vast.shで設定済み
96
- #
97
- # 採点者: openai/gpt-oss-120b via HF Inference Providers (novita)
98
- # 推定コスト: ~\$0.024 (46シナリオ, Pro \$2クレジット内)
99
- # Input: \$0.05/1M tokens, Output: \$0.25/1M tokens
100
- # ==========================================================
101
-
102
- # 受験者: TeenEmo (vLLM ローカル)
103
  TEST_API_URL=http://localhost:${TEST_PORT}/v1/chat/completions
104
  TEST_API_KEY=dummy
105
-
106
- # [Fix-3] 採点者: HF Inference Providers → novita → gpt-oss-120b
107
  JUDGE_API_URL=https://router.huggingface.co/novita/v1/chat/completions
108
  JUDGE_API_KEY=${HF_TOKEN}
109
-
110
- # [Fix-5] HF APIのレート制限・遅延対応
111
  MAX_RETRIES=6
112
  RETRY_DELAY=10
113
  REQUEST_TIMEOUT=300
@@ -116,27 +129,16 @@ echo "[setup] ✅ .env 生成完了"
116
 
117
  echo ""
118
  echo "[setup] =============================="
119
- echo "[setup] セットアップ完了"
120
  echo "[setup] =============================="
121
  echo ""
122
- echo "【実行手順】"
123
- echo ""
124
- echo "# Step 1: tmuxセッション作成 + TeenEmoサーバー起動"
125
  echo "tmux new-session -d -s eq_run"
126
  echo "tmux send-keys -t eq_run \"cd ${WORKSPACE} && export HF_TOKEN='${HF_TOKEN}' && ./serve_test_vast.sh\" Enter"
127
  echo ""
128
- echo "# Step 2: 起動確認(Application startup complete. まで待つ)"
129
  echo "tmux capture-pane -t eq_run -p | tail -5"
130
  echo ""
131
  echo "# Step 3: EQ-Bench3 実行"
132
  echo "cd ${WORKSPACE}/eqbench3"
133
- echo "python eqbench3.py \\"
134
- echo " --test-model ${LORA_NAME} \\"
135
- echo " --model-name TeenEmo-DPO \\"
136
- echo " --judge-model openai/gpt-oss-120b \\"
137
- echo " --no-elo \\"
138
- echo " --save-interval 1 \\"
139
- echo " --iterations 1"
140
- echo ""
141
- echo "# [Fix-4] 途中で失敗した場合は再実行するとチェックポイントから再開"
142
- echo "# 完了済みタスクはスキップされる"
 
1
  #!/usr/bin/env bash
2
  # =============================================================================
3
+ # setup_eqbench_vast.sh — EQ-Bench3 日本語版セットアップ (Vast.ai)
4
  #
5
  # 構成:
6
  # 受験者: TeenEmo-LFM2.5-1.2B-DPO (LoRA) on Vast.ai vLLM (port 8000)
7
  # 採点者: openai/gpt-oss-120b via HF Inference Providers / novita
 
8
  #
9
+ # 修正履歴:
10
+ # [Fix-1] max_lora_rank=32: Unsloth LoRA rank=32
11
+ # [Fix-2] max_model_len=32768: EQ-Bench3 max_tokens=12000 に対応
12
+ # [Fix-3] HF Inference Providers Judge URL: router.huggingface.co/novita
13
+ # [Fix-4] --save-interval 1: 途中再開を保証
14
+ # [Fix-5] MAX_RETRIES=6, REQUEST_TIMEOUT=300: HF API レート制限・遅延対応
15
+ # [Fix-JA-D1] 全12日本語データファイルを差し替え(一部だけでは不十分)
16
+ # [Fix-JA-D2] benchmark.py / conversation.py にコードパッチを適用
17
  # =============================================================================
18
 
19
  set -euo pipefail
 
24
  TEST_PORT="${TEST_PORT:-8000}"
25
  LORA_NAME="${TEST_LORA_NAME:-teenemo-dpo}"
26
 
27
+ PATCH_REPO="${PATCH_REPO:-${HF_USERNAME}/teememo-eq-bench-ja}"
28
+ SCRIPTS_REPO="${SCRIPTS_REPO:-${HF_USERNAME}/Test-2}"
29
+
30
+ echo "[setup] EQ-Bench3 日本語版 Vast.ai セットアップ"
31
  echo "[setup] 採点者: HF Inference Providers / novita / gpt-oss-120b"
32
+ echo ""
33
 
34
+ # ── 依存パッケージ ────────────────────────────────────────────
35
  echo "[setup] 依存パッケージのインストール..."
36
  pip install -q requests python-dotenv tqdm numpy scipy trueskill 2>/dev/null || true
37
  apt-get install -y tmux git 2>/dev/null || true
 
47
  fi
48
  echo "[setup] ✅ EQ-Bench3"
49
 
50
+ # ── [Fix-JA-D1] 全12日本語データファイルを差し替え ──────────
51
+ echo ""
52
+ echo "[setup] [Fix-JA-D1] 全12日本語データファイルをDL中..."
53
  JA_COUNT=0
54
  for pair in \
55
+ "scenario_prompts_ja.txt|scenario_prompts.txt" \
56
+ "scenario_notes_ja.txt|scenario_notes.txt" \
57
+ "scenario_master_prompt_ja.txt|scenario_master_prompt.txt" \
58
+ "scenario_master_prompt_message_drafting_ja.txt|scenario_master_prompt_message_drafting.txt" \
59
+ "scenario_master_prompt_analysis_ja.txt|scenario_master_prompt_analysis.txt" \
60
+ "rubric_scoring_prompt_ja.txt|rubric_scoring_prompt.txt" \
61
+ "rubric_scoring_prompt_analysis_ja.txt|rubric_scoring_prompt_analysis.txt" \
62
+ "rubric_scoring_criteria_ja.txt|rubric_scoring_criteria.txt" \
63
+ "rubric_scoring_criteria_analysis_ja.txt|rubric_scoring_criteria_analysis.txt" \
64
+ "pairwise_prompt_eqbench3_ja.txt|pairwise_prompt_eqbench3.txt" \
65
+ "pairwise_prompt_eqbench3_analysis_ja.txt|pairwise_prompt_eqbench3_analysis.txt" \
66
+ "debrief_prompt_ja.txt|debrief_prompt.txt"; do
67
+ SRC="${pair%%|*}"
68
+ DEST="${WORKSPACE}/eqbench3/data/${pair##*|}"
69
+ [ -f "${DEST}" ] && [ ! -f "${DEST}.en.bak" ] && cp "${DEST}" "${DEST}.en.bak"
70
+ if curl -sfL \
71
+ -H "Authorization: Bearer ${HF_TOKEN}" \
72
+ "https://huggingface.co/datasets/${PATCH_REPO}/resolve/main/data/${SRC}" \
73
  -o "${DEST}"; then
74
+ echo "[setup] ✅ ${pair##*|}"
75
  JA_COUNT=$((JA_COUNT + 1))
76
  else
77
+ [ -f "${DEST}.en.bak" ] && cp "${DEST}.en.bak" "${DEST}"
78
+ echo "[setup] ⚠️ DL失敗 英語版: ${pair##*|}"
79
  fi
80
  done
81
+ echo "[setup] 日本語データファイル: ${JA_COUNT}/12"
82
 
83
+ # ── [Fix-JA-D2] コードパッチ (benchmark.py / conversation.py) ─
84
  echo ""
85
+ echo "[setup] [Fix-JA-D2] コードパッチを適用中..."
86
+ PATCH_DIR="${WORKSPACE}/eqbench-patch"
87
+ mkdir -p "${PATCH_DIR}"
88
+ PATCH_OK=0
89
+ for pf in patch_benchmark.py patch_conversation.py; do
90
+ if curl -sfL \
91
+ -H "Authorization: Bearer ${HF_TOKEN}" \
92
+ "https://huggingface.co/datasets/${PATCH_REPO}/resolve/main/patch/${pf}" \
93
+ -o "${PATCH_DIR}/${pf}"; then
94
+ PATCH_OK=$((PATCH_OK + 1))
95
+ else
96
+ echo "[setup] ⚠️ DL失敗: ${pf}"
97
+ fi
98
+ done
99
+
100
+ if [ "${PATCH_OK}" -eq 2 ]; then
101
+ cd "${WORKSPACE}/eqbench3"
102
+ python3 "${PATCH_DIR}/patch_benchmark.py"
103
+ python3 "${PATCH_DIR}/patch_conversation.py"
104
+ echo "[setup] ✅ コードパッチ完了"
105
+ else
106
+ echo "[setup] ❌ パッチファイル取得失敗。PATCH_REPO=${PATCH_REPO} を確認してください。"
107
+ fi
108
 
109
  # ── serve_test_vast.sh DL ────────────────────────────────────
110
+ curl -fL \
111
+ -H "Authorization: Bearer ${HF_TOKEN}" \
112
+ "https://huggingface.co/datasets/${SCRIPTS_REPO}/resolve/main/eqbench-vast/serve_test_vast.sh" \
113
  -o "${WORKSPACE}/serve_test_vast.sh"
114
  chmod +x "${WORKSPACE}/serve_test_vast.sh"
115
  echo "[setup] ✅ serve_test_vast.sh"
116
 
117
+ # ── .env 生成 ────────────────────────────────────────────────
118
+ cat > "${WORKSPACE}/eqbench3/.env" << ENVEOF
119
+ # EQ-Bench3 日本語版設定 (Vast.ai + HF Inference Providers)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
120
  TEST_API_URL=http://localhost:${TEST_PORT}/v1/chat/completions
121
  TEST_API_KEY=dummy
 
 
122
  JUDGE_API_URL=https://router.huggingface.co/novita/v1/chat/completions
123
  JUDGE_API_KEY=${HF_TOKEN}
 
 
124
  MAX_RETRIES=6
125
  RETRY_DELAY=10
126
  REQUEST_TIMEOUT=300
 
129
 
130
  echo ""
131
  echo "[setup] =============================="
132
+ echo "[setup] セットアップ完了"
133
  echo "[setup] =============================="
134
  echo ""
135
+ echo "# Step 1: TeenEmo サーバー起動"
 
 
136
  echo "tmux new-session -d -s eq_run"
137
  echo "tmux send-keys -t eq_run \"cd ${WORKSPACE} && export HF_TOKEN='${HF_TOKEN}' && ./serve_test_vast.sh\" Enter"
138
  echo ""
139
+ echo "# Step 2: 起動確認"
140
  echo "tmux capture-pane -t eq_run -p | tail -5"
141
  echo ""
142
  echo "# Step 3: EQ-Bench3 実行"
143
  echo "cd ${WORKSPACE}/eqbench3"
144
+ echo "python eqbench3.py --test-model ${LORA_NAME} --model-name TeenEmo-DPO --judge-model openai/gpt-oss-120b --no-elo --save-interval 1 --iterations 1"