TeenEmo-Scripts / eqbench-mac /setup_eqbench_mac.sh
YUGOROU's picture
fix: sync
29fe2c6 verified
raw
history blame
3.46 kB
#!/usr/bin/env bash
# EQ-Bench3 setup for macOS / M3 MacBook.
# Test model (examinee): TeenEmo via MLX (mlx_lm.server, port 8000)
# Judge model: openai/gpt-oss-120b via HF Inference Providers / novita
set -euo pipefail
# HF_TOKEN must come from the environment; abort with a message if unset.
HF_TOKEN="${HF_TOKEN:?HF_TOKEN が未設定です。}"
WORKSPACE="${HOME}/eqbench-teenemo"
VENV_DIR="${WORKSPACE}/.venv"
# Port for mlx_lm.server; override via LLAMA_SERVER_PORT.
PORT="${LLAMA_SERVER_PORT:-8000}"
# Base model served by mlx_lm.server, with the LoRA adapter applied on top.
# NOTE(review): the original comment claimed an mlx-community 6-bit quantized
# model, but BASE_MODEL points at LiquidAI/LFM2.5-1.2B-Base — confirm intent.
BASE_MODEL="LiquidAI/LFM2.5-1.2B-Base"
LORA_REPO="YUGOROU/TeenEmo-LFM2.5-1.2B-DPO"
# Local download target for the adapter (mlx_lm.server needs a local path).
LORA_LOCAL="${WORKSPACE}/adapters/teenemo-dpo"
# Banner, workspace creation, and EQ-Bench3 checkout
# (fast-forward update when the clone already exists).
printf '%s\n' \
  "======================================" \
  " EQ-Bench3 macOS セットアップ" \
  " 作業ディレクトリ: ${WORKSPACE}" \
  "======================================"
mkdir -p "${WORKSPACE}"
EQB_DIR="${WORKSPACE}/eqbench3"
if [[ -d "${EQB_DIR}" ]]; then
  # Best-effort refresh; offline runs keep the existing checkout.
  git -C "${EQB_DIR}" pull --ff-only 2>/dev/null || true
else
  git clone --depth=1 https://github.com/EQ-bench/eqbench3.git "${EQB_DIR}"
fi
echo "✅ EQ-Bench3"
# Create the uv virtual environment (idempotent) and install dependencies
# into it: EQ-Bench3 requirements plus mlx-lm and huggingface_hub.
cd "${WORKSPACE}/eqbench3"
PYBIN="${VENV_DIR}/bin/python"
uv venv "${VENV_DIR}" --python 3.11 2>/dev/null || true
uv pip install --python "${PYBIN}" \
  -r requirements.txt \
  mlx-lm \
  huggingface_hub
echo "✅ 依存パッケージ"
# Download the LoRA adapter to a local directory.
# mlx_lm.server does not accept an HF repo ID directly, so a local path is needed.
# Fix 1: the progress message previously concatenated repo and path with no
#        separator; add " -> " between them.
# Fix 2: the repo/token/path were interpolated into an unquoted heredoc, so a
#        quote character in any of them would break (or inject into) the Python
#        program. Pass them via the environment and quote the heredoc delimiter.
echo "LoRAアダプタをダウンロード中: ${LORA_REPO} -> ${LORA_LOCAL}"
LORA_REPO="${LORA_REPO}" HF_TOKEN="${HF_TOKEN}" LORA_LOCAL="${LORA_LOCAL}" \
  "${VENV_DIR}/bin/python" - << 'PYEOF'
import os
from huggingface_hub import snapshot_download
path = snapshot_download(
    repo_id=os.environ["LORA_REPO"],
    repo_type="model",
    token=os.environ["HF_TOKEN"],
    local_dir=os.environ["LORA_LOCAL"],
)
print(f"✅ LoRAアダプタ: {path}")
PYEOF
# Swap in the Japanese scenario files, keeping an English backup.
# Fix: curl -f -o "${DEST}" creates/clobbers DEST before the transfer succeeds,
# so a failed download could leave an empty scenario file. Download to a temp
# file and move it into place only on success; the English original is kept
# untouched on failure.
for pair in \
  "scenario_prompts_ja.txt|data/scenario_prompts.txt" \
  "scenario_notes_ja.txt|data/scenario_notes.txt"; do
  SRC="${pair%%|*}"; DEST="${pair##*|}"
  # Best-effort backup of the English original (may not exist on re-runs).
  cp "${DEST}" "${DEST}.en.bak" 2>/dev/null || true
  TMP="$(mktemp)"
  if curl -sfL -H "Authorization: Bearer ${HF_TOKEN}" \
      "https://huggingface.co/datasets/YUGOROU/teememo-eq-bench-ja/resolve/main/data/${SRC}" \
      -o "${TMP}"; then
    mv "${TMP}" "${DEST}"
    echo "✅ 日本語版: ${DEST}"
  else
    rm -f -- "${TMP}"
    echo "⚠️ 英語版を使用: ${DEST}"
  fi
done
# Generate the .env consumed by eqbench3: the local MLX server is the test
# endpoint, the HF router (novita provider) is the judge endpoint.
{
  printf 'TEST_API_URL=http://localhost:%s/v1/chat/completions\n' "${PORT}"
  printf 'TEST_API_KEY=dummy\n'
  printf 'JUDGE_API_URL=https://router.huggingface.co/novita/v1/chat/completions\n'
  printf 'JUDGE_API_KEY=%s\n' "${HF_TOKEN}"
  printf 'MAX_RETRIES=6\n'
  printf 'RETRY_DELAY=5\n'
  printf 'REQUEST_TIMEOUT=300\n'
} > "${WORKSPACE}/eqbench3/.env"
echo "✅ .env 生成完了"
# Print the manual follow-up steps (server start, health check, benchmark run).
# Unquoted heredoc: ${vars} expand; "\\" yields a literal backslash, so the
# printed commands keep their line continuations exactly as before.
cat << EOF

======================================
 セットアップ完了
======================================

【実行手順】

# Step 1: 別タブでMLXサーバー起動
source ${VENV_DIR}/bin/activate
mlx_lm.server \\
 --model ${BASE_MODEL} \\
 --adapter-path ${LORA_LOCAL} \\
 --port ${PORT}

# Step 2: サーバー起動確認
curl -s http://localhost:${PORT}/v1/models

# Step 3: EQ-Bench3 評価実行
cd ${WORKSPACE}/eqbench3 && source ${VENV_DIR}/bin/activate
python eqbench3.py \\
 --test-model LFM2.5-1.2B-Base \\
 --model-name TeenEmo-DPO \\
 --judge-model openai/gpt-oss-120b \\
 --no-elo --save-interval 1 --iterations 1
EOF