#!/usr/bin/env bash
# EQ-Bench3 setup for macOS / M3 MacBook.
# Test model (examinee): TeenEmo MLX (mlx_lm.server, port 8000)
# Judge: openai/gpt-oss-120b via HF Inference Providers / novita
set -euo pipefail

# Fail fast with a clear message if the Hugging Face token is missing.
: "${HF_TOKEN:?HF_TOKEN が未設定です。}"

WORKSPACE="${HOME}/eqbench-teenemo"
VENV_DIR="${WORKSPACE}/.venv"
PORT="${LLAMA_SERVER_PORT:-8000}"

# NOTE(review): the original comment described an mlx-community 6-bit
# quantized model, but this repo ID is the LiquidAI base model — confirm
# which model is actually intended before relying on the comment.
BASE_MODEL="LiquidAI/LFM2.5-1.2B-Base"
LORA_REPO="YUGOROU/TeenEmo-LFM2.5-1.2B-DPO"
LORA_LOCAL="${WORKSPACE}/adapters/teenemo-dpo"
printf '%s\n' "======================================"
printf '%s\n' " EQ-Bench3 macOS セットアップ"
printf '%s\n' " 作業ディレクトリ: ${WORKSPACE}"
printf '%s\n' "======================================"
mkdir -p "${WORKSPACE}"

# Fetch EQ-Bench3: shallow clone on first run; afterwards a best-effort
# fast-forward pull (failures deliberately ignored — offline runs still work).
repo_dir="${WORKSPACE}/eqbench3"
if [[ ! -d "${repo_dir}" ]]; then
  git clone --depth=1 https://github.com/EQ-bench/eqbench3.git "${repo_dir}"
else
  git -C "${repo_dir}" pull --ff-only 2>/dev/null || true
fi
echo "✅ EQ-Bench3"
# uv virtual environment + dependencies.
cd "${WORKSPACE}/eqbench3"
# venv creation errors are tolerated (the venv may already exist), but we
# verify the interpreter afterwards so a real creation failure does not
# surface later as a confusing 'uv pip install' error.
uv venv "${VENV_DIR}" --python 3.11 2>/dev/null || true
if [ ! -x "${VENV_DIR}/bin/python" ]; then
  echo "仮想環境の作成に失敗しました: ${VENV_DIR}" >&2
  exit 1
fi
uv pip install --python "${VENV_DIR}/bin/python" \
  -r requirements.txt mlx-lm huggingface_hub
echo "✅ 依存パッケージ"
# Download the LoRA adapter to a local directory.
# mlx_lm.server does not accept an HF repo ID directly, so a local path is required.
echo "LoRAアダプタをダウンロード中: ${LORA_REPO} → ${LORA_LOCAL}"
# Pass values through the environment with a quoted heredoc delimiter instead
# of interpolating shell variables into the Python source: a token or path
# containing quotes would otherwise break (or inject code into) the script.
LORA_REPO="${LORA_REPO}" HF_TOKEN="${HF_TOKEN}" LORA_LOCAL="${LORA_LOCAL}" \
  "${VENV_DIR}/bin/python" - << 'PYEOF'
import os
from huggingface_hub import snapshot_download

path = snapshot_download(
    repo_id=os.environ["LORA_REPO"],
    repo_type="model",
    token=os.environ["HF_TOKEN"],
    local_dir=os.environ["LORA_LOCAL"],
)
print(f"✅ LoRAアダプタ: {path}")
PYEOF
# Swap in the Japanese scenario files; fall back to the bundled English ones.
# NOTE(review): the dataset ID is spelled 'teememo' (not 'teenemo'/'TeenEmo'
# like the other repos) — confirm this is the actual repository name.
for pair in \
  "scenario_prompts_ja.txt|data/scenario_prompts.txt" \
  "scenario_notes_ja.txt|data/scenario_notes.txt"; do
  SRC="${pair%%|*}"; DEST="${pair##*|}"
  cp "${DEST}" "${DEST}.en.bak" 2>/dev/null || true   # keep the English original
  # Download to a temp file first so a failed or partial download can never
  # clobber the existing English file in place.
  TMP="$(mktemp)"
  if curl -sfL -H "Authorization: Bearer ${HF_TOKEN}" \
      "https://huggingface.co/datasets/YUGOROU/teememo-eq-bench-ja/resolve/main/data/${SRC}" \
      -o "${TMP}"; then
    mv "${TMP}" "${DEST}"
    echo "✅ 日本語版: ${DEST}"
  else
    rm -f -- "${TMP}"
    echo "⚠️ 英語版を使用: ${DEST}"
  fi
done
# Generate .env. It contains the HF token, so restrict it to the owner —
# the default umask would otherwise leave the secret world-readable.
ENV_FILE="${WORKSPACE}/eqbench3/.env"
cat > "${ENV_FILE}" << ENVEOF
TEST_API_URL=http://localhost:${PORT}/v1/chat/completions
TEST_API_KEY=dummy
JUDGE_API_URL=https://router.huggingface.co/novita/v1/chat/completions
JUDGE_API_KEY=${HF_TOKEN}
MAX_RETRIES=6
RETRY_DELAY=5
REQUEST_TIMEOUT=300
ENVEOF
chmod 600 "${ENV_FILE}"
echo "✅ .env 生成完了"
# Print the follow-up instructions in one heredoc instead of echo-per-line.
# '\\' in an unquoted heredoc emits a single literal backslash, matching the
# line-continuation markers the user is meant to copy-paste.
cat << GUIDE

======================================
 セットアップ完了
======================================

【実行手順】

# Step 1: 別タブでMLXサーバー起動
source ${VENV_DIR}/bin/activate
mlx_lm.server \\
 --model ${BASE_MODEL} \\
 --adapter-path ${LORA_LOCAL} \\
 --port ${PORT}

# Step 2: サーバー起動確認
curl -s http://localhost:${PORT}/v1/models

# Step 3: EQ-Bench3 評価実行
cd ${WORKSPACE}/eqbench3 && source ${VENV_DIR}/bin/activate
python eqbench3.py \\
 --test-model LFM2.5-1.2B-Base \\
 --model-name TeenEmo-DPO \\
 --judge-model openai/gpt-oss-120b \\
 --no-elo --save-interval 1 --iterations 1
GUIDE