fix: sync
eqbench-mac/setup_eqbench_mac.sh
CHANGED
@@ -9,8 +9,10 @@ HF_TOKEN="${HF_TOKEN:?HF_TOKEN が未設定です。}"
 WORKSPACE="${HOME}/eqbench-teenemo"
 VENV_DIR="${WORKSPACE}/.venv"
 PORT="${LLAMA_SERVER_PORT:-8000}"
-
+# mlx-community の6bit量子化済みモデル(MLX-native・ダウンロード小・変換不要)
+BASE_MODEL="mlx-community/LFM2.5-1.2B-Base-6bit"
 LORA_REPO="YUGOROU/TeenEmo-LFM2.5-1.2B-DPO"
+LORA_LOCAL="${WORKSPACE}/adapters/teenemo-dpo"
 
 echo "======================================"
 echo " EQ-Bench3 macOS セットアップ"
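For context, the three added variables feed the later steps of the script: BASE_MODEL is the base weights handed to mlx_lm.server, LORA_REPO is the Hugging Face repo holding the DPO adapter, and LORA_LOCAL is where that adapter is materialized on disk. A minimal invocation sketch, assuming only what this diff shows (the HF_TOKEN guard visible in the hunk header and the LLAMA_SERVER_PORT default of 8000); the token value is a placeholder:

    # Run the setup script; HF_TOKEN is required, the port override is optional.
    export HF_TOKEN="hf_xxx"           # placeholder, not a real token
    export LLAMA_SERVER_PORT=8000      # optional; the script defaults to 8000
    bash eqbench-mac/setup_eqbench_mac.sh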
@@ -34,6 +36,20 @@ uv pip install --python "${VENV_DIR}/bin/python" \
   -r requirements.txt mlx-lm huggingface_hub
 echo "✅ 依存パッケージ"
 
+# LoRAアダプタをローカルにダウンロード
+# mlx_lm.server はHFリポジトリIDを直接受け付けないためローカルパスが必要
+echo "LoRAアダプタをダウンロード中: ${LORA_REPO} → ${LORA_LOCAL}"
+"${VENV_DIR}/bin/python" - << PYEOF
+from huggingface_hub import snapshot_download
+path = snapshot_download(
+    repo_id="${LORA_REPO}",
+    repo_type="model",
+    token="${HF_TOKEN}",
+    local_dir="${LORA_LOCAL}",
+)
+print(f"✅ LoRAアダプタ: {path}")
+PYEOF
+
 # 日本語版シナリオ差し替え
 for pair in \
   "scenario_prompts_ja.txt|data/scenario_prompts.txt" \
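The added heredoc runs the venv's Python so the adapter lands at ${LORA_LOCAL} before the server is ever started; mlx_lm.server is then pointed at that local path rather than at the Hugging Face repo ID. For fetching the adapter outside the script, roughly the same result should be obtainable with the CLI that ships with huggingface_hub; this is a sketch and not part of the commit, so check the flag names against your installed version:

    # Standalone adapter download, mirroring the snapshot_download call above.
    huggingface-cli download YUGOROU/TeenEmo-LFM2.5-1.2B-DPO \
      --local-dir "${HOME}/eqbench-teenemo/adapters/teenemo-dpo" \
      --token "${HF_TOKEN}"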
@@ -57,16 +73,27 @@ REQUEST_TIMEOUT=300
 ENVEOF
 echo "✅ .env 生成完了"
 
+echo ""
+echo "======================================"
+echo " セットアップ完了"
+echo "======================================"
 echo ""
 echo "【実行手順】"
+echo ""
 echo "# Step 1: 別タブでMLXサーバー起動"
 echo "source ${VENV_DIR}/bin/activate"
-echo "mlx_lm.server
+echo "mlx_lm.server \\"
+echo " --model ${BASE_MODEL} \\"
+echo " --adapter-path ${LORA_LOCAL} \\"
+echo " --port ${PORT}"
+echo ""
+echo "# Step 2: サーバー起動確認"
+echo "curl -s http://localhost:${PORT}/v1/models"
 echo ""
-echo "# Step
+echo "# Step 3: EQ-Bench3 評価実行"
 echo "cd ${WORKSPACE}/eqbench3 && source ${VENV_DIR}/bin/activate"
 echo "python eqbench3.py \\"
-echo " --test-model LFM2.5-1.2B-Base \\"
+echo " --test-model LFM2.5-1.2B-Base-6bit \\"
 echo " --model-name TeenEmo-DPO \\"
 echo " --judge-model openai/gpt-oss-120b \\"
 echo " --no-elo --save-interval 1 --iterations 1"
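Step 2 in the printed instructions only confirms that the server answers on /v1/models. Before launching the full EQ-Bench3 run it can be worth checking that the adapter actually generates text; a quick smoke test against mlx_lm.server's OpenAI-compatible chat endpoint might look like the sketch below. The model name and port are assumptions taken from this diff's defaults, so substitute whatever /v1/models reports on your machine:

    # Smoke-test one chat completion against the locally served model.
    curl -s http://localhost:8000/v1/chat/completions \
      -H "Content-Type: application/json" \
      -d '{"model": "mlx-community/LFM2.5-1.2B-Base-6bit",
           "messages": [{"role": "user", "content": "Hello"}],
           "max_tokens": 64}'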