| """ |
| translate_eqbench.py — EQ-Bench3 日本語化スクリプト |
| |
| フロー: |
| 1. EQ-Bench3 の scenario_prompts.txt と scenario_notes.txt を読み込む |
| 2. vLLM(Qwen3.5-9B)経由で各シナリオを日本語に翻訳 |
| 3. 翻訳結果を保存(チェックポイント対応・再開可能) |
| 4. 完成した日本語版ファイルを output/ に書き出し |
| 5. HF Hub にアップロード |
| |
| 実行例: |
| python translate_eqbench.py |
| python translate_eqbench.py --dry-run # 最初の2シナリオのみ |
| python translate_eqbench.py --target-file scenario_notes.txt # ノートのみ |
| """ |
|
|
| from __future__ import annotations |
|
|
| import argparse |
| import asyncio |
| import json |
| import os |
| import sys |
| import time |
| import traceback |
| from datetime import datetime, timezone |
| from pathlib import Path |
|
|
| import httpx |
| from tqdm import tqdm |
|
|
| |
# --- vLLM / Hugging Face configuration (all overridable via environment) ---

# Base URL of the local vLLM OpenAI-compatible server.
VLLM_BASE_URL: str = os.environ.get("VLLM_BASE_URL", "http://localhost:8000/v1")
# Model name served by vLLM and used for the translation requests.
TRANSLATE_MODEL: str = os.environ.get("TRANSLATE_MODEL", "Qwen/Qwen3.5-9B")
# Hugging Face token; when empty, the upload step is skipped (see upload_to_hf).
HF_TOKEN: str = os.environ.get("HF_TOKEN", "")
HF_USERNAME: str = os.environ.get("HF_USERNAME", "YUGOROU")
# Target dataset repo for the translated files.
HF_REPO: str = os.environ.get("HF_REPO", f"{HF_USERNAME}/teememo-eq-bench-ja")


# --- Filesystem layout ---

# EQ-Bench3 checkout; the script reads <EQBENCH_DIR>/data/scenario_*.txt.
EQBENCH_DIR: Path = Path(os.environ.get("EQBENCH_DIR", "/workspace/eqbench-ja/eqbench3"))
# Where the Japanese output files are written.
OUTPUT_DIR: Path = Path(os.environ.get("OUTPUT_DIR", "/workspace/eqbench-ja/output"))
# Resumable progress file; deleted after a successful non-dry run.
CHECKPOINT_FILE: Path = Path(os.environ.get("CHECKPOINT_FILE", "/workspace/eqbench-ja/output/.checkpoint_translate.json"))


# --- Request tuning ---

# Maximum number of simultaneous in-flight translation requests.
MAX_CONCURRENT: int = int(os.environ.get("MAX_CONCURRENT", "4"))
# Attempts per request, with exponential backoff between retries.
MAX_RETRIES: int = int(os.environ.get("MAX_RETRIES", "3"))
# Per-request HTTP timeout in seconds.
REQUEST_TIMEOUT: float = float(os.environ.get("REQUEST_TIMEOUT", "180.0"))


# System prompt sent with every translation request. This is a runtime string
# consumed by the model, not a comment — do not localize or reformat it.
TRANSLATE_SYSTEM = """You are a professional translator specializing in Japanese localization of psychological and emotional intelligence assessment materials.

Your task is to translate English text into natural, fluent Japanese while:
1. Preserving the original meaning, nuance, and emotional tone precisely
2. Maintaining all formatting markers (######## , ####### , etc.) exactly as-is
3. Keeping scenario numbers, category names, and structural markers unchanged
4. Using natural conversational Japanese appropriate for the social context described
5. Preserving any special instructions in brackets [like this] translated into Japanese
6. For role-play scenarios involving interpersonal conflict, use natural Japanese speech patterns including appropriate keigo or casual speech as the context demands
7. Translating all proper nouns contextually (names can be kept or given Japanese equivalents)

Output ONLY the translated text with no explanations or commentary."""
|
|
|
|
| |
|
|
class VLLMClient:
    """Thin async client for a vLLM OpenAI-compatible endpoint.

    Concurrency is bounded by an internal semaphore (MAX_CONCURRENT) and
    each request is retried with exponential backoff (MAX_RETRIES).
    Use as an async context manager so the connection pool is closed.
    """

    def __init__(self) -> None:
        self._client = httpx.AsyncClient(
            timeout=httpx.Timeout(REQUEST_TIMEOUT),
        )
        self._semaphore = asyncio.Semaphore(MAX_CONCURRENT)

    async def close(self) -> None:
        """Release the underlying HTTP connection pool."""
        await self._client.aclose()

    async def __aenter__(self) -> "VLLMClient":
        return self

    async def __aexit__(self, *_) -> None:
        await self.close()

    async def wait_for_server(self, max_wait: int = 300, interval: int = 5) -> None:
        """Poll GET /models until the server answers 200 or max_wait elapses.

        Args:
            max_wait: maximum total seconds to wait.
            interval: seconds between polls.

        Raises:
            TimeoutError: if the server never becomes reachable in time.
        """
        print(f"[client] vLLM サーバーの起動を待機中 (最大 {max_wait}s)...")
        start = time.time()
        while time.time() - start < max_wait:
            try:
                response = await self._client.get(f"{VLLM_BASE_URL}/models")
                if response.status_code == 200:
                    print("[client] vLLM サーバーが起動しました。")
                    return
            except Exception:
                # Connection refused etc. while the server is still booting.
                pass
            await asyncio.sleep(interval)
        raise TimeoutError(f"[client] vLLM サーバーが {max_wait}s 以内に起動しませんでした。")

    async def translate(self, text: str) -> str:
        """Translate *text* to Japanese via the chat-completions API.

        Retries up to MAX_RETRIES times, backing off 1s, 2s, 4s, ...

        Raises:
            RuntimeError: when every attempt fails (chained to the last error).
        """
        payload = {
            "model": TRANSLATE_MODEL,
            "messages": [
                {"role": "system", "content": TRANSLATE_SYSTEM},
                {"role": "user", "content": f"Translate the following to Japanese:\n\n{text}"},
            ],
            "temperature": 0.1,
            "top_p": 0.9,
            "max_tokens": 4096,
            # Disable Qwen "thinking" so the reply is the translation only.
            "chat_template_kwargs": {"enable_thinking": False},
        }

        last_exc: Exception | None = None
        for attempt in range(MAX_RETRIES):
            try:
                async with self._semaphore:
                    response = await self._client.post(
                        f"{VLLM_BASE_URL}/chat/completions",
                        json=payload,
                        headers={"Content-Type": "application/json"},
                    )
                    response.raise_for_status()
                    result = response.json()
                    # content may be None for an empty completion — normalize to "".
                    return result["choices"][0]["message"].get("content") or ""
            except Exception as exc:
                last_exc = exc
                # FIX: previously the code printed a retry message and slept
                # even after the FINAL attempt, delaying the failure for no gain.
                if attempt + 1 >= MAX_RETRIES:
                    break
                wait = 2 ** attempt
                print(f"[client] リトライ {attempt+1}/{MAX_RETRIES} ({type(exc).__name__}) — {wait}s 待機")
                await asyncio.sleep(wait)
        # Chain the cause so the real network/HTTP error stays in the traceback.
        raise RuntimeError(f"[client] 翻訳失敗: {last_exc}") from last_exc
|
|
|
|
| |
|
|
def parse_scenario_prompts(filepath: Path) -> list[dict]:
    """Parse scenario_prompts.txt into a list of scenario records.

    The file opens a scenario with a '######## <id> | ...' line and each
    prompt within it with a '####### <PromptName>' line; every other line
    belongs to the body of the most recent prompt.

    Returns:
        [{"id": 1, "header": "...", "prompts": {"Prompt1": "...", ...}}, ...]
    """
    scenarios: list[dict] = []
    scenario: dict | None = None
    prompt_key: str | None = None
    body: list[str] = []

    def _commit_prompt() -> None:
        # Attach the buffered prompt body to the currently open scenario.
        if scenario and prompt_key and body:
            scenario["prompts"][prompt_key] = "\n".join(body).strip()
            body.clear()

    for raw in filepath.read_text(encoding="utf-8").splitlines():
        if raw.startswith("######## "):
            # New scenario header: finalize the previous scenario first.
            _commit_prompt()
            if scenario:
                scenarios.append(scenario)
            sid = raw.split("|")[0].replace("#", "").strip()
            scenario = {
                "id": int(sid) if sid.isdigit() else sid,
                "header": raw,
                "prompts": {},
            }
            prompt_key = None
            body = []
        elif raw.startswith("####### "):
            # New prompt section inside the current scenario.
            _commit_prompt()
            prompt_key = raw.replace("#", "").strip()
            body = []
        elif prompt_key is not None:
            body.append(raw)

    _commit_prompt()
    if scenario:
        scenarios.append(scenario)
    return scenarios
|
|
|
|
def parse_scenario_notes(filepath: Path) -> dict[str, str]:
    """Parse scenario_notes.txt.

    Format: a '# <number>' header line, then the note body, repeated.

    Returns:
        {"1": "<note>", "2": "<note>", ...}
    """
    notes: dict[str, str] = {}
    key: str | None = None
    body: list[str] = []

    def _commit() -> None:
        # Store the buffered note under the current key, if any.
        if key and body:
            notes[key] = "\n".join(body).strip()
            body.clear()

    for raw in filepath.read_text(encoding="utf-8").splitlines():
        marker = raw[2:].strip() if raw.startswith("# ") else None
        if marker is not None and marker.isdigit():
            _commit()
            key = marker
            body = []
        elif key is not None:
            body.append(raw)

    _commit()
    return notes
|
|
|
|
| |
|
|
def load_checkpoint() -> dict:
    """Load translation progress from CHECKPOINT_FILE, or a fresh state."""
    fresh_state = {"translated_scenarios": {}, "translated_notes": {}}
    if not CHECKPOINT_FILE.exists():
        return fresh_state
    try:
        return json.loads(CHECKPOINT_FILE.read_text(encoding="utf-8"))
    except Exception:
        # Corrupt or unreadable checkpoint: start over instead of crashing.
        return fresh_state
|
|
|
|
def save_checkpoint(state: dict) -> None:
    """Persist translation progress so an interrupted run can resume."""
    CHECKPOINT_FILE.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(state, ensure_ascii=False, indent=2)
    CHECKPOINT_FILE.write_text(serialized, encoding="utf-8")
|
|
|
|
| |
|
|
def build_output_prompts(
    scenarios: list[dict],
    translated: dict[str, dict],
) -> str:
    """Rebuild translated scenarios back into scenario_prompts.txt format.

    Any scenario (or individual prompt) without a translation falls back to
    its original English text, so a partially translated checkpoint still
    yields a complete, valid file.

    Args:
        scenarios: parsed output of parse_scenario_prompts().
        translated: {scenario_id: {"header": str, "prompts": {name: text}}}.

    Returns:
        The full file content as one string.
    """
    # FIX: the translated/untranslated branches were duplicated copies of the
    # same loop; an empty translation dict makes the fallback path identical.
    lines: list[str] = []
    for scenario in scenarios:
        trans = translated.get(str(scenario["id"]), {})
        lines.append(trans.get("header", scenario["header"]))
        for prompt_key, prompt_text in scenario["prompts"].items():
            lines.append(f"####### {prompt_key}")
            lines.append(trans.get("prompts", {}).get(prompt_key, prompt_text))
        lines.append("")
    return "\n".join(lines)
|
|
|
|
def build_output_notes(
    original_notes: dict[str, str],
    translated_notes: dict[str, str],
) -> str:
    """Rebuild notes into scenario_notes.txt format ('# <id>' then the body).

    Keys without a translation fall back to the original English note.
    """
    sections = [
        f"# {key}\n{translated_notes.get(key, original)}\n"
        for key, original in original_notes.items()
    ]
    return "\n".join(sections)
|
|
|
|
| |
|
|
async def upload_to_hf(output_dir: Path) -> None:
    """Upload every *.txt in output_dir to the HF dataset repo (best-effort).

    Skips silently when HF_TOKEN is unset; any upload error is logged with
    a traceback but never raised, so the local output is always preserved.
    """
    if not HF_TOKEN:
        print("[hub] HF_TOKEN 未設定のためアップロードをスキップ")
        return
    try:
        from huggingface_hub import HfApi

        api = HfApi(token=HF_TOKEN)
        api.create_repo(repo_id=HF_REPO, repo_type="dataset", private=True, exist_ok=True)
        for txt_file in output_dir.glob("*.txt"):
            api.upload_file(
                path_or_fileobj=str(txt_file),
                path_in_repo=f"data/{txt_file.name}",
                repo_id=HF_REPO,
                repo_type="dataset",
                commit_message=f"update: {txt_file.name}",
            )
            print(f"[hub] アップロード完了: {txt_file.name}")
        print(f"[hub] ✅ https://huggingface.co/datasets/{HF_REPO}")
    except Exception as e:
        print(f"[hub] アップロードエラー: {e}")
        print(traceback.format_exc())
|
|
|
|
| |
|
|
async def translate_scenarios(
    client: VLLMClient,
    scenarios: list[dict],
    state: dict,
    dry_run: bool,
    lock: asyncio.Lock,
) -> dict:
    """Translate all pending scenarios concurrently via asyncio.gather.

    Each finished scenario is checkpointed under ``lock`` so an interrupted
    run resumes without re-translating completed scenarios. A failed prompt
    keeps its English text rather than aborting the whole scenario.
    """
    translated = state.get("translated_scenarios", {})
    targets = scenarios[:2] if dry_run else scenarios
    pending = [sc for sc in targets if str(sc["id"]) not in translated]

    print(f"[translate] シナリオ翻訳開始: {len(targets)}件 (未翻訳: {len(pending)}件)")

    async def _worker(scenario: dict) -> None:
        sid = str(scenario["id"])
        result: dict = {"header": scenario["header"], "prompts": {}}

        # Prompts within one scenario are translated sequentially; the
        # client's semaphore already bounds cross-scenario concurrency.
        for key, text in scenario["prompts"].items():
            if not text.strip():
                result["prompts"][key] = text
                continue
            try:
                result["prompts"][key] = await client.translate(text)
            except Exception as e:
                print(f"\n[translate] WARNING: シナリオ{sid}/{key} 翻訳失敗: {e}")
                result["prompts"][key] = text  # fall back to English

        async with lock:
            translated[sid] = result
            state["translated_scenarios"] = translated
            save_checkpoint(state)
            print(f"[translate] シナリオ {sid} 完了")

    await asyncio.gather(*(_worker(sc) for sc in pending))

    print(f"[translate] シナリオ翻訳完了: {len(translated)}件")
    return translated
|
|
|
|
async def translate_notes(
    client: VLLMClient,
    notes: dict[str, str],
    state: dict,
    dry_run: bool,
    lock: asyncio.Lock,
) -> dict:
    """Translate the grading notes concurrently via asyncio.gather.

    Mirrors translate_scenarios(): per-note checkpointing under ``lock``
    and English-text fallback on translation failure.
    """
    translated_notes = state.get("translated_notes", {})
    targets = dict(list(notes.items())[:2]) if dry_run else notes
    pending = {k: v for k, v in targets.items() if k not in translated_notes}

    print(f"[translate] ノート翻訳開始: {len(targets)}件 (未翻訳: {len(pending)}件)")

    async def _worker(key: str, note: str) -> None:
        try:
            text = await client.translate(note)
        except Exception as e:
            print(f"\n[translate] WARNING: ノート#{key} 翻訳失敗: {e}")
            text = note  # fall back to the English note
        async with lock:
            translated_notes[key] = text
            state["translated_notes"] = translated_notes
            save_checkpoint(state)
            print(f"[translate] ノート #{key} 完了")

    await asyncio.gather(*(_worker(k, v) for k, v in pending.items()))

    print(f"[translate] ノート翻訳完了: {len(translated_notes)}件")
    return translated_notes
|
|
|
|
async def main(dry_run: bool, target_file: str | None) -> None:
    """Run the end-to-end pipeline: parse -> translate -> write -> upload.

    Args:
        dry_run: translate only the first 2 scenarios/notes; skip the HF
            upload and keep the checkpoint file.
        target_file: limit translation to "scenario_prompts.txt" or
            "scenario_notes.txt"; None translates both.
    """
    start_time = datetime.now(timezone.utc)
    print(f"=== EQ-Bench3 日本語化開始 [{start_time.isoformat()}] ===")
    print(f" EQ-Bench3 ディレクトリ: {EQBENCH_DIR}")
    print(f" 出力ディレクトリ : {OUTPUT_DIR}")
    print(f" 翻訳モデル : {TRANSLATE_MODEL}")
    print(f" dry-run : {dry_run}")
    print()

    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

    # Input files live in the EQ-Bench3 checkout prepared by setup_translate.sh.
    prompts_file = EQBENCH_DIR / "data" / "scenario_prompts.txt"
    notes_file = EQBENCH_DIR / "data" / "scenario_notes.txt"

    if not prompts_file.exists():
        print(f"[ERROR] scenario_prompts.txt が見つかりません: {prompts_file}")
        print(" 先に setup_translate.sh を実行してください。")
        sys.exit(1)

    # FIX: notes_file was never validated, so a missing notes file surfaced
    # later as an unhandled FileNotFoundError inside parse_scenario_notes().
    # Fail fast with the same guidance as the prompts check.
    if not notes_file.exists():
        print(f"[ERROR] scenario_notes.txt が見つかりません: {notes_file}")
        print(" 先に setup_translate.sh を実行してください。")
        sys.exit(1)

    print("[data] scenario_prompts.txt 読み込み中...")
    scenarios = parse_scenario_prompts(prompts_file)
    print(f"[data] シナリオ数: {len(scenarios)}")

    print("[data] scenario_notes.txt 読み込み中...")
    notes = parse_scenario_notes(notes_file)
    print(f"[data] ノート数: {len(notes)}")

    # Resume from a previous interrupted run when a checkpoint exists.
    state = load_checkpoint()
    done_s = len(state.get("translated_scenarios", {}))
    done_n = len(state.get("translated_notes", {}))
    if done_s > 0 or done_n > 0:
        print(f"[checkpoint] 再開: シナリオ{done_s}件・ノート{done_n}件 処理済み")

    async with VLLMClient() as client:
        await client.wait_for_server()

        # Serializes checkpoint writes across concurrent translation tasks.
        lock = asyncio.Lock()

        do_scenarios = target_file is None or target_file == "scenario_prompts.txt"
        do_notes = target_file is None or target_file == "scenario_notes.txt"

        if do_scenarios:
            translated_scenarios = await translate_scenarios(client, scenarios, state, dry_run, lock)
        else:
            translated_scenarios = state.get("translated_scenarios", {})

        if do_notes:
            translated_notes = await translate_notes(client, notes, state, dry_run, lock)
        else:
            translated_notes = state.get("translated_notes", {})

    print("[output] 出力ファイル生成中...")

    prompts_out = OUTPUT_DIR / "scenario_prompts_ja.txt"
    prompts_out.write_text(
        build_output_prompts(scenarios, translated_scenarios),
        encoding="utf-8",
    )
    print(f"[output] ✅ {prompts_out}")

    notes_out = OUTPUT_DIR / "scenario_notes_ja.txt"
    notes_out.write_text(
        build_output_notes(notes, translated_notes),
        encoding="utf-8",
    )
    print(f"[output] ✅ {notes_out}")

    # Keep the English originals next to the translations for reference.
    import shutil
    shutil.copy(prompts_file, OUTPUT_DIR / "scenario_prompts_en.txt")
    shutil.copy(notes_file, OUTPUT_DIR / "scenario_notes_en.txt")

    if not dry_run:
        await upload_to_hf(OUTPUT_DIR)

    # A completed real run no longer needs the checkpoint; dry runs keep it.
    if CHECKPOINT_FILE.exists() and not dry_run:
        CHECKPOINT_FILE.unlink()

    elapsed = (datetime.now(timezone.utc) - start_time).total_seconds()
    print()
    print(f"=== 翻訳完了 (所要時間: {elapsed/60:.1f}分) ===")
    print(f" 出力: {OUTPUT_DIR}/scenario_prompts_ja.txt")
    print(f" 出力: {OUTPUT_DIR}/scenario_notes_ja.txt")
|
|
|
| if __name__ == "__main__": |
| parser = argparse.ArgumentParser(description="EQ-Bench3 日本語化スクリプト") |
| parser.add_argument("--dry-run", action="store_true", help="最初の2シナリオのみ処理して動作確認") |
| parser.add_argument( |
| "--target-file", |
| choices=["scenario_prompts.txt", "scenario_notes.txt"], |
| default=None, |
| help="翻訳対象ファイルを指定(デフォルト: 両方)", |
| ) |
| args = parser.parse_args() |
|
|
| if not os.environ.get("HF_TOKEN"): |
| print("[WARNING] HF_TOKEN が未設定です。HF アップロードはスキップされます。") |
|
|
| asyncio.run(main(dry_run=args.dry_run, target_file=args.target_file)) |
|
|