| """ |
| translate_eqbench_v2.py — EQ-Bench3 日本語化スクリプト (Version 2) |
| |
| フロー: |
| 1. EQ-Bench3 の全データファイル (12ファイル) を読み込む |
| 2. vLLM(Qwen3.5-9B)経由で各ファイルを日本語に翻訳 |
| 3. 翻訳結果を保存(チェックポイント対応・再開可能) |
| 4. 完成した日本語版ファイルを output/ に書き出し |
| 5. HF Hub にアップロード |
| |
| 実行例: |
| python translate_eqbench_v2.py |
| python translate_eqbench_v2.py --dry-run # 最初の2シナリオのみ |
| python translate_eqbench_v2.py --target-file scenario_notes.txt # 特定ファイルのみ |
| """ |
|
|
| from __future__ import annotations |
|
|
| import argparse |
| import asyncio |
| import json |
| import os |
| import sys |
| import time |
| import traceback |
| from datetime import datetime, timezone |
| from pathlib import Path |
|
|
| import httpx |
| from tqdm import tqdm |
|
|
| |
# --- Service endpoints and credentials (all overridable via environment) ---

# Base URL of the OpenAI-compatible vLLM server used for translation.
VLLM_BASE_URL: str = os.environ.get("VLLM_BASE_URL", "http://localhost:8000/v1")
# Model name passed to the vLLM chat/completions endpoint.
TRANSLATE_MODEL: str = os.environ.get("TRANSLATE_MODEL", "Qwen/Qwen3.5-9B")
# Hugging Face token; when empty, the upload step is skipped.
HF_TOKEN: str = os.environ.get("HF_TOKEN", "")
HF_USERNAME: str = os.environ.get("HF_USERNAME", "YUGOROU")
# Dataset repository the translated files are uploaded to.
HF_REPO: str = os.environ.get("HF_REPO", f"{HF_USERNAME}/teememo-eq-bench-ja")


# --- Input/output locations ---

# EQ-Bench3 checkout; the source .txt files are read from <EQBENCH_DIR>/data.
EQBENCH_DIR: Path = Path(os.environ.get("EQBENCH_DIR", "/workspace/eqbench-ja/eqbench3"))
# Where translated (_ja) files and copies of the originals (_en) are written.
OUTPUT_DIR: Path = Path(os.environ.get("OUTPUT_DIR", "/workspace/eqbench-ja/output"))
# JSON checkpoint enabling resume after interruption; removed once all files complete.
CHECKPOINT_FILE: Path = Path(os.environ.get("CHECKPOINT_FILE", "/workspace/eqbench-ja/output/.checkpoint_translate_v2.json"))


# --- Request tuning ---

# Maximum number of in-flight translation requests (semaphore width).
MAX_CONCURRENT: int = int(os.environ.get("MAX_CONCURRENT", "4"))
# Attempts per request; backoff between attempts is 2**attempt seconds.
MAX_RETRIES: int = int(os.environ.get("MAX_RETRIES", "3"))
# Per-request HTTP timeout in seconds.
REQUEST_TIMEOUT: float = float(os.environ.get("REQUEST_TIMEOUT", "180.0"))
|
|
| |
| |
| |
| |
| |
| |
|
|
# All EQ-Bench3 data files to translate, keyed by filename (under EQBENCH_DIR/data).
# "type" selects the translation strategy used by process_file():
#   - "scenario_split": parsed into per-scenario prompt sections ("######## " /
#     "####### " markers) and translated prompt-by-prompt.
#   - "note_split": parsed into numbered notes ("# <n>" headers) and translated
#     note-by-note.
#   - "single": the whole file is translated in one request.
# "description" is a human-readable label shown in progress logs.
TARGET_FILES: dict[str, dict] = {

    "scenario_prompts.txt": {
        "type": "scenario_split",
        "description": "メインシナリオプロンプト",
    },

    "scenario_notes.txt": {
        "type": "note_split",
        "description": "シナリオ採点ノート",
    },

    "scenario_master_prompt_message_drafting.txt": {
        "type": "single",
        "description": "メッセージ作成用マスタープロンプト",
    },

    "scenario_master_prompt_analysis.txt": {
        "type": "single",
        "description": "分析用マスタープロンプト",
    },

    "scenario_master_prompt.txt": {
        "type": "single",
        "description": "汎用マスタープロンプト",
    },

    "rubric_scoring_prompt_analysis.txt": {
        "type": "single",
        "description": "分析用ルーブリック採点プロンプト",
    },

    "rubric_scoring_prompt.txt": {
        "type": "single",
        "description": "ルーブリック採点プロンプト",
    },

    "rubric_scoring_criteria_analysis.txt": {
        "type": "single",
        "description": "分析用ルーブリック採点基準",
    },

    "rubric_scoring_criteria.txt": {
        "type": "single",
        "description": "ルーブリック採点基準",
    },

    "pairwise_prompt_eqbench3_analysis.txt": {
        "type": "single",
        "description": "分析用ペアワイズ比較プロンプト",
    },

    "pairwise_prompt_eqbench3.txt": {
        "type": "single",
        "description": "ペアワイズ比較プロンプト",
    },

    "debrief_prompt.txt": {
        "type": "single",
        "description": "デブリーフプロンプト",
    },
}
|
|
| |
# System prompt sent with every translation request. It instructs the model to
# keep formatting markers, {{placeholders}}, and JSON keys intact and to emit
# only the translated text. NOTE: this is a runtime string consumed by the
# model — it must stay in English and must not be localized.
TRANSLATE_SYSTEM = """You are a professional translator specializing in Japanese localization of psychological and emotional intelligence assessment materials.

Your task is to translate English text into natural, fluent Japanese while:
1. Preserving the original meaning, nuance, and emotional tone precisely
2. Maintaining all formatting markers (######## , ####### , {{placeholder}}, etc.) exactly as-is
3. Keeping scenario numbers, category names, and structural markers unchanged
4. Using natural conversational Japanese appropriate for the social context described
5. Preserving any special instructions in brackets [like this] translated into Japanese
6. For role-play scenarios involving interpersonal conflict, use natural Japanese speech patterns including appropriate keigo or casual speech as the context demands
7. Translating all proper nouns contextually (names can be kept or given Japanese equivalents)
8. Keeping JSON keys, variable names, and code-like placeholders unchanged

Output ONLY the translated text with no explanations or commentary."""
|
|
|
|
| |
|
|
class VLLMClient:
    """Thin async client for an OpenAI-compatible vLLM chat/completions server.

    Concurrency is bounded by an asyncio.Semaphore of width MAX_CONCURRENT;
    each request is attempted up to MAX_RETRIES times with exponential
    backoff (2**attempt seconds). Usable as an async context manager.
    """

    def __init__(self) -> None:
        self._client = httpx.AsyncClient(
            timeout=httpx.Timeout(REQUEST_TIMEOUT),
        )
        self._semaphore = asyncio.Semaphore(MAX_CONCURRENT)

    async def close(self) -> None:
        """Release the underlying HTTP connection pool."""
        await self._client.aclose()

    async def __aenter__(self) -> "VLLMClient":
        return self

    async def __aexit__(self, *_) -> None:
        await self.close()

    async def wait_for_server(self, max_wait: int = 300, interval: int = 5) -> None:
        """Poll GET /models every *interval* seconds until the server answers 200.

        Raises:
            TimeoutError: if the server does not come up within *max_wait* seconds.
        """
        print(f"[client] vLLM サーバーの起動を待機中 (最大 {max_wait}s)...")
        start = time.time()
        while time.time() - start < max_wait:
            try:
                response = await self._client.get(f"{VLLM_BASE_URL}/models")
                if response.status_code == 200:
                    print("[client] vLLM サーバーが起動しました。")
                    return
            except Exception:
                pass  # server not reachable yet — keep polling
            await asyncio.sleep(interval)
        raise TimeoutError(f"[client] vLLM サーバーが {max_wait}s 以内に起動しませんでした。")

    async def translate(self, text: str) -> str:
        """Translate *text* into Japanese via the chat/completions endpoint.

        Retries on any error with exponential backoff. Returns the model's
        message content ("" when the server returns no content).

        Raises:
            RuntimeError: chained to the last underlying error, when every
                attempt fails.
        """
        payload = {
            "model": TRANSLATE_MODEL,
            "messages": [
                {"role": "system", "content": TRANSLATE_SYSTEM},
                {"role": "user", "content": f"Translate the following to Japanese:\n\n{text}"},
            ],
            "temperature": 0.1,
            "top_p": 0.9,
            "max_tokens": 8192,
            # Disable Qwen "thinking" mode so the response is the translation only.
            "chat_template_kwargs": {"enable_thinking": False},
        }

        last_exc: Exception | None = None
        for attempt in range(MAX_RETRIES):
            try:
                async with self._semaphore:
                    response = await self._client.post(
                        f"{VLLM_BASE_URL}/chat/completions",
                        json=payload,
                        headers={"Content-Type": "application/json"},
                    )
                    response.raise_for_status()
                    result = response.json()
                    return result["choices"][0]["message"].get("content") or ""
            except Exception as exc:
                last_exc = exc
                # BUGFIX: previously the code slept even after the FINAL failed
                # attempt, delaying the raise by 2**(MAX_RETRIES-1) seconds for
                # nothing. Back off only when another attempt remains.
                if attempt + 1 < MAX_RETRIES:
                    wait = 2 ** attempt
                    print(f"[client] リトライ {attempt+1}/{MAX_RETRIES} ({type(exc).__name__}) — {wait}s 待機")
                    await asyncio.sleep(wait)
        # Chain the original exception so the real cause stays in the traceback.
        raise RuntimeError(f"[client] 翻訳失敗: {last_exc}") from last_exc
|
|
|
|
| |
|
|
def parse_scenario_prompts(filepath: Path) -> list[dict]:
    """Parse scenario_prompts.txt into a list of scenario records.

    A line starting with "######## " opens a new scenario (its id is the text
    before the first "|", hashes stripped); a line starting with "####### "
    opens a new prompt section within the current scenario.

    Returns:
        [{"id": 1, "header": "...", "prompts": {"Prompt1": "...", ...}}, ...]
    """
    scenarios: list[dict] = []
    scenario: dict | None = None
    prompt_key: str | None = None
    prompt_lines: list[str] = []

    def _close_prompt() -> None:
        # Commit the accumulated prompt body (stripped) under its key, if any.
        if scenario is not None and prompt_key and prompt_lines:
            scenario["prompts"][prompt_key] = "\n".join(prompt_lines).strip()
        prompt_lines.clear()

    with filepath.open(encoding="utf-8") as fh:
        for raw in fh:
            line = raw.rstrip("\n")

            if line.startswith("######## "):
                # Scenario header: "######## <id> | <metadata...>"
                _close_prompt()
                if scenario is not None:
                    scenarios.append(scenario)
                sid = line.split("|")[0].replace("#", "").strip()
                scenario = {
                    "id": int(sid) if sid.isdigit() else sid,
                    "header": line,
                    "prompts": {},
                }
                prompt_key = None
                prompt_lines = []

            elif line.startswith("####### "):
                # Prompt-section header within the current scenario.
                _close_prompt()
                prompt_key = line.replace("#", "").strip()
                prompt_lines = []

            elif prompt_key is not None:
                prompt_lines.append(line)

    # Flush whatever was still open when the file ended.
    _close_prompt()
    if scenario is not None:
        scenarios.append(scenario)

    return scenarios
|
|
|
|
def parse_scenario_notes(filepath: Path) -> dict[str, str]:
    """Parse scenario_notes.txt into a mapping of note id -> note text.

    The file format is "# 1\\n<note>\\n# 2\\n<note>..." — a "# <digits>" line
    starts a new note; everything until the next such line is its body.

    Returns:
        {"1": "<note>", "2": "<note>", ...}
    """
    notes: dict[str, str] = {}
    key: str | None = None
    body: list[str] = []

    def _store() -> None:
        # Commit the accumulated (stripped) body under the current key.
        if key and body:
            notes[key] = "\n".join(body).strip()
        body.clear()

    with filepath.open(encoding="utf-8") as fh:
        for raw in fh:
            line = raw.rstrip("\n")
            header = line[2:].strip() if line.startswith("# ") else ""
            if header.isdigit():
                _store()
                key = header
                body = []
            elif key is not None:
                body.append(line)

    _store()
    return notes
|
|
|
|
def parse_single_file(filepath: Path) -> str:
    """Return the full UTF-8 contents of *filepath*."""
    with open(filepath, encoding="utf-8") as fh:
        return fh.read()
|
|
|
|
| |
|
|
def build_output_prompts(
    scenarios: list[dict],
    translated: dict[str, dict],
) -> str:
    """Rebuild the scenario_prompts.txt layout from translated scenarios.

    Scenarios missing from *translated* are emitted with their original
    English text; within a translated scenario, any prompt the translator
    skipped likewise falls back to the original.
    """
    out: list[str] = []
    for scenario in scenarios:
        # Empty-dict fallback makes untranslated scenarios use the originals.
        trans = translated.get(str(scenario["id"]), {})
        out.append(trans.get("header", scenario["header"]))
        trans_prompts = trans.get("prompts", {})
        for key, original_text in scenario["prompts"].items():
            out.append(f"####### {key}")
            out.append(trans_prompts.get(key, original_text))
            out.append("")
    return "\n".join(out)
|
|
|
|
def build_output_notes(
    original_notes: dict[str, str],
    translated_notes: dict[str, str],
) -> str:
    """Rebuild the scenario_notes.txt layout, preferring translated notes.

    Keys missing from *translated_notes* fall back to the English original.
    """
    chunks = [
        f"# {key}\n{translated_notes.get(key, text)}\n"
        for key, text in original_notes.items()
    ]
    return "\n".join(chunks)
|
|
|
|
| |
|
|
def load_checkpoint() -> dict:
    """Load the resume checkpoint, falling back to a fresh empty state.

    Checkpoint structure:
        {
            "files": {
                "scenario_prompts.txt": {
                    "translated_scenarios": {...},
                    "status": "completed" | "in_progress"
                },
                "scenario_notes.txt": {
                    "translated_notes": {...},
                    "status": "completed" | "in_progress"
                },
                "single_file_name.txt": {
                    "translated": "...",
                    "status": "completed" | "in_progress"
                },
                ...
            }
        }

    Returns a dict guaranteed to contain a dict under "files", so callers
    may index state["files"] without further checks.
    """
    if CHECKPOINT_FILE.exists():
        try:
            state = json.loads(CHECKPOINT_FILE.read_text(encoding="utf-8"))
        # Narrowed from a bare Exception: only I/O and decode/parse failures
        # are expected here (JSONDecodeError and UnicodeDecodeError are both
        # ValueError subclasses). Anything else should surface.
        except (OSError, ValueError):
            state = None
        # BUGFIX: a syntactically valid checkpoint lacking a "files" dict
        # previously crashed later at state["files"]; validate the shape and
        # fall back to a fresh state instead.
        if isinstance(state, dict) and isinstance(state.get("files"), dict):
            return state
    return {"files": {}}
|
|
|
|
def save_checkpoint(state: dict) -> None:
    """Persist *state* to CHECKPOINT_FILE as pretty-printed UTF-8 JSON."""
    CHECKPOINT_FILE.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(state, ensure_ascii=False, indent=2)
    CHECKPOINT_FILE.write_text(serialized, encoding="utf-8")
|
|
|
|
| |
|
|
async def translate_scenario_file(
    client: VLLMClient,
    filepath: Path,
    file_key: str,
    state: dict,
    dry_run: bool,
    lock: asyncio.Lock,
) -> dict:
    """Translate a scenario-split file prompt-by-prompt.

    Parses *filepath* with parse_scenario_prompts, translates every prompt of
    every scenario not yet recorded in the checkpoint, and saves the
    checkpoint after each completed scenario. With *dry_run* only the first
    two scenarios are processed and the file is never marked "completed".

    Args:
        client: translation client (one semaphore-bounded request per prompt).
        file_key: filename used as the key into state["files"].
        state: mutable checkpoint state shared across tasks.
        lock: serializes checkpoint mutation across concurrent scenario tasks.

    Returns:
        {"scenarios": <parsed originals>, "translated": {scenario_id: {...}}}
    """
    scenarios = parse_scenario_prompts(filepath)

    if file_key not in state["files"]:
        state["files"][file_key] = {"translated_scenarios": {}, "status": "in_progress"}

    file_state = state["files"][file_key]
    translated = file_state.get("translated_scenarios", {})

    # Resume support: only scenarios absent from the checkpoint are translated.
    target_scenarios = scenarios[:2] if dry_run else scenarios
    pending = [s for s in target_scenarios if str(s["id"]) not in translated]

    print(f"[translate] {file_key}: シナリオ翻訳開始: {len(target_scenarios)}件 (未翻訳: {len(pending)}件)")

    async def _translate_one(scenario: dict) -> None:
        # Translate all prompts of one scenario; a failed prompt falls back to
        # its English original instead of aborting the whole run.
        sid = str(scenario["id"])
        trans_scenario: dict = {"header": scenario["header"], "prompts": {}}

        for prompt_key, prompt_text in scenario["prompts"].items():
            if not prompt_text.strip():
                # Nothing to translate — keep the (whitespace-only) original.
                trans_scenario["prompts"][prompt_key] = prompt_text
                continue
            try:
                translated_text = await client.translate(prompt_text)
                trans_scenario["prompts"][prompt_key] = translated_text
            except Exception as e:
                print(f"\n[translate] WARNING: {file_key} シナリオ{sid}/{prompt_key} 翻訳失敗: {e}")
                trans_scenario["prompts"][prompt_key] = prompt_text

        # Lock guards the shared state dict and the checkpoint write.
        async with lock:
            translated[sid] = trans_scenario
            file_state["translated_scenarios"] = translated
            save_checkpoint(state)
        print(f"[translate] {file_key} シナリオ {sid} 完了")

    await asyncio.gather(*[_translate_one(s) for s in pending])

    # Mark completed only when every scenario in the file (not just the
    # dry-run subset) has a translation recorded.
    if not dry_run and len(translated) >= len(scenarios):
        file_state["status"] = "completed"
        save_checkpoint(state)

    print(f"[translate] {file_key}: シナリオ翻訳完了: {len(translated)}件")
    return {"scenarios": scenarios, "translated": translated}
|
|
|
|
async def translate_note_file(
    client: VLLMClient,
    filepath: Path,
    file_key: str,
    state: dict,
    dry_run: bool,
    lock: asyncio.Lock,
) -> dict:
    """Translate a note-split file note-by-note.

    Parses *filepath* with parse_scenario_notes, translates every note not
    yet recorded in the checkpoint, and saves the checkpoint after each note.
    With *dry_run* only the first two notes are processed and the file is
    never marked "completed".

    Returns:
        {"notes": <parsed originals>, "translated": {note_id: text}}
    """
    notes = parse_scenario_notes(filepath)

    if file_key not in state["files"]:
        state["files"][file_key] = {"translated_notes": {}, "status": "in_progress"}

    file_state = state["files"][file_key]
    translated_notes = file_state.get("translated_notes", {})

    # Resume support: skip notes already present in the checkpoint.
    target_notes = dict(list(notes.items())[:2]) if dry_run else notes
    pending = {k: v for k, v in target_notes.items() if k not in translated_notes}

    print(f"[translate] {file_key}: ノート翻訳開始: {len(target_notes)}件 (未翻訳: {len(pending)}件)")

    async def _translate_one(key: str, note: str) -> None:
        # A failed note falls back to its English original instead of
        # aborting the whole run.
        try:
            translated_text = await client.translate(note)
        except Exception as e:
            print(f"\n[translate] WARNING: {file_key} ノート#{key} 翻訳失敗: {e}")
            translated_text = note
        # Lock guards the shared state dict and the checkpoint write.
        async with lock:
            translated_notes[key] = translated_text
            file_state["translated_notes"] = translated_notes
            save_checkpoint(state)
        print(f"[translate] {file_key} ノート #{key} 完了")

    await asyncio.gather(*[_translate_one(k, v) for k, v in pending.items()])

    # Mark completed only when every note in the file (not just the dry-run
    # subset) has a translation recorded.
    if not dry_run and len(translated_notes) >= len(notes):
        file_state["status"] = "completed"
        save_checkpoint(state)

    print(f"[translate] {file_key}: ノート翻訳完了: {len(translated_notes)}件")
    return {"notes": notes, "translated": translated_notes}
|
|
|
|
async def translate_single_file(
    client: VLLMClient,
    filepath: Path,
    file_key: str,
    state: dict,
    dry_run: bool,
    lock: asyncio.Lock,
) -> dict:
    """Translate an entire file in a single request.

    When the checkpoint already holds a non-empty translation for *file_key*,
    the file is skipped. On translation failure the original English content
    is used as the fallback result (logged, not raised).

    Returns:
        {"original": <source text>, "translated": <translated text>}
    """
    content = parse_single_file(filepath)

    if file_key not in state["files"]:
        state["files"][file_key] = {"translated": None, "status": "in_progress"}

    file_state = state["files"][file_key]

    # Resume support: a non-empty checkpoint entry means this file was
    # already translated in a previous run.
    if file_state.get("translated"):
        print(f"[translate] {file_key}: 既に翻訳済み、スキップ")
        return {"original": content, "translated": file_state["translated"]}

    print(f"[translate] {file_key}: 翻訳開始 ({len(content)} chars)")

    try:
        translated_text = await client.translate(content)
    except Exception as e:
        # Best-effort: fall back to the English original rather than abort.
        print(f"\n[translate] ERROR: {file_key} 翻訳失敗: {e}")
        translated_text = content

    # Lock guards the shared state dict and the checkpoint write.
    async with lock:
        file_state["translated"] = translated_text
        # Dry runs leave the status as "in_progress".
        if not dry_run:
            file_state["status"] = "completed"
        save_checkpoint(state)

    print(f"[translate] {file_key}: 翻訳完了")
    return {"original": content, "translated": translated_text}
|
|
|
|
| |
|
|
async def upload_to_hf(output_dir: Path) -> None:
    """Upload every *.txt in *output_dir* to the HF_REPO dataset repository.

    No-op when HF_TOKEN is unset. Errors are logged (with traceback) and
    never raised, so a failed upload does not lose local translation results.
    """
    if not HF_TOKEN:
        print("[hub] HF_TOKEN 未設定のためアップロードをスキップ")
        return
    try:
        # Imported lazily so the script still runs without huggingface_hub
        # installed when the upload step is skipped.
        from huggingface_hub import HfApi
        api = HfApi(token=HF_TOKEN)
        api.create_repo(repo_id=HF_REPO, repo_type="dataset", private=True, exist_ok=True)
        for f in output_dir.glob("*.txt"):
            api.upload_file(
                path_or_fileobj=str(f),
                path_in_repo=f"data/{f.name}",
                repo_id=HF_REPO,
                repo_type="dataset",
                commit_message=f"update: {f.name}",
            )
            print(f"[hub] アップロード完了: {f.name}")
        print(f"[hub] ✅ https://huggingface.co/datasets/{HF_REPO}")
    except Exception as e:
        print(f"[hub] アップロードエラー: {e}")
        print(traceback.format_exc())
|
|
|
|
| |
|
|
async def process_file(
    client: VLLMClient,
    filename: str,
    file_info: dict,
    data_dir: Path,
    state: dict,
    dry_run: bool,
    lock: asyncio.Lock,
) -> tuple[str, dict] | None:
    """Translate one data file, dispatching on its configured "type".

    Args:
        filename: key into TARGET_FILES; the file is read from data_dir/filename.
        file_info: {"type": ..., "description": ...} entry for the file.

    Returns:
        (filename, result_dict) on success — result_dict carries a "type" key
        plus the type-specific translator's payload — or None when the file
        is missing or has an unknown type.
    """
    filepath = data_dir / filename

    if not filepath.exists():
        # BUGFIX: this log line printed the literal "(unknown)" instead of
        # the filename that was skipped.
        print(f"[skip] {filename} が見つかりません: {filepath}")
        return None

    file_type = file_info["type"]
    description = file_info["description"]

    print(f"\n{'='*60}")
    # BUGFIX: same here — show the filename actually being processed.
    print(f"[file] {filename} ({description})")
    print(f"[file] タイプ: {file_type}")
    print(f"{'='*60}")

    if file_type == "scenario_split":
        result = await translate_scenario_file(client, filepath, filename, state, dry_run, lock)
        return (filename, {"type": "scenario_split", **result})

    elif file_type == "note_split":
        result = await translate_note_file(client, filepath, filename, state, dry_run, lock)
        return (filename, {"type": "note_split", **result})

    elif file_type == "single":
        result = await translate_single_file(client, filepath, filename, state, dry_run, lock)
        return (filename, {"type": "single", **result})

    else:
        print(f"[warning] 不明なファイルタイプ: {file_type}")
        return None
|
|
|
|
async def main(dry_run: bool, target_file: str | None) -> None:
    """Orchestrate the full translate → write → upload pipeline.

    Args:
        dry_run: process only the first two scenarios/notes per file and
            skip the HF upload and checkpoint cleanup.
        target_file: restrict processing to one TARGET_FILES entry; process
            all files when None (or when the name is not in TARGET_FILES).
    """
    start_time = datetime.now(timezone.utc)
    print(f"{'='*60}")
    print(f"EQ-Bench3 日本語化スクリプト v2")
    print(f"開始: [{start_time.isoformat()}]")
    print(f"{'='*60}")
    print(f" EQ-Bench3 ディレクトリ: {EQBENCH_DIR}")
    print(f" 出力ディレクトリ : {OUTPUT_DIR}")
    print(f" 翻訳モデル : {TRANSLATE_MODEL}")
    print(f" dry-run : {dry_run}")
    print(f" 対象ファイル数 : {len(TARGET_FILES)}")
    print()

    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
    data_dir = EQBENCH_DIR / "data"

    # Resume from a previous interrupted run when a checkpoint exists.
    state = load_checkpoint()
    completed_files = sum(1 for f in state.get("files", {}).values() if f.get("status") == "completed")
    if completed_files > 0:
        print(f"[checkpoint] 再開: {completed_files}/{len(TARGET_FILES)}ファイル処理済み")

    async with VLLMClient() as client:
        await client.wait_for_server()

        lock = asyncio.Lock()
        results: dict[str, dict] = {}

        # Optionally narrow processing to a single file (--target-file).
        target_files_to_process = (
            {target_file: TARGET_FILES[target_file]}
            if target_file and target_file in TARGET_FILES
            else TARGET_FILES
        )

        # Files are processed sequentially; concurrency happens inside each
        # file (per-scenario / per-note tasks bounded by the client semaphore).
        for filename, file_info in target_files_to_process.items():
            result = await process_file(
                client, filename, file_info, data_dir, state, dry_run, lock
            )
            if result:
                results[result[0]] = result[1]

    # Reassemble each translated file in its original on-disk format.
    print("\n" + "=" * 60)
    print("[output] 出力ファイル生成中...")
    print("=" * 60)

    for filename, result in results.items():
        output_path = OUTPUT_DIR / filename.replace(".txt", "_ja.txt")

        if result["type"] == "scenario_split":
            output_content = build_output_prompts(result["scenarios"], result["translated"])
        elif result["type"] == "note_split":
            output_content = build_output_notes(result["notes"], result["translated"])
        elif result["type"] == "single":
            output_content = result["translated"]
        else:
            continue

        output_path.write_text(output_content, encoding="utf-8")
        print(f"[output] ✅ {output_path.name}")

        # Keep an "_en" copy of the English original alongside the "_ja" output.
        original_path = data_dir / filename
        if original_path.exists():
            import shutil
            shutil.copy(original_path, OUTPUT_DIR / filename.replace(".txt", "_en.txt"))

    if not dry_run:
        await upload_to_hf(OUTPUT_DIR)

    # Drop the checkpoint only once every target file has truly completed.
    if not dry_run and all(
        state.get("files", {}).get(f, {}).get("status") == "completed"
        for f in TARGET_FILES
    ):
        if CHECKPOINT_FILE.exists():
            CHECKPOINT_FILE.unlink()
            print("[checkpoint] チェックポイント削除(全ファイル完了)")

    elapsed = (datetime.now(timezone.utc) - start_time).total_seconds()
    print()
    print("=" * 60)
    print(f"翻訳完了 (所要時間: {elapsed/60:.1f}分)")
    print(f"出力先: {OUTPUT_DIR}/")
    print("=" * 60)
|
|
|
|
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="EQ-Bench3 日本語化スクリプト v2")
    parser.add_argument("--dry-run", action="store_true", help="最初の2シナリオのみ処理して動作確認")
    parser.add_argument(
        "--target-file",
        choices=list(TARGET_FILES.keys()),
        default=None,
        help="翻訳対象ファイルを指定(デフォルト: 全ファイル)",
    )
    parser.add_argument(
        "--list-files",
        action="store_true",
        help="対象ファイル一覧を表示して終了",
    )
    args = parser.parse_args()

    if args.list_files:
        print("翻訳対象ファイル一覧:")
        print("-" * 60)
        for filename, info in TARGET_FILES.items():
            # BUGFIX: this line printed the literal "(unknown)" instead of
            # the filename, making --list-files useless.
            print(f" {filename}")
            print(f" タイプ: {info['type']}")
            print(f" 説明 : {info['description']}")
        sys.exit(0)

    # Early warning only — the run itself proceeds and upload_to_hf skips.
    if not os.environ.get("HF_TOKEN"):
        print("[WARNING] HF_TOKEN が未設定です。HF アップロードはスキップされます。")

    asyncio.run(main(dry_run=args.dry_run, target_file=args.target_file))