"""
Backend handler for the SignX plugin.

Serves video-listing and inference requests arriving through the
WebSocket plugin_message mechanism.
"""
| |
|
| | import os |
| | import subprocess |
| | import time |
| | import threading |
| | import uuid |
| | import traceback |
| | from pathlib import Path |
| | from typing import Dict, List, Any, Optional, Tuple |
| |
|
| | |
# Plugin-local paths; everything is resolved relative to this file's directory.
SIGNX_PLUGIN_DIR = Path(__file__).parent
INFERENCE_SCRIPT = SIGNX_PLUGIN_DIR / "inference.sh"          # bash entry point that runs the model
TEST_DATA_DIR = SIGNX_PLUGIN_DIR / "eval" / "tiny_test_data"  # sample videos offered to the frontend
INFERENCE_OUTPUT_DIR = SIGNX_PLUGIN_DIR / "inference_output"  # where inference artifacts are written
# Created eagerly at import time so later writes cannot fail on a missing directory.
INFERENCE_OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
| |
|
| |
|
| | |
class PluginBackend:
    """Backend handler for the SignX plugin."""

    def __init__(self, viser_server=None):
        """Initialize the SignX backend.

        Args:
            viser_server: Optional Viser server instance.
        """
        self.viser_server = viser_server
        self.name = "signx"
        print("✅ SignX 后端已初始化")

        # Job registry: job_id -> job record. All structural access to the
        # dict goes through jobs_lock.
        self.jobs: Dict[str, Dict[str, Any]] = {}
        self.jobs_lock = threading.Lock()
| |
|
| | def handle_message(self, message: Dict[str, Any]) -> Dict[str, Any]: |
| | """ |
| | 处理来自前端的 WebSocket 消息 |
| | |
| | Args: |
| | message: 包含 plugin, action, data 的消息字典 |
| | |
| | Returns: |
| | 处理结果字典 |
| | """ |
| | action = message.get('action', '') |
| | data = message.get('data', {}) |
| |
|
| | print(f"📨 SignX 收到消息: action={action}") |
| |
|
| | |
| | if action == 'list_videos': |
| | return self.list_videos(data) |
| | if action == 'run_inference': |
| | return self.run_inference(data) |
| | if action == 'get_job_status': |
| | return self.get_job_status(data) |
| | return { |
| | 'status': 'error', |
| | 'message': f'未知的 action: {action}' |
| | } |
| |
|
| | def list_videos(self, data: Dict[str, Any]) -> Dict[str, Any]: |
| | """列出指定文件夹中的视频文件""" |
| | folder = data.get('folder', 'good_videos') |
| | folder_path = TEST_DATA_DIR / folder |
| |
|
| | if not folder_path.exists(): |
| | return { |
| | "status": "error", |
| | "message": f"文件夹不存在: {folder}" |
| | } |
| |
|
| | videos = [] |
| | for video_file in sorted(folder_path.glob("*.mp4")): |
| | videos.append({ |
| | "name": video_file.name, |
| | "path": str(video_file) |
| | }) |
| |
|
| | print(f"📂 找到 {len(videos)} 个视频文件") |
| |
|
| | return { |
| | "status": "success", |
| | "videos": videos, |
| | "count": len(videos) |
| | } |
| |
|
| | def run_inference(self, data: Dict[str, Any]) -> Dict[str, Any]: |
| | """启动异步 SignX 推理任务(支持实时日志轮询)""" |
| | video_path = (data.get('video_path') or '').strip() |
| |
|
| | if not video_path: |
| | return {"status": "error", "message": "未提供视频路径"} |
| |
|
| | if not os.path.exists(video_path): |
| | return { |
| | "status": "error", |
| | "message": f"视频文件不存在: {video_path}" |
| | } |
| |
|
| | if not INFERENCE_SCRIPT.exists(): |
| | return { |
| | "status": "error", |
| | "message": f"推理脚本不存在: {INFERENCE_SCRIPT}" |
| | } |
| |
|
| | job = self._create_job(video_path) |
| | self._append_job_log(job["id"], f"🎬 开始推理: {video_path}") |
| |
|
| | thread = threading.Thread( |
| | target=self._execute_inference_job, |
| | args=(job["id"], video_path), |
| | name=f"signx-job-{job['id']}", |
| | daemon=True |
| | ) |
| | job["thread"] = thread |
| | thread.start() |
| |
|
| | return { |
| | "status": "running", |
| | "job_id": job["id"], |
| | "message": "SignX 推理任务已启动" |
| | } |
| |
|
| | def get_job_status(self, data: Dict[str, Any]) -> Dict[str, Any]: |
| | """获取指定任务的当前状态与增量日志""" |
| | job_id = data.get('job_id') |
| | if not job_id: |
| | return {"status": "error", "message": "缺少 job_id"} |
| |
|
| | job = self._get_job(job_id) |
| | if not job: |
| | return {"status": "error", "message": f"未找到任务: {job_id}"} |
| |
|
| | last_index = data.get('last_index', 0) |
| | try: |
| | last_index = int(last_index) |
| | if last_index < 0: |
| | last_index = 0 |
| | except (TypeError, ValueError): |
| | last_index = 0 |
| |
|
| | logs = job["logs"][last_index:] |
| | response: Dict[str, Any] = { |
| | "status": "success", |
| | "job_id": job_id, |
| | "job_status": job["status"], |
| | "logs": logs, |
| | "next_index": last_index + len(logs), |
| | "finished": job["status"] in ("success", "error"), |
| | "started_at": job.get("created_at"), |
| | } |
| |
|
| | if job["status"] == "success" and job.get("result"): |
| | response["result"] = job["result"] |
| | elif job["status"] == "error": |
| | response["error_message"] = job.get("error") or "任务失败" |
| |
|
| | return response |
| |
|
| | def _create_job(self, video_path: str) -> Dict[str, Any]: |
| | job_id = f"signx_{int(time.time() * 1000)}_{uuid.uuid4().hex[:6]}" |
| | job = { |
| | "id": job_id, |
| | "video_path": video_path, |
| | "status": "running", |
| | "created_at": time.time(), |
| | "updated_at": time.time(), |
| | "logs": [], |
| | "result": None, |
| | "error": None, |
| | "thread": None |
| | } |
| | with self.jobs_lock: |
| | self.jobs[job_id] = job |
| | return job |
| |
|
| | def _get_job(self, job_id: str) -> Optional[Dict[str, Any]]: |
| | with self.jobs_lock: |
| | return self.jobs.get(job_id) |
| |
|
| | def _append_job_log(self, job_id: str, line: str): |
| | if not line: |
| | return |
| | job = self._get_job(job_id) |
| | if not job: |
| | return |
| | job["logs"].append(line) |
| | job["updated_at"] = time.time() |
| | |
| | if len(job["logs"]) > 2000: |
| | job["logs"] = job["logs"][-2000:] |
| |
|
| | def _finalize_job(self, job_id: str, status: str, *, result: Optional[Dict[str, Any]] = None, error: Optional[str] = None): |
| | job = self._get_job(job_id) |
| | if not job: |
| | return |
| | job["status"] = status |
| | job["result"] = result |
| | job["error"] = error |
| | job["finished_at"] = time.time() |
| | job["updated_at"] = job["finished_at"] |
| | self._cleanup_jobs() |
| |
|
| | def _cleanup_jobs(self, ttl_seconds: int = 1800, max_finished: int = 10): |
| | with self.jobs_lock: |
| | now = time.time() |
| | |
| | expired = [ |
| | job_id for job_id, job in self.jobs.items() |
| | if job.get("finished_at") and now - job["finished_at"] > ttl_seconds |
| | ] |
| | for job_id in expired: |
| | self.jobs.pop(job_id, None) |
| |
|
| | |
| | finished_jobs = [job for job in self.jobs.values() if job.get("finished_at")] |
| | if len(finished_jobs) > max_finished: |
| | finished_jobs.sort(key=lambda job: job.get("finished_at", now)) |
| | for job in finished_jobs[:-max_finished]: |
| | self.jobs.pop(job["id"], None) |
| |
|
| | def _find_latest_analysis_dir(self, since_ts: Optional[float] = None) -> Optional[Path]: |
| | latest_dir = None |
| | search_roots = [] |
| | if INFERENCE_OUTPUT_DIR.exists(): |
| | search_roots.append(INFERENCE_OUTPUT_DIR) |
| | search_roots.append(SIGNX_PLUGIN_DIR) |
| |
|
| | for root in search_roots: |
| | for item in root.iterdir(): |
| | if not item.is_dir() or not item.name.startswith("detailed_prediction_"): |
| | continue |
| | if since_ts and item.stat().st_mtime < since_ts - 5: |
| | continue |
| | if latest_dir is None or item.stat().st_mtime > latest_dir.stat().st_mtime: |
| | latest_dir = item |
| | return latest_dir |
| |
|
| | def _iter_sample_dirs(self, analysis_dir: Optional[Path]) -> List[Path]: |
| | if not analysis_dir or not analysis_dir.exists(): |
| | return [] |
| | sample_dirs = [d for d in analysis_dir.iterdir() if d.is_dir()] |
| | sample_dirs.sort(key=lambda p: p.stat().st_mtime, reverse=False) |
| | return sample_dirs |
| |
|
| | def _resolve_output_location(self, original_path: Path, analysis_dir: Optional[Path]) -> Optional[Path]: |
| | if original_path.exists(): |
| | return original_path |
| |
|
| | search_roots = [] |
| | if analysis_dir: |
| | search_roots.append(analysis_dir) |
| | if INFERENCE_OUTPUT_DIR.exists(): |
| | search_roots.append(INFERENCE_OUTPUT_DIR) |
| | search_roots.append(SIGNX_PLUGIN_DIR) |
| |
|
| | for root in search_roots: |
| | for candidate in root.rglob(original_path.name): |
| | if candidate.is_file(): |
| | return candidate |
| | return None |
| |
|
| | def _read_translation_file(self, sample_dir: Path) -> Tuple[str, str]: |
| | translation_path = sample_dir / "translation.txt" |
| | with_bpe = "" |
| | clean = "" |
| | if not translation_path.exists(): |
| | return with_bpe, clean |
| |
|
| | try: |
| | with open(translation_path, 'r', encoding='utf-8') as f: |
| | for line in f: |
| | if line.startswith("With BPE:"): |
| | with_bpe = line.split(":", 1)[1].strip() |
| | elif line.startswith("Clean:"): |
| | clean = line.split(":", 1)[1].strip() |
| | except Exception as exc: |
| | print(f"⚠️ 读取 translation.txt 失败: {exc}") |
| | return with_bpe, clean |
| |
|
    def _execute_inference_job(self, job_id: str, video_path: str):
        """Background thread body: run the inference script and record logs/results.

        Streams the subprocess's merged stdout/stderr into the job log for
        live polling, then harvests the analysis directory, output files and
        translation text, and finalizes the job as success or error.
        """
        output_file = SIGNX_PLUGIN_DIR / f"inference_output_{job_id}.txt"
        output_file_clean = SIGNX_PLUGIN_DIR / f"inference_output_{job_id}.txt.clean"
        start_time = time.time()
        process = None

        try:
            # stderr is folded into stdout so the frontend sees one log stream.
            process = subprocess.Popen(
                ["bash", str(INFERENCE_SCRIPT), video_path, str(output_file)],
                cwd=str(SIGNX_PLUGIN_DIR),
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
                bufsize=1
            )

            # Forward each non-empty line to the job log as it arrives.
            if process.stdout:
                for line in process.stdout:
                    line = line.rstrip()
                    if line:
                        self._append_job_log(job_id, line)
                        print(f"   {line}")

            # NOTE(review): by the time stdout is fully drained the process
            # has usually already exited, so this timeout rarely fires —
            # confirm the 10-minute cap is enforced where intended.
            process.wait(timeout=600)

        except subprocess.TimeoutExpired:
            if process:
                process.kill()
            self._append_job_log(job_id, "⏱️ 推理超时")
            self._finalize_job(job_id, "error", error="推理超时(超过10分钟)")
            return
        except Exception as exc:
            self._append_job_log(job_id, f"❌ 推理出错: {exc}")
            traceback.print_exc()
            self._finalize_job(job_id, "error", error=f"推理过程出错: {exc}")
            return

        execution_time = time.time() - start_time

        if process and process.returncode != 0:
            self._append_job_log(job_id, f"❌ 推理失败,返回码 {process.returncode}")
            self._finalize_job(job_id, "error", error="推理失败")
            return

        # Collect visualization artifacts from the analysis directory this
        # run produced (newest directory modified after start_time).
        analysis_dir = self._find_latest_analysis_dir(start_time)
        analysis_images: List[Dict[str, Any]] = []
        primary_sample_dir: Optional[Path] = None

        if analysis_dir:
            sample_dirs = self._iter_sample_dirs(analysis_dir)
            if sample_dirs:
                primary_sample_dir = sample_dirs[0]
                analysis_files = []

                # Known static images emitted per sample, with display names.
                image_files = [
                    ("attention_heatmap.png", "Attention Heatmap"),
                    ("frame_alignment.png", "Word-Frame Alignment (Full)"),
                    ("frame_alignment_short.png", "Word-Frame Alignment (Compact)"),
                    ("gloss_to_frames.png", "Gloss to Video Frames"),
                ]

                for img_file, display_name in image_files:
                    img_path = primary_sample_dir / img_file
                    if img_path.exists():
                        # URLs are served relative to the plugin mount point.
                        rel_path = img_path.relative_to(SIGNX_PLUGIN_DIR)
                        web_url = f"/plugins/SignX/{rel_path}"
                        analysis_files.append({
                            "name": display_name,
                            "type": "image",
                            "url": web_url,
                            "filename": img_file
                        })

                html_path = primary_sample_dir / "interactive_alignment.html"
                if html_path.exists():
                    rel_path = html_path.relative_to(SIGNX_PLUGIN_DIR)
                    web_url = f"/plugins/SignX/{rel_path}"
                    analysis_files.append({
                        "name": "Interactive Alignment",
                        "type": "html",
                        "url": web_url,
                        "filename": "interactive_alignment.html"
                    })

                keyframes_dir = primary_sample_dir / "attention_keyframes"
                if keyframes_dir.exists():
                    index_file = keyframes_dir / "keyframes_index.txt"
                    keyframe_info = None
                    if index_file.exists():
                        with open(index_file, 'r', encoding='utf-8') as f:
                            keyframe_info = f.read()

                    rel_path = keyframes_dir.relative_to(SIGNX_PLUGIN_DIR)
                    web_url = f"/plugins/SignX/{rel_path}"

                    # Only the first 6 keyframes are shipped as previews.
                    keyframe_previews = []
                    for kf in sorted(keyframes_dir.glob("keyframe_*.png"))[:6]:
                        kf_rel = kf.relative_to(SIGNX_PLUGIN_DIR)
                        keyframe_previews.append({
                            "url": f"/plugins/SignX/{kf_rel}",
                            "name": kf.stem
                        })

                    analysis_files.append({
                        "name": "Attention Keyframes",
                        "type": "keyframes",
                        "url": web_url,
                        "info": keyframe_info,
                        "previews": keyframe_previews,
                        "count": len(list(keyframes_dir.glob("keyframe_*.png")))
                    })

                analysis_images = analysis_files

        # Read the raw model output (BPE) and its cleaned variant, resolving
        # the files even if the script wrote them under a different root.
        output_text = ""
        output_clean = ""

        resolved_output = self._resolve_output_location(output_file, analysis_dir)
        if resolved_output and resolved_output.exists():
            output_file = resolved_output
            with open(output_file, 'r', encoding='utf-8') as f:
                output_text = f.read()

        resolved_clean = self._resolve_output_location(output_file_clean, analysis_dir)
        if resolved_clean and resolved_clean.exists():
            output_file_clean = resolved_clean
            with open(output_file_clean, 'r', encoding='utf-8') as f:
                output_clean = f.read()

        # translation.txt (when present) is the preferred source of text.
        translation_text = ""
        translation_clean = ""
        translation_path = None
        if primary_sample_dir:
            translation_text, translation_clean = self._read_translation_file(primary_sample_dir)
            if translation_text or translation_clean:
                translation_path = primary_sample_dir / "translation.txt"

        job = self._get_job(job_id)
        logs_snapshot = list(job["logs"]) if job else []

        # Prefer translation.txt contents; fall back to the raw output files.
        final_bpe = translation_text or output_text
        final_clean = translation_clean or output_clean or translation_text

        result = {
            "status": "success",
            "output": final_bpe,
            "output_clean": final_clean,
            "execution_time": execution_time,
            "analysis_dir": str(analysis_dir) if analysis_dir else None,
            "analysis_images": analysis_images,
            "video_path": video_path,
            "logs": logs_snapshot,
            "translation_path": str(translation_path) if translation_path else None
        }

        self._append_job_log(job_id, f"✅ 推理完成,耗时 {execution_time:.2f} 秒")
        self._finalize_job(job_id, "success", result=result)
| |
|