init
Browse files- .gitignore +23 -0
- CLAUDE.md +125 -0
- LICENSE +21 -0
- README.md +167 -0
- README_zh.md +167 -0
- asr_engine.py +83 -0
- asr_funasr_nano.py +125 -0
- asr_sensevoice.py +94 -0
- audio_capture.py +239 -0
- benchmark.py +180 -0
- config.yaml +78 -0
- control_panel.py +1042 -0
- dialogs.py +412 -0
- funasr_nano/__init__.py +0 -0
- funasr_nano/ctc.py +60 -0
- funasr_nano/model.py +746 -0
- funasr_nano/tools/__init__.py +0 -0
- funasr_nano/tools/utils.py +57 -0
- i18n.py +27 -0
- i18n/en.yaml +161 -0
- i18n/zh.yaml +160 -0
- log_window.py +94 -0
- main.py +648 -0
- model_manager.py +230 -0
- requirements.txt +34 -0
- screenshot/en-to-cn.png +3 -0
- screenshot/jp-to-cn.png +3 -0
- start.bat +5 -0
- subtitle_overlay.py +853 -0
- test_audio.py +56 -0
- translator.py +148 -0
- vad_processor.py +328 -0
.gitignore
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
*.pyc
|
| 3 |
+
*.pyo
|
| 4 |
+
.venv/
|
| 5 |
+
models/
|
| 6 |
+
logs/
|
| 7 |
+
user_settings.json
|
| 8 |
+
|
| 9 |
+
# IDE
|
| 10 |
+
.vscode/
|
| 11 |
+
.idea/
|
| 12 |
+
*.swp
|
| 13 |
+
*.swo
|
| 14 |
+
|
| 15 |
+
# OS
|
| 16 |
+
Thumbs.db
|
| 17 |
+
Desktop.ini
|
| 18 |
+
.DS_Store
|
| 19 |
+
|
| 20 |
+
# Distribution
|
| 21 |
+
dist/
|
| 22 |
+
build/
|
| 23 |
+
*.egg-info/
|
CLAUDE.md
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# CLAUDE.md
|
| 2 |
+
|
| 3 |
+
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
| 4 |
+
|
| 5 |
+
## Project Overview
|
| 6 |
+
|
| 7 |
+
LiveTrans is a real-time audio translation system for video players on Windows. It captures system audio via WASAPI loopback, runs speech recognition, and translates via LLM APIs, displaying results in a transparent overlay.
|
| 8 |
+
|
| 9 |
+
**Current phase**: Phase 0 Python prototype (Phase 1 will be a C++ DirectShow Audio Tap Filter).
|
| 10 |
+
|
| 11 |
+
## Running
|
| 12 |
+
|
| 13 |
+
```bash
|
| 14 |
+
# Must use the project venv (system Python lacks dependencies)
|
| 15 |
+
.venv/Scripts/python.exe main.py
|
| 16 |
+
```
|
| 17 |
+
|
| 18 |
+
Linter: `ruff` (installed globally). Run `python -m ruff check --select F,E,W --ignore E501,E402 *.py` to lint. E402 is intentionally ignored because `main.py` requires torch before PyQt6.
|
| 19 |
+
|
| 20 |
+
## Architecture
|
| 21 |
+
|
| 22 |
+
The pipeline runs in a background thread: **Audio Capture (32ms chunks) -> VAD -> ASR -> Translation (async) -> Overlay**
|
| 23 |
+
|
| 24 |
+
```
|
| 25 |
+
main.py (LiveTransApp)
|
| 26 |
+
|-- model_manager.py Centralized model detection, download, cache utils
|
| 27 |
+
|-- audio_capture.py WASAPI loopback via pyaudiowpatch, auto-reconnects on device change
|
| 28 |
+
|-- vad_processor.py Silero VAD / energy-based / disabled modes, progressive silence + backtrack split
|
| 29 |
+
|-- asr_engine.py faster-whisper (Whisper) backend
|
| 30 |
+
|-- asr_sensevoice.py FunASR SenseVoice backend (better for Japanese)
|
| 31 |
+
|-- asr_funasr_nano.py FunASR Nano backend
|
| 32 |
+
|-- translator.py OpenAI-compatible API client, streaming, make_openai_client()
|
| 33 |
+
|-- subtitle_overlay.py PyQt6 transparent overlay (2-row header: controls + model/lang combos)
|
| 34 |
+
|-- control_panel.py Settings UI (5 tabs: VAD/ASR, Translation, Style, Benchmark, Cache)
|
| 35 |
+
|-- dialogs.py Setup wizard, model download/load dialogs, ModelEditDialog
|
| 36 |
+
|-- benchmark.py Translation benchmark (BENCH_SENTENCES, run_benchmark())
|
| 37 |
+
|-- log_window.py Real-time log viewer
|
| 38 |
+
```
|
| 39 |
+
|
| 40 |
+
### Threading Model
|
| 41 |
+
|
| 42 |
+
- **Main thread**: Qt event loop (all UI)
|
| 43 |
+
- **Pipeline thread**: `_pipeline_loop` in `LiveTransApp` - reads audio, runs VAD/ASR/translation
|
| 44 |
+
- **ASR loading**: Background thread via `_switch_asr_engine` (heavy model load, ~3-8s)
|
| 45 |
+
- Cross-thread UI updates use **Qt signals** (e.g., `add_message_signal`, `update_translation_signal`)
|
| 46 |
+
- ASR readiness tracked by `_asr_ready` flag; pipeline drops segments while loading
|
| 47 |
+
|
| 48 |
+
### Configuration
|
| 49 |
+
|
| 50 |
+
- `config.yaml` - Base configuration (audio, ASR, translation, subtitle defaults)
|
| 51 |
+
- `user_settings.json` - Runtime settings persisted by control panel (models, VAD params, ASR engine choice, optional `cache_path`). Takes priority over config.yaml on load.
|
| 52 |
+
|
| 53 |
+
### Model Config
|
| 54 |
+
|
| 55 |
+
Each model in `user_settings.json` has: `name`, `api_base`, `api_key`, `model`, `proxy` ("none"/"system"/custom URL), optional `no_system_role` (bool, for APIs that reject system messages like Qwen-MT).
|
| 56 |
+
|
| 57 |
+
Proxy handling: `proxy="none"` uses `httpx.Client(trust_env=False)` to bypass system proxy; `proxy="system"` uses default httpx behavior (env vars).
|
| 58 |
+
|
| 59 |
+
### Overlay UI (subtitle_overlay.py)
|
| 60 |
+
|
| 61 |
+
DragHandle is a 2-row header bar:
|
| 62 |
+
- **Row 1**: Draggable title + action buttons (Paused/Running, Clear, Settings, Monitor, Quit)
|
| 63 |
+
- **Row 2**: Checkboxes (Click-through, Top-most, Auto-scroll) + Model combo + Target Language combo
|
| 64 |
+
|
| 65 |
+
Style system:
|
| 66 |
+
- `DEFAULT_STYLE` and `STYLE_PRESETS` defined in `subtitle_overlay.py` — 14 presets including terminal themes (Dracula, Nord, Monokai, Solarized, Gruvbox, Tokyo Night, Catppuccin, One Dark, Everforest, Kanagawa)
|
| 67 |
+
- Default style is high-contrast (pure black background, white translation text, 14pt)
|
| 68 |
+
- Original and translation text have independent `font_family` fields (`original_font_family`, `translation_font_family`)
|
| 69 |
+
- `SubtitleOverlay.apply_style(style)` updates container/header backgrounds, window opacity, and rebuilds all message HTML
|
| 70 |
+
- Style dict stored in `user_settings.json` under `"style"` key; forwarded via `settings_changed` signal → `main.py` → `overlay.apply_style()`
|
| 71 |
+
- Backward compat: old `font_family` key auto-migrated to split fields in `apply_style()`
|
| 72 |
+
|
| 73 |
+
Key overlay features:
|
| 74 |
+
- **Top-most**: Toggles `WindowStaysOnTopHint`; requires `setWindowFlags()` + `show()` to take effect
|
| 75 |
+
- **Click-through**: Uses Win32 `WS_EX_TRANSPARENT` on the scroll area while keeping header interactive
|
| 76 |
+
- **Auto-scroll**: Controls whether new messages/translations auto-scroll to bottom
|
| 77 |
+
- **Model combo**: Populated from `user_settings.json` models list; switching emits `model_switch_requested` signal
|
| 78 |
+
- **Target Language combo**: Emits `target_language_changed`; synced from settings on startup
|
| 79 |
+
|
| 80 |
+
### Settings UX
|
| 81 |
+
|
| 82 |
+
- **Auto-save with debounce**: All control panel settings (combos, spinboxes) auto-save after 300ms debounce via `_auto_save()` → `_do_auto_save()`. No manual Save button needed.
|
| 83 |
+
- **Slider special handling**: VAD/Energy sliders update labels in real-time but only trigger save on `sliderReleased` (mouse) or immediately for keyboard input (`isSliderDown()` check).
|
| 84 |
+
- **Apply Prompt button**: Kept because TextEdit shouldn't trigger on every keystroke. Also persists to disk.
|
| 85 |
+
- **Cache path**: Default `./models/` (not `~/.cache`). Applied at startup in `main.py` before `import torch` via `model_manager.apply_cache_env()`.
|
| 86 |
+
|
| 87 |
+
### Startup Flow
|
| 88 |
+
|
| 89 |
+
1. `main.py` reads `user_settings.json` and calls `apply_cache_env()` before `import torch`
|
| 90 |
+
2. First launch (no `user_settings.json`) → `SetupWizardDialog`: choose hub + path + download Silero+SenseVoice
|
| 91 |
+
3. Non-first launch but models missing → `ModelDownloadDialog`: auto-download missing models
|
| 92 |
+
4. All models ready → create main UI (overlay, panel, pipeline)
|
| 93 |
+
5. Runtime ASR engine switch: if uncached → `ModelDownloadDialog`, then `_ModelLoadDialog` for GPU loading
|
| 94 |
+
|
| 95 |
+
### Key Patterns
|
| 96 |
+
|
| 97 |
+
- `torch` must be imported before PyQt6 to avoid DLL conflicts on Windows (PyTorch 2.9.0+ bug, see `main.py` and [pytorch#166628](https://github.com/pytorch/pytorch/issues/166628))
|
| 98 |
+
- Cache env vars set at module level in `main.py` before `import torch` to ensure `TORCH_HOME` is respected
|
| 99 |
+
- Deferred initialization: ASR model loading and settings application happen via `QTimer.singleShot(100)` after UI is shown to prevent startup freeze
|
| 100 |
+
- `make_openai_client()` in `translator.py` is the single shared function for proxy-aware OpenAI client creation (used by both translator and benchmark)
|
| 101 |
+
- `create_app_icon()` in `main.py` generates the app icon; set globally via `app.setWindowIcon()` so all windows inherit it
|
| 102 |
+
- Model cache detection (`is_asr_cached`, `get_local_model_path`) checks both ModelScope and HuggingFace paths to avoid redundant downloads when switching hubs
|
| 103 |
+
- Settings log output (`_apply_settings`) filters out `models` and `system_prompt` to avoid leaking API keys
|
| 104 |
+
- FunASR Nano: `asr_funasr_nano.py` does `os.chdir(model_dir)` before `AutoModel()` so relative paths in config.yaml (e.g. `Qwen3-0.6B`) resolve locally instead of triggering HuggingFace Hub network requests
|
| 105 |
+
- `Translator` defaults to 10s timeout via `make_openai_client()` to prevent API calls from hanging indefinitely
|
| 106 |
+
- Log window is created at startup but hidden; shown via tray menu "Show Log"
|
| 107 |
+
- Audio chunk duration is 32ms (512 samples at 16kHz), matching Silero VAD's native window size for minimal latency
|
| 108 |
+
- VAD adaptive silence mode: tracks recent pause durations, sets silence threshold to P75 × 1.2, auto-adjusts between 0.3s~2.0s
|
| 109 |
+
- VAD progressive silence: the longer the buffer, the shorter the pause accepted for splitting (<3s=full, 3-6s=half, 6-10s=quarter of silence_limit)
|
| 110 |
+
- VAD backtrack split: on reaching max duration, backtrack through the confidence history to split at the lowest trough; the remainder is carried over to the next segment; three-tier strategy (absolute trough → relative trough at 20% → below-mean fallback)
|
| 111 |
+
- FunASR `disable_pbar=True` required in all `generate()` calls — tqdm crashes in GUI process when flushing stderr
|
| 112 |
+
- ASR engine lifecycle: each engine exposes `unload()` (move to CPU + release) and `to_device(device)` (in-place migration). Device switching uses `to_device()` for PyTorch engines (SenseVoice/FunASR) and full reload for ctranslate2 (Whisper). Release order: `unload()` → `del` → `gc.collect()` → `torch.cuda.empty_cache()`
|
| 113 |
+
- Whisper (ctranslate2) only accepts `device="cuda"` not `"cuda:0"`; device index passed via `device_index` param. Parsed from combo text like `"cuda:0 (RTX 4090)"` in `_switch_asr_engine`
|
| 114 |
+
- VAD speech density filter: `_flush_segment()` discards segments where <25% of chunks are above confidence threshold (noise rejection)
|
| 115 |
+
- ASR text density filter: segments ≥2s producing ≤3 alnum characters are discarded as noise
|
| 116 |
+
- Settings file uses atomic write (write to `.tmp` then `os.replace`) to prevent corruption on crash
|
| 117 |
+
- `stop()` joins pipeline thread before flushing VAD to prevent concurrent `_process_segment` calls
|
| 118 |
+
- Cancelled ASR download/failed load restores `_asr_ready` if old engine is still available
|
| 119 |
+
- `Translator._build_system_prompt` catches format errors in user prompt templates, falls back to DEFAULT_PROMPT
|
| 120 |
+
|
| 121 |
+
## Language & Style
|
| 122 |
+
|
| 123 |
+
- Respond in Chinese
|
| 124 |
+
- Code comments in English only where critical
|
| 125 |
+
- Commit messages without Co-Authored-By
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2025 Shiro
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
README.md
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# LiveTrans
|
| 2 |
+
|
| 3 |
+
**English** | [中文](README_zh.md)
|
| 4 |
+
|
| 5 |
+
Real-time audio translation tool for Windows. Captures system audio via WASAPI loopback, runs speech recognition (ASR), translates through LLM APIs, and displays results in a transparent overlay window.
|
| 6 |
+
|
| 7 |
+
Perfect for watching foreign-language videos, livestreams, and meetings — no player modifications needed, works with any system audio.
|
| 8 |
+
|
| 9 |
+

|
| 10 |
+

|
| 11 |
+

|
| 12 |
+
|
| 13 |
+
## Features
|
| 14 |
+
|
| 15 |
+
- **Real-time translation**: System audio → ASR → LLM translation → subtitle overlay, fully automatic
|
| 16 |
+
- **Multiple ASR engines**: faster-whisper, FunASR SenseVoice (optimized for Japanese), FunASR Nano
|
| 17 |
+
- **Flexible translation backend**: Compatible with any OpenAI-format API (DeepSeek, Grok, Qwen, GPT, etc.)
|
| 18 |
+
- **Low-latency VAD**: 32ms audio chunks + Silero VAD with adaptive silence detection
|
| 19 |
+
- **Transparent overlay**: Always-on-top, click-through, draggable — doesn't interfere with your workflow
|
| 20 |
+
- **CUDA acceleration**: GPU-accelerated ASR inference
|
| 21 |
+
- **Automatic model management**: First-launch setup wizard, supports ModelScope / HuggingFace dual sources
|
| 22 |
+
- **Translation benchmark**: Built-in benchmark tool for comparing model performance
|
| 23 |
+
|
| 24 |
+
## Screenshots
|
| 25 |
+
|
| 26 |
+
**English → Chinese** (Twitch livestream)
|
| 27 |
+
|
| 28 |
+

|
| 29 |
+
|
| 30 |
+
**Japanese → Chinese** (Japanese livestream)
|
| 31 |
+
|
| 32 |
+

|
| 33 |
+
|
| 34 |
+
## Requirements
|
| 35 |
+
|
| 36 |
+
- **OS**: Windows 10/11
|
| 37 |
+
- **Python**: 3.10+
|
| 38 |
+
- **GPU** (recommended): NVIDIA GPU with CUDA 12.6 (for ASR acceleration)
|
| 39 |
+
- **Network**: Access to a translation API (DeepSeek, OpenAI, etc.)
|
| 40 |
+
|
| 41 |
+
## Installation
|
| 42 |
+
|
| 43 |
+
### 1. Clone the repository
|
| 44 |
+
|
| 45 |
+
```bash
|
| 46 |
+
git clone https://github.com/TheDeathDragon/LiveTranslate.git
|
| 47 |
+
cd LiveTranslate
|
| 48 |
+
```
|
| 49 |
+
|
| 50 |
+
### 2. Create a virtual environment
|
| 51 |
+
|
| 52 |
+
```bash
|
| 53 |
+
python -m venv .venv
|
| 54 |
+
.venv\Scripts\activate
|
| 55 |
+
```
|
| 56 |
+
|
| 57 |
+
### 3. Install PyTorch (with CUDA)
|
| 58 |
+
|
| 59 |
+
Choose the install command based on your CUDA version. See [PyTorch official site](https://pytorch.org/get-started/locally/):
|
| 60 |
+
|
| 61 |
+
```bash
|
| 62 |
+
# CUDA 12.6 (recommended)
|
| 63 |
+
pip install torch torchaudio --index-url https://download.pytorch.org/whl/cu126
|
| 64 |
+
|
| 65 |
+
# CPU only (no NVIDIA GPU)
|
| 66 |
+
pip install torch torchaudio --index-url https://download.pytorch.org/whl/cpu
|
| 67 |
+
```
|
| 68 |
+
|
| 69 |
+
### 4. Install remaining dependencies
|
| 70 |
+
|
| 71 |
+
```bash
|
| 72 |
+
pip install -r requirements.txt
|
| 73 |
+
pip install funasr --no-deps
|
| 74 |
+
```
|
| 75 |
+
|
| 76 |
+
> **Note**: FunASR is installed with `--no-deps` because its dependency `editdistance` requires a C++ compiler. The pure-Python alternative `editdistance-s` is included in `requirements.txt` as a drop-in replacement.
|
| 77 |
+
|
| 78 |
+
### 5. Launch
|
| 79 |
+
|
| 80 |
+
```bash
|
| 81 |
+
.venv\Scripts\python.exe main.py
|
| 82 |
+
```
|
| 83 |
+
|
| 84 |
+
Or double-click `start.bat`.
|
| 85 |
+
|
| 86 |
+
## First Launch
|
| 87 |
+
|
| 88 |
+
1. A **setup wizard** will appear on first launch — choose your model download source (ModelScope for China, HuggingFace for international) and model cache path
|
| 89 |
+
2. Silero VAD and SenseVoice ASR models will be downloaded automatically (~1GB)
|
| 90 |
+
3. The main UI appears once downloads complete
|
| 91 |
+
|
| 92 |
+
## Configuring the Translation API
|
| 93 |
+
|
| 94 |
+
Click **Settings** on the overlay → **Translation** tab:
|
| 95 |
+
|
| 96 |
+
| Parameter | Description |
|
| 97 |
+
|-----------|-------------|
|
| 98 |
+
| API Base | API endpoint, e.g. `https://api.deepseek.com/v1` |
|
| 99 |
+
| API Key | Your API key |
|
| 100 |
+
| Model | Model name, e.g. `deepseek-chat` |
|
| 101 |
+
| Proxy | `none` (direct) / `system` (system proxy) / custom proxy URL |
|
| 102 |
+
|
| 103 |
+
Works with any OpenAI-compatible API, including:
|
| 104 |
+
- [DeepSeek](https://platform.deepseek.com/)
|
| 105 |
+
- [xAI Grok](https://console.x.ai/)
|
| 106 |
+
- [Alibaba Qwen](https://dashscope.aliyuncs.com/)
|
| 107 |
+
- [OpenAI GPT](https://platform.openai.com/)
|
| 108 |
+
- Self-hosted [Ollama](https://ollama.ai/), [vLLM](https://github.com/vllm-project/vllm), etc.
|
| 109 |
+
|
| 110 |
+
## Usage
|
| 111 |
+
|
| 112 |
+
1. Play a video or livestream with foreign-language audio
|
| 113 |
+
2. Launch LiveTrans — the overlay appears automatically
|
| 114 |
+
3. Recognized text and translations are displayed in real time
|
| 115 |
+
|
| 116 |
+
### Overlay Controls
|
| 117 |
+
|
| 118 |
+
- **Pause/Resume**: Pause or resume translation
|
| 119 |
+
- **Clear**: Clear current subtitles
|
| 120 |
+
- **Click-through**: Mouse clicks pass through the subtitle window
|
| 121 |
+
- **Always on top**: Keep overlay above all windows
|
| 122 |
+
- **Auto-scroll**: Automatically scroll to the latest subtitle
|
| 123 |
+
- **Model selector**: Switch between configured translation models
|
| 124 |
+
- **Target language**: Change the translation target language
|
| 125 |
+
|
| 126 |
+
### Settings Panel
|
| 127 |
+
|
| 128 |
+
Open via the **Settings** button on the overlay or the system tray menu:
|
| 129 |
+
|
| 130 |
+
- **VAD/ASR**: ASR engine selection, VAD mode, sensitivity parameters
|
| 131 |
+
- **Translation**: API configuration, system prompt, multi-model management
|
| 132 |
+
- **Benchmark**: Translation speed and quality benchmarks
|
| 133 |
+
- **Cache**: Model cache path management
|
| 134 |
+
|
| 135 |
+
## Architecture
|
| 136 |
+
|
| 137 |
+
```
|
| 138 |
+
Audio (WASAPI 32ms) → VAD (Silero) → ASR (Whisper/SenseVoice/Nano) → LLM Translation → Overlay
|
| 139 |
+
```
|
| 140 |
+
|
| 141 |
+
```
|
| 142 |
+
main.py Entry point & pipeline orchestration
|
| 143 |
+
├── audio_capture.py WASAPI loopback audio capture
|
| 144 |
+
├── vad_processor.py Silero VAD speech detection
|
| 145 |
+
├── asr_engine.py faster-whisper ASR backend
|
| 146 |
+
├── asr_sensevoice.py FunASR SenseVoice backend
|
| 147 |
+
├── asr_funasr_nano.py FunASR Nano backend
|
| 148 |
+
├── translator.py OpenAI-compatible translation client
|
| 149 |
+
├── model_manager.py Model detection, download & cache management
|
| 150 |
+
├── subtitle_overlay.py PyQt6 transparent overlay window
|
| 151 |
+
├── control_panel.py Settings panel UI
|
| 152 |
+
├── dialogs.py Setup wizard & model download dialogs
|
| 153 |
+
├── log_window.py Real-time log viewer
|
| 154 |
+
├── benchmark.py Translation benchmark
|
| 155 |
+
└── config.yaml Default configuration
|
| 156 |
+
```
|
| 157 |
+
|
| 158 |
+
## Known Limitations
|
| 159 |
+
|
| 160 |
+
- Windows only (depends on WASAPI loopback)
|
| 161 |
+
- ASR model first load takes a few seconds (GPU) to tens of seconds (CPU)
|
| 162 |
+
- Translation quality depends on the LLM API used
|
| 163 |
+
- Recognition degrades in noisy environments or with overlapping speakers
|
| 164 |
+
|
| 165 |
+
## License
|
| 166 |
+
|
| 167 |
+
[MIT License](LICENSE)
|
README_zh.md
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# LiveTrans
|
| 2 |
+
|
| 3 |
+
[English](README.md) | **中文**
|
| 4 |
+
|
| 5 |
+
Windows 实时音频翻译工具。捕获系统音频(WASAPI loopback),通过语音识别(ASR)转为文字,再调用 LLM API 翻译,结果显示在透明悬浮字幕窗口上。
|
| 6 |
+
|
| 7 |
+
适用于看外语视频、直播、会议等场景——无需修改播放器,全局音频捕获即开即用。
|
| 8 |
+
|
| 9 |
+

|
| 10 |
+

|
| 11 |
+

|
| 12 |
+
|
| 13 |
+
## 功能特性
|
| 14 |
+
|
| 15 |
+
- **实时翻译**:系统音频 → 语音识别 → LLM 翻译 → 字幕显示,全流程自动
|
| 16 |
+
- **多 ASR 引擎**:支持 faster-whisper、FunASR SenseVoice(日语优化)、FunASR Nano
|
| 17 |
+
- **灵活的翻译后端**:兼容所有 OpenAI 格式 API(DeepSeek、Grok、Qwen、GPT 等)
|
| 18 |
+
- **低延迟 VAD**:32ms 音频块 + Silero VAD,自适应静音检测
|
| 19 |
+
- **透明悬浮窗**:始终置顶、鼠标穿透、可拖拽,不影响正常操作
|
| 20 |
+
- **支持 CUDA 加速**:ASR 模型可使用 GPU 推理
|
| 21 |
+
- **模型自动管理**:首次启动引导下载,支持 ModelScope / HuggingFace 双源
|
| 22 |
+
- **翻译基准测试**:内置 benchmark 工具,方便对比不同模型效果
|
| 23 |
+
|
| 24 |
+
## 截图
|
| 25 |
+
|
| 26 |
+
**英语 → 中文**(Twitch 直播)
|
| 27 |
+
|
| 28 |
+

|
| 29 |
+
|
| 30 |
+
**日语 → 中文**(日语直播)
|
| 31 |
+
|
| 32 |
+

|
| 33 |
+
|
| 34 |
+
## 系统要求
|
| 35 |
+
|
| 36 |
+
- **操作系统**:Windows 10/11
|
| 37 |
+
- **Python**:3.10+
|
| 38 |
+
- **GPU**(推荐):NVIDIA 显卡 + CUDA 12.6(用于 ASR 加速)
|
| 39 |
+
- **网络**:需要访问翻译 API(如 DeepSeek、OpenAI 等)
|
| 40 |
+
|
| 41 |
+
## 安装
|
| 42 |
+
|
| 43 |
+
### 1. 克隆仓库
|
| 44 |
+
|
| 45 |
+
```bash
|
| 46 |
+
git clone https://github.com/TheDeathDragon/LiveTranslate.git
|
| 47 |
+
cd LiveTranslate
|
| 48 |
+
```
|
| 49 |
+
|
| 50 |
+
### 2. 创建虚拟环境
|
| 51 |
+
|
| 52 |
+
```bash
|
| 53 |
+
python -m venv .venv
|
| 54 |
+
.venv\Scripts\activate
|
| 55 |
+
```
|
| 56 |
+
|
| 57 |
+
### 3. 安装 PyTorch(CUDA 版)
|
| 58 |
+
|
| 59 |
+
根据你的 CUDA 版本选择安装命令,参考 [PyTorch 官网](https://pytorch.org/get-started/locally/):
|
| 60 |
+
|
| 61 |
+
```bash
|
| 62 |
+
# CUDA 12.6(推荐)
|
| 63 |
+
pip install torch torchaudio --index-url https://download.pytorch.org/whl/cu126
|
| 64 |
+
|
| 65 |
+
# 仅 CPU(无 NVIDIA 显卡)
|
| 66 |
+
pip install torch torchaudio --index-url https://download.pytorch.org/whl/cpu
|
| 67 |
+
```
|
| 68 |
+
|
| 69 |
+
### 4. 安装其余依赖
|
| 70 |
+
|
| 71 |
+
```bash
|
| 72 |
+
pip install -r requirements.txt
|
| 73 |
+
pip install funasr --no-deps
|
| 74 |
+
```
|
| 75 |
+
|
| 76 |
+
> **注意**:FunASR 使用 `--no-deps` 安装,因为其依赖 `editdistance` 需要 C++ 编译器。`requirements.txt` 中已包含纯 Python 替代品 `editdistance-s`。
|
| 77 |
+
|
| 78 |
+
### 5. 启动
|
| 79 |
+
|
| 80 |
+
```bash
|
| 81 |
+
.venv\Scripts\python.exe main.py
|
| 82 |
+
```
|
| 83 |
+
|
| 84 |
+
或者双击 `start.bat`。
|
| 85 |
+
|
| 86 |
+
## 首次使用
|
| 87 |
+
|
| 88 |
+
1. **首次启动**会弹出设置向导,选择模型下载源(ModelScope 适合国内,HuggingFace 适合海外)和模型缓存路径
|
| 89 |
+
2. 自动下载 Silero VAD 和 SenseVoice ASR 模型(约 1GB)
|
| 90 |
+
3. 下载完成后自动进入主界面
|
| 91 |
+
|
| 92 |
+
## 配置翻译 API
|
| 93 |
+
|
| 94 |
+
在悬浮窗点击 **设置** → **翻译** 标签页,配置你的翻译 API:
|
| 95 |
+
|
| 96 |
+
| 参数 | 说明 |
|
| 97 |
+
|------|------|
|
| 98 |
+
| API Base | API 地址,如 `https://api.deepseek.com/v1` |
|
| 99 |
+
| API Key | 你的 API 密钥 |
|
| 100 |
+
| Model | 模型名,如 `deepseek-chat` |
|
| 101 |
+
| 代理 | `none`(直连)/ `system`(系统代理)/ 自定义代理地址 |
|
| 102 |
+
|
| 103 |
+
支持任何 OpenAI 兼容 API,包括但不限于:
|
| 104 |
+
- [DeepSeek](https://platform.deepseek.com/)
|
| 105 |
+
- [xAI Grok](https://console.x.ai/)
|
| 106 |
+
- [阿里云 Qwen](https://dashscope.aliyuncs.com/)
|
| 107 |
+
- [OpenAI GPT](https://platform.openai.com/)
|
| 108 |
+
- 本地部署的 [Ollama](https://ollama.ai/)、[vLLM](https://github.com/vllm-project/vllm) 等
|
| 109 |
+
|
| 110 |
+
## 使用方法
|
| 111 |
+
|
| 112 |
+
1. 播放含外语音频的视频/直播
|
| 113 |
+
2. 启动 LiveTrans,悬浮窗自动出现
|
| 114 |
+
3. 实时显示识别文字和翻译结果
|
| 115 |
+
|
| 116 |
+
### 悬浮窗控件
|
| 117 |
+
|
| 118 |
+
- **暂停/继续**:暂停或恢复翻译
|
| 119 |
+
- **清除**:清空当前字幕
|
| 120 |
+
- **鼠标穿透**:开启后鼠标可穿透字幕窗口
|
| 121 |
+
- **始终置顶**:保持在最上层
|
| 122 |
+
- **自动滚动**:新字幕自动滚动到底部
|
| 123 |
+
- **模型切换**:下拉选择不同翻译模型
|
| 124 |
+
- **目标语言**:切换翻译目标语言
|
| 125 |
+
|
| 126 |
+
### 设置面板
|
| 127 |
+
|
| 128 |
+
通过悬浮窗 **设置** 按钮或系统托盘菜单打开,包含:
|
| 129 |
+
|
| 130 |
+
- **VAD/ASR**:选择 ASR 引擎、VAD 模式、灵敏度参数
|
| 131 |
+
- **翻译**:API 配置、系统提示词、多模型管理
|
| 132 |
+
- **Benchmark**:翻译速度和质量基准测试
|
| 133 |
+
- **缓存**:模型缓存路径管理
|
| 134 |
+
|
| 135 |
+
## 架构
|
| 136 |
+
|
| 137 |
+
```
|
| 138 |
+
Audio (WASAPI 32ms) → VAD (Silero) → ASR (Whisper/SenseVoice/Nano) → LLM Translation → Overlay
|
| 139 |
+
```
|
| 140 |
+
|
| 141 |
+
```
|
| 142 |
+
main.py 主入口,管线编排
|
| 143 |
+
├── audio_capture.py WASAPI loopback 音频捕获
|
| 144 |
+
├── vad_processor.py Silero VAD 语音活动检测
|
| 145 |
+
├── asr_engine.py faster-whisper ASR 后端
|
| 146 |
+
├── asr_sensevoice.py FunASR SenseVoice 后端
|
| 147 |
+
├── asr_funasr_nano.py FunASR Nano 后端
|
| 148 |
+
├── translator.py OpenAI 兼容翻译客户端
|
| 149 |
+
├── model_manager.py 模型检测、下载、缓存管理
|
| 150 |
+
├── subtitle_overlay.py PyQt6 透明悬浮窗
|
| 151 |
+
├── control_panel.py 设置面板 UI
|
| 152 |
+
├── dialogs.py 设置向导、模型下载对话框
|
| 153 |
+
├── log_window.py 实时日志查看器
|
| 154 |
+
├── benchmark.py 翻译基准测试
|
| 155 |
+
└── config.yaml 默认配置文件
|
| 156 |
+
```
|
| 157 |
+
|
| 158 |
+
## 已知限制
|
| 159 |
+
|
| 160 |
+
- 仅支持 Windows(依赖 WASAPI loopback)
|
| 161 |
+
- ASR 模型首次加载需要数秒(GPU)到数十秒(CPU)
|
| 162 |
+
- 翻译质量取决于所用 LLM API 的能力
|
| 163 |
+
- 嘈杂环境或多人同时说话时识别效果下降
|
| 164 |
+
|
| 165 |
+
## 许可证
|
| 166 |
+
|
| 167 |
+
[MIT License](LICENSE)
|
asr_engine.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from faster_whisper import WhisperModel
|
| 5 |
+
|
| 6 |
+
from translator import LANGUAGE_DISPLAY
|
| 7 |
+
|
| 8 |
+
log = logging.getLogger("LiveTrans.ASR")
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
LANGUAGE_NAMES = {**LANGUAGE_DISPLAY, "auto": "auto"}
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class ASREngine:
    """Speech-to-text backend using faster-whisper (ctranslate2).

    The model is loaded eagerly in ``__init__``; loading may take several
    seconds depending on model size and device.
    """

    def __init__(
        self,
        model_size="medium",
        device="cuda",
        device_index=0,
        compute_type="float16",
        language="auto",
        download_root=None,
    ):
        """Load a faster-whisper model.

        Args:
            model_size: Whisper model name or local path.
            device: "cuda" or "cpu". ctranslate2 rejects "cuda:0"-style
                strings; the GPU index goes in ``device_index`` instead.
            device_index: GPU index used when device == "cuda".
            compute_type: ctranslate2 compute type, e.g. "float16", "int8".
            language: Source language code, or "auto" for auto-detection.
            download_root: Optional model cache directory.
        """
        # None tells faster-whisper to auto-detect the language.
        self.language = language if language != "auto" else None
        self._model = WhisperModel(
            model_size,
            device=device,
            device_index=device_index,
            compute_type=compute_type,
            download_root=download_root,
        )
        log.info(f"Model loaded: {model_size} on {device} ({compute_type})")

    def set_language(self, language: str):
        """Change the source language; "auto" re-enables auto-detection."""
        old = self.language
        self.language = language if language != "auto" else None
        log.info(f"ASR language: {old} -> {self.language}")

    def to_device(self, device: str) -> bool:
        """Return False: ctranslate2 cannot migrate a loaded model in place.

        Callers must fully reload the engine to switch device.
        """
        return False

    def unload(self):
        """Release the underlying model. Safe to call more than once."""
        if self._model is not None:
            try:
                self._model.model.unload_model()
            except Exception:
                # Best-effort: some faster-whisper versions may not expose
                # unload_model(); dropping the reference is sufficient.
                pass
            self._model = None

    def transcribe(self, audio: np.ndarray) -> dict | None:
        """Transcribe an audio segment.

        Args:
            audio: float32 numpy array, 16kHz mono

        Returns:
            dict with 'text', 'language', 'language_name', or None if no
            speech was detected (or the model has been unloaded).
        """
        # Guard: calling transcribe() after unload() would otherwise raise
        # AttributeError on self._model.transcribe.
        if self._model is None:
            return None

        segments, info = self._model.transcribe(
            audio,
            language=self.language,
            beam_size=5,
            vad_filter=True,
            vad_parameters=dict(min_silence_duration_ms=500),
        )

        full_text = " ".join(seg.text.strip() for seg in segments).strip()
        if not full_text:
            return None

        detected_lang = info.language
        return {
            "text": full_text,
            "language": detected_lang,
            "language_name": LANGUAGE_NAMES.get(detected_lang, detected_lang),
        }
|
asr_funasr_nano.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import os
|
| 3 |
+
import sys
|
| 4 |
+
import tempfile
|
| 5 |
+
import wave
|
| 6 |
+
import re
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
log = logging.getLogger("LiveTrans.FunASR-Nano")
|
| 10 |
+
|
| 11 |
+
# Add bundled code to path so model.py can resolve its imports (ctc, tools.utils)
|
| 12 |
+
_NANO_DIR = os.path.join(os.path.dirname(__file__), "funasr_nano")
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class FunASRNanoEngine:
|
| 16 |
+
"""Speech-to-text using Fun-ASR-Nano-2512 or Fun-ASR-MLT-Nano-2512."""
|
| 17 |
+
|
| 18 |
+
def __init__(self, device="cuda", hub="ms", engine_type="funasr-nano"):
|
| 19 |
+
if _NANO_DIR not in sys.path:
|
| 20 |
+
sys.path.insert(0, _NANO_DIR)
|
| 21 |
+
|
| 22 |
+
# Pre-register FunASRNano model class before AutoModel looks it up
|
| 23 |
+
import model as _nano_model # noqa: F401
|
| 24 |
+
|
| 25 |
+
from funasr import AutoModel
|
| 26 |
+
from model_manager import ASR_MODEL_IDS, get_local_model_path
|
| 27 |
+
|
| 28 |
+
model_name = ASR_MODEL_IDS[engine_type]
|
| 29 |
+
local = get_local_model_path(engine_type, hub=hub)
|
| 30 |
+
model = local or model_name
|
| 31 |
+
|
| 32 |
+
# chdir to model dir so relative paths in config.yaml (e.g. "Qwen3-0.6B")
|
| 33 |
+
# resolve locally instead of triggering HuggingFace Hub downloads
|
| 34 |
+
prev_cwd = os.getcwd()
|
| 35 |
+
if local:
|
| 36 |
+
os.chdir(local)
|
| 37 |
+
try:
|
| 38 |
+
self._model = AutoModel(
|
| 39 |
+
model=model,
|
| 40 |
+
trust_remote_code=True,
|
| 41 |
+
device=device,
|
| 42 |
+
hub=hub,
|
| 43 |
+
disable_update=True,
|
| 44 |
+
)
|
| 45 |
+
finally:
|
| 46 |
+
os.chdir(prev_cwd)
|
| 47 |
+
self.language = None
|
| 48 |
+
log.info(f"{engine_type} loaded: {model_name} on {device} (hub={hub})")
|
| 49 |
+
|
| 50 |
+
def set_language(self, language: str):
|
| 51 |
+
old = self.language
|
| 52 |
+
self.language = language if language != "auto" else None
|
| 53 |
+
log.info(f"Fun-ASR-Nano language: {old} -> {self.language}")
|
| 54 |
+
|
| 55 |
+
def to_device(self, device: str):
|
| 56 |
+
self._model.model.to(device)
|
| 57 |
+
log.info(f"Fun-ASR-Nano moved to {device}")
|
| 58 |
+
|
| 59 |
+
def unload(self):
|
| 60 |
+
if hasattr(self, "_model") and self._model is not None:
|
| 61 |
+
try:
|
| 62 |
+
self._model.model.to("cpu")
|
| 63 |
+
except Exception:
|
| 64 |
+
pass
|
| 65 |
+
self._model = None
|
| 66 |
+
|
| 67 |
+
def transcribe(self, audio: np.ndarray) -> dict | None:
|
| 68 |
+
"""Transcribe audio segment (float32, 16kHz mono)."""
|
| 69 |
+
tmp = tempfile.mktemp(suffix=".wav")
|
| 70 |
+
try:
|
| 71 |
+
audio_16bit = (audio * 32767).astype(np.int16)
|
| 72 |
+
with wave.open(tmp, "w") as wf:
|
| 73 |
+
wf.setnchannels(1)
|
| 74 |
+
wf.setsampwidth(2)
|
| 75 |
+
wf.setframerate(16000)
|
| 76 |
+
wf.writeframes(audio_16bit.tobytes())
|
| 77 |
+
|
| 78 |
+
kwargs = {"input": [tmp], "batch_size": 1, "disable_pbar": True}
|
| 79 |
+
if self.language:
|
| 80 |
+
kwargs["language"] = self.language
|
| 81 |
+
|
| 82 |
+
result = self._model.generate(**kwargs)
|
| 83 |
+
finally:
|
| 84 |
+
try:
|
| 85 |
+
os.unlink(tmp)
|
| 86 |
+
except OSError:
|
| 87 |
+
pass
|
| 88 |
+
|
| 89 |
+
if not result or not result[0].get("text"):
|
| 90 |
+
return None
|
| 91 |
+
|
| 92 |
+
raw_text = result[0]["text"]
|
| 93 |
+
text = result[0].get("text_tn", raw_text) or raw_text
|
| 94 |
+
|
| 95 |
+
# Clean special tags
|
| 96 |
+
text = re.sub(r"<\|[^|]+\|>", "", text).strip()
|
| 97 |
+
|
| 98 |
+
if not text or text == "sil":
|
| 99 |
+
return None
|
| 100 |
+
|
| 101 |
+
detected_lang = self.language or self._guess_language(text)
|
| 102 |
+
|
| 103 |
+
log.debug(f"Raw: {raw_text} | ITN: {text}")
|
| 104 |
+
return {
|
| 105 |
+
"text": text,
|
| 106 |
+
"language": detected_lang,
|
| 107 |
+
"language_name": detected_lang,
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
def _guess_language(self, text: str) -> str:
|
| 111 |
+
cjk = sum(1 for c in text if "\u4e00" <= c <= "\u9fff")
|
| 112 |
+
jp = sum(
|
| 113 |
+
1 for c in text if "\u3040" <= c <= "\u30ff" or "\u31f0" <= c <= "\u31ff"
|
| 114 |
+
)
|
| 115 |
+
ko = sum(1 for c in text if "\uac00" <= c <= "\ud7af")
|
| 116 |
+
total = len(text)
|
| 117 |
+
if total == 0:
|
| 118 |
+
return "auto"
|
| 119 |
+
if jp > 0:
|
| 120 |
+
return "ja"
|
| 121 |
+
if ko > total * 0.3:
|
| 122 |
+
return "ko"
|
| 123 |
+
if cjk > total * 0.3:
|
| 124 |
+
return "zh"
|
| 125 |
+
return "en"
|
asr_sensevoice.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import re
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
log = logging.getLogger("LiveTrans.SenseVoice")
|
| 6 |
+
|
| 7 |
+
# Language tag mapping from SenseVoice output
|
| 8 |
+
LANG_MAP = {
|
| 9 |
+
"<|zh|>": "zh", "<|en|>": "en", "<|ja|>": "ja",
|
| 10 |
+
"<|ko|>": "ko", "<|yue|>": "yue",
|
| 11 |
+
}
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class SenseVoiceEngine:
|
| 15 |
+
"""Speech-to-text using FunASR SenseVoice."""
|
| 16 |
+
|
| 17 |
+
def __init__(self, model_name="iic/SenseVoiceSmall", device="cuda", hub="ms"):
|
| 18 |
+
from funasr import AutoModel
|
| 19 |
+
from model_manager import get_local_model_path
|
| 20 |
+
|
| 21 |
+
local = get_local_model_path("sensevoice", hub=hub)
|
| 22 |
+
model = local or model_name
|
| 23 |
+
self._model = AutoModel(
|
| 24 |
+
model=model,
|
| 25 |
+
trust_remote_code=True,
|
| 26 |
+
device=device,
|
| 27 |
+
hub=hub,
|
| 28 |
+
disable_update=True,
|
| 29 |
+
)
|
| 30 |
+
self.language = None # None = auto detect
|
| 31 |
+
log.info(f"SenseVoice loaded: {model_name} on {device} (hub={hub})")
|
| 32 |
+
|
| 33 |
+
def set_language(self, language: str):
|
| 34 |
+
old = self.language
|
| 35 |
+
self.language = language if language != "auto" else None
|
| 36 |
+
log.info(f"SenseVoice language: {old} -> {self.language}")
|
| 37 |
+
|
| 38 |
+
def to_device(self, device: str):
|
| 39 |
+
self._model.model.to(device)
|
| 40 |
+
log.info(f"SenseVoice moved to {device}")
|
| 41 |
+
|
| 42 |
+
def unload(self):
|
| 43 |
+
if hasattr(self, "_model") and self._model is not None:
|
| 44 |
+
try:
|
| 45 |
+
self._model.model.to("cpu")
|
| 46 |
+
except Exception:
|
| 47 |
+
pass
|
| 48 |
+
self._model = None
|
| 49 |
+
|
| 50 |
+
def transcribe(self, audio: np.ndarray) -> dict | None:
|
| 51 |
+
"""Transcribe audio segment.
|
| 52 |
+
|
| 53 |
+
Args:
|
| 54 |
+
audio: float32 numpy array, 16kHz mono
|
| 55 |
+
|
| 56 |
+
Returns:
|
| 57 |
+
dict with 'text', 'language', 'language_name' or None.
|
| 58 |
+
"""
|
| 59 |
+
result = self._model.generate(
|
| 60 |
+
input=audio,
|
| 61 |
+
cache={},
|
| 62 |
+
language=self.language or "auto",
|
| 63 |
+
use_itn=True,
|
| 64 |
+
batch_size_s=0,
|
| 65 |
+
disable_pbar=True,
|
| 66 |
+
)
|
| 67 |
+
|
| 68 |
+
if not result or not result[0].get("text"):
|
| 69 |
+
return None
|
| 70 |
+
|
| 71 |
+
raw_text = result[0]["text"]
|
| 72 |
+
|
| 73 |
+
# Parse language tag and clean text
|
| 74 |
+
detected_lang = "auto"
|
| 75 |
+
text = raw_text
|
| 76 |
+
|
| 77 |
+
for tag, lang in LANG_MAP.items():
|
| 78 |
+
if tag in text:
|
| 79 |
+
detected_lang = lang
|
| 80 |
+
text = text.replace(tag, "")
|
| 81 |
+
break
|
| 82 |
+
|
| 83 |
+
# Remove emotion/event tags like <|HAPPY|>, <|BGM|>, <|Speech|> etc.
|
| 84 |
+
text = re.sub(r"<\|[^|]+\|>", "", text).strip()
|
| 85 |
+
|
| 86 |
+
if not text:
|
| 87 |
+
return None
|
| 88 |
+
|
| 89 |
+
log.debug(f"Raw: {raw_text}")
|
| 90 |
+
return {
|
| 91 |
+
"text": text,
|
| 92 |
+
"language": detected_lang,
|
| 93 |
+
"language_name": detected_lang,
|
| 94 |
+
}
|
audio_capture.py
ADDED
|
@@ -0,0 +1,239 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import threading
|
| 3 |
+
import queue
|
| 4 |
+
import time
|
| 5 |
+
import numpy as np
|
| 6 |
+
import pyaudiowpatch as pyaudio
|
| 7 |
+
|
| 8 |
+
log = logging.getLogger("LiveTrans.Audio")
|
| 9 |
+
|
| 10 |
+
DEVICE_CHECK_INTERVAL = 2.0 # seconds
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def list_output_devices():
    """Return list of WASAPI output device names.

    Loopback capture endpoints are excluded; only real render devices are
    listed. Returns an empty list when no WASAPI host API is present.
    """
    pa = pyaudio.PyAudio()
    try:
        devices = []
        wasapi_idx = None
        for i in range(pa.get_host_api_count()):
            info = pa.get_host_api_info_by_index(i)
            if "WASAPI" in info["name"]:
                wasapi_idx = info["index"]
                break
        if wasapi_idx is not None:
            for i in range(pa.get_device_count()):
                dev = pa.get_device_info_by_index(i)
                if (dev["hostApi"] == wasapi_idx
                        and dev["maxOutputChannels"] > 0
                        and not dev.get("isLoopbackDevice", False)):
                    devices.append(dev["name"])
        return devices
    finally:
        # Always release PortAudio, even if enumeration raises — previously
        # an exception here leaked the native PyAudio handle.
        pa.terminate()
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class AudioCapture:
    """Capture system audio via WASAPI loopback using pyaudiowpatch."""

    def __init__(self, device=None, sample_rate=16000, chunk_duration=0.5):
        # device: output device name to capture from (None = follow system default).
        # sample_rate: rate delivered to consumers; captured audio is resampled to it.
        # chunk_duration: seconds of audio per queued chunk.
        self.sample_rate = sample_rate
        self.chunk_duration = chunk_duration
        # Bounded queue of mono float32 chunks; the oldest chunk is dropped when full.
        self.audio_queue = queue.Queue(maxsize=100)
        self._stream = None
        self._running = False
        self._device_name = device
        self._pa = pyaudio.PyAudio()
        self._read_thread = None
        # Native format of the loopback device; overwritten by _open_stream.
        self._native_channels = 2
        self._native_rate = 44100
        self._current_device_name = None
        # Serializes stream open/close/read between the reader thread and restarts.
        self._lock = threading.Lock()

    def _get_wasapi_info(self):
        # Return the WASAPI host API info dict, or None if that API is absent.
        for i in range(self._pa.get_host_api_count()):
            info = self._pa.get_host_api_info_by_index(i)
            if "WASAPI" in info["name"]:
                return info
        return None

    def _get_default_output_name(self):
        # Name of the default WASAPI output device per this (possibly stale)
        # PyAudio instance; None when WASAPI is unavailable.
        wasapi_info = self._get_wasapi_info()
        if wasapi_info is None:
            return None
        default_idx = wasapi_info["defaultOutputDevice"]
        default_dev = self._pa.get_device_info_by_index(default_idx)
        return default_dev["name"]

    @staticmethod
    def _query_current_default():
        """Create a fresh PA instance to get the actual current default device."""
        # A fresh instance is required because an existing PyAudio handle caches
        # the device list and would not see a default-device change.
        pa = pyaudio.PyAudio()
        try:
            for i in range(pa.get_host_api_count()):
                info = pa.get_host_api_info_by_index(i)
                if "WASAPI" in info["name"]:
                    default_idx = info["defaultOutputDevice"]
                    dev = pa.get_device_info_by_index(default_idx)
                    return dev["name"]
        finally:
            pa.terminate()
        return None

    def _find_loopback_device(self):
        """Find WASAPI loopback device for the default output."""
        wasapi_info = self._get_wasapi_info()
        if wasapi_info is None:
            raise RuntimeError("WASAPI host API not found")

        default_output_idx = wasapi_info["defaultOutputDevice"]
        default_output = self._pa.get_device_info_by_index(default_output_idx)
        log.info(f"Default output: {default_output['name']}")

        # Match by explicit device name when set, otherwise the system default.
        target_name = self._device_name or default_output["name"]

        # Substring match: loopback endpoints carry the render device's name
        # plus a suffix added by pyaudiowpatch.
        for i in range(self._pa.get_device_count()):
            dev = self._pa.get_device_info_by_index(i)
            if dev["hostApi"] == wasapi_info["index"] and dev.get("isLoopbackDevice", False):
                if target_name in dev["name"]:
                    return dev

        # Fallback: any loopback device
        for i in range(self._pa.get_device_count()):
            dev = self._pa.get_device_info_by_index(i)
            if dev.get("isLoopbackDevice", False):
                return dev

        raise RuntimeError("No WASAPI loopback device found")

    def _open_stream(self):
        """Open stream for current default loopback device."""
        loopback_dev = self._find_loopback_device()
        # Capture in the device's native format; conversion happens in _read_loop.
        self._native_channels = loopback_dev["maxInputChannels"]
        self._native_rate = int(loopback_dev["defaultSampleRate"])
        self._current_device_name = loopback_dev["name"]

        log.info(f"Loopback device: {loopback_dev['name']}")
        log.info(f"Native: {self._native_rate}Hz, {self._native_channels}ch -> {self.sample_rate}Hz mono")

        # Buffer sized so one read yields exactly chunk_duration of audio.
        native_chunk = int(self._native_rate * self.chunk_duration)

        self._stream = self._pa.open(
            format=pyaudio.paFloat32,
            channels=self._native_channels,
            rate=self._native_rate,
            input=True,
            input_device_index=loopback_dev["index"],
            frames_per_buffer=native_chunk,
        )

    def _close_stream(self):
        # Stop and release the stream, swallowing errors from an already-dead
        # device handle.
        if self._stream:
            try:
                self._stream.stop_stream()
                self._stream.close()
            except Exception:
                pass
            self._stream = None

    def set_device(self, device_name):
        """Change capture device at runtime. None = system default."""
        if device_name == self._device_name:
            return
        log.info(f"Audio device changed: {self._device_name} -> {device_name}")
        self._device_name = device_name
        if self._running:
            self._restart_stream()

    def _restart_stream(self):
        """Restart stream with new default device."""
        with self._lock:
            self._close_stream()
            # Refresh device list
            self._pa.terminate()
            self._pa = pyaudio.PyAudio()
            self._open_stream()

    def _read_loop(self):
        """Synchronous read loop in a background thread."""
        last_device_check = time.monotonic()

        while self._running:
            # Auto-switch only when using system default
            now = time.monotonic()
            if now - last_device_check > DEVICE_CHECK_INTERVAL:
                last_device_check = now
                if self._device_name is None:
                    try:
                        current_default = self._query_current_default()
                        # Substring check: the loopback name embeds the render
                        # device name, so mismatch means the default changed.
                        if current_default and self._current_device_name and \
                                current_default not in self._current_device_name:
                            log.info(f"System default output changed: "
                                     f"{self._current_device_name} -> {current_default}")
                            log.info("Restarting audio capture for new device...")
                            self._restart_stream()
                            log.info(f"Audio capture restarted on: {self._current_device_name}")
                    except Exception as e:
                        log.warning(f"Device check error: {e}")

            # Recomputed each iteration: _restart_stream may change the native rate.
            native_chunk = int(self._native_rate * self.chunk_duration)
            try:
                with self._lock:
                    if not self._stream:
                        continue
                    data = self._stream.read(native_chunk, exception_on_overflow=False)
            except Exception as e:
                # Typical when the device disappears (unplugged/switched);
                # wait briefly and try to reattach.
                log.warning(f"Read error (device may have changed): {e}")
                try:
                    time.sleep(0.5)
                    self._restart_stream()
                    log.info("Stream restarted after read error")
                except Exception as re:
                    log.error(f"Restart failed: {re}")
                    time.sleep(1)
                continue

            audio = np.frombuffer(data, dtype=np.float32)

            # Mix to mono
            if self._native_channels > 1:
                audio = audio.reshape(-1, self._native_channels).mean(axis=1)

            # Resample to target rate
            if self._native_rate != self.sample_rate:
                # Linear interpolation between neighboring source samples.
                ratio = self.sample_rate / self._native_rate
                n_out = int(len(audio) * ratio)
                indices = np.arange(n_out) / ratio
                indices = np.clip(indices, 0, len(audio) - 1)
                idx_floor = indices.astype(np.int64)
                idx_ceil = np.minimum(idx_floor + 1, len(audio) - 1)
                frac = (indices - idx_floor).astype(np.float32)
                audio = audio[idx_floor] * (1 - frac) + audio[idx_ceil] * frac

            try:
                self.audio_queue.put_nowait(audio)
            except queue.Full:
                self.audio_queue.get_nowait()  # Drop oldest
                self.audio_queue.put_nowait(audio)

    def start(self):
        # Open the loopback stream and launch the reader thread (daemon, so it
        # never blocks interpreter shutdown).
        self._open_stream()
        self._running = True
        self._read_thread = threading.Thread(target=self._read_loop, daemon=True)
        self._read_thread.start()
        log.info("Audio capture started")

    def stop(self):
        # Signal the reader loop to exit, wait briefly, then release the stream.
        self._running = False
        if self._read_thread:
            self._read_thread.join(timeout=3)
        self._close_stream()
        log.info("Audio capture stopped")

    def get_audio(self, timeout=1.0):
        # Blocking fetch of the next mono chunk; None when nothing arrived
        # within `timeout` seconds.
        try:
            return self.audio_queue.get(timeout=timeout)
        except queue.Empty:
            return None

    def __del__(self):
        # Best-effort release of the PortAudio handle at garbage collection.
        if self._pa:
            self._pa.terminate()
|
benchmark.py
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import statistics
|
| 2 |
+
import threading
|
| 3 |
+
import time
|
| 4 |
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 5 |
+
|
| 6 |
+
from translator import make_openai_client
|
| 7 |
+
|
| 8 |
+
# Fixed test sentences per source language, used as benchmark inputs.
BENCH_SENTENCES = {
    "ja": [
        "こんにちは、今日はいい天気ですね。",
        "この映画はとても面白かったです。",
        "明日の会議は何時からですか?",
        "日本の桜は本当に美しいですね。",
        "新しいレストランに行ってみましょう。",
    ],
    "en": [
        "Hello, the weather is nice today.",
        "That movie was really interesting.",
        "What time does tomorrow's meeting start?",
        "The cherry blossoms in Japan are truly beautiful.",
        "Let's try going to the new restaurant.",
    ],
    "zh": [
        "你好,今天天气真不错。",
        "那部电影真的很有意思。",
        "明天的会议几点开始?",
        "日本的樱花真的很美丽。",
        "我们去试试那家新餐厅吧。",
    ],
    "ko": [
        "안녕하세요, 오늘 날씨가 좋네요.",
        "그 영화 정말 재미있었어요.",
        "내일 회의는 몇 시부터인가요?",
        "일본의 벚꽃은 정말 아름답네요.",
        "새로운 레스토랑에 가볼까요?",
    ],
    "fr": [
        "Bonjour, il fait beau aujourd'hui.",
        "Ce film était vraiment intéressant.",
        "À quelle heure commence la réunion demain?",
        "Les cerisiers en fleurs au Japon sont magnifiques.",
        "Allons essayer le nouveau restaurant.",
    ],
    "de": [
        "Hallo, heute ist schönes Wetter.",
        "Der Film war wirklich interessant.",
        "Um wie viel Uhr beginnt das Meeting morgen?",
        "Die Kirschblüten in Japan sind wunderschön.",
        "Lass uns das neue Restaurant ausprobieren.",
    ],
}


def run_benchmark(models, source_lang, target_lang, timeout_s, prompt, result_callback):
    """Run benchmark in a background thread. Calls result_callback(str) for each output line.

    Args:
        models: list of model config dicts with keys "name", "api_base",
            "api_key", "model" and optional "proxy" / "no_system_role".
        source_lang: key into BENCH_SENTENCES (falls back to "en").
        target_lang: target language code, shown in the report header.
        timeout_s: per-request timeout in seconds.
        prompt: system prompt sent with every sentence.
        result_callback: receives one string per output block, then the
            sentinel "__DONE__" when the whole benchmark has finished.
    """
    sentences = BENCH_SENTENCES.get(source_lang, BENCH_SENTENCES["en"])
    rounds = len(sentences)

    result_callback(
        f"Testing {len(models)} model(s) x {rounds} rounds | "
        f"timeout={timeout_s}s | {source_lang} -> {target_lang}\n"
        f"{'=' * 60}\n"
    )

    def _test_model(m):
        # Benchmark one model; returns a summary dict ("error" is None on success).
        name = m["name"]
        lines = [f"Model: {name}", f"  {'─' * 50}"]
        try:
            client = make_openai_client(
                m["api_base"],
                m["api_key"],
                proxy=m.get("proxy", "none"),
                timeout=timeout_s,
            )
            ttfts = []
            totals = []

            for i, text in enumerate(sentences):
                if m.get("no_system_role"):
                    # Some backends reject the "system" role; fold the prompt
                    # into the user message instead.
                    messages = [{"role": "user", "content": f"{prompt}\n{text}"}]
                else:
                    messages = [
                        {"role": "system", "content": prompt},
                        {"role": "user", "content": text},
                    ]
                try:
                    t0 = time.perf_counter()
                    stream = client.chat.completions.create(
                        model=m["model"],
                        messages=messages,
                        max_tokens=256,
                        temperature=0.3,
                        stream=True,
                    )
                    ttft = None
                    chunks = []
                    for chunk in stream:
                        # Time-to-first-token = first streamed chunk.
                        if ttft is None:
                            ttft = (time.perf_counter() - t0) * 1000
                        delta = chunk.choices[0].delta
                        if delta.content:
                            chunks.append(delta.content)
                    total_ms = (time.perf_counter() - t0) * 1000
                    result_text = "".join(chunks).strip()
                    ttft = ttft or total_ms
                except Exception:
                    # Deliberate fallback: some providers don't support
                    # streaming — retry the same request non-streaming.
                    t0 = time.perf_counter()
                    resp = client.chat.completions.create(
                        model=m["model"],
                        messages=messages,
                        max_tokens=256,
                        temperature=0.3,
                        stream=False,
                    )
                    total_ms = (time.perf_counter() - t0) * 1000
                    ttft = total_ms
                    result_text = resp.choices[0].message.content.strip()

                ttfts.append(ttft)
                totals.append(total_ms)
                lines.append(
                    f"  Round {i + 1}: {total_ms:7.0f}ms "
                    f"(TTFT {ttft:6.0f}ms) | {result_text[:60]}"
                )

            avg_total = statistics.mean(totals)
            std_total = statistics.stdev(totals) if len(totals) > 1 else 0
            avg_ttft = statistics.mean(ttfts)
            std_ttft = statistics.stdev(ttfts) if len(ttfts) > 1 else 0
            lines.append(
                f"  Avg: {avg_total:.0f}ms \u00b1 {std_total:.0f}ms "
                f"(TTFT: {avg_ttft:.0f}ms \u00b1 {std_ttft:.0f}ms)"
            )

            result_callback("\n".join(lines))
            return {
                "name": name,
                "avg_ttft": avg_ttft,
                "std_ttft": std_ttft,
                "avg_total": avg_total,
                "std_total": std_total,
                "error": None,
            }

        except Exception as e:
            err_msg = str(e).split("\n")[0][:120]
            lines.append(f"  FAILED: {err_msg}")
            result_callback("\n".join(lines))
            return {
                "name": name,
                "avg_ttft": 0,
                "std_ttft": 0,
                "avg_total": 0,
                "std_total": 0,
                "error": err_msg,
            }

    def _run_all():
        # Fan out all models in parallel, then emit a ranked summary.
        results = []
        # max(1, ...): ThreadPoolExecutor raises ValueError for max_workers=0,
        # which previously crashed this thread (and never emitted "__DONE__")
        # when the model list was empty.
        with ThreadPoolExecutor(max_workers=max(1, len(models))) as pool:
            futures = {pool.submit(_test_model, m): m for m in models}
            for fut in as_completed(futures):
                results.append(fut.result())

        ok = [r for r in results if not r["error"]]
        ok.sort(key=lambda r: r["avg_ttft"])
        result_callback(f"\n{'=' * 60}")
        result_callback("Ranking by Avg TTFT:")
        for i, r in enumerate(ok):
            result_callback(
                f"  #{i + 1} TTFT {r['avg_ttft']:6.0f}ms \u00b1 {r['std_ttft']:4.0f}ms "
                f"Total {r['avg_total']:6.0f}ms \u00b1 {r['std_total']:4.0f}ms "
                f"{r['name']}"
            )
        failed = [r for r in results if r["error"]]
        for r in failed:
            result_callback(f"  FAIL {r['name']}: {r['error']}")
        result_callback("__DONE__")

    threading.Thread(target=_run_all, daemon=True).start()
|
config.yaml
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# LiveTrans Configuration
|
| 2 |
+
|
| 3 |
+
audio:
|
| 4 |
+
# WASAPI loopback device name (null = default output device)
|
| 5 |
+
device: null
|
| 6 |
+
# Sample rate for capture
|
| 7 |
+
sample_rate: 16000
|
| 8 |
+
# Audio chunk duration in seconds for VAD processing (32ms = Silero VAD native window)
|
| 9 |
+
chunk_duration: 0.032
|
| 10 |
+
|
| 11 |
+
asr:
|
| 12 |
+
# Model size: tiny, base, small, medium, large-v3
|
| 13 |
+
model_size: "medium"
|
| 14 |
+
# Device: cuda or cpu
|
| 15 |
+
device: "cuda"
|
| 16 |
+
# Compute type: float16, int8_float16, int8
|
| 17 |
+
compute_type: "float16"
|
| 18 |
+
# Language: auto, en, ja, zh, ko, etc.
|
| 19 |
+
language: "auto"
|
| 20 |
+
# Minimum speech duration (seconds) before sending to ASR
|
| 21 |
+
min_speech_duration: 1.0
|
| 22 |
+
# Maximum speech duration (seconds) before forced segmentation
|
| 23 |
+
max_speech_duration: 8.0
|
| 24 |
+
# VAD threshold (0.0 - 1.0)
|
| 25 |
+
vad_threshold: 0.5
|
| 26 |
+
|
| 27 |
+
translation:
|
| 28 |
+
# OpenAI-compatible API settings
|
| 29 |
+
api_base: "https://api.openai.com/v1"
|
| 30 |
+
api_key: "your-api-key-here"
|
| 31 |
+
model: "gpt-4o-mini"
|
| 32 |
+
# Target language for translation
|
| 33 |
+
target_language: "zh"
|
| 34 |
+
# Source language hint (auto = detect from ASR)
|
| 35 |
+
source_language: "auto"
|
| 36 |
+
# Context window size (number of recent translations to include)
|
| 37 |
+
context_window: 10
|
| 38 |
+
# Max tokens for translation response
|
| 39 |
+
max_tokens: 256
|
| 40 |
+
# Temperature
|
| 41 |
+
temperature: 0.3
|
| 42 |
+
# Enable streaming output
|
| 43 |
+
streaming: true
|
| 44 |
+
# System prompt template ({source_lang}, {target_lang}, {context} will be replaced)
|
| 45 |
+
system_prompt: |
|
| 46 |
+
你是一个专业的影视字幕翻译专家。请将以下{source_lang}对话翻译为{target_lang}。
|
| 47 |
+
要求:
|
| 48 |
+
1. 保持口语化,符合目标语言观众的表达习惯
|
| 49 |
+
2. 简洁明了,每句翻译不超过原文两行
|
| 50 |
+
3. 如果语句不完整,结合上下文补全含义
|
| 51 |
+
4. 不要添加解释或注释,只输出翻译结果
|
| 52 |
+
|
| 53 |
+
近期对话上下文:
|
| 54 |
+
{context}
|
| 55 |
+
|
| 56 |
+
subtitle:
|
| 57 |
+
# Font family
|
| 58 |
+
font_family: "Microsoft YaHei"
|
| 59 |
+
# Font size
|
| 60 |
+
font_size: 28
|
| 61 |
+
# Font color (hex)
|
| 62 |
+
font_color: "#FFFFFF"
|
| 63 |
+
# Outline color
|
| 64 |
+
outline_color: "#000000"
|
| 65 |
+
# Outline width
|
| 66 |
+
outline_width: 2
|
| 67 |
+
# Subtitle position from bottom (pixels)
|
| 68 |
+
bottom_margin: 80
|
| 69 |
+
# Show original text above translation
|
| 70 |
+
show_original: true
|
| 71 |
+
# Original text font size
|
| 72 |
+
original_font_size: 20
|
| 73 |
+
# Original text color
|
| 74 |
+
original_color: "#CCCCCC"
|
| 75 |
+
# Subtitle display duration (seconds)
|
| 76 |
+
display_duration: 5.0
|
| 77 |
+
# Window opacity (0.0 - 1.0)
|
| 78 |
+
opacity: 0.95
|
control_panel.py
ADDED
|
@@ -0,0 +1,1042 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
import threading
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
|
| 7 |
+
from PyQt6.QtCore import Qt, QTimer, pyqtSignal
|
| 8 |
+
from PyQt6.QtGui import QFont
|
| 9 |
+
from PyQt6.QtWidgets import (
|
| 10 |
+
QApplication,
|
| 11 |
+
QColorDialog,
|
| 12 |
+
QComboBox,
|
| 13 |
+
QDoubleSpinBox,
|
| 14 |
+
QFontComboBox,
|
| 15 |
+
QGridLayout,
|
| 16 |
+
QGroupBox,
|
| 17 |
+
QHBoxLayout,
|
| 18 |
+
QLabel,
|
| 19 |
+
QListWidget,
|
| 20 |
+
QListWidgetItem,
|
| 21 |
+
QMessageBox,
|
| 22 |
+
QPushButton,
|
| 23 |
+
QSlider,
|
| 24 |
+
QSpinBox,
|
| 25 |
+
QTabWidget,
|
| 26 |
+
QTextEdit,
|
| 27 |
+
QVBoxLayout,
|
| 28 |
+
QWidget,
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
from benchmark import run_benchmark
|
| 32 |
+
from dialogs import (
|
| 33 |
+
ModelEditDialog,
|
| 34 |
+
)
|
| 35 |
+
from model_manager import (
|
| 36 |
+
MODELS_DIR,
|
| 37 |
+
dir_size,
|
| 38 |
+
format_size,
|
| 39 |
+
get_cache_entries,
|
| 40 |
+
)
|
| 41 |
+
from i18n import t
|
| 42 |
+
|
| 43 |
+
# Module-level logger; child of the application's "LiveTrans" logger hierarchy.
log = logging.getLogger("LiveTrans.Panel")

# User settings are persisted as JSON next to this module.
SETTINGS_FILE = Path(__file__).parent / "user_settings.json"
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def _load_saved_settings() -> dict | None:
    """Read persisted user settings from SETTINGS_FILE.

    Returns the parsed dict, or None when the file is absent or unreadable.
    Any failure (I/O, bad JSON) is logged as a warning and swallowed so a
    corrupt settings file never prevents startup.
    """
    try:
        if SETTINGS_FILE.exists():
            settings = json.loads(SETTINGS_FILE.read_text(encoding="utf-8"))
            log.info(f"Loaded saved settings from {SETTINGS_FILE}")
            return settings
    except Exception as exc:
        log.warning(f"Failed to load settings: {exc}")
    return None
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def _save_settings(settings: dict):
    """Persist *settings* to SETTINGS_FILE as pretty-printed JSON.

    Writes to a sibling ``.tmp`` file first and then renames it over the
    target, so a crash mid-write cannot leave a truncated settings file.
    Failures are logged as warnings, never raised.
    """
    try:
        payload = json.dumps(settings, indent=2, ensure_ascii=False)
        tmp_path = SETTINGS_FILE.with_suffix(".tmp")
        tmp_path.write_text(payload, encoding="utf-8")
        tmp_path.replace(SETTINGS_FILE)
        log.info(f"Settings saved to {SETTINGS_FILE}")
    except Exception as exc:
        log.warning(f"Failed to save settings: {exc}")
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
class ControlPanel(QWidget):
|
| 72 |
+
"""Settings and monitoring panel."""
|
| 73 |
+
|
| 74 |
+
settings_changed = pyqtSignal(dict)
|
| 75 |
+
model_changed = pyqtSignal(dict)
|
| 76 |
+
_bench_result = pyqtSignal(str)
|
| 77 |
+
_cache_result = pyqtSignal(list)
|
| 78 |
+
|
| 79 |
+
    def __init__(self, config, saved_settings=None):
        """Build the control panel window.

        Args:
            config: Parsed config.yaml dict; supplies defaults for ASR/VAD
                and the initial translation model entry.
            saved_settings: Previously persisted settings dict. When None,
                user_settings.json is loaded from disk instead; when that is
                also missing, defaults are derived from ``config``.
        """
        super().__init__()
        self._config = config
        self.setWindowTitle(t("window_control_panel"))
        self.setMinimumSize(480, 560)
        self.resize(520, 650)

        # Settings precedence: explicit argument > on-disk file > config defaults.
        saved = saved_settings or _load_saved_settings()
        if saved:
            self._current_settings = saved
        else:
            tc = config["translation"]
            self._current_settings = {
                "vad_mode": "silero",
                "vad_threshold": config["asr"]["vad_threshold"],
                "energy_threshold": 0.02,
                "min_speech_duration": config["asr"]["min_speech_duration"],
                "max_speech_duration": config["asr"]["max_speech_duration"],
                "silence_mode": "auto",
                "silence_duration": 0.8,
                "asr_language": config["asr"].get("language", "auto"),
                "asr_engine": "sensevoice",
                "asr_device": "cuda",
                # Single model entry seeded from config.yaml's translation block.
                "models": [
                    {
                        "name": f"{tc['model']}",
                        "api_base": tc["api_base"],
                        "api_key": tc["api_key"],
                        "model": tc["model"],
                    }
                ],
                "active_model": 0,
                "hub": "ms",
            }

        # Migration: settings files saved before multi-model support lack the
        # "models" list — backfill it from config.yaml.
        if "models" not in self._current_settings:
            tc = config["translation"]
            self._current_settings["models"] = [
                {
                    "name": f"{tc['model']}",
                    "api_base": tc["api_base"],
                    "api_key": tc["api_key"],
                    "model": tc["model"],
                }
            ]
            self._current_settings["active_model"] = 0

        layout = QVBoxLayout(self)
        tabs = QTabWidget()

        tabs.addTab(self._create_vad_tab(), t("tab_vad_asr"))
        tabs.addTab(self._create_translation_tab(), t("tab_translation"))
        tabs.addTab(self._create_style_tab(), t("tab_style"))
        tabs.addTab(self._create_benchmark_tab(), t("tab_benchmark"))
        # Cache tab index is remembered so _on_tab_changed can detect it.
        self._cache_tab_index = tabs.addTab(self._create_cache_tab(), t("tab_cache"))
        tabs.currentChanged.connect(self._on_tab_changed)

        layout.addWidget(tabs)

        # Worker threads deliver results back to the GUI thread via these signals.
        self._bench_result.connect(self._on_bench_result)
        self._cache_result.connect(self._on_cache_result)

        # Debounced auto-save: a burst of control changes collapses into a
        # single disk write 300 ms after the last change.
        self._save_timer = QTimer()
        self._save_timer.setSingleShot(True)
        self._save_timer.setInterval(300)
        self._save_timer.timeout.connect(self._do_auto_save)
|
| 145 |
+
|
| 146 |
+
# ── VAD / ASR Tab ──
|
| 147 |
+
|
| 148 |
+
    def _create_vad_tab(self):
        """Build the 'VAD / ASR' tab: engine selection, device/audio pickers,
        VAD mode, thresholds and speech-timing controls.

        Returns the assembled QWidget for insertion into the tab bar.
        """
        widget = QWidget()
        layout = QVBoxLayout(widget)
        s = self._current_settings

        # ── ASR engine / device / language group ──
        asr_group = QGroupBox(t("group_asr_engine"))
        asr_layout = QGridLayout(asr_group)

        self._asr_engine = QComboBox()
        self._asr_engine.addItems(
            [
                "Whisper (faster-whisper)",
                "SenseVoice (FunASR)",
                "Fun-ASR-Nano (FunASR)",
                "Fun-ASR-MLT-Nano (FunASR, 31 langs)",
            ]
        )
        # Map the persisted engine key to its combo-box row; unknown -> Whisper.
        engine_map_idx = {
            "whisper": 0,
            "sensevoice": 1,
            "funasr-nano": 2,
            "funasr-mlt-nano": 3,
        }
        engine_idx = engine_map_idx.get(s.get("asr_engine"), 0)
        self._asr_engine.setCurrentIndex(engine_idx)
        asr_layout.addWidget(QLabel(t("label_engine")), 0, 0)
        asr_layout.addWidget(self._asr_engine, 0, 1)
        self._asr_engine.currentIndexChanged.connect(self._auto_save)

        self._asr_lang = QComboBox()
        self._asr_lang.addItems(
            ["auto", "ja", "en", "zh", "ko", "fr", "de", "es", "ru"]
        )
        # Saved language wins; otherwise fall back to config.yaml's asr.language.
        lang = s.get("asr_language", self._config["asr"].get("language", "auto"))
        idx = self._asr_lang.findText(lang)
        if idx >= 0:
            self._asr_lang.setCurrentIndex(idx)
        asr_layout.addWidget(QLabel(t("label_language_hint")), 1, 0)
        asr_layout.addWidget(self._asr_lang, 1, 1)
        self._asr_lang.currentIndexChanged.connect(self._auto_save)

        self._asr_device = QComboBox()
        devices = ["cuda", "cpu"]
        try:
            import torch

            # Expand the plain "cuda" entry into one labeled item per GPU.
            for i in range(torch.cuda.device_count()):
                name = torch.cuda.get_device_name(i)
                devices.insert(i, f"cuda:{i} ({name})")
            if torch.cuda.device_count() > 0:
                devices = [d for d in devices if d != "cuda"]
        except Exception:
            # torch missing or CUDA probing failed: keep the generic entries.
            pass
        self._asr_device.addItems(devices)
        saved_dev = s.get("asr_device", self._config["asr"].get("device", "cuda"))
        # Prefix match so a saved "cuda:0" still selects "cuda:0 (RTX ...)".
        for i in range(self._asr_device.count()):
            if self._asr_device.itemText(i).startswith(saved_dev):
                self._asr_device.setCurrentIndex(i)
                break
        asr_layout.addWidget(QLabel(t("label_device")), 2, 0)
        asr_layout.addWidget(self._asr_device, 2, 1)
        self._asr_device.currentIndexChanged.connect(self._auto_save)

        self._audio_device = QComboBox()
        self._audio_device.addItem(t("system_default"))
        try:
            from audio_capture import list_output_devices

            for name in list_output_devices():
                self._audio_device.addItem(name)
        except Exception:
            # Enumeration failure leaves only the "system default" entry.
            pass
        saved_audio = s.get("audio_device")
        if saved_audio:
            idx = self._audio_device.findText(saved_audio)
            if idx >= 0:
                self._audio_device.setCurrentIndex(idx)
        asr_layout.addWidget(QLabel(t("label_audio")), 3, 0)
        asr_layout.addWidget(self._audio_device, 3, 1)
        self._audio_device.currentIndexChanged.connect(self._auto_save)

        # Model hub: index 0 = ModelScope ("ms"), index 1 = HuggingFace.
        self._hub_combo = QComboBox()
        self._hub_combo.addItems([t("hub_modelscope"), t("hub_huggingface")])
        saved_hub = s.get("hub", "ms")
        self._hub_combo.setCurrentIndex(0 if saved_hub == "ms" else 1)
        asr_layout.addWidget(QLabel(t("label_hub")), 4, 0)
        asr_layout.addWidget(self._hub_combo, 4, 1)
        self._hub_combo.currentIndexChanged.connect(self._auto_save)

        # UI language: index 0 = English ("en"), index 1 = Chinese.
        self._ui_lang_combo = QComboBox()
        self._ui_lang_combo.addItems(["English", "中文"])
        saved_lang = s.get("ui_lang", "en")
        self._ui_lang_combo.setCurrentIndex(0 if saved_lang == "en" else 1)
        asr_layout.addWidget(QLabel(t("label_ui_lang")), 5, 0)
        asr_layout.addWidget(self._ui_lang_combo, 5, 1)
        self._ui_lang_combo.currentIndexChanged.connect(self._on_ui_lang_changed)

        layout.addWidget(asr_group)

        # ── VAD mode selector ──
        mode_group = QGroupBox(t("group_vad_mode"))
        mode_layout = QVBoxLayout(mode_group)
        self._vad_mode = QComboBox()
        self._vad_mode.addItems(
            [t("vad_silero"), t("vad_energy"), t("vad_disabled")]
        )
        mode_map = {"silero": 0, "energy": 1, "disabled": 2}
        self._vad_mode.setCurrentIndex(mode_map.get(s.get("vad_mode", "energy"), 1))
        self._vad_mode.currentIndexChanged.connect(self._on_vad_mode_changed)
        self._vad_mode.currentIndexChanged.connect(self._auto_save)
        mode_layout.addWidget(self._vad_mode)
        layout.addWidget(mode_group)

        # ── Silero threshold (slider stores percent, setting stores 0.0–1.0) ──
        silero_group = QGroupBox(t("group_silero_threshold"))
        silero_layout = QGridLayout(silero_group)
        self._vad_threshold_slider = QSlider(Qt.Orientation.Horizontal)
        self._vad_threshold_slider.setRange(0, 100)
        vad_pct = int(s.get("vad_threshold", 0.5) * 100)
        self._vad_threshold_slider.setValue(vad_pct)
        self._vad_threshold_slider.valueChanged.connect(self._on_threshold_changed)
        # Save on release, not on every drag tick.
        self._vad_threshold_slider.sliderReleased.connect(self._auto_save)
        self._vad_threshold_label = QLabel(f"{vad_pct}%")
        self._vad_threshold_label.setFont(QFont("Consolas", 11, QFont.Weight.Bold))
        silero_layout.addWidget(QLabel(t("label_threshold")), 0, 0)
        silero_layout.addWidget(self._vad_threshold_slider, 0, 1)
        silero_layout.addWidget(self._vad_threshold_label, 0, 2)
        layout.addWidget(silero_group)

        # ── Energy threshold (slider stores per-mille, setting stores fraction) ──
        energy_group = QGroupBox(t("group_energy_threshold"))
        energy_layout = QGridLayout(energy_group)
        self._energy_slider = QSlider(Qt.Orientation.Horizontal)
        self._energy_slider.setRange(1, 100)
        energy_pm = int(s.get("energy_threshold", 0.03) * 1000)
        self._energy_slider.setValue(energy_pm)
        self._energy_slider.valueChanged.connect(self._on_energy_changed)
        self._energy_slider.sliderReleased.connect(self._auto_save)
        # \u2030 is the per-mille sign.
        self._energy_label = QLabel(f"{energy_pm}\u2030")
        self._energy_label.setFont(QFont("Consolas", 11, QFont.Weight.Bold))
        energy_layout.addWidget(QLabel(t("label_threshold")), 0, 0)
        energy_layout.addWidget(self._energy_slider, 0, 1)
        energy_layout.addWidget(self._energy_label, 0, 2)
        layout.addWidget(energy_group)

        # ── Speech-timing group ──
        timing_group = QGroupBox(t("group_timing"))
        timing_layout = QGridLayout(timing_group)
        self._min_speech = QDoubleSpinBox()
        self._min_speech.setRange(0.1, 5.0)
        self._min_speech.setSingleStep(0.1)
        self._min_speech.setValue(s.get("min_speech_duration", 1.0))
        self._min_speech.setSuffix(" s")
        self._min_speech.valueChanged.connect(self._on_timing_changed)
        self._min_speech.valueChanged.connect(self._auto_save)
        self._max_speech = QDoubleSpinBox()
        self._max_speech.setRange(3.0, 30.0)
        self._max_speech.setSingleStep(1.0)
        self._max_speech.setValue(s.get("max_speech_duration", 8.0))
        self._max_speech.setSuffix(" s")
        self._max_speech.valueChanged.connect(self._on_timing_changed)
        self._max_speech.valueChanged.connect(self._auto_save)
        # Silence handling: index 0 = auto, index 1 = fixed duration.
        self._silence_mode = QComboBox()
        self._silence_mode.addItems([t("silence_auto"), t("silence_fixed")])
        saved_smode = s.get("silence_mode", "auto")
        self._silence_mode.setCurrentIndex(0 if saved_smode == "auto" else 1)
        self._silence_mode.currentIndexChanged.connect(self._on_silence_mode_changed)
        self._silence_mode.currentIndexChanged.connect(self._on_timing_changed)
        self._silence_mode.currentIndexChanged.connect(self._auto_save)

        self._silence_duration = QDoubleSpinBox()
        self._silence_duration.setRange(0.1, 3.0)
        self._silence_duration.setSingleStep(0.1)
        self._silence_duration.setValue(s.get("silence_duration", 0.8))
        self._silence_duration.setSuffix(" s")
        # The fixed-duration spinbox is only editable in "fixed" mode.
        self._silence_duration.setEnabled(saved_smode != "auto")
        self._silence_duration.valueChanged.connect(self._on_timing_changed)
        self._silence_duration.valueChanged.connect(self._auto_save)

        timing_layout.addWidget(QLabel(t("label_min_speech")), 0, 0)
        timing_layout.addWidget(self._min_speech, 0, 1)
        timing_layout.addWidget(QLabel(t("label_max_speech")), 1, 0)
        timing_layout.addWidget(self._max_speech, 1, 1)
        timing_layout.addWidget(QLabel(t("label_silence")), 2, 0)
        timing_layout.addWidget(self._silence_mode, 2, 1)
        timing_layout.addWidget(QLabel(t("label_silence_dur")), 3, 0)
        timing_layout.addWidget(self._silence_duration, 3, 1)
        layout.addWidget(timing_group)

        layout.addStretch()
        return widget
|
| 335 |
+
|
| 336 |
+
# ── Translation Tab ──
|
| 337 |
+
|
| 338 |
+
    def _create_translation_tab(self):
        """Build the 'Translation' tab: active model picker, model config list,
        system-prompt editor, and request-timeout control.

        Returns the assembled QWidget for insertion into the tab bar.
        """
        widget = QWidget()
        layout = QVBoxLayout(widget)
        s = self._current_settings

        # ── Active model selector + Apply button ──
        active_group = QGroupBox(t("group_active_model"))
        active_layout = QHBoxLayout(active_group)
        self._active_model_combo = QComboBox()
        self._refresh_model_combo()
        active_idx = s.get("active_model", 0)
        # Guard against a stale saved index after models were removed.
        if 0 <= active_idx < self._active_model_combo.count():
            self._active_model_combo.setCurrentIndex(active_idx)
        self._active_model_combo.currentIndexChanged.connect(
            self._on_active_model_changed
        )
        active_layout.addWidget(self._active_model_combo)

        apply_model_btn = QPushButton(t("btn_apply"))
        apply_model_btn.setFixedWidth(60)
        apply_model_btn.clicked.connect(self._apply_active_model)
        active_layout.addWidget(apply_model_btn)
        layout.addWidget(active_group)

        # ── Model configuration list with Add/Edit/Duplicate/Remove ──
        models_group = QGroupBox(t("group_model_configs"))
        models_layout = QVBoxLayout(models_group)

        self._model_list = QListWidget()
        self._model_list.setFont(QFont("Consolas", 9))
        # Double-click opens the edit dialog for that entry.
        self._model_list.itemDoubleClicked.connect(self._on_model_double_clicked)
        self._refresh_model_list()
        models_layout.addWidget(self._model_list)

        btn_row = QHBoxLayout()
        add_btn = QPushButton(t("btn_add"))
        add_btn.clicked.connect(self._add_model)
        btn_row.addWidget(add_btn)
        edit_btn = QPushButton(t("btn_edit"))
        edit_btn.clicked.connect(self._edit_model)
        btn_row.addWidget(edit_btn)
        dup_btn = QPushButton(t("btn_duplicate"))
        dup_btn.clicked.connect(self._dup_model)
        btn_row.addWidget(dup_btn)
        del_btn = QPushButton(t("btn_remove"))
        del_btn.clicked.connect(self._remove_model)
        btn_row.addWidget(del_btn)
        models_layout.addLayout(btn_row)
        layout.addWidget(models_group)

        # ── System prompt editor ──
        prompt_group = QGroupBox(t("group_system_prompt"))
        prompt_layout = QVBoxLayout(prompt_group)

        # Imported lazily to avoid pulling the translator stack at module import.
        from translator import DEFAULT_PROMPT

        self._prompt_edit = QTextEdit()
        self._prompt_edit.setFont(QFont("Consolas", 9))
        self._prompt_edit.setMaximumHeight(100)
        self._prompt_edit.setPlainText(s.get("system_prompt", DEFAULT_PROMPT))
        prompt_layout.addWidget(self._prompt_edit)

        prompt_btn_row = QHBoxLayout()
        reset_prompt_btn = QPushButton(t("btn_restore_default"))
        reset_prompt_btn.clicked.connect(
            lambda: self._prompt_edit.setPlainText(DEFAULT_PROMPT)
        )
        prompt_btn_row.addWidget(reset_prompt_btn)
        apply_prompt_btn = QPushButton(t("btn_apply_prompt"))
        apply_prompt_btn.clicked.connect(self._apply_prompt)
        prompt_btn_row.addWidget(apply_prompt_btn)
        prompt_btn_row.addStretch()
        prompt_layout.addLayout(prompt_btn_row)
        layout.addWidget(prompt_group)

        # ── Request timeout (seconds) ──
        timeout_group = QGroupBox(t("group_timeout"))
        timeout_layout = QHBoxLayout(timeout_group)
        self._timeout_spin = QSpinBox()
        self._timeout_spin.setRange(1, 60)
        self._timeout_spin.setValue(s.get("timeout", 5))
        self._timeout_spin.setSuffix(" s")
        # Timeout is written straight into settings (no apply step needed).
        self._timeout_spin.valueChanged.connect(
            lambda v: self._current_settings.update({"timeout": v})
        )
        self._timeout_spin.valueChanged.connect(self._auto_save)
        timeout_layout.addWidget(self._timeout_spin)
        timeout_layout.addStretch()
        layout.addWidget(timeout_group)

        layout.addStretch()
        return widget
|
| 426 |
+
|
| 427 |
+
# ── Style Tab ──
|
| 428 |
+
|
| 429 |
+
def _create_style_tab(self):
|
| 430 |
+
from subtitle_overlay import DEFAULT_STYLE
|
| 431 |
+
|
| 432 |
+
widget = QWidget()
|
| 433 |
+
layout = QVBoxLayout(widget)
|
| 434 |
+
s = self._current_settings.get("style", dict(DEFAULT_STYLE))
|
| 435 |
+
|
| 436 |
+
# Preset group
|
| 437 |
+
preset_group = QGroupBox(t("group_preset"))
|
| 438 |
+
preset_layout = QHBoxLayout(preset_group)
|
| 439 |
+
self._style_preset = QComboBox()
|
| 440 |
+
preset_names = [
|
| 441 |
+
("default", t("preset_default")),
|
| 442 |
+
("transparent", t("preset_transparent")),
|
| 443 |
+
("compact", t("preset_compact")),
|
| 444 |
+
("light", t("preset_light")),
|
| 445 |
+
("dracula", t("preset_dracula")),
|
| 446 |
+
("nord", t("preset_nord")),
|
| 447 |
+
("monokai", t("preset_monokai")),
|
| 448 |
+
("solarized", t("preset_solarized")),
|
| 449 |
+
("gruvbox", t("preset_gruvbox")),
|
| 450 |
+
("tokyo_night", t("preset_tokyo_night")),
|
| 451 |
+
("catppuccin", t("preset_catppuccin")),
|
| 452 |
+
("one_dark", t("preset_one_dark")),
|
| 453 |
+
("everforest", t("preset_everforest")),
|
| 454 |
+
("kanagawa", t("preset_kanagawa")),
|
| 455 |
+
("custom", t("preset_custom")),
|
| 456 |
+
]
|
| 457 |
+
self._preset_keys = [k for k, _ in preset_names]
|
| 458 |
+
for _, label in preset_names:
|
| 459 |
+
self._style_preset.addItem(label)
|
| 460 |
+
current_preset = s.get("preset", "default")
|
| 461 |
+
if current_preset in self._preset_keys:
|
| 462 |
+
self._style_preset.setCurrentIndex(self._preset_keys.index(current_preset))
|
| 463 |
+
else:
|
| 464 |
+
self._style_preset.setCurrentIndex(5) # custom
|
| 465 |
+
self._style_preset.currentIndexChanged.connect(self._on_preset_changed)
|
| 466 |
+
preset_layout.addWidget(self._style_preset, 1)
|
| 467 |
+
reset_btn = QPushButton(t("btn_reset_style"))
|
| 468 |
+
reset_btn.clicked.connect(self._reset_style)
|
| 469 |
+
preset_layout.addWidget(reset_btn)
|
| 470 |
+
layout.addWidget(preset_group)
|
| 471 |
+
|
| 472 |
+
# Background group
|
| 473 |
+
bg_group = QGroupBox(t("group_background"))
|
| 474 |
+
bg_layout = QGridLayout(bg_group)
|
| 475 |
+
|
| 476 |
+
bg_layout.addWidget(QLabel(t("label_bg_color")), 0, 0)
|
| 477 |
+
self._bg_color_btn = self._make_color_btn(s.get("bg_color", DEFAULT_STYLE["bg_color"]))
|
| 478 |
+
self._bg_color_btn.clicked.connect(lambda: self._pick_color(self._bg_color_btn))
|
| 479 |
+
bg_layout.addWidget(self._bg_color_btn, 0, 1)
|
| 480 |
+
|
| 481 |
+
bg_layout.addWidget(QLabel(t("label_bg_opacity")), 1, 0)
|
| 482 |
+
self._bg_opacity = QSlider(Qt.Orientation.Horizontal)
|
| 483 |
+
self._bg_opacity.setRange(0, 255)
|
| 484 |
+
self._bg_opacity.setValue(s.get("bg_opacity", DEFAULT_STYLE["bg_opacity"]))
|
| 485 |
+
self._bg_opacity_label = QLabel(str(self._bg_opacity.value()))
|
| 486 |
+
self._bg_opacity.valueChanged.connect(lambda v: self._bg_opacity_label.setText(str(v)))
|
| 487 |
+
self._bg_opacity.valueChanged.connect(self._on_style_value_changed)
|
| 488 |
+
self._bg_opacity.sliderReleased.connect(self._auto_save)
|
| 489 |
+
bg_layout.addWidget(self._bg_opacity, 1, 1)
|
| 490 |
+
bg_layout.addWidget(self._bg_opacity_label, 1, 2)
|
| 491 |
+
|
| 492 |
+
bg_layout.addWidget(QLabel(t("label_header_color")), 2, 0)
|
| 493 |
+
self._header_color_btn = self._make_color_btn(s.get("header_color", DEFAULT_STYLE["header_color"]))
|
| 494 |
+
self._header_color_btn.clicked.connect(lambda: self._pick_color(self._header_color_btn))
|
| 495 |
+
bg_layout.addWidget(self._header_color_btn, 2, 1)
|
| 496 |
+
|
| 497 |
+
bg_layout.addWidget(QLabel(t("label_header_opacity")), 3, 0)
|
| 498 |
+
self._header_opacity = QSlider(Qt.Orientation.Horizontal)
|
| 499 |
+
self._header_opacity.setRange(0, 255)
|
| 500 |
+
self._header_opacity.setValue(s.get("header_opacity", DEFAULT_STYLE["header_opacity"]))
|
| 501 |
+
self._header_opacity_label = QLabel(str(self._header_opacity.value()))
|
| 502 |
+
self._header_opacity.valueChanged.connect(lambda v: self._header_opacity_label.setText(str(v)))
|
| 503 |
+
self._header_opacity.valueChanged.connect(self._on_style_value_changed)
|
| 504 |
+
self._header_opacity.sliderReleased.connect(self._auto_save)
|
| 505 |
+
bg_layout.addWidget(self._header_opacity, 3, 1)
|
| 506 |
+
bg_layout.addWidget(self._header_opacity_label, 3, 2)
|
| 507 |
+
|
| 508 |
+
bg_layout.addWidget(QLabel(t("label_border_radius")), 4, 0)
|
| 509 |
+
self._border_radius = QSpinBox()
|
| 510 |
+
self._border_radius.setRange(0, 30)
|
| 511 |
+
self._border_radius.setValue(s.get("border_radius", DEFAULT_STYLE["border_radius"]))
|
| 512 |
+
self._border_radius.setSuffix(" px")
|
| 513 |
+
self._border_radius.valueChanged.connect(self._on_style_value_changed)
|
| 514 |
+
self._border_radius.valueChanged.connect(self._auto_save)
|
| 515 |
+
bg_layout.addWidget(self._border_radius, 4, 1)
|
| 516 |
+
|
| 517 |
+
layout.addWidget(bg_group)
|
| 518 |
+
|
| 519 |
+
# Text group
|
| 520 |
+
text_group = QGroupBox(t("group_text"))
|
| 521 |
+
text_layout = QGridLayout(text_group)
|
| 522 |
+
|
| 523 |
+
text_layout.addWidget(QLabel(t("label_original_font")), 0, 0)
|
| 524 |
+
self._orig_font_combo = QFontComboBox()
|
| 525 |
+
self._orig_font_combo.setCurrentFont(QFont(s.get("original_font_family", DEFAULT_STYLE["original_font_family"])))
|
| 526 |
+
self._orig_font_combo.currentFontChanged.connect(self._on_style_value_changed)
|
| 527 |
+
self._orig_font_combo.currentFontChanged.connect(self._auto_save)
|
| 528 |
+
text_layout.addWidget(self._orig_font_combo, 0, 1)
|
| 529 |
+
|
| 530 |
+
text_layout.addWidget(QLabel(t("label_original_font_size")), 1, 0)
|
| 531 |
+
self._orig_font_size = QSpinBox()
|
| 532 |
+
self._orig_font_size.setRange(6, 24)
|
| 533 |
+
self._orig_font_size.setValue(s.get("original_font_size", DEFAULT_STYLE["original_font_size"]))
|
| 534 |
+
self._orig_font_size.setSuffix(" pt")
|
| 535 |
+
self._orig_font_size.valueChanged.connect(self._on_style_value_changed)
|
| 536 |
+
self._orig_font_size.valueChanged.connect(self._auto_save)
|
| 537 |
+
text_layout.addWidget(self._orig_font_size, 1, 1)
|
| 538 |
+
|
| 539 |
+
text_layout.addWidget(QLabel(t("label_original_color")), 2, 0)
|
| 540 |
+
self._orig_color_btn = self._make_color_btn(s.get("original_color", DEFAULT_STYLE["original_color"]))
|
| 541 |
+
self._orig_color_btn.clicked.connect(lambda: self._pick_color(self._orig_color_btn))
|
| 542 |
+
text_layout.addWidget(self._orig_color_btn, 2, 1)
|
| 543 |
+
|
| 544 |
+
text_layout.addWidget(QLabel(t("label_translation_font")), 3, 0)
|
| 545 |
+
self._trans_font_combo = QFontComboBox()
|
| 546 |
+
self._trans_font_combo.setCurrentFont(QFont(s.get("translation_font_family", DEFAULT_STYLE["translation_font_family"])))
|
| 547 |
+
self._trans_font_combo.currentFontChanged.connect(self._on_style_value_changed)
|
| 548 |
+
self._trans_font_combo.currentFontChanged.connect(self._auto_save)
|
| 549 |
+
text_layout.addWidget(self._trans_font_combo, 3, 1)
|
| 550 |
+
|
| 551 |
+
text_layout.addWidget(QLabel(t("label_translation_font_size")), 4, 0)
|
| 552 |
+
self._trans_font_size = QSpinBox()
|
| 553 |
+
self._trans_font_size.setRange(6, 24)
|
| 554 |
+
self._trans_font_size.setValue(s.get("translation_font_size", DEFAULT_STYLE["translation_font_size"]))
|
| 555 |
+
self._trans_font_size.setSuffix(" pt")
|
| 556 |
+
self._trans_font_size.valueChanged.connect(self._on_style_value_changed)
|
| 557 |
+
self._trans_font_size.valueChanged.connect(self._auto_save)
|
| 558 |
+
text_layout.addWidget(self._trans_font_size, 4, 1)
|
| 559 |
+
|
| 560 |
+
text_layout.addWidget(QLabel(t("label_translation_color")), 5, 0)
|
| 561 |
+
self._trans_color_btn = self._make_color_btn(s.get("translation_color", DEFAULT_STYLE["translation_color"]))
|
| 562 |
+
self._trans_color_btn.clicked.connect(lambda: self._pick_color(self._trans_color_btn))
|
| 563 |
+
text_layout.addWidget(self._trans_color_btn, 5, 1)
|
| 564 |
+
|
| 565 |
+
text_layout.addWidget(QLabel(t("label_timestamp_color")), 6, 0)
|
| 566 |
+
self._ts_color_btn = self._make_color_btn(s.get("timestamp_color", DEFAULT_STYLE["timestamp_color"]))
|
| 567 |
+
self._ts_color_btn.clicked.connect(lambda: self._pick_color(self._ts_color_btn))
|
| 568 |
+
text_layout.addWidget(self._ts_color_btn, 6, 1)
|
| 569 |
+
|
| 570 |
+
layout.addWidget(text_group)
|
| 571 |
+
|
| 572 |
+
# Window group
|
| 573 |
+
win_group = QGroupBox(t("group_window"))
|
| 574 |
+
win_layout = QGridLayout(win_group)
|
| 575 |
+
win_layout.addWidget(QLabel(t("label_window_opacity")), 0, 0)
|
| 576 |
+
self._window_opacity = QSlider(Qt.Orientation.Horizontal)
|
| 577 |
+
self._window_opacity.setRange(30, 100)
|
| 578 |
+
self._window_opacity.setValue(s.get("window_opacity", DEFAULT_STYLE["window_opacity"]))
|
| 579 |
+
self._window_opacity_label = QLabel(f"{self._window_opacity.value()}%")
|
| 580 |
+
self._window_opacity.valueChanged.connect(lambda v: self._window_opacity_label.setText(f"{v}%"))
|
| 581 |
+
self._window_opacity.valueChanged.connect(self._on_style_value_changed)
|
| 582 |
+
self._window_opacity.sliderReleased.connect(self._auto_save)
|
| 583 |
+
win_layout.addWidget(self._window_opacity, 0, 1)
|
| 584 |
+
win_layout.addWidget(self._window_opacity_label, 0, 2)
|
| 585 |
+
layout.addWidget(win_group)
|
| 586 |
+
|
| 587 |
+
layout.addStretch()
|
| 588 |
+
return widget
|
| 589 |
+
|
| 590 |
+
def _make_color_btn(self, color: str) -> QPushButton:
    """Build a small swatch button whose face shows *color*.

    The hex string is also stashed on the button's "hex_color" property so
    it can be read back later without parsing the stylesheet.
    """
    swatch = QPushButton()
    swatch.setFixedSize(60, 24)
    swatch.setProperty("hex_color", color)
    swatch.setStyleSheet(f"background-color: {color}; border: 1px solid #888; border-radius: 3px;")
    return swatch
|
| 596 |
+
|
| 597 |
+
def _pick_color(self, btn: QPushButton):
    """Open a color dialog seeded from *btn* and store the user's choice on it."""
    from PyQt6.QtGui import QColor as _QColor
    chosen = QColorDialog.getColor(_QColor(btn.property("hex_color")), self)
    if not chosen.isValid():
        # Dialog was cancelled — leave the swatch untouched.
        return
    value = chosen.name()
    btn.setProperty("hex_color", value)
    btn.setStyleSheet(f"background-color: {value}; border: 1px solid #888; border-radius: 3px;")
    self._on_style_value_changed()
    self._auto_save()
|
| 607 |
+
|
| 608 |
+
def _collect_style(self) -> dict:
    """Snapshot every style control into a plain settings dict.

    Color values are the "#rrggbb" strings cached on each swatch button's
    "hex_color" property; the keys mirror DEFAULT_STYLE from subtitle_overlay.
    """
    return {
        # Preset combo index maps back to its key via the parallel key list.
        "preset": self._preset_keys[self._style_preset.currentIndex()],
        "bg_color": self._bg_color_btn.property("hex_color"),
        "bg_opacity": self._bg_opacity.value(),
        "header_color": self._header_color_btn.property("hex_color"),
        "header_opacity": self._header_opacity.value(),
        "border_radius": self._border_radius.value(),
        "original_font_family": self._orig_font_combo.currentFont().family(),
        "translation_font_family": self._trans_font_combo.currentFont().family(),
        "original_font_size": self._orig_font_size.value(),
        "translation_font_size": self._trans_font_size.value(),
        "original_color": self._orig_color_btn.property("hex_color"),
        "translation_color": self._trans_color_btn.property("hex_color"),
        "timestamp_color": self._ts_color_btn.property("hex_color"),
        "window_opacity": self._window_opacity.value(),
    }
|
| 625 |
+
|
| 626 |
+
def _apply_style_to_controls(self, s: dict):
    """Update all style controls to match a style dict, without triggering auto-save."""
    def paint(btn, hex_color):
        # Swatch convention: the hex value lives on the button property and
        # is mirrored into its background stylesheet.
        btn.setProperty("hex_color", hex_color)
        btn.setStyleSheet(f"background-color: {hex_color}; border: 1px solid #888; border-radius: 3px;")

    paint(self._bg_color_btn, s["bg_color"])
    self._bg_opacity.setValue(s["bg_opacity"])
    paint(self._header_color_btn, s["header_color"])
    self._header_opacity.setValue(s["header_opacity"])
    self._border_radius.setValue(s["border_radius"])
    self._orig_font_combo.setCurrentFont(QFont(s["original_font_family"]))
    self._trans_font_combo.setCurrentFont(QFont(s["translation_font_family"]))
    self._orig_font_size.setValue(s["original_font_size"])
    self._trans_font_size.setValue(s["translation_font_size"])
    paint(self._orig_color_btn, s["original_color"])
    paint(self._trans_color_btn, s["translation_color"])
    paint(self._ts_color_btn, s["timestamp_color"])
    self._window_opacity.setValue(s["window_opacity"])
|
| 646 |
+
|
| 647 |
+
def _on_preset_changed(self, index):
    """Load the selected preset's values into the controls (silently) and save."""
    from subtitle_overlay import STYLE_PRESETS
    chosen = self._preset_keys[index]
    if chosen == "custom":
        # "Custom" is a marker entry, not a stored preset — nothing to load.
        return
    values = STYLE_PRESETS.get(chosen)
    if not values:
        return
    # Block change signals so loading the preset doesn't flip us back to Custom.
    self._block_style_signals(True)
    self._apply_style_to_controls(values)
    self._block_style_signals(False)
    self._auto_save()
|
| 659 |
+
|
| 660 |
+
def _on_style_value_changed(self, *_ignored):
    """Any manual tweak flips the preset combo to its trailing "Custom" entry."""
    custom = len(self._preset_keys) - 1
    if self._style_preset.currentIndex() != custom:
        self._style_preset.blockSignals(True)
        self._style_preset.setCurrentIndex(custom)
        self._style_preset.blockSignals(False)
    # While an opacity slider is mid-drag, defer saving; its release handler saves.
    dragging = any(
        slider.isSliderDown()
        for slider in (self._bg_opacity, self._header_opacity, self._window_opacity)
    )
    if not dragging:
        self._auto_save()
|
| 669 |
+
|
| 670 |
+
def _reset_style(self):
    """Restore the factory style: preset combo to row 0, controls to DEFAULT_STYLE."""
    from subtitle_overlay import DEFAULT_STYLE
    combo = self._style_preset
    combo.blockSignals(True)
    combo.setCurrentIndex(0)  # row 0 holds the built-in default preset
    combo.blockSignals(False)
    # Silence the style widgets while we overwrite them, then persist once.
    self._block_style_signals(True)
    self._apply_style_to_controls(DEFAULT_STYLE)
    self._block_style_signals(False)
    self._auto_save()
|
| 679 |
+
|
| 680 |
+
def _block_style_signals(self, block: bool):
    """Toggle Qt signal blocking on every style widget that feeds auto-save."""
    widgets = (
        self._bg_opacity,
        self._header_opacity,
        self._border_radius,
        self._orig_font_combo,
        self._trans_font_combo,
        self._orig_font_size,
        self._trans_font_size,
        self._window_opacity,
    )
    for widget in widgets:
        widget.blockSignals(block)
|
| 686 |
+
|
| 687 |
+
# ── Benchmark Tab ──
|
| 688 |
+
|
| 689 |
+
def _create_benchmark_tab(self):
    """Build the "Benchmark" tab: source/target language pickers, a
    test-all button, and a read-only console for results."""
    widget = QWidget()
    layout = QVBoxLayout(widget)

    # Top control row: source language, target language, run button.
    ctrl_row = QHBoxLayout()
    ctrl_row.addWidget(QLabel(t("label_source")))
    self._bench_lang = QComboBox()
    self._bench_lang.addItems(["ja", "en", "zh", "ko", "fr", "de"])
    self._bench_lang.setCurrentIndex(0)
    ctrl_row.addWidget(self._bench_lang)
    ctrl_row.addWidget(QLabel(t("target_label")))
    self._bench_target = QComboBox()
    self._bench_target.addItems(["zh", "en", "ja", "ko", "fr", "de", "es", "ru"])
    ctrl_row.addWidget(self._bench_target)
    ctrl_row.addStretch()
    self._bench_btn = QPushButton(t("btn_test_all"))
    self._bench_btn.clicked.connect(self._run_benchmark)
    ctrl_row.addWidget(self._bench_btn)
    layout.addLayout(ctrl_row)

    # Monospace, dark-themed output console; filled by _on_bench_result.
    self._bench_output = QTextEdit()
    self._bench_output.setReadOnly(True)
    self._bench_output.setFont(QFont("Consolas", 9))
    self._bench_output.setStyleSheet(
        "background: #1e1e2e; color: #cdd6f4; border: 1px solid #444;"
    )
    layout.addWidget(self._bench_output)

    return widget
|
| 718 |
+
|
| 719 |
+
# ── Cache Tab ──
|
| 720 |
+
|
| 721 |
+
def _create_cache_tab(self):
    """Build the "Cache" tab: total-size label, open-folder / delete-all
    buttons, and a list of cached model directories."""
    widget = QWidget()
    layout = QVBoxLayout(widget)

    top_row = QHBoxLayout()
    self._cache_total = QLabel("")
    self._cache_total.setFont(QFont("Consolas", 9, QFont.Weight.Bold))
    top_row.addWidget(self._cache_total, 1)
    open_btn = QPushButton(t("btn_open_folder"))
    # Tuple-lambda runs both steps on click: ensure the dir exists, then
    # open it in Explorer. NOTE(review): os.startfile is Windows-only.
    open_btn.clicked.connect(
        lambda: (
            MODELS_DIR.mkdir(parents=True, exist_ok=True),
            os.startfile(str(MODELS_DIR)),
        )
    )
    top_row.addWidget(open_btn)
    delete_all_btn = QPushButton(t("btn_delete_all_exit"))
    delete_all_btn.clicked.connect(self._delete_all_and_exit)
    top_row.addWidget(delete_all_btn)
    layout.addLayout(top_row)

    self._cache_list = QListWidget()
    self._cache_list.setFont(QFont("Consolas", 9))
    self._cache_list.setAlternatingRowColors(True)
    layout.addWidget(self._cache_list, 1)

    # Populated asynchronously by _refresh_cache via the _cache_result signal.
    self._cache_entries = []
    self._refresh_cache()

    return widget
|
| 751 |
+
|
| 752 |
+
def _on_tab_changed(self, index):
|
| 753 |
+
if index == self._cache_tab_index:
|
| 754 |
+
self._refresh_cache()
|
| 755 |
+
|
| 756 |
+
def _refresh_cache(self):
    """Kick off a background scan of the model cache; results arrive via signal."""
    self._cache_list.clear()
    self._cache_total.setText(t("scanning"))

    def worker():
        # Directory sizing can be slow on large model dirs, so compute it off
        # the UI thread and hand results back through the _cache_result signal.
        found = [
            (name, str(path), dir_size(path))
            for name, path in get_cache_entries()
        ]
        self._cache_result.emit(found)

    threading.Thread(target=worker, daemon=True).start()
|
| 769 |
+
|
| 770 |
+
def _on_cache_result(self, results):
    """Populate the cache list from the scan's (name, path, size) rows."""
    self._cache_list.clear()
    self._cache_entries = results
    for name, _path, size in results:
        self._cache_list.addItem(f"{name} — {format_size(size)}")
    if not results:
        self._cache_list.addItem(t("no_cached_models"))
    grand_total = sum(size for _, _, size in results)
    self._cache_total.setText(
        t("cache_total").format(size=format_size(grand_total), count=len(results))
    )
|
| 782 |
+
|
| 783 |
+
def _delete_all_and_exit(self):
    """Delete every cached model directory after confirmation, then quit.

    Quitting afterwards guarantees no loaded model still holds a handle
    into a directory we just removed.
    """
    if not self._cache_entries:
        return
    import shutil

    total_size = sum(s for _, _, s in self._cache_entries)
    # Confirmation dialog defaults to "No" so a stray Enter can't wipe models.
    ret = QMessageBox.warning(
        self,
        t("dialog_delete_title"),
        t("dialog_delete_msg").format(
            count=len(self._cache_entries), size=format_size(total_size)
        ),
        QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No,
        QMessageBox.StandardButton.No,
    )
    if ret != QMessageBox.StandardButton.Yes:
        return
    for name, path, _ in self._cache_entries:
        try:
            shutil.rmtree(path)
            log.info(f"Deleted: {path}")
        except Exception as e:
            # Best effort: keep deleting the rest even if one dir is locked.
            log.error(f"Failed to delete {path}: {e}")
    QApplication.instance().quit()
|
| 807 |
+
|
| 808 |
+
# ── Model Management ──
|
| 809 |
+
|
| 810 |
+
def _refresh_model_combo(self):
    """Rebuild the active-model dropdown from settings without firing signals."""
    combo = self._active_model_combo
    combo.blockSignals(True)
    combo.clear()
    for entry in self._current_settings.get("models", []):
        combo.addItem(f"{entry['name']} ({entry['model']})")
    combo.blockSignals(False)
|
| 816 |
+
|
| 817 |
+
def _refresh_model_list(self):
    """Redraw the model list, marking and bolding the active entry."""
    self._model_list.clear()
    active = self._current_settings.get("active_model", 0)
    for i, entry in enumerate(self._current_settings.get("models", [])):
        is_active = i == active
        marker = ">>> " if is_active else "    "
        proxy = entry.get("proxy", "none")
        proxy_tag = f" [proxy: {proxy}]" if proxy != "none" else ""
        item = QListWidgetItem(
            f"{marker}{entry['name']}{proxy_tag}\n    {entry['api_base']} | {entry['model']}"
        )
        if is_active:
            bold = item.font()
            bold.setBold(True)
            item.setFont(bold)
        self._model_list.addItem(item)
|
| 833 |
+
|
| 834 |
+
def _add_model(self):
    """Open the model editor dialog; append the new config if confirmed."""
    dlg = ModelEditDialog(self)
    if not dlg.exec():
        return
    entry = dlg.get_data()
    # Require at least a display name and a model id before accepting.
    if not (entry["name"] and entry["model"]):
        return
    self._current_settings.setdefault("models", []).append(entry)
    self._refresh_model_list()
    self._refresh_model_combo()
    _save_settings(self._current_settings)
|
| 843 |
+
|
| 844 |
+
def _edit_model(self):
    """Edit the selected model config in place via the editor dialog."""
    row = self._model_list.currentRow()
    models = self._current_settings.get("models", [])
    if not (0 <= row < len(models)):
        return
    dlg = ModelEditDialog(self, models[row])
    if not dlg.exec():
        return
    entry = dlg.get_data()
    # Only commit when the mandatory fields survived the edit.
    if entry["name"] and entry["model"]:
        models[row] = entry
        self._refresh_model_list()
        self._refresh_model_combo()
        _save_settings(self._current_settings)
|
| 857 |
+
|
| 858 |
+
def _dup_model(self):
    """Append a copy of the selected model, its name suffixed " (copy)"."""
    row = self._model_list.currentRow()
    models = self._current_settings.get("models", [])
    if not (0 <= row < len(models)):
        return
    # Shallow copy — entries appear to be flat dicts of scalars (verify).
    clone = dict(models[row])
    clone["name"] = clone["name"] + " (copy)"
    models.append(clone)
    self._refresh_model_list()
    self._refresh_model_combo()
    _save_settings(self._current_settings)
|
| 869 |
+
|
| 870 |
+
def _remove_model(self):
    """Delete the selected model config, always keeping at least one.

    Fix: previously, removing a row *before* the active model left
    "active_model" pointing at the wrong (shifted) entry; the index is
    now decremented so the same model stays active.
    """
    row = self._model_list.currentRow()
    models = self._current_settings.get("models", [])
    if row < 0 or row >= len(models) or len(models) <= 1:
        return
    models.pop(row)
    active = self._current_settings.get("active_model", 0)
    if row < active:
        # An earlier entry vanished: shift the active index down so it
        # still refers to the same model.
        self._current_settings["active_model"] = active - 1
    elif active >= len(models):
        # Active index fell off the end (active row removed) — clamp.
        self._current_settings["active_model"] = len(models) - 1
    self._refresh_model_list()
    self._refresh_model_combo()
    self._model_list.setCurrentRow(min(row, len(models) - 1))
    _save_settings(self._current_settings)
|
| 883 |
+
|
| 884 |
+
def _on_active_model_changed(self, index):
|
| 885 |
+
if index >= 0:
|
| 886 |
+
self._current_settings["active_model"] = index
|
| 887 |
+
self._refresh_model_list()
|
| 888 |
+
|
| 889 |
+
def _on_model_double_clicked(self, item):
    """Double-clicking a list row makes that model active immediately."""
    row = self._model_list.row(item)
    if 0 <= row < len(self._current_settings.get("models", [])):
        # Route through the combo so combo state and list stay in sync.
        self._active_model_combo.setCurrentIndex(row)
        self._apply_active_model()
|
| 895 |
+
|
| 896 |
+
def _apply_active_model(self):
    """Commit the combo's selection: persist it and notify listeners."""
    idx = self._active_model_combo.currentIndex()
    models = self._current_settings.get("models", [])
    if not (0 <= idx < len(models)):
        return
    chosen = models[idx]
    self._current_settings["active_model"] = idx
    self._refresh_model_list()
    self.model_changed.emit(chosen)
    _save_settings(self._current_settings)
    log.info(f"Active model: {chosen['name']} ({chosen['model']})")
|
| 905 |
+
|
| 906 |
+
def _run_benchmark(self):
    """Benchmark every configured model translating between the chosen
    languages; results stream back through the _bench_result signal."""
    models = self._current_settings.get("models", [])
    if not models:
        return

    source_lang = self._bench_lang.currentText()
    target_lang = self._bench_target.currentText()
    timeout_s = self._current_settings.get("timeout", 5)

    # Lock the UI while the benchmark runs; _on_bench_result re-enables it
    # when the "__DONE__" sentinel arrives.
    self._bench_btn.setEnabled(False)
    self._bench_btn.setText(t("testing"))
    self._bench_output.clear()

    from translator import DEFAULT_PROMPT, LANGUAGE_DISPLAY

    src = LANGUAGE_DISPLAY.get(source_lang, source_lang)
    tgt = LANGUAGE_DISPLAY.get(target_lang, target_lang)
    prompt = self._current_settings.get("system_prompt", DEFAULT_PROMPT)
    try:
        prompt = prompt.format(source_lang=src, target_lang=tgt)
    except (KeyError, IndexError):
        # User-supplied prompt may contain stray braces — use it verbatim.
        pass

    run_benchmark(
        models, source_lang, target_lang, timeout_s, prompt, self._bench_result.emit
    )
|
| 932 |
+
|
| 933 |
+
def _on_bench_result(self, text: str):
    """Append benchmark output; the "__DONE__" sentinel re-enables the button."""
    if text != "__DONE__":
        self._bench_output.append(text)
        return
    self._bench_btn.setEnabled(True)
    self._bench_btn.setText(t("btn_test_all"))
|
| 939 |
+
|
| 940 |
+
# ── Shared logic ──
|
| 941 |
+
|
| 942 |
+
def _on_silence_mode_changed(self, index):
    # Index 1 is "fixed" mode — the only mode where the manual duration applies.
    self._silence_duration.setEnabled(index == 1)
|
| 944 |
+
|
| 945 |
+
def _on_vad_mode_changed(self, index):
|
| 946 |
+
modes = ["silero", "energy", "disabled"]
|
| 947 |
+
self._current_settings["vad_mode"] = modes[index]
|
| 948 |
+
|
| 949 |
+
def _on_threshold_changed(self, value):
|
| 950 |
+
val = value / 100.0
|
| 951 |
+
self._current_settings["vad_threshold"] = val
|
| 952 |
+
self._vad_threshold_label.setText(f"{value}%")
|
| 953 |
+
if not self._vad_threshold_slider.isSliderDown():
|
| 954 |
+
self._auto_save()
|
| 955 |
+
|
| 956 |
+
def _on_energy_changed(self, value):
|
| 957 |
+
val = value / 1000.0
|
| 958 |
+
self._current_settings["energy_threshold"] = val
|
| 959 |
+
self._energy_label.setText(f"{value}\u2030")
|
| 960 |
+
if not self._energy_slider.isSliderDown():
|
| 961 |
+
self._auto_save()
|
| 962 |
+
|
| 963 |
+
def _on_timing_changed(self):
|
| 964 |
+
self._current_settings["min_speech_duration"] = self._min_speech.value()
|
| 965 |
+
self._current_settings["max_speech_duration"] = self._max_speech.value()
|
| 966 |
+
self._current_settings["silence_mode"] = "auto" if self._silence_mode.currentIndex() == 0 else "fixed"
|
| 967 |
+
self._current_settings["silence_duration"] = self._silence_duration.value()
|
| 968 |
+
|
| 969 |
+
def _on_ui_lang_changed(self, index):
    """Persist the chosen UI language; it takes effect after an app restart."""
    lang = "en" if index == 0 else "zh"
    self._current_settings["ui_lang"] = lang
    _save_settings(self._current_settings)
    from i18n import set_lang
    set_lang(lang)
    from PyQt6.QtWidgets import QMessageBox
    # Bilingual notice because we can't know which language the user reads.
    QMessageBox.information(
        self, "LiveTrans",
        "Language changed. Please restart the application.\n"
        "语言已更改,请重启应用程序。"
    )
|
| 981 |
+
|
| 982 |
+
def _auto_save(self):
    # Debounced save: (re)starting the timer collapses rapid changes into
    # one _do_auto_save call when the timer finally fires.
    self._save_timer.start()
|
| 984 |
+
|
| 985 |
+
def _do_auto_save(self):
    # Timer callback: push control state into the settings dict, then persist.
    self._apply_settings()
    _save_settings(self._current_settings)
|
| 988 |
+
|
| 989 |
+
def _apply_prompt(self):
    """Store a non-empty system prompt, re-apply the active model, persist."""
    text = self._prompt_edit.toPlainText().strip()
    if not text:
        # Empty prompt box is ignored; the previous prompt stays in force.
        return
    self._current_settings["system_prompt"] = text
    self._apply_active_model()
    _save_settings(self._current_settings)
    log.info("System prompt updated")
|
| 996 |
+
|
| 997 |
+
def _apply_settings(self):
    """Read every control back into _current_settings and broadcast it.

    Emits settings_changed with a shallow copy so listeners can't mutate
    the panel's own dict.
    """
    self._current_settings["asr_language"] = self._asr_lang.currentText()
    # Combo index -> engine id; must match the order the combo was populated in.
    engine_map = {
        0: "whisper",
        1: "sensevoice",
        2: "funasr-nano",
        3: "funasr-mlt-nano",
    }
    self._current_settings["asr_engine"] = engine_map[
        self._asr_engine.currentIndex()
    ]
    # Device combo entries look like "name (detail)" — keep only the name part.
    dev_text = self._asr_device.currentText()
    self._current_settings["asr_device"] = dev_text.split(" (")[0]
    # Index 0 is the "default device" entry, stored as None.
    audio_dev = self._audio_device.currentText()
    self._current_settings["audio_device"] = (
        None if self._audio_device.currentIndex() == 0 else audio_dev
    )
    self._current_settings["hub"] = (
        "ms" if self._hub_combo.currentIndex() == 0 else "hf"
    )
    prompt_text = self._prompt_edit.toPlainText().strip()
    if prompt_text:
        self._current_settings["system_prompt"] = prompt_text
    self._current_settings["timeout"] = self._timeout_spin.value()
    # Style tab may not have been built yet — guard before collecting.
    if hasattr(self, "_style_preset"):
        self._current_settings["style"] = self._collect_style()
    # Log a redacted view: model configs and the prompt are noisy/sensitive.
    safe = {
        k: v
        for k, v in self._current_settings.items()
        if k not in ("models", "system_prompt")
    }
    log.info(f"Settings applied: {safe}")
    self.settings_changed.emit(dict(self._current_settings))
|
| 1030 |
+
|
| 1031 |
+
def get_settings(self):
    """Return a shallow copy of the current settings dict."""
    return {**self._current_settings}
|
| 1033 |
+
|
| 1034 |
+
def get_active_model(self) -> dict | None:
|
| 1035 |
+
models = self._current_settings.get("models", [])
|
| 1036 |
+
idx = self._current_settings.get("active_model", 0)
|
| 1037 |
+
if 0 <= idx < len(models):
|
| 1038 |
+
return models[idx]
|
| 1039 |
+
return None
|
| 1040 |
+
|
| 1041 |
+
def has_saved_settings(self) -> bool:
    # True once the settings file exists on disk (i.e. setup already ran).
    return SETTINGS_FILE.exists()
|
dialogs.py
ADDED
|
@@ -0,0 +1,412 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import re
|
| 3 |
+
import sys
|
| 4 |
+
import threading
|
| 5 |
+
|
| 6 |
+
from PyQt6.QtCore import Qt, QTimer, pyqtSignal
|
| 7 |
+
from PyQt6.QtGui import QFont
|
| 8 |
+
from PyQt6.QtWidgets import (
|
| 9 |
+
QCheckBox,
|
| 10 |
+
QComboBox,
|
| 11 |
+
QDialog,
|
| 12 |
+
QDialogButtonBox,
|
| 13 |
+
QFormLayout,
|
| 14 |
+
QGroupBox,
|
| 15 |
+
QLabel,
|
| 16 |
+
QLineEdit,
|
| 17 |
+
QPushButton,
|
| 18 |
+
QTextEdit,
|
| 19 |
+
QVBoxLayout,
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
from model_manager import download_asr, download_silero
|
| 23 |
+
from i18n import t
|
| 24 |
+
|
| 25 |
+
log = logging.getLogger("LiveTrans.Dialogs")

# Both injected by control_panel at import time (presumably to avoid a
# circular import — SetupWizardDialog also imports _save_settings lazily).
SETTINGS_FILE = None  # set by control_panel on import
_save_settings = None  # set by control_panel on import
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class _LogCapture(logging.Handler):
|
| 32 |
+
"""Captures log output and emits via callback."""
|
| 33 |
+
|
| 34 |
+
def __init__(self, callback):
|
| 35 |
+
super().__init__()
|
| 36 |
+
self._callback = callback
|
| 37 |
+
self.setFormatter(logging.Formatter("%(message)s"))
|
| 38 |
+
|
| 39 |
+
def emit(self, record):
|
| 40 |
+
try:
|
| 41 |
+
self._callback(self.format(record))
|
| 42 |
+
except Exception:
|
| 43 |
+
pass
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
_ANSI_RE = re.compile(r"\x1b\[[0-9;]*[A-Za-z]")
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class _StderrCapture:
|
| 50 |
+
"""Captures stderr (tqdm) and forwards cleaned lines via callback."""
|
| 51 |
+
|
| 52 |
+
def __init__(self, callback, original):
|
| 53 |
+
self._cb = callback
|
| 54 |
+
self._orig = original
|
| 55 |
+
|
| 56 |
+
def write(self, text):
|
| 57 |
+
if self._orig:
|
| 58 |
+
self._orig.write(text)
|
| 59 |
+
if not text:
|
| 60 |
+
return
|
| 61 |
+
cleaned = _ANSI_RE.sub("", text)
|
| 62 |
+
for line in cleaned.splitlines():
|
| 63 |
+
line = line.strip()
|
| 64 |
+
if line:
|
| 65 |
+
self._cb(line)
|
| 66 |
+
|
| 67 |
+
def flush(self):
|
| 68 |
+
if self._orig:
|
| 69 |
+
self._orig.flush()
|
| 70 |
+
|
| 71 |
+
def isatty(self):
|
| 72 |
+
return False
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class _ModelLoadDialog(QDialog):
    """Modal dialog shown during model download/loading with live log."""

    # Class-level Qt signal: log records may arrive from worker threads, and
    # emitting a signal marshals the append onto the GUI thread.
    _log_signal = pyqtSignal(str)

    def __init__(self, message, parent=None):
        super().__init__(parent)
        self.setWindowTitle("LiveTrans")
        self.setMinimumWidth(500)
        self.setMinimumHeight(300)
        self.setModal(True)
        # Fixed-size dialog with no close/min/max buttons — the load cannot
        # be dismissed from the title bar.
        self.setWindowFlags(
            Qt.WindowType.Dialog
            | Qt.WindowType.WindowTitleHint
            | Qt.WindowType.CustomizeWindowHint
            | Qt.WindowType.MSWindowsFixedSizeDialogHint
        )

        layout = QVBoxLayout(self)
        self._label = QLabel(message)
        self._label.setWordWrap(True)
        layout.addWidget(self._label)

        self._log_view = QTextEdit()
        self._log_view.setReadOnly(True)
        self._log_view.setFont(QFont("Consolas", 8))
        self._log_view.setStyleSheet(
            "background: #1e1e2e; color: #cdd6f4; border: 1px solid #444;"
        )
        layout.addWidget(self._log_view)

        # Tap the root logger for the dialog's lifetime; done() detaches it.
        self._log_signal.connect(self._append_log)
        self._log_handler = _LogCapture(self._log_signal.emit)
        self._log_handler.setLevel(logging.INFO)
        logging.getLogger().addHandler(self._log_handler)

    def _append_log(self, text):
        # Append and keep the view scrolled to the newest line.
        self._log_view.append(text)
        self._log_view.verticalScrollBar().setValue(
            self._log_view.verticalScrollBar().maximum()
        )

    def done(self, result):
        # Detach the log tap before the widget goes away, whatever the result.
        logging.getLogger().removeHandler(self._log_handler)
        super().done(result)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
class SetupWizardDialog(QDialog):
    """First-launch wizard: choose hub, download models."""

    # Class-level Qt signal so worker-thread log/stderr lines are appended
    # on the GUI thread.
    _log_signal = pyqtSignal(str)

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setWindowTitle(t("window_setup"))
        self.setMinimumWidth(520)
        self.setMinimumHeight(400)
        # Fixed-size dialog with no close/min/max buttons.
        self.setWindowFlags(
            Qt.WindowType.Dialog
            | Qt.WindowType.WindowTitleHint
            | Qt.WindowType.CustomizeWindowHint
            | Qt.WindowType.MSWindowsFixedSizeDialogHint
        )

        layout = QVBoxLayout(self)

        # Hub picker: index 0 = ModelScope ("ms"), index 1 = Hugging Face ("hf").
        hub_group = QGroupBox(t("group_download_source"))
        hub_layout = QVBoxLayout(hub_group)
        self._hub_combo = QComboBox()
        self._hub_combo.addItems(
            [
                t("hub_modelscope_full"),
                t("hub_huggingface_full"),
            ]
        )
        hub_layout.addWidget(self._hub_combo)
        layout.addWidget(hub_group)

        self._download_btn = QPushButton(t("btn_start_download"))
        self._download_btn.clicked.connect(self._start_download)
        layout.addWidget(self._download_btn)

        # Live log console; hidden until a download actually starts.
        self._log_view = QTextEdit()
        self._log_view.setReadOnly(True)
        self._log_view.setFont(QFont("Consolas", 8))
        self._log_view.setStyleSheet(
            "background: #1e1e2e; color: #cdd6f4; border: 1px solid #444;"
        )
        self._log_view.hide()
        layout.addWidget(self._log_view)

        # Worker-thread error message, read back by _check_done on the GUI side.
        self._error = None
        self._log_signal.connect(self._append_log)
        self._log_handler = _LogCapture(self._log_signal.emit)

    def _append_log(self, text):
        # Append and keep the view scrolled to the newest line.
        self._log_view.append(text)
        self._log_view.verticalScrollBar().setValue(
            self._log_view.verticalScrollBar().maximum()
        )

    def _start_download(self):
        """Freeze the UI, attach log/stderr capture, and download in a thread."""
        self._download_btn.setEnabled(False)
        self._hub_combo.setEnabled(False)
        self._log_view.show()

        hub = "ms" if self._hub_combo.currentIndex() == 0 else "hf"

        logging.getLogger().addHandler(self._log_handler)
        # Capture stderr too so tqdm-style progress output shows in the log.
        self._orig_stderr = sys.stderr
        sys.stderr = _StderrCapture(self._log_signal.emit, self._orig_stderr)

        self._error = None
        self._download_thread = threading.Thread(
            target=self._download_worker, args=(hub,), daemon=True
        )
        self._download_thread.start()

        # Poll the worker thread; Qt objects must be touched from the GUI thread.
        self._poll_timer = QTimer()
        self._poll_timer.setInterval(200)
        self._poll_timer.timeout.connect(self._check_done)
        self._poll_timer.start()

    def _download_worker(self, hub):
        # Runs on the worker thread: fetch VAD + default ASR model.
        try:
            download_silero()
            download_asr("sensevoice", hub=hub)
        except Exception as e:
            self._error = str(e)
            log.error(f"Download failed: {e}", exc_info=True)

    def _check_done(self):
        """Timer callback: finish up once the download thread has exited."""
        if self._download_thread.is_alive():
            return
        self._poll_timer.stop()
        # Restore stderr and detach the log tap now that the worker is done.
        sys.stderr = self._orig_stderr
        logging.getLogger().removeHandler(self._log_handler)

        if self._error:
            # Leave the dialog open so the user can switch hub and retry.
            self._append_log(f"\n{t('download_failed').format(error=self._error)}")
            self._download_btn.setEnabled(True)
            self._download_btn.setText(t("btn_retry"))
            self._hub_combo.setEnabled(True)
            return

        self._append_log(f"\n{t('download_complete')}")
        hub = "ms" if self._hub_combo.currentIndex() == 0 else "hf"
        # Lazy import: control_panel imports this module, so import here only.
        from control_panel import _save_settings

        # Seed the initial settings file with sensible defaults.
        settings = {
            "hub": hub,
            "asr_engine": "sensevoice",
            "vad_mode": "silero",
            "vad_threshold": 0.3,
            "energy_threshold": 0.02,
            "min_speech_duration": 1.0,
            "max_speech_duration": 8.0,
            "silence_mode": "auto",
            "silence_duration": 0.8,
            "asr_language": "auto",
        }
        _save_settings(settings)
        # Brief pause so the completion message is visible before closing.
        QTimer.singleShot(500, self.accept)
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
class ModelDownloadDialog(QDialog):
    """Download missing models (non-first-launch) with live log.

    The download runs on a daemon thread; log records and stderr output are
    forwarded to the GUI thread through a Qt signal, and a QTimer polls the
    thread for completion.
    """

    # Class-level Qt signal; emitting it is the thread-safe way to append
    # log text from the worker thread onto the GUI thread.
    _log_signal = pyqtSignal(str)

    def __init__(self, missing_models, hub="ms", parent=None):
        """Build the dialog and schedule the download.

        Args:
            missing_models: list of dicts with at least "name" and "type" keys.
            hub: model hub identifier ("ms" or "hf") passed to download_asr.
            parent: optional parent widget.
        """
        super().__init__(parent)
        self.setWindowTitle(t("window_download"))
        self.setMinimumWidth(520)
        self.setMinimumHeight(300)
        # Fixed-size dialog with a title bar but no close/minimize buttons.
        self.setWindowFlags(
            Qt.WindowType.Dialog
            | Qt.WindowType.WindowTitleHint
            | Qt.WindowType.CustomizeWindowHint
            | Qt.WindowType.MSWindowsFixedSizeDialogHint
        )

        layout = QVBoxLayout(self)

        # Header showing which models are about to be downloaded.
        names = ", ".join(m["name"] for m in missing_models)
        info = QLabel(t("downloading_models").format(names=names))
        info.setWordWrap(True)
        layout.addWidget(info)

        # Read-only console-style view for the live download log.
        self._log_view = QTextEdit()
        self._log_view.setReadOnly(True)
        self._log_view.setFont(QFont("Consolas", 8))
        self._log_view.setStyleSheet(
            "background: #1e1e2e; color: #cdd6f4; border: 1px solid #444;"
        )
        layout.addWidget(self._log_view)

        # Hidden until the download fails; gives the user a way out.
        self._close_btn = QPushButton(t("btn_close"))
        self._close_btn.clicked.connect(self.reject)
        self._close_btn.hide()
        layout.addWidget(self._close_btn)

        self._missing = missing_models
        self._hub = hub
        # Set by the worker thread on failure; read by _check_done.
        self._error = None

        # Route emitted log lines into the text view on the GUI thread.
        self._log_signal.connect(self._append_log)
        self._log_handler = _LogCapture(self._log_signal.emit)

        # Defer the download until after the dialog is shown.
        QTimer.singleShot(100, self._start_download)

    def _append_log(self, text):
        """Append a line to the log view and keep it scrolled to the bottom."""
        self._log_view.append(text)
        self._log_view.verticalScrollBar().setValue(
            self._log_view.verticalScrollBar().maximum()
        )

    def _start_download(self):
        """Install log/stderr capture, start the worker thread, begin polling."""
        # Capture root-logger records and raw stderr (e.g. progress bars)
        # and forward both to the log view via the signal.
        logging.getLogger().addHandler(self._log_handler)
        self._orig_stderr = sys.stderr
        sys.stderr = _StderrCapture(self._log_signal.emit, self._orig_stderr)

        self._download_thread = threading.Thread(
            target=self._download_worker, daemon=True
        )
        self._download_thread.start()

        # Poll every 200 ms instead of blocking the GUI thread on join().
        self._poll_timer = QTimer()
        self._poll_timer.setInterval(200)
        self._poll_timer.timeout.connect(self._check_done)
        self._poll_timer.start()

    def _download_worker(self):
        """Worker-thread body: download each missing model by its "type" tag."""
        try:
            for m in self._missing:
                if m["type"] == "silero-vad":
                    download_silero()
                elif m["type"] in ("sensevoice", "funasr-nano", "funasr-mlt-nano"):
                    download_asr(m["type"], hub=self._hub)
                elif m["type"].startswith("whisper-"):
                    # "whisper-<size>" encodes the model size in the type tag.
                    size = m["type"].replace("whisper-", "")
                    download_asr("whisper", model_size=size, hub=self._hub)
        except Exception as e:
            # Stash the error for the GUI thread; never raise out of the thread.
            self._error = str(e)
            log.error(f"Download failed: {e}", exc_info=True)

    def _check_done(self):
        """Poll-timer callback: tear down capture and close or show errors."""
        if self._download_thread.is_alive():
            return
        self._poll_timer.stop()
        # Restore stderr and the root logger exactly as _start_download left them.
        sys.stderr = self._orig_stderr
        logging.getLogger().removeHandler(self._log_handler)

        if self._error:
            # Keep the dialog open so the user can read the log, then close it.
            self._append_log(f"\n{t('download_failed').format(error=self._error)}")
            self._close_btn.show()
            return

        self._append_log(f"\n{t('download_complete')}")
        # Brief delay so the completion message is visible before closing.
        QTimer.singleShot(500, self.accept)
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
class ModelEditDialog(QDialog):
    """Dialog for adding or editing an LLM translation-model configuration.

    Presents a form for the display name, API endpoint/key, model id,
    proxy mode and the "no system role" flag. Call get_data() after the
    dialog is accepted to retrieve the edited configuration.
    """

    def __init__(self, parent=None, model_data=None):
        """Create the form; prefill it from model_data when editing."""
        super().__init__(parent)
        editing = model_data is not None
        self.setWindowTitle(t("dialog_edit_model") if editing else t("dialog_add_model"))
        self.setMinimumWidth(450)

        form = QFormLayout(self)

        self._name = QLineEdit()
        self._api_base = QLineEdit()
        self._api_key = QLineEdit()
        # Mask the API key like a password field.
        self._api_key.setEchoMode(QLineEdit.EchoMode.Password)
        self._model = QLineEdit()

        # Proxy selector: index 0 = none, 1 = system, 2 = custom URL.
        self._proxy_mode = QComboBox()
        self._proxy_mode.addItems([t("proxy_none"), t("proxy_system"), t("proxy_custom")])
        self._proxy_mode.currentIndexChanged.connect(self._on_proxy_mode_changed)
        self._proxy_url = QLineEdit()
        self._proxy_url.setPlaceholderText("http://127.0.0.1:7890")
        self._proxy_url.setEnabled(False)

        self._no_system_role = QCheckBox(t("no_system_role"))

        for label, widget in (
            (t("label_display_name"), self._name),
            (t("label_api_base"), self._api_base),
            (t("label_api_key"), self._api_key),
            (t("label_model"), self._model),
            (t("label_proxy"), self._proxy_mode),
            (t("label_proxy_url"), self._proxy_url),
            ("", self._no_system_role),
        ):
            form.addRow(label, widget)

        if editing:
            self._name.setText(model_data.get("name", ""))
            self._api_base.setText(model_data.get("api_base", ""))
            self._api_key.setText(model_data.get("api_key", ""))
            self._model.setText(model_data.get("model", ""))
            proxy = model_data.get("proxy", "none")
            if proxy == "system":
                self._proxy_mode.setCurrentIndex(1)
            elif proxy and proxy != "none":
                # Any other non-empty value is a custom proxy URL.
                self._proxy_mode.setCurrentIndex(2)
                self._proxy_url.setText(proxy)
            else:
                self._proxy_mode.setCurrentIndex(0)
            self._no_system_role.setChecked(model_data.get("no_system_role", False))

        btn_box = QDialogButtonBox(
            QDialogButtonBox.StandardButton.Ok | QDialogButtonBox.StandardButton.Cancel
        )
        btn_box.accepted.connect(self.accept)
        btn_box.rejected.connect(self.reject)
        form.addRow(btn_box)

    def _on_proxy_mode_changed(self, index):
        """Enable the URL field only when the custom-proxy mode is selected."""
        self._proxy_url.setEnabled(index == 2)

    def get_data(self) -> dict:
        """Return the edited configuration as a plain dict.

        The "proxy" key is "none", "system", or a custom URL; an empty
        custom URL falls back to "none". "no_system_role" is included
        only when checked.
        """
        mode = self._proxy_mode.currentIndex()
        if mode == 2:
            proxy = self._proxy_url.text().strip() or "none"
        else:
            proxy = "system" if mode == 1 else "none"
        data = {
            "name": self._name.text().strip(),
            "api_base": self._api_base.text().strip(),
            "api_key": self._api_key.text().strip(),
            "model": self._model.text().strip(),
            "proxy": proxy,
        }
        if self._no_system_role.isChecked():
            data["no_system_role"] = True
        return data
|
funasr_nano/__init__.py
ADDED
|
File without changes
|
funasr_nano/ctc.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn.functional as F
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class CTC(torch.nn.Module):
    """CTC head: a linear projection over encoder frames plus a CTC loss.

    Args:
        odim: dimension of outputs (vocabulary size including blank)
        encoder_output_size: number of encoder projection units
        dropout_rate: dropout rate (0.0 ~ 1.0)
        reduce: reduce the CTC loss into a scalar
        blank_id: index of the CTC blank symbol
    """

    def __init__(
        self,
        odim: int,
        encoder_output_size: int,
        dropout_rate: float = 0.0,
        reduce: bool = True,
        blank_id: int = 0,
        **kwargs,
    ):
        super().__init__()
        self.dropout_rate = dropout_rate
        # Project encoder features (eprojs) onto the output vocabulary.
        self.ctc_lo = torch.nn.Linear(encoder_output_size, odim)
        self.blank_id = blank_id
        # reduction="none" keeps per-sample losses; reduction is left to
        # the caller (controlled by self.reduce).
        self.ctc_loss = torch.nn.CTCLoss(reduction="none", blank=blank_id)
        self.reduce = reduce

    def softmax(self, hs_pad):
        """Softmax of frame activations.

        Args:
            hs_pad: 3d tensor (B, Tmax, eprojs)
        Returns:
            torch.Tensor: softmax applied 3d tensor (B, Tmax, odim)
        """
        logits = self.ctc_lo(hs_pad)
        return F.softmax(logits, dim=2)

    def log_softmax(self, hs_pad):
        """Log-softmax of frame activations.

        Args:
            hs_pad: 3d tensor (B, Tmax, eprojs)
        Returns:
            torch.Tensor: log softmax applied 3d tensor (B, Tmax, odim)
        """
        logits = self.ctc_lo(hs_pad)
        return F.log_softmax(logits, dim=2)

    def argmax(self, hs_pad):
        """Argmax of frame activations.

        Args:
            hs_pad: 3d tensor (B, Tmax, eprojs)
        Returns:
            torch.Tensor: argmax applied 2d tensor (B, Tmax)
        """
        logits = self.ctc_lo(hs_pad)
        return logits.argmax(dim=2)
|
funasr_nano/model.py
ADDED
|
@@ -0,0 +1,746 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import os
|
| 3 |
+
import random
|
| 4 |
+
import re
|
| 5 |
+
import string
|
| 6 |
+
import time
|
| 7 |
+
import traceback
|
| 8 |
+
from typing import Union
|
| 9 |
+
|
| 10 |
+
import torch
|
| 11 |
+
import torch.nn as nn
|
| 12 |
+
|
| 13 |
+
from funasr.metrics.compute_acc import compute_accuracy
|
| 14 |
+
from funasr.register import tables
|
| 15 |
+
from funasr.train_utils.device_funcs import force_gatherable, to_device
|
| 16 |
+
from funasr.utils.datadir_writer import DatadirWriter
|
| 17 |
+
from funasr.utils.load_utils import extract_fbank, load_audio_text_image_video
|
| 18 |
+
from transformers import AutoConfig, AutoModelForCausalLM
|
| 19 |
+
|
| 20 |
+
from ctc import CTC
|
| 21 |
+
from tools.utils import forced_align
|
| 22 |
+
|
| 23 |
+
dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@tables.register("model_classes", "FunASRNano")
|
| 27 |
+
class FunASRNano(nn.Module):
|
| 28 |
+
    def __init__(
        self,
        audio_encoder: str = None,
        audio_encoder_conf: dict = None,
        audio_adaptor: str = None,
        audio_adaptor_conf: dict = None,
        llm: str = None,
        llm_conf: dict = None,
        input_size: int = 80,
        length_normalized_loss: bool = False,
        **kwargs,
    ):
        """Assemble the speech-LLM: audio encoder -> adaptor -> causal LLM,
        plus an optional auxiliary CTC decoder/head.

        Args:
            audio_encoder: encoder model name (hub id when hub == "ms",
                otherwise a key in funasr's encoder registry).
            audio_encoder_conf: encoder options ("hub", "freeze",
                "activation_checkpoint", plus encoder-class kwargs).
            audio_adaptor: key in funasr's adaptor registry.
            audio_adaptor_conf: adaptor options; "encoder_dim"/"llm_dim"
                are filled in here from the encoder/LLM when known.
            llm: unused here; the LLM is built from llm_conf.
            llm_conf: LLM options ("init_param_path", "load_kwargs",
                "freeze", "activation_checkpoint", "llm_dtype").
            input_size: feature dimension for registry-built encoders.
            length_normalized_loss: normalize the loss by token count in forward().
            **kwargs: optional CTC settings ("ctc_decoder", "ctc_conf", ...).
        """
        super().__init__()

        # audio encoder
        hub = audio_encoder_conf.get("hub", None)
        self.audio_encoder_activation_checkpoint = audio_encoder_conf.get(
            "activation_checkpoint", False
        )
        if hub == "ms":
            # Local import: funasr's AutoModel is only needed on this path.
            from funasr import AutoModel

            # Load a pretrained model from ModelScope and extract its encoder.
            model = AutoModel(model=audio_encoder, model_revision="master")
            # -1 signals "unknown output size"; downstream dim wiring is skipped.
            audio_encoder_output_size = (
                model.model.encoder_output_size
                if hasattr(model.model, "encoder_output_size")
                else -1
            )
            # Some wrappers nest the real model one level deeper.
            audio_encoder = (
                model.model.model.encoder if hasattr(model.model, "model") else model.model.encoder
            )
        else:
            # Build the encoder from funasr's class registry instead.
            encoder_class = tables.encoder_classes.get(audio_encoder)
            audio_encoder = encoder_class(input_size=input_size, **audio_encoder_conf)
            audio_encoder_output_size = audio_encoder.output_size()
        freeze = audio_encoder_conf.get("freeze", True)

        if freeze:
            # Frozen encoder: no gradients, eval mode (disables dropout/BN updates).
            for _, param in audio_encoder.named_parameters():
                param.requires_grad = False
            audio_encoder.eval()
        self.audio_encoder = audio_encoder

        # llm
        self.llm = None
        init_param_path = llm_conf.get("init_param_path", None)
        llm_dim = None

        llm_load_kwargs = llm_conf.get("load_kwargs", {})
        # from_config builds the architecture only; weights come from the
        # surrounding checkpoint loading, not from_pretrained.
        config = AutoConfig.from_pretrained(init_param_path)
        model = AutoModelForCausalLM.from_config(config, **llm_load_kwargs)

        freeze = llm_conf.get("freeze", True)
        if freeze:
            for _, param in model.named_parameters():
                param.requires_grad = False
            model.eval()
        if llm_conf.get("activation_checkpoint", False):
            # Trade compute for memory during training.
            model.gradient_checkpointing_enable()

        self.llm_dtype = llm_conf.get("llm_dtype", "fp32")
        self.llm = model.to(dtype_map[self.llm_dtype])
        # Hidden size of the LLM, read off the input-embedding matrix.
        llm_dim = model.get_input_embeddings().weight.shape[-1]

        # adaptor: projects encoder features into the LLM embedding space.
        adaptor_class = tables.adaptor_classes.get(audio_adaptor)
        if audio_encoder_output_size > 0:
            audio_adaptor_conf["encoder_dim"] = audio_encoder_output_size
        audio_adaptor_conf["llm_dim"] = (
            llm_dim if llm_dim is not None else audio_adaptor_conf["llm_dim"]
        )
        audio_adaptor = adaptor_class(**audio_adaptor_conf)
        freeze = audio_adaptor_conf.get("freeze", False)
        if freeze:
            for _, param in audio_adaptor.named_parameters():
                param.requires_grad = False
            audio_adaptor.eval()
        self.audio_adaptor = audio_adaptor
        self.use_low_frame_rate = audio_adaptor_conf.get("use_low_frame_rate", False)

        # ctc decoder (optional auxiliary head)
        self.ctc_decoder = None
        # TODO: fix table name
        # NOTE(review): the CTC decoder class is looked up in the *adaptor*
        # registry — presumably intentional given the TODO above; confirm.
        ctc_decoder_class = tables.adaptor_classes.get(kwargs.get("ctc_decoder", None))
        if ctc_decoder_class is not None:
            # Tokenizer config may come from kwargs directly or from dataset_conf.
            ctc_tokenizer = (
                kwargs.get("ctc_tokenizer", None)
                if "ctc_tokenizer" in kwargs
                else kwargs["dataset_conf"]["ctc_tokenizer"]
            )
            ctc_tokenizer_conf = (
                kwargs.get("ctc_tokenizer_conf", None)
                if "ctc_tokenizer_conf" in kwargs
                else kwargs["dataset_conf"]["ctc_tokenizer_conf"]
            )
            if ctc_tokenizer is not None and ctc_tokenizer_conf is not None:
                ctc_tokenizer_class = tables.tokenizer_classes.get(ctc_tokenizer)
                ctc_tokenizer = ctc_tokenizer_class(**ctc_tokenizer_conf)
            self.ctc_tokenizer = ctc_tokenizer
            assert ctc_tokenizer is not None, f"ctc_tokenizer must be set"

            ctc_vocab_size = kwargs.get("ctc_vocab_size", 60515)
            ctc_decoder_conf = kwargs.get("ctc_decoder_conf", {})
            if audio_encoder_output_size > 0:
                ctc_decoder_conf["encoder_dim"] = audio_encoder_output_size
            self.ctc_decoder = ctc_decoder_class(**ctc_decoder_conf)
            init_param_path = ctc_decoder_conf.get("init_param_path", None)
            if init_param_path is not None:
                # strict=False tolerates missing/extra keys; the status is logged.
                src_state = torch.load(init_param_path, map_location="cpu")
                flag = self.ctc_decoder.load_state_dict(src_state, strict=False)
                logging.info(f"Loading ctc_decoder ckpt: {init_param_path}, status: {flag}")
            freeze = ctc_decoder_conf.get("freeze", False)
            if freeze:
                for _, param in self.ctc_decoder.named_parameters():
                    param.requires_grad = False
                self.ctc_decoder.eval()

            ctc_conf = kwargs.get("ctc_conf", {})
            # Default blank is the last vocabulary index.
            self.blank_id = ctc_conf.get("blank_id", ctc_vocab_size - 1)
            self.ctc_weight = kwargs.get("ctc_weight", 0.3)
            self.ctc = CTC(
                odim=ctc_vocab_size,
                encoder_output_size=audio_encoder_output_size,
                blank_id=self.blank_id,
                **ctc_conf,
            )
            self.detach_ctc_decoder = kwargs.get("detach_ctc_decoder", True)
        self.error_calculator = None

        self.length_normalized_loss = length_normalized_loss
        rank = int(os.environ.get("RANK", 0))
        logging.info(f"rank: {rank}, model is builded.")
|
| 160 |
+
|
| 161 |
+
    def forward(
        self,
        speech: torch.Tensor = None,
        speech_lengths: torch.Tensor = None,
        input_ids: torch.Tensor = None,
        attention_mask: torch.Tensor = None,
        labels_ids: torch.Tensor = None,
        fbank_beg: torch.Tensor = None,
        fbank_mask: torch.Tensor = None,
        **kwargs,
    ):
        """Training forward pass.

        Encodes speech, splices the resulting audio embeddings into the
        token-embedding sequence at the positions marked by fbank_beg /
        fake_token_len, then runs the LLM with labels to get the loss.

        Args:
            speech: padded fbank features; assumed (B_speech, frames, feat_dim)
                based on the unpacking below — TODO confirm.
            speech_lengths: per-utterance frame counts (possibly 2-D padded).
            input_ids: (B, T) token ids; negative ids are placeholders.
            attention_mask: (B, T) mask fed to the LLM.
            labels_ids: (B, T) targets; -1 is remapped to -100 (HF ignore index).
            fbank_beg: per-turn start indices of audio placeholder spans (>0 = valid).
            fbank_mask: unused in this method body.
            **kwargs: must include "fake_token_len" (placeholder span lengths).

        Returns:
            (loss, stats, weight) as produced by force_gatherable.
        """
        batch_size, token_num = input_ids.shape
        stats = {}
        # Negative ids mark audio placeholders; clamp to 0 so embedding lookup
        # succeeds (the slots are overwritten with audio embeddings below).
        input_ids[input_ids < 0] = 0
        inputs_embeds = self.llm.model.get_input_embeddings()(input_ids)
        if speech is not None:
            # Padded lengths arrive 2-D; keep only the real length column.
            if len(speech_lengths.size()) > 1:
                speech_lengths = speech_lengths[:, 0]
            batch_size_speech, frames, _ = speech.shape

            # audio encoder (optionally under activation checkpointing)
            if self.audio_encoder_activation_checkpoint:
                from torch.utils.checkpoint import checkpoint

                encoder_out, encoder_out_lens = checkpoint(
                    self.encode, speech, speech_lengths, use_reentrant=False
                )
            else:
                encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)

            # audio_adaptor: map encoder features into LLM embedding space.
            encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)

            batch_size, token_num, dims = inputs_embeds.shape
            fake_token_len = kwargs.get("fake_token_len")
            fake_token_len[fake_token_len < 0] = 0
            fbank_beg[fbank_beg < 0] = 0

            # Splice each utterance's audio embeddings into its placeholder span.
            # speech_idx walks encoder_out in the same order turns were packed.
            speech_idx = 0
            for batch_idx in range(batch_size):
                for turn_id in range(fbank_beg.shape[1]):
                    fbank_beg_idx = fbank_beg[batch_idx, turn_id].item()
                    if fbank_beg_idx > 0:
                        speech_token_len = fake_token_len[batch_idx, turn_id]
                        speech_token = encoder_out[speech_idx, :speech_token_len, :]

                        try:
                            inputs_embeds[
                                batch_idx,
                                fbank_beg_idx : fbank_beg_idx + speech_token_len,
                                :,
                            ] = speech_token
                        except Exception as e:
                            # Length mismatch fallback: log the shapes, then retry
                            # using the adaptor's actual output length.
                            logging.error(f"{str(e)}, {traceback.format_exc()}")
                            logging.info(
                                f"batch_idx: {batch_idx}, inputs_embeds: {inputs_embeds.shape}, fbank_beg_idx: {fbank_beg_idx}, speech_token_len: {speech_token_len}, encoder_out: {encoder_out.shape}, encoder_out_lens: {encoder_out_lens}, fake_token_len: {fake_token_len}, speech_lengths: {speech_lengths}"
                            )
                            speech_token_len = encoder_out_lens[speech_idx].item()
                            speech_token = encoder_out[speech_idx, :speech_token_len, :]
                            inputs_embeds[
                                batch_idx,
                                fbank_beg_idx : fbank_beg_idx + speech_token_len,
                                :,
                            ] = speech_token

                        speech_idx += 1

            stats["batch_size_speech"] = batch_size_speech
            stats["batch_size_x_frames"] = frames * batch_size_speech
            stats["batch_size_real_frames"] = speech_lengths.sum().item()
            stats["padding_frames"] = stats["batch_size_x_frames"] - stats["batch_size_real_frames"]

        # Run the LLM under autocast when a reduced precision dtype is configured.
        device_type = next(self.parameters()).device.type
        with torch.autocast(
            device_type=device_type if device_type in ["cuda", "xpu", "mps"] else "cpu",
            enabled=True if self.llm_dtype != "fp32" else False,
            dtype=dtype_map[self.llm_dtype],
        ):
            # -100 is the Hugging Face ignore index for the LM loss.
            labels_ids[labels_ids == -1] = -100
            attention_mask[attention_mask < 0] = 0
            model_outputs = self.llm(
                inputs_embeds=inputs_embeds.to(dtype_map[self.llm_dtype]),
                attention_mask=attention_mask,
                labels=labels_ids,
            )
        loss = model_outputs.loss

        with torch.no_grad():
            # Teacher-forced next-token accuracy (shifted by one position).
            preds = torch.argmax(model_outputs.logits, -1)
            acc_att = compute_accuracy(preds[:, :-1], labels_ids[:, 1:], ignore_label=-100)
            stats["acc"] = acc_att

        stats["loss"] = torch.clone(loss.detach())
        stats["batch_size"] = batch_size

        stats["batch_size_x_tokens"] = token_num * batch_size
        stats["batch_size_real_tokens"] = attention_mask.sum().item()
        stats["padding_tokens"] = stats["batch_size_x_tokens"] - stats["batch_size_real_tokens"]

        # A turn is counted when its fbank_beg entry is positive.
        dialog_turns = (fbank_beg > 0).sum(-1)
        dialog_turns_max = torch.max(dialog_turns).int().item()
        dialog_turns_avg = dialog_turns.sum().item() / batch_size
        stats["dialog_turns_max"] = dialog_turns_max
        stats["dialog_turns_avg"] = dialog_turns_avg

        # force_gatherable: to-device and to-tensor if scalar for DataParallel
        if self.length_normalized_loss:
            # NOTE(review): `labels_ids > 0 + 1` parses as `labels_ids > 1` due
            # to operator precedence; possibly `(labels_ids > 0).sum() + 1` or
            # `labels_ids > -100 + 1` was intended — confirm upstream.
            batch_size = int((labels_ids > 0 + 1).sum())
        loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
        return loss, stats, weight
|
| 271 |
+
|
| 272 |
+
def forward_export(self, speech, speech_lengths, **kwargs):
|
| 273 |
+
x, olens = self.audio_encoder(speech, speech_lengths)
|
| 274 |
+
encoder_out, encoder_out_lens = self.audio_adaptor(x, olens)
|
| 275 |
+
return encoder_out, encoder_out_lens
|
| 276 |
+
|
| 277 |
+
def encode(self, speech, speech_lengths):
|
| 278 |
+
# audio encoder
|
| 279 |
+
encoder_out, encoder_out_lens = self.audio_encoder(speech, speech_lengths)
|
| 280 |
+
|
| 281 |
+
return encoder_out, encoder_out_lens
|
| 282 |
+
|
| 283 |
+
def data_template(self, data):
|
| 284 |
+
system, user, assistant = [], [], []
|
| 285 |
+
for i, item in enumerate(data):
|
| 286 |
+
role = item["role"]
|
| 287 |
+
content = item["content"]
|
| 288 |
+
if role == "system":
|
| 289 |
+
system.append(content)
|
| 290 |
+
elif role == "user":
|
| 291 |
+
if "audio" in item:
|
| 292 |
+
audio = item["audio"]
|
| 293 |
+
content = [content, audio]
|
| 294 |
+
user.append(content)
|
| 295 |
+
elif role == "assistant":
|
| 296 |
+
assistant.append(content)
|
| 297 |
+
|
| 298 |
+
system = system * len(user)
|
| 299 |
+
|
| 300 |
+
contents = {
|
| 301 |
+
"system": system,
|
| 302 |
+
"user": user,
|
| 303 |
+
"assistant": assistant,
|
| 304 |
+
}
|
| 305 |
+
|
| 306 |
+
return contents
|
| 307 |
+
|
| 308 |
+
def data_load_speech(self, contents: dict, tokenizer, frontend, meta_data={}, **kwargs):
|
| 309 |
+
system = contents["system"]
|
| 310 |
+
user = contents["user"]
|
| 311 |
+
assistant = contents["assistant"]
|
| 312 |
+
pattern = re.compile(r"(<\|startofspeech\|>.*?<\|endofspeech\|>)")
|
| 313 |
+
do_think = True
|
| 314 |
+
sys_prompt = True
|
| 315 |
+
if "dataset_conf" in kwargs:
|
| 316 |
+
do_think = kwargs["dataset_conf"].get("do_think", True)
|
| 317 |
+
sys_prompt = kwargs["dataset_conf"].get("sys_prompt", True)
|
| 318 |
+
|
| 319 |
+
input_ids, labels, fbank, fbank_lens, fbank_mask, fbank_beg, fake_token_len = (
|
| 320 |
+
[],
|
| 321 |
+
[],
|
| 322 |
+
[],
|
| 323 |
+
[],
|
| 324 |
+
[],
|
| 325 |
+
[],
|
| 326 |
+
[],
|
| 327 |
+
)
|
| 328 |
+
input_source_ids = []
|
| 329 |
+
for i, (system_prompt, user_prompt, target_out) in enumerate(zip(system, user, assistant)):
|
| 330 |
+
if i >= kwargs.get("multiturn_num_max", 5):
|
| 331 |
+
break
|
| 332 |
+
if len(input_ids) > kwargs.get("max_token_length", 1500):
|
| 333 |
+
break
|
| 334 |
+
if isinstance(user_prompt, (list, tuple)):
|
| 335 |
+
user_prompt, audio = user_prompt
|
| 336 |
+
if i == 0:
|
| 337 |
+
if kwargs.get("infer_with_assistant_input", False):
|
| 338 |
+
source_input = f"<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{user_prompt}"
|
| 339 |
+
if not sys_prompt:
|
| 340 |
+
source_input = f"<|im_start|>user\n{user_prompt}"
|
| 341 |
+
else:
|
| 342 |
+
source_input = f"<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>assistant\n"
|
| 343 |
+
if not sys_prompt:
|
| 344 |
+
source_input = (
|
| 345 |
+
f"<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>assistant\n"
|
| 346 |
+
)
|
| 347 |
+
else:
|
| 348 |
+
if kwargs.get("infer_with_assistant_input", False):
|
| 349 |
+
source_input = f"<|im_start|>user\n{user_prompt}"
|
| 350 |
+
else:
|
| 351 |
+
source_input = (
|
| 352 |
+
f"<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>assistant\n"
|
| 353 |
+
)
|
| 354 |
+
if not do_think:
|
| 355 |
+
source_input += "<think>\n\n</think>\n\n"
|
| 356 |
+
if kwargs.get("prev_text", None) is not None:
|
| 357 |
+
source_input += kwargs["prev_text"]
|
| 358 |
+
|
| 359 |
+
splits = pattern.split(source_input)
|
| 360 |
+
source_ids = []
|
| 361 |
+
fbank_mask_i = []
|
| 362 |
+
fake_token_len_i = 0
|
| 363 |
+
fbank_beg_i = -1
|
| 364 |
+
speech, speech_lengths = [], []
|
| 365 |
+
for k, sub_str in enumerate(splits):
|
| 366 |
+
if not sub_str.startswith("<|startofspeech|>"):
|
| 367 |
+
sub_token = tokenizer.encode(sub_str)
|
| 368 |
+
source_ids += sub_token
|
| 369 |
+
fbank_mask_i += [0] * len(sub_token)
|
| 370 |
+
else:
|
| 371 |
+
sub_str = sub_str.replace("<|startofspeech|>", "").replace(
|
| 372 |
+
"<|endofspeech|>", ""
|
| 373 |
+
)
|
| 374 |
+
if sub_str.startswith("!"):
|
| 375 |
+
sub_str = sub_str[1:]
|
| 376 |
+
if sub_str.startswith("!"): # !!: audio sample point
|
| 377 |
+
sub_str = audio
|
| 378 |
+
try:
|
| 379 |
+
time1 = time.perf_counter()
|
| 380 |
+
data_src = load_audio_text_image_video(
|
| 381 |
+
sub_str, fs=frontend.fs, **kwargs
|
| 382 |
+
)
|
| 383 |
+
time2 = time.perf_counter()
|
| 384 |
+
meta_data["load_data"] = f"{time2 - time1:0.3f}"
|
| 385 |
+
except Exception as e:
|
| 386 |
+
logging.error(f"Loading wav failed! {str(e)}, {traceback.format_exc()}")
|
| 387 |
+
|
| 388 |
+
speech, speech_lengths = extract_fbank(
|
| 389 |
+
data_src,
|
| 390 |
+
data_type=kwargs.get("data_type", "sound"),
|
| 391 |
+
frontend=frontend,
|
| 392 |
+
is_final=True,
|
| 393 |
+
) # speech: [b, T, d]
|
| 394 |
+
|
| 395 |
+
time3 = time.perf_counter()
|
| 396 |
+
meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
|
| 397 |
+
meta_data["batch_data_time"] = (
|
| 398 |
+
speech_lengths.sum().item()
|
| 399 |
+
* frontend.frame_shift
|
| 400 |
+
* frontend.lfr_n
|
| 401 |
+
/ 1000
|
| 402 |
+
)
|
| 403 |
+
|
| 404 |
+
if self.use_low_frame_rate:
|
| 405 |
+
olens = 1 + (speech_lengths[0].item() - 3 + 2 * 1) // 2
|
| 406 |
+
olens = 1 + (olens - 3 + 2 * 1) // 2
|
| 407 |
+
fake_token_len_i = (olens - 1) // 2 + 1
|
| 408 |
+
else:
|
| 409 |
+
fake_token_len_i = speech_lengths[0].item()
|
| 410 |
+
fake_token = [0] * fake_token_len_i
|
| 411 |
+
fbank_beg_i = len(source_ids)
|
| 412 |
+
source_ids += fake_token
|
| 413 |
+
fbank_mask_i += [1] * len(fake_token)
|
| 414 |
+
|
| 415 |
+
fbank_beg += [fbank_beg_i + len(input_ids)]
|
| 416 |
+
fake_token_len += [fake_token_len_i]
|
| 417 |
+
source_mask = [-100] * len(source_ids)
|
| 418 |
+
target_out = f"{target_out}<|im_end|>"
|
| 419 |
+
target_ids = tokenizer.encode(target_out)
|
| 420 |
+
input_source_ids = input_ids + source_ids
|
| 421 |
+
input_ids += source_ids + target_ids
|
| 422 |
+
labels += source_mask + target_ids
|
| 423 |
+
fbank_mask += fbank_mask_i
|
| 424 |
+
if len(speech) > 0:
|
| 425 |
+
fbank.append(speech[0, :, :])
|
| 426 |
+
fbank_lens.append(speech_lengths)
|
| 427 |
+
|
| 428 |
+
input_ids = torch.tensor(input_ids, dtype=torch.int64) # [: self.max_token_length]
|
| 429 |
+
attention_mask = torch.tensor([1] * len(input_ids), dtype=torch.int32)
|
| 430 |
+
labels = torch.tensor(labels, dtype=torch.int64) # [: self.max_token_length]
|
| 431 |
+
|
| 432 |
+
fbank_mask = torch.tensor(fbank_mask, dtype=torch.float32)
|
| 433 |
+
fbank_beg = torch.tensor(fbank_beg, dtype=torch.int32)
|
| 434 |
+
fake_token_len = torch.tensor(fake_token_len, dtype=torch.int32)
|
| 435 |
+
source_ids = torch.tensor(input_source_ids, dtype=torch.int64)
|
| 436 |
+
target_ids = torch.tensor(target_ids, dtype=torch.int64)
|
| 437 |
+
|
| 438 |
+
if len(fbank) > 0:
|
| 439 |
+
speech = torch.nn.utils.rnn.pad_sequence(fbank, batch_first=True, padding_value=0.0)
|
| 440 |
+
speech_lengths = torch.nn.utils.rnn.pad_sequence(
|
| 441 |
+
fbank_lens, batch_first=True, padding_value=-1
|
| 442 |
+
)
|
| 443 |
+
else:
|
| 444 |
+
speech = []
|
| 445 |
+
speech_lengths = []
|
| 446 |
+
output = {
|
| 447 |
+
"speech": speech,
|
| 448 |
+
"speech_lengths": speech_lengths,
|
| 449 |
+
"fbank_mask": fbank_mask[None, :],
|
| 450 |
+
"fbank_beg": fbank_beg[None,],
|
| 451 |
+
"fake_token_len": fake_token_len[None, :],
|
| 452 |
+
"input_ids": input_ids[None,],
|
| 453 |
+
"attention_mask": attention_mask[None,],
|
| 454 |
+
"labels_ids": labels,
|
| 455 |
+
"source_ids": source_ids[None, :],
|
| 456 |
+
"target_ids": target_ids[None, :],
|
| 457 |
+
}
|
| 458 |
+
|
| 459 |
+
return output
|
| 460 |
+
|
| 461 |
+
def inference_prepare(
    self,
    data_in,
    data_lengths=None,
    key: list = None,
    tokenizer=None,
    frontend=None,
    **kwargs,
):
    """Build LLM input embeddings for one sample, splicing audio features in.

    Runs the front-end/data loader, the audio encoder and the audio adaptor,
    then replaces the placeholder ("fake") speech tokens inside the text
    embedding sequence with the adaptor outputs.

    Returns:
        (inputs_embeds, contents, batch, source_ids, meta_data) where
        meta_data accumulates encoder/adaptor outputs for later CTC decoding.
    """
    meta_data = {}

    # Only single-sample decoding is supported here.
    if kwargs.get("batch_size", 1) > 1:
        raise NotImplementedError("batch decoding is not implemented")

    contents = self.data_template(data_in[0])
    output = self.data_load_speech(contents, tokenizer, frontend, meta_data=meta_data, **kwargs)
    batch = to_device(output, kwargs["device"])

    # audio encoder
    speech = batch["speech"]

    if len(speech) > 0:
        # Caller may supply precomputed audio embeddings to skip the encoder.
        if "audio_embedding" in kwargs and "audio_embedding_lens" in kwargs:
            encoder_out = kwargs["audio_embedding"]
            encoder_out_lens = kwargs["audio_embedding_lens"]
        else:
            speech_lengths = batch["speech_lengths"][:, 0]
            # Optional reduced-precision encoding.
            if kwargs.get("fp16", False):
                speech = speech.to(torch.float16)
            elif kwargs.get("bf16", False):
                speech = speech.to(torch.bfloat16)
            # audio encoder
            encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)

        # audio_adaptor projects encoder features into the LLM embedding space.
        adaptor_out, adaptor_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
        meta_data["encoder_out"] = encoder_out
        meta_data["encoder_out_lens"] = encoder_out_lens
        meta_data["audio_adaptor_out"] = adaptor_out
        meta_data["audio_adaptor_out_lens"] = adaptor_out_lens

    input_ids = batch["input_ids"]
    source_ids = batch["source_ids"]
    fbank_beg = batch["fbank_beg"]
    fake_token_len = batch["fake_token_len"]

    # Free-running generation conditions only on the source (prompt) ids;
    # teacher forcing keeps the full prompt+target sequence.
    if not kwargs.get("teacherforcing", False):
        input_ids = source_ids

    # Padding markers (<0) are clamped to 0 before the embedding lookup.
    input_ids[input_ids < 0] = 0
    inputs_embeds = self.llm.model.get_input_embeddings()(input_ids)

    batch_size, token_num, dims = inputs_embeds.shape

    fake_token_len[fake_token_len < 0] = 0
    fbank_beg[fbank_beg < 0] = 0

    # Overwrite each placeholder span with the corresponding adaptor output.
    speech_idx = 0
    for batch_idx in range(batch_size):
        for turn_id in range(fbank_beg.shape[1]):
            fbank_beg_idx = fbank_beg[batch_idx, turn_id].item()
            if fbank_beg_idx > 0:
                speech_token_len = fake_token_len[batch_idx, turn_id]
                speech_token = adaptor_out[speech_idx, :speech_token_len, :]

                try:
                    inputs_embeds[
                        batch_idx,
                        fbank_beg_idx : fbank_beg_idx + speech_token_len,
                        :,
                    ] = speech_token
                except Exception as e:
                    # Shape mismatch fallback: retry with the adaptor's own
                    # length instead of the precomputed fake token length.
                    # NOTE(review): the log line references speech_lengths,
                    # which is unbound when audio_embedding was supplied.
                    logging.error(f"{str(e)}, {traceback.format_exc()}")
                    logging.info(
                        f"batch_idx: {batch_idx}, inputs_embeds: {inputs_embeds.shape}, fbank_beg_idx: {fbank_beg_idx}, speech_token_len: {speech_token_len}, adaptor_out: {adaptor_out.shape}, adaptor_out_lens: {adaptor_out_lens}, fake_token_len: {fake_token_len}, speech_lengths: {speech_lengths}"
                    )
                    speech_token_len = adaptor_out_lens[speech_idx].item()
                    speech_token = adaptor_out[speech_idx, :speech_token_len, :]
                    inputs_embeds[
                        batch_idx,
                        fbank_beg_idx : fbank_beg_idx + speech_token_len,
                        :,
                    ] = speech_token

                speech_idx += 1
    return inputs_embeds, contents, batch, source_ids, meta_data
|
| 549 |
+
|
| 550 |
+
def get_prompt(self, hotwords: list[str], language: str = None, itn: bool = True):
    """Assemble the Chinese transcription prompt.

    Optionally prepends a hotword context section, selects a plain or
    language-targeted transcription instruction, and can disable inverse
    text normalization. The returned prompt always ends with a colon.
    """
    pieces = []
    if hotwords:
        joined = ", ".join(hotwords)
        pieces.append(
            f"请结合上下文信息,更加准确地完成语音转写任务。如果没有相关信息,我们会留空。\n\n\n**上下文信息:**\n\n\n"
        )
        pieces.append(f"热词列表:[{joined}]\n")
    pieces.append("语音转写" if language is None else f"语音转写成{language}")
    if not itn:
        pieces.append(",不进行文本规整")
    pieces.append(":")
    return "".join(pieces)
|
| 564 |
+
|
| 565 |
+
def generate_chatml(self, prompt: str, data: Union[str, torch.Tensor]):
    """Wrap a prompt plus audio reference into a 3-message ChatML list.

    A string *data* is embedded in the user message as a path between the
    speech markers; a tensor is attached under the "audio" key instead.
    Any other type yields None (implicit).
    """
    system_msg = {"role": "system", "content": "You are a helpful assistant."}
    assistant_msg = {"role": "assistant", "content": "null"}
    if isinstance(data, str):
        user_msg = {
            "role": "user",
            "content": f"{prompt}<|startofspeech|>!{data}<|endofspeech|>",
        }
        return [system_msg, user_msg, assistant_msg]
    if isinstance(data, torch.Tensor):
        user_msg = {
            "role": "user",
            "content": f"{prompt}<|startofspeech|>!!<|endofspeech|>",
            "audio": data,
        }
        return [system_msg, user_msg, assistant_msg]
|
| 582 |
+
|
| 583 |
+
def inference(
    self,
    data_in,
    data_lengths=None,
    key: list = None,
    tokenizer=None,
    frontend=None,
    **kwargs,
):
    """Top-level recognition entry point.

    Builds the transcription prompt, wraps every input as ChatML,
    generates random keys when none are supplied, and delegates to
    inference_llm.
    """
    prompt = self.get_prompt(
        kwargs.get("hotwords", []), kwargs.get("language", None), kwargs.get("itn", True)
    )
    chatml_inputs = [self.generate_chatml(prompt, item) for item in data_in]

    if key is None:
        # One random 13-char alphanumeric key per input sample.
        alphabet = string.ascii_letters + string.digits
        key = [
            "rand_key_" + "".join(random.choice(alphabet) for _ in range(13))
            for _ in chatml_inputs
        ]

    return self.inference_llm(
        chatml_inputs,
        data_lengths=data_lengths,
        key=key,
        tokenizer=tokenizer,
        frontend=frontend,
        **kwargs,
    )
|
| 611 |
+
|
| 612 |
+
def inference_llm(
    self,
    data_in,
    data_lengths=None,
    key: list = None,
    tokenizer=None,
    frontend=None,
    **kwargs,
):
    """Run LLM decoding (and optional CTC decoding/alignment) for one sample.

    Prepares spliced input embeddings, optionally runs the CTC decoder for
    greedy text + logits, then either free-runs ``llm.generate`` or scores
    with teacher forcing. Results carry cleaned text, optional loss, and
    per-token timestamps aligned against the CTC logits.

    Returns:
        (results, meta_data) — results is a one-element list of dicts.
    """
    inputs_embeds, contents, batch, source_ids, meta_data = self.inference_prepare(
        data_in, data_lengths, key, tokenizer, frontend, **kwargs
    )

    # Greedy CTC pass: argmax, collapse repeats, drop blanks.
    ctc_results = []
    if self.ctc_decoder is not None:
        encoder_out = meta_data["encoder_out"]
        encoder_out_lens = meta_data["encoder_out_lens"]
        decoder_out, decoder_out_lens = self.ctc_decoder(encoder_out, encoder_out_lens)
        ctc_logits = self.ctc.log_softmax(decoder_out)

        b, n, d = encoder_out.size()
        if isinstance(key[0], (list, tuple)):
            key = key[0]
        if len(key) < b:
            key = key * b
        for i in range(b):
            x = ctc_logits[i, : encoder_out_lens[i].item(), :]
            yseq = x.argmax(dim=-1)
            yseq = torch.unique_consecutive(yseq, dim=-1)
            mask = yseq != self.blank_id
            token_int = yseq[mask].tolist()
            # Change integer-ids to tokens
            text = self.ctc_tokenizer.decode(token_int)
            ctc_results.append({"key": key[i], "text": text, "ctc_logits": x})

    # fp16/bf16 flags override the default fp32 LLM dtype.
    llm_dtype = kwargs.get("llm_dtype", "fp32")
    if llm_dtype == "fp32":
        llm_dtype = "fp16" if kwargs.get("fp16", False) else llm_dtype
        llm_dtype = "bf16" if kwargs.get("bf16", False) else llm_dtype

    device_type = torch.device(kwargs.get("device", "cuda")).type
    with torch.autocast(
        device_type=device_type if device_type in ["cuda", "xpu", "mps"] else "cpu",
        enabled=True if llm_dtype != "fp32" else False,
        dtype=dtype_map[llm_dtype],
    ):
        label = contents["assistant"][-1]
        self.llm = self.llm.to(dtype_map[llm_dtype])
        inputs_embeds = inputs_embeds.to(dtype_map[llm_dtype])
        llm_kwargs = kwargs.get("llm_kwargs", {})
        if not kwargs.get("teacherforcing", False):
            # Free-running generation from the prompt embeddings.
            attention_mask = batch.get("attention_mask", None)
            generated_ids = self.llm.generate(
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                max_new_tokens=kwargs.get("max_length", 512),
                pad_token_id=self.llm.config.pad_token_id or self.llm.config.eos_token_id,
                **llm_kwargs,
            )

            response = tokenizer.batch_decode(
                generated_ids,
                skip_special_tokens=kwargs.get("skip_special_tokens", True),
            )[0]

            loss = None
        else:
            # Teacher forcing: score the reference and read predictions
            # from the logits after the prompt portion.
            labels_ids = batch["labels_ids"]
            labels_ids[labels_ids == -1] = -100
            attention_mask = batch.get("attention_mask", None)
            # NOTE(review): pad_token_id is not a standard forward() kwarg
            # for HF causal LMs — confirm this model's forward accepts it.
            model_outputs = self.llm(
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                labels=labels_ids,
                pad_token_id=self.llm.config.pad_token_id or self.llm.config.eos_token_id,
                **llm_kwargs,
            )

            preds = torch.argmax(model_outputs.logits, -1)[:, source_ids.shape[1] :]
            response = tokenizer.batch_decode(
                preds,
                add_special_tokens=False,
                skip_special_tokens=kwargs.get("skip_special_tokens", True),
            )[0]
            loss = model_outputs.loss.item()
        # Prepend any caller-supplied previous text (streaming continuation).
        response = kwargs.get("prev_text", "") + response

    ibest_writer = None
    if kwargs.get("output_dir") is not None:
        if not hasattr(self, "writer"):
            self.writer = DatadirWriter(kwargs.get("output_dir"))
        ibest_writer = self.writer[f"{0 + 1}best_recog"]

    results = []
    # Strip everything except word chars, whitespace and CJK for text_tn.
    response_clean = re.sub(r"[^\w\s\u3000\u4e00-\u9fff]+", "", response)
    result_i = {
        "key": key[0],
        "text": re.sub(r"\s+", " ", response.replace("/sil", " ")),
        "text_tn": response_clean,
        "label": label,
    }
    if loss is not None:
        result_i["loss"] = loss
    results.append(result_i)

    # Attach CTC text and forced-alignment timestamps (empty zip if no CTC).
    for ctc_result, result in zip(ctc_results, results):
        result["ctc_text"] = ctc_result["text"].replace("<|nospeech|>", "")
        target_ids = torch.tensor(
            self.ctc_tokenizer.encode(result["ctc_text"]), dtype=torch.int64
        )
        result["ctc_timestamps"] = forced_align(
            ctc_result["ctc_logits"], target_ids, self.blank_id
        )
        target_ids = torch.tensor(self.ctc_tokenizer.encode(result["text"]), dtype=torch.int64)
        result["timestamps"] = forced_align(ctc_result["ctc_logits"], target_ids, self.blank_id)
        for timestamps in [result["timestamps"], result["ctc_timestamps"]]:
            for timestamp in timestamps:
                timestamp["token"] = self.ctc_tokenizer.decode([timestamp["token"]])
                # Frame index -> seconds (6x downsampling at 10 ms frames
                # — presumably; confirm against the front-end config).
                timestamp["start_time"] = timestamp["start_time"] * 6 * 10 / 1000
                timestamp["end_time"] = timestamp["end_time"] * 6 * 10 / 1000

    if ibest_writer is not None:
        ibest_writer["text"][key[0]] = response.replace("\n", " ")
        ibest_writer["label"][key[0]] = label.replace("\n", " ")
        ibest_writer["text_tn"][key[0]] = response_clean

    return results, meta_data
|
| 739 |
+
|
| 740 |
+
@staticmethod
def from_pretrained(model: str = None, **kwargs):
    """Build this model (and its runtime kwargs) via funasr's AutoModel.

    Args:
        model: Model name or local path understood by funasr.
        **kwargs: Forwarded to ``AutoModel.build_model``.

    Returns:
        (model, kwargs) as produced by ``AutoModel.build_model``.
    """
    # Imported lazily so funasr is only required when this path is used.
    from funasr import AutoModel

    model, kwargs = AutoModel.build_model(model=model, trust_remote_code=True, **kwargs)

    return model, kwargs
|
funasr_nano/tools/__init__.py
ADDED
|
File without changes
|
funasr_nano/tools/utils.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from itertools import groupby
|
| 2 |
+
|
| 3 |
+
import soundfile as sf
|
| 4 |
+
import torch
|
| 5 |
+
import torchaudio
|
| 6 |
+
import torchaudio.functional as F
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def load_audio(wav_path, rate: int = None, offset: float = 0, duration: float = None):
    """Load (a slice of) an audio file as a float32 torch tensor.

    Args:
        wav_path: Path to any format soundfile can read.
        rate: Optional target sample rate; resamples when it differs.
        offset: Start position in seconds.
        duration: Length in seconds; None reads to the end.

    Returns:
        (audio_tensor, sample_rate). NOTE(review): for multi-channel audio
        the tensor is (frames, channels) without resampling but
        (channels, frames) after resampling — confirm callers expect this.
    """
    with sf.SoundFile(wav_path) as f:
        start_frame = int(offset * f.samplerate)
        if duration is None:
            frames_to_read = f.frames - start_frame
        else:
            frames_to_read = int(duration * f.samplerate)
        f.seek(start_frame)
        audio_data = f.read(frames_to_read, dtype="float32")
        audio_tensor = torch.from_numpy(audio_data)
        if rate is not None and f.samplerate != rate:
            # Resample expects (channels, time); mono gets a channel dim.
            if audio_tensor.ndim == 1:
                audio_tensor = audio_tensor.unsqueeze(0)
            else:
                audio_tensor = audio_tensor.T
            resampler = torchaudio.transforms.Resample(orig_freq=f.samplerate, new_freq=rate)
            audio_tensor = resampler(audio_tensor)
            if audio_tensor.shape[0] == 1:
                audio_tensor = audio_tensor.squeeze(0)
        return audio_tensor, rate if rate is not None else f.samplerate
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def forced_align(log_probs: torch.Tensor, targets: torch.Tensor, blank: int = 0):
    """Best-effort CTC forced alignment of a target sequence to logits.

    Args:
        log_probs: (frames, vocab) log-probabilities for one utterance.
        targets: 1-D tensor of target token ids.
        blank: CTC blank id.

    Returns:
        List of dicts with keys ``token`` (int id), ``start_time`` and
        ``end_time`` (frame indices), and ``score`` (max frame probability,
        rounded to 3 places). Returns an empty list when alignment fails.
    """
    items = []
    try:
        # The current version only supports batch_size==1.
        log_probs, targets = log_probs.unsqueeze(0).cpu(), targets.unsqueeze(0).cpu()
        # Alignment is impossible with fewer frames than target tokens.
        # Explicit check instead of assert: asserts vanish under -O.
        if log_probs.shape[1] < targets.shape[1]:
            return items
        alignments, scores = F.forced_align(log_probs, targets, blank=blank)
        alignments, scores = alignments[0], torch.exp(scores[0]).tolist()
        # use enumerate to keep track of the original indices, then group by token value
        for token, group in groupby(enumerate(alignments), key=lambda item: item[1]):
            if token == blank:
                continue
            group = list(group)
            start = group[0][0]
            end = start + len(group)
            score = max(scores[start:end])
            items.append(
                {
                    "token": token.item(),
                    "start_time": start,
                    "end_time": end,
                    "score": round(score, 3),
                }
            )
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate; alignment itself stays best-effort.
        pass
    return items
|
i18n.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import yaml
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
|
| 4 |
+
_strings: dict = {}
|
| 5 |
+
_lang = "en"
|
| 6 |
+
_dir = Path(__file__).parent / "i18n"
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def set_lang(lang: str):
    """Activate *lang* and load its YAML string table.

    Falls back to the English table when no file exists for *lang*.
    """
    global _lang, _strings
    _lang = lang
    yaml_file = _dir / f"{lang}.yaml"
    if not yaml_file.exists():
        yaml_file = _dir / "en.yaml"
    loaded = yaml.safe_load(yaml_file.read_text("utf-8"))
    _strings = loaded or {}
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def get_lang() -> str:
    """Return the currently active UI language code (e.g. "en", "zh")."""
    return _lang
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def t(key: str) -> str:
    """Look up a UI string; unknown keys are returned unchanged."""
    return _strings.get(key, key)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# Load English by default
|
| 27 |
+
set_lang("en")
|
i18n/en.yaml
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# LiveTrans English UI strings
|
| 2 |
+
|
| 3 |
+
# Overlay
|
| 4 |
+
translating: "translating..."
|
| 5 |
+
same_language: "(same language)"
|
| 6 |
+
copy_original: "Copy Original"
|
| 7 |
+
copy_translation: "Copy Translation"
|
| 8 |
+
copy_all: "Copy All"
|
| 9 |
+
paused: "Paused"
|
| 10 |
+
running: "Running"
|
| 11 |
+
clear: "Clear"
|
| 12 |
+
settings: "Settings"
|
| 13 |
+
monitor: "Monitor"
|
| 14 |
+
quit: "Quit"
|
| 15 |
+
click_through: "Click-through"
|
| 16 |
+
top_most: "Top-most"
|
| 17 |
+
auto_scroll: "Auto-scroll"
|
| 18 |
+
model_label: "Model:"
|
| 19 |
+
target_label: "Target:"
|
| 20 |
+
|
| 21 |
+
# Control Panel
|
| 22 |
+
window_control_panel: "LiveTrans - Control Panel"
|
| 23 |
+
tab_vad_asr: "VAD / ASR"
|
| 24 |
+
tab_translation: "Translation"
|
| 25 |
+
tab_benchmark: "Benchmark"
|
| 26 |
+
tab_cache: "Cache"
|
| 27 |
+
tab_style: "Style"
|
| 28 |
+
|
| 29 |
+
# Style tab
|
| 30 |
+
group_preset: "Preset"
|
| 31 |
+
preset_default: "Default (High Contrast)"
|
| 32 |
+
preset_transparent: "Transparent"
|
| 33 |
+
preset_compact: "Compact"
|
| 34 |
+
preset_light: "Light"
|
| 35 |
+
preset_dracula: "Dracula"
|
| 36 |
+
preset_nord: "Nord"
|
| 37 |
+
preset_monokai: "Monokai"
|
| 38 |
+
preset_solarized: "Solarized Dark"
|
| 39 |
+
preset_gruvbox: "Gruvbox Dark"
|
| 40 |
+
preset_tokyo_night: "Tokyo Night"
|
| 41 |
+
preset_catppuccin: "Catppuccin Mocha"
|
| 42 |
+
preset_one_dark: "One Dark"
|
| 43 |
+
preset_everforest: "Everforest"
|
| 44 |
+
preset_kanagawa: "Kanagawa"
|
| 45 |
+
preset_custom: "Custom"
|
| 46 |
+
btn_reset_style: "Reset to Default"
|
| 47 |
+
group_background: "Background"
|
| 48 |
+
label_bg_color: "Container Color:"
|
| 49 |
+
label_bg_opacity: "Container Opacity:"
|
| 50 |
+
label_header_color: "Header Color:"
|
| 51 |
+
label_header_opacity: "Header Opacity:"
|
| 52 |
+
label_border_radius: "Border Radius:"
|
| 53 |
+
group_text: "Text"
|
| 54 |
+
label_original_font: "Original Font:"
|
| 55 |
+
label_original_font_size: "Original Font Size:"
|
| 56 |
+
label_original_color: "Original Color:"
|
| 57 |
+
label_translation_font: "Translation Font:"
|
| 58 |
+
label_translation_font_size: "Translation Font Size:"
|
| 59 |
+
label_translation_color: "Translation Color:"
|
| 60 |
+
label_timestamp_color: "Timestamp Color:"
|
| 61 |
+
group_window: "Window"
|
| 62 |
+
label_window_opacity: "Window Opacity:"
|
| 63 |
+
|
| 64 |
+
# VAD / ASR tab
|
| 65 |
+
group_asr_engine: "ASR Engine"
|
| 66 |
+
label_engine: "Engine:"
|
| 67 |
+
label_language_hint: "Language Hint:"
|
| 68 |
+
label_device: "Device:"
|
| 69 |
+
label_audio: "Audio:"
|
| 70 |
+
label_hub: "Hub:"
|
| 71 |
+
system_default: "System Default"
|
| 72 |
+
hub_modelscope: "ModelScope (China)"
|
| 73 |
+
hub_huggingface: "HuggingFace (Intl)"
|
| 74 |
+
label_ui_lang: "Language:"
|
| 75 |
+
group_vad_mode: "VAD Mode"
|
| 76 |
+
vad_silero: "Silero VAD"
|
| 77 |
+
vad_energy: "Energy-based"
|
| 78 |
+
vad_disabled: "Disabled (always send)"
|
| 79 |
+
group_silero_threshold: "Silero VAD Threshold"
|
| 80 |
+
label_threshold: "Threshold:"
|
| 81 |
+
group_energy_threshold: "Energy Threshold (for Energy-based mode)"
|
| 82 |
+
group_timing: "Timing"
|
| 83 |
+
label_min_speech: "Min speech:"
|
| 84 |
+
label_max_speech: "Max speech:"
|
| 85 |
+
label_silence: "Silence:"
|
| 86 |
+
label_silence_dur: "Silence dur:"
|
| 87 |
+
silence_auto: "Auto"
|
| 88 |
+
silence_fixed: "Fixed"
|
| 89 |
+
|
| 90 |
+
# Translation tab
|
| 91 |
+
group_active_model: "Active Model (used in pipeline)"
|
| 92 |
+
btn_apply: "Apply"
|
| 93 |
+
group_model_configs: "Model Configurations"
|
| 94 |
+
btn_add: "Add"
|
| 95 |
+
btn_edit: "Edit"
|
| 96 |
+
btn_duplicate: "Duplicate"
|
| 97 |
+
btn_remove: "Remove"
|
| 98 |
+
group_system_prompt: "System Prompt (supports {source_lang} {target_lang})"
|
| 99 |
+
btn_restore_default: "Restore Default"
|
| 100 |
+
btn_apply_prompt: "Apply Prompt"
|
| 101 |
+
group_timeout: "Model Connect Timeout"
|
| 102 |
+
|
| 103 |
+
# Benchmark tab
|
| 104 |
+
label_source: "Source:"
|
| 105 |
+
btn_test_all: "Test All Models"
|
| 106 |
+
testing: "Testing..."
|
| 107 |
+
|
| 108 |
+
# Cache tab
|
| 109 |
+
btn_open_folder: "Open Folder"
|
| 110 |
+
btn_delete_all_exit: "Delete All && Exit"
|
| 111 |
+
scanning: "Scanning..."
|
| 112 |
+
no_cached_models: "No cached models found"
|
| 113 |
+
cache_total: "Total: {size} ({count} items)"
|
| 114 |
+
dialog_delete_title: "Delete All Models & Exit"
|
| 115 |
+
dialog_delete_msg: >-
|
| 116 |
+
This will delete all {count} cached models ({size})
|
| 117 |
+
and exit the application.
|
| 118 |
+
|
| 119 |
+
Settings will be preserved. Required models will be
|
| 120 |
+
re-downloaded on next launch.
|
| 121 |
+
|
| 122 |
+
Continue?
|
| 123 |
+
|
| 124 |
+
# Dialogs
|
| 125 |
+
window_setup: "LiveTrans - Initial Setup"
|
| 126 |
+
group_download_source: "Model Download Source"
|
| 127 |
+
hub_modelscope_full: "ModelScope (China)"
|
| 128 |
+
hub_huggingface_full: "HuggingFace (International)"
|
| 129 |
+
btn_start_download: "Start Download"
|
| 130 |
+
btn_retry: "Retry"
|
| 131 |
+
download_failed: "Download failed: {error}"
|
| 132 |
+
download_complete: "Download complete!"
|
| 133 |
+
window_download: "LiveTrans - Download Models"
|
| 134 |
+
downloading_models: "Downloading required models: {names}"
|
| 135 |
+
btn_close: "Close"
|
| 136 |
+
dialog_edit_model: "Edit Model"
|
| 137 |
+
dialog_add_model: "Add Model"
|
| 138 |
+
label_display_name: "Display Name:"
|
| 139 |
+
label_api_base: "API Base:"
|
| 140 |
+
label_api_key: "API Key:"
|
| 141 |
+
label_model: "Model:"
|
| 142 |
+
label_proxy: "Proxy:"
|
| 143 |
+
label_proxy_url: "Proxy URL:"
|
| 144 |
+
proxy_none: "No Proxy"
|
| 145 |
+
proxy_system: "System Proxy"
|
| 146 |
+
proxy_custom: "Custom Proxy"
|
| 147 |
+
no_system_role: "No system role (merge into user message)"
|
| 148 |
+
loading_model: "Loading {name}...\nPlease wait."
|
| 149 |
+
error_title: "Error"
|
| 150 |
+
error_load_asr: "Failed to load ASR model:\n{error}"
|
| 151 |
+
|
| 152 |
+
# Log Window
|
| 153 |
+
window_log: "LiveTrans - Log"
|
| 154 |
+
show_debug: "Show DEBUG"
|
| 155 |
+
|
| 156 |
+
# System Tray
|
| 157 |
+
tray_tooltip: "LiveTrans - Real-time Translation"
|
| 158 |
+
tray_start: "Start"
|
| 159 |
+
tray_stop: "Stop"
|
| 160 |
+
tray_show_log: "Show Log"
|
| 161 |
+
tray_show_panel: "Show Panel"
|
i18n/zh.yaml
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# LiveTrans 中文 UI 字符串
|
| 2 |
+
|
| 3 |
+
# 悬浮窗
|
| 4 |
+
translating: "翻译中..."
|
| 5 |
+
same_language: "(相同语言)"
|
| 6 |
+
copy_original: "复制原文"
|
| 7 |
+
copy_translation: "复制译文"
|
| 8 |
+
copy_all: "复制全部"
|
| 9 |
+
paused: "已暂停"
|
| 10 |
+
running: "运行中"
|
| 11 |
+
clear: "清除"
|
| 12 |
+
settings: "设置"
|
| 13 |
+
monitor: "监控"
|
| 14 |
+
quit: "退出"
|
| 15 |
+
click_through: "鼠标穿透"
|
| 16 |
+
top_most: "始终置顶"
|
| 17 |
+
auto_scroll: "自动滚动"
|
| 18 |
+
model_label: "模型:"
|
| 19 |
+
target_label: "目标:"
|
| 20 |
+
|
| 21 |
+
# 控制面板
|
| 22 |
+
window_control_panel: "LiveTrans - 控制面板"
|
| 23 |
+
tab_vad_asr: "VAD / ASR"
|
| 24 |
+
tab_translation: "翻译"
|
| 25 |
+
tab_benchmark: "基准测试"
|
| 26 |
+
tab_cache: "缓存"
|
| 27 |
+
tab_style: "样式"
|
| 28 |
+
|
| 29 |
+
# 样式标签页
|
| 30 |
+
group_preset: "预设"
|
| 31 |
+
preset_default: "默认 (高对比度)"
|
| 32 |
+
preset_transparent: "透明"
|
| 33 |
+
preset_compact: "紧凑"
|
| 34 |
+
preset_light: "浅色"
|
| 35 |
+
preset_dracula: "Dracula"
|
| 36 |
+
preset_nord: "Nord"
|
| 37 |
+
preset_monokai: "Monokai"
|
| 38 |
+
preset_solarized: "Solarized Dark"
|
| 39 |
+
preset_gruvbox: "Gruvbox Dark"
|
| 40 |
+
preset_tokyo_night: "Tokyo Night"
|
| 41 |
+
preset_catppuccin: "Catppuccin Mocha"
|
| 42 |
+
preset_one_dark: "One Dark"
|
| 43 |
+
preset_everforest: "Everforest"
|
| 44 |
+
preset_kanagawa: "Kanagawa"
|
| 45 |
+
preset_custom: "自定义"
|
| 46 |
+
btn_reset_style: "恢复默认"
|
| 47 |
+
group_background: "背景"
|
| 48 |
+
label_bg_color: "容器颜色:"
|
| 49 |
+
label_bg_opacity: "容器不透明度:"
|
| 50 |
+
label_header_color: "头部颜色:"
|
| 51 |
+
label_header_opacity: "头部不透明度:"
|
| 52 |
+
label_border_radius: "圆角半径:"
|
| 53 |
+
group_text: "文字"
|
| 54 |
+
label_original_font: "原文字体:"
|
| 55 |
+
label_original_font_size: "原文字号:"
|
| 56 |
+
label_original_color: "原文颜色:"
|
| 57 |
+
label_translation_font: "译文字体:"
|
| 58 |
+
label_translation_font_size: "译文字号:"
|
| 59 |
+
label_translation_color: "译文颜色:"
|
| 60 |
+
label_timestamp_color: "时间戳颜色:"
|
| 61 |
+
group_window: "窗口"
|
| 62 |
+
label_window_opacity: "窗口透明度:"
|
| 63 |
+
|
| 64 |
+
# VAD / ASR 标签页
|
| 65 |
+
group_asr_engine: "ASR 引擎"
|
| 66 |
+
label_engine: "引擎:"
|
| 67 |
+
label_language_hint: "语言提示:"
|
| 68 |
+
label_device: "设备:"
|
| 69 |
+
label_audio: "音频:"
|
| 70 |
+
label_hub: "下载源:"
|
| 71 |
+
system_default: "系统默认"
|
| 72 |
+
hub_modelscope: "ModelScope (国内)"
|
| 73 |
+
hub_huggingface: "HuggingFace (国际)"
|
| 74 |
+
label_ui_lang: "界面语言:"
|
| 75 |
+
group_vad_mode: "VAD 模式"
|
| 76 |
+
vad_silero: "Silero VAD"
|
| 77 |
+
vad_energy: "能量检测"
|
| 78 |
+
vad_disabled: "禁用 (始终发送)"
|
| 79 |
+
group_silero_threshold: "Silero VAD 阈值"
|
| 80 |
+
label_threshold: "阈值:"
|
| 81 |
+
group_energy_threshold: "能量阈值 (用于能量检测模式)"
|
| 82 |
+
group_timing: "时间参数"
|
| 83 |
+
label_min_speech: "最短语音:"
|
| 84 |
+
label_max_speech: "最长语音:"
|
| 85 |
+
label_silence: "静音模式:"
|
| 86 |
+
label_silence_dur: "静音时长:"
|
| 87 |
+
silence_auto: "自动"
|
| 88 |
+
silence_fixed: "固定"
|
| 89 |
+
|
| 90 |
+
# 翻译标签页
|
| 91 |
+
group_active_model: "当前模型 (用于翻译管线)"
|
| 92 |
+
btn_apply: "应用"
|
| 93 |
+
group_model_configs: "模型配置"
|
| 94 |
+
btn_add: "添加"
|
| 95 |
+
btn_edit: "编辑"
|
| 96 |
+
btn_duplicate: "复制"
|
| 97 |
+
btn_remove: "删除"
|
| 98 |
+
group_system_prompt: "系统提示词 (支持 {source_lang} {target_lang})"
|
| 99 |
+
btn_restore_default: "恢复默认"
|
| 100 |
+
btn_apply_prompt: "应用提示词"
|
| 101 |
+
group_timeout: "模型连接超时"
|
| 102 |
+
|
| 103 |
+
# 基准测试标签页
|
| 104 |
+
label_source: "源语言:"
|
| 105 |
+
btn_test_all: "测试全部模型"
|
| 106 |
+
testing: "测试中..."
|
| 107 |
+
|
| 108 |
+
# 缓存标签页
|
| 109 |
+
btn_open_folder: "打开文件夹"
|
| 110 |
+
btn_delete_all_exit: "删除全部并退出"
|
| 111 |
+
scanning: "扫描中..."
|
| 112 |
+
no_cached_models: "未找到缓存模型"
|
| 113 |
+
cache_total: "总计: {size} ({count} 项)"
|
| 114 |
+
dialog_delete_title: "删除全部模型并退出"
|
| 115 |
+
dialog_delete_msg: >-
|
| 116 |
+
将删除全部 {count} 个缓存模型 ({size})
|
| 117 |
+
并退出应用程序。
|
| 118 |
+
|
| 119 |
+
设置将被保留,所需模型将在下次启动时重新下载。
|
| 120 |
+
|
| 121 |
+
是否继续?
|
| 122 |
+
|
| 123 |
+
# 对话框
|
| 124 |
+
window_setup: "LiveTrans - 初始设置"
|
| 125 |
+
group_download_source: "模型下载源"
|
| 126 |
+
hub_modelscope_full: "ModelScope (国内)"
|
| 127 |
+
hub_huggingface_full: "HuggingFace (国际)"
|
| 128 |
+
btn_start_download: "开始下载"
|
| 129 |
+
btn_retry: "重试"
|
| 130 |
+
download_failed: "下载失败: {error}"
|
| 131 |
+
download_complete: "下载完成!"
|
| 132 |
+
window_download: "LiveTrans - 下载模型"
|
| 133 |
+
downloading_models: "正在下载所需模型: {names}"
|
| 134 |
+
btn_close: "关闭"
|
| 135 |
+
dialog_edit_model: "编辑模型"
|
| 136 |
+
dialog_add_model: "添加模型"
|
| 137 |
+
label_display_name: "显示名称:"
|
| 138 |
+
label_api_base: "API 地址:"
|
| 139 |
+
label_api_key: "API 密钥:"
|
| 140 |
+
label_model: "模型:"
|
| 141 |
+
label_proxy: "代理:"
|
| 142 |
+
label_proxy_url: "代理地址:"
|
| 143 |
+
proxy_none: "不使用代理"
|
| 144 |
+
proxy_system: "系统代理"
|
| 145 |
+
proxy_custom: "自定义代理"
|
| 146 |
+
no_system_role: "无 system 角色 (合并到 user 消息)"
|
| 147 |
+
loading_model: "正在加载 {name}...\n请稍候。"
|
| 148 |
+
error_title: "错误"
|
| 149 |
+
error_load_asr: "ASR 模型加载失败:\n{error}"
|
| 150 |
+
|
| 151 |
+
# 日志窗口
|
| 152 |
+
window_log: "LiveTrans - 日志"
|
| 153 |
+
show_debug: "显示 DEBUG"
|
| 154 |
+
|
| 155 |
+
# 系统托盘
|
| 156 |
+
tray_tooltip: "LiveTrans - 实时翻译"
|
| 157 |
+
tray_start: "启动"
|
| 158 |
+
tray_stop: "停止"
|
| 159 |
+
tray_show_log: "显示日志"
|
| 160 |
+
tray_show_panel: "显示面板"
|
log_window.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from PyQt6.QtWidgets import QWidget, QVBoxLayout, QTextEdit, QHBoxLayout, QPushButton, QCheckBox
|
| 3 |
+
from PyQt6.QtCore import pyqtSignal, pyqtSlot
|
| 4 |
+
from PyQt6.QtGui import QFont, QTextCursor
|
| 5 |
+
from i18n import t
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class QLogHandler(logging.Handler):
    """Bridge the stdlib logging framework to a Qt signal.

    Every record is formatted and forwarded as
    ``signal.emit(message, levelno)`` so the GUI can render it on its
    own thread.
    """

    def __init__(self, signal):
        logging.Handler.__init__(self)
        self._signal = signal

    def emit(self, record):
        formatted = self.format(record)
        self._signal.emit(formatted, record.levelno)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class LogWindow(QWidget):
    """Real-time log viewer window.

    Hosts a read-only rich-text pane plus controls for auto-scroll,
    DEBUG visibility, and clearing. Log records arrive thread-safely via
    ``log_signal`` from a QLogHandler attached to the logging framework.
    """

    # (message, levelno) emitted by QLogHandler; queued across threads by Qt.
    log_signal = pyqtSignal(str, int)

    def __init__(self):
        super().__init__()
        self.setWindowTitle(t("window_log"))
        self.setMinimumSize(700, 400)
        self.resize(900, 500)

        layout = QVBoxLayout(self)
        layout.setContentsMargins(4, 4, 4, 4)

        # Log display: dark, monospace, read-only.
        self._text = QTextEdit()
        self._text.setReadOnly(True)
        self._text.setFont(QFont("Consolas", 9))
        self._text.setStyleSheet("background-color: #1e1e1e; color: #d4d4d4;")
        layout.addWidget(self._text)

        # Controls row: auto-scroll / show-debug toggles and a clear button.
        ctrl = QHBoxLayout()
        self._auto_scroll = QCheckBox(t("auto_scroll"))
        self._auto_scroll.setChecked(True)
        ctrl.addWidget(self._auto_scroll)

        self._show_debug = QCheckBox(t("show_debug"))
        self._show_debug.setChecked(False)
        ctrl.addWidget(self._show_debug)

        ctrl.addStretch()

        clear_btn = QPushButton(t("clear"))
        clear_btn.clicked.connect(self._text.clear)
        ctrl.addWidget(clear_btn)

        layout.addLayout(ctrl)

        # Deliver records to _append_log on the GUI thread.
        self.log_signal.connect(self._append_log)

    def get_handler(self):
        """Create a logging.Handler wired to this window's signal."""
        handler = QLogHandler(self.log_signal)
        handler.setLevel(logging.DEBUG)
        fmt = logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s",
                                datefmt="%H:%M:%S")
        handler.setFormatter(fmt)
        return handler

    @pyqtSlot(str, int)
    def _append_log(self, msg: str, level: int):
        """Render one formatted record, color-coded by level and content."""
        # DEBUG records are hidden unless the toggle is on.
        if level < logging.INFO and not self._show_debug.isChecked():
            return

        color = {
            logging.DEBUG: "#808080",
            logging.INFO: "#d4d4d4",
            logging.WARNING: "#dcdcaa",
            logging.ERROR: "#f44747",
            logging.CRITICAL: "#ff0000",
        }.get(level, "#d4d4d4")

        # Content-based highlighting for pipeline events overrides level color.
        if "ASR [" in msg:
            color = "#4ec9b0"
        elif "Translate:" in msg:
            color = "#9cdcfe"
        elif "Speech segment" in msg:
            color = "#ce9178"

        self._text.append(f'<span style="color:{color}">{msg}</span>')

        if self._auto_scroll.isChecked():
            self._text.moveCursor(QTextCursor.MoveOperation.End)
|
main.py
ADDED
|
@@ -0,0 +1,648 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
LiveTrans - Phase 0 Prototype
|
| 3 |
+
Real-time audio translation using WASAPI loopback + faster-whisper + LLM.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import sys
|
| 7 |
+
import signal
|
| 8 |
+
import logging
|
| 9 |
+
import threading
|
| 10 |
+
from concurrent.futures import ThreadPoolExecutor
|
| 11 |
+
import yaml
|
| 12 |
+
import time
|
| 13 |
+
import numpy as np
|
| 14 |
+
from pathlib import Path
|
| 15 |
+
from datetime import datetime
|
| 16 |
+
|
| 17 |
+
from model_manager import (
|
| 18 |
+
apply_cache_env,
|
| 19 |
+
get_missing_models,
|
| 20 |
+
is_asr_cached,
|
| 21 |
+
ASR_DISPLAY_NAMES,
|
| 22 |
+
MODELS_DIR,
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
# Set cache env BEFORE importing torch so TORCH_HOME is respected
|
| 26 |
+
apply_cache_env()
|
| 27 |
+
|
| 28 |
+
# torch must be imported before PyQt6 to avoid DLL conflicts on Windows
|
| 29 |
+
import torch # noqa: F401
|
| 30 |
+
|
| 31 |
+
from audio_capture import AudioCapture
|
| 32 |
+
from vad_processor import VADProcessor
|
| 33 |
+
from asr_engine import ASREngine
|
| 34 |
+
from translator import Translator
|
| 35 |
+
|
| 36 |
+
from PyQt6.QtWidgets import QApplication, QSystemTrayIcon, QMenu, QDialog, QMessageBox
|
| 37 |
+
from PyQt6.QtGui import QAction, QIcon, QPixmap, QPainter, QColor, QFont
|
| 38 |
+
from PyQt6.QtCore import QTimer, Qt
|
| 39 |
+
|
| 40 |
+
from subtitle_overlay import SubtitleOverlay
|
| 41 |
+
from log_window import LogWindow
|
| 42 |
+
from control_panel import (
|
| 43 |
+
ControlPanel,
|
| 44 |
+
SETTINGS_FILE,
|
| 45 |
+
_load_saved_settings,
|
| 46 |
+
)
|
| 47 |
+
from dialogs import (
|
| 48 |
+
SetupWizardDialog,
|
| 49 |
+
ModelDownloadDialog,
|
| 50 |
+
_ModelLoadDialog,
|
| 51 |
+
)
|
| 52 |
+
from i18n import t, set_lang
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def setup_logging():
    """Initialize application logging.

    Creates ``./logs/`` next to this script and writes a timestamped log
    file at DEBUG level, mirrors INFO+ records to stdout, quiets chatty
    third-party loggers, and returns the "LiveTrans" logger.
    """
    log_dir = Path(__file__).parent / "logs"
    log_dir.mkdir(exist_ok=True)
    log_file = log_dir / f"livetrans_{datetime.now():%Y%m%d_%H%M%S}.log"

    # File gets everything; console only INFO and above.
    file_handler = logging.FileHandler(log_file, encoding="utf-8")
    console_handler = logging.StreamHandler(sys.stdout)
    file_handler.setLevel(logging.DEBUG)
    console_handler.setLevel(logging.INFO)

    formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s")
    for handler in (file_handler, console_handler):
        handler.setFormatter(formatter)

    logging.basicConfig(level=logging.DEBUG, handlers=[file_handler, console_handler])

    # These libraries are very verbose at INFO/DEBUG.
    for noisy in ("httpcore", "httpx", "openai", "filelock", "huggingface_hub"):
        logging.getLogger(noisy).setLevel(logging.WARNING)
    logging.info(f"Log file: {log_file}")
    return logging.getLogger("LiveTrans")
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
# Module-level logger; handlers/levels are configured by setup_logging().
log = logging.getLogger("LiveTrans")
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def create_app_icon() -> QIcon:
    """Draw the application icon in memory: a rounded blue square with "LT".

    Painting the icon at runtime avoids shipping an image asset.
    """
    # Transparent 64x64 canvas.
    pix = QPixmap(64, 64)
    pix.fill(QColor(0, 0, 0, 0))
    p = QPainter(pix)
    p.setRenderHint(QPainter.RenderHint.Antialiasing)
    # Rounded blue background, inset 4px on each side.
    p.setBrush(QColor(60, 130, 240))
    p.setPen(Qt.PenStyle.NoPen)
    p.drawRoundedRect(4, 4, 56, 56, 12, 12)
    # White "LT" monogram centered on the square.
    p.setPen(QColor(255, 255, 255))
    p.setFont(QFont("Consolas", 28, QFont.Weight.Bold))
    p.drawText(pix.rect(), Qt.AlignmentFlag.AlignCenter, "LT")
    p.end()
    return QIcon(pix)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def load_config():
    """Parse and return ``config.yaml`` located next to this script."""
    cfg_path = Path(__file__).with_name("config.yaml")
    with cfg_path.open("r", encoding="utf-8") as fh:
        return yaml.safe_load(fh)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
class LiveTransApp:
    """Core pipeline controller: audio capture -> VAD -> ASR -> translation.

    A daemon pipeline thread pulls audio chunks, segments speech via VAD and
    runs ASR inline; translations are dispatched to a small thread pool so
    slow LLM calls do not stall recognition. UI objects (overlay, panel) are
    attached after construction via set_overlay()/set_panel().
    """

    def __init__(self, config):
        self._config = config
        self._running = False
        self._paused = False
        self._asr_ready = False  # True when ASR model is loaded

        self._audio = AudioCapture(
            device=config["audio"].get("device"),
            sample_rate=config["audio"]["sample_rate"],
            chunk_duration=config["audio"]["chunk_duration"],
        )
        self._vad = VADProcessor(
            sample_rate=config["audio"]["sample_rate"],
            threshold=config["asr"]["vad_threshold"],
            min_speech_duration=config["asr"]["min_speech_duration"],
            max_speech_duration=config["asr"]["max_speech_duration"],
            chunk_duration=config["audio"]["chunk_duration"],
        )
        # ASR engine is loaded lazily by _switch_asr_engine().
        self._asr_type = None
        self._asr = None
        self._asr_device = config["asr"]["device"]
        self._target_language = config["translation"]["target_language"]
        # Initial translator from config; replaced by _on_model_changed().
        self._translator = Translator(
            api_base=config["translation"]["api_base"],
            api_key=config["translation"]["api_key"],
            model=config["translation"]["model"],
            target_language=self._target_language,
            max_tokens=config["translation"]["max_tokens"],
            temperature=config["translation"]["temperature"],
            streaming=config["translation"]["streaming"],
            system_prompt=config["translation"].get("system_prompt"),
        )
        self._overlay = None
        self._panel = None
        self._pipeline_thread = None
        # Up to 2 concurrent translation requests.
        self._tl_executor = ThreadPoolExecutor(max_workers=2)

        # Session statistics shown in the overlay.
        self._asr_count = 0
        self._translate_count = 0
        self._total_prompt_tokens = 0
        self._total_completion_tokens = 0
        self._msg_id = 0

    def set_overlay(self, overlay: SubtitleOverlay):
        """Attach the subtitle overlay used for message/stat updates."""
        self._overlay = overlay

    def set_panel(self, panel: ControlPanel):
        """Attach the control panel and subscribe to its change signals."""
        self._panel = panel
        panel.settings_changed.connect(self._on_settings_changed)
        panel.model_changed.connect(self._on_model_changed)

    def _on_settings_changed(self, settings):
        """Apply a (partial) settings dict from the control panel."""
        self._vad.update_settings(settings)
        if "style" in settings and self._overlay:
            self._overlay.apply_style(settings["style"])
        if "asr_language" in settings and self._asr:
            self._asr.set_language(settings["asr_language"])
        # ASR compute device change: try in-place migration first
        new_device = settings.get("asr_device")
        if new_device and new_device != self._asr_device:
            old_device = self._asr_device
            self._asr_device = new_device
            if self._asr is not None and hasattr(self._asr, "to_device"):
                result = self._asr.to_device(new_device)
                if result is not False:
                    log.info(f"ASR device migrated: {old_device} -> {new_device}")
                    if self._overlay:
                        display_name = ASR_DISPLAY_NAMES.get(self._asr_type, self._asr_type)
                        self._overlay.update_asr_device(f"{display_name} [{new_device}]")
                    # Free GPU memory left behind by the migration.
                    import gc
                    gc.collect()
                    try:
                        torch.cuda.empty_cache()
                    except Exception:
                        pass
                else:
                    self._asr_type = None  # ctranslate2: force reload
            else:
                self._asr_type = None  # no engine loaded: force reload
        if "asr_engine" in settings:
            self._switch_asr_engine(settings["asr_engine"])
        if "audio_device" in settings:
            self._audio.set_device(settings["audio_device"])
        if "target_language" in settings:
            self._target_language = settings["target_language"]
            if self._overlay:
                self._overlay.set_target_language(self._target_language)

    def _on_target_language_changed(self, lang: str):
        """Handle target-language change from the overlay; persist and re-apply model."""
        self._target_language = lang
        log.info(f"Target language: {lang}")
        if self._panel:
            settings = self._panel.get_settings()
            settings["target_language"] = lang
            from control_panel import _save_settings

            _save_settings(settings)
        # Rebuild the translator so it targets the new language.
        active = self._panel.get_active_model() if self._panel else None
        if active:
            self._on_model_changed(active)

    def _on_model_changed(self, model_config: dict):
        """Replace the translator with one built from *model_config*."""
        log.info(
            f"Switching translator: {model_config['name']} ({model_config['model']})"
        )
        # Panel-provided system prompt wins over the config default.
        prompt = None
        if self._panel:
            prompt = self._panel.get_settings().get("system_prompt")
        if not prompt:
            prompt = self._config["translation"].get("system_prompt")
        timeout = 10
        if self._panel:
            timeout = self._panel.get_settings().get("timeout", 10)
        self._translator = Translator(
            api_base=model_config["api_base"],
            api_key=model_config["api_key"],
            model=model_config["model"],
            target_language=self._target_language,
            max_tokens=self._config["translation"]["max_tokens"],
            temperature=self._config["translation"]["temperature"],
            streaming=self._config["translation"]["streaming"],
            system_prompt=prompt,
            proxy=model_config.get("proxy", "none"),
            no_system_role=model_config.get("no_system_role", False),
            timeout=timeout,
        )

    def _switch_asr_engine(self, engine_type: str):
        """Swap the ASR engine, downloading models and showing a load dialog as needed.

        Loads in a background thread while a modal dialog blocks the UI;
        a QTimer polls the thread and closes the dialog when loading ends.
        """
        if engine_type == self._asr_type:
            return
        log.info(f"Switching ASR engine: {self._asr_type} -> {engine_type}")
        self._asr_ready = False
        device = self._asr_device
        hub = "ms"
        if self._panel:
            hub = self._panel.get_settings().get("hub", "ms")

        model_size = self._config["asr"]["model_size"]
        cached = is_asr_cached(engine_type, model_size, hub)
        display_name = ASR_DISPLAY_NAMES.get(engine_type, engine_type)
        if engine_type == "whisper":
            display_name = f"Whisper {model_size}"

        parent = (
            self._panel if self._panel and self._panel.isVisible() else self._overlay
        )

        if not cached:
            missing = get_missing_models(engine_type, model_size, hub)
            missing = [m for m in missing if m["type"] != "silero-vad"]
            if missing:
                dlg = ModelDownloadDialog(missing, hub=hub, parent=parent)
                if dlg.exec() != QDialog.DialogCode.Accepted:
                    log.info(f"Download cancelled/failed: {engine_type}")
                    # Restore readiness if old engine is still available
                    if self._asr is not None:
                        self._asr_ready = True
                    return

        # Release old engine BEFORE loading new one to free GPU memory
        if self._asr is not None:
            log.info(f"Releasing old ASR engine: {self._asr_type}")
            if hasattr(self._asr, "unload"):
                self._asr.unload()
            self._asr = None
            import gc
            gc.collect()
            try:
                torch.cuda.empty_cache()
            except Exception:
                pass

        dlg = _ModelLoadDialog(
            t("loading_model").format(name=display_name), parent=parent
        )

        # One-element lists used as mutable result slots for the loader thread.
        new_asr = [None]
        load_error = [None]

        def _load():
            try:
                # Normalize "cuda:0 (RTX 4090)" → base="cuda", index=0
                dev = device
                dev_index = 0
                if dev.startswith("cuda:"):
                    part = dev.split("(")[0].strip()  # "cuda:0"
                    dev_index = int(part.split(":")[1])
                    dev = "cuda"

                if engine_type == "sensevoice":
                    from asr_sensevoice import SenseVoiceEngine

                    new_asr[0] = SenseVoiceEngine(device=device, hub=hub)
                elif engine_type in ("funasr-nano", "funasr-mlt-nano"):
                    from asr_funasr_nano import FunASRNanoEngine

                    new_asr[0] = FunASRNanoEngine(
                        device=device, hub=hub, engine_type=engine_type
                    )
                else:
                    download_root = str((MODELS_DIR / "huggingface" / "hub").resolve())
                    new_asr[0] = ASREngine(
                        model_size=model_size,
                        device=dev,
                        device_index=dev_index,
                        compute_type=self._config["asr"]["compute_type"],
                        language=self._config["asr"]["language"],
                        download_root=download_root,
                    )
            except Exception as e:
                load_error[0] = str(e)
                log.error(f"Failed to load ASR engine: {e}", exc_info=True)

        thread = threading.Thread(target=_load, daemon=True)
        thread.start()

        poll_timer = QTimer()

        def _check():
            # Close the modal load dialog once the loader thread exits.
            if not thread.is_alive():
                poll_timer.stop()
                dlg.accept()

        poll_timer.setInterval(100)
        poll_timer.timeout.connect(_check)
        poll_timer.start()

        dlg.exec()
        poll_timer.stop()

        if load_error[0]:
            QMessageBox.warning(
                parent, t("error_title"), t("error_load_asr").format(error=load_error[0])
            )
            # Old engine was already released; mark ASR as unavailable
            self._asr_type = None
            return

        self._asr = new_asr[0]
        self._asr_type = engine_type
        if self._panel:
            asr_lang = self._panel.get_settings().get("asr_language", "auto")
            self._asr.set_language(asr_lang)
        self._asr_ready = True
        if self._overlay:
            self._overlay.update_asr_device(f"{display_name} [{device}]")
        log.info(f"ASR engine ready: {engine_type} on {device}")

    def _translate_async(self, msg_id, text, source_lang):
        """Translate *text* on a pool thread and push the result to the overlay."""
        try:
            tl_start = time.perf_counter()
            translated = self._translator.translate(text, source_lang)
            tl_ms = (time.perf_counter() - tl_start) * 1000
            self._translate_count += 1
            pt, ct = self._translator.last_usage
            self._total_prompt_tokens += pt
            self._total_completion_tokens += ct
            log.info(f"Translate ({tl_ms:.0f}ms): {translated}")
            if self._overlay:
                self._overlay.update_translation(msg_id, translated, tl_ms)
                self._overlay.update_stats(
                    self._asr_count,
                    self._translate_count,
                    self._total_prompt_tokens,
                    self._total_completion_tokens,
                )
        except Exception as e:
            log.error(f"Translate error: {e}", exc_info=True)
            if self._overlay:
                # Surface the failure in the subtitle slot rather than dropping it.
                self._overlay.update_translation(msg_id, f"[error: {e}]", 0)

    def start(self):
        """Start audio capture and the pipeline thread (no-op if running)."""
        if self._running:
            return
        self._running = True
        self._paused = False
        self._audio.start()
        self._pipeline_thread = threading.Thread(
            target=self._pipeline_loop, daemon=True
        )
        self._pipeline_thread.start()
        log.info("Pipeline started")

    def stop(self):
        """Stop capture, drain the pipeline, flush buffered speech, shut down the pool."""
        self._running = False
        self._audio.stop()
        # Wait for pipeline thread to finish before flushing
        if self._pipeline_thread:
            self._pipeline_thread.join(timeout=3)
            self._pipeline_thread = None
        # Flush remaining VAD buffer after pipeline thread is done
        remaining = self._vad.flush()
        if remaining is not None and self._asr_ready:
            self._process_segment(remaining)
        self._tl_executor.shutdown(wait=False)
        log.info("Pipeline stopped")

    def pause(self):
        """Keep capturing audio but skip VAD/ASR until resume()."""
        self._paused = True
        log.info("Pipeline paused")

    def resume(self):
        """Resume VAD/ASR processing after pause()."""
        self._paused = False
        log.info("Pipeline resumed")

    def _process_segment(self, speech_segment):
        """Run ASR + translation on a speech segment. Called from pipeline thread and stop()."""
        # NOTE(review): assumes 16 kHz samples for duration math — matches
        # config["audio"]["sample_rate"] presumably; confirm.
        seg_len = len(speech_segment) / 16000
        log.info(f"Speech segment: {seg_len:.1f}s")

        asr_start = time.perf_counter()
        try:
            result = self._asr.transcribe(speech_segment)
        except Exception as e:
            log.error(f"ASR error: {e}", exc_info=True)
            return
        asr_ms = (time.perf_counter() - asr_start) * 1000
        if asr_ms > 10000:
            log.warning(f"ASR took {asr_ms:.0f}ms, possible hang")
        if result is None:
            return

        original_text = result["text"].strip()
        # Skip empty or punctuation-only ASR results
        if not original_text or not any(c.isalnum() for c in original_text):
            log.debug(f"ASR returned empty/punctuation-only, skipping: '{result['text']}'")
            return

        # Skip suspiciously short text from long segments (likely noise)
        alnum_chars = sum(1 for c in original_text if c.isalnum())
        if seg_len >= 2.0 and alnum_chars <= 3:
            log.debug(f"Noise filter: {seg_len:.1f}s segment produced only '{original_text}', skipping")
            return

        self._asr_count += 1
        self._msg_id += 1
        msg_id = self._msg_id
        source_lang = result["language"]
        timestamp = datetime.now().strftime("%H:%M:%S")
        log.info(f"ASR [{source_lang}] ({asr_ms:.0f}ms): {original_text}")

        if self._overlay:
            self._overlay.add_message(
                msg_id, timestamp, original_text, source_lang, asr_ms
            )

        target_lang = self._target_language
        if source_lang == target_lang:
            # Already in the target language: show the text without translating.
            log.info(f"Same language ({source_lang}), no translation")
            if self._overlay:
                self._overlay.update_translation(msg_id, "", 0)
                self._overlay.update_stats(
                    self._asr_count, self._translate_count,
                    self._total_prompt_tokens, self._total_completion_tokens,
                )
        else:
            self._tl_executor.submit(
                self._translate_async, msg_id, original_text, source_lang
            )

    def _pipeline_loop(self):
        """Pipeline thread body: chunk -> level meter -> VAD -> ASR."""
        while self._running:
            chunk = self._audio.get_audio(timeout=1.0)
            if chunk is None:
                continue

            rms = float(np.sqrt(np.mean(chunk**2)))

            # Level/VAD meter updates even while paused.
            if self._overlay:
                self._overlay.update_monitor(rms, self._vad.last_confidence)

            if self._paused:
                continue

            speech_segment = self._vad.process_chunk(chunk)
            if speech_segment is None:
                continue

            if not self._asr_ready:
                log.debug("ASR not ready, dropping segment")
                continue

            self._process_segment(speech_segment)
|
| 485 |
+
|
| 486 |
+
|
| 487 |
+
def main():
    """Application entry point.

    Boots logging/config, runs the first-launch wizard or model download
    dialog, wires the overlay, control panel, log window and system tray
    together, auto-starts the pipeline, and enters the Qt event loop.
    """
    setup_logging()
    log.info("LiveTrans starting...")
    config = load_config()
    log.info(
        f"Config loaded: ASR={config['asr']['model_size']}, "
        f"API={config['translation']['api_base']}, "
        f"Model={config['translation']['model']}"
    )

    saved = _load_saved_settings()

    # Apply UI language before creating any widgets
    if saved and saved.get("ui_lang"):
        set_lang(saved["ui_lang"])

    app = QApplication(sys.argv)
    # App lives in the tray; closing windows must not quit it.
    app.setQuitOnLastWindowClosed(False)
    _app_icon = create_app_icon()
    app.setWindowIcon(_app_icon)

    # First launch → setup wizard (hub + download)
    if not SETTINGS_FILE.exists():
        wizard = SetupWizardDialog()
        if wizard.exec() != QDialog.DialogCode.Accepted:
            sys.exit(0)
        saved = _load_saved_settings()
        log.info("Setup wizard completed")

    # Non-first launch but models missing → download dialog
    else:
        missing = get_missing_models(
            saved.get("asr_engine", "sensevoice"),
            config["asr"]["model_size"],
            saved.get("hub", "ms"),
        )
        if missing:
            log.info(f"Missing models: {[m['name'] for m in missing]}")
            dlg = ModelDownloadDialog(missing, hub=saved.get("hub", "ms"))
            if dlg.exec() != QDialog.DialogCode.Accepted:
                sys.exit(0)

    # Mirror all log records into the in-app log window.
    log_window = LogWindow()
    log_handler = log_window.get_handler()
    logging.getLogger().addHandler(log_handler)

    panel = ControlPanel(config, saved_settings=saved)

    overlay = SubtitleOverlay(config["subtitle"])
    overlay.show()

    live_trans = LiveTransApp(config)
    live_trans.set_overlay(overlay)
    live_trans.set_panel(panel)

    def _deferred_init():
        # Runs once after the event loop starts: push saved settings into
        # the pipeline and overlay, and activate the saved translator model.
        panel._apply_settings()
        models = panel.get_settings().get("models", [])
        active_idx = panel.get_settings().get("active_model", 0)
        overlay.set_models(models, active_idx)
        style = panel.get_settings().get("style")
        if style:
            overlay.apply_style(style)
        active_model = panel.get_active_model()
        if active_model:
            live_trans._on_model_changed(active_model)

    QTimer.singleShot(100, _deferred_init)

    tray = QSystemTrayIcon()
    tray.setToolTip(t("tray_tooltip"))
    tray.setIcon(_app_icon)

    menu = QMenu()
    start_action = QAction(t("tray_start"))
    stop_action = QAction(t("tray_stop"))
    log_action = QAction(t("tray_show_log"))
    panel_action = QAction(t("tray_show_panel"))
    quit_action = QAction(t("quit"))

    # Tray/overlay action handlers; overlay start/stop map to pause/resume
    # so the capture stream keeps running.
    def on_start():
        try:
            live_trans.start()
            overlay.set_running(True)
        except Exception as e:
            log.error(f"Start error: {e}", exc_info=True)

    def on_stop():
        live_trans.stop()
        overlay.set_running(False)

    def on_pause():
        live_trans.pause()
        overlay.set_running(False)

    def on_resume():
        live_trans.resume()
        overlay.set_running(True)

    def on_toggle_log():
        if log_window.isVisible():
            log_window.hide()
        else:
            log_window.show()
            log_window.raise_()

    def on_toggle_panel():
        if panel.isVisible():
            panel.hide()
        else:
            panel.show()
            panel.raise_()

    def on_quit():
        live_trans.stop()
        app.quit()

    start_action.triggered.connect(on_start)
    stop_action.triggered.connect(on_stop)
    log_action.triggered.connect(on_toggle_log)
    panel_action.triggered.connect(on_toggle_panel)

    def on_overlay_model_switch(index):
        # Persist the chosen model index and rebuild the translator.
        models = panel.get_settings().get("models", [])
        if 0 <= index < len(models):
            from control_panel import _save_settings

            settings = panel.get_settings()
            settings["active_model"] = index
            _save_settings(settings)
            live_trans._on_model_changed(models[index])

    overlay.settings_requested.connect(on_toggle_panel)
    overlay.target_language_changed.connect(live_trans._on_target_language_changed)
    overlay.model_switch_requested.connect(on_overlay_model_switch)
    overlay.start_requested.connect(on_resume)
    overlay.stop_requested.connect(on_pause)
    overlay.quit_requested.connect(on_quit)
    quit_action.triggered.connect(on_quit)

    menu.addAction(start_action)
    menu.addAction(stop_action)
    menu.addSeparator()
    menu.addAction(log_action)
    menu.addAction(panel_action)
    menu.addSeparator()
    menu.addAction(quit_action)

    tray.setContextMenu(menu)
    tray.show()

    # Auto-start the pipeline shortly after the event loop begins.
    QTimer.singleShot(500, on_start)

    # Ctrl+C support: the idle timer wakes the interpreter periodically so
    # the Python-level SIGINT handler can run while Qt's loop is active.
    signal.signal(signal.SIGINT, lambda *_: on_quit())
    timer = QTimer()
    timer.timeout.connect(lambda: None)
    timer.start(200)

    sys.exit(app.exec())
|
| 645 |
+
|
| 646 |
+
|
| 647 |
+
# Script entry point.
if __name__ == "__main__":
    main()
|
model_manager.py
ADDED
|
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import logging
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
|
| 5 |
+
log = logging.getLogger("LiveTrans.ModelManager")

# Directory containing this source file; all model caches live beneath it.
APP_DIR = Path(__file__).parent
MODELS_DIR = APP_DIR / "models"

# Engine key -> hub model id (same id is used on ModelScope and HuggingFace).
ASR_MODEL_IDS = {
    "sensevoice": "iic/SenseVoiceSmall",
    "funasr-nano": "FunAudioLLM/Fun-ASR-Nano-2512",
    "funasr-mlt-nano": "FunAudioLLM/Fun-ASR-MLT-Nano-2512",
}

# Engine key -> human-readable name used in download prompts / the UI.
ASR_DISPLAY_NAMES = {
    "sensevoice": "SenseVoice Small",
    "funasr-nano": "Fun-ASR-Nano",
    "funasr-mlt-nano": "Fun-ASR-MLT-Nano",
    "whisper": "Whisper",
}

# Rough on-disk sizes in bytes, used only for download-size estimates.
_MODEL_SIZE_BYTES = {
    "silero-vad": 2_000_000,
    "sensevoice": 940_000_000,
    "funasr-nano": 1_050_000_000,
    "funasr-mlt-nano": 1_050_000_000,
    "whisper-tiny": 78_000_000,
    "whisper-base": 148_000_000,
    "whisper-small": 488_000_000,
    "whisper-medium": 1_530_000_000,
    "whisper-large-v3": 3_100_000_000,
}

# faster-whisper checkpoint sizes scanned by get_cache_entries().
_WHISPER_SIZES = ["tiny", "base", "small", "medium", "large-v3"]

# (display name, hub id) pairs scanned by get_cache_entries().
_CACHE_MODELS = [
    ("SenseVoice Small", "iic/SenseVoiceSmall"),
    ("Fun-ASR-Nano", "FunAudioLLM/Fun-ASR-Nano-2512"),
    ("Fun-ASR-MLT-Nano", "FunAudioLLM/Fun-ASR-MLT-Nano-2512"),
]
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def apply_cache_env():
    """Point all model caches to ./models/.

    Sets the ModelScope, HuggingFace and torch.hub cache locations via
    environment variables so every download lands under MODELS_DIR.
    """
    root = str(MODELS_DIR.resolve())
    for env_var, subdir in (
        ("MODELSCOPE_CACHE", "modelscope"),
        ("HF_HOME", "huggingface"),
        ("TORCH_HOME", "torch"),
    ):
        os.environ[env_var] = os.path.join(root, subdir)
    log.info(f"Cache env set: {root}")
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def is_silero_cached() -> bool:
    """Return True if a Silero VAD snapshot exists in the torch.hub cache."""
    hub_dir = MODELS_DIR / "torch" / "hub"
    if not hub_dir.exists():
        return False
    return any(hub_dir.glob("snakers4_silero-vad*"))
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def _ms_model_path(org, name):
    """Return the first existing ModelScope cache path, or the default.

    ModelScope has used two cache layouts over time; the flat layout is
    also the default returned when neither exists.
    """
    flat = MODELS_DIR / "modelscope" / org / name
    nested = MODELS_DIR / "modelscope" / "hub" / "models" / org / name
    return next((p for p in (flat, nested) if p.exists()), flat)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def is_asr_cached(engine_type, model_size="medium", hub="ms") -> bool:
    """Return True if the ASR model for *engine_type* is already on disk.

    model_size is only consulted for the "whisper" engine.  `hub` is
    currently unused: for FunASR-family engines both hub caches are
    checked regardless.  Unknown engine types fall through to True,
    i.e. "nothing needs downloading".
    """
    if engine_type in ("sensevoice", "funasr-nano", "funasr-mlt-nano"):
        model_id = ASR_MODEL_IDS[engine_type]
        org, name = model_id.split("/")
        # Accept cache from either hub to avoid redundant downloads
        if _ms_model_path(org, name).exists():
            return True
        if (MODELS_DIR / "huggingface" / "hub" / f"models--{org}--{name}").exists():
            return True
        return False
    elif engine_type == "whisper":
        # faster-whisper checkpoints come from the Systran HF namespace.
        return (
            MODELS_DIR
            / "huggingface"
            / "hub"
            / f"models--Systran--faster-whisper-{model_size}"
        ).exists()
    return True
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def get_missing_models(engine, model_size, hub) -> list:
    """Return descriptors for the models that still need downloading.

    Each descriptor is a dict with "name" (display string), "type"
    (download key) and "estimated_bytes" (rough size for the UI).
    """
    pending = []
    if not is_silero_cached():
        pending.append(
            {
                "name": "Silero VAD",
                "type": "silero-vad",
                "estimated_bytes": _MODEL_SIZE_BYTES["silero-vad"],
            }
        )
    if not is_asr_cached(engine, model_size, hub):
        if engine == "whisper":
            key = f"whisper-{model_size}"
            display = f"Whisper {model_size}"
        else:
            key = engine
            display = ASR_DISPLAY_NAMES.get(engine, engine)
        pending.append(
            {
                "name": display,
                "type": key,
                "estimated_bytes": _MODEL_SIZE_BYTES.get(key, 0),
            }
        )
    return pending
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def get_local_model_path(engine_type, hub="ms"):
    """Return local snapshot path if model is cached, else None.

    Checks the preferred hub first, then falls back to the other hub.
    """
    if engine_type not in ASR_MODEL_IDS:
        return None
    org, name = ASR_MODEL_IDS[engine_type].split("/")

    def _from_modelscope():
        # ModelScope stores the model directly under the cache path.
        candidate = _ms_model_path(org, name)
        return str(candidate) if candidate.exists() else None

    def _from_huggingface():
        # HuggingFace keeps one directory per revision under snapshots/.
        snapshots = (
            MODELS_DIR / "huggingface" / "hub" / f"models--{org}--{name}" / "snapshots"
        )
        if not snapshots.exists():
            return None
        revisions = sorted(snapshots.iterdir())
        return str(revisions[-1]) if revisions else None

    probes = (
        (_from_modelscope, _from_huggingface)
        if hub == "ms"
        else (_from_huggingface, _from_modelscope)
    )
    for probe in probes:
        found = probe()
        if found:
            return found
    return None
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def download_silero():
    """Download the Silero VAD model into the torch.hub cache.

    The loaded model object is discarded immediately; only the cached
    files on disk are needed (see is_silero_cached()).
    """
    import torch

    log.info("Downloading Silero VAD...")
    model, _ = torch.hub.load(
        repo_or_dir="snakers4/silero-vad",
        model="silero_vad",
        trust_repo=True,
    )
    # Free the in-memory model; the download side effect is what matters.
    del model
    log.info("Silero VAD downloaded")
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def download_asr(engine, model_size="medium", hub="ms"):
    """Download the ASR model for *engine* into the local cache.

    engine: one of the FunASR-family keys or "whisper".
    model_size: whisper checkpoint size; ignored for other engines.
    hub: "ms" (ModelScope) or anything else for HuggingFace; whisper
         always downloads from HuggingFace.
    Unknown engines download nothing (only the final log line runs).
    """
    resolved = str(MODELS_DIR.resolve())
    ms_cache = os.path.join(resolved, "modelscope")
    hf_cache = os.path.join(resolved, "huggingface", "hub")
    if engine in ("sensevoice", "funasr-nano", "funasr-mlt-nano"):
        model_id = ASR_MODEL_IDS[engine]
        if hub == "ms":
            from modelscope import snapshot_download

            log.info(f"Downloading {model_id} from ModelScope...")
            snapshot_download(model_id=model_id, cache_dir=ms_cache)
        else:
            from huggingface_hub import snapshot_download

            log.info(f"Downloading {model_id} from HuggingFace...")
            snapshot_download(repo_id=model_id, cache_dir=hf_cache)
    elif engine == "whisper":
        from huggingface_hub import snapshot_download

        model_id = f"Systran/faster-whisper-{model_size}"
        log.info(f"Downloading {model_id} from HuggingFace...")
        snapshot_download(repo_id=model_id, cache_dir=hf_cache)
    log.info(f"ASR model downloaded: {engine}")
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
def dir_size(path) -> int:
    """Return the total size in bytes of all files under *path*.

    On an access error the partial total accumulated so far is returned.
    """
    accumulated = 0
    try:
        for entry in Path(path).rglob("*"):
            if entry.is_file():
                accumulated += entry.stat().st_size
    except (OSError, PermissionError):
        pass
    return accumulated
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def format_size(size_bytes: int) -> str:
    """Render a byte count as a human-readable string.

    B is exact, KB and MB use one decimal, GB uses two.
    """
    kib, mib, gib = 1024, 1024 ** 2, 1024 ** 3
    if size_bytes >= gib:
        return f"{size_bytes / gib:.2f} GB"
    if size_bytes >= mib:
        return f"{size_bytes / mib:.1f} MB"
    if size_bytes >= kib:
        return f"{size_bytes / kib:.1f} KB"
    return f"{size_bytes} B"
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def get_cache_entries():
    """Scan ./models/ for cached models.

    Returns a list of (display label, Path) pairs: FunASR-family models
    from both hubs, whisper checkpoints, and at most one Silero VAD entry.
    """
    found = []
    hf_hub = MODELS_DIR / "huggingface" / "hub"
    torch_hub = MODELS_DIR / "torch" / "hub"

    for display, model_id in _CACHE_MODELS:
        org, model = model_id.split("/")
        candidates = (
            (f"{display} (ModelScope)", _ms_model_path(org, model)),
            (f"{display} (HuggingFace)", hf_hub / f"models--{org}--{model}"),
        )
        found.extend(entry for entry in candidates if entry[1].exists())

    for size in _WHISPER_SIZES:
        whisper_dir = hf_hub / f"models--Systran--faster-whisper-{size}"
        if whisper_dir.exists():
            found.append((f"Whisper {size}", whisper_dir))

    if torch_hub.exists():
        # Report only the first Silero snapshot directory, as before.
        silero_dirs = [
            d for d in sorted(torch_hub.glob("snakers4_silero-vad*")) if d.is_dir()
        ]
        if silero_dirs:
            found.append(("Silero VAD", silero_dirs[0]))

    return found
|
requirements.txt
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Core dependencies
|
| 2 |
+
numpy>=1.24.0
|
| 3 |
+
PyYAML>=6.0
|
| 4 |
+
httpx>=0.28.0
|
| 5 |
+
openai>=1.0.0
|
| 6 |
+
psutil>=5.9.0
|
| 7 |
+
|
| 8 |
+
# Audio capture (Windows WASAPI loopback)
|
| 9 |
+
PyAudioWPatch>=0.2.12
|
| 10 |
+
|
| 11 |
+
# ASR engines
|
| 12 |
+
faster-whisper>=1.0.0
|
| 13 |
+
editdistance-s>=1.0.0
|
| 14 |
+
omegaconf>=2.3.0
|
| 15 |
+
kaldiio>=2.18.0
|
| 16 |
+
torch-complex>=0.4.0
|
| 17 |
+
soundfile>=0.12.0
|
| 18 |
+
librosa>=0.10.0
|
| 19 |
+
jaconv>=0.3.0
|
| 20 |
+
jamo>=0.4.1
|
| 21 |
+
hydra-core>=1.3.0
|
| 22 |
+
six>=1.16.0
|
| 23 |
+
sentencepiece>=0.2.0
|
| 24 |
+
transformers>=4.40.0
|
| 25 |
+
|
| 26 |
+
# Model hubs
|
| 27 |
+
modelscope>=1.20.0
|
| 28 |
+
huggingface_hub>=0.20.0
|
| 29 |
+
|
| 30 |
+
# UI
|
| 31 |
+
PyQt6>=6.5.0
|
| 32 |
+
|
| 33 |
+
# torch & torchaudio - install separately with CUDA support:
|
| 34 |
+
# pip install torch torchaudio --index-url https://download.pytorch.org/whl/cu126
|
screenshot/en-to-cn.png
ADDED
|
Git LFS Details
|
screenshot/jp-to-cn.png
ADDED
|
Git LFS Details
|
start.bat
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@echo off
rem Launch LiveTrans from the project root using the local virtualenv.
cd /d "%~dp0"
rem Ensure WinGet-installed tool shims are reachable on PATH.
set PATH=%LOCALAPPDATA%\Microsoft\WinGet\Links;%PATH%
.venv\Scripts\python.exe main.py
rem Keep the console open so startup errors remain visible.
pause
|
subtitle_overlay.py
ADDED
|
@@ -0,0 +1,853 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import ctypes
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
import psutil
|
| 5 |
+
from i18n import t
|
| 6 |
+
from PyQt6.QtCore import QPoint, Qt, QTimer, pyqtSignal, pyqtSlot
|
| 7 |
+
from PyQt6.QtGui import QCursor, QFont
|
| 8 |
+
from PyQt6.QtWidgets import (
|
| 9 |
+
QApplication,
|
| 10 |
+
QCheckBox,
|
| 11 |
+
QComboBox,
|
| 12 |
+
QHBoxLayout,
|
| 13 |
+
QLabel,
|
| 14 |
+
QMenu,
|
| 15 |
+
QProgressBar,
|
| 16 |
+
QPushButton,
|
| 17 |
+
QScrollArea,
|
| 18 |
+
QSizeGrip,
|
| 19 |
+
QVBoxLayout,
|
| 20 |
+
QWidget,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
# Win32 extended-window-style constants used to toggle click-through mode
# via SetWindowLong(GWL_EXSTYLE, ... | WS_EX_TRANSPARENT).
_GWL_EXSTYLE = -20
_WS_EX_TRANSPARENT = 0x20

# Baseline overlay appearance; the presets below override parts of it.
# Opacities are 0-255 alpha bytes except window_opacity, which is 0-100.
DEFAULT_STYLE = {
    "preset": "default",
    "bg_color": "#000000",
    "bg_opacity": 240,
    "header_color": "#1a1a2e",
    "header_opacity": 230,
    "border_radius": 8,
    "original_font_family": "Microsoft YaHei",
    "translation_font_family": "Microsoft YaHei",
    "original_font_size": 11,
    "translation_font_size": 14,
    "original_color": "#cccccc",
    "translation_color": "#ffffff",
    "timestamp_color": "#888899",
    "window_opacity": 95,
}
|
| 42 |
+
|
| 43 |
+
# Every preset starts from DEFAULT_STYLE and overrides only a few keys.
_BASE = DEFAULT_STYLE

# Named color themes selectable from the overlay; each entry is a full
# style dict (copy of _BASE merged with the preset's overrides).
STYLE_PRESETS = {
    "default": dict(_BASE),
    "transparent": {**_BASE, "preset": "transparent",
                    "bg_opacity": 120, "header_opacity": 120, "window_opacity": 70},
    "compact": {**_BASE, "preset": "compact",
                "original_font_size": 9, "translation_font_size": 11},
    "light": {**_BASE, "preset": "light",
              "bg_color": "#e8e8f0", "bg_opacity": 230, "header_color": "#c8c8d8",
              "header_opacity": 220, "original_color": "#333333",
              "translation_color": "#111111", "timestamp_color": "#666688"},
    "dracula": {**_BASE, "preset": "dracula",
                "bg_color": "#282a36", "bg_opacity": 235, "header_color": "#44475a",
                "header_opacity": 230, "original_color": "#f8f8f2",
                "translation_color": "#f8f8f2", "timestamp_color": "#6272a4"},
    "nord": {**_BASE, "preset": "nord",
             "bg_color": "#2e3440", "bg_opacity": 235, "header_color": "#3b4252",
             "header_opacity": 230, "original_color": "#d8dee9",
             "translation_color": "#eceff4", "timestamp_color": "#4c566a"},
    "monokai": {**_BASE, "preset": "monokai",
                "bg_color": "#272822", "bg_opacity": 235, "header_color": "#3e3d32",
                "header_opacity": 230, "original_color": "#f8f8f2",
                "translation_color": "#f8f8f2", "timestamp_color": "#75715e"},
    "solarized": {**_BASE, "preset": "solarized",
                  "bg_color": "#002b36", "bg_opacity": 235, "header_color": "#073642",
                  "header_opacity": 230, "original_color": "#839496",
                  "translation_color": "#eee8d5", "timestamp_color": "#586e75"},
    "gruvbox": {**_BASE, "preset": "gruvbox",
                "bg_color": "#282828", "bg_opacity": 235, "header_color": "#3c3836",
                "header_opacity": 230, "original_color": "#ebdbb2",
                "translation_color": "#fbf1c7", "timestamp_color": "#928374"},
    "tokyo_night": {**_BASE, "preset": "tokyo_night",
                    "bg_color": "#1a1b26", "bg_opacity": 235, "header_color": "#24283b",
                    "header_opacity": 230, "original_color": "#a9b1d6",
                    "translation_color": "#c0caf5", "timestamp_color": "#565f89"},
    "catppuccin": {**_BASE, "preset": "catppuccin",
                   "bg_color": "#1e1e2e", "bg_opacity": 235, "header_color": "#313244",
                   "header_opacity": 230, "original_color": "#cdd6f4",
                   "translation_color": "#cdd6f4", "timestamp_color": "#6c7086"},
    "one_dark": {**_BASE, "preset": "one_dark",
                 "bg_color": "#282c34", "bg_opacity": 235, "header_color": "#3e4452",
                 "header_opacity": 230, "original_color": "#abb2bf",
                 "translation_color": "#e5c07b", "timestamp_color": "#636d83"},
    "everforest": {**_BASE, "preset": "everforest",
                   "bg_color": "#2d353b", "bg_opacity": 235, "header_color": "#343f44",
                   "header_opacity": 230, "original_color": "#d3c6aa",
                   "translation_color": "#d3c6aa", "timestamp_color": "#859289"},
    "kanagawa": {**_BASE, "preset": "kanagawa",
                 "bg_color": "#1f1f28", "bg_opacity": 235, "header_color": "#2a2a37",
                 "header_opacity": 230, "original_color": "#dcd7ba",
                 "translation_color": "#dcd7ba", "timestamp_color": "#54546d"},
}
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def _hex_to_rgba(hex_color: str, opacity: int) -> str:
|
| 99 |
+
hex_color = hex_color.lstrip("#")
|
| 100 |
+
r, g, b = int(hex_color[0:2], 16), int(hex_color[2:4], 16), int(hex_color[4:6], 16)
|
| 101 |
+
return f"rgba({r},{g},{b},{opacity})"
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
class ChatMessage(QWidget):
    """Single chat message widget with original + async translation."""

    # Shared style dict; rebound by the overlay when a preset is applied.
    _current_style = DEFAULT_STYLE

    def __init__(
        self,
        msg_id: int,
        timestamp: str,
        original: str,
        source_lang: str,
        asr_ms: float,
        parent=None,
    ):
        """Build the message showing the ASR result; translation arrives later.

        msg_id: id used to route the async translation back to this widget.
        timestamp: preformatted clock string shown in the header.
        original: recognized source-language text.
        source_lang: language tag displayed next to the timestamp.
        asr_ms: ASR latency in milliseconds, shown in the header.
        """
        super().__init__(parent)
        self.msg_id = msg_id
        self._original = original
        self._translated = ""
        self._timestamp = timestamp
        self._source_lang = source_lang
        self._asr_ms = asr_ms
        self._translate_ms = 0.0
        self._layout = QVBoxLayout(self)
        self._layout.setContentsMargins(8, 4, 8, 4)
        self._layout.setSpacing(2)

        s = self._current_style
        # Header line: [time] [lang] original text + ASR latency.
        self._header_label = QLabel(self._build_header_html(s))
        self._header_label.setFont(QFont(s["original_font_family"], s["original_font_size"]))
        self._header_label.setTextFormat(Qt.TextFormat.RichText)
        self._header_label.setWordWrap(True)
        self._header_label.setStyleSheet("background: transparent;")
        self._layout.addWidget(self._header_label)

        # Placeholder text until set_translation() is called by the pipeline.
        self._trans_label = QLabel(
            f'<span style="color:#999; font-style:italic;">{t("translating")}</span>'
        )
        self._trans_label.setFont(QFont(s["translation_font_family"], s["translation_font_size"]))
        self._trans_label.setTextFormat(Qt.TextFormat.RichText)
        self._trans_label.setWordWrap(True)
        self._trans_label.setStyleSheet("background: transparent;")
        self._layout.addWidget(self._trans_label)

    def _build_header_html(self, s):
        # Rich-text header rendered from the style dict *s*.
        return (
            f'<span style="color:{s["timestamp_color"]};">[{self._timestamp}]</span> '
            f'<span style="color:#6cf;">[{self._source_lang}]</span> '
            f'<span style="color:{s["original_color"]};">{_escape(self._original)}</span> '
            f'<span style="color:#8b8; font-size:9pt;">ASR {self._asr_ms:.0f}ms</span>'
        )

    def set_translation(self, translated: str, translate_ms: float):
        """Replace the placeholder with the translated text.

        A falsy *translated* means source and target language matched,
        so a "same language" notice is shown instead.
        """
        self._translated = translated or ""
        self._translate_ms = translate_ms
        s = self._current_style
        if translated:
            self._trans_label.setText(
                f'<span style="color:{s["translation_color"]};">> {_escape(translated)}</span> '
                f'<span style="color:#db8; font-size:9pt;">TL {translate_ms:.0f}ms</span>'
            )
        else:
            self._trans_label.setText(
                f'<span style="color:#aaa; font-style:italic;">> {t("same_language")}</span>'
            )

    def apply_style(self, s: dict):
        """Re-render both labels with a new style dict (preset switch)."""
        self._header_label.setText(self._build_header_html(s))
        self._header_label.setFont(QFont(s["original_font_family"], s["original_font_size"]))
        self._trans_label.setFont(QFont(s["translation_font_family"], s["translation_font_size"]))
        # Only re-render the translation line once a translation exists.
        if self._translated:
            self._trans_label.setText(
                f'<span style="color:{s["translation_color"]};">> {_escape(self._translated)}</span> '
                f'<span style="color:#db8; font-size:9pt;">TL {self._translate_ms:.0f}ms</span>'
            )

    def contextMenuEvent(self, event):
        # Right-click menu offering copy of original/translated/both texts.
        menu = QMenu(self)
        menu.setStyleSheet("""
            QMenu { background: #2a2a3a; color: #ddd; border: 1px solid #555; }
            QMenu::item:selected { background: #444; }
        """)
        copy_orig = menu.addAction(t("copy_original"))
        copy_trans = menu.addAction(t("copy_translation"))
        copy_all = menu.addAction(t("copy_all"))
        action = menu.exec(event.globalPos())
        if action == copy_orig:
            QApplication.clipboard().setText(self._original)
        elif action == copy_trans:
            QApplication.clipboard().setText(self._translated)
        elif action == copy_all:
            QApplication.clipboard().setText(f"{self._original}\n{self._translated}")
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def _escape(text: str) -> str:
|
| 198 |
+
return text.replace("&", "&").replace("<", "<").replace(">", ">")
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
# Shared stylesheet for the small translucent header buttons.
_BTN_CSS = """
QPushButton {
    background: rgba(255,255,255,20);
    border: 1px solid rgba(255,255,255,40);
    border-radius: 3px;
    color: #aaa;
    font-size: 11px;
    padding: 0 6px;
}
QPushButton:hover {
    background: rgba(255,255,255,40);
    color: #ddd;
}
"""

# Progress-bar stylesheet template; format with color=... to set the chunk.
_BAR_CSS_TPL = """
QProgressBar {{
    background: rgba(255,255,255,15);
    border: 1px solid rgba(255,255,255,30);
    border-radius: 3px;
    text-align: center;
    font-size: 8pt;
    color: #aaa;
}}
QProgressBar::chunk {{
    background: {color};
    border-radius: 2px;
}}
"""
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
class MonitorBar(QWidget):
    """Compact system monitor displayed in the overlay.

    Row 1 shows RMS/VAD meters; row 2 shows process, GPU and pipeline
    statistics refreshed once per second.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setStyleSheet("background: transparent;")

        layout = QVBoxLayout(self)
        layout.setContentsMargins(8, 4, 8, 4)
        layout.setSpacing(2)

        # Row 1: audio level (RMS) and voice-activity (VAD) bars.
        row1 = QHBoxLayout()
        row1.setSpacing(6)

        rms_lbl = QLabel("RMS")
        rms_lbl.setFixedWidth(28)
        rms_lbl.setFont(QFont("Consolas", 8))
        rms_lbl.setStyleSheet("color: #888; background: transparent;")
        row1.addWidget(rms_lbl)

        self._rms_bar = QProgressBar()
        self._rms_bar.setRange(0, 100)
        self._rms_bar.setFixedHeight(14)
        self._rms_bar.setTextVisible(True)
        self._rms_bar.setFormat("%v%")
        self._rms_bar.setStyleSheet(_BAR_CSS_TPL.format(color="#4ec9b0"))
        row1.addWidget(self._rms_bar)

        vad_lbl = QLabel("VAD")
        vad_lbl.setFixedWidth(28)
        vad_lbl.setFont(QFont("Consolas", 8))
        vad_lbl.setStyleSheet("color: #888; background: transparent;")
        row1.addWidget(vad_lbl)

        self._vad_bar = QProgressBar()
        self._vad_bar.setRange(0, 100)
        self._vad_bar.setFixedHeight(14)
        self._vad_bar.setTextVisible(True)
        self._vad_bar.setFormat("%v%")
        self._vad_bar.setStyleSheet(_BAR_CSS_TPL.format(color="#dcdcaa"))
        row1.addWidget(self._vad_bar)

        layout.addLayout(row1)

        # Row 2: one rich-text line with all numeric stats.
        self._stats_label = QLabel()
        self._stats_label.setFont(QFont("Consolas", 8))
        self._stats_label.setStyleSheet("color: #888; background: transparent;")
        self._stats_label.setTextFormat(Qt.TextFormat.RichText)
        layout.addWidget(self._stats_label)

        # Cached values rendered by _refresh_stats().
        self._cpu = 0
        self._ram_mb = 0.0
        self._gpu_text = "N/A"
        self._asr_device = ""
        self._asr_count = 0
        self._tl_count = 0
        self._prompt_tokens = 0
        self._completion_tokens = 0

        # Poll process/GPU stats once per second.
        self._sys_timer = QTimer(self)
        self._sys_timer.timeout.connect(self._update_system)
        self._sys_timer.start(1000)
        self._update_system()
        self._refresh_stats()

    def update_audio(self, rms: float, vad: float):
        """Feed the meters; rms is scaled x500 and vad x100, clamped to 100."""
        self._rms_bar.setValue(min(100, int(rms * 500)))
        self._vad_bar.setValue(min(100, int(vad * 100)))

    def update_asr_device(self, device: str):
        """Record the ASR device string (colored green when it names cuda)."""
        self._asr_device = device
        self._refresh_stats()

    def update_pipeline_stats(
        self, asr_count, tl_count, prompt_tokens, completion_tokens
    ):
        """Update ASR/translation counters and LLM token usage, then redraw."""
        self._asr_count = asr_count
        self._tl_count = tl_count
        self._prompt_tokens = prompt_tokens
        self._completion_tokens = completion_tokens
        self._refresh_stats()

    def _update_system(self):
        # Best effort: on failure the previous CPU/RAM values are kept.
        try:
            proc = psutil.Process(os.getpid())
            self._cpu = int(proc.cpu_percent(interval=0))
            self._ram_mb = proc.memory_info().rss / 1024 / 1024
        except Exception:
            pass
        try:
            import torch

            if torch.cuda.is_available():
                alloc = torch.cuda.memory_allocated() / 1024 / 1024
                self._gpu_text = f"{alloc:.0f}MB"
            else:
                self._gpu_text = "N/A"
        except Exception:
            # torch missing or CUDA query failed.
            self._gpu_text = "N/A"
        self._refresh_stats()

    def _refresh_stats(self):
        # Compose the single rich-text stats line from the cached values.
        total = self._prompt_tokens + self._completion_tokens
        tokens_str = f"{total / 1000:.1f}k" if total >= 1000 else str(total)
        dev_str = ""
        if self._asr_device:
            dev_color = "#4ec9b0" if "cuda" in self._asr_device.lower() else "#dcdcaa"
            dev_str = (
                f'<span style="color:{dev_color};">{self._asr_device}</span> '
                f'<span style="color:#555;">|</span> '
            )
        self._stats_label.setText(
            f"{dev_str}"
            f'<span style="color:#6cf;">CPU</span> {self._cpu}% '
            f'<span style="color:#6cf;">RAM</span> {self._ram_mb:.0f}MB '
            f'<span style="color:#6cf;">GPU</span> {self._gpu_text} '
            f'<span style="color:#555;">|</span> '
            f'<span style="color:#8b8;">ASR</span> {self._asr_count} '
            f'<span style="color:#db8;">TL</span> {self._tl_count} '
            f'<span style="color:#c9c;">Tok</span> {tokens_str} '
            f'<span style="color:#666;">({self._prompt_tokens}\u2191{self._completion_tokens}\u2193)</span>'
        )
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
class _DragArea(QWidget):
    """Small draggable area (title + grip) that moves its top-level window."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setCursor(QCursor(Qt.CursorShape.SizeAllCursor))
        # Offset of the press point from the window's top-left corner.
        self._drag_pos = None

    def mousePressEvent(self, event):
        if event.button() != Qt.MouseButton.LeftButton:
            return
        origin = self.window().frameGeometry().topLeft()
        self._drag_pos = event.globalPosition().toPoint() - origin

    def mouseMoveEvent(self, event):
        if not (self._drag_pos and event.buttons() & Qt.MouseButton.LeftButton):
            return
        self.window().move(event.globalPosition().toPoint() - self._drag_pos)

    def mouseReleaseEvent(self, event):
        self._drag_pos = None
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
# Stylesheet for the compact combo boxes embedded in the header bar.
_COMBO_CSS = """
QComboBox {
    background: rgba(255,255,255,20);
    border: 1px solid rgba(255,255,255,40);
    border-radius: 3px;
    color: #aaa;
    font-size: 11px;
    padding: 0 4px;
}
QComboBox:hover { background: rgba(255,255,255,40); color: #ddd; }
QComboBox::drop-down { border: none; width: 14px; }
QComboBox::down-arrow { image: none; border: none; }
QComboBox QAbstractItemView {
    background: #2a2a3a; color: #ccc; selection-background-color: #444;
}
"""

# Stylesheet for the small header checkboxes.
_CHECK_CSS = (
    "QCheckBox { color: #888; background: transparent; spacing: 3px; }"
    "QCheckBox::indicator { width: 12px; height: 12px; }"
)
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
class DragHandle(QWidget):
    """Top bar: row1=title+buttons, row2=checkboxes+combos.

    Emits plain Qt signals for every user action so the owning overlay can
    wire them to application logic without this widget knowing about it.
    """

    settings_clicked = pyqtSignal()
    monitor_toggled = pyqtSignal(bool)
    click_through_toggled = pyqtSignal(bool)
    topmost_toggled = pyqtSignal(bool)
    auto_scroll_toggled = pyqtSignal(bool)
    target_language_changed = pyqtSignal(str)
    model_changed = pyqtSignal(int)
    start_clicked = pyqtSignal()
    stop_clicked = pyqtSignal()
    clear_clicked = pyqtSignal()
    quit_clicked = pyqtSignal()

    # Amber-tinted variant of the shared button CSS, shown while paused.
    # Defined at class level so it is computed once per process.
    _PAUSED_CSS = _BTN_CSS.replace(
        "rgba(255,255,255,20)", "rgba(220,180,60,50)"
    ).replace("color: #aaa", "color: #ddb")

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setFixedHeight(48)
        self.setStyleSheet("background: rgba(60, 60, 80, 200); border-radius: 4px;")

        outer = QVBoxLayout(self)
        outer.setContentsMargins(0, 0, 0, 0)
        outer.setSpacing(0)

        # Row 1: drag title + action buttons
        row1 = QHBoxLayout()
        row1.setContentsMargins(0, 0, 4, 0)
        row1.setSpacing(3)

        drag = _DragArea()
        drag.setStyleSheet("background: transparent;")
        drag_layout = QHBoxLayout(drag)
        drag_layout.setContentsMargins(10, 0, 6, 0)
        drag_layout.setSpacing(6)

        title = QLabel("\u2630 LiveTrans")
        title.setFont(QFont("Consolas", 9, QFont.Weight.Bold))
        title.setStyleSheet("color: #aaa; background: transparent;")
        drag_layout.addWidget(title)
        drag_layout.addStretch()
        row1.addWidget(drag, 1)

        def _btn(text, tip=None):
            # Small uniformly-styled header button.
            b = QPushButton(text)
            b.setFixedHeight(20)
            b.setCursor(QCursor(Qt.CursorShape.PointingHandCursor))
            b.setFont(QFont("Consolas", 8))
            b.setStyleSheet(_BTN_CSS)
            if tip:
                b.setToolTip(tip)
            return b

        self._running = False
        self._start_stop_btn = _btn(t("paused"))
        self._start_stop_btn.setFixedWidth(56)
        # Fix: the widget starts in the paused state, so apply the same
        # paused styling that set_running(False) would apply — previously
        # the button showed the paused label in the running style.
        self._start_stop_btn.setStyleSheet(self._PAUSED_CSS)
        self._start_stop_btn.clicked.connect(self._on_start_stop)
        row1.addWidget(self._start_stop_btn)

        clear_btn = _btn(t("clear"))
        clear_btn.clicked.connect(self.clear_clicked.emit)
        row1.addWidget(clear_btn)

        settings_btn = _btn(t("settings"))
        settings_btn.clicked.connect(self.settings_clicked.emit)
        row1.addWidget(settings_btn)

        self._monitor_expanded = True
        self._monitor_btn = _btn(t("monitor"))
        self._monitor_btn.clicked.connect(self._toggle_monitor)
        row1.addWidget(self._monitor_btn)

        # Quit gets a red-tinted variant of the shared button CSS.
        quit_btn = _btn(t("quit"))
        quit_btn.setStyleSheet(
            _BTN_CSS.replace("rgba(255,255,255,20)", "rgba(200,60,60,40)").replace(
                "rgba(255,255,255,40)", "rgba(200,60,60,80)"
            )
        )
        quit_btn.clicked.connect(self.quit_clicked.emit)
        row1.addWidget(quit_btn)

        outer.addLayout(row1)

        # Row 2: checkboxes + combos
        row2 = QHBoxLayout()
        row2.setContentsMargins(10, 0, 4, 2)
        row2.setSpacing(6)

        self._ct_check = QCheckBox(t("click_through"))
        self._ct_check.setFont(QFont("Consolas", 8))
        self._ct_check.setCursor(QCursor(Qt.CursorShape.PointingHandCursor))
        self._ct_check.setStyleSheet(_CHECK_CSS)
        self._ct_check.toggled.connect(self.click_through_toggled.emit)
        row2.addWidget(self._ct_check)

        self._topmost_check = QCheckBox(t("top_most"))
        self._topmost_check.setFont(QFont("Consolas", 8))
        self._topmost_check.setCursor(QCursor(Qt.CursorShape.PointingHandCursor))
        self._topmost_check.setStyleSheet(_CHECK_CSS)
        self._topmost_check.setChecked(True)  # overlay starts always-on-top
        self._topmost_check.toggled.connect(self.topmost_toggled.emit)
        row2.addWidget(self._topmost_check)

        self._auto_scroll = QCheckBox(t("auto_scroll"))
        self._auto_scroll.setFont(QFont("Consolas", 8))
        self._auto_scroll.setCursor(QCursor(Qt.CursorShape.PointingHandCursor))
        self._auto_scroll.setStyleSheet(_CHECK_CSS)
        self._auto_scroll.setChecked(True)
        self._auto_scroll.toggled.connect(self.auto_scroll_toggled.emit)
        row2.addWidget(self._auto_scroll)

        row2.addStretch()

        model_lbl = QLabel(t("model_label"))
        model_lbl.setFont(QFont("Consolas", 8))
        model_lbl.setStyleSheet("color: #888; background: transparent;")
        row2.addWidget(model_lbl)

        self._model_combo = QComboBox()
        self._model_combo.setFixedHeight(18)
        self._model_combo.setMinimumWidth(140)
        self._model_combo.setFont(QFont("Consolas", 8))
        self._model_combo.setStyleSheet(_COMBO_CSS)
        self._model_combo.currentIndexChanged.connect(self.model_changed.emit)
        row2.addWidget(self._model_combo)

        tgt_lbl = QLabel(t("target_label"))
        tgt_lbl.setFont(QFont("Consolas", 8))
        tgt_lbl.setStyleSheet("color: #888; background: transparent;")
        row2.addWidget(tgt_lbl)

        self._target_lang = QComboBox()
        self._target_lang.setFixedHeight(18)
        self._target_lang.setFixedWidth(42)
        self._target_lang.setFont(QFont("Consolas", 8))
        self._target_lang.setStyleSheet(_COMBO_CSS)
        self._target_lang.addItems(["zh", "en", "ja", "ko", "fr", "de", "es", "ru"])
        self._target_lang.currentTextChanged.connect(self.target_language_changed.emit)
        row2.addWidget(self._target_lang)

        outer.addLayout(row2)

    def _on_start_stop(self):
        """Toggle button: emit start or stop depending on current state."""
        if self._running:
            self.stop_clicked.emit()
        else:
            self.start_clicked.emit()

    def set_target_language(self, lang: str):
        """Select `lang` in the target-language combo if it is a known code."""
        idx = self._target_lang.findText(lang)
        if idx >= 0:
            self._target_lang.setCurrentIndex(idx)

    def set_models(self, models: list, active_index: int = 0):
        """Repopulate the model combo without emitting model_changed."""
        self._model_combo.blockSignals(True)
        self._model_combo.clear()
        for m in models:
            self._model_combo.addItem(m.get("name", m.get("model", "?")))
        if 0 <= active_index < self._model_combo.count():
            self._model_combo.setCurrentIndex(active_index)
        self._model_combo.blockSignals(False)

    @property
    def auto_scroll(self) -> bool:
        """Whether the overlay should follow new messages."""
        return self._auto_scroll.isChecked()

    def set_running(self, running: bool):
        """Reflect pipeline run state in the start/stop button text + style."""
        self._running = running
        if running:
            self._start_stop_btn.setText(t("running"))
            self._start_stop_btn.setStyleSheet(_BTN_CSS)
        else:
            self._start_stop_btn.setText(t("paused"))
            self._start_stop_btn.setStyleSheet(self._PAUSED_CSS)

    def _toggle_monitor(self):
        """Flip the monitor-bar visibility and notify the owner."""
        self._monitor_expanded = not self._monitor_expanded
        self.monitor_toggled.emit(self._monitor_expanded)
|
| 585 |
+
|
| 586 |
+
|
| 587 |
+
class SubtitleOverlay(QWidget):
    """Chat-style overlay window for displaying live transcription.

    Frameless, translucent, tool-style window holding a scrollable list of
    ChatMessage widgets. All public mutators (add_message, update_*, clear)
    are thread-safe: they only emit signals, and the actual widget updates
    run on the GUI thread via the connected slots.
    """

    # Cross-thread UI update signals (emit from worker threads).
    add_message_signal = pyqtSignal(int, str, str, str, float)
    update_translation_signal = pyqtSignal(int, str, float)
    clear_signal = pyqtSignal()
    # Monitor signals (thread-safe)
    update_monitor_signal = pyqtSignal(float, float)
    update_stats_signal = pyqtSignal(int, int, int, int)
    update_asr_device_signal = pyqtSignal(str)

    # User-intent signals re-emitted from the header for the app controller.
    settings_requested = pyqtSignal()
    target_language_changed = pyqtSignal(str)
    model_switch_requested = pyqtSignal(int)
    start_requested = pyqtSignal()
    stop_requested = pyqtSignal()
    quit_requested = pyqtSignal()

    def __init__(self, config):
        super().__init__()
        self._config = config
        self._messages = {}  # msg_id -> ChatMessage widget
        self._max_messages = 50  # oldest messages are evicted beyond this
        self._click_through = False
        self._setup_ui()

        # Route the thread-safe signals to their GUI-thread slots.
        self.add_message_signal.connect(self._on_add_message)
        self.update_translation_signal.connect(self._on_update_translation)
        self.clear_signal.connect(self._on_clear)
        self.update_monitor_signal.connect(self._on_update_monitor)
        self.update_stats_signal.connect(self._on_update_stats)
        self.update_asr_device_signal.connect(self._on_update_asr_device)

    def _setup_ui(self):
        """Build the frameless window: header, monitor bar, scroll area, grip."""
        self.setWindowFlags(
            Qt.WindowType.FramelessWindowHint
            | Qt.WindowType.WindowStaysOnTopHint
            | Qt.WindowType.Tool
        )
        self.setAttribute(Qt.WidgetAttribute.WA_TranslucentBackground)
        # Don't steal focus from the app whose audio is being captured.
        self.setAttribute(Qt.WidgetAttribute.WA_ShowWithoutActivating)

        # Default position: bottom-right corner of the primary screen.
        screen = QApplication.primaryScreen()
        geo = screen.geometry()
        width = 620
        height = 500
        x = geo.width() - width - 20
        y = geo.height() - height - 60
        self.setGeometry(x, y, width, height)
        self.setMinimumSize(480, 280)

        main_layout = QVBoxLayout(self)
        main_layout.setContentsMargins(0, 0, 0, 0)
        main_layout.setSpacing(0)

        self._container = QWidget()
        self._container.setStyleSheet(
            "background-color: rgba(15, 15, 25, 200); border-radius: 8px;"
        )

        container_layout = QVBoxLayout(self._container)
        container_layout.setContentsMargins(4, 4, 4, 4)
        container_layout.setSpacing(0)

        # Drag handle
        self._handle = DragHandle()
        self._handle.settings_clicked.connect(self.settings_requested.emit)
        self._handle.monitor_toggled.connect(self._on_monitor_toggled)
        self._handle.click_through_toggled.connect(self._set_click_through)
        self._handle.topmost_toggled.connect(self._set_topmost)
        self._handle.target_language_changed.connect(self.target_language_changed.emit)
        self._handle.model_changed.connect(self.model_switch_requested.emit)
        self._handle.start_clicked.connect(self.start_requested.emit)
        self._handle.stop_clicked.connect(self.stop_requested.emit)
        self._handle.clear_clicked.connect(self._on_clear)
        self._handle.quit_clicked.connect(self.quit_requested.emit)
        container_layout.addWidget(self._handle)

        # Monitor bar (collapsible)
        self._monitor = MonitorBar()
        container_layout.addWidget(self._monitor)

        # Scroll area
        self._scroll = QScrollArea()
        self._scroll.setWidgetResizable(True)
        self._scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)
        self._scroll.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAsNeeded)
        self._scroll.setStyleSheet("""
            QScrollArea { border: none; background: transparent; }
            QScrollBar:vertical {
                width: 6px; background: transparent;
            }
            QScrollBar::handle:vertical {
                background: rgba(255,255,255,60); border-radius: 3px;
                min-height: 20px;
            }
            QScrollBar::add-line:vertical, QScrollBar::sub-line:vertical {
                height: 0;
            }
        """)

        self._msg_container = QWidget()
        self._msg_container.setStyleSheet("background: transparent;")
        self._msg_layout = QVBoxLayout(self._msg_container)
        self._msg_layout.setContentsMargins(0, 0, 0, 0)
        self._msg_layout.setSpacing(2)
        # Stretch keeps messages bottom-aligned while the list is short.
        self._msg_layout.addStretch()

        self._scroll.setWidget(self._msg_container)
        container_layout.addWidget(self._scroll)

        # Resize grip pinned to the bottom-right corner.
        grip_row = QHBoxLayout()
        grip_row.addStretch()
        self._grip = QSizeGrip(self)
        self._grip.setFixedSize(16, 16)
        self._grip.setStyleSheet("background: transparent;")
        grip_row.addWidget(self._grip)
        container_layout.addLayout(grip_row)

        main_layout.addWidget(self._container)

        # Poll the cursor every 50ms to toggle Win32 click-through per region.
        self._ct_timer = QTimer(self)
        self._ct_timer.timeout.connect(self._check_click_through)
        self._ct_timer.start(50)

    def set_running(self, running: bool):
        """Reflect pipeline run state in the header start/stop button."""
        self._handle.set_running(running)

    def _set_topmost(self, enabled: bool):
        """Toggle the always-on-top window flag.

        Changing window flags hides the window on most platforms, so it is
        re-shown immediately afterwards.
        """
        flags = self.windowFlags()
        if enabled:
            flags |= Qt.WindowType.WindowStaysOnTopHint
        else:
            flags &= ~Qt.WindowType.WindowStaysOnTopHint
        self.setWindowFlags(flags)
        self.show()

    def _set_click_through(self, enabled: bool):
        """Enable/disable click-through mode (Windows WS_EX_TRANSPARENT)."""
        self._click_through = enabled
        if not enabled:
            # Clear the transparent ex-style immediately so the window is
            # clickable again without waiting for the poll timer.
            hwnd = int(self.winId())
            style = ctypes.windll.user32.GetWindowLongW(hwnd, _GWL_EXSTYLE)
            if style & _WS_EX_TRANSPARENT:
                ctypes.windll.user32.SetWindowLongW(
                    hwnd, _GWL_EXSTYLE, style & ~_WS_EX_TRANSPARENT
                )

    def _check_click_through(self):
        """Timer slot: keep the header clickable while the body is click-through."""
        if not self._click_through:
            return
        cursor = QCursor.pos()
        local = self.mapFromGlobal(cursor)
        hwnd = int(self.winId())
        style = ctypes.windll.user32.GetWindowLongW(hwnd, _GWL_EXSTYLE)

        # "Header" = everything above the scroll area (drag bar + monitor).
        scroll_top = self._scroll.mapTo(self, QPoint(0, 0)).y()
        in_header = 0 <= local.x() <= self.width() and 0 <= local.y() < scroll_top

        if in_header:
            # Cursor over the header: make the window accept clicks.
            if style & _WS_EX_TRANSPARENT:
                ctypes.windll.user32.SetWindowLongW(
                    hwnd, _GWL_EXSTYLE, style & ~_WS_EX_TRANSPARENT
                )
        else:
            # Cursor over the body: let clicks pass through to apps below.
            if not (style & _WS_EX_TRANSPARENT):
                ctypes.windll.user32.SetWindowLongW(
                    hwnd, _GWL_EXSTYLE, style | _WS_EX_TRANSPARENT
                )

    def _on_monitor_toggled(self, expanded: bool):
        self._monitor.setVisible(expanded)

    @pyqtSlot(float, float)
    def _on_update_monitor(self, rms: float, vad_conf: float):
        self._monitor.update_audio(rms, vad_conf)

    @pyqtSlot(int, int, int, int)
    def _on_update_stats(self, asr_count, tl_count, prompt_tokens, completion_tokens):
        self._monitor.update_pipeline_stats(
            asr_count, tl_count, prompt_tokens, completion_tokens
        )

    @pyqtSlot(str)
    def _on_update_asr_device(self, device: str):
        self._monitor.update_asr_device(device)

    @pyqtSlot(int, str, str, str, float)
    def _on_add_message(self, msg_id, timestamp, original, source_lang, asr_ms):
        """GUI-thread slot: append a message widget, evicting the oldest."""
        msg = ChatMessage(msg_id, timestamp, original, source_lang, asr_ms)
        self._messages[msg_id] = msg
        self._msg_layout.addWidget(msg)

        if len(self._messages) > self._max_messages:
            # NOTE(review): eviction assumes msg_ids increase monotonically,
            # so min() is the oldest — confirm against the id allocator.
            oldest_id = min(self._messages.keys())
            old_msg = self._messages.pop(oldest_id)
            self._msg_layout.removeWidget(old_msg)
            old_msg.deleteLater()

        # Defer so the layout settles before scrolling.
        QTimer.singleShot(50, self._scroll_to_bottom)

    @pyqtSlot(int, str, float)
    def _on_update_translation(self, msg_id, translated, translate_ms):
        """GUI-thread slot: attach a translation to an existing message.

        Silently ignores ids that were already evicted.
        """
        msg = self._messages.get(msg_id)
        if msg:
            msg.set_translation(translated, translate_ms)
            QTimer.singleShot(50, self._scroll_to_bottom)

    @pyqtSlot()
    def _on_clear(self):
        """Remove and destroy all message widgets."""
        for msg in self._messages.values():
            self._msg_layout.removeWidget(msg)
            msg.deleteLater()
        self._messages.clear()

    def _scroll_to_bottom(self):
        # Respect the user's auto-scroll checkbox.
        if not self._handle.auto_scroll:
            return
        sb = self._scroll.verticalScrollBar()
        sb.setValue(sb.maximum())

    def apply_style(self, style: dict):
        """Apply a user style dict (merged over DEFAULT_STYLE) to the window
        and every existing message widget."""
        s = {**DEFAULT_STYLE, **style}
        # Migrate old single font_family to split fields
        if "font_family" in s and "original_font_family" not in style:
            s["original_font_family"] = s["font_family"]
            s["translation_font_family"] = s["font_family"]
        # Container background
        bg_rgba = _hex_to_rgba(s["bg_color"], s["bg_opacity"])
        self._container.setStyleSheet(
            f"background-color: {bg_rgba}; border-radius: {s['border_radius']}px;"
        )
        # Header background
        hdr_rgba = _hex_to_rgba(s["header_color"], s["header_opacity"])
        self._handle.setStyleSheet(f"background: {hdr_rgba}; border-radius: 4px;")
        # Window opacity
        self.setWindowOpacity(s["window_opacity"] / 100.0)
        # Update all existing messages
        ChatMessage._current_style = s
        for msg in self._messages.values():
            msg.apply_style(s)

    # Thread-safe public API
    def add_message(self, msg_id, timestamp, original, source_lang, asr_ms):
        self.add_message_signal.emit(msg_id, timestamp, original, source_lang, asr_ms)

    def update_translation(self, msg_id, translated, translate_ms):
        self.update_translation_signal.emit(msg_id, translated, translate_ms)

    def update_monitor(self, rms, vad_conf):
        self.update_monitor_signal.emit(rms, vad_conf)

    def update_stats(self, asr_count, tl_count, prompt_tokens, completion_tokens):
        self.update_stats_signal.emit(
            asr_count, tl_count, prompt_tokens, completion_tokens
        )

    def update_asr_device(self, device: str):
        self.update_asr_device_signal.emit(device)

    def set_target_language(self, lang: str):
        self._handle.set_target_language(lang)

    def set_models(self, models: list, active_index: int = 0):
        self._handle.set_models(models, active_index)

    def clear(self):
        self.clear_signal.emit()
|
test_audio.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Quick test: capture loopback audio and check if data is non-zero."""
|
| 2 |
+
import pyaudiowpatch as pyaudio
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
pa = pyaudio.PyAudio()
|
| 6 |
+
|
| 7 |
+
# Find loopback device
|
| 8 |
+
wasapi = None
|
| 9 |
+
for i in range(pa.get_host_api_count()):
|
| 10 |
+
info = pa.get_host_api_info_by_index(i)
|
| 11 |
+
if "WASAPI" in info["name"]:
|
| 12 |
+
wasapi = info
|
| 13 |
+
break
|
| 14 |
+
|
| 15 |
+
print(f"WASAPI: {wasapi['name']}")
|
| 16 |
+
|
| 17 |
+
loopback = None
|
| 18 |
+
for i in range(pa.get_device_count()):
|
| 19 |
+
dev = pa.get_device_info_by_index(i)
|
| 20 |
+
if dev.get("isLoopbackDevice", False):
|
| 21 |
+
loopback = dev
|
| 22 |
+
print(f" Loopback: [{i}] {dev['name']} ch={dev['maxInputChannels']} rate={dev['defaultSampleRate']}")
|
| 23 |
+
|
| 24 |
+
if loopback is None:
|
| 25 |
+
print("No loopback device found!")
|
| 26 |
+
pa.terminate()
|
| 27 |
+
exit(1)
|
| 28 |
+
|
| 29 |
+
channels = loopback["maxInputChannels"]
|
| 30 |
+
rate = int(loopback["defaultSampleRate"])
|
| 31 |
+
chunk = int(rate * 0.5)
|
| 32 |
+
|
| 33 |
+
print(f"\nCapturing from: {loopback['name']}")
|
| 34 |
+
print(f"Config: {rate}Hz, {channels}ch, chunk={chunk}")
|
| 35 |
+
|
| 36 |
+
stream = pa.open(
|
| 37 |
+
format=pyaudio.paFloat32,
|
| 38 |
+
channels=channels,
|
| 39 |
+
rate=rate,
|
| 40 |
+
input=True,
|
| 41 |
+
input_device_index=loopback["index"],
|
| 42 |
+
frames_per_buffer=chunk,
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
print("\nReading 6 chunks (3 seconds)...")
|
| 46 |
+
for i in range(6):
|
| 47 |
+
data = stream.read(chunk, exception_on_overflow=False)
|
| 48 |
+
audio = np.frombuffer(data, dtype=np.float32)
|
| 49 |
+
mono = audio.reshape(-1, channels).mean(axis=1) if channels > 1 else audio
|
| 50 |
+
rms = np.sqrt(np.mean(mono**2))
|
| 51 |
+
print(f" Chunk {i}: samples={len(mono)}, rms={rms:.6f}, max={np.abs(mono).max():.6f}")
|
| 52 |
+
|
| 53 |
+
stream.stop_stream()
|
| 54 |
+
stream.close()
|
| 55 |
+
pa.terminate()
|
| 56 |
+
print("\nDone.")
|
translator.py
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import time
|
| 3 |
+
|
| 4 |
+
import httpx
|
| 5 |
+
from openai import OpenAI
|
| 6 |
+
|
| 7 |
+
log = logging.getLogger("LiveTrans.TL")
|
| 8 |
+
|
| 9 |
+
# ISO 639-1 code -> English display name, used when filling the prompt
# template; unknown codes fall through as-is (see Translator._build_system_prompt).
LANGUAGE_DISPLAY = {
    "en": "English",
    "ja": "Japanese",
    "zh": "Chinese",
    "ko": "Korean",
    "fr": "French",
    "de": "German",
    "es": "Spanish",
    "ru": "Russian",
}

# Fallback system-prompt template; placeholders are filled via str.format
# with source_lang / target_lang display names.
DEFAULT_PROMPT = (
    "You are a subtitle translator. Translate {source_lang} into {target_lang}.\n"
    "Output ONLY the translated text, nothing else.\n"
    "Keep the translation natural, colloquial, and concise."
)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def make_openai_client(
    api_base: str, api_key: str, proxy: str = "none", timeout=None
) -> OpenAI:
    """Build an OpenAI-compatible client with proxy and timeout handling.

    proxy semantics:
      - "system": let httpx pick up HTTP(S)_PROXY from the environment
      - "none"/""/None: explicitly bypass any environment proxies
      - anything else: treat as an explicit proxy URL
    """
    client_args = {"base_url": api_base, "api_key": api_key}
    if timeout is not None:
        client_args["timeout"] = httpx.Timeout(timeout, connect=5.0)
    if proxy != "system":
        if proxy in ("none", "", None):
            # Ignore environment proxy variables entirely.
            client_args["http_client"] = httpx.Client(trust_env=False)
        else:
            client_args["http_client"] = httpx.Client(proxy=proxy)
    return OpenAI(**client_args)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class Translator:
    """LLM-based translation using OpenAI-compatible API.

    `translate()` is synchronous and intended to be called from a worker
    thread. Token usage of the most recent call is exposed via `last_usage`.
    """

    def __init__(
        self,
        api_base,
        api_key,
        model,
        target_language="zh",
        max_tokens=256,
        temperature=0.3,
        streaming=True,
        system_prompt=None,
        proxy="none",
        no_system_role=False,
        timeout=10,
    ):
        self._client = make_openai_client(api_base, api_key, proxy, timeout=timeout)
        # Some providers reject the "system" role; see _build_messages.
        self._no_system_role = no_system_role
        self._model = model
        self._target_language = target_language
        self._max_tokens = max_tokens
        self._temperature = temperature
        self._streaming = streaming
        # Used both as the HTTP timeout and as the streaming wall-clock deadline.
        self._timeout = timeout
        self._system_prompt_template = system_prompt or DEFAULT_PROMPT
        # Usage counters for the most recent call; 0 when the provider
        # omits usage information.
        self._last_prompt_tokens = 0
        self._last_completion_tokens = 0

    @property
    def last_usage(self):
        """(prompt_tokens, completion_tokens) from last translate call."""
        return self._last_prompt_tokens, self._last_completion_tokens

    def _build_system_prompt(self, source_lang):
        """Fill the prompt template with display names for both languages.

        Falls back to DEFAULT_PROMPT when a user-supplied template is
        malformed (e.g. stray braces break str.format).
        """
        src = LANGUAGE_DISPLAY.get(source_lang, source_lang)
        tgt = LANGUAGE_DISPLAY.get(self._target_language, self._target_language)
        try:
            return self._system_prompt_template.format(
                source_lang=src,
                target_lang=tgt,
            )
        except (KeyError, IndexError, ValueError) as e:
            log.warning(f"Bad prompt template, falling back to default: {e}")
            return DEFAULT_PROMPT.format(source_lang=src, target_lang=tgt)

    def _build_messages(self, system_prompt, text):
        """Build the chat payload.

        When the provider has no system role, the system prompt is folded
        into the single user turn instead.
        """
        if self._no_system_role:
            return [{"role": "user", "content": f"{system_prompt}\n{text}"}]
        return [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": text},
        ]

    def translate(self, text: str, source_language: str = "en"):
        """Translate `text` into the configured target language.

        Returns the translated string; may raise TimeoutError (streaming
        deadline) or provider exceptions from the underlying client.
        """
        system_prompt = self._build_system_prompt(source_language)
        if self._streaming:
            return self._translate_streaming(system_prompt, text)
        else:
            return self._translate_sync(system_prompt, text)

    def _translate_sync(self, system_prompt, text):
        """Single blocking completion request; records token usage."""
        resp = self._client.chat.completions.create(
            model=self._model,
            messages=self._build_messages(system_prompt, text),
            max_tokens=self._max_tokens,
            temperature=self._temperature,
        )
        # Reset first so a usage-less response reports zeros, not stale counts.
        self._last_prompt_tokens = 0
        self._last_completion_tokens = 0
        if resp.usage:
            self._last_prompt_tokens = resp.usage.prompt_tokens or 0
            self._last_completion_tokens = resp.usage.completion_tokens or 0
        return resp.choices[0].message.content.strip()

    def _translate_streaming(self, system_prompt, text):
        """Streaming completion with a hard wall-clock deadline.

        First attempts stream_options={"include_usage": True} (OpenAI
        extension); retries without it for providers that reject the
        parameter.
        """
        self._last_prompt_tokens = 0
        self._last_completion_tokens = 0
        base_kwargs = dict(
            model=self._model,
            messages=self._build_messages(system_prompt, text),
            max_tokens=self._max_tokens,
            temperature=self._temperature,
            stream=True,
        )
        try:
            stream = self._client.chat.completions.create(
                **base_kwargs,
                stream_options={"include_usage": True},
            )
        except Exception:
            # Provider rejected stream_options — retry the plain request.
            stream = self._client.chat.completions.create(**base_kwargs)

        # Total-time budget across all chunks, independent of per-read timeouts.
        deadline = time.monotonic() + self._timeout
        chunks = []
        for chunk in stream:
            if time.monotonic() > deadline:
                stream.close()
                raise TimeoutError(f"Translation exceeded {self._timeout}s total timeout")
            # When include_usage is honored, usage arrives on a trailing chunk.
            if hasattr(chunk, "usage") and chunk.usage:
                self._last_prompt_tokens = chunk.usage.prompt_tokens or 0
                self._last_completion_tokens = chunk.usage.completion_tokens or 0
            if chunk.choices:
                delta = chunk.choices[0].delta
                if delta.content:
                    chunks.append(delta.content)
        return "".join(chunks).strip()
|
vad_processor.py
ADDED
|
@@ -0,0 +1,328 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
import collections

import numpy as np
import torch

# Silero VAD runs on tiny tensors; a single thread avoids intra-op
# thread-pool overhead and keeps CPU usage predictable alongside ASR.
torch.set_num_threads(1)

log = logging.getLogger("LiveTrans.VAD")
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class VADProcessor:
    """Voice Activity Detection with multiple modes."""

    def __init__(
        self,
        sample_rate=16000,
        threshold=0.50,
        min_speech_duration=1.0,
        max_speech_duration=15.0,
        chunk_duration=0.032,
    ):
        self.sample_rate = sample_rate
        self.threshold = threshold  # Silero speech-probability cutoff
        self.energy_threshold = 0.02  # RMS cutoff used by "energy" mode
        self.min_speech_samples = int(min_speech_duration * sample_rate)
        self.max_speech_samples = int(max_speech_duration * sample_rate)
        self._chunk_duration = chunk_duration  # seconds per processed chunk
        self.mode = "silero"  # "silero", "energy", "disabled"

        # NOTE(review): torch.hub.load downloads the Silero VAD model on
        # first run (network required once, cached afterwards) — confirm
        # this is acceptable at startup.
        self._model, self._utils = torch.hub.load(
            repo_or_dir="snakers4/silero-vad",
            model="silero_vad",
            trust_repo=True,
        )
        self._model.eval()

        # Accumulated speech chunks for the current utterance.
        self._speech_buffer = []
        self._confidence_history = []  # per-chunk confidence, synced with _speech_buffer
        self._speech_samples = 0
        self._is_speaking = False
        self._silence_counter = 0  # consecutive silent chunks seen so far

        # Pre-speech ring buffer: capture onset consonants before VAD triggers
        self._pre_speech_chunks = 3  # ~96ms at 32ms/chunk
        self._pre_buffer = collections.deque(maxlen=self._pre_speech_chunks)

        # Silence timing
        self._silence_mode = "auto"  # "auto" or "fixed"
        self._fixed_silence_dur = 0.8
        self._silence_limit = self._seconds_to_chunks(0.8)

        # Progressive silence: shorter threshold when buffer is long
        self._progressive_tiers = [
            # (buffer_seconds, silence_multiplier)
            (3.0, 1.0),  # < 3s: use full silence_limit
            (6.0, 0.5),  # 3-6s: use half silence_limit
            (10.0, 0.25),  # 6-10s: use quarter silence_limit
        ]

        # Adaptive silence tracking: recent pause durations (seconds)
        self._pause_history = collections.deque(maxlen=50)
        self._adaptive_min = 0.3  # lower clamp for the adaptive limit (s)
        self._adaptive_max = 2.0  # upper clamp for the adaptive limit (s)

        # Exposed for monitor
        self.last_confidence = 0.0
|
| 68 |
+
|
| 69 |
+
def _seconds_to_chunks(self, seconds: float) -> int:
|
| 70 |
+
return max(1, round(seconds / self._chunk_duration))
|
| 71 |
+
|
| 72 |
+
def _update_adaptive_limit(self):
|
| 73 |
+
if len(self._pause_history) < 3:
|
| 74 |
+
return
|
| 75 |
+
pauses = sorted(self._pause_history)
|
| 76 |
+
# P75 of recent pauses × 1.2
|
| 77 |
+
idx = int(len(pauses) * 0.75)
|
| 78 |
+
p75 = pauses[min(idx, len(pauses) - 1)]
|
| 79 |
+
target = max(self._adaptive_min, min(self._adaptive_max, p75 * 1.2))
|
| 80 |
+
new_limit = self._seconds_to_chunks(target)
|
| 81 |
+
if new_limit != self._silence_limit:
|
| 82 |
+
log.debug(f"Adaptive silence: {target:.2f}s ({new_limit} chunks), P75={p75:.2f}s")
|
| 83 |
+
self._silence_limit = new_limit
|
| 84 |
+
|
| 85 |
+
def update_settings(self, settings: dict):
    """Apply a partial settings dict; keys that are absent leave current values untouched."""
    # Scalar settings map directly onto attributes.
    direct = {
        "vad_mode": "mode",
        "vad_threshold": "threshold",
        "energy_threshold": "energy_threshold",
        "silence_mode": "_silence_mode",
        "silence_duration": "_fixed_silence_dur",
    }
    for key, attr in direct.items():
        if key in settings:
            setattr(self, attr, settings[key])
    # Duration settings are stored as sample counts.
    for key, attr in (
        ("min_speech_duration", "min_speech_samples"),
        ("max_speech_duration", "max_speech_samples"),
    ):
        if key in settings:
            setattr(self, attr, int(settings[key] * self.sample_rate))
    # In fixed mode the chunk-count limit is derived from the configured
    # duration; in auto mode it is left to the adaptive tracker.
    if self._silence_mode == "fixed":
        self._silence_limit = self._seconds_to_chunks(self._fixed_silence_dur)
    log.info(
        f"VAD settings updated: mode={self.mode}, threshold={self.threshold}, "
        f"silence={self._silence_mode} "
        f"({self._silence_limit} chunks = {self._silence_limit * self._chunk_duration:.2f}s)"
    )
|
| 111 |
+
|
| 112 |
+
def _silero_confidence(self, audio_chunk: np.ndarray) -> float:
|
| 113 |
+
window_size = 512 if self.sample_rate == 16000 else 256
|
| 114 |
+
chunk = audio_chunk[:window_size]
|
| 115 |
+
if len(chunk) < window_size:
|
| 116 |
+
chunk = np.pad(chunk, (0, window_size - len(chunk)))
|
| 117 |
+
tensor = torch.from_numpy(chunk).float()
|
| 118 |
+
return self._model(tensor, self.sample_rate).item()
|
| 119 |
+
|
| 120 |
+
def _energy_confidence(self, audio_chunk: np.ndarray) -> float:
|
| 121 |
+
rms = float(np.sqrt(np.mean(audio_chunk**2)))
|
| 122 |
+
return min(1.0, rms / (self.energy_threshold * 2))
|
| 123 |
+
|
| 124 |
+
def _get_confidence(self, audio_chunk: np.ndarray) -> float:
|
| 125 |
+
if self.mode == "silero":
|
| 126 |
+
return self._silero_confidence(audio_chunk)
|
| 127 |
+
elif self.mode == "energy":
|
| 128 |
+
return self._energy_confidence(audio_chunk)
|
| 129 |
+
else: # disabled
|
| 130 |
+
return 1.0
|
| 131 |
+
|
| 132 |
+
def _get_effective_silence_limit(self) -> int:
|
| 133 |
+
"""Progressive silence: accept shorter pauses as split points when buffer is long."""
|
| 134 |
+
buf_seconds = self._speech_samples / self.sample_rate
|
| 135 |
+
multiplier = 1.0
|
| 136 |
+
for tier_sec, tier_mult in self._progressive_tiers:
|
| 137 |
+
if buf_seconds < tier_sec:
|
| 138 |
+
break
|
| 139 |
+
multiplier = tier_mult
|
| 140 |
+
effective = max(1, round(self._silence_limit * multiplier))
|
| 141 |
+
return effective
|
| 142 |
+
|
| 143 |
+
def process_chunk(self, audio_chunk: np.ndarray):
    """Feed one audio chunk into the VAD state machine.

    Returns a finished speech segment (np.ndarray) when one is finalized —
    either by enough trailing silence or by a max-duration split (in which
    case the remainder stays buffered) — otherwise returns None.
    """
    confidence = self._get_confidence(audio_chunk)
    self.last_confidence = confidence  # exposed for the monitor

    # Energy mode yields a normalized pseudo-confidence, so a fixed 0.5
    # cutoff is used; silero mode honors the user-configured threshold.
    effective_threshold = self.threshold if self.mode == "silero" else 0.5
    eff_silence_limit = self._get_effective_silence_limit()

    log.debug(
        f"VAD conf={confidence:.3f} ({self.mode}), speaking={self._is_speaking}, "
        f"buf={self._speech_samples / self.sample_rate:.1f}s, "
        f"silence_cnt={self._silence_counter}, limit={eff_silence_limit} "
        f"(base={self._silence_limit})"
    )

    if confidence >= effective_threshold:
        # Speech resumed after some silence: record the pause duration so
        # adaptive mode can learn the speaker's pausing habits.
        if self._is_speaking and self._silence_counter > 0:
            pause_dur = self._silence_counter * self._chunk_duration
            if pause_dur >= 0.1:  # ignore sub-100ms blips
                self._pause_history.append(pause_dur)
                if self._silence_mode == "auto":
                    self._update_adaptive_limit()

        if not self._is_speaking:
            # Speech onset: prepend pre-speech buffer to capture leading consonants.
            # Use threshold as confidence so these chunks don't create false valleys
            # in _find_best_split_index's smoothed curve.
            for pre_chunk in self._pre_buffer:
                self._speech_buffer.append(pre_chunk)
                self._confidence_history.append(effective_threshold)
                self._speech_samples += len(pre_chunk)
            self._pre_buffer.clear()

        self._is_speaking = True
        self._silence_counter = 0
        self._speech_buffer.append(audio_chunk)
        self._confidence_history.append(confidence)
        self._speech_samples += len(audio_chunk)
    elif self._is_speaking:
        # Silence while speaking: keep buffering — the pause may be mid-sentence.
        self._silence_counter += 1
        self._speech_buffer.append(audio_chunk)
        self._confidence_history.append(confidence)
        self._speech_samples += len(audio_chunk)
    else:
        # Not speaking: feed pre-speech ring buffer
        self._pre_buffer.append(audio_chunk)

    # Force segment if max duration reached — backtrack to find best split point
    if self._speech_samples >= self.max_speech_samples:
        return self._split_at_best_pause()

    # End segment after enough silence (progressive threshold)
    if self._is_speaking and self._silence_counter >= eff_silence_limit:
        if self._speech_samples >= self.min_speech_samples:
            return self._flush_segment()
        else:
            # Too short to be real speech — discard it.
            self._reset()
            return None

    return None
|
| 202 |
+
|
| 203 |
+
def _find_best_split_index(self) -> int:
|
| 204 |
+
"""Find the best chunk index to split at using smoothed confidence.
|
| 205 |
+
A sliding window average reduces single-chunk noise, then we find
|
| 206 |
+
the center of the lowest valley. Works even when the speaker never
|
| 207 |
+
fully pauses (e.g. fast commentary).
|
| 208 |
+
Returns -1 if no usable split point found."""
|
| 209 |
+
n = len(self._confidence_history)
|
| 210 |
+
if n < 4:
|
| 211 |
+
return -1
|
| 212 |
+
|
| 213 |
+
# Smooth confidence with a sliding window (~160ms = 5 chunks at 32ms)
|
| 214 |
+
smooth_win = min(5, n // 2)
|
| 215 |
+
smoothed = []
|
| 216 |
+
for i in range(n):
|
| 217 |
+
lo = max(0, i - smooth_win // 2)
|
| 218 |
+
hi = min(n, i + smooth_win // 2 + 1)
|
| 219 |
+
smoothed.append(sum(self._confidence_history[lo:hi]) / (hi - lo))
|
| 220 |
+
|
| 221 |
+
# Search in the latter 70% of the buffer (avoid splitting too early)
|
| 222 |
+
search_start = max(1, n * 3 // 10)
|
| 223 |
+
|
| 224 |
+
# Find global minimum in smoothed curve
|
| 225 |
+
min_val = float("inf")
|
| 226 |
+
min_idx = -1
|
| 227 |
+
for i in range(search_start, n):
|
| 228 |
+
if smoothed[i] <= min_val:
|
| 229 |
+
min_val = smoothed[i]
|
| 230 |
+
min_idx = i
|
| 231 |
+
|
| 232 |
+
if min_idx <= 0:
|
| 233 |
+
return -1
|
| 234 |
+
|
| 235 |
+
# Check if this is a meaningful dip
|
| 236 |
+
avg_conf = sum(smoothed[search_start:]) / max(1, n - search_start)
|
| 237 |
+
dip_ratio = min_val / max(avg_conf, 1e-6)
|
| 238 |
+
|
| 239 |
+
effective_threshold = self.threshold if self.mode == "silero" else 0.5
|
| 240 |
+
if min_val < effective_threshold or dip_ratio < 0.8:
|
| 241 |
+
log.debug(
|
| 242 |
+
f"Split point at chunk {min_idx}/{n}: "
|
| 243 |
+
f"smoothed={min_val:.3f}, avg={avg_conf:.3f}, dip_ratio={dip_ratio:.2f}"
|
| 244 |
+
)
|
| 245 |
+
return min_idx
|
| 246 |
+
|
| 247 |
+
# Fallback: any point below average is better than hard cut
|
| 248 |
+
if min_val < avg_conf:
|
| 249 |
+
log.debug(
|
| 250 |
+
f"Split point (fallback) at chunk {min_idx}/{n}: "
|
| 251 |
+
f"smoothed={min_val:.3f}, avg={avg_conf:.3f}"
|
| 252 |
+
)
|
| 253 |
+
return min_idx
|
| 254 |
+
|
| 255 |
+
return -1
|
| 256 |
+
|
| 257 |
+
def _split_at_best_pause(self):
|
| 258 |
+
"""When hitting max duration, backtrack to find the best pause point.
|
| 259 |
+
Flushes the first part and keeps the remainder for continued accumulation."""
|
| 260 |
+
if not self._speech_buffer:
|
| 261 |
+
return None
|
| 262 |
+
|
| 263 |
+
split_idx = self._find_best_split_index()
|
| 264 |
+
|
| 265 |
+
if split_idx <= 0:
|
| 266 |
+
# No good split point — hard flush everything
|
| 267 |
+
log.info(
|
| 268 |
+
f"Max duration reached, no good split point, "
|
| 269 |
+
f"hard flush {self._speech_samples / self.sample_rate:.1f}s"
|
| 270 |
+
)
|
| 271 |
+
return self._flush_segment()
|
| 272 |
+
|
| 273 |
+
# Split: emit first part, keep remainder
|
| 274 |
+
first_bufs = self._speech_buffer[:split_idx]
|
| 275 |
+
remain_bufs = self._speech_buffer[split_idx:]
|
| 276 |
+
remain_confs = self._confidence_history[split_idx:]
|
| 277 |
+
|
| 278 |
+
first_samples = sum(len(b) for b in first_bufs)
|
| 279 |
+
remain_samples = sum(len(b) for b in remain_bufs)
|
| 280 |
+
|
| 281 |
+
log.info(
|
| 282 |
+
f"Max duration split at {first_samples / self.sample_rate:.1f}s, "
|
| 283 |
+
f"keeping {remain_samples / self.sample_rate:.1f}s remainder"
|
| 284 |
+
)
|
| 285 |
+
|
| 286 |
+
segment = np.concatenate(first_bufs)
|
| 287 |
+
|
| 288 |
+
# Keep remainder in buffer for next segment
|
| 289 |
+
self._speech_buffer = remain_bufs
|
| 290 |
+
self._confidence_history = remain_confs
|
| 291 |
+
self._speech_samples = remain_samples
|
| 292 |
+
self._is_speaking = True
|
| 293 |
+
self._silence_counter = 0
|
| 294 |
+
|
| 295 |
+
return segment
|
| 296 |
+
|
| 297 |
+
def _flush_segment(self):
|
| 298 |
+
if not self._speech_buffer:
|
| 299 |
+
return None
|
| 300 |
+
# Speech density check: discard segments where most chunks are below threshold
|
| 301 |
+
if len(self._confidence_history) >= 4:
|
| 302 |
+
effective_threshold = self.threshold if self.mode == "silero" else 0.5
|
| 303 |
+
voiced = sum(1 for c in self._confidence_history if c >= effective_threshold)
|
| 304 |
+
density = voiced / len(self._confidence_history)
|
| 305 |
+
if density < 0.25:
|
| 306 |
+
dur = self._speech_samples / self.sample_rate
|
| 307 |
+
log.debug(
|
| 308 |
+
f"Low speech density {density:.0%} ({voiced}/{len(self._confidence_history)}), "
|
| 309 |
+
f"discarding {dur:.1f}s segment"
|
| 310 |
+
)
|
| 311 |
+
self._reset()
|
| 312 |
+
return None
|
| 313 |
+
segment = np.concatenate(self._speech_buffer)
|
| 314 |
+
self._reset()
|
| 315 |
+
return segment
|
| 316 |
+
|
| 317 |
+
def _reset(self):
|
| 318 |
+
self._speech_buffer = []
|
| 319 |
+
self._confidence_history = []
|
| 320 |
+
self._speech_samples = 0
|
| 321 |
+
self._is_speaking = False
|
| 322 |
+
self._silence_counter = 0
|
| 323 |
+
|
| 324 |
+
def flush(self):
    """Force out whatever is buffered (e.g. when capture stops).

    Returns the pending segment if it meets the minimum speech length,
    otherwise discards the buffer and returns None.
    """
    if self._speech_samples < self.min_speech_samples:
        self._reset()
        return None
    return self._flush_segment()
|