Spaces:
Running
Running
VladGeekPro Copilot committed on
Commit ·
f856ebd
1
Parent(s): 60421a5
ChangedBackToWhisperV3
Browse files
Co-authored-by: Copilot <copilot@github.com>
- Dockerfile +2 -2
- app.py +2 -2
Dockerfile
CHANGED
|
@@ -2,13 +2,13 @@ FROM python:3.11-slim
|
|
| 2 |
|
| 3 |
ENV PYTHONUNBUFFERED=1 PIP_NO_CACHE_DIR=1 HOME=/home/user \
|
| 4 |
PATH=/home/user/.local/bin:$PATH PORT=7860 \
|
| 5 |
-
WHISPER_MODEL=deepdml/faster-whisper-large-v3-
|
| 6 |
OMP_NUM_THREADS=2 OPENBLAS_NUM_THREADS=2 \
|
| 7 |
TOKENIZERS_PARALLELISM=false \
|
| 8 |
WHISPER_CPU_THREADS=2 WHISPER_NUM_BEAMS=1 \
|
| 9 |
WHISPER_VAD_FILTER=0 WHISPER_PRELOAD_ON_START=1 \
|
| 10 |
WHISPER_BACKEND=auto WHISPER_REMOTE_PROVIDER=hf-inference \
|
| 11 |
-
WHISPER_REMOTE_MODEL=openai/whisper-large-v3
|
| 12 |
WHISPER_REMOTE_TIMEOUT=15 WHISPER_PREPROCESS_AUDIO=1
|
| 13 |
|
| 14 |
RUN apt-get update && apt-get install -y --no-install-recommends ffmpeg \
|
|
|
|
| 2 |
|
| 3 |
ENV PYTHONUNBUFFERED=1 PIP_NO_CACHE_DIR=1 HOME=/home/user \
|
| 4 |
PATH=/home/user/.local/bin:$PATH PORT=7860 \
|
| 5 |
+
WHISPER_MODEL=deepdml/faster-whisper-large-v3-ct2 \
|
| 6 |
OMP_NUM_THREADS=2 OPENBLAS_NUM_THREADS=2 \
|
| 7 |
TOKENIZERS_PARALLELISM=false \
|
| 8 |
WHISPER_CPU_THREADS=2 WHISPER_NUM_BEAMS=1 \
|
| 9 |
WHISPER_VAD_FILTER=0 WHISPER_PRELOAD_ON_START=1 \
|
| 10 |
WHISPER_BACKEND=auto WHISPER_REMOTE_PROVIDER=hf-inference \
|
| 11 |
+
WHISPER_REMOTE_MODEL=openai/whisper-large-v3 \
|
| 12 |
WHISPER_REMOTE_TIMEOUT=15 WHISPER_PREPROCESS_AUDIO=1
|
| 13 |
|
| 14 |
RUN apt-get update && apt-get install -y --no-install-recommends ffmpeg \
|
app.py
CHANGED
|
@@ -286,7 +286,7 @@ def transcribe_audio_remote(audio_path: str) -> tuple[str, float]:
|
|
| 286 |
"""Транскрибирует аудио через HF Inference."""
|
| 287 |
started = time.time()
|
| 288 |
client = get_hf_asr_client()
|
| 289 |
-
model_id = os.getenv("WHISPER_REMOTE_MODEL", "openai/whisper-large-v3
|
| 290 |
|
| 291 |
result = client.automatic_speech_recognition(audio=audio_path, model=model_id)
|
| 292 |
text = (getattr(result, "text", None) or "").strip()
|
|
@@ -328,7 +328,7 @@ def get_whisper_model() -> Any:
|
|
| 328 |
if _WHISPER_MODEL is None:
|
| 329 |
from faster_whisper import WhisperModel
|
| 330 |
|
| 331 |
-
model_id = os.getenv("WHISPER_MODEL", "deepdml/faster-whisper-large-v3-
|
| 332 |
cpu_threads = max(1, int(os.getenv("WHISPER_CPU_THREADS", "2")))
|
| 333 |
|
| 334 |
_WHISPER_MODEL = WhisperModel(
|
|
|
|
| 286 |
"""Транскрибирует аудио через HF Inference."""
|
| 287 |
started = time.time()
|
| 288 |
client = get_hf_asr_client()
|
| 289 |
+
model_id = os.getenv("WHISPER_REMOTE_MODEL", "openai/whisper-large-v3")
|
| 290 |
|
| 291 |
result = client.automatic_speech_recognition(audio=audio_path, model=model_id)
|
| 292 |
text = (getattr(result, "text", None) or "").strip()
|
|
|
|
| 328 |
if _WHISPER_MODEL is None:
|
| 329 |
from faster_whisper import WhisperModel
|
| 330 |
|
| 331 |
+
model_id = os.getenv("WHISPER_MODEL", "deepdml/faster-whisper-large-v3-ct2")
|
| 332 |
cpu_threads = max(1, int(os.getenv("WHISPER_CPU_THREADS", "2")))
|
| 333 |
|
| 334 |
_WHISPER_MODEL = WhisperModel(
|