Spaces:
Sleeping
Sleeping
import os
import shutil
import tempfile
import time
from functools import lru_cache

import torch
from fastapi import FastAPI, File, Form, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from faster_whisper import WhisperModel
# Prefer GPU when available; float16 on CUDA, int8 quantization on CPU
# (faster-whisper's recommended pairings for speed/memory).
device = "cuda" if torch.cuda.is_available() else "cpu"
compute_type = "float16" if device == "cuda" else "int8"

app = FastAPI()

# CORS: restricted to the production front-end origin.
# Fix: the same origin was listed twice — deduplicated.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["https://www.cabane-data.fr"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Whisper checkpoints the API accepts (validated before loading).
AVAILABLE_MODELS = ["tiny", "base", "small", "medium", "large-v2"]


# Cache loaded models: loading a Whisper checkpoint is expensive (disk +
# VRAM/RAM), and the original re-loaded it on every request. The key space
# is bounded by AVAILABLE_MODELS, so an unbounded cache cannot grow past
# five entries.
@lru_cache(maxsize=None)
def load_model(model_name: str) -> WhisperModel:
    """Return a (cached) faster-whisper model for *model_name*."""
    return WhisperModel(model_name, device=device, compute_type=compute_type)
# NOTE(review): no route decorator was visible in the source, so this
# endpoint was never registered with the app — presumably lost when the
# file was copied. Path assumed; confirm against the front-end client.
@app.post("/transcribe")
async def transcribe(file: UploadFile = File(...), model_name: str = Form("base")):
    """Transcribe an uploaded audio file with faster-whisper.

    Parameters (multipart form):
        file: the audio upload (spooled to a temp file on disk, since
              faster-whisper transcribes from a file path).
        model_name: one of AVAILABLE_MODELS; defaults to "base".

    Returns a JSON dict with the transcribed text, wall-clock duration in
    seconds, and the model used — or, for an unknown model, an error dict
    (kept as a 200-with-error payload to preserve the existing client
    contract).
    """
    if model_name not in AVAILABLE_MODELS:
        return {"error": f"Modèle non reconnu. Choisissez parmi {AVAILABLE_MODELS}"}

    start = time.time()
    model = load_model(model_name)

    # Persist the upload to disk; delete=False so the path outlives the
    # `with` block for model.transcribe().
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp:
        shutil.copyfileobj(file.file, tmp)
        tmp_path = tmp.name

    try:
        segments, info = model.transcribe(tmp_path, beam_size=5)
        # `segments` is a lazy generator — joining consumes it fully while
        # the temp file still exists.
        text_result = " ".join(s.text for s in segments)
    finally:
        # Fix: previously the temp file leaked if transcription raised.
        os.remove(tmp_path)

    duration = round(time.time() - start, 2)
    return {
        "text": text_result,
        "duration": duration,
        "model_used": model_name,
    }