# dr-mohamaid / app.py
# (Hugging Face Space page header: uploaded by cabanemaths, "Update app.py",
#  commit 1f9fe2e, verified — kept as a comment so the file stays runnable)
from fastapi import FastAPI, UploadFile, File, Form
from faster_whisper import WhisperModel
import uvicorn
import tempfile
import shutil
import torch
import os
from fastapi.middleware.cors import CORSMiddleware
# Pick the GPU when one is visible; compute_type is the matching fast
# precision (float16 on CUDA, int8 quantization on CPU).
device = "cuda" if torch.cuda.is_available() else "cpu"
compute_type = "float16" if device == "cuda" else "int8"

# CORS allow-list; "*" accepts requests from any origin.
origins = [
    "*",  # your WordPress site
]

app = FastAPI()

# Allow cross-origin browser calls (the front-end lives on another domain).
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# === Available Whisper model sizes ===
AVAILABLE_MODELS = ["tiny", "base", "small", "medium", "large-v2"]
def load_model(model_name: str) -> "WhisperModel":
    """Load a faster-whisper model on the device detected at startup.

    Uses the module-level ``device`` and ``compute_type`` (float16 on CUDA,
    int8 on CPU) instead of forcing int8 everywhere — the original hardcoded
    "int8", which silently ignored the float16 path prepared for GPUs.

    Parameters
    ----------
    model_name : one of AVAILABLE_MODELS, e.g. "base" or "large-v2".

    Returns
    -------
    A ready-to-use WhisperModel instance.
    """
    return WhisperModel(model_name, device=device, compute_type=compute_type)
# === REST API endpoint ===
@app.post("/transcribe")
async def transcribe(
    file: UploadFile = File(...),
    model_name: str = Form("base")  # defaults to "base"
):
    """Transcribe an uploaded audio file with the requested Whisper model.

    Returns a JSON dict with the model used, the detected language and its
    probability, and the full transcription text. An unknown model name
    yields an ``{"error": ...}`` payload (kept as a 200 response for
    backward compatibility with existing callers).
    """
    if model_name not in AVAILABLE_MODELS:
        return {"error": f"Modèle non reconnu. Choisissez parmi {AVAILABLE_MODELS}"}

    model = load_model(model_name)

    # Keep the uploaded file's extension when available so the decoder gets
    # a sensible hint; fall back to the historical ".mp3" default.
    suffix = os.path.splitext(file.filename or "")[1] or ".mp3"
    tmp_path = None
    try:
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
            shutil.copyfileobj(file.file, tmp)
            tmp_path = tmp.name

        segments, info = model.transcribe(tmp_path, beam_size=5)
        text_result = " ".join(segment.text for segment in segments)
    finally:
        # Always remove the temp file, even if transcription raises —
        # the original leaked it on any exception after creation.
        if tmp_path and os.path.exists(tmp_path):
            os.remove(tmp_path)

    return {
        "model_used": model_name,
        "language": info.language,
        "probability": info.language_probability,
        "transcription": text_result,
    }
if __name__ == "__main__":
    # Serve the ASGI app directly when run as a script.
    # NOTE(review): 7860 looks like the Hugging Face Spaces default port — confirm.
    uvicorn.run("app:app", host="0.0.0.0", port=7860, reload=False)