import os
import io
import torch
import torchaudio
from fastapi import FastAPI, UploadFile, File, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
from huggingface_hub import snapshot_download

# ---- Robust HF cache setup (writable in Docker/Spaces) ----
HF_HOME = os.environ.get("HF_HOME", "/tmp/hf")
os.environ["HF_HOME"] = HF_HOME
os.environ["TRANSFORMERS_CACHE"] = os.path.join(HF_HOME, "transformers")
os.makedirs(os.environ["TRANSFORMERS_CACHE"], exist_ok=True)
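
# A minimal Dockerfile sketch (an assumption, not part of this app) of how a
# Docker/Spaces image could make the cache location explicit and writable
# before the server starts; the paths are illustrative:
#
#   ENV HF_HOME=/tmp/hf
#   RUN mkdir -p /tmp/hf/transformers && chmod -R 777 /tmp/hf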

MODEL_ID = os.environ.get("MODEL_ID", "Mustafaa4a/ASR-Somali")
HF_TOKEN = os.environ.get("HF_TOKEN")  # only needed for private repos

app = FastAPI(title="Somali ASR API")

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

processor = None
model = None

@app.on_event("startup")
def _load_model():
    global processor, model
    try:
        # Download the repo snapshot to a local, writable dir
        local_dir = snapshot_download(
            repo_id=MODEL_ID,
            token=HF_TOKEN,
            cache_dir=HF_HOME,
        )
        processor = Wav2Vec2Processor.from_pretrained(local_dir)
        model = Wav2Vec2ForCTC.from_pretrained(local_dir)
        model.eval()
    except Exception as e:
        # Surface a clear error instead of crashing Uvicorn silently
        raise RuntimeError(f"Failed to load model '{MODEL_ID}': {e}") from e

@app.get("/health")
def health():
    return {"status": "ok", "model_loaded": model is not None, "model_id": MODEL_ID}

@app.get("/")
def root():
    return {"message": "Somali Speech-to-Text API is running."}

@app.post("/transcribe")
async def transcribe(file: UploadFile = File(...)):
    if model is None or processor is None:
        raise HTTPException(status_code=503, detail="Model not loaded yet. Try again shortly.")

    # Read bytes
    audio_bytes = await file.read()
    if not audio_bytes:
        raise HTTPException(status_code=400, detail="Empty file")

    # Load audio from bytes
    try:
        audio_stream = io.BytesIO(audio_bytes)
        # torchaudio can auto-detect many formats if system codecs are present
        waveform, sample_rate = torchaudio.load(audio_stream)
    except Exception:
        # As a fallback, try forcing WAV (in case the client always sends WAV)
        try:
            audio_stream = io.BytesIO(audio_bytes)
            waveform, sample_rate = torchaudio.load(audio_stream, format="wav")
        except Exception as e:
            raise HTTPException(status_code=400, detail=f"Could not read audio: {e}")

    # Mono + 16k resample for Wav2Vec2
    if waveform.dim() == 2 and waveform.size(0) > 1:
        waveform = torch.mean(waveform, dim=0, keepdim=True)  # convert to mono
    if sample_rate != 16000:
        resampler = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)
        waveform = resampler(waveform)

    # The feature extractor expects a 1-D float array; convert explicitly
    inputs = processor(waveform.squeeze().numpy(), sampling_rate=16000, return_tensors="pt")

    with torch.no_grad():
        logits = model(**inputs).logits

    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = processor.decode(predicted_ids[0])
    return {"transcription": transcription}