File size: 774 Bytes
ac7d825
 
 
d79b34d
ac7d825
 
 
 
 
 
d79b34d
ac7d825
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
import soundfile as sf
import torch
from scipy.signal import resample_poly
from transformers import WhisperProcessor, WhisperForConditionalGeneration

class UrduWhisper:
    """Urdu speech-to-text wrapper around a fine-tuned Whisper-tiny model.

    Loads ``kingabzpro/whisper-tiny-urdu`` once at construction and runs
    inference on CPU.
    """

    # Whisper's feature extractor is trained on 16 kHz audio and does NOT
    # resample internally — input must be converted to this rate first.
    TARGET_SR = 16000

    def __init__(self):
        """Download/load the processor and model; pin inference to CPU."""
        print("Loading Urdu Whisper Tiny model...")
        self.processor = WhisperProcessor.from_pretrained("kingabzpro/whisper-tiny-urdu")
        self.model = WhisperForConditionalGeneration.from_pretrained("kingabzpro/whisper-tiny-urdu")
        self.model.to("cpu")
        self.model.eval()  # inference only: disable dropout etc.

    def transcribe(self, audio_file):
        """Transcribe an audio file to Urdu text.

        Args:
            audio_file: Path (or file-like object) readable by ``soundfile``.

        Returns:
            The decoded transcription as a single string.
        """
        audio, sr = sf.read(audio_file)
        # soundfile returns (frames, channels) for multi-channel input;
        # downmix to mono since Whisper expects a 1-D waveform.
        if audio.ndim > 1:
            audio = audio.mean(axis=1)
        # Resample to 16 kHz — feeding any other rate to the processor
        # yields wrong features (it trusts ``sampling_rate`` blindly).
        if sr != self.TARGET_SR:
            audio = resample_poly(audio, self.TARGET_SR, sr)
            sr = self.TARGET_SR
        inputs = self.processor(audio, sampling_rate=sr, return_tensors="pt")
        with torch.no_grad():
            predicted_ids = self.model.generate(inputs["input_features"])
        return self.processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]