from typing import Dict

import torch
from transformers import WhisperProcessor, WhisperForConditionalGeneration
from transformers.pipelines.audio_utils import ffmpeg_read

SAMPLE_RATE = 16000

class EndpointHandler:
    def __init__(self, path=""):
        # load the Whisper processor and model once at startup
        self.processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2")
        self.model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2")
        # force French transcription regardless of the audio's detected language
        self.forced_decoder_ids = self.processor.get_decoder_prompt_ids(
            language="french", task="transcribe"
        )


    def __call__(self, data: Dict[str, bytes]) -> Dict[str, str]:
        """
        Args:
            data (:obj:`dict`):
                includes the deserialized audio file as bytes under "inputs"
        Return:
            A :obj:`dict` with the transcribed text under "text"
        """
        # process input: decode the raw audio bytes into a 16 kHz waveform
        inputs = data.pop("inputs", data)
        audio_nparray = ffmpeg_read(inputs, SAMPLE_RATE)

        # run inference: extract log-Mel features, then generate token ids
        input_features = self.processor(
            audio_nparray, sampling_rate=SAMPLE_RATE, return_tensors="pt"
        ).input_features
        with torch.no_grad():
            predicted_ids = self.model.generate(
                input_features, forced_decoder_ids=self.forced_decoder_ids
            )

        # postprocess: decode the token ids back into text
        transcription = self.processor.batch_decode(
            predicted_ids, skip_special_tokens=True
        )[0]
        return {"text": transcription}