from typing import Any, Dict

import torch
from transformers import pipeline
from transformers.pipelines.audio_utils import ffmpeg_read

# ffmpeg must be installed on the host; ffmpeg_read shells out to it to decode the audio bytes.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class EndpointHandler:
    def __init__(self, path=""):
        # Load the Whisper checkpoint at `path` as an automatic-speech-recognition pipeline.
        self.pipe = pipeline(task="automatic-speech-recognition", model=path, device=device)

    def __call__(self, data: Dict[str, Any]) -> Dict[str, str]:
        # The request payload carries the raw audio bytes under the "inputs" key.
        inputs = data.pop("inputs", data)
        # Decode the bytes with ffmpeg, resampling to the 16 kHz rate Whisper expects.
        audio_nparray = ffmpeg_read(inputs, 16000)
        # Force Korean transcription so the model does not auto-detect the language.
        self.pipe.model.config.forced_decoder_ids = self.pipe.tokenizer.get_decoder_prompt_ids(
            language="ko", task="transcribe"
        )
        # The pipeline accepts the numpy array directly and returns {"text": "..."}.
        return self.pipe(audio_nparray)
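

# --- Usage sketch (illustrative, not part of the handler) ---
# A minimal local smoke test, assuming this file is saved as handler.py and that
# "sample.wav" is a hypothetical local audio file; "openai/whisper-small" is a
# placeholder for whichever Whisper checkpoint the endpoint actually serves.
if __name__ == "__main__":
    with open("sample.wav", "rb") as f:
        payload = {"inputs": f.read()}
    handler = EndpointHandler(path="openai/whisper-small")
    print(handler(payload)["text"])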