# Export script for the fine-tuned Thai Whisper model:
# produces a TorchScript trace plus a Hugging Face save_pretrained() export.
"""Export a fine-tuned Thai Whisper model as TorchScript and in HF format."""
from transformers import WhisperForConditionalGeneration, WhisperProcessor
import torch

CHECKPOINT_DIR = "C:/Project/Private/AudioRecord/Spiritbox/Pro/whisper-thai-finetuned/checkpoint-500"
EXPORT_DIR = "C:/Project/Private/AudioRecord/Spiritbox/Pro/whisper-thai-finetuned/export"

# Model weights come from the current directory (script is run inside the
# model dir); the processor (tokenizer + feature extractor) from the checkpoint.
model = WhisperForConditionalGeneration.from_pretrained(".")
# Fix: WhisperProcessor was referenced but never imported (NameError).
processor = WhisperProcessor.from_pretrained(CHECKPOINT_DIR)
model.eval()  # disable dropout so the trace is deterministic


class _TraceWrapper(torch.nn.Module):
    """Adapter for torch.jit.trace.

    Whisper's forward() requires decoder_input_ids in addition to the audio
    features, and it returns a ModelOutput mapping; strict tracing needs
    tensor-only inputs and outputs, so expose a (features, ids) -> logits view.
    """

    def __init__(self, wrapped: torch.nn.Module) -> None:
        super().__init__()
        self.wrapped = wrapped

    def forward(self, input_features: torch.Tensor,
                decoder_input_ids: torch.Tensor) -> torch.Tensor:
        out = self.wrapped(input_features=input_features,
                           decoder_input_ids=decoder_input_ids)
        return out.logits


# Whisper's encoder expects (batch, 80 mel bins, 3000 frames) — 30 s of audio.
example_input = torch.randn(1, 80, 3000)
# Seed the decoder with its start token; tracing the bare model with only
# input_features fails because decoder_input_ids is a required input.
decoder_start = torch.tensor([[model.config.decoder_start_token_id]])

with torch.no_grad():  # no autograd graph in the exported artifact
    traced = torch.jit.trace(_TraceWrapper(model), (example_input, decoder_start))
traced.save("whisper_thai_traced.pt")

# Also save in Hugging Face format so from_pretrained() works on the export dir.
model.save_pretrained(EXPORT_DIR)
processor.save_pretrained(EXPORT_DIR)