Update README.md
#1
by
hackergeek98
- opened
README.md
CHANGED
|
@@ -12,13 +12,18 @@ pipeline_tag: automatic-speech-recognition
|
|
| 12 |
library_name: transformers
|
| 13 |
---
|
| 14 |
How to use the model in Colab:
|
|
|
|
| 15 |
!pip install torch torchaudio transformers librosa gradio
|
|
|
|
| 16 |
from transformers import WhisperProcessor, WhisperForConditionalGeneration
|
|
|
|
| 17 |
import torch
|
| 18 |
|
| 19 |
# Load your fine-tuned Whisper model and processor
|
| 20 |
model_name = "hackergeek98/tinyyyy_whisper"
|
|
|
|
| 21 |
processor = WhisperProcessor.from_pretrained(model_name)
|
|
|
|
| 22 |
model = WhisperForConditionalGeneration.from_pretrained(model_name)
|
| 23 |
|
| 24 |
# Force the model to transcribe in Persian
|
|
|
|
| 12 |
library_name: transformers
|
| 13 |
---
|
| 14 |
How to use the model in Colab:
|
| 15 |
+
|
| 16 |
!pip install torch torchaudio transformers librosa gradio
|
| 17 |
+
|
| 18 |
from transformers import WhisperProcessor, WhisperForConditionalGeneration
|
| 19 |
+
|
| 20 |
import torch
|
| 21 |
|
| 22 |
# Load your fine-tuned Whisper model and processor
|
| 23 |
model_name = "hackergeek98/tinyyyy_whisper"
|
| 24 |
+
|
| 25 |
processor = WhisperProcessor.from_pretrained(model_name)
|
| 26 |
+
|
| 27 |
model = WhisperForConditionalGeneration.from_pretrained(model_name)
|
| 28 |
|
| 29 |
# Force the model to transcribe in Persian
|