Spaces:
Running
on
Zero
Running
on
Zero
Commit
·
c8672f7
1
Parent(s):
f4c725a
fix: pipeline
Browse files
- model.py +3 -2
- requirements.txt +1 -0
model.py
CHANGED
|
@@ -1,7 +1,6 @@
|
|
| 1 |
from transformers import pipeline
|
| 2 |
from accelerate import Accelerator
|
| 3 |
-
|
| 4 |
-
device = Accelerator().device
|
| 5 |
|
| 6 |
model_id = "JacobLinCool/whisper-large-v3-turbo-common_voice_19_0-zh-TW"
|
| 7 |
|
|
@@ -10,9 +9,11 @@ pipe = None
|
|
| 10 |
|
| 11 |
def load_model():
|
| 12 |
global pipe
|
|
|
|
| 13 |
pipe = pipeline("automatic-speech-recognition", model=model_id, device=device)
|
| 14 |
|
| 15 |
|
|
|
|
| 16 |
def transcribe_audio_local(audio: str) -> str:
|
| 17 |
print(f"{audio=}")
|
| 18 |
|
|
|
|
| 1 |
from transformers import pipeline
|
| 2 |
from accelerate import Accelerator
|
| 3 |
+
import spaces
|
|
|
|
| 4 |
|
| 5 |
model_id = "JacobLinCool/whisper-large-v3-turbo-common_voice_19_0-zh-TW"
|
| 6 |
|
|
|
|
| 9 |
|
| 10 |
def load_model():
|
| 11 |
global pipe
|
| 12 |
+
device = Accelerator().device
|
| 13 |
pipe = pipeline("automatic-speech-recognition", model=model_id, device=device)
|
| 14 |
|
| 15 |
|
| 16 |
+
@spaces.GPU()
|
| 17 |
def transcribe_audio_local(audio: str) -> str:
|
| 18 |
print(f"{audio=}")
|
| 19 |
|
requirements.txt
CHANGED
|
@@ -2,3 +2,4 @@ gradio==5.4.0
|
|
| 2 |
huggingface_hub==0.26.2
|
| 3 |
transformers
|
| 4 |
accelerate
|
|
|
|
|
|
| 2 |
huggingface_hub==0.26.2
|
| 3 |
transformers
|
| 4 |
accelerate
|
| 5 |
+
spaces
|