Spaces:
Running
Running
Update inference.py
Browse files — inference.py: +2 −0
inference.py
CHANGED
|
@@ -2,6 +2,7 @@ import os
|
|
| 2 |
import lightning as L
|
| 3 |
import torch
|
| 4 |
import time
|
|
|
|
| 5 |
from snac import SNAC
|
| 6 |
from litgpt import Tokenizer
|
| 7 |
from litgpt.utils import (
|
|
@@ -380,6 +381,7 @@ class OmniInference:
|
|
| 380 |
download_model(ckpt_dir)
|
| 381 |
self.fabric, self.model, self.text_tokenizer, self.snacmodel, self.whispermodel = load_model(ckpt_dir, device)
|
| 382 |
|
|
|
|
| 383 |
def warm_up(self, sample='./data/samples/output1.wav'):
|
| 384 |
for _ in self.run_AT_batch_stream(sample):
|
| 385 |
pass
|
|
|
|
| 2 |
import lightning as L
|
| 3 |
import torch
|
| 4 |
import time
|
| 5 |
+
import spaces
|
| 6 |
from snac import SNAC
|
| 7 |
from litgpt import Tokenizer
|
| 8 |
from litgpt.utils import (
|
|
|
|
| 381 |
download_model(ckpt_dir)
|
| 382 |
self.fabric, self.model, self.text_tokenizer, self.snacmodel, self.whispermodel = load_model(ckpt_dir, device)
|
| 383 |
|
| 384 |
+
@spaces.GPU
|
| 385 |
def warm_up(self, sample='./data/samples/output1.wav'):
|
| 386 |
for _ in self.run_AT_batch_stream(sample):
|
| 387 |
pass
|