Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -9,18 +9,18 @@ from outetts.version.v2.prompt_processor import PromptProcessor
|
|
| 9 |
from outetts.version.playback import ModelOutput
|
| 10 |
|
| 11 |
model_path = hf_hub_download(
|
| 12 |
-
repo_id="
|
| 13 |
-
filename="unsloth.Q8_0.gguf",
|
| 14 |
)
|
| 15 |
|
| 16 |
model_config = outetts.GGUFModelConfig_v2(
|
| 17 |
model_path=model_path,
|
| 18 |
-
tokenizer_path="
|
| 19 |
)
|
| 20 |
|
| 21 |
interface = outetts.InterfaceGGUF(model_version="0.3", cfg=model_config)
|
| 22 |
audio_codec = AudioCodec()
|
| 23 |
-
prompt_processor = PromptProcessor("
|
| 24 |
whisper_model = whisper.load_model("base.en")
|
| 25 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 26 |
gguf_model = interface.get_model()
|
|
|
|
# --- Module-level model setup (post-commit state of app.py, lines 9-26) ---
# Runs once at import time: downloads the quantized TTS weights and builds
# the inference interface plus the auxiliary models the app uses.
from outetts.version.playback import ModelOutput

# Fetch the quantized GGUF weights from the Hugging Face Hub.
model_path = hf_hub_download(
    repo_id="KandirResearch/CiSiMi",
    filename="unsloth.Q8_0.gguf",  # a smaller alternative is unsloth.Q4_K_M.gguf
)

# GGUF model configuration; the tokenizer is pulled from the same Hub repo.
model_config = outetts.GGUFModelConfig_v2(
    model_path=model_path,
    tokenizer_path="KandirResearch/CiSiMi",
)

# TTS interface plus the codec/prompt helpers it works with.
interface = outetts.InterfaceGGUF(model_version="0.3", cfg=model_config)
audio_codec = AudioCodec()
prompt_processor = PromptProcessor("KandirResearch/CiSiMi")

# ASR model for transcription and the torch device selection.
whisper_model = whisper.load_model("base.en")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
gguf_model = interface.get_model()
|