Update app.py
Browse files
app.py
CHANGED
|
@@ -9,6 +9,11 @@ from transformers import AutoProcessor, ClapModel
|
|
| 9 |
from model.udit import UDiT
|
| 10 |
from vae_modules.autoencoder_wrapper import Autoencoder
|
| 11 |
import numpy as np
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
|
| 13 |
diffusion_config = './config/SoloAudio.yaml'
|
| 14 |
diffusion_ckpt = './pretrained_models/soloaudio_v2.pt'
|
|
@@ -22,8 +27,8 @@ with open(diffusion_config, 'r') as fp:
|
|
| 22 |
|
| 23 |
v_prediction = diff_config["ddim"]["v_prediction"]
|
| 24 |
|
| 25 |
-
clapmodel = ClapModel.from_pretrained("laion/larger_clap_general").to(device)
|
| 26 |
-
processor = AutoProcessor.from_pretrained('laion/larger_clap_general')
|
| 27 |
autoencoder = Autoencoder(autoencoder_path, 'stable_vae', quantization_first=True)
|
| 28 |
autoencoder.eval()
|
| 29 |
autoencoder.to(device)
|
|
|
|
| 9 |
from model.udit import UDiT
|
| 10 |
from vae_modules.autoencoder_wrapper import Autoencoder
|
| 11 |
import numpy as np
|
| 12 |
+
from huggingface_hub import snapshot_download
|
| 13 |
+
|
| 14 |
+
snapshot_download(repo_id="laion/larger_clap_general",
|
| 15 |
+
local_dir="./larger_clap_general",
|
| 16 |
+
local_dir_use_symlinks=False)
|
| 17 |
|
| 18 |
diffusion_config = './config/SoloAudio.yaml'
|
| 19 |
diffusion_ckpt = './pretrained_models/soloaudio_v2.pt'
|
|
|
|
| 27 |
|
| 28 |
v_prediction = diff_config["ddim"]["v_prediction"]
|
| 29 |
|
| 30 |
+
clapmodel = ClapModel.from_pretrained("./larger_clap_general").to(device)
|
| 31 |
+
processor = AutoProcessor.from_pretrained('./larger_clap_general')
|
| 32 |
autoencoder = Autoencoder(autoencoder_path, 'stable_vae', quantization_first=True)
|
| 33 |
autoencoder.eval()
|
| 34 |
autoencoder.to(device)
|