Spaces:
Sleeping
Sleeping
Update app.py
Browse files — 🧪 use radames/sdxl-turbo-DPO-LoRA
app.py
CHANGED
|
from diffusers import DiffusionPipeline
import torch

# Base SDXL-Turbo checkpoint plus a DPO-finetuned LoRA adapter on top of it.
model_repo_id = "stabilityai/sdxl-turbo"  # Base model
lora_adapter_id = "radames/sdxl-turbo-DPO-LoRA"  # LoRA adapter
lora_adapter_name = "dpo-lora-sdxl-turbo"

# Half precision on GPU; CPU kernels require float32.
if torch.cuda.is_available():
    torch_dtype = torch.float16
    device = "cuda"
else:
    torch_dtype = torch.float32
    device = "cpu"

# Only request the "fp16" weight variant when we are actually loading in
# half precision — the fp16 files don't apply to a float32 CPU load.
pipe = DiffusionPipeline.from_pretrained(
    model_repo_id,
    torch_dtype=torch_dtype,
    variant="fp16" if torch_dtype == torch.float16 else None,
)
pipe = pipe.to(device)

# Load the LoRA adapter and activate it at full strength.
pipe.load_lora_weights(lora_adapter_id, adapter_name=lora_adapter_name)
pipe.set_adapters([lora_adapter_name], adapter_weights=[1.0])

# NOTE(review): `np` is imported above this hunk (the diff header shows
# "@@ -6,16 +6,22 @@ import random") — not visible here, assumed present.
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024