tojonatolotra committed on
Commit
0ebe309
·
verified ·
1 Parent(s): 7a0abef

Update app.py

Browse files

🧪 use radames/sdxl-turbo-DPO-LoRA

Files changed (1) hide show
  1. app.py +9 -3
app.py CHANGED
@@ -6,16 +6,22 @@ import random
6
  from diffusers import DiffusionPipeline
7
  import torch
8
 
9
- device = "cuda" if torch.cuda.is_available() else "cpu"
10
- model_repo_id = "radames/sdxl-turbo-DPO-LoRA" # Replace to the model you would like to use
 
11
 
12
  if torch.cuda.is_available():
13
  torch_dtype = torch.float16
 
14
  else:
15
  torch_dtype = torch.float32
 
16
 
17
- pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
18
  pipe = pipe.to(device)
 
 
 
19
 
20
  MAX_SEED = np.iinfo(np.int32).max
21
  MAX_IMAGE_SIZE = 1024
 
6
  from diffusers import DiffusionPipeline
7
  import torch
8
 
9
+ model_repo_id = "stabilityai/sdxl-turbo" # Base model
10
+ lora_adapter_id = "radames/sdxl-turbo-DPO-LoRA" # LoRA adapter
11
+ lora_adapter_name = "dpo-lora-sdxl-turbo"
12
 
13
  if torch.cuda.is_available():
14
  torch_dtype = torch.float16
15
+ device = "cuda"
16
  else:
17
  torch_dtype = torch.float32
18
+ device = "cpu"
19
 
20
+ pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype, variant="fp16" if torch_dtype==torch.float16 else None)
21
  pipe = pipe.to(device)
22
+ # Load the LoRA adapter
23
+ pipe.load_lora_weights(lora_adapter_id, adapter_name=lora_adapter_name)
24
+ pipe.set_adapters([lora_adapter_name], adapter_weights=[1.0])
25
 
26
  MAX_SEED = np.iinfo(np.int32).max
27
  MAX_IMAGE_SIZE = 1024