Alexander Bagus committed on
Commit
c9fb392
·
1 Parent(s): a385134
Files changed (1) hide show
  1. app.py +11 -4
app.py CHANGED
@@ -59,8 +59,15 @@ pipe_lora = QwenImagePipeline.from_pretrained(
59
  )
60
 
61
  pipe_imagen = QwenImagePipeline.from_pretrained(
62
- "Qwen/Qwen-Image",
63
- device="cuda"
 
 
 
 
 
 
 
64
  )
65
 
66
 
@@ -73,10 +80,10 @@ def generate_lora(
73
  ulid = str(ULID()).lower()[:12]
74
  print(f"ulid: {ulid}")
75
 
76
- print("before: ", input_images)
77
  # input_images = [item[0] for item in input_images]
78
  input_images = [Image.open(filepath) for filepath, _ in input_images]
79
- print("after: ", input_images)
80
  if not input_images:
81
  print("images are empty.")
82
  # Load images
 
59
  )
60
 
61
  pipe_imagen = QwenImagePipeline.from_pretrained(
62
+ torch_dtype=torch.bfloat16,
63
+ device="cuda",
64
+ model_configs=[
65
+ ModelConfig(download_source="huggingface", model_id="Qwen/Qwen-Image", origin_file_pattern="transformer/diffusion_pytorch_model*.safetensors", **vram_config),
66
+ ModelConfig(download_source="huggingface", model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors", **vram_config),
67
+ ModelConfig(download_source="huggingface", model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors", **vram_config),
68
+ ],
69
+ tokenizer_config=ModelConfig(download_source="huggingface", model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
70
+ vram_limit=torch.cuda.mem_get_info("cuda")[1] / (1024 ** 3) - 0.5,
71
  )
72
 
73
 
 
80
  ulid = str(ULID()).lower()[:12]
81
  print(f"ulid: {ulid}")
82
 
83
+ # print("before: ", input_images)
84
  # input_images = [item[0] for item in input_images]
85
  input_images = [Image.open(filepath) for filepath, _ in input_images]
86
+ # print("after: ", input_images)
87
  if not input_images:
88
  print("images are empty.")
89
  # Load images