RishubhPar commited on
Commit
57e54a4
·
verified ·
1 Parent(s): b9e9d7e

Changed the pipeline definition.

Browse files
Files changed (1) hide show
  1. app.py +6 -7
app.py CHANGED
@@ -177,7 +177,7 @@ state_dict = torch.load(projector_path, map_location="cpu", weights_only=True)
177
  slider_projector.load_state_dict(state_dict, strict=True)
178
 
179
  _log("[worker] assembling pipeline (sharded/offloaded)…")
180
- pipe = FluxKontextSliderPipeline.from_pretrained(
181
  pretrained,
182
  token=token,
183
  trust_remote_code=True,
@@ -196,12 +196,11 @@ _log(f"[worker] loading LoRA from: {trained_models_path}")
196
  pipe.load_lora_weights(trained_models_path)
197
  _log("[worker] LoRA loaded.")
198
 
199
- # DO NOT pipe.to("cuda") here; keep auto device_map to avoid OOM
200
- PIPELINE = pipe
201
- if cuda_ok:
202
- free, total = torch.cuda.mem_get_info()
203
- _log(f"[worker] VRAM free/total: {free/1e9:.2f}/{total/1e9:.2f} GB")
204
- _log("[worker] PIPELINE ready.")
205
 
206
  # moving the pipeline to GPU
207
  PIPELINE.to('cuda')
 
177
  slider_projector.load_state_dict(state_dict, strict=True)
178
 
179
  _log("[worker] assembling pipeline (sharded/offloaded)…")
180
+ PIPELINE = FluxKontextSliderPipeline.from_pretrained(
181
  pretrained,
182
  token=token,
183
  trust_remote_code=True,
 
196
  pipe.load_lora_weights(trained_models_path)
197
  _log("[worker] LoRA loaded.")
198
 
199
+ # if cuda_ok:
200
+ # free, total = torch.cuda.mem_get_info()
201
+ # _log(f"[worker] VRAM free/total: {free/1e9:.2f}/{total/1e9:.2f} GB")
202
+ # _log("[worker] PIPELINE ready.")
203
+
 
204
 
205
  # moving the pipeline to GPU
206
  PIPELINE.to('cuda')