linoyts HF Staff commited on
Commit
b8f8b4e
·
1 Parent(s): 66dba9b

Update app.py (#2)

Browse files

- Update app.py (bb27da4ee127a82638b19ed7e9f95475c0db8d04)

Files changed (1) hide show
  1. app.py +22 -12
app.py CHANGED
@@ -296,6 +296,7 @@ class LTX23UnifiedPipeline:
296
  ic_loras: list[LoraPathStrengthAndSDOps] | None = None,
297
  device: torch.device | None = None,
298
  quantization: QuantizationPolicy | None = None,
 
299
  ):
300
  self.device = device or get_device()
301
  self.dtype = torch.bfloat16
@@ -334,17 +335,23 @@ class LTX23UnifiedPipeline:
334
  device=self.device,
335
  )
336
 
337
- # Read reference downscale factor from IC-LoRA metadata
338
- self.reference_downscale_factor = 1
339
- for lora in ic_loras:
340
- scale = _read_lora_reference_downscale_factor(lora.path)
341
- if scale != 1:
342
- if self.reference_downscale_factor not in (1, scale):
343
- raise ValueError(
344
- f"Conflicting reference_downscale_factor: "
345
- f"already {self.reference_downscale_factor}, got {scale}"
346
- )
347
- self.reference_downscale_factor = scale
 
 
 
 
 
 
348
 
349
  # ── Video reference conditioning (from ICLoraPipeline) ───────────────
350
  def _create_ic_conditionings(
@@ -715,8 +722,11 @@ pipeline = LTX23UnifiedPipeline(
715
  distilled_checkpoint_path=checkpoint_path,
716
  spatial_upsampler_path=spatial_upsampler_path,
717
  gemma_root=gemma_root,
718
- # ic_loras=ic_loras,
719
  quantization=QuantizationPolicy.fp8_cast(),
 
 
 
720
  )
721
 
722
  # Preload all models for ZeroGPU tensor packing.
 
296
  ic_loras: list[LoraPathStrengthAndSDOps] | None = None,
297
  device: torch.device | None = None,
298
  quantization: QuantizationPolicy | None = None,
299
+ reference_downscale_factor: int | None = None,
300
  ):
301
  self.device = device or get_device()
302
  self.dtype = torch.bfloat16
 
335
  device=self.device,
336
  )
337
 
338
+ # Reference downscale factor: explicit value takes priority,
339
+ # otherwise read from IC-LoRA metadata, otherwise default to 1.
340
+ if reference_downscale_factor is not None:
341
+ self.reference_downscale_factor = reference_downscale_factor
342
+ else:
343
+ self.reference_downscale_factor = 1
344
+ for lora in ic_loras:
345
+ scale = _read_lora_reference_downscale_factor(lora.path)
346
+ if scale != 1:
347
+ if self.reference_downscale_factor not in (1, scale):
348
+ raise ValueError(
349
+ f"Conflicting reference_downscale_factor: "
350
+ f"already {self.reference_downscale_factor}, got {scale}"
351
+ )
352
+ self.reference_downscale_factor = scale
353
+
354
+ logging.info(f"[Pipeline] reference_downscale_factor={self.reference_downscale_factor}")
355
 
356
  # ── Video reference conditioning (from ICLoraPipeline) ───────────────
357
  def _create_ic_conditionings(
 
722
  distilled_checkpoint_path=checkpoint_path,
723
  spatial_upsampler_path=spatial_upsampler_path,
724
  gemma_root=gemma_root,
725
+ # ic_loras=ic_loras, # LoRA already fused into checkpoint
726
  quantization=QuantizationPolicy.fp8_cast(),
727
+ # Union Control IC-LoRA was trained with reference videos at half resolution.
728
+ # Set explicitly so it works both with separate LoRA and fused checkpoints.
729
+ reference_downscale_factor=2,
730
  )
731
 
732
  # Preload all models for ZeroGPU tensor packing.