schrum2 committed on
Commit
ce21a23
·
verified ·
1 Parent(s): b84e862

Changing init in questionable ways

Browse files
models/pipeline_loader.py CHANGED
@@ -18,11 +18,7 @@ def get_pipeline(model_path):
18
  else:
19
  # Assume it's a Hugging Face Hub model ID
20
  # Try to load config to determine if it's text-conditional
21
- print(model_path)
22
- print("\n\n\n\n")
23
  config = DiffusionPipeline.load_config(model_path)
24
- print(config)
25
- print("\n\n\n\n")
26
  has_text_encoder = "text_encoder" in config
27
 
28
  if has_text_encoder:
 
18
  else:
19
  # Assume it's a Hugging Face Hub model ID
20
  # Try to load config to determine if it's text-conditional
 
 
21
  config = DiffusionPipeline.load_config(model_path)
 
 
22
  has_text_encoder = "text_encoder" in config
23
 
24
  if has_text_encoder:
models/text_diffusion_pipeline.py CHANGED
@@ -22,18 +22,20 @@ class PipelineOutput(NamedTuple):
22
  # Create a custom pipeline for text-conditional generation
23
  class TextConditionalDDPMPipeline(DDPMPipeline):
24
  def __init__(self, unet, scheduler, text_encoder=None, tokenizer=None, supports_pretrained_split=False, block_embeddings=None):
25
- super().__init__(unet=unet, scheduler=scheduler)
 
 
26
  self.text_encoder = text_encoder
27
  self.tokenizer = tokenizer
28
  self.supports_negative_prompt = hasattr(unet, 'negative_prompt_support') and unet.negative_prompt_support
29
  self.supports_pretrained_split = supports_pretrained_split
30
  self.block_embeddings = block_embeddings
31
-
32
  if self.tokenizer is None and self.text_encoder is not None:
33
  # Use the tokenizer from the text encoder if not provided
34
  self.tokenizer = self.text_encoder.tokenizer
35
 
36
- # Register the text_encoder so that .to(), .cpu(), .cuda(), etc. work correctly
37
  self.register_modules(
38
  unet=unet,
39
  scheduler=scheduler,
 
22
  # Create a custom pipeline for text-conditional generation
23
  class TextConditionalDDPMPipeline(DDPMPipeline):
24
  def __init__(self, unet, scheduler, text_encoder=None, tokenizer=None, supports_pretrained_split=False, block_embeddings=None):
25
+ # Don't call super().__init__() with arguments, call it without arguments first
26
+ super(DiffusionPipeline, self).__init__()
27
+
28
  self.text_encoder = text_encoder
29
  self.tokenizer = tokenizer
30
  self.supports_negative_prompt = hasattr(unet, 'negative_prompt_support') and unet.negative_prompt_support
31
  self.supports_pretrained_split = supports_pretrained_split
32
  self.block_embeddings = block_embeddings
33
+
34
  if self.tokenizer is None and self.text_encoder is not None:
35
  # Use the tokenizer from the text encoder if not provided
36
  self.tokenizer = self.text_encoder.tokenizer
37
 
38
+ # Register ALL modules at once, including the ones from the parent class
39
  self.register_modules(
40
  unet=unet,
41
  scheduler=scheduler,