BiliSakura committed on
Commit
1274c46
·
verified ·
1 Parent(s): d295ca1

Add files using upload-large-folder tool

Browse files
Files changed (4) hide show
  1. README.md +1 -0
  2. model_index.json +1 -1
  3. modular_pipeline.py +31 -4
  4. pipeline.py +21 -8
README.md CHANGED
@@ -49,6 +49,7 @@ import torch
49
 
50
  pipe = DiffusionPipeline.from_pretrained(
51
  "BiliSakura/AeroGen",
 
52
  trust_remote_code=True,
53
  )
54
  pipe = pipe.to("cuda")
 
49
 
50
  pipe = DiffusionPipeline.from_pretrained(
51
  "BiliSakura/AeroGen",
52
+ custom_pipeline="pipeline.py",
53
  trust_remote_code=True,
54
  )
55
  pipe = pipe.to("cuda")
model_index.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_class_name": ["pipeline", "AeroGenPipeline"],
3
  "_diffusers_version": "0.25.0",
4
  "condition_encoder": [
5
  "pipeline",
 
1
  {
2
+ "_class_name": "AeroGenPipeline",
3
  "_diffusers_version": "0.25.0",
4
  "condition_encoder": [
5
  "pipeline",
modular_pipeline.py CHANGED
@@ -106,12 +106,33 @@ def load_component(model_path: Path, name: str):
106
  with open(comp_path / "config.json") as f:
107
  cfg = json.load(f)
108
 
109
- # Diffusers native format (e.g. AutoencoderKL.save_pretrained): no "target" key
110
- if "target" not in cfg and name == "vae":
111
  from diffusers import AutoencoderKL
112
  return AutoencoderKL.from_pretrained(comp_path)
113
 
114
- component = _instantiate_from_config(cfg)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
  safetensors_path = comp_path / "diffusion_pytorch_model.safetensors"
116
  bin_path = comp_path / "diffusion_pytorch_model.bin"
117
  if safetensors_path.exists():
@@ -131,7 +152,13 @@ def load_component(model_path: Path, name: str):
131
  # Older checkpoints may have been saved without the "model." prefix.
132
  if name == "unet" and state and not any(k.startswith("model.") for k in state.keys()):
133
  state = {"model." + k: v for k, v in state.items()}
134
- component.load_state_dict(state, strict=True)
 
 
 
 
 
 
135
  component.eval()
136
  return component
137
 
 
106
  with open(comp_path / "config.json") as f:
107
  cfg = json.load(f)
108
 
109
+ # VAE loading: support both native diffusers format and legacy LDM config.
110
+ if name == "vae" and "target" not in cfg:
111
  from diffusers import AutoencoderKL
112
  return AutoencoderKL.from_pretrained(comp_path)
113
 
114
+ if name == "vae" and cfg.get("target") == "ldm.models.autoencoder.AutoencoderKL":
115
+ from diffusers import AutoencoderKL
116
+
117
+ ddconfig = (cfg.get("params") or {}).get("ddconfig") or {}
118
+ ch = int(ddconfig.get("ch", 128))
119
+ ch_mult = ddconfig.get("ch_mult") or [1, 2, 4, 4]
120
+ block_out_channels = [ch * int(m) for m in ch_mult]
121
+
122
+ component = AutoencoderKL(
123
+ in_channels=int(ddconfig.get("in_channels", 3)),
124
+ out_channels=int(ddconfig.get("out_ch", 3)),
125
+ down_block_types=["DownEncoderBlock2D"] * len(block_out_channels),
126
+ up_block_types=["UpDecoderBlock2D"] * len(block_out_channels),
127
+ block_out_channels=block_out_channels,
128
+ layers_per_block=int(ddconfig.get("num_res_blocks", 2)),
129
+ latent_channels=int(ddconfig.get("z_channels", 4)),
130
+ sample_size=int(ddconfig.get("resolution", 256)),
131
+ act_fn="silu",
132
+ norm_num_groups=32,
133
+ )
134
+ else:
135
+ component = _instantiate_from_config(cfg)
136
  safetensors_path = comp_path / "diffusion_pytorch_model.safetensors"
137
  bin_path = comp_path / "diffusion_pytorch_model.bin"
138
  if safetensors_path.exists():
 
152
  # Older checkpoints may have been saved without the "model." prefix.
153
  if name == "unet" and state and not any(k.startswith("model.") for k in state.keys()):
154
  state = {"model." + k: v for k, v in state.items()}
155
+ if name == "vae" and cfg.get("target") == "ldm.models.autoencoder.AutoencoderKL":
156
+ from diffusers.loaders.single_file_utils import convert_ldm_vae_checkpoint
157
+
158
+ state = convert_ldm_vae_checkpoint(state, dict(component.config))
159
+ component.load_state_dict(state, strict=False)
160
+ else:
161
+ component.load_state_dict(state, strict=True)
162
  component.eval()
163
  return component
164
 
pipeline.py CHANGED
@@ -36,14 +36,27 @@ from diffusers import DDIMScheduler, DiffusionPipeline
36
  from diffusers.utils import BaseOutput
37
  from PIL import Image
38
 
39
- from modular_pipeline import (
40
- ensure_ldm_path,
41
- ensure_ldm_path_from_config,
42
- load_component,
43
- load_components,
44
- create_scheduler,
45
- _instantiate_from_config,
46
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
 
49
  @dataclass
 
36
  from diffusers.utils import BaseOutput
37
  from PIL import Image
38
 
39
+ try:
40
+ # Dynamic modules loaded by diffusers are executed as package modules.
41
+ from .modular_pipeline import (
42
+ ensure_ldm_path,
43
+ ensure_ldm_path_from_config,
44
+ load_component,
45
+ load_components,
46
+ create_scheduler,
47
+ _instantiate_from_config,
48
+ )
49
+ except ImportError:
50
+ # Fallback for direct local execution (e.g. `python pipeline.py`).
51
+ import importlib
52
+
53
+ _mp = importlib.import_module("modular_pipeline")
54
+ ensure_ldm_path = _mp.ensure_ldm_path
55
+ ensure_ldm_path_from_config = _mp.ensure_ldm_path_from_config
56
+ load_component = _mp.load_component
57
+ load_components = _mp.load_components
58
+ create_scheduler = _mp.create_scheduler
59
+ _instantiate_from_config = _mp._instantiate_from_config
60
 
61
 
62
  @dataclass