schrum2 committed
Commit ff12ff1 · 1 Parent(s): 008c17f

removed pipeline from repo

Files changed (1)
  1. text_diffusion_pipeline.py +0 -442
text_diffusion_pipeline.py DELETED
@@ -1,442 +0,0 @@
- import torch
- import torch.nn.functional as F
- from typing import NamedTuple, Optional
- import os
- from diffusers import DDPMPipeline, UNet2DConditionModel, DDPMScheduler
- import json
- # Running the main at the end of this file requires messing with this import
- from models.text_model import TransformerModel
- from transformers import AutoTokenizer, AutoModel
- import util.common_settings as common_settings
- import models.sentence_transformers_helper as st_helper
- import models.text_model as text_model
- from models.general_training_helper import get_scene_from_embeddings
-
- class PipelineOutput(NamedTuple):
-     images: torch.Tensor
-
-
- # Create a custom pipeline for text-conditional generation
- class TextConditionalDDPMPipeline(DDPMPipeline):
-     def __init__(self, unet, scheduler, text_encoder=None, tokenizer=None, supports_pretrained_split=False, block_embeddings=None):
-         super().__init__(unet=unet, scheduler=scheduler)
-         self.text_encoder = text_encoder
-         self.tokenizer = tokenizer
-         self.supports_negative_prompt = hasattr(unet, 'negative_prompt_support') and unet.negative_prompt_support
-         self.supports_pretrained_split = supports_pretrained_split
-         self.block_embeddings = block_embeddings
-
-         if self.tokenizer is None and self.text_encoder is not None:
-             # Use the tokenizer from the text encoder if not provided
-             self.tokenizer = self.text_encoder.tokenizer
-
-         # Register the text_encoder so that .to(), .cpu(), .cuda(), etc. work correctly
-         self.register_modules(
-             unet=unet,
-             scheduler=scheduler,
-             text_encoder=self.text_encoder,
-             tokenizer=self.tokenizer,
-         )
-
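-     # A minimal construction sketch (the component variables are illustrative):
-     #   pipe = TextConditionalDDPMPipeline(unet=my_unet, scheduler=DDPMScheduler(), text_encoder=my_encoder)
-     # With text_encoder=None, calling the pipeline with a caption raises a ValueError.
-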
-     # Override the to() method to ensure text_encoder is moved to the correct device
-     def to(self, device=None, dtype=None):
-         # Call the parent's to() method first
-         pipeline = super().to(device, dtype)
-
-         # Additionally move the text_encoder to the device
-         if self.text_encoder is not None:
-             self.text_encoder.to(device)
-
-         return pipeline
-
-     def save_pretrained(self, save_directory):
-         os.makedirs(save_directory, exist_ok=True)
-         super().save_pretrained(save_directory)  # saves UNet and scheduler
-
-         # Save block_embeddings tensor if it exists
-         if self.block_embeddings is not None:
-             torch.save(self.block_embeddings, os.path.join(save_directory, "block_embeddings.pt"))
-
-         # Save supports_negative_prompt and supports_pretrained_split flags
-         with open(os.path.join(save_directory, "pipeline_config.json"), "w") as f:
-             json.dump({
-                 "supports_negative_prompt": self.supports_negative_prompt,
-                 "supports_pretrained_split": self.supports_pretrained_split,
-                 "text_encoder_type": type(self.text_encoder).__name__
-             }, f)
-
-         # Text encoder/tokenizer saving differs depending on whether we're using a larger pretrained model
-         if isinstance(self.text_encoder, TransformerModel):
-             # Save custom text encoder (the isinstance check already guarantees it is not None)
-             self.text_encoder.save_pretrained(os.path.join(save_directory, "text_encoder"))
-         elif self.text_encoder is not None:
-             # Save pretrained encoder/tokenizer by name, so we can load from huggingface instead of saving a giant local model
-             text_encoder_info = {
-                 "text_encoder_name": self.text_encoder.config.name_or_path,
-                 "tokenizer_name": self.tokenizer.name_or_path,
-             }
-
-             text_encoder_directory = os.path.join(save_directory, "text_encoder")
-             os.makedirs(text_encoder_directory, exist_ok=True)
-
-             with open(os.path.join(text_encoder_directory, "loading_info.json"), "w") as f:
-                 json.dump(text_encoder_info, f)
-
-
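-     # After save_pretrained, the directory contains unet/ and scheduler/ (from the parent class),
-     # pipeline_config.json, optionally block_embeddings.pt, and text_encoder/ holding either
-     # full TransformerModel weights or a loading_info.json naming a hub model.
-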
-     @classmethod
-     def from_pretrained(cls, pretrained_model_path, **kwargs):
-         # from diffusers.utils import load_config, load_state_dict
-         # Load model_index.json
-         # model_index = load_config(pretrained_model_path)
-
-         # Load components manually
-         unet_path = os.path.join(pretrained_model_path, "unet")
-         unet = UNet2DConditionModel.from_pretrained(unet_path)
-
-         scheduler_path = os.path.join(pretrained_model_path, "scheduler")
-         # Have heard that DDIMScheduler might be faster for inference, though not necessarily better
-         scheduler = DDPMScheduler.from_pretrained(scheduler_path)
-
-         tokenizer = None
-         text_encoder_path = os.path.join(pretrained_model_path, "text_encoder")
-
-         if os.path.exists(text_encoder_path):
-             # Test for the new saving system, where we save a simple config file
-             if os.path.exists(os.path.join(text_encoder_path, "loading_info.json")):
-                 with open(os.path.join(text_encoder_path, "loading_info.json"), "r") as f:
-                     encoder_config = json.load(f)
-
-                 text_encoder = AutoModel.from_pretrained(encoder_config['text_encoder_name'], trust_remote_code=True)
-                 tokenizer = AutoTokenizer.from_pretrained(encoder_config['tokenizer_name'])
-
-             # Legacy loading system, loads models directly if the whole thing is saved in the directory
-             else:
-                 try:
-                     text_encoder = AutoModel.from_pretrained(text_encoder_path, local_files_only=True, trust_remote_code=True)
-                     tokenizer = AutoTokenizer.from_pretrained(text_encoder_path, local_files_only=True)
-                 except (ValueError, KeyError):
-                     text_encoder = TransformerModel.from_pretrained(text_encoder_path)
-                     tokenizer = text_encoder.tokenizer
-         else:
-             text_encoder = None
-
-         # Instantiate the pipeline
-         pipeline = cls(
-             unet=unet,
-             scheduler=scheduler,
-             text_encoder=text_encoder,
-             tokenizer=tokenizer,
-             **kwargs,
-         )
-
-         # Load block embeddings if present
-         block_embeds_path = os.path.join(pretrained_model_path, "block_embeddings.pt")
-         if os.path.exists(block_embeds_path):
-             pipeline.block_embeddings = torch.load(block_embeds_path, map_location="cpu")
-         else:
-             pipeline.block_embeddings = None
-
-         # Load supports_negative_prompt and supports_pretrained_split flags if present
-         config_path = os.path.join(pretrained_model_path, "pipeline_config.json")
-         if os.path.exists(config_path):
-             with open(config_path, "r") as f:
-                 config = json.load(f)
-             pipeline.supports_negative_prompt = config.get("supports_negative_prompt", False)
-             pipeline.supports_pretrained_split = config.get("supports_pretrained_split", False)
-         return pipeline
-
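-     # A minimal save/load round-trip sketch (the directory path is an illustrative assumption):
-     #   pipe.save_pretrained("checkpoints/text_diffusion")
-     #   pipe = TextConditionalDDPMPipeline.from_pretrained("checkpoints/text_diffusion")
-     #   pipe.to("cuda")
-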
-     # --- Handle batching for captions ---
-     def _prepare_text_batch(self, text: Optional[str | list[str]], batch_size: int, name: str) -> Optional[list[str]]:
-         if text is None:
-             return None
-         if isinstance(text, str):
-             return [text] * batch_size
-         if isinstance(text, list):
-             if len(text) == 1:
-                 return text * batch_size
-             if len(text) != batch_size:
-                 raise ValueError(f"{name} list length {len(text)} does not match batch_size {batch_size}")
-             return text
-         raise ValueError(f"{name} must be a string or list of strings")
-
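-     # Broadcast behavior (the caption text is illustrative): _prepare_text_batch("one pipe", 3, "caption")
-     # returns ["one pipe", "one pipe", "one pipe"]; a 3-element list passes through unchanged,
-     # and a 2-element list raises ValueError when batch_size is 3.
-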
-     def _prepare_initial_sample(self,
-                                 raw_latent_sample: Optional[torch.Tensor],
-                                 input_scene: Optional[torch.Tensor],
-                                 batch_size: int, height: int, width: int,
-                                 generator: Optional[torch.Generator]) -> torch.Tensor:
-         """Prepare the initial sample for diffusion."""
-
-         sample_shape = (batch_size, self.unet.config.in_channels, height, width)
-
-         if raw_latent_sample is not None:
-             if input_scene is not None:
-                 raise ValueError("Cannot provide both raw_latent_sample and input_scene")
-             sample = raw_latent_sample.to(self.device)
-             if sample.shape[1] != sample_shape[1]:
-                 raise ValueError(f"Wrong number of channels in raw_latent_sample: Expected {self.unet.config.in_channels} but got {sample.shape[1]}")
-             if sample.shape[0] == 1 and batch_size > 1:
-                 sample = sample.repeat(batch_size, 1, 1, 1)
-             elif sample.shape[0] != batch_size:
-                 raise ValueError(f"raw_latent_sample batch size {sample.shape[0]} does not match batch_size {batch_size}")
-         elif input_scene is not None:
-             # input_scene can be (H, W) or (batch_size, H, W)
-             # as_tensor avoids an unnecessary copy (and warning) when input_scene is already a tensor
-             scene_tensor = torch.as_tensor(input_scene, dtype=torch.long, device=self.device)
-             if scene_tensor.dim() == 2:
-                 # (H, W) -> repeat for batch
-                 scene_tensor = scene_tensor.unsqueeze(0).repeat(batch_size, 1, 1)
-             elif scene_tensor.shape[0] == 1 and batch_size > 1:
-                 scene_tensor = scene_tensor.repeat(batch_size, 1, 1)
-             elif scene_tensor.shape[0] != batch_size:
-                 raise ValueError(f"input_scene batch size {scene_tensor.shape[0]} does not match batch_size {batch_size}")
-             # One-hot encode: (batch, H, W, C)
-             one_hot = F.one_hot(scene_tensor, num_classes=self.unet.config.in_channels).float()
-             # (batch, H, W, C) -> (batch, C, H, W)
-             sample = one_hot.permute(0, 3, 1, 2)
-         else:
-             # Start from random noise
-             sample = torch.randn(sample_shape, generator=generator, device=self.device)
-
-         return sample
-
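-     # For example (the channel count is illustrative): with 13 tile types, a scene value
-     # of 5 at (row, col) becomes a one-hot vector with a 1 in channel 5, so the initial
-     # sample has shape (batch_size, 13, height, width).
-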
-     def __call__(
-         self,
-         caption: Optional[str | list[str]] = None,
-         negative_prompt: Optional[str | list[str]] = None,
-         generator: Optional[torch.Generator] = None,
-         num_inference_steps: int = common_settings.NUM_INFERENCE_STEPS,
-         guidance_scale: float = common_settings.GUIDANCE_SCALE,
-         height: int = common_settings.MARIO_HEIGHT,
-         width: int = common_settings.MARIO_WIDTH,
-         raw_latent_sample: Optional[torch.FloatTensor] = None,
-         input_scene: Optional[torch.Tensor] = None,
-         output_type: str = "tensor",
-         batch_size: int = 1,
-         show_progress_bar: bool = True,
-     ) -> PipelineOutput:
-         """Generate a batch of images based on text input using the diffusion model.
-
-         Args:
-             caption: Text description(s) of the desired output. Can be a string or list of strings.
-             negative_prompt: Text description(s) of what should not appear in the output. String or list.
-             generator: Random number generator for reproducibility.
-             num_inference_steps: Number of denoising steps (more = higher quality, slower).
-             guidance_scale: How strongly the generation follows the text prompt (higher = stronger).
-             height: Height of generated image in tiles.
-             width: Width of generated image in tiles.
-             raw_latent_sample: Optional starting point for diffusion instead of random noise.
-                 Must have a number of channels matching the UNet.
-             input_scene: Optional 2D or 3D int tensor where each value corresponds to a tile type.
-                 Will be converted to a one-hot encoding as the starting point.
-             output_type: Currently only "tensor" is supported.
-             batch_size: Number of samples to generate in parallel.
-             show_progress_bar: Whether to display a progress bar during denoising.
-
-         Returns:
-             PipelineOutput containing the generated image tensor (batch_size, ...).
-         """
-
-         # I would like to simplify the code to this, but the AI suggestion didn't work, and
-         # I did not feel good just pasting it all in. Will need to tackle it bit by bit.
-
-         # if caption is not None and self.text_encoder is None:
-         #     raise ValueError("Text encoder required for conditional generation")
-
-         # self.unet.eval()
-         # if self.text_encoder is not None:
-         #     self.text_encoder.to(self.device)
-         #     self.text_encoder.eval()
-         #
-         # with torch.no_grad():
-         #     # Process text inputs
-         #     captions = self.prepare_text_batch(caption, batch_size, "caption")
-         #     negatives = self.prepare_text_batch(negative_prompt, batch_size, "negative_prompt")
-
-         #     # Get embeddings
-         #     text_embeddings = self.prepare_embeddings(captions, negatives, batch_size)
-         #
-         #     # Set up initial latent state
-         #     sample = self.prepare_initial_sample(raw_latent_sample, input_scene,
-         #                                          batch_size, height, width, generator)
-
-         #     # Run diffusion process
-         #     sample = self.run_diffusion(sample, text_embeddings, num_inference_steps,
-         #                                 guidance_scale, generator, show_progress_bar,
-         #                                 has_caption=caption is not None,
-         #                                 has_negative=negative_prompt is not None)
-
-         #     # Format output
-         #     if output_type == "tensor":
-         #         sample = F.softmax(sample, dim=1)
-         #     else:
-         #         raise ValueError(f"Unsupported output type: {output_type}")
-
-         #     return PipelineOutput(images=sample)
-
-         # Validate text encoder if we need it
-         if caption is not None and self.text_encoder is None:
-             raise ValueError("Text encoder is required for conditional generation")
-
-         self.unet.eval()
-         if self.text_encoder is not None:
-             self.text_encoder.to(self.device)
-             self.text_encoder.eval()
-
-         with torch.no_grad():
-             captions = self._prepare_text_batch(caption, batch_size, "caption")
-             negatives = self._prepare_text_batch(negative_prompt, batch_size, "negative_prompt")
-
-             # --- Prepare text embeddings ---
-             if isinstance(self.text_encoder, TransformerModel):
-                 text_embeddings = text_model.get_embeddings(batch_size=batch_size,
-                                                             tokenizer=self.text_encoder.tokenizer,
-                                                             text_encoder=self.text_encoder,
-                                                             captions=captions,
-                                                             neg_captions=negatives,
-                                                             device=self.device)
-             else:  # Case for the pre-trained text encoder
-                 if self.supports_pretrained_split:  # If we have a split flag incorporated
-                     text_embeddings = st_helper.get_embeddings_split(batch_size=batch_size,
-                                                                      tokenizer=self.tokenizer,
-                                                                      model=self.text_encoder,
-                                                                      captions=captions,
-                                                                      neg_captions=negatives,
-                                                                      device=self.device)
-                 else:
-                     text_embeddings = st_helper.get_embeddings(batch_size=batch_size,
-                                                                tokenizer=self.tokenizer,
-                                                                model=self.text_encoder,
-                                                                captions=captions,
-                                                                neg_captions=negatives,
-                                                                device=self.device)
-
-
-             # --- Set up initial latent state ---
-             sample = self._prepare_initial_sample(raw_latent_sample, input_scene,
-                                                   batch_size, height, width, generator)
-
-             # --- Set up diffusion process ---
-             self.scheduler.set_timesteps(num_inference_steps)
-
-             # Denoising loop
-             iterator = self.progress_bar(self.scheduler.timesteps) if show_progress_bar else self.scheduler.timesteps
-             for t in iterator:
-                 # Handle conditional generation
-                 if captions is not None:
-                     if negatives is not None:
-                         # Three copies for negative prompt guidance
-                         model_input = torch.cat([sample, sample, sample], dim=0)
-                     else:
-                         # Two copies for standard classifier-free guidance
-                         model_input = torch.cat([sample, sample], dim=0)
-                 else:
-                     model_input = sample
-
-                 # Predict noise residual
-                 model_kwargs = {"encoder_hidden_states": text_embeddings}
-                 noise_pred = self.unet(model_input, t, **model_kwargs).sample
-
-                 # Apply guidance
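-                 # Classifier-free guidance extrapolates from the unconditional
-                 # prediction toward the text-conditional one:
-                 #     uncond + guidance_scale * (text - uncond)
-                 # With a negative prompt, the same scale is also used to push the
-                 # guided prediction away from the negative-prompt direction.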
-                 if captions is not None:
-                     if negatives is not None:
-                         # Split predictions for negative, unconditional, and text-conditional
-                         noise_pred_neg, noise_pred_uncond, noise_pred_text = noise_pred.chunk(3)
-                         noise_pred_guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-                         noise_pred = noise_pred_guided - guidance_scale * (noise_pred_neg - noise_pred_uncond)
-                     else:
-                         noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-                         noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-                 # Compute previous sample: x_{t-1} = scheduler(x_t, noise_pred)
-                 sample = self.scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
-
-             # Convert to output format
-             if output_type == "tensor":
-                 if self.block_embeddings is not None:
-                     sample = get_scene_from_embeddings(sample, self.block_embeddings)
-                 else:
-                     # Apply softmax to get probabilities for each tile type
-                     sample = F.softmax(sample, dim=1)
-                 sample = sample.detach().cpu()
-             else:
-                 raise ValueError(f"Unsupported output type: {output_type}")
-
-         return PipelineOutput(images=sample)
-
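-     # A minimal generation sketch (caption text and device are illustrative assumptions):
-     #   pipe = TextConditionalDDPMPipeline.from_pretrained("path/to/checkpoint").to("cuda")
-     #   out = pipe(caption="floor with two pipes", batch_size=4)
-     #   tiles = out.images.argmax(dim=1)  # (4, height, width) tile indices from the softmax output
-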
-     def print_unet_architecture(self):
-         """Prints the architecture of the UNet model."""
-         print(self.unet)
-
-     def print_text_encoder_architecture(self):
-         """Prints the architecture of the text encoder model, if it exists."""
-         if self.text_encoder is not None:
-             print(self.text_encoder)
-         else:
-             print("No text encoder is set.")
-
-     def save_unet_architecture_pdf(self, height, width, filename="unet_architecture", batch_size=1, device=None):
-         """
-         Saves a visualization of the UNet architecture as a PDF using torchview,
-         which must be installed separately.
-
-         Args:
-             height: Height of the dummy input.
-             width: Width of the dummy input.
-             filename: Output PDF filename (without extension).
-             batch_size: Batch size for dummy input.
-             device: Device to run the dummy input on (defaults to pipeline device).
-         """
-         from torchview import draw_graph
-         import graphviz  # needed by torchview for rendering
-
-         if device is None:
-             device = self.device if hasattr(self, 'device') else 'cpu'
-         in_channels = self.unet.config.in_channels if hasattr(self.unet, 'config') else 1
-         sample_shape = (batch_size, in_channels, height, width)
-
-         dummy_x = torch.randn(size=sample_shape, device=device)
-         dummy_t = torch.tensor([0] * batch_size, dtype=torch.long, device=device)
-
-         # Prepare dummy text embedding (match what your UNet expects)
-         if hasattr(self.unet, 'config') and hasattr(self.unet.config, 'cross_attention_dim'):
-             cross_attention_dim = self.unet.config.cross_attention_dim
-         else:
-             cross_attention_dim = 128  # fallback
-         encoder_hidden_states = torch.randn(batch_size, 1, cross_attention_dim, device=device)
-
-         self.unet.eval()
-         inputs = (dummy_x, dummy_t, encoder_hidden_states)
-         # self.unet.down_blocks = self.unet.down_blocks[:2]
-
-         graph = draw_graph(
-             model=self.unet,
-             input_data=inputs,
-             expand_nested=False,
-             # enable_output_shape=True,
-             # roll_out="nested",
-             depth=1
-         )
-         # graph.visual_graph.engine = "neato"
-         graph.visual_graph.attr(  # rankdir="LR",
-             nodesep="0.1",  # decrease space between nodes in the same rank (default ~0.25)
-             ranksep="0.2",  # decrease space between ranks (default ~0.5)
-             concentrate="true"  # merge edges between nodes in the same rank
-         )
-         graph.visual_graph.node_attr.update(
-             shape="rectangle",
-             width="1.5",  # narrow width
-             height="0.5"  # taller height to make vertical rectangles
-             # fixedsize="true"
-         )
-
-         graph.visual_graph.render(filename, format='pdf', cleanup=False)  # cleanup=True would remove intermediate files
-         graph.visual_graph.save(f'{filename}.dot')
-
-         print(f"UNet architecture saved to {filename}.pdf")
-
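-     # An illustrative call (the tile dimensions are assumptions):
-     #   pipe.save_unet_architecture_pdf(height=16, width=16, filename="unet_architecture")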