manbeast3b committed on
Commit
ff4b30b
0 Parent(s):

Initial commit

Files changed (6)
  1. .gitattributes +37 -0
  2. pyproject.toml +44 -0
  3. src/ghanta.py +74 -0
  4. src/main.py +55 -0
  5. src/pipeline.py +1168 -0
  6. uv.lock +0 -0
.gitattributes ADDED
@@ -0,0 +1,37 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ RobertML.png filter=lfs diff=lfs merge=lfs -text
+ backup.png filter=lfs diff=lfs merge=lfs -text
pyproject.toml ADDED
@@ -0,0 +1,44 @@
+ [build-system]
+ requires = ["setuptools >= 75.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "flux-schnell-edge-inference"
+ description = "An edge-maxxing model submission by RobertML for the 4090 Flux contest"
+ requires-python = ">=3.10,<3.13"
+ version = "8"
+ dependencies = [
+     "diffusers==0.31.0",
+     "transformers==4.46.2",
+     "accelerate==1.1.0",
+     "omegaconf==2.3.0",
+     "torch==2.5.1",
+     "protobuf==5.28.3",
+     "sentencepiece==0.2.0",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+     "gitpython>=3.1.43",
+     "hf_transfer==0.1.8",
+     "torchao==0.6.1",
+ ]
+
+ [[tool.edge-maxxing.models]]
+ repository = "black-forest-labs/FLUX.1-schnell"
+ revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+ exclude = ["transformer"]
+
+ [[tool.edge-maxxing.models]]
+ repository = "RobertML/FLUX.1-schnell-int8wo"
+ revision = "307e0777d92df966a3c0f99f31a6ee8957a9857a"
+
+ [[tool.edge-maxxing.models]]
+ repository = "city96/t5-v1_1-xxl-encoder-bf16"
+ revision = "1b9c856aadb864af93c1dcdc226c2774fa67bc86"
+
+
+ [[tool.edge-maxxing.models]]
+ repository = "madebyollin/taef1"
+ revision = "2d552378e58c9c94201075708d7de4e1163b2689"
+
+ [project.scripts]
+ start_inference = "main:main"
+
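The `[project.scripts]` table turns this submission into a runnable service: installing the package exposes a `start_inference` command that calls `main()` in `src/main.py`, while the `[[tool.edge-maxxing.models]]` tables pin the exact Hugging Face revisions the harness should pre-download. A minimal sketch of what the console script resolves to (this assumes `src/` ends up on the import path, which the flat `main:main` target implies):

# start_inference ≈ "main:main": import module `main`, call its `main()` attribute
from main import main

if __name__ == "__main__":
    main()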
src/ghanta.py ADDED
@@ -0,0 +1,74 @@
+ import torch
+ from typing import Tuple, Callable
+ def hacer_nada(x: torch.Tensor, modo: str = None):
+     return x
+ def brujeria_mps(entrada, dim, indice):
+     if entrada.shape[-1] == 1:
+         return torch.gather(entrada.unsqueeze(-1), dim - 1 if dim < 0 else dim, indice.unsqueeze(-1)).squeeze(-1)
+     else:
+         return torch.gather(entrada, dim, indice)
+ def emparejamiento_suave_aleatorio_2d(
+     metrica: torch.Tensor,
+     ancho: int,
+     alto: int,
+     paso_x: int,
+     paso_y: int,
+     radio: int,
+     sin_aleatoriedad: bool = False,
+     generador: torch.Generator = None
+ ) -> Tuple[Callable, Callable]:
+     lote, num_nodos, _ = metrica.shape
+     if radio <= 0:
+         return hacer_nada, hacer_nada
+     recopilar = brujeria_mps if metrica.device.type == "mps" else torch.gather
+     with torch.no_grad():
+         alto_paso_y, ancho_paso_x = alto // paso_y, ancho // paso_x
+         if sin_aleatoriedad:
+             indice_aleatorio = torch.zeros(alto_paso_y, ancho_paso_x, 1, device=metrica.device, dtype=torch.int64)
+         else:
+             indice_aleatorio = torch.randint(paso_y * paso_x, size=(alto_paso_y, ancho_paso_x, 1), device=generador.device, generator=generador).to(metrica.device)
+         vista_buffer_indice = torch.zeros(alto_paso_y, ancho_paso_x, paso_y * paso_x, device=metrica.device, dtype=torch.int64)
+         vista_buffer_indice.scatter_(dim=2, index=indice_aleatorio, src=-torch.ones_like(indice_aleatorio, dtype=indice_aleatorio.dtype))
+         vista_buffer_indice = vista_buffer_indice.view(alto_paso_y, ancho_paso_x, paso_y, paso_x).transpose(1, 2).reshape(alto_paso_y * paso_y, ancho_paso_x * paso_x)
+         if (alto_paso_y * paso_y) < alto or (ancho_paso_x * paso_x) < ancho:
+             buffer_indice = torch.zeros(alto, ancho, device=metrica.device, dtype=torch.int64)
+             buffer_indice[:(alto_paso_y * paso_y), :(ancho_paso_x * paso_x)] = vista_buffer_indice
+         else:
+             buffer_indice = vista_buffer_indice
+         indice_aleatorio = buffer_indice.reshape(1, -1, 1).argsort(dim=1)
+         del buffer_indice, vista_buffer_indice
+         num_destino = alto_paso_y * ancho_paso_x
+         indices_a = indice_aleatorio[:, num_destino:, :]
+         indices_b = indice_aleatorio[:, :num_destino, :]
+         def dividir(x):
+             canales = x.shape[-1]
+             origen = recopilar(x, dim=1, index=indices_a.expand(lote, num_nodos - num_destino, canales))
+             destino = recopilar(x, dim=1, index=indices_b.expand(lote, num_destino, canales))
+             return origen, destino
+         metrica = metrica / metrica.norm(dim=-1, keepdim=True)
+         a, b = dividir(metrica)
+         puntuaciones = a @ b.transpose(-1, -2)
+         radio = min(a.shape[1], radio)
+         nodo_max, nodo_indice = puntuaciones.max(dim=-1)
+         indice_borde = nodo_max.argsort(dim=-1, descending=True)[..., None]
+         indice_no_emparejado = indice_borde[..., radio:, :]
+         indice_origen = indice_borde[..., :radio, :]
+         indice_destino = recopilar(nodo_indice[..., None], dim=-2, index=indice_origen)
+     def fusionar(x: torch.Tensor, modo="mean") -> torch.Tensor:
+         origen, destino = dividir(x)
+         n, t1, c = origen.shape
+         no_emparejado = recopilar(origen, dim=-2, index=indice_no_emparejado.expand(n, t1 - radio, c))
+         origen = recopilar(origen, dim=-2, index=indice_origen.expand(n, radio, c))
+         destino = destino.scatter_reduce(-2, indice_destino.expand(n, radio, c), origen, reduce=modo)
+         return torch.cat([no_emparejado, destino], dim=1)
+     def desfusionar(x: torch.Tensor) -> torch.Tensor:
+         longitud_no_emparejado = indice_no_emparejado.shape[1]
+         no_emparejado, destino = x[..., :longitud_no_emparejado, :], x[..., longitud_no_emparejado:, :]
+         _, _, c = no_emparejado.shape
+         origen = recopilar(destino, dim=-2, index=indice_destino.expand(lote, radio, c))
+         salida = torch.zeros(lote, num_nodos, c, device=x.device, dtype=x.dtype)
+         salida.scatter_(dim=-2, index=indices_b.expand(lote, num_destino, c), src=destino)
+         salida.scatter_(dim=-2, index=recopilar(indices_a.expand(lote, indices_a.shape[1], 1), dim=1, index=indice_no_emparejado).expand(lote, longitud_no_emparejado, c), src=no_emparejado)
+         salida.scatter_(dim=-2, index=recopilar(indices_a.expand(lote, indices_a.shape[1], 1), dim=1, index=indice_origen).expand(lote, radio, c), src=origen)
+         return salida
+     return fusionar, desfusionar
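src/ghanta.py appears to be a port of ToMe-style random 2D bipartite soft matching: `emparejamiento_suave_aleatorio_2d` picks a destination token per stride cell, matches the remaining tokens to their most similar destination, and returns a merge/unmerge pair of callables. A minimal usage sketch run from `src/` (the 64x64 grid, 128 channels, 2x2 stride and 50% merge ratio are illustrative assumptions, not values from the commit):

import torch
import ghanta  # src/ghanta.py

# a flattened 64x64 token grid: (batch, tokens, channels)
x = torch.randn(1, 64 * 64, 128)

# build the matching from x itself and merge half of the tokens
fusionar, desfusionar = ghanta.emparejamiento_suave_aleatorio_2d(
    x, ancho=64, alto=64, paso_x=2, paso_y=2,
    radio=x.shape[1] // 2, sin_aleatoriedad=True,
)
x_reducido = fusionar(x)                 # (1, 64*64 - radio, 128): merged sequence
x_restaurado = desfusionar(x_reducido)   # (1, 64*64, 128): merged values scattered back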
src/main.py ADDED
@@ -0,0 +1,55 @@
+ import atexit
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+ from git import Repo
+ import torch
+
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+ from pipeline import load_pipeline, infer
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def at_exit():
+     torch.cuda.empty_cache()
+
+
+ def main():
+     atexit.register(at_exit)
+
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+
+     print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+     if exists(SOCKET):
+         remove(SOCKET)
+
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+             generator = torch.Generator("cuda")
+             while True:
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print("Inference socket exiting")
+
+                     return
+                 image = infer(request, pipeline, generator.manual_seed(request.seed))
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+
+                 packet = data.getvalue()
+
+                 connection.send_bytes(packet)
+
+
+ if __name__ == '__main__':
+     main()
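src/main.py serves one long-lived connection over a Unix socket: it receives a JSON-encoded TextToImageRequest, runs the pipeline with the request's seed, and sends back JPEG bytes. A minimal client sketch under those assumptions (the field names are taken from how main.py and infer() read the request, and `model_dump_json` assumes the pydantic v2 API implied by `model_validate_json`):

from io import BytesIO
from multiprocessing.connection import Client

from PIL import Image
from pipelines.models import TextToImageRequest

# connect to the socket created by main(); path is relative to the repo root
with Client("inferences.sock") as connection:
    request = TextToImageRequest(prompt="a red bicycle", width=1024, height=1024, seed=0)
    connection.send_bytes(request.model_dump_json().encode("utf-8"))
    image = Image.open(BytesIO(connection.recv_bytes()))  # JPEG bytes come back
    image.save("out.jpg")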
src/pipeline.py ADDED
@@ -0,0 +1,1168 @@
+ from diffusers import FluxPipeline, AutoencoderKL, AutoencoderTiny
+ from diffusers.image_processor import VaeImageProcessor
+ from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
+ from huggingface_hub.constants import HF_HUB_CACHE
+ from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
+ import torch
+ import torch._dynamo
+ import gc
+ from PIL import Image as img
+ from PIL.Image import Image
+ from pipelines.models import TextToImageRequest
+ from torch import Generator
+ import time
+ # from diffusers import DiffusionPipeline
+ from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only
+ import os
+ os.environ['PYTORCH_CUDA_ALLOC_CONF'] = "expandable_segments:True"
+
+ import math
+ from typing import Type, Dict, Any, Tuple, Callable, Optional, Union, List
+ import ghanta
+ import numpy as np
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
+ from diffusers.loaders import FromOriginalModelMixin, PeftAdapterMixin
+ from diffusers.models.attention import FeedForward
+ from diffusers.models.attention_processor import (
+     Attention,
+     AttentionProcessor,
+     FluxAttnProcessor2_0,
+     FusedFluxAttnProcessor2_0,
+ )
+ from diffusers.models.modeling_utils import ModelMixin
+ from diffusers.models.normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle
+ from diffusers.utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers
+ from diffusers.utils.import_utils import is_torch_npu_available
+ from diffusers.utils.torch_utils import maybe_allow_in_graph
+ from diffusers.models.embeddings import CombinedTimestepGuidanceTextProjEmbeddings, CombinedTimestepTextProjEmbeddings, FluxPosEmbed
+ from diffusers.models.modeling_outputs import Transformer2DModelOutput
+
+ from diffusers.loaders import FluxLoraLoaderMixin, FromSingleFileMixin, TextualInversionLoaderMixin
+ from diffusers.models.autoencoders import AutoencoderKL
+ # from diffusers.models.transformers import FluxTransformer2DModel
+ from diffusers.utils import (
+     USE_PEFT_BACKEND,
+     is_torch_xla_available,
+     logging,
+     replace_example_docstring,
+     scale_lora_layers,
+     unscale_lora_layers,
+ )
+ from diffusers.utils.torch_utils import randn_tensor
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
+ from diffusers.pipelines.flux.pipeline_output import FluxPipelineOutput
+
+ if is_torch_xla_available():
+     import torch_xla.core.xla_model as xm
+
+     XLA_AVAILABLE = True
+ else:
+     XLA_AVAILABLE = False
+ import inspect
+
+ # module-level logger; the logger.warning(...) calls below rely on it
+ logger = logging.get_logger(__name__)
+
70
+ def calculate_shift(
71
+ image_seq_len,
72
+ base_seq_len: int = 256,
73
+ max_seq_len: int = 4096,
74
+ base_shift: float = 0.5,
75
+ max_shift: float = 1.16,
76
+ ):
77
+ m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
78
+ b = base_shift - m * base_seq_len
79
+ mu = image_seq_len * m + b
80
+ return mu
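For the contest's 1024x1024 requests the packed latent grid is 64x64 = 4096 tokens, so with the defaults in the signature above the shift lands at the top of the range. A quick check of the arithmetic (using the function defaults; the scheduler config values passed in at call time may differ):

m = (1.16 - 0.5) / (4096 - 256)   # = 0.000171875
b = 0.5 - m * 256                 # = 0.456
mu = 4096 * m + b                 # = 1.16 when image_seq_len == 4096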
81
+
82
+
83
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
84
+ def retrieve_timesteps(
85
+ scheduler,
86
+ num_inference_steps: Optional[int] = None,
87
+ device: Optional[Union[str, torch.device]] = None,
88
+ timesteps: Optional[List[int]] = None,
89
+ sigmas: Optional[List[float]] = None,
90
+ **kwargs,
91
+ ):
92
+ if timesteps is not None and sigmas is not None:
93
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
94
+ if timesteps is not None:
95
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
96
+ if not accepts_timesteps:
97
+ raise ValueError(
98
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
99
+ f" timestep schedules. Please check whether you are using the correct scheduler."
100
+ )
101
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
102
+ timesteps = scheduler.timesteps
103
+ num_inference_steps = len(timesteps)
104
+ elif sigmas is not None:
105
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
106
+ if not accept_sigmas:
107
+ raise ValueError(
108
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
109
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
110
+ )
111
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
112
+ timesteps = scheduler.timesteps
113
+ num_inference_steps = len(timesteps)
114
+ else:
115
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
116
+ timesteps = scheduler.timesteps
117
+ return timesteps, num_inference_steps
118
+
119
+
120
+ def inicializar_generador(dispositivo: torch.device, respaldo: torch.Generator = None):
121
+ if dispositivo.type == "cpu":
122
+ return torch.Generator(device="cpu").set_state(torch.get_rng_state())
123
+ elif dispositivo.type == "cuda":
124
+ return torch.Generator(device=dispositivo).set_state(torch.cuda.get_rng_state())
125
+ else:
126
+ if respaldo is None:
127
+ return inicializar_generador(torch.device("cpu"))
128
+ else:
129
+ return respaldo
130
+
131
+ def calcular_fusion(x: torch.Tensor, info_tome: Dict[str, Any]) -> Tuple[Callable, ...]:
132
+ alto_original, ancho_original = info_tome["size"]
133
+ tokens_originales = alto_original * ancho_original
134
+ submuestreo = int(math.ceil(math.sqrt(tokens_originales // x.shape[1])))
135
+ argumentos = info_tome["args"]
136
+ if submuestreo <= argumentos["down"]:
137
+ ancho = int(math.ceil(ancho_original / submuestreo))
138
+ alto = int(math.ceil(alto_original / submuestreo))
139
+ radio = int(x.shape[1] * argumentos["ratio"])
140
+
141
+ if argumentos["generator"] is None:
142
+ argumentos["generator"] = inicializar_generador(x.device)
143
+ elif argumentos["generator"].device != x.device:
144
+ argumentos["generator"] = inicializar_generador(x.device, respaldo=argumentos["generator"])
145
+
146
+ usar_aleatoriedad = argumentos["rando"]
147
+ fusion, desfusion = ghanta.emparejamiento_suave_aleatorio_2d(
148
+ x, ancho, alto, argumentos["sx"], argumentos["sy"], radio,
149
+ sin_aleatoriedad=not usar_aleatoriedad, generador=argumentos["generator"]
150
+ )
151
+ else:
152
+ fusion, desfusion = (ghanta.hacer_nada, ghanta.hacer_nada)
153
+ fusion_a, desfusion_a = (fusion, desfusion) if argumentos["m1"] else (ghanta.hacer_nada, ghanta.hacer_nada)
154
+ fusion_c, desfusion_c = (fusion, desfusion) if argumentos["m2"] else (ghanta.hacer_nada, ghanta.hacer_nada)
155
+ fusion_m, desfusion_m = (fusion, desfusion) if argumentos["m3"] else (ghanta.hacer_nada, ghanta.hacer_nada)
156
+ return fusion_a, fusion_c, fusion_m, desfusion_a, desfusion_c, desfusion_m
157
+
158
+ @maybe_allow_in_graph
159
+ class FluxSingleTransformerBlock(nn.Module):
160
+
161
+ def __init__(self, dim, num_attention_heads, attention_head_dim, mlp_ratio=4.0):
162
+ super().__init__()
163
+ self.mlp_hidden_dim = int(dim * mlp_ratio)
164
+
165
+ self.norm = AdaLayerNormZeroSingle(dim)
166
+ self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim)
167
+ self.act_mlp = nn.GELU(approximate="tanh")
168
+ self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim)
169
+
170
+ processor = FluxAttnProcessor2_0()
171
+ self.attn = Attention(
172
+ query_dim=dim,
173
+ cross_attention_dim=None,
174
+ dim_head=attention_head_dim,
175
+ heads=num_attention_heads,
176
+ out_dim=dim,
177
+ bias=True,
178
+ processor=processor,
179
+ qk_norm="rms_norm",
180
+ eps=1e-6,
181
+ pre_only=True,
182
+ )
183
+
184
+ def forward(
185
+ self,
186
+ hidden_states: torch.FloatTensor,
187
+ temb: torch.FloatTensor,
188
+ image_rotary_emb=None,
189
+ joint_attention_kwargs=None,
190
+ tinfo: Dict[str, Any] = None,
191
+ ):
192
+ if tinfo is not None:
193
+ m_a, m_c, mom, u_a, u_c, u_m = calcular_fusion(hidden_states, tinfo)
194
+ else:
195
+ m_a, m_c, mom, u_a, u_c, u_m = (ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada)
196
+
197
+
198
+ residual = hidden_states
199
+ norm_hidden_states, gate = self.norm(hidden_states, emb=temb)
200
+ norm_hidden_states = m_a(norm_hidden_states)
201
+ mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states))
202
+ joint_attention_kwargs = joint_attention_kwargs or {}
203
+ attn_output = self.attn(
204
+ hidden_states=norm_hidden_states,
205
+ image_rotary_emb=image_rotary_emb,
206
+ **joint_attention_kwargs,
207
+ )
208
+
209
+ hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2)
210
+ gate = gate.unsqueeze(1)
211
+ hidden_states = gate * self.proj_out(hidden_states)
212
+ hidden_states = u_a(residual + hidden_states)
213
+
214
+ return hidden_states
215
+
216
+
217
+ @maybe_allow_in_graph
218
+ class FluxTransformerBlock(nn.Module):
219
+
220
+ def __init__(self, dim, num_attention_heads, attention_head_dim, qk_norm="rms_norm", eps=1e-6):
221
+ super().__init__()
222
+
223
+ self.norm1 = AdaLayerNormZero(dim)
224
+
225
+ self.norm1_context = AdaLayerNormZero(dim)
226
+
227
+ if hasattr(F, "scaled_dot_product_attention"):
228
+ processor = FluxAttnProcessor2_0()
229
+ else:
230
+ raise ValueError(
231
+ "The current PyTorch version does not support the `scaled_dot_product_attention` function."
232
+ )
233
+ self.attn = Attention(
234
+ query_dim=dim,
235
+ cross_attention_dim=None,
236
+ added_kv_proj_dim=dim,
237
+ dim_head=attention_head_dim,
238
+ heads=num_attention_heads,
239
+ out_dim=dim,
240
+ context_pre_only=False,
241
+ bias=True,
242
+ processor=processor,
243
+ qk_norm=qk_norm,
244
+ eps=eps,
245
+ )
246
+
247
+ self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
248
+ self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
249
+
250
+ self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
251
+ self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
252
+ self._chunk_size = None
253
+ self._chunk_dim = 0
254
+
255
+ def forward(
256
+ self,
257
+ hidden_states: torch.FloatTensor,
258
+ encoder_hidden_states: torch.FloatTensor,
259
+ temb: torch.FloatTensor,
260
+ image_rotary_emb=None,
261
+ joint_attention_kwargs=None,
262
+ tinfo: Dict[str, Any] = None,
263
+ ):
264
+
265
+ if tinfo is not None:
266
+ m_a, m_c, mom, u_a, u_c, u_m = calcular_fusion(hidden_states, tinfo)
267
+ else:
268
+ m_a, m_c, mom, u_a, u_c, u_m = (ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada)
269
+
270
+
271
+ norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
272
+
273
+ norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(
274
+ encoder_hidden_states, emb=temb
275
+ )
276
+ joint_attention_kwargs = joint_attention_kwargs or {}
277
+ norm_hidden_states = m_a(norm_hidden_states)
278
+ norm_encoder_hidden_states = m_c(norm_encoder_hidden_states)
279
+
280
+ attn_output, context_attn_output = self.attn(
281
+ hidden_states=norm_hidden_states,
282
+ encoder_hidden_states=norm_encoder_hidden_states,
283
+ image_rotary_emb=image_rotary_emb,
284
+ **joint_attention_kwargs,
285
+ )
286
+
287
+ attn_output = gate_msa.unsqueeze(1) * attn_output
288
+ hidden_states = u_a(attn_output) + hidden_states
289
+
290
+ norm_hidden_states = self.norm2(hidden_states)
291
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
292
+
293
+ norm_hidden_states = mom(norm_hidden_states)
294
+
295
+ ff_output = self.ff(norm_hidden_states)
296
+ ff_output = gate_mlp.unsqueeze(1) * ff_output
297
+
298
+ hidden_states = u_m(ff_output) + hidden_states
299
+ context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output
300
+ encoder_hidden_states = u_c(context_attn_output) + encoder_hidden_states
301
+
302
+ norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states)
303
+ norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]
304
+
305
+ context_ff_output = self.ff_context(norm_encoder_hidden_states)
306
+ encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output
307
+
308
+ return encoder_hidden_states, hidden_states
309
+
310
+
311
+ class FluxTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
312
+
313
+ _supports_gradient_checkpointing = True
314
+ _no_split_modules = ["FluxTransformerBlock", "FluxSingleTransformerBlock"]
315
+
316
+ @register_to_config
317
+ def __init__(
318
+ self,
319
+ patch_size: int = 1,
320
+ in_channels: int = 64,
321
+ out_channels: Optional[int] = None,
322
+ num_layers: int = 19,
323
+ num_single_layers: int = 38,
324
+ attention_head_dim: int = 128,
325
+ num_attention_heads: int = 24,
326
+ joint_attention_dim: int = 4096,
327
+ pooled_projection_dim: int = 768,
328
+ guidance_embeds: bool = False,
329
+ axes_dims_rope: Tuple[int] = (16, 56, 56),
330
+ generator: Optional[torch.Generator] = None,
331
+ ):
332
+ super().__init__()
333
+ self.out_channels = out_channels or in_channels
334
+ self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim
335
+
336
+ self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope)
337
+
338
+ text_time_guidance_cls = (
339
+ CombinedTimestepGuidanceTextProjEmbeddings if guidance_embeds else CombinedTimestepTextProjEmbeddings
340
+ )
341
+ self.time_text_embed = text_time_guidance_cls(
342
+ embedding_dim=self.inner_dim, pooled_projection_dim=self.config.pooled_projection_dim
343
+ )
344
+
345
+ self.context_embedder = nn.Linear(self.config.joint_attention_dim, self.inner_dim)
346
+ self.x_embedder = nn.Linear(self.config.in_channels, self.inner_dim)
347
+
348
+ self.transformer_blocks = nn.ModuleList(
349
+ [
350
+ FluxTransformerBlock(
351
+ dim=self.inner_dim,
352
+ num_attention_heads=self.config.num_attention_heads,
353
+ attention_head_dim=self.config.attention_head_dim,
354
+ )
355
+ for i in range(self.config.num_layers)
356
+ ]
357
+ )
358
+
359
+ self.single_transformer_blocks = nn.ModuleList(
360
+ [
361
+ FluxSingleTransformerBlock(
362
+ dim=self.inner_dim,
363
+ num_attention_heads=self.config.num_attention_heads,
364
+ attention_head_dim=self.config.attention_head_dim,
365
+ )
366
+ for i in range(self.config.num_single_layers)
367
+ ]
368
+ )
369
+
370
+ self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6)
371
+ self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True)
372
+ ratio: float = 0.5
373
+ down: int = 1
374
+ sx: int = 2
375
+ sy: int = 2
376
+ rando: bool = False
377
+ m1: bool = False
378
+ m2: bool = True
379
+ m3: bool = False
380
+
381
+ self.tinfo = {
382
+ "size": None,
383
+ "args": {
384
+ "ratio": ratio,
385
+ "down": down,
386
+ "sx": sx,
387
+ "sy": sy,
388
+ "rando": rando,
389
+ "m1": m1,
390
+ "m2": m2,
391
+ "m3": m3,
392
+ "generator": generator
393
+ }
394
+ }
395
+
396
+ self.gradient_checkpointing = False
397
+
398
+ @property
399
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
400
+ r"""
401
+ Returns:
402
+ `dict` of attention processors: A dictionary containing all attention processors used in the model with
403
+ indexed by its weight name.
404
+ """
405
+ processors = {}
406
+
407
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
408
+ if hasattr(module, "get_processor"):
409
+ processors[f"{name}.processor"] = module.get_processor()
410
+
411
+ for sub_name, child in module.named_children():
412
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
413
+
414
+ return processors
415
+
416
+ for name, module in self.named_children():
417
+ fn_recursive_add_processors(name, module, processors)
418
+
419
+ return processors
420
+
421
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
422
+ count = len(self.attn_processors.keys())
423
+
424
+ if isinstance(processor, dict) and len(processor) != count:
425
+ raise ValueError(
426
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
427
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
428
+ )
429
+
430
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
431
+ if hasattr(module, "set_processor"):
432
+ if not isinstance(processor, dict):
433
+ module.set_processor(processor)
434
+ else:
435
+ module.set_processor(processor.pop(f"{name}.processor"))
436
+
437
+ for sub_name, child in module.named_children():
438
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
439
+
440
+ for name, module in self.named_children():
441
+ fn_recursive_attn_processor(name, module, processor)
442
+
443
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedFluxAttnProcessor2_0
444
+ def fuse_qkv_projections(self):
445
+ self.original_attn_processors = None
446
+
447
+ for _, attn_processor in self.attn_processors.items():
448
+ if "Added" in str(attn_processor.__class__.__name__):
449
+ raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
450
+
451
+ self.original_attn_processors = self.attn_processors
452
+
453
+ for module in self.modules():
454
+ if isinstance(module, Attention):
455
+ module.fuse_projections(fuse=True)
456
+
457
+ self.set_attn_processor(FusedFluxAttnProcessor2_0())
458
+
459
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
460
+ def unfuse_qkv_projections(self):
461
+ if self.original_attn_processors is not None:
462
+ self.set_attn_processor(self.original_attn_processors)
463
+
464
+ def _set_gradient_checkpointing(self, module, value=False):
465
+ if hasattr(module, "gradient_checkpointing"):
466
+ module.gradient_checkpointing = value
467
+
468
+ def forward(
469
+ self,
470
+ hidden_states: torch.Tensor,
471
+ encoder_hidden_states: torch.Tensor = None,
472
+ pooled_projections: torch.Tensor = None,
473
+ timestep: torch.LongTensor = None,
474
+ img_ids: torch.Tensor = None,
475
+ txt_ids: torch.Tensor = None,
476
+ guidance: torch.Tensor = None,
477
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
478
+ controlnet_block_samples=None,
479
+ controlnet_single_block_samples=None,
480
+ return_dict: bool = True,
481
+ controlnet_blocks_repeat: bool = False,
482
+ ) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
483
+
484
+ if len(hidden_states.shape) == 4:
485
+ self.tinfo["size"] = (hidden_states.shape[2], hidden_states.shape[3])
486
+ if len(hidden_states.shape) == 3:
487
+ self.tinfo["size"] = (hidden_states.shape[1], hidden_states.shape[2])
488
+
489
+ if joint_attention_kwargs is not None:
490
+ joint_attention_kwargs = joint_attention_kwargs.copy()
491
+ lora_scale = joint_attention_kwargs.pop("scale", 1.0)
492
+ else:
493
+ lora_scale = 1.0
494
+
495
+ if USE_PEFT_BACKEND:
496
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
497
+ scale_lora_layers(self, lora_scale)
498
+ else:
499
+ if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
500
+ logger.warning(
501
+ "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
502
+ )
503
+
504
+ hidden_states = self.x_embedder(hidden_states)
505
+
506
+
507
+ timestep = timestep.to(hidden_states.dtype) * 1000
508
+ if guidance is not None:
509
+ guidance = guidance.to(hidden_states.dtype) * 1000
510
+ else:
511
+ guidance = None
512
+
513
+ temb = (
514
+ self.time_text_embed(timestep, pooled_projections)
515
+ if guidance is None
516
+ else self.time_text_embed(timestep, guidance, pooled_projections)
517
+ )
518
+ encoder_hidden_states = self.context_embedder(encoder_hidden_states)
519
+
520
+ if txt_ids.ndim == 3:
521
+ logger.warning(
522
+ "Passing `txt_ids` 3d torch.Tensor is deprecated."
523
+ "Please remove the batch dimension and pass it as a 2d torch Tensor"
524
+ )
525
+ txt_ids = txt_ids[0]
526
+ if img_ids.ndim == 3:
527
+ logger.warning(
528
+ "Passing `img_ids` 3d torch.Tensor is deprecated."
529
+ "Please remove the batch dimension and pass it as a 2d torch Tensor"
530
+ )
531
+ img_ids = img_ids[0]
532
+
533
+ ids = torch.cat((txt_ids, img_ids), dim=0)
534
+ image_rotary_emb = self.pos_embed(ids)
535
+
536
+ for index_block, block in enumerate(self.transformer_blocks):
537
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
538
+
539
+ def create_custom_forward(module, return_dict=None):
540
+ def custom_forward(*inputs):
541
+ if return_dict is not None:
542
+ return module(*inputs, return_dict=return_dict)
543
+ else:
544
+ return module(*inputs)
545
+
546
+ return custom_forward
547
+
548
+ ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
549
+ encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint(
550
+ create_custom_forward(block),
551
+ hidden_states,
552
+ encoder_hidden_states,
553
+ temb,
554
+ image_rotary_emb,
555
+ **ckpt_kwargs,
556
+ )
557
+
558
+ else:
559
+ encoder_hidden_states, hidden_states = block(
560
+ hidden_states=hidden_states,
561
+ encoder_hidden_states=encoder_hidden_states,
562
+ temb=temb,
563
+ image_rotary_emb=image_rotary_emb,
564
+ joint_attention_kwargs=joint_attention_kwargs,
565
+ tinfo=self.tinfo
566
+ )
567
+
568
+ if controlnet_block_samples is not None:
569
+ interval_control = len(self.transformer_blocks) / len(controlnet_block_samples)
570
+ interval_control = int(np.ceil(interval_control))
571
+ if controlnet_blocks_repeat:
572
+ hidden_states = (
573
+ hidden_states + controlnet_block_samples[index_block % len(controlnet_block_samples)]
574
+ )
575
+ else:
576
+ hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control]
577
+
578
+ hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
579
+
580
+ for index_block, block in enumerate(self.single_transformer_blocks):
581
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
582
+
583
+ def create_custom_forward(module, return_dict=None):
584
+ def custom_forward(*inputs):
585
+ if return_dict is not None:
586
+ return module(*inputs, return_dict=return_dict)
587
+ else:
588
+ return module(*inputs)
589
+
590
+ return custom_forward
591
+
592
+ ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
593
+ hidden_states = torch.utils.checkpoint.checkpoint(
594
+ create_custom_forward(block),
595
+ hidden_states,
596
+ temb,
597
+ image_rotary_emb,
598
+ **ckpt_kwargs,
599
+ )
600
+
601
+ else:
602
+ hidden_states = block(
603
+ hidden_states=hidden_states,
604
+ temb=temb,
605
+ image_rotary_emb=image_rotary_emb,
606
+ joint_attention_kwargs=joint_attention_kwargs,
607
+ tinfo=self.tinfo
608
+ )
609
+
610
+ if controlnet_single_block_samples is not None:
611
+ interval_control = len(self.single_transformer_blocks) / len(controlnet_single_block_samples)
612
+ interval_control = int(np.ceil(interval_control))
613
+ hidden_states[:, encoder_hidden_states.shape[1] :, ...] = (
614
+ hidden_states[:, encoder_hidden_states.shape[1] :, ...]
615
+ + controlnet_single_block_samples[index_block // interval_control]
616
+ )
617
+
618
+ hidden_states = hidden_states[:, encoder_hidden_states.shape[1] :, ...]
619
+
620
+ hidden_states = self.norm_out(hidden_states, temb)
621
+ output = self.proj_out(hidden_states)
622
+
623
+ if USE_PEFT_BACKEND:
624
+ unscale_lora_layers(self, lora_scale)
625
+
626
+ if not return_dict:
627
+ return (output,)
628
+
629
+ return Transformer2DModelOutput(sample=output)
630
+
631
+
632
+ class FluxPipeline(
633
+ DiffusionPipeline,
634
+ FluxLoraLoaderMixin,
635
+ FromSingleFileMixin,
636
+ TextualInversionLoaderMixin,
637
+ ):
638
+ model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
639
+ _optional_components = []
640
+ _callback_tensor_inputs = ["latents", "prompt_embeds"]
641
+
642
+ def __init__(
643
+ self,
644
+ scheduler: FlowMatchEulerDiscreteScheduler,
645
+ vae: AutoencoderKL,
646
+ text_encoder: CLIPTextModel,
647
+ tokenizer: CLIPTokenizer,
648
+ text_encoder_2: T5EncoderModel,
649
+ tokenizer_2: T5TokenizerFast,
650
+ transformer: FluxTransformer2DModel,
651
+ ):
652
+ super().__init__()
653
+
654
+ self.register_modules(
655
+ vae=vae,
656
+ text_encoder=text_encoder,
657
+ text_encoder_2=text_encoder_2,
658
+ tokenizer=tokenizer,
659
+ tokenizer_2=tokenizer_2,
660
+ transformer=transformer,
661
+ scheduler=scheduler,
662
+ )
663
+ self.vae_scale_factor = (
664
+ 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
665
+ )
666
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2)
667
+ self.tokenizer_max_length = (
668
+ self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
669
+ )
670
+ self.default_sample_size = 128
671
+
672
+ def _get_t5_prompt_embeds(
673
+ self,
674
+ prompt: Union[str, List[str]] = None,
675
+ num_images_per_prompt: int = 1,
676
+ max_sequence_length: int = 512,
677
+ device: Optional[torch.device] = None,
678
+ dtype: Optional[torch.dtype] = None,
679
+ ):
680
+ device = device or self._execution_device
681
+ dtype = dtype or self.text_encoder.dtype
682
+
683
+ prompt = [prompt] if isinstance(prompt, str) else prompt
684
+ batch_size = len(prompt)
685
+
686
+ if isinstance(self, TextualInversionLoaderMixin):
687
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer_2)
688
+
689
+ text_inputs = self.tokenizer_2(
690
+ prompt,
691
+ padding="max_length",
692
+ max_length=max_sequence_length,
693
+ truncation=True,
694
+ return_length=False,
695
+ return_overflowing_tokens=False,
696
+ return_tensors="pt",
697
+ )
698
+ text_input_ids = text_inputs.input_ids
699
+ untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids
700
+
701
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
702
+ removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
703
+ logger.warning(
704
+ "The following part of your input was truncated because `max_sequence_length` is set to "
705
+ f" {max_sequence_length} tokens: {removed_text}"
706
+ )
707
+
708
+ prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0]
709
+
710
+ dtype = self.text_encoder_2.dtype
711
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
712
+
713
+ _, seq_len, _ = prompt_embeds.shape
714
+
715
+ # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
716
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
717
+ prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
718
+
719
+ return prompt_embeds
720
+
721
+ def _get_clip_prompt_embeds(
722
+ self,
723
+ prompt: Union[str, List[str]],
724
+ num_images_per_prompt: int = 1,
725
+ device: Optional[torch.device] = None,
726
+ ):
727
+ device = device or self._execution_device
728
+
729
+ prompt = [prompt] if isinstance(prompt, str) else prompt
730
+ batch_size = len(prompt)
731
+
732
+ if isinstance(self, TextualInversionLoaderMixin):
733
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
734
+
735
+ text_inputs = self.tokenizer(
736
+ prompt,
737
+ padding="max_length",
738
+ max_length=self.tokenizer_max_length,
739
+ truncation=True,
740
+ return_overflowing_tokens=False,
741
+ return_length=False,
742
+ return_tensors="pt",
743
+ )
744
+
745
+ text_input_ids = text_inputs.input_ids
746
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
747
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
748
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
749
+ logger.warning(
750
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
751
+ f" {self.tokenizer_max_length} tokens: {removed_text}"
752
+ )
753
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False)
754
+
755
+ prompt_embeds = prompt_embeds.pooler_output
756
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
757
+
758
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt)
759
+ prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1)
760
+
761
+ return prompt_embeds
762
+
763
+ def encode_prompt(
764
+ self,
765
+ prompt: Union[str, List[str]],
766
+ prompt_2: Union[str, List[str]],
767
+ device: Optional[torch.device] = None,
768
+ num_images_per_prompt: int = 1,
769
+ prompt_embeds: Optional[torch.FloatTensor] = None,
770
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
771
+ max_sequence_length: int = 512,
772
+ lora_scale: Optional[float] = None,
773
+ ):
774
+ device = device or self._execution_device
775
+
776
+ if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin):
777
+ self._lora_scale = lora_scale
778
+ if self.text_encoder is not None and USE_PEFT_BACKEND:
779
+ scale_lora_layers(self.text_encoder, lora_scale)
780
+ if self.text_encoder_2 is not None and USE_PEFT_BACKEND:
781
+ scale_lora_layers(self.text_encoder_2, lora_scale)
782
+
783
+ prompt = [prompt] if isinstance(prompt, str) else prompt
784
+
785
+ if prompt_embeds is None:
786
+ prompt_2 = prompt_2 or prompt
787
+ prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
788
+
789
+ # We only use the pooled prompt output from the CLIPTextModel
790
+ pooled_prompt_embeds = self._get_clip_prompt_embeds(
791
+ prompt=prompt,
792
+ device=device,
793
+ num_images_per_prompt=num_images_per_prompt,
794
+ )
795
+ prompt_embeds = self._get_t5_prompt_embeds(
796
+ prompt=prompt_2,
797
+ num_images_per_prompt=num_images_per_prompt,
798
+ max_sequence_length=max_sequence_length,
799
+ device=device,
800
+ )
801
+
802
+ if self.text_encoder is not None:
803
+ if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
804
+ # Retrieve the original scale by scaling back the LoRA layers
805
+ unscale_lora_layers(self.text_encoder, lora_scale)
806
+
807
+ if self.text_encoder_2 is not None:
808
+ if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
809
+ # Retrieve the original scale by scaling back the LoRA layers
810
+ unscale_lora_layers(self.text_encoder_2, lora_scale)
811
+
812
+ dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype
813
+ text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype)
814
+
815
+ return prompt_embeds, pooled_prompt_embeds, text_ids
816
+
817
+ def check_inputs(
818
+ self,
819
+ prompt,
820
+ prompt_2,
821
+ height,
822
+ width,
823
+ prompt_embeds=None,
824
+ pooled_prompt_embeds=None,
825
+ callback_on_step_end_tensor_inputs=None,
826
+ max_sequence_length=None,
827
+ ):
828
+ if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0:
829
+ logger.warning(
830
+ f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly"
831
+ )
832
+
833
+ if callback_on_step_end_tensor_inputs is not None and not all(
834
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
835
+ ):
836
+ raise ValueError(
837
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
838
+ )
839
+
840
+ if prompt is not None and prompt_embeds is not None:
841
+ raise ValueError(
842
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
843
+ " only forward one of the two."
844
+ )
845
+ elif prompt_2 is not None and prompt_embeds is not None:
846
+ raise ValueError(
847
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
848
+ " only forward one of the two."
849
+ )
850
+ elif prompt is None and prompt_embeds is None:
851
+ raise ValueError(
852
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
853
+ )
854
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
855
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
856
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
857
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
858
+
859
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
860
+ raise ValueError(
861
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
862
+ )
863
+
864
+ if max_sequence_length is not None and max_sequence_length > 512:
865
+ raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")
866
+
867
+ @staticmethod
868
+ def _prepare_latent_image_ids(batch_size, height, width, device, dtype):
869
+ latent_image_ids = torch.zeros(height, width, 3)
870
+ latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height)[:, None]
871
+ latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width)[None, :]
872
+
873
+ latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape
874
+
875
+ latent_image_ids = latent_image_ids.reshape(
876
+ latent_image_id_height * latent_image_id_width, latent_image_id_channels
877
+ )
878
+
879
+ return latent_image_ids.to(device=device, dtype=dtype)
880
+
881
+ @staticmethod
882
+ def _pack_latents(latents, batch_size, num_channels_latents, height, width):
883
+ latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
884
+ latents = latents.permute(0, 2, 4, 1, 3, 5)
885
+ latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4)
886
+
887
+ return latents
888
+
889
+ @staticmethod
890
+ def _unpack_latents(latents, height, width, vae_scale_factor):
891
+ batch_size, num_patches, channels = latents.shape
892
+
893
+ # VAE applies 8x compression on images but we must also account for packing which requires
894
+ # latent height and width to be divisible by 2.
895
+ height = 2 * (int(height) // (vae_scale_factor * 2))
896
+ width = 2 * (int(width) // (vae_scale_factor * 2))
897
+
898
+ latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2)
899
+ latents = latents.permute(0, 3, 1, 4, 2, 5)
900
+
901
+ latents = latents.reshape(batch_size, channels // (2 * 2), height, width)
902
+
903
+ return latents
904
+
905
+ def prepare_latents(
906
+ self,
907
+ batch_size,
908
+ num_channels_latents,
909
+ height,
910
+ width,
911
+ dtype,
912
+ device,
913
+ generator,
914
+ latents=None,
915
+ ):
916
+ # VAE applies 8x compression on images but we must also account for packing which requires
917
+ # latent height and width to be divisible by 2.
918
+ height = 2 * (int(height) // (self.vae_scale_factor * 2))
919
+ width = 2 * (int(width) // (self.vae_scale_factor * 2))
920
+
921
+ shape = (batch_size, num_channels_latents, height, width)
922
+
923
+ if latents is not None:
924
+ latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype)
925
+ return latents.to(device=device, dtype=dtype), latent_image_ids
926
+
927
+ if isinstance(generator, list) and len(generator) != batch_size:
928
+ raise ValueError(
929
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
930
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
931
+ )
932
+
933
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
934
+ latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width)
935
+
936
+ latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype)
937
+
938
+ return latents, latent_image_ids
939
+
940
+ @property
941
+ def guidance_scale(self):
942
+ return self._guidance_scale
943
+
944
+ @property
945
+ def joint_attention_kwargs(self):
946
+ return self._joint_attention_kwargs
947
+
948
+ @property
949
+ def num_timesteps(self):
950
+ return self._num_timesteps
951
+
952
+ @property
953
+ def interrupt(self):
954
+ return self._interrupt
955
+
956
+ # @replace_example_docstring(EXAMPLE_DOC_STRING)
957
+ @torch.no_grad()
958
+ def __call__(
959
+ self,
960
+ prompt: Union[str, List[str]] = None,
961
+ prompt_2: Optional[Union[str, List[str]]] = None,
962
+ height: Optional[int] = None,
963
+ width: Optional[int] = None,
964
+ num_inference_steps: int = 28,
965
+ sigmas: Optional[List[float]] = None,
966
+ guidance_scale: float = 3.5,
967
+ num_images_per_prompt: Optional[int] = 1,
968
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
969
+ latents: Optional[torch.FloatTensor] = None,
970
+ prompt_embeds: Optional[torch.FloatTensor] = None,
971
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
972
+ output_type: Optional[str] = "pil",
973
+ return_dict: bool = True,
974
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
975
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
976
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
977
+ max_sequence_length: int = 512,
978
+ ):
979
+
980
+ height = height or self.default_sample_size * self.vae_scale_factor
981
+ width = width or self.default_sample_size * self.vae_scale_factor
982
+
983
+ # 1. Check inputs. Raise error if not correct
984
+ self.check_inputs(
985
+ prompt,
986
+ prompt_2,
987
+ height,
988
+ width,
989
+ prompt_embeds=prompt_embeds,
990
+ pooled_prompt_embeds=pooled_prompt_embeds,
991
+ callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
992
+ max_sequence_length=max_sequence_length,
993
+ )
994
+
995
+ self._guidance_scale = guidance_scale
996
+ self._joint_attention_kwargs = joint_attention_kwargs
997
+ self._interrupt = False
998
+
999
+ # 2. Define call parameters
1000
+ if prompt is not None and isinstance(prompt, str):
1001
+ batch_size = 1
1002
+ elif prompt is not None and isinstance(prompt, list):
1003
+ batch_size = len(prompt)
1004
+ else:
1005
+ batch_size = prompt_embeds.shape[0]
1006
+
1007
+ device = self._execution_device
1008
+
1009
+ lora_scale = (
1010
+ self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None
1011
+ )
1012
+ (
1013
+ prompt_embeds,
1014
+ pooled_prompt_embeds,
1015
+ text_ids,
1016
+ ) = self.encode_prompt(
1017
+ prompt=prompt,
1018
+ prompt_2=prompt_2,
1019
+ prompt_embeds=prompt_embeds,
1020
+ pooled_prompt_embeds=pooled_prompt_embeds,
1021
+ device=device,
1022
+ num_images_per_prompt=num_images_per_prompt,
1023
+ max_sequence_length=max_sequence_length,
1024
+ lora_scale=lora_scale,
1025
+ )
1026
+
1027
+ # 4. Prepare latent variables
1028
+ num_channels_latents = self.transformer.config.in_channels // 4
1029
+ latents, latent_image_ids = self.prepare_latents(
1030
+ batch_size * num_images_per_prompt,
1031
+ num_channels_latents,
1032
+ height,
1033
+ width,
1034
+ prompt_embeds.dtype,
1035
+ device,
1036
+ generator,
1037
+ latents,
1038
+ )
1039
+
1040
+ # 5. Prepare timesteps
1041
+ sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas
1042
+ image_seq_len = latents.shape[1]
1043
+ mu = calculate_shift(
1044
+ image_seq_len,
1045
+ self.scheduler.config.base_image_seq_len,
1046
+ self.scheduler.config.max_image_seq_len,
1047
+ self.scheduler.config.base_shift,
1048
+ self.scheduler.config.max_shift,
1049
+ )
1050
+ timesteps, num_inference_steps = retrieve_timesteps(
1051
+ self.scheduler,
1052
+ num_inference_steps,
1053
+ device,
1054
+ sigmas=sigmas,
1055
+ mu=mu,
1056
+ )
1057
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
1058
+ self._num_timesteps = len(timesteps)
1059
+
1060
+ # handle guidance
1061
+ if self.transformer.config.guidance_embeds:
1062
+ guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32)
1063
+ guidance = guidance.expand(latents.shape[0])
1064
+ else:
1065
+ guidance = None
1066
+
1067
+ # 6. Denoising loop
1068
+ for i, t in enumerate(timesteps):
1069
+ if self.interrupt:
1070
+ continue
1071
+
1072
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
1073
+ timestep = t.expand(latents.shape[0]).to(latents.dtype)
1074
+
1075
+ noise_pred = self.transformer(
1076
+ hidden_states=latents,
1077
+ timestep=timestep / 1000,
1078
+ guidance=guidance,
1079
+ pooled_projections=pooled_prompt_embeds,
1080
+ encoder_hidden_states=prompt_embeds,
1081
+ txt_ids=text_ids,
1082
+ img_ids=latent_image_ids,
1083
+ joint_attention_kwargs=self.joint_attention_kwargs,
1084
+ return_dict=False,
1085
+ )[0]
1086
+
1087
+ # compute the previous noisy sample x_t -> x_t-1
1088
+ latents_dtype = latents.dtype
1089
+ latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
1090
+
1091
+ if latents.dtype != latents_dtype:
1092
+ if torch.backends.mps.is_available():
1093
+ # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
1094
+ latents = latents.to(latents_dtype)
1095
+
1096
+ if callback_on_step_end is not None:
1097
+ callback_kwargs = {}
1098
+ for k in callback_on_step_end_tensor_inputs:
1099
+ callback_kwargs[k] = locals()[k]
1100
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1101
+
1102
+ latents = callback_outputs.pop("latents", latents)
1103
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1104
+
1105
+ if XLA_AVAILABLE:
1106
+ xm.mark_step()
1107
+
1108
+ if output_type == "latent":
1109
+ image = latents
1110
+
1111
+ else:
1112
+ latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
1113
+ latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
1114
+ image = self.vae.decode(latents, return_dict=False)[0]
1115
+ image = self.image_processor.postprocess(image, output_type=output_type)
1116
+
1117
+ # Offload all models
1118
+ self.maybe_free_model_hooks()
1119
+
1120
+ if not return_dict:
1121
+ return (image,)
1122
+
1123
+ return FluxPipelineOutput(images=image)
+
+ Pipeline = None
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.backends.cudnn.enabled = True
+ torch.backends.cudnn.benchmark = True
+
+ ckpt_id = "black-forest-labs/FLUX.1-schnell"
+ ckpt_revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+ def empty_cache():
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_max_memory_allocated()
+     torch.cuda.reset_peak_memory_stats()
+
+ def load_pipeline() -> Pipeline:
+     empty_cache()
+
+     dtype, device = torch.bfloat16, "cuda"
+
+     text_encoder_2 = T5EncoderModel.from_pretrained(
+         "city96/t5-v1_1-xxl-encoder-bf16", revision="1b9c856aadb864af93c1dcdc226c2774fa67bc86", torch_dtype=torch.bfloat16
+     ).to(memory_format=torch.channels_last)
+
+     # int8 weight-only quantized transformer, loaded from the local HF cache snapshot
+     path = os.path.join(HF_HUB_CACHE, "models--RobertML--FLUX.1-schnell-int8wo/snapshots/307e0777d92df966a3c0f99f31a6ee8957a9857a")
+     generator = torch.Generator(device=device)
+     model = FluxTransformer2DModel.from_pretrained(path, torch_dtype=dtype, use_safetensors=False, generator=generator).to(memory_format=torch.channels_last)
+     pipeline = FluxPipeline.from_pretrained(
+         ckpt_id,
+         revision=ckpt_revision,
+         transformer=model,
+         text_encoder_2=text_encoder_2,
+         torch_dtype=dtype,
+     ).to(device)
+     pipeline.vae = torch.compile(pipeline.vae)
+     # warm-up generations so torch.compile and CUDA kernels are ready before serving
+     for _ in range(3):
+         pipeline(prompt="blah blah waah waah oneshot oneshot gang gang", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
+
+     empty_cache()
+     return pipeline
+
+
+ @torch.no_grad()
+ def infer(request: TextToImageRequest, pipeline: Pipeline, generator: Generator) -> Image:
+     image = pipeline(
+         request.prompt,
+         generator=generator,
+         guidance_scale=0.0,
+         num_inference_steps=4,
+         max_sequence_length=256,
+         height=request.height,
+         width=request.width,
+         output_type="pil",
+     ).images[0]
+     return image
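load_pipeline() and infer() are the two entry points that src/main.py drives over the socket: the first loads the int8-weight-only transformer and the bf16 T5 encoder, compiles the VAE and warms everything up; the second runs a 4-step, guidance-free FLUX.1-schnell generation. A minimal sketch of calling them directly, without the socket server (the TextToImageRequest field names are the ones read by infer() and main.py; constructing it with keyword arguments is an assumption):

import torch
from pipelines.models import TextToImageRequest
from pipeline import load_pipeline, infer

pipeline = load_pipeline()                        # includes the three warm-up generations
generator = torch.Generator("cuda").manual_seed(0)
request = TextToImageRequest(prompt="a watercolor fox", width=1024, height=1024, seed=0)
image = infer(request, pipeline, generator)       # returns a PIL image
image.save("fox.jpg")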
uv.lock ADDED
The diff for this file is too large to render. See raw diff