manbeast3b committed
Commit 28019a8 · 0 Parent(s)

Initial commit

Files changed (9):
  1. .gitattributes +5 -0
  2. README.md +19 -0
  3. decoder.pth +3 -0
  4. encoder.pth +3 -0
  5. pyproject.toml +28 -0
  6. src/main.py +59 -0
  7. src/model.py +137 -0
  8. src/pipeline.py +79 -0
  9. uv.lock +0 -0
.gitattributes ADDED
@@ -0,0 +1,5 @@
+ src/taef1_decoder_only.pth filter=lfs diff=lfs merge=lfs -text
+ taef1_decoder_only.pth filter=lfs diff=lfs merge=lfs -text
+ decoder.pth filter=lfs diff=lfs merge=lfs -text
+ encoder.pth filter=lfs diff=lfs merge=lfs -text
+ backup.png filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,19 @@
+ # flux-schnell-edge-inference
+
+ This holds the baseline for the FLUX Schnell NVIDIA GeForce RTX 4090 contest, which can be forked freely and optimized.
+
+ Some recommendations are as follows:
+ - Dependencies, including git dependencies, should be declared in `pyproject.toml`
+ - HuggingFace models should be listed in the `models` array of `pyproject.toml`; they will be downloaded before benchmarking
+ - The pipeline does **not** have internet access, so all dependencies and models must be included in `pyproject.toml`
+ - Compiled models should be hosted on HuggingFace and listed in the `models` array (rather than compiled during loading). Loading time matters far more than file size
+ - Avoid changing `src/main.py`, as it contains mostly protocol logic; most changes belong in `models` and `src/pipeline.py`
+ - Ensure the entire repository (excluding dependencies and HuggingFace models) stays under 16 MB
+
+ For testing, you need a Docker container with PyTorch and Ubuntu 22.04.
+ You can download your listed dependencies with `uv`, installed with:
+ ```bash
+ pipx ensurepath
+ pipx install uv
+ ```
+ You can then relock with `uv lock` and run with `uv run start_inference`.
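
Because the benchmarking sandbox has no internet access, everything `from_pretrained` touches must resolve from the local HuggingFace cache. A minimal offline smoke test sketch (the `local_files_only` flag is standard `diffusers`/`huggingface_hub` behavior; the model ID is the one this repository lists):

```python
import torch
from diffusers import FluxPipeline

# Load strictly from the local cache: this fails fast if a model listed in
# pyproject.toml's `models` array was not pre-downloaded, which is exactly
# what would happen inside the offline benchmarking sandbox.
pipeline = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",
    torch_dtype=torch.bfloat16,
    local_files_only=True,
)
```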
decoder.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4142638173f27b872a916b2a6b599fe311f22fe3fd84c77079af5bc114928490
+ size 1800500
encoder.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c10a3d71556ddc04705f276bcffeaed53072e1342502a4d121ed99f585640e60
+ size 4940983
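
These two files are Git LFS pointers: the repository stores only each object's SHA-256 and byte size, and the actual weights live in LFS storage. A small sketch for checking a fetched file against its pointer (the oid and size below are copied from the `decoder.pth` pointer above; the helper function itself is just illustrative):

```python
import hashlib
from pathlib import Path

def verify_lfs_object(path: str, oid: str, size: int) -> bool:
    """Check a downloaded file against the oid/size recorded in its LFS pointer."""
    data = Path(path).read_bytes()
    return len(data) == size and hashlib.sha256(data).hexdigest() == oid

# Values copied from the decoder.pth pointer above.
print(verify_lfs_object(
    "decoder.pth",
    "4142638173f27b872a916b2a6b599fe311f22fe3fd84c77079af5bc114928490",
    1800500,
))
```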
pyproject.toml ADDED
@@ -0,0 +1,28 @@
+ [build-system]
+ requires = ["setuptools >= 75.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "flux-schnell-edge-inference"
+ description = "An edge-maxxing model submission for the 4090 Flux contest"
+ requires-python = ">=3.10,<3.13"
+ version = "7"
+ dependencies = [
+     "diffusers==0.31.0",
+     "transformers==4.46.2",
+     "accelerate==1.1.0",
+     "omegaconf==2.3.0",
+     "torch==2.5.1",
+     "protobuf==5.28.3",
+     "sentencepiece==0.2.0",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+     "gitpython>=3.1.43",
+     "torchao>=0.6.1",
+     "torchvision"
+ ]
+
+ [tool.edge-maxxing]
+ models = ["black-forest-labs/FLUX.1-schnell", "madebyollin/taef1"]
+
+ [project.scripts]
+ start_inference = "main:main"
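
The `[tool.edge-maxxing] models` array names the HuggingFace repositories the contest harness fetches before the sandbox goes offline. Conceptually that amounts to something like the following (a sketch of the assumed harness behavior, not its actual code; `snapshot_download` is the standard `huggingface_hub` call):

```python
from huggingface_hub import snapshot_download

# Assumed pre-benchmark step: pull every repo named in the `models` array
# into the local cache so from_pretrained() can later run with no network.
for repo_id in ["black-forest-labs/FLUX.1-schnell", "madebyollin/taef1"]:
    snapshot_download(repo_id=repo_id)
```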
src/main.py ADDED
@@ -0,0 +1,59 @@
+ import atexit
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+
+ import torch
+
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+
+ from pipeline import load_pipeline, infer
+
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def at_exit():
+     torch.cuda.empty_cache()
+
+
+ def main():
+     atexit.register(at_exit)
+
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+
+     print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+     if exists(SOCKET):
+         remove(SOCKET)
+
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+
+             while True:
+                 try:
+                     # Each request arrives as UTF-8 JSON over the Unix socket
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print("Inference socket exiting")
+                     return
+
+                 image = infer(request, pipeline)
+
+                 # Reply with the JPEG-encoded image bytes
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+                 connection.send_bytes(data.getvalue())
+
+
+ if __name__ == '__main__':
+     main()
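
The protocol is thin: one message in (a JSON-serialized `TextToImageRequest`), one message out (raw JPEG bytes). A minimal client sketch for exercising the server locally; the request fields shown are inferred from how `pipeline.py` uses them, so treat them as assumptions:

```python
import json
from multiprocessing.connection import Client

# Connect to the Unix socket that src/main.py creates at the repository root.
with Client("inferences.sock") as connection:
    request = {
        "prompt": "a photo of an astronaut riding a horse",
        "seed": 42,       # fields inferred from pipeline.py's usage of the request
        "width": 1024,
        "height": 1024,
    }
    connection.send_bytes(json.dumps(request).encode("utf-8"))
    jpeg_bytes = connection.recv_bytes()
    with open("out.jpg", "wb") as f:
        f.write(jpeg_bytes)
```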
src/model.py ADDED
@@ -0,0 +1,137 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+
+ def conv(n_in, n_out, **kwargs):
+     return nn.Conv2d(n_in, n_out, 3, padding=1, **kwargs)
+
+
+ class Clamp(nn.Module):
+     """Soft-clamp activations to roughly [-3, 3]."""
+     def forward(self, x):
+         return torch.tanh(x / 3) * 3
+
+
+ class Block(nn.Module):
+     """Residual block: three 3x3 convs with a 1x1 (or identity) skip."""
+     def __init__(self, n_in, n_out):
+         super().__init__()
+         self.conv = nn.Sequential(conv(n_in, n_out), nn.ReLU(), conv(n_out, n_out), nn.ReLU(), conv(n_out, n_out))
+         self.skip = nn.Conv2d(n_in, n_out, 1, bias=False) if n_in != n_out else nn.Identity()
+         self.fuse = nn.ReLU()
+
+     def forward(self, x):
+         return self.fuse(self.conv(x) + self.skip(x))
+
+
+ def Encoder(latent_channels=4):
+     # Three stride-2 convs: downsamples 8x from pixels to latents
+     return nn.Sequential(
+         conv(3, 64), Block(64, 64),
+         conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
+         conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
+         conv(64, 64, stride=2, bias=False), Block(64, 64), Block(64, 64), Block(64, 64),
+         conv(64, latent_channels),
+     )
+
+
+ class DCAH(nn.Module):
+     """Dilated-convolution attention head used to refine decoded images."""
+     def __init__(self, in_channels, embed_dim=64, dilation_rates=(1, 2, 4)):
+         super().__init__()
+         self.in_channels = in_channels
+         self.embed_dim = embed_dim
+         # Parallel dilated convs capture context at several receptive-field sizes
+         self.dilated_convs = nn.ModuleList([
+             nn.Conv2d(in_channels, embed_dim, kernel_size=3, padding=rate, dilation=rate)
+             for rate in dilation_rates
+         ])
+         self.dilated_conv_merge = nn.Conv2d(embed_dim * len(dilation_rates), embed_dim, kernel_size=1)
+         self.query = nn.Conv2d(embed_dim, embed_dim, kernel_size=1)
+         self.key = nn.Conv2d(embed_dim, embed_dim, kernel_size=1)
+         self.value = nn.Conv2d(embed_dim, embed_dim, kernel_size=1)
+         self.refine = nn.Sequential(
+             nn.Conv2d(embed_dim, embed_dim, kernel_size=3, padding=1),
+             nn.ReLU(),
+             nn.Conv2d(embed_dim, in_channels, kernel_size=1),
+         )
+
+     def forward(self, x):
+         dilated_features = [dilated(x) for dilated in self.dilated_convs]
+         concat_features = torch.cat(dilated_features, dim=1)
+         global_context = self.dilated_conv_merge(concat_features)
+         q = self.query(global_context)
+         k = self.key(global_context)
+         v = self.value(global_context)
+         # Channel-wise attention: (B, C, HW) x (B, HW, C) -> (B, C, C)
+         attention = F.softmax(torch.matmul(q.flatten(2), k.flatten(2).transpose(-2, -1)), dim=-1)
+         attention_out = torch.matmul(attention, v.flatten(2)).view_as(global_context)
+         refined = self.refine(global_context + attention_out)
+         return refined
+
+
+ def DecoderSeq(latent_channels=16):
+     # Three nearest-neighbor upsamples: 8x back from latents to pixels
+     return nn.Sequential(
+         Clamp(),
+         conv(latent_channels, 48),
+         nn.ReLU(),
+         Block(48, 48), Block(48, 48),
+         nn.Upsample(scale_factor=2), conv(48, 48, bias=False),
+         Block(48, 48), Block(48, 48),
+         nn.Upsample(scale_factor=2), conv(48, 48, bias=False),
+         Block(48, 48),
+         nn.Upsample(scale_factor=2), conv(48, 48, bias=False),
+         Block(48, 48),
+         conv(48, 3),
+     )
+
+
+ class Decoder(nn.Module):
+     """Convolutional decoder followed by the DCAH refinement head."""
+     def __init__(self, latent_channels=16):
+         super().__init__()
+         self.decoder = DecoderSeq(latent_channels=latent_channels)
+         self.refinement_head = DCAH(in_channels=3, embed_dim=64)
+
+     def forward(self, x):
+         decoded = self.decoder(x)
+         refined = self.refinement_head(decoded)
+         return refined
+
+
+ class Model(nn.Module):
+     latent_magnitude = 3
+     latent_shift = 0.5
+
+     def __init__(self, encoder_path="encoder.pth", decoder_path="decoder.pth", latent_channels=None):
+         super().__init__()
+         if latent_channels is None:
+             latent_channels = self.guess_latent_channels(str(encoder_path))
+         self.encoder = Encoder(latent_channels)
+         self.decoder = Decoder(latent_channels)
+         if encoder_path is not None:
+             encoder_state_dict = torch.load(encoder_path, map_location="cpu", weights_only=True)
+             target = self.encoder.state_dict()
+             # Drop the "encoder." prefix and keep only shape-compatible tensors
+             filtered_state_dict = {
+                 k.removeprefix("encoder."): v
+                 for k, v in encoder_state_dict.items()
+                 if k.removeprefix("encoder.") in target and v.size() == target[k.removeprefix("encoder.")].size()
+             }
+             self.encoder.load_state_dict(filtered_state_dict, strict=False)
+
+         if decoder_path is not None:
+             decoder_state_dict = torch.load(decoder_path, map_location="cpu", weights_only=True)
+             target = self.decoder.state_dict()
+             filtered_state_dict = {k: v for k, v in decoder_state_dict.items() if k in target and v.size() == target[k].size()}
+             self.decoder.load_state_dict(filtered_state_dict, strict=False)
+
+         # Inference-only: freeze all parameters
+         self.encoder.requires_grad_(False)
+         self.decoder.decoder.requires_grad_(False)
+         self.decoder.refinement_head.requires_grad_(False)
+
+     def guess_latent_channels(self, encoder_path):
+         if "taef1" in encoder_path:
+             return 16
+         if "taesd3" in encoder_path:
+             return 16
+         return 4
+
+     @staticmethod
+     def scale_latents(x):
+         """Map raw latents from [-6, 6] into [0, 1]."""
+         return x.div(2 * Model.latent_magnitude).add(Model.latent_shift).clamp(0, 1)
+
+     @staticmethod
+     def unscale_latents(x):
+         """Map [0, 1] latents back to [-6, 6]."""
+         return x.sub(Model.latent_shift).mul(2 * Model.latent_magnitude)
+
+     def forward(self, x, return_latent=False):
+         latent = self.encoder(x)
+         out = self.decoder(latent)
+         if return_latent:
+             return out.clamp(0, 1), latent
+         return out.clamp(0, 1)
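
A minimal usage sketch of the model above (assuming `encoder.pth` and `decoder.pth` sit in the working directory; the shapes follow from the three stride-2 and three upsample stages):

```python
import torch
from model import Model

model = Model("encoder.pth", "decoder.pth")  # path lacks "taef1", so 4 latent channels are guessed
model.eval()

x = torch.rand(1, 3, 256, 256)
with torch.no_grad():
    recon, latent = model(x, return_latent=True)

print(latent.shape)  # torch.Size([1, 4, 32, 32]) -- 8x spatial downsample
print(recon.shape)   # torch.Size([1, 3, 256, 256])

# Round-trip of the latent scaling used to store latents in [0, 1];
# scale_latents clamps, so it matches the latent clamped to [-6, 6].
assert torch.allclose(
    Model.unscale_latents(Model.scale_latents(latent)),
    latent.clamp(-6, 6),
    atol=1e-5,
)
```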
src/pipeline.py ADDED
@@ -0,0 +1,79 @@
+ import gc
+
+ import torch
+ import torchvision
+ from diffusers import AutoencoderTiny, FluxPipeline
+ from PIL.Image import Image
+ from torch import Generator
+ from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only
+
+ from pipelines.models import TextToImageRequest
+ from model import Model, Decoder
+
+ MODEL_ID = "black-forest-labs/FLUX.1-schnell"
+ DTYPE = torch.bfloat16
+ Pipeline = FluxPipeline  # alias used in the type annotations below
+
+
+ def clear():
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_max_memory_allocated()
+     torch.cuda.reset_peak_memory_stats()
+
+
+ def load_pipeline() -> Pipeline:
+     clear()
+
+     # Alternative: fully custom VAE from model.py (encoder + decoder)
+     # vae = Model("encoder.pth", "decoder.pth")
+     # vae.to(dtype=DTYPE)
+
+     # Swap the stock taef1 decoder for the custom Decoder (with DCAH refinement)
+     vae = AutoencoderTiny.from_pretrained("madebyollin/taef1")
+     vae.decoder = Decoder(16)
+     decoder_state_dict = torch.load("decoder.pth", map_location="cpu", weights_only=True)
+     target = vae.decoder.state_dict()
+     filtered_state_dict = {k: v for k, v in decoder_state_dict.items() if k in target and v.size() == target[k].size()}
+     vae.decoder.load_state_dict(filtered_state_dict, strict=False)
+     vae.decoder.requires_grad_(False)
+     vae.to(dtype=DTYPE)
+
+     # Quantization options tried via torchao, currently disabled:
+     # quantize_(vae, fpx_weight_only(3, 2))
+     # quantize_(vae, int8_weight_only())
+     pipeline = FluxPipeline.from_pretrained(MODEL_ID, vae=vae, torch_dtype=DTYPE)
+
+     torch.backends.cudnn.benchmark = True
+     torch.backends.cuda.matmul.allow_tf32 = True
+     torch.cuda.set_per_process_memory_fraction(0.99)
+
+     pipeline.text_encoder.to(memory_format=torch.channels_last)
+     pipeline.text_encoder_2.to(memory_format=torch.channels_last)
+     pipeline.transformer.to(memory_format=torch.channels_last)
+     pipeline.vae.to(memory_format=torch.channels_last)
+     pipeline.vae = torch.compile(pipeline.vae)
+
+     # Keep the (small) VAE resident on the GPU while everything else offloads
+     pipeline._exclude_from_cpu_offload = ["vae"]
+     pipeline.enable_sequential_cpu_offload()
+     torch.jit.enable_onednn_fusion(True)
+     clear()
+
+     # Warm-up pass: triggers torch.compile and cuDNN autotuning before benchmarking
+     pipeline(
+         prompt="unpervaded, unencumber, froggish, groundneedle, transnatural, fatherhood, outjump, cinerator",
+         width=1024,
+         height=1024,
+         guidance_scale=0.1,
+         num_inference_steps=4,
+         max_sequence_length=256,
+     )
+     return pipeline
+
+
+ first_request = True
+
+
+ @torch.inference_mode()
+ def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
+     global first_request
+     if first_request:
+         # Drop the warm-up allocations once, on the first real request
+         clear()
+         first_request = False
+
+     torch.cuda.reset_peak_memory_stats()
+     generator = Generator("cuda").manual_seed(request.seed)
+     # with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
+     image = pipeline(
+         request.prompt,
+         generator=generator,
+         guidance_scale=0.0,
+         num_inference_steps=4,
+         max_sequence_length=256,
+         height=request.height,
+         width=request.width,
+         output_type="pt",
+     ).images[0]
+     # Map the [0, 1] tensor to [-1, 1] before PIL conversion
+     return torchvision.transforms.functional.to_pil_image(image.to(torch.float32).mul_(2).sub_(1))
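
The warm-up call in `load_pipeline` exists because `torch.compile` and cuDNN autotuning pay their cost on the first invocation. A self-contained illustration of that pattern, using a toy module in place of the VAE (assumes a CUDA device; timings will vary):

```python
import time
import torch

# Toy stand-in for the compiled VAE.
net = torch.nn.Sequential(
    torch.nn.Conv2d(3, 16, 3, padding=1),
    torch.nn.ReLU(),
    torch.nn.Conv2d(16, 3, 3, padding=1),
).cuda()
net = torch.compile(net)
x = torch.randn(1, 3, 256, 256, device="cuda")

for label in ("first call (compiles)", "second call (cached)"):
    torch.cuda.synchronize()
    start = time.perf_counter()
    net(x)
    torch.cuda.synchronize()
    print(label, f"{time.perf_counter() - start:.3f}s")
```

Running the same warm-up shape as the benchmark matters: a new input shape can trigger recompilation, which is why the baseline warms up at 1024x1024.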
uv.lock ADDED
The diff for this file is too large to render. See raw diff