manbeast3b committed
Commit 53e14ff · 0 Parent(s)

Initial commit
Files changed (8)
  1. .gitattributes +37 -0
  2. ko.pth +3 -0
  3. ok.pth +3 -0
  4. pyproject.toml +55 -0
  5. src/main.py +55 -0
  6. src/pipeline.py +95 -0
  7. src/utils.py +64 -0
  8. uv.lock +0 -0
.gitattributes ADDED
@@ -0,0 +1,37 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ RobertML.png filter=lfs diff=lfs merge=lfs -text
+ backup.png filter=lfs diff=lfs merge=lfs -text
ko.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2077712511cbeb96f4d0a6a0898b78345302ddaaf196d384f69c3d9c1adad6f9
+ size 4951464
ok.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b15aacc35d11e08803e9fdf07a2eed0c7f861250352f23e91cbeda4be07ad914
+ size 1800013
pyproject.toml ADDED
@@ -0,0 +1,55 @@
+ [build-system]
+ requires = ["setuptools >= 75.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "flux-schnell-edge-inference"
+ description = "An edge-maxxing model submission by RobertML for the 4090 Flux contest"
+ requires-python = ">=3.10,<3.13"
+ version = "8"
+ dependencies = [
+     "diffusers==0.31.0",
+     "transformers==4.46.2",
+     "accelerate==1.1.0",
+     "omegaconf==2.3.0",
+     "torch==2.5.1",
+     "protobuf==5.28.3",
+     "sentencepiece==0.2.0",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+     "gitpython>=3.1.43",
+     "hf_transfer==0.1.8",
+     "torchao==0.6.1",
+     "setuptools>=75.3.0",
+     "torchvision",
+ ]
+
+ [[tool.edge-maxxing.models]]
+ repository = "black-forest-labs/FLUX.1-schnell"
+ revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+ exclude = ["transformer"]
+
+ [[tool.edge-maxxing.models]]
+ repository = "RobertML/FLUX.1-schnell-int8wo"
+ revision = "307e0777d92df966a3c0f99f31a6ee8957a9857a"
+
+ [[tool.edge-maxxing.models]]
+ repository = "city96/t5-v1_1-xxl-encoder-bf16"
+ revision = "1b9c856aadb864af93c1dcdc226c2774fa67bc86"
+
+ [[tool.edge-maxxing.models]]
+ repository = "RobertML/FLUX.1-schnell-vae_e3m2"
+ revision = "da0d2cd7815792fb40d084dbd8ed32b63f153d8d"
+
+ [[tool.edge-maxxing.models]]
+ repository = "madebyollin/taef1"
+ revision = "2d552378e58c9c94201075708d7de4e1163b2689"
+
+ [[tool.edge-maxxing.models]]
+ repository = "manbeast3b/flux.1-schnell-full1"
+ revision = "cb1b599b0d712b9aab2c4df3ad27b050a27ec146"
+
+ [project.scripts]
+ start_inference = "main:main"
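The `[[tool.edge-maxxing.models]]` tables pin every upstream checkpoint to an exact revision, which is what lets `src/pipeline.py` load with `local_files_only=True`. As a hedged sketch (assuming the contest harness or the reader pre-populates the Hugging Face cache; not part of the submission), the same pins could be fetched with `huggingface_hub`:

```python
# Sketch: pre-download the pinned checkpoints so the pipeline can load offline.
# The repo/revision pairs mirror the tool.edge-maxxing.models tables above.
from huggingface_hub import snapshot_download

PINNED = [
    ("black-forest-labs/FLUX.1-schnell", "741f7c3ce8b383c54771c7003378a50191e9efe9"),
    ("RobertML/FLUX.1-schnell-int8wo", "307e0777d92df966a3c0f99f31a6ee8957a9857a"),
    ("city96/t5-v1_1-xxl-encoder-bf16", "1b9c856aadb864af93c1dcdc226c2774fa67bc86"),
    ("RobertML/FLUX.1-schnell-vae_e3m2", "da0d2cd7815792fb40d084dbd8ed32b63f153d8d"),
    ("madebyollin/taef1", "2d552378e58c9c94201075708d7de4e1163b2689"),
    ("manbeast3b/flux.1-schnell-full1", "cb1b599b0d712b9aab2c4df3ad27b050a27ec146"),
]

for repo_id, revision in PINNED:
    snapshot_download(repo_id, revision=revision)  # populates HF_HUB_CACHE
```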
src/main.py ADDED
@@ -0,0 +1,55 @@
+ import atexit
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+
+ import torch
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+
+ from pipeline import load_pipeline, infer
+
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def at_exit():
+     torch.cuda.empty_cache()
+
+
+ def main():
+     atexit.register(at_exit)
+
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+
+     print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+     # Remove any stale socket left over from a previous run before binding.
+     if exists(SOCKET):
+         remove(SOCKET)
+
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+             generator = torch.Generator("cuda")
+             while True:
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print("Inference socket exiting")
+                     return
+
+                 # Seed the generator per request so outputs are reproducible.
+                 image = infer(request, pipeline, generator.manual_seed(request.seed))
+
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+                 connection.send_bytes(data.getvalue())
+
+
+ if __name__ == "__main__":
+     main()
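For context, a minimal client for this socket protocol could look like the sketch below. The framing mirrors `main()` above (UTF-8 JSON request in, JPEG bytes out); the request fields shown are the ones `main()`/`infer()` actually read, but the authoritative schema is `pipelines.models.TextToImageRequest`.

```python
# Sketch of a client for the inference socket (assumes the server above is running
# and that inferences.sock sits in the current directory, i.e. the repo root).
import json
from multiprocessing.connection import Client

with Client("inferences.sock") as connection:
    request = {"prompt": "a photo of an astronaut riding a horse", "seed": 0,
               "width": 1024, "height": 1024}
    connection.send_bytes(json.dumps(request).encode("utf-8"))
    jpeg_bytes = connection.recv_bytes()
    with open("out.jpg", "wb") as f:
        f.write(jpeg_bytes)
```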
src/pipeline.py ADDED
@@ -0,0 +1,95 @@
+ from diffusers import (
+     AutoencoderTiny,
+     FluxPipeline,
+     FluxTransformer2DModel,
+ )
+ from huggingface_hub.constants import HF_HUB_CACHE
+ import torch
+ import torch._dynamo
+ import gc
+ import os
+ from PIL import Image
+ from torch import Generator
+ import torchvision
+ from pipelines.models import TextToImageRequest
+ from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only  # kept from the original; not called directly
+ from utils import _load
+
+ # preconfigs
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+ os.environ["TOKENIZERS_PARALLELISM"] = "True"
+ torch._dynamo.config.suppress_errors = True
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.backends.cudnn.enabled = True
+ torch.backends.cudnn.benchmark = True
+
+ # globals
+ ckpt_id = "black-forest-labs/FLUX.1-schnell"
+ ckpt_revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+ TinyVAE = "madebyollin/taef1"
+ TinyVAE_REV = "2d552378e58c9c94201075708d7de4e1163b2689"
+
+
+ def empty_cache():
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_max_memory_allocated()
+     torch.cuda.reset_peak_memory_stats()
+
+
+ def filter_state_dict(model, state_dict_path, prefix):
+     # Keep only entries whose de-prefixed key exists in `model` with a matching shape.
+     # (Unused here; utils._load performs the actual encoder/decoder loading.)
+     state_dict = torch.load(state_dict_path, map_location="cpu", weights_only=True)
+     target = model.state_dict()
+     return {k.removeprefix(prefix): v for k, v in state_dict.items()
+             if k.removeprefix(prefix) in target and v.size() == target[k.removeprefix(prefix)].size()}
+
+
+ def load_pipeline() -> FluxPipeline:
+     # Transformer snapshot pinned in pyproject.toml and pre-downloaded into the HF cache.
+     path = os.path.join(HF_HUB_CACHE, "models--manbeast3b--flux.1-schnell-full1/snapshots/cb1b599b0d712b9aab2c4df3ad27b050a27ec146/transformer")
+     transformer = FluxTransformer2DModel.from_pretrained(path, torch_dtype=torch.bfloat16, use_safetensors=False)
+
+     # Tiny VAE (taef1), with its encoder/decoder swapped for the weights in ko.pth/ok.pth.
+     vae = AutoencoderTiny.from_pretrained(
+         TinyVAE,
+         revision=TinyVAE_REV,
+         local_files_only=True,
+         torch_dtype=torch.bfloat16)
+     vae.encoder = _load(vae.encoder, "E", dtype=torch.bfloat16)
+     vae.decoder = _load(vae.decoder, "D", dtype=torch.bfloat16)
+
+     pipeline = FluxPipeline.from_pretrained(ckpt_id, revision=ckpt_revision, transformer=transformer, vae=vae, local_files_only=True, torch_dtype=torch.bfloat16)
+     pipeline.to("cuda")
+
+     # Optimize memory format
+     for component in [pipeline.text_encoder, pipeline.text_encoder_2, pipeline.transformer, pipeline.vae]:
+         component.to(memory_format=torch.channels_last)
+
+     pipeline.transformer = torch.compile(pipeline.transformer, mode="max-autotune", fullgraph=True)
+     pipeline.vae = torch.compile(pipeline.vae, mode="max-autotune", fullgraph=True)
+
+     # Two warmup passes so compilation/autotuning happens before the first request.
+     for _ in range(2):
+         pipeline(prompt="insensible, timbale, pothery, electrovital, centrodesmus", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
+     empty_cache()
+     return pipeline
+
+
+ sample = 1
+
+
+ @torch.no_grad()
+ def infer(request: TextToImageRequest, pipeline: FluxPipeline, generator: Generator) -> Image.Image:
+     global sample
+     if not sample:  # vestigial one-shot flag; `sample` starts at 1, so this branch never runs
+         sample = 1
+         empty_cache()
+     image = pipeline(request.prompt, generator=generator, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256, height=request.height, width=request.width, output_type="pt").images[0]
+     # Map the [0, 1] tensor output to [-1, 1] before converting to PIL.
+     return torchvision.transforms.functional.to_pil_image(image.to(torch.float32).mul_(2).sub_(1))
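The two warmup passes in `load_pipeline` exist so that `torch.compile(..., mode="max-autotune")` pays its compilation and autotuning cost before the first request arrives. A rough sketch for confirming the steady-state latency with CUDA events (the prompt here is a placeholder):

```python
# Sketch: time one compiled inference pass with CUDA events.
import torch

def time_once(pipeline, prompt: str) -> float:
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    pipeline(prompt=prompt, width=1024, height=1024, guidance_scale=0.0,
             num_inference_steps=4, max_sequence_length=256)
    end.record()
    torch.cuda.synchronize()  # wait for the GPU before reading the timer
    return start.elapsed_time(end)  # milliseconds

# e.g. time_once(load_pipeline(), "a red bicycle") after the warmup passes
```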
src/utils.py ADDED
@@ -0,0 +1,64 @@
+ import torch as t, torch.nn as nn
+
+ # Checkpoints for the tiny-VAE encoder/decoder, committed via LFS at the repo root.
+ e_sd_pt = "ko.pth"
+ d_sd_pt = "ok.pth"
+
+
+ def C(n_in, n_out, **kwargs):
+     # 3x3 convolution with padding, preserving spatial size.
+     return nn.Conv2d(n_in, n_out, 3, padding=1, **kwargs)
+
+
+ class Clamp(nn.Module):
+     def forward(self, x):
+         # Smoothly clamp latents to roughly [-3, 3].
+         return t.tanh(x / 3) * 3
+
+
+ class B(nn.Module):
+     # Residual block: three 3x3 convs plus a (projected if needed) skip connection.
+     def __init__(self, n_in, n_out):
+         super().__init__()
+         self.conv = nn.Sequential(C(n_in, n_out), nn.ReLU(), C(n_out, n_out), nn.ReLU(), C(n_out, n_out))
+         self.skip = nn.Conv2d(n_in, n_out, 1, bias=False) if n_in != n_out else nn.Identity()
+         self.fuse = nn.ReLU()
+
+     def forward(self, x):
+         return self.fuse(self.conv(x) + self.skip(x))
+
+
+ def E(latent_channels=4):
+     # Encoder: three stride-2 stages, 8x spatial downsampling.
+     return nn.Sequential(
+         C(3, 64), B(64, 64),
+         C(64, 64, stride=2, bias=False), B(64, 64), B(64, 64), B(64, 64),
+         C(64, 64, stride=2, bias=False), B(64, 64), B(64, 64), B(64, 64),
+         C(64, 64, stride=2, bias=False), B(64, 64), B(64, 64), B(64, 64),
+         C(64, latent_channels),
+     )
+
+
+ def D(latent_channels=16):
+     # Decoder: three 2x upsampling stages, 8x spatial upsampling.
+     return nn.Sequential(
+         Clamp(),
+         C(latent_channels, 48), nn.ReLU(), B(48, 48), B(48, 48),
+         nn.Upsample(scale_factor=2), C(48, 48, bias=False), B(48, 48), B(48, 48),
+         nn.Upsample(scale_factor=2), C(48, 48, bias=False), B(48, 48),
+         nn.Upsample(scale_factor=2), C(48, 48, bias=False), B(48, 48),
+         C(48, 3),
+     )
+
+
+ class M(nn.Module):
+     # Full tiny autoencoder; lm/ls define the latent (un)scaling used by sl/ul.
+     lm, ls = 3, 0.5
+
+     def __init__(s, ep="encoder.pth", dp="decoder.pth", lc=None):
+         super().__init__()
+         if lc is None: lc = s.glc(str(ep))
+         s.e, s.d = E(lc), D(lc)
+         def f(sd, mod, pfx):
+             # Load only the weights whose de-prefixed names and shapes match the module.
+             f_sd = {k.removeprefix(pfx): v for k, v in sd.items() if k.removeprefix(pfx) in mod.state_dict() and v.size() == mod.state_dict()[k.removeprefix(pfx)].size()}
+             mod.load_state_dict(f_sd, strict=False)
+         if ep: f(t.load(ep, map_location="cpu", weights_only=True), s.e, "encoder.")
+         if dp: f(t.load(dp, map_location="cpu", weights_only=True), s.d, "decoder.")
+         s.e.requires_grad_(False)
+         s.d.requires_grad_(False)
+
+     def glc(s, ep):
+         # Guess latent channel count from the checkpoint name (taef1/taesd3 use 16-channel latents).
+         return 16 if "taef1" in ep or "taesd3" in ep else 4
+
+     @staticmethod
+     def sl(x): return x.div(2 * M.lm).add(M.ls).clamp(0, 1)
+
+     @staticmethod
+     def ul(x): return x.sub(M.ls).mul(2 * M.lm)
+
+     def forward(s, x, rl=False):
+         l = s.e(x)
+         o = s.d(l)
+         return (o.clamp(0, 1), l) if rl else o.clamp(0, 1)
+
+
+ def filter_state_dict(model, name):
+     # Pick the encoder ("E") or decoder ("D") checkpoint and keep only matching entries.
+     state_dict = t.load(e_sd_pt if name == "E" else d_sd_pt, map_location="cpu", weights_only=True)
+     prefix = "encoder." if name == "E" else "decoder."
+     target = model.state_dict()
+     return {k.removeprefix(prefix): v for k, v in state_dict.items()
+             if k.removeprefix(prefix) in target and v.size() == target[k.removeprefix(prefix)].size()}
+
+
+ def _load(model, name, dtype=t.bfloat16):
+     # Rebuild the module from scratch (the incoming `model` is discarded) and load
+     # the matching weights, frozen, in the requested dtype.
+     model = E(16) if name == "E" else D(16)
+     model.load_state_dict(filter_state_dict(model, name), strict=False)
+     model.requires_grad_(False).to(dtype=dtype)
+     return model
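Each `nn.Upsample(scale_factor=2)` stage in `D` doubles the spatial size while the 3x3 convolutions preserve it, so three stages turn a 16-channel latent at 1/8 resolution into a 3-channel image. A quick shape check, as a sketch with random weights (run from `src/`, no checkpoint needed):

```python
# Sketch: verify the tiny decoder's 8x spatial upsampling with random weights.
import torch
from utils import D

decoder = D(16).eval()
latent = torch.randn(1, 16, 128, 128)  # 1024 / 8 = 128
with torch.no_grad():
    image = decoder(latent)
print(image.shape)  # torch.Size([1, 3, 1024, 1024])
```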
uv.lock ADDED
The diff for this file is too large to render.