Your Name committed
Commit 2cc59cd · 0 Parent(s)

Initial commit

Files changed (6)
  1. .gitattributes +36 -0
  2. pyproject.toml +43 -0
  3. quantization_map.json +1 -0
  4. src/main.py +50 -0
  5. src/pipeline.py +103 -0
  6. uv.lock +0 -0
.gitattributes ADDED
@@ -0,0 +1,36 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+
pyproject.toml ADDED
@@ -0,0 +1,43 @@
+ [build-system]
+ requires = ["setuptools >= 75.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "flux-schnell-edge-inference"
+ description = "An edge-maxxing model submission for the 4090 Flux contest"
+ requires-python = ">=3.10,<3.13"
+ version = "8"
+ dependencies = [
+     "diffusers==0.31.0",
+     "transformers==4.46.2",
+     "accelerate==1.1.0",
+     "omegaconf==2.3.0",
+     "torch==2.5.1",
+     "protobuf==5.28.3",
+     "sentencepiece==0.2.0",
+     "torchao==0.6.1",
+     "optimum-quanto",
+     "hf_transfer==0.1.8",
+     "setuptools==75.2.0",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+ ]
+
+ [[tool.edge-maxxing.models]]
+ repository = "black-forest-labs/FLUX.1-schnell"
+ revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+ exclude = ["transformer", "vae", "text_encoder_2"]
+
+ [[tool.edge-maxxing.models]]
+ repository = "RichardWilliam/XULF_T5_bf16"
+ revision = "63a3d9ef7b586655600ac9bd4e4747d038237761"
+
+ [[tool.edge-maxxing.models]]
+ repository = "RichardWilliam/XULF_Vae"
+ revision = "3ee225c539465c27adadec45c6e8af50a7397b7d"
+
+ [[tool.edge-maxxing.models]]
+ repository = "RichardWilliam/XULF_Transfomer"
+ revision = "6860c51af40329808f270e159a0d018559a1204f"
+
+ [project.scripts]
+ start_inference = "main:main"
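Each [[tool.edge-maxxing.models]] entry pins a repository to an exact revision, and the exclude list on FLUX.1-schnell skips the submodules that src/pipeline.py replaces at load time. A minimal sketch of how these entries could be prefetched with huggingface_hub; mapping exclude to ignore_patterns is an assumption for illustration, not the contest harness's documented behavior:

from huggingface_hub import snapshot_download

MODELS = [
    ("black-forest-labs/FLUX.1-schnell", "741f7c3ce8b383c54771c7003378a50191e9efe9",
     ["transformer/*", "vae/*", "text_encoder_2/*"]),  # excluded components (assumed mapping)
    ("RichardWilliam/XULF_T5_bf16", "63a3d9ef7b586655600ac9bd4e4747d038237761", None),
    ("RichardWilliam/XULF_Vae", "3ee225c539465c27adadec45c6e8af50a7397b7d", None),
    ("RichardWilliam/XULF_Transfomer", "6860c51af40329808f270e159a0d018559a1204f", None),
]

for repo_id, revision, ignore in MODELS:
    # Pin each download to the exact revision listed in pyproject.toml.
    snapshot_download(repo_id, revision=revision, ignore_patterns=ignore)

Pinning revisions this way makes the snapshot paths hard-coded in src/pipeline.py (models--.../snapshots/<revision>) resolvable at load time.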
quantization_map.json ADDED
@@ -0,0 +1 @@
+ {"encoder.block.0.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.0.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.0.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.0.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.0.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.0.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.0.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.1.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.1.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.1.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.1.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.1.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.1.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.1.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.2.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.2.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.2.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.2.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.2.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.2.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.2.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.3.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.3.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.3.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.3.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.3.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.3.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.3.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.4.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.4.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.4.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.4.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.4.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.4.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.4.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.5.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.5.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.5.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.5.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.5.layer.1.DenseReluDense.wi_0": {"weights": "qint8", 
"activations": "none"}, "encoder.block.5.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.5.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.6.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.6.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.6.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.6.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.6.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.6.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.6.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.7.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.7.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.7.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.7.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.7.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.7.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.7.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.8.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.8.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.8.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.8.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.8.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.8.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.8.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.9.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.9.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.9.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.9.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.9.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.9.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.9.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.10.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.10.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.10.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.10.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.10.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.10.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.10.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.11.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.11.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, 
"encoder.block.11.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.11.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.11.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.11.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.11.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.12.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.12.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.12.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.12.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.12.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.12.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.12.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.13.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.13.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.13.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.13.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.13.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.13.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.13.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.14.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.14.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.14.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.14.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.14.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.14.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.14.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.15.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.15.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.15.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.15.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.15.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.15.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.15.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.16.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.16.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.16.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.16.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.16.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.16.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, 
"encoder.block.16.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.17.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.17.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.17.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.17.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.17.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.17.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.17.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.18.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.18.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.18.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.18.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.18.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.18.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.18.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.19.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.19.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.19.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.19.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.19.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.19.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.19.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.20.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.20.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.20.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.20.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.20.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.20.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.20.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.21.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.21.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.21.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.21.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.21.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.21.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.21.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.22.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.22.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.22.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, 
"encoder.block.22.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.22.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.22.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.22.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}, "encoder.block.23.layer.0.SelfAttention.q": {"weights": "qint8", "activations": "none"}, "encoder.block.23.layer.0.SelfAttention.k": {"weights": "qint8", "activations": "none"}, "encoder.block.23.layer.0.SelfAttention.v": {"weights": "qint8", "activations": "none"}, "encoder.block.23.layer.0.SelfAttention.o": {"weights": "qint8", "activations": "none"}, "encoder.block.23.layer.1.DenseReluDense.wi_0": {"weights": "qint8", "activations": "none"}, "encoder.block.23.layer.1.DenseReluDense.wi_1": {"weights": "qint8", "activations": "none"}, "encoder.block.23.layer.1.DenseReluDense.wo": {"weights": "qint8", "activations": "none"}}
src/main.py ADDED
@@ -0,0 +1,50 @@
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+
+ from pipeline import load_pipeline, infer
+
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def main():
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+
+     print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+     # Remove a stale socket left over from a previous run.
+     if exists(SOCKET):
+         remove(SOCKET)
+
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+
+             while True:
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print("Inference socket exiting")
+
+                     return
+
+                 image = infer(request, pipeline)
+
+                 # Encode the result as JPEG and send the raw bytes back.
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+
+                 packet = data.getvalue()
+
+                 connection.send_bytes(packet)
+
+
+ if __name__ == '__main__':
+     main()
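main() serves one connection at a time over the Unix socket: each request is a JSON-serialized TextToImageRequest and each reply is the JPEG-encoded image bytes. A minimal client sketch, assuming the same pipelines.models package is importable; the field names are inferred from their use in src/pipeline.py:

from io import BytesIO
from multiprocessing.connection import Client

from PIL import Image
from pipelines.models import TextToImageRequest

# Connect to the socket created by main(); the path must match SOCKET
# in src/main.py (one directory above src/).
with Client("inferences.sock") as connection:
    request = TextToImageRequest(prompt="a red bicycle", seed=0, width=1024, height=1024)
    connection.send_bytes(request.model_dump_json().encode("utf-8"))

    # The server replies with the JPEG bytes of the generated image.
    image = Image.open(BytesIO(connection.recv_bytes()))
    image.save("out.jpg")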
src/pipeline.py ADDED
@@ -0,0 +1,103 @@
+ # Flux optimization pipeline
+ import os
+ import torch
+ import torch._dynamo
+ import gc
+
+
+ from huggingface_hub.constants import HF_HUB_CACHE
+ from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
+
+ from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only
+ from torch import Generator
+ from diffusers import FluxTransformer2DModel, DiffusionPipeline
+
+ from PIL.Image import Image
+ from diffusers import FluxPipeline, AutoencoderKL, AutoencoderTiny
+ from pipelines.models import TextToImageRequest
+ from optimum.quanto import requantize
+ import json
+ import transformers
+
+
+ # Tolerate torch.compile failures and let the CUDA allocator grow segments on demand.
+ torch._dynamo.config.suppress_errors = True
+ os.environ['PYTORCH_CUDA_ALLOC_CONF'] = "expandable_segments:True"
+ os.environ["TOKENIZERS_PARALLELISM"] = "True"
+
+ CHECKPOINT = "black-forest-labs/FLUX.1-schnell"
+ REVISION = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+ Pipeline = None  # placeholder used only as a return-type annotation below
+ apply_quanto = 1
+
+ def reset_cache():
+     # Free Python garbage and return cached CUDA blocks before each request.
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_max_memory_allocated()
+     torch.cuda.reset_peak_memory_stats()
+
+ def load_quanto_text_encoder_2(text_repo_path):
+     with open("quantization_map.json", "r") as f:
+         quantization_map = json.load(f)
+     with open(os.path.join(text_repo_path, "config.json"), "r") as f:
+         t5_config = transformers.T5Config(**json.load(f))
+     # Build the encoder skeleton on the meta device so no memory is allocated yet.
+     with torch.device("meta"):
+         text_encoder_2 = transformers.T5EncoderModel(t5_config).to(torch.bfloat16)
+     # NOTE: state_dict is never populated here, so requantize() fails and
+     # load_pipeline() falls back to the bf16 download in its except branch.
+     state_dict = None
+     requantize(text_encoder_2, state_dict, quantization_map, device=torch.device("cuda"))
+     return text_encoder_2
+
+ def load_pipeline() -> Pipeline:
+
+     try:
+         text_repo_path = os.path.join(HF_HUB_CACHE, "models--RichardWilliam--XULF_T5_bf16/snapshots/63a3d9ef7b586655600ac9bd4e4747d038237761")
+         text_encoder_2 = load_quanto_text_encoder_2(text_repo_path=text_repo_path)
+     except Exception:
+         text_encoder_2 = T5EncoderModel.from_pretrained("RichardWilliam/XULF_T5_bf16",
+                                                         revision="63a3d9ef7b586655600ac9bd4e4747d038237761",
+                                                         torch_dtype=torch.bfloat16).to(memory_format=torch.channels_last)
+
+     origin_vae = AutoencoderTiny.from_pretrained("RichardWilliam/XULF_Vae",
+                                                  revision="3ee225c539465c27adadec45c6e8af50a7397b7d",
+                                                  torch_dtype=torch.bfloat16)
+
+
+     main_path = os.path.join(HF_HUB_CACHE, "models--RichardWilliam--XULF_Transfomer/snapshots/6860c51af40329808f270e159a0d018559a1204f")
+     origin_trans = FluxTransformer2DModel.from_pretrained(main_path,
+                                                           torch_dtype=torch.bfloat16,
+                                                           use_safetensors=False).to(memory_format=torch.channels_last)
+     transformer = origin_trans
+
+     pipeline = DiffusionPipeline.from_pretrained(CHECKPOINT,
+                                                  revision=REVISION,
+                                                  vae=origin_vae,
+                                                  transformer=transformer,
+                                                  text_encoder_2=text_encoder_2,
+                                                  torch_dtype=torch.bfloat16)
+     pipeline.to("cuda")
+
+     # Warm-up: run the full pipeline a few times so CUDA kernels and caches
+     # are primed before timed requests arrive.
+     for __ in range(3):
+         pipeline(prompt="sweet, subordinative, gender, mormyre, arteriolosclerosis, positivism, Antiochianism, palmerite",
+                  width=1024,
+                  height=1024,
+                  guidance_scale=0.0,
+                  num_inference_steps=4,
+                  max_sequence_length=256)
+     return pipeline
+
+ @torch.no_grad()
+ def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
+
+     reset_cache()
+
+     # Seed a per-request generator so outputs are reproducible.
+     generator = Generator(pipeline.device).manual_seed(request.seed)
+
+     return pipeline(
+         request.prompt,
+         generator=generator,
+         guidance_scale=0.0,
+         num_inference_steps=4,
+         max_sequence_length=256,
+         height=request.height,
+         width=request.width,
+     ).images[0]
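Since load_quanto_text_encoder_2 calls requantize with state_dict=None, the try branch in load_pipeline is expected to fail and fall through to the bf16 download. For reference, a sketch of the optimum.quanto reload pattern with a real state dict; the state_dict.safetensors file name is an assumption, as this commit ships only the quantization map:

import json
import torch
import transformers
from safetensors.torch import load_file
from optimum.quanto import requantize

# Assumes the quantized weights were saved offline (e.g. with safetensors
# save_file) next to quantization_map.json; neither file name is in this commit.
state_dict = load_file("state_dict.safetensors")
with open("quantization_map.json") as f:
    qmap = json.load(f)

# Rebuild the encoder skeleton on the meta device (no memory allocated),
# then let requantize() materialize the int8 weights directly on the GPU.
config = transformers.T5Config.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", subfolder="text_encoder_2")
with torch.device("meta"):
    t5 = transformers.T5EncoderModel(config).to(torch.bfloat16)
requantize(t5, state_dict, qmap, device=torch.device("cuda"))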
uv.lock ADDED
The diff for this file is too large to render. See raw diff