manbeast3b committed
Commit 002abdb · 0 Parent(s)

Initial commit
.gitattributes ADDED
@@ -0,0 +1,37 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ RobertML.png filter=lfs diff=lfs merge=lfs -text
+ backup.png filter=lfs diff=lfs merge=lfs -text
pyproject.toml ADDED
@@ -0,0 +1,27 @@
+ [build-system]
+ requires = ["setuptools >= 75.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "flux-schnell-edge-inference"
+ description = "An edge-maxxing model submission for the 4090 Flux contest"
+ requires-python = ">=3.10,<3.13"
+ version = "7"
+ dependencies = [
+     "diffusers==0.31.0",
+     "transformers==4.46.2",
+     "accelerate==1.1.0",
+     "omegaconf==2.3.0",
+     "torch==2.5.1",
+     "protobuf==5.28.3",
+     "sentencepiece==0.2.0",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+     "gitpython>=3.1.43",
+     "torchao>=0.6.1",
+ ]
+
+ [tool.edge-maxxing]
+ models = ["black-forest-labs/FLUX.1-schnell", "RobertML/FLUX.1-schnell-int8wo", "city96/t5-v1_1-xxl-encoder-bf16", "RobertML/FLUX.1-schnell-vae_int8"]
+
+ [project.scripts]
+ start_inference = "main:main"
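Note: the [tool.edge-maxxing] table declares the Hub repositories this submission depends on, and [project.scripts] exposes main:main as the start_inference console command. Assuming the contest runner simply pre-fetches the listed models from the Hugging Face Hub (this commit does not show the runner), the equivalent manual step would look roughly like the sketch below; snapshot_download is used here as an illustrative stand-in, not a confirmed part of the edge-maxxing tooling.

# Hypothetical pre-fetch of the models declared under [tool.edge-maxxing].
from huggingface_hub import snapshot_download

for repo_id in [
    "black-forest-labs/FLUX.1-schnell",
    "RobertML/FLUX.1-schnell-int8wo",
    "city96/t5-v1_1-xxl-encoder-bf16",
    "RobertML/FLUX.1-schnell-vae_int8",
]:
    # Downloads (or reuses) a local snapshot of each repository.
    snapshot_download(repo_id)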
src/flux_schnell_edge_inference.egg-info/PKG-INFO ADDED
@@ -0,0 +1,15 @@
+ Metadata-Version: 2.1
+ Name: flux-schnell-edge-inference
+ Version: 7
+ Summary: An edge-maxxing model submission for the 4090 Flux contest
+ Requires-Python: <3.13,>=3.10
+ Requires-Dist: diffusers==0.31.0
+ Requires-Dist: transformers==4.46.2
+ Requires-Dist: accelerate==1.1.0
+ Requires-Dist: omegaconf==2.3.0
+ Requires-Dist: torch==2.5.1
+ Requires-Dist: protobuf==5.28.3
+ Requires-Dist: sentencepiece==0.2.0
+ Requires-Dist: edge-maxxing-pipelines@ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines
+ Requires-Dist: gitpython>=3.1.43
+ Requires-Dist: torchao>=0.6.1
src/flux_schnell_edge_inference.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,10 @@
+ README.md
+ pyproject.toml
+ src/main.py
+ src/pipeline.py
+ src/flux_schnell_edge_inference.egg-info/PKG-INFO
+ src/flux_schnell_edge_inference.egg-info/SOURCES.txt
+ src/flux_schnell_edge_inference.egg-info/dependency_links.txt
+ src/flux_schnell_edge_inference.egg-info/entry_points.txt
+ src/flux_schnell_edge_inference.egg-info/requires.txt
+ src/flux_schnell_edge_inference.egg-info/top_level.txt
src/flux_schnell_edge_inference.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
+
src/flux_schnell_edge_inference.egg-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ start_inference = main:main
src/flux_schnell_edge_inference.egg-info/requires.txt ADDED
@@ -0,0 +1,10 @@
+ diffusers==0.31.0
+ transformers==4.46.2
+ accelerate==1.1.0
+ omegaconf==2.3.0
+ torch==2.5.1
+ protobuf==5.28.3
+ sentencepiece==0.2.0
+ edge-maxxing-pipelines@ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines
+ gitpython>=3.1.43
+ torchao>=0.6.1
src/flux_schnell_edge_inference.egg-info/top_level.txt ADDED
@@ -0,0 +1,2 @@
+ main
+ pipeline
src/main.py ADDED
@@ -0,0 +1,56 @@
+ import atexit
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+
+ import torch
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+
+ from pipeline import load_pipeline, infer
+
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def at_exit():
+     torch.cuda.empty_cache()
+
+
+ def main():
+     atexit.register(at_exit)
+
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+
+     print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+     # Remove a stale socket file left over from a previous run
+     if exists(SOCKET):
+         remove(SOCKET)
+
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+
+             while True:
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print("Inference socket exiting")
+                     return
+
+                 image = infer(request, pipeline)
+
+                 # Serialize the image as JPEG and send it back over the socket
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+                 connection.send_bytes(data.getvalue())
+
+
+ if __name__ == '__main__':
+     main()
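Note: the server above speaks a simple protocol over multiprocessing.connection: each request is a JSON-encoded TextToImageRequest delivered with send_bytes, and the reply is raw JPEG bytes. A minimal client sketch follows; the request fields (prompt, seed, width, height) are assumptions inferred from how the request object is consumed in src/pipeline.py, since the actual pydantic model lives in the external edge-maxxing "pipelines" package.

# Minimal client sketch for the inference socket (assumed request schema).
import json
from multiprocessing.connection import Client

SOCKET = "inferences.sock"  # created at the repo root, per src/main.py

request = {
    "prompt": "a photo of an astronaut riding a horse",
    "seed": 42,
    "width": 1024,
    "height": 1024,
}

with Client(SOCKET) as connection:
    # The server parses this with TextToImageRequest.model_validate_json.
    connection.send_bytes(json.dumps(request).encode("utf-8"))
    jpeg_bytes = connection.recv_bytes()

with open("out.jpg", "wb") as f:
    f.write(jpeg_bytes)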
src/pipeline.py ADDED
@@ -0,0 +1,139 @@
+ import os
+ import gc
+
+ import torch
+ from torch import Generator
+ from PIL.Image import Image
+ from diffusers import AutoencoderKL, FluxPipeline
+ from pipelines.models import TextToImageRequest
+ from transformers import T5EncoderModel
+
+ # Allocator and precision settings (read before the first CUDA allocation)
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:False,garbage_collection_threshold:0.001"
+ os.environ["TOKENIZERS_PARALLELISM"] = "True"
+ torch.set_float32_matmul_precision("medium")
+
+ ckpt_id = "black-forest-labs/FLUX.1-schnell"
+ dtype = torch.bfloat16
+ Pipeline = FluxPipeline  # type alias used in the signatures below
+
+ # Configure CUDA settings
+ torch.backends.cudnn.benchmark = True
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.cuda.set_per_process_memory_fraction(0.999)
+
+
+ class BasicQuantization:
+     """Affine fake quantization: round-trip a tensor through a bits-wide integer grid."""
+
+     def __init__(self, bits=1):
+         self.bits = bits
+         self.qmin = -(2 ** (bits - 1))
+         self.qmax = 2 ** (bits - 1) - 1
+
+     def quantize_tensor(self, tensor):
+         scale = (tensor.max() - tensor.min()) / (self.qmax - self.qmin)
+         zero_point = self.qmin - torch.round(tensor.min() / scale)
+         qtensor = torch.round(tensor / scale + zero_point)
+         qtensor = torch.clamp(qtensor, self.qmin, self.qmax)
+         return (qtensor - zero_point) * scale, scale, zero_point
+
+
+ class ModelQuantization:
+     def __init__(self, model, bits=9):
+         self.model = model
+         self.quant = BasicQuantization(bits)
+
+     def quantize_model(self):
+         for _, module in self.model.named_modules():
+             if isinstance(module, torch.nn.Linear):
+                 # Note: nn.Linear exposes 'weight', not 'weightML', so this branch
+                 # never fires and weights are left untouched; only biases are quantized.
+                 if hasattr(module, 'weightML'):
+                     quantized_weight, _, _ = self.quant.quantize_tensor(module.weight)
+                     module.weight = torch.nn.Parameter(quantized_weight)
+                 if hasattr(module, 'bias') and module.bias is not None:
+                     quantized_bias, _, _ = self.quant.quantize_tensor(module.bias)
+                     module.bias = torch.nn.Parameter(quantized_bias)
+
+
+ def empty_cache():
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_max_memory_allocated()
+     torch.cuda.reset_peak_memory_stats()
+
+
+ def load_pipeline() -> Pipeline:
+     empty_cache()
+
+     # Load and quantize VAE
+     vae = AutoencoderKL.from_pretrained(ckpt_id, subfolder="vae", torch_dtype=dtype)
+     quantizer = ModelQuantization(vae)
+     quantizer.quantize_model()
+
+     text_encoder_2 = T5EncoderModel.from_pretrained(
+         "city96/t5-v1_1-xxl-encoder-bf16", torch_dtype=torch.bfloat16
+     )
+
+     # Initialize pipeline
+     pipeline = FluxPipeline.from_pretrained(
+         ckpt_id,
+         text_encoder_2=text_encoder_2,
+         vae=vae,
+         torch_dtype=dtype,
+     )
+
+     # Optimize memory format
+     for component in [pipeline.text_encoder, pipeline.text_encoder_2, pipeline.transformer, pipeline.vae]:
+         component.to(memory_format=torch.channels_last)
+
+     # Compile the VAE and keep it on-GPU while the other components are offloaded
+     pipeline.vae = torch.compile(pipeline.vae, fullgraph=True, dynamic=False, mode="max-autotune")
+     pipeline._exclude_from_cpu_offload = ["vae"]
+     pipeline.enable_sequential_cpu_offload()
+
+     # Warmup runs to trigger compilation and autotuning
+     empty_cache()
+     for _ in range(3):
+         pipeline(
+             prompt="posteroexternal, eurythmical, inspection, semicotton, specification, Mercatorial, ethylate, misprint",
+             width=1024,
+             height=1024,
+             guidance_scale=0.0,
+             num_inference_steps=4,
+             max_sequence_length=256,
+         )
+
+     return pipeline
+
+
+ _inference_count = 0
+
+
+ @torch.inference_mode()
+ def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
+     global _inference_count
+
+     # Clear on first inference
+     if _inference_count == 0:
+         empty_cache()
+
+     # Increment counter and empty cache every 4 inferences
+     _inference_count += 1
+     if _inference_count >= 4:
+         empty_cache()
+         _inference_count = 0
+
+     torch.cuda.reset_peak_memory_stats()
+     generator = Generator("cuda").manual_seed(request.seed)
+     return pipeline(
+         prompt=request.prompt,
+         generator=generator,
+         guidance_scale=0.0,
+         num_inference_steps=4,
+         max_sequence_length=256,
+         height=request.height,
+         width=request.width,
+         output_type="pil",
+     ).images[0]
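Note: BasicQuantization implements round-trip (fake) quantization: values are mapped onto a bits-wide integer grid and immediately dequantized, so tensors stay in floating point but lose precision. The standalone sketch below reproduces that round trip on a toy tensor so the induced error can be inspected; it is illustrative only and not part of the submission.

# Reproduce the affine round trip from BasicQuantization on a toy tensor.
import torch

bits = 9  # same default ModelQuantization passes to BasicQuantization
qmin, qmax = -(2 ** (bits - 1)), 2 ** (bits - 1) - 1

t = torch.randn(4, 4)
scale = (t.max() - t.min()) / (qmax - qmin)
zero_point = qmin - torch.round(t.min() / scale)
q = torch.clamp(torch.round(t / scale + zero_point), qmin, qmax)
dequant = (q - zero_point) * scale

# With 9 bits the reconstruction error is tiny; try bits=4 to see it grow.
print((t - dequant).abs().max())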
uv.lock ADDED
The diff for this file is too large to render.