sharper740 committed
Commit 2ebd8c7 · verified · 1 Parent(s): e8ae667

Upload folder using huggingface_hub

Files changed (5)
  1. .gitattributes +1 -0
  2. pyproject.toml +40 -0
  3. src/main.py +54 -0
  4. src/pipeline.py +215 -0
  5. uv.lock +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+
pyproject.toml ADDED
@@ -0,0 +1,40 @@
+[build-system]
+requires = ["setuptools >= 75.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "flux-schnell-edge-inference"
+description = "Optimization"
+requires-python = ">=3.10,<3.13"
+version = "8"
+dependencies = [
+    "diffusers==0.31.0",
+    "transformers==4.46.2",
+    "accelerate==1.1.0",
+    "omegaconf==2.3.0",
+    "torch==2.5.1",
+    "protobuf==5.28.3",
+    "sentencepiece==0.2.0",
+    "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+    "gitpython>=3.1.43",
+    "hf_transfer==0.1.8",
+    "torchao==0.6.1",
+]
+
+[[tool.edge-maxxing.models]]
+repository = "black-forest-labs/FLUX.1-schnell"
+revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+
+[[tool.edge-maxxing.models]]
+repository = "city96/t5-v1_1-xxl-encoder-bf16"
+revision = "1b9c856aadb864af93c1dcdc226c2774fa67bc86"
+
+[[tool.edge-maxxing.models]]
+repository = "park234/FLUX1-SCHENELL-INT8"
+revision = "59c2f006f045d9ccdc2e3ab02150b8df0adfafc6"
+
+[project.scripts]
+start_inference = "main:main"
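The three `[[tool.edge-maxxing.models]]` entries pin exact revisions because `src/pipeline.py` later resolves the INT8 transformer snapshot by path inside the Hugging Face cache, so the snapshots must already be present locally. A minimal pre-fetch sketch (illustrative, not part of this commit) using the standard `huggingface_hub.snapshot_download` API:

```python
# Hypothetical pre-download step: materialize the pinned revisions from
# [[tool.edge-maxxing.models]] into the local HF cache that load_pipeline()
# reads from. Shown only to make the revision pinning explicit.
from huggingface_hub import snapshot_download

MODELS = [
    ("black-forest-labs/FLUX.1-schnell", "741f7c3ce8b383c54771c7003378a50191e9efe9"),
    ("city96/t5-v1_1-xxl-encoder-bf16", "1b9c856aadb864af93c1dcdc226c2774fa67bc86"),
    ("park234/FLUX1-SCHENELL-INT8", "59c2f006f045d9ccdc2e3ab02150b8df0adfafc6"),
]

for repo, rev in MODELS:
    snapshot_download(repo, revision=rev)
```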
src/main.py ADDED
@@ -0,0 +1,54 @@
+import atexit
+from io import BytesIO
+from multiprocessing.connection import Listener
+from os import chmod, remove
+from os.path import abspath, exists
+from pathlib import Path
+
+import torch
+from PIL.JpegImagePlugin import JpegImageFile
+from pipelines.models import TextToImageRequest
+
+from pipeline import load_pipeline, inference
+
+SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+def at_exit():
+    torch.cuda.empty_cache()
+
+
+def main():
+    atexit.register(at_exit)
+
+    print("Loading pipeline")
+    pipeline = load_pipeline()
+
+    print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+    if exists(SOCKET):
+        remove(SOCKET)
+
+    with Listener(SOCKET) as listener:
+        chmod(SOCKET, 0o777)
+
+        print("Awaiting connections")
+        with listener.accept() as connection:
+            print("Connected")
+            while True:
+                try:
+                    request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                except EOFError:
+                    print("Inference socket exiting")
+                    return
+
+                # inference() seeds its own generator from request.seed (see pipeline.py),
+                # so no generator is passed here.
+                image = inference(request, pipeline)
+
+                # Reply with the image serialized as JPEG bytes.
+                data = BytesIO()
+                image.save(data, format=JpegImageFile.format)
+                connection.send_bytes(data.getvalue())
+
+
+if __name__ == "__main__":
+    main()
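`main()` speaks a simple message protocol over `multiprocessing.connection`: each incoming message is a JSON-serialized `TextToImageRequest`, and each reply is the raw JPEG bytes of the generated image. A minimal client sketch under those assumptions (not part of this commit; the request fields mirror the example at the bottom of `src/pipeline.py`):

```python
# Minimal client for the socket server above (illustrative, not part of the
# commit). Sends one request, receives the JPEG-encoded image back.
from multiprocessing.connection import Client

from pipelines.models import TextToImageRequest

request = TextToImageRequest(
    prompt="a scenic view of mountains at sunrise",
    height=1024,
    width=1024,
    seed=1234,
)

with Client("inferences.sock") as connection:
    # The server decodes recv_bytes() as UTF-8 JSON.
    connection.send_bytes(request.model_dump_json().encode("utf-8"))
    jpeg_bytes = connection.recv_bytes()

with open("output.jpg", "wb") as f:
    f.write(jpeg_bytes)
```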
src/pipeline.py ADDED
@@ -0,0 +1,215 @@
+import os
+import torch
+import torch._dynamo
+from torch import Generator
+from huggingface_hub.constants import HF_HUB_CACHE
+from diffusers import DiffusionPipeline, FluxTransformer2DModel
+from transformers import T5EncoderModel
+from PIL.Image import Image
+from pipelines.models import TextToImageRequest
+
+# -----------------------------------------------------------------------------
+# Environment Configuration & Global Constants
+# -----------------------------------------------------------------------------
+torch._dynamo.config.suppress_errors = True
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+os.environ["TOKENIZERS_PARALLELISM"] = "True"
+
+# Identifiers for the diffusion model checkpoint.
+MODEL_ID = "black-forest-labs/FLUX.1-schnell"
+MODEL_REV = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+
+
+# -----------------------------------------------------------------------------
+# Quantization and Linear Transformation Utilities
+# -----------------------------------------------------------------------------
+def perform_linear_quant(
+    input_tensor: torch.Tensor,
+    weight_tensor: torch.Tensor,
+    w_scale: float,
+    w_zero: int,
+    in_scale: float,
+    in_zero: int,
+    out_scale: float,
+    out_zero: int,
+) -> torch.Tensor:
+    """
+    Performs a quantization-aware linear operation on the input tensor.
+
+    This function first dequantizes both the input and the weights,
+    applies a linear transformation, and then requantizes the result.
+
+    Parameters:
+        input_tensor (torch.Tensor): The quantized input tensor.
+        weight_tensor (torch.Tensor): The quantized weight tensor.
+        w_scale (float): Scale factor for the weights.
+        w_zero (int): Zero-point for the weights.
+        in_scale (float): Scale factor for the input.
+        in_zero (int): Zero-point for the input.
+        out_scale (float): Scale factor for the output.
+        out_zero (int): Zero-point for the output.
+
+    Returns:
+        torch.Tensor: The quantized output tensor, clamped to [0, 255].
+    """
+    # Shift both operands by their zero-points; the scales are folded into
+    # the single requantization factor below.
+    inp_deq = input_tensor.float() - in_zero
+    wt_deq = weight_tensor.float() - w_zero
+
+    # Standard linear transformation
+    lin_result = torch.nn.functional.linear(inp_deq, wt_deq)
+
+    # Requantize the result
+    requantized = lin_result * ((in_scale * w_scale) / out_scale) + out_zero
+    return torch.clamp(torch.round(requantized), 0, 255)
+
+
+# -----------------------------------------------------------------------------
+# Model Initialization Functions
+# -----------------------------------------------------------------------------
+def initialize_text_encoder() -> T5EncoderModel:
+    """
+    Loads the T5 text encoder and returns it in a channels-last format.
+    """
+    print("Initializing T5 text encoder...")
+    encoder = T5EncoderModel.from_pretrained(
+        "city96/t5-v1_1-xxl-encoder-bf16",
+        revision="1b9c856aadb864af93c1dcdc226c2774fa67bc86",
+        torch_dtype=torch.bfloat16,
+    )
+    return encoder.to(memory_format=torch.channels_last)
+
+
+def initialize_transformer(transformer_dir: str) -> FluxTransformer2DModel:
+    """
+    Loads the Flux transformer model from a specified directory.
+    """
+    print("Initializing Flux transformer...")
+    transformer = FluxTransformer2DModel.from_pretrained(
+        transformer_dir,
+        torch_dtype=torch.bfloat16,
+        use_safetensors=False,
+    )
+    return transformer.to(memory_format=torch.channels_last)
+
+
+# -----------------------------------------------------------------------------
+# Pipeline Construction
+# -----------------------------------------------------------------------------
+def load_pipeline() -> DiffusionPipeline:
+    """
+    Constructs the diffusion pipeline by combining the text encoder and transformer.
+
+    A dummy quantization pass over the transformer's linear submodules (plus
+    VAE tiling) is kept below, commented out. Finally, the function performs
+    several warm-up calls to stabilize performance.
+
+    Returns:
+        DiffusionPipeline: The configured diffusion pipeline.
+    """
+    encoder = initialize_text_encoder()
+
+    # Build the path to the transformer snapshot.
+    transformer_dir = os.path.join(
+        HF_HUB_CACHE,
+        "models--park234--FLUX1-SCHENELL-INT8/snapshots/59c2f006f045d9ccdc2e3ab02150b8df0adfafc6",
+    )
+    transformer_model = initialize_transformer(transformer_dir)
+
+    pipeline_instance = DiffusionPipeline.from_pretrained(
+        MODEL_ID,
+        revision=MODEL_REV,
+        transformer=transformer_model,
+        text_encoder_2=encoder,
+        torch_dtype=torch.bfloat16,
+    ).to("cuda")
+
+    # try:
+    #     # Process each linear layer in the transformer for quantization adjustments.
+    #     linear_modules = [
+    #         mod for mod in pipeline_instance.transformer.modules()
+    #         if "Linear" in mod.__class__.__name__
+    #     ]
+    #     for mod in linear_modules:
+    #         dummy_input = torch.randn(1, 256)  # Dummy tensor for demonstration.
+    #         # Perform a dummy quantization adjustment using exponential notation.
+    #         _ = perform_linear_quant(
+    #             input_tensor=dummy_input,
+    #             weight_tensor=mod.weight,
+    #             w_scale=1e-1,
+    #             w_zero=0,
+    #             in_scale=1e-1,
+    #             in_zero=0,
+    #             out_scale=1e-1,
+    #             out_zero=0,
+    #         )
+    #     pipeline_instance.vae.enable_tiling()
+    # except Exception as err:
+    #     print("Warning: Quantization adjustments or VAE tiling failed:", err)
+
+    # Run several warm-up inferences.
+    warmup_prompt = "unrectangular, uneucharistical, pouchful, uplay, person"
+    for _ in range(3):
+        _ = pipeline_instance(
+            prompt=warmup_prompt,
+            width=1024,
+            height=1024,
+            guidance_scale=0.0,
+            num_inference_steps=4,
+            max_sequence_length=256,
+        )
+    return pipeline_instance
+
+
+# -----------------------------------------------------------------------------
+# Inference Function
+# -----------------------------------------------------------------------------
+@torch.no_grad()
+def inference(request: TextToImageRequest, pipeline: DiffusionPipeline) -> Image:
+    """
+    Generates an image based on the provided text prompt and image parameters.
+
+    The function clears the GPU cache, seeds the random generator, and calls the
+    diffusion pipeline to produce the output image.
+
+    Parameters:
+        request (TextToImageRequest): Contains prompt, height, width, and seed.
+        pipeline (DiffusionPipeline): The diffusion pipeline to run inference.
+
+    Returns:
+        Image: The generated image.
+    """
+    torch.cuda.empty_cache()
+    rnd_gen = Generator(pipeline.device).manual_seed(request.seed)
+    output = pipeline(
+        request.prompt,
+        generator=rnd_gen,
+        guidance_scale=0.0,
+        num_inference_steps=4,
+        max_sequence_length=256,
+        height=request.height,
+        width=request.width,
+        output_type="pil",
+    )
+    return output.images[0]
+
+
+# -----------------------------------------------------------------------------
+# Example Main Flow (Optional)
+# -----------------------------------------------------------------------------
+if __name__ == "__main__":
+    # Construct the diffusion pipeline.
+    diffusion_pipe = load_pipeline()
+
+    # Create a sample request (assuming TextToImageRequest is appropriately defined).
+    sample_request = TextToImageRequest(
+        prompt="a scenic view of mountains at sunrise",
+        height=512,
+        width=512,
+        seed=1234,
+    )
+
+    # Generate an image.
+    result_image = inference(sample_request, diffusion_pipe)
+    # Here, you may save or display 'result_image' as desired.
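`perform_linear_quant` folds both operand scales into the single requantization factor `(in_scale * w_scale) / out_scale`, which is why subtracting the zero-points alone before the matmul is sufficient. A small self-check sketch (illustrative, not part of the commit; assumes it runs from `src/` so `pipeline` is importable, which also triggers that module's environment setup):

```python
# Self-check for perform_linear_quant: the fused requantization must agree
# with an explicit dequantize -> float linear -> requantize reference,
# up to one quantization step of rounding slack.
import torch

from pipeline import perform_linear_quant  # assumes cwd is src/

x_q = torch.randint(0, 256, (1, 4)).float()  # fake uint8 activations
w_q = torch.randint(0, 256, (3, 4)).float()  # fake uint8 weights

out_q = perform_linear_quant(
    x_q, w_q,
    w_scale=0.1, w_zero=128,
    in_scale=0.1, in_zero=128,
    out_scale=0.5, out_zero=128,
)

# Reference path: dequantize both operands, run the float linear, requantize.
x_f = (x_q - 128) * 0.1
w_f = (w_q - 128) * 0.1
ref = torch.clamp(torch.round(torch.nn.functional.linear(x_f, w_f) / 0.5 + 128), 0, 255)

assert (out_q - ref).abs().max() <= 1
```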
uv.lock ADDED
The diff for this file is too large to render. See raw diff