jokerbit committed · verified
Commit d0d8f7e · Parent: b51d45e

Upload folder using huggingface_hub

Files changed (7):
  1. .gitattributes +1 -0
  2. .gitignore +9 -0
  3. README.md +19 -0
  4. pyproject.toml +28 -0
  5. src/main.py +59 -0
  6. src/pipeline.py +164 -0
  7. uv.lock +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ sample.png filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,9 @@
+ **/.cache
+ **/__pycache__
+ **/*.egg-info
+ *.safetensors
+ **/.venv
+ .venv
+ .git
+ *.png
+ *.jpeg
README.md ADDED
@@ -0,0 +1,19 @@
+ # flux-schnell-edge-inference
+
+ This holds the baseline for the FLUX Schnell NVIDIA GeForce RTX 4090 contest, which can be forked freely and optimized.
+
+ Some recommendations are as follows:
+ - Dependencies, including git dependencies, should be declared in `pyproject.toml`
+ - HuggingFace models should be specified in the `models` array in `pyproject.toml`, and will be downloaded before benchmarking
+ - The pipeline does **not** have internet access, so all dependencies and models must be included in `pyproject.toml`
+ - Compiled models should be hosted on HuggingFace and included in the `models` array in `pyproject.toml` (rather than compiling during loading); loading time matters far more than file sizes
+ - Avoid changing `src/main.py`, as it contains mostly protocol logic; most changes should be in `models` and `src/pipeline.py`
+ - Ensure the entire repository (excluding dependencies and HuggingFace models) is under 16MB
+
+ For testing, you need a Docker container with PyTorch and Ubuntu 22.04.
+ You can download your listed dependencies with `uv`, installed with:
+ ```bash
+ pipx ensurepath
+ pipx install uv
+ ```
+ You can then relock with `uv lock` and run with `uv run start_inference`.
pyproject.toml ADDED
@@ -0,0 +1,28 @@
+ [build-system]
+ requires = ["setuptools >= 75.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "flux-schnell-edge-inference"
+ description = "An edge-maxxing model submission for the 4090 Flux contest"
+ requires-python = ">=3.10,<3.13"
+ version = "7"
+ dependencies = [
+     "diffusers==0.31.0",
+     "transformers==4.46.2",
+     "accelerate==1.1.0",
+     "omegaconf==2.3.0",
+     "torch==2.5.1",
+     "protobuf==5.28.3",
+     "sentencepiece==0.2.0",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+     "torchao>=0.6.1",
+     "ipython>=8.29.0",
+     "setuptools >= 75.0",
+ ]
+
+ [tool.edge-maxxing]
+ models = ["jokerbit/flux.1-schnell-Robert-int8wo"]
+
+ [project.scripts]
+ start_inference = "main:main"
src/main.py ADDED
@@ -0,0 +1,59 @@
+ import atexit
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+
+ import torch
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+
+ from pipeline import load_pipeline, infer
+
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def at_exit():
+     torch.cuda.empty_cache()
+
+
+ def main():
+     atexit.register(at_exit)
+
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+
+     print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+     if exists(SOCKET):
+         remove(SOCKET)
+
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+
+             # Serve requests until the benchmark harness closes the connection.
+             while True:
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print("Inference socket exiting")
+                     return
+
+                 image = infer(request, pipeline)
+
+                 # Reply with the generated image encoded as JPEG bytes.
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+                 connection.send_bytes(data.getvalue())
+
+
+ if __name__ == '__main__':
+     main()
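`src/main.py` serves requests over a Unix socket using `multiprocessing.connection`: each inbound message is a `TextToImageRequest` as UTF-8 JSON, each reply is the rendered image as JPEG bytes. For poking at a running server outside the benchmark harness, a client just mirrors that framing. The sketch below is illustrative only; it assumes the request fields used elsewhere in this repo (`prompt`, `width`, `height`, `seed`) and that it runs from the repository root, where `inferences.sock` is created.

```python
from multiprocessing.connection import Client

from pipelines.models import TextToImageRequest

# Connect to the Unix socket created by src/main.py (at the repo root).
with Client("inferences.sock") as connection:
    request = TextToImageRequest(prompt="a cat", width=1024, height=1024, seed=42)

    # The server calls model_validate_json on the received bytes,
    # so send the request serialized as UTF-8 JSON.
    connection.send_bytes(request.model_dump_json().encode("utf-8"))

    # The reply is the image already encoded as JPEG.
    with open("out.jpg", "wb") as file:
        file.write(connection.recv_bytes())
```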
src/pipeline.py ADDED
@@ -0,0 +1,164 @@
+ import gc
+ import os
+ from time import perf_counter
+
+ import torch
+ from diffusers import FluxPipeline, FluxTransformer2DModel
+ from diffusers.image_processor import VaeImageProcessor
+ from PIL.Image import Image
+ from pipelines.models import TextToImageRequest
+ from torch import Generator
+ from torchao.quantization import quantize_, int8_weight_only
+
+ HOME = os.environ["HOME"]
+ # Components quantized at load time; the transformer ships pre-quantized.
+ QUANTIZED_MODEL = ["text_encoder_2", "vae"]
+ # Read at the first CUDA allocation, so setting it here still takes effect.
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:False,garbage_collection_threshold:0.01"
+ FLUX_CHECKPOINT = "jokerbit/flux.1-schnell-Robert-int8wo"
+ FLUX_CACHE = os.path.join(HOME, ".cache/huggingface/hub/models--jokerbit--flux.1-schnell-Robert-int8wo/snapshots/5ef0012f11a863e5111ec56540302a023bc8587b")
+ torch.backends.cudnn.benchmark = True
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.cuda.set_per_process_memory_fraction(0.99)
+
+ QUANT_CONFIG = int8_weight_only()
+ DTYPE = torch.bfloat16
+ NUM_STEPS = 4
+ # Warm-up prompt used to exercise the pipeline once during loading.
+ PROMPT = 'martyr, semiconformity, peregrination, quip, twineless, emotionless, tawa, depickle'
+
+
+ def empty_cache():
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_peak_memory_stats()
+
+
+ def quantize(pipe, config):
+     if "text_encoder" in QUANTIZED_MODEL:
+         quantize_(pipe.text_encoder, config)
+     if "text_encoder_2" in QUANTIZED_MODEL:
+         quantize_(pipe.text_encoder_2, config)
+     if "transformer" in QUANTIZED_MODEL:
+         quantize_(pipe.transformer, config, device="cuda")
+     if "vae" in QUANTIZED_MODEL:
+         quantize_(pipe.vae, config)
+     return pipe
+
+
+ def load_pipeline() -> FluxPipeline:
+     empty_cache()
+     # The pre-quantized int8 transformer is pickled, not safetensors, hence use_safetensors=False.
+     transformer = FluxTransformer2DModel.from_pretrained(
+         os.path.join(FLUX_CACHE, "transformer"),
+         use_safetensors=False,
+         torch_dtype=DTYPE,
+     )
+     pipe = FluxPipeline.from_pretrained(FLUX_CHECKPOINT, transformer=transformer, torch_dtype=DTYPE)
+     pipe.vae.enable_tiling()
+     pipe.vae.enable_slicing()
+     quantize(pipe, QUANT_CONFIG)
+     # Warm-up inference so CUDA kernels and caches are primed before benchmarking.
+     request = TextToImageRequest(prompt=PROMPT, height=1024, width=1024, seed=666)
+     infer(request, pipe)
+     # pipe.enable_model_cpu_offload()
+     return pipe
+
+
+ def encode_prompt(_pipeline, prompt: str):
+     # Text-encoder-only view of the pipeline; transformer and VAE stay untouched.
+     pipeline = FluxPipeline.from_pipe(
+         _pipeline,
+         transformer=None,
+         vae=None,
+     ).to("cuda")
+     with torch.no_grad():
+         outputs = pipeline.encode_prompt(
+             prompt=prompt,
+             prompt_2=None,
+             max_sequence_length=256,
+         )
+     del pipeline
+     empty_cache()
+     return outputs
+
+
+ def infer_latents(_pipeline, prompt_embeds, pooled_prompt_embeds, width: int | None, height: int | None, seed: int | None):
+     # Transformer-only view: denoise from precomputed embeddings to packed latents.
+     pipeline = FluxPipeline.from_pipe(
+         _pipeline,
+         text_encoder=None,
+         text_encoder_2=None,
+         tokenizer=None,
+         tokenizer_2=None,
+         vae=None,
+     ).to("cuda")
+
+     if seed is None:
+         generator = None
+     else:
+         generator = Generator(pipeline.device).manual_seed(seed)
+     outputs = pipeline(
+         prompt_embeds=prompt_embeds,
+         pooled_prompt_embeds=pooled_prompt_embeds,
+         num_inference_steps=NUM_STEPS,
+         guidance_scale=0.0,
+         width=width,
+         height=height,
+         generator=generator,
+         output_type="latent",
+     ).images
+     del pipeline
+     empty_cache()
+     return outputs
+
+
+ def decode_latents(vae, latents, width, height):
+     vae.to("cuda")
+     vae_scale_factor = 2 ** len(vae.config.block_out_channels)
+     width = width or 64 * vae_scale_factor
+     height = height or 64 * vae_scale_factor
+     image_processor = VaeImageProcessor(vae_scale_factor=vae_scale_factor)
+     with torch.no_grad():
+         # Unpack Flux's flattened latents, undo scaling/shift, then decode to pixels.
+         latents = FluxPipeline._unpack_latents(latents, height, width, vae_scale_factor)
+         latents = (latents / vae.config.scaling_factor) + vae.config.shift_factor
+         image = vae.decode(latents, return_dict=False)[0]
+     return image_processor.postprocess(image, output_type="pil")[0]
+
+
+ def infer(request: TextToImageRequest, _pipeline: FluxPipeline) -> Image:
+     # Run the three stages separately so each component's memory is released in between.
+     prompt_embeds, pooled_prompt_embeds, text_ids = encode_prompt(_pipeline, request.prompt)
+     latents = infer_latents(_pipeline, prompt_embeds, pooled_prompt_embeds, request.width, request.height, request.seed)
+     del prompt_embeds, pooled_prompt_embeds, text_ids
+     # _pipeline.transformer.to("cpu")
+     image = decode_latents(_pipeline.vae, latents, request.width, request.height)
+     torch.cuda.reset_peak_memory_stats()
+     return image
+
+
+ # Baseline single-call implementation, kept for reference:
+ # def infer(request: TextToImageRequest, _pipeline: FluxPipeline) -> Image:
+ #     if request.seed is None:
+ #         generator = None
+ #     else:
+ #         generator = Generator(device="cuda").manual_seed(request.seed)
+ #     torch.cuda.reset_peak_memory_stats()
+ #     return _pipeline(prompt=request.prompt,
+ #                      width=request.width,
+ #                      height=request.height,
+ #                      guidance_scale=0.0,
+ #                      generator=generator,
+ #                      output_type="pil",
+ #                      max_sequence_length=256,
+ #                      num_inference_steps=NUM_STEPS).images[0]
+
+
+ if __name__ == "__main__":
+     request = TextToImageRequest(prompt=PROMPT, height=None, width=None, seed=666)
+     start_time = perf_counter()
+     pipe_ = load_pipeline()
+     stop_time = perf_counter()
+     print(f"Pipeline is loaded in {stop_time - start_time:.2f}s")
+     for _ in range(4):
+         start_time = perf_counter()
+         infer(request, pipe_)
+         stop_time = perf_counter()
+         print(f"Request in {stop_time - start_time:.2f}s")
+
+ # pipe_("cat holding a womboai sign", num_inference_steps=4, guidance_scale=0, generator=torch.Generator(pipe_.device).manual_seed(666)).images[0].save("sample.png")
uv.lock ADDED
The diff for this file is too large to render. See raw diff