jokerbit committed · Commit 062697f · verified · 1 Parent(s): d023d44

Upload folder using huggingface_hub

Files changed (8)
  1. .gitattributes +1 -0
  2. .gitignore +8 -0
  3. README.md +19 -0
  4. pyproject.toml +27 -0
  5. sample.png +3 -0
  6. src/main.py +59 -0
  7. src/pipeline.py +140 -0
  8. uv.lock +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ sample.png filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,8 @@
+ **/.cache
+ **/__pycache__
+ **/*.egg-info
+ *.safetensors
+ **/.venv
+ .venv
+ .git
+
README.md ADDED
@@ -0,0 +1,19 @@
+ # flux-schnell-edge-inference
+
+ This holds the baseline for the FLUX Schnell NVIDIA GeForce RTX 4090 contest, which can be forked freely and optimized.
+
+ Some recommendations are as follows:
+ - Dependencies, including git dependencies, should be declared in `pyproject.toml`
+ - HuggingFace models should be specified in the `models` array in the `pyproject.toml` file, and will be downloaded before benchmarking
+ - The pipeline does **not** have internet access, so all dependencies and models must be included in `pyproject.toml`
+ - Compiled models should be hosted on HuggingFace and included in the `models` array in `pyproject.toml` rather than compiled during loading; loading time matters far more than file size
+ - Avoid changing `src/main.py`, as it contains mostly protocol logic; most changes should go in `models` and `src/pipeline.py`
+ - Ensure the entire repository (excluding dependencies and HuggingFace models) is under 16MB
+
+ For testing, you need a Docker container with PyTorch and Ubuntu 22.04.
+ You can download your listed dependencies with `uv`, installed with:
+ ```bash
+ pipx ensurepath
+ pipx install uv
+ ```
+ You can then relock with `uv lock` and run with `uv run start_inference`.
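For a quick end-to-end check, the container setup described in the README can be scripted. Below is a minimal sketch, assuming a CUDA-capable host and that the `pytorch/pytorch:2.5.1-cuda12.4-cudnn9-devel` image tag is available (both are assumptions), with `pip install uv` standing in for the pipx route in a non-interactive shell:

```bash
# One-shot containerized test run; image tag and mount layout are assumptions.
docker run --gpus all --rm -it -v "$PWD":/app -w /app \
    pytorch/pytorch:2.5.1-cuda12.4-cudnn9-devel \
    bash -c "pip install uv && uv lock && uv run start_inference"
```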
pyproject.toml ADDED
@@ -0,0 +1,27 @@
+ [build-system]
+ requires = ["setuptools >= 75.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "flux-schnell-edge-inference"
+ description = "An edge-maxxing model submission for the 4090 Flux contest"
+ requires-python = ">=3.10,<3.13"
+ version = "7"
+ dependencies = [
+     "diffusers==0.31.0",
+     "transformers==4.46.2",
+     "accelerate==1.1.0",
+     "omegaconf==2.3.0",
+     "torch==2.5.1",
+     "protobuf==5.28.3",
+     "sentencepiece==0.2.0",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+     "torchao>=0.6.1",
+     "ipython>=8.29.0",
+ ]
+
+ [tool.edge-maxxing]
+ models = ["black-forest-labs/FLUX.1-schnell"]
+
+ [project.scripts]
+ start_inference = "main:main"
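Since the benchmark environment is offline, the harness resolves the `[tool.edge-maxxing]` `models` array ahead of time. A hedged sketch of the equivalent manual pre-download using `huggingface_hub` (the harness's actual mechanism may differ):

```python
# Approximates the pre-benchmark download of the models array; the real
# harness may cache differently.
from huggingface_hub import snapshot_download

for repo_id in ["black-forest-labs/FLUX.1-schnell"]:
    snapshot_download(repo_id)  # populates the local HF cache for offline loading
```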
sample.png ADDED

Git LFS Details

  • SHA256: b516bf16c0ae0a532a8d6628534b71e6001857fc54a67d8d4e1f56afbd066595
  • Pointer size: 132 Bytes
  • Size of remote file: 1.34 MB
src/main.py ADDED
@@ -0,0 +1,59 @@
+ import atexit
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+
+ import torch
+
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+
+ from pipeline import load_pipeline, infer
+
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def at_exit():
+     torch.cuda.empty_cache()
+
+
+ def main():
+     atexit.register(at_exit)
+
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+
+     print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+     if exists(SOCKET):
+         remove(SOCKET)
+
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+
+             while True:
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print("Inference socket exiting")
+
+                     return
+
+                 image = infer(request, pipeline)
+
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+
+                 packet = data.getvalue()
+
+                 connection.send_bytes(packet)
+
+
+ if __name__ == '__main__':
+     main()
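For reference, a counterpart client for this socket protocol might look like the sketch below. The request fields mirror what `infer` reads from `TextToImageRequest` (`prompt`, `width`, `height`, `seed`); the full pydantic model may define more fields:

```python
# Minimal client sketch for inferences.sock: send a JSON-encoded request,
# receive JPEG bytes back. The field set is an assumption based on infer().
import json
from multiprocessing.connection import Client

with Client("inferences.sock") as connection:
    request = {"prompt": "a cat", "width": 1024, "height": 1024, "seed": 42}
    connection.send_bytes(json.dumps(request).encode("utf-8"))

    jpeg_bytes = connection.recv_bytes()
    with open("result.jpg", "wb") as file:
        file.write(jpeg_bytes)
```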
src/pipeline.py ADDED
@@ -0,0 +1,140 @@
+ import os
+ from diffusers import FluxPipeline, AutoencoderKL, FluxTransformer2DModel
+ from diffusers.image_processor import VaeImageProcessor
+ from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel, CLIPTextConfig, T5Config
+ import torch
+ import gc
+ from PIL.Image import Image
+ from pipelines.models import TextToImageRequest
+ from torch import Generator
+ from torchao.quantization import quantize_, int8_weight_only
+ from time import perf_counter
+
+ FLUX_CHECKPOINT = "black-forest-labs/FLUX.1-schnell"
+ HOME = os.environ["HOME"]
+ # REPO_DIR = ".cache/huggingface/hub/models--jokerbit--flux-schnell-int8/snapshots/9510dd83d6d44ab375b5e8facec10afa81be2a8f"
+ QUANTIZED_MODEL = ["transformer", "text_encoder", "text_encoder_2", "vae"]
+ # QUANT_CKPT = {"transformer": os.path.join(HOME, REPO_DIR, "flux_schnell_transformer_int8wo.pt"),
+ #               "text_encoder": os.path.join(HOME, REPO_DIR, "flux_schnell_text_encoder_int8wo.pt"),
+ #               "text_encoder_2": os.path.join(HOME, REPO_DIR, "flux_schnell_text_encoder_2_int8wo.pt"),
+ #               "vae": os.path.join(HOME, REPO_DIR, "flux_schnell_vae_int8wo.pt")}
+
+ QUANT_CONFIG = int8_weight_only()
+ DTYPE = torch.bfloat16
+ NUM_STEPS = 4
+
+
+ def get_transformer(quantize: bool = True, quant_config=int8_weight_only(), quant_ckpt: str = None):
+     # When a pre-quantized checkpoint is given, build the model from config and
+     # load the saved state dict instead of quantizing on the fly.
+     if quant_ckpt is not None:
+         config = FluxTransformer2DModel.load_config(FLUX_CHECKPOINT, subfolder="transformer")
+         model = FluxTransformer2DModel.from_config(config).to(DTYPE)
+         state_dict = torch.load(quant_ckpt, map_location="cpu")
+         model.load_state_dict(state_dict, assign=True)
+         print(f"Loaded {quant_ckpt}")
+         return model
+
+     model = FluxTransformer2DModel.from_pretrained(
+         FLUX_CHECKPOINT, subfolder="transformer", torch_dtype=DTYPE,
+     )
+     if quantize:
+         quantize_(model, quant_config)
+     return model
+
+
+ def get_text_encoder(quantize: bool = True, quant_config=int8_weight_only(), quant_ckpt: str = None):
+     if quant_ckpt is not None:
+         config = CLIPTextConfig.from_pretrained(FLUX_CHECKPOINT, subfolder="text_encoder")
+         model = CLIPTextModel(config).to(DTYPE)
+         state_dict = torch.load(quant_ckpt, map_location="cpu")
+         model.load_state_dict(state_dict, assign=True)
+         print(f"Loaded {quant_ckpt}")
+         return model
+
+     model = CLIPTextModel.from_pretrained(
+         FLUX_CHECKPOINT, subfolder="text_encoder", torch_dtype=DTYPE
+     )
+     if quantize:
+         quantize_(model, quant_config)
+     return model
+
+
+ def get_text_encoder_2(quantize: bool = True, quant_config=int8_weight_only(), quant_ckpt: str = None):
+     if quant_ckpt is not None:
+         config = T5Config.from_pretrained(FLUX_CHECKPOINT, subfolder="text_encoder_2")
+         model = T5EncoderModel(config).to(DTYPE)
+         state_dict = torch.load(quant_ckpt, map_location="cpu")
+         model.load_state_dict(state_dict, assign=True)
+         print(f"Loaded {quant_ckpt}")
+         return model
+
+     model = T5EncoderModel.from_pretrained(
+         FLUX_CHECKPOINT, subfolder="text_encoder_2", torch_dtype=DTYPE
+     )
+     if quantize:
+         quantize_(model, quant_config)
+     return model
+
+
+ def get_vae(quantize: bool = True, quant_config=int8_weight_only(), quant_ckpt: str = None):
+     if quant_ckpt is not None:
+         config = AutoencoderKL.load_config(FLUX_CHECKPOINT, subfolder="vae")
+         model = AutoencoderKL.from_config(config).to(DTYPE)
+         state_dict = torch.load(quant_ckpt, map_location="cpu")
+         model.load_state_dict(state_dict, assign=True)
+         print(f"Loaded {quant_ckpt}")
+         return model
+
+     model = AutoencoderKL.from_pretrained(
+         FLUX_CHECKPOINT, subfolder="vae", torch_dtype=DTYPE
+     )
+     if quantize:
+         quantize_(model, quant_config)
+     return model
+
+
+ def empty_cache():
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_max_memory_allocated()
+     torch.cuda.reset_peak_memory_stats()
+
+
+ def load_pipeline() -> FluxPipeline:
+     empty_cache()
+     transformer = get_transformer("transformer" in QUANTIZED_MODEL, QUANT_CONFIG)
+     text_encoder = get_text_encoder("text_encoder" in QUANTIZED_MODEL, QUANT_CONFIG)
+     text_encoder_2 = get_text_encoder_2("text_encoder_2" in QUANTIZED_MODEL, QUANT_CONFIG)
+     vae = get_vae("vae" in QUANTIZED_MODEL, QUANT_CONFIG)
+
+     pipe = FluxPipeline.from_pretrained(FLUX_CHECKPOINT,
+                                         transformer=transformer,
+                                         vae=vae,
+                                         text_encoder=text_encoder,
+                                         text_encoder_2=text_encoder_2,
+                                         torch_dtype=DTYPE).to("cuda")
+     empty_cache()
+     # Warm-up pass so CUDA kernel compilation happens before benchmarking.
+     pipe("cat", guidance_scale=0., max_sequence_length=256, num_inference_steps=NUM_STEPS)
+     return pipe
+
+
+ def infer(request: TextToImageRequest, _pipeline: FluxPipeline) -> Image:
+     if request.seed is None:
+         generator = None
+     else:
+         generator = Generator(_pipeline.device).manual_seed(request.seed)
+
+     empty_cache()
+     image = _pipeline(prompt=request.prompt,
+                       width=request.width,
+                       height=request.height,
+                       guidance_scale=0.0,
+                       generator=generator,
+                       output_type="pil",
+                       max_sequence_length=256,
+                       num_inference_steps=NUM_STEPS).images[0]
+     return image
+
+
+ if __name__ == "__main__":
+     start_time = perf_counter()
+     pipe = load_pipeline()
+     stop_time = perf_counter()
+     print(f"Pipeline is loaded in {stop_time - start_time}s")
+     pipe("cat holding a womboai sign", num_inference_steps=NUM_STEPS, guidance_scale=0,
+          generator=torch.Generator(pipe.device).manual_seed(666)).images[0].save("sample.png")
uv.lock ADDED
The diff for this file is too large to render. See raw diff