sharper740 committed on
Commit 2ee4e92 · verified · 1 parent: f9701e9

Upload folder using huggingface_hub

Files changed (6):
  1. pyproject.toml +34 -0
  2. pyproject.toml-copy +47 -0
  3. src/main.py +50 -0
  4. src/pipeline-copy.py +93 -0
  5. src/pipeline.py +89 -0
  6. uv.lock +0 -0
pyproject.toml ADDED
@@ -0,0 +1,34 @@
+ [build-system]
+ requires = ["setuptools >= 75.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "flux-schnell-edge-inference"
+ description = "An edge-maxxing model submission for the 4090 Flux contest"
+ requires-python = ">=3.10,<3.13"
+ version = "8"
+ dependencies = [
+     "diffusers==0.31.0",
+     "transformers==4.46.2",
+     "accelerate==1.1.0",
+     "omegaconf==2.3.0",
+     "torch==2.5.1",
+     "protobuf==5.28.3",
+     "sentencepiece==0.2.0",
+     "torchao==0.6.1",
+     "hf_transfer==0.1.8",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+ ]
+
+ [[tool.edge-maxxing.models]]
+ repository = "jokerbit/flux.1-schnell-Robert-int8wo"
+ revision = "5ef0012f11a863e5111ec56540302a023bc8587b"
+
+
+ [[tool.edge-maxxing.models]]
+ repository = "madebyollin/taef1"
+ revision = "2d552378e58c9c94201075708d7de4e1163b2689"
+
+
+ [project.scripts]
+ start_inference = "main:main"
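
The pipeline code below loads these pinned revisions from the local Hugging Face cache with local_files_only=True, so the snapshots must already be on disk before start_inference runs. A minimal pre-fetch sketch, assuming network access at setup time (huggingface_hub is pulled in transitively by diffusers/transformers):

# Sketch: pre-populate HF_HUB_CACHE with the pinned revisions declared in
# [[tool.edge-maxxing.models]] so later local_files_only=True loads succeed.
from huggingface_hub import snapshot_download

MODELS = [
    ("jokerbit/flux.1-schnell-Robert-int8wo", "5ef0012f11a863e5111ec56540302a023bc8587b"),
    ("madebyollin/taef1", "2d552378e58c9c94201075708d7de4e1163b2689"),
]

for repo_id, revision in MODELS:
    # Downloads into HF_HUB_CACHE under models--{org}--{name}/snapshots/{revision}/
    snapshot_download(repo_id, revision=revision)
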
pyproject.toml-copy ADDED
@@ -0,0 +1,47 @@
+ [build-system]
+ requires = ["setuptools >= 75.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "flux-schnell-edge-inference"
+ description = "An edge-maxxing model submission for the 4090 Flux contest"
+ requires-python = ">=3.10,<3.13"
+ version = "8"
+ dependencies = [
+     "diffusers==0.31.0",
+     "transformers==4.46.2",
+     "accelerate==1.1.0",
+     "omegaconf==2.3.0",
+     "torch==2.5.1",
+     "protobuf==5.28.3",
+     "sentencepiece==0.2.0",
+     "torchao==0.6.1",
+     "hf_transfer==0.1.8",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+ ]
+
+
+
+ [[tool.edge-maxxing.models]]
+ repository = "madebyollin/taef1"
+ revision = "2d552378e58c9c94201075708d7de4e1163b2689"
+
+ [[tool.edge-maxxing.models]]
+ repository = "black-forest-labs/FLUX.1-schnell"
+ revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+
+
+ [[tool.edge-maxxing.models]]
+ repository = "jokerbit/flux.1-schnell-Robert-int8wo"
+ revision = "5ef0012f11a863e5111ec56540302a023bc8587b"
+
+ [[tool.edge-maxxing.models]]
+ repository = "freaky231/flux.1-schnell-int8"
+ revision = "c33fa7f79751fe42b0a7de7f72edb5d1b86f32a7"
+
+ [[tool.edge-maxxing.models]]
+ repository = "freaky231/t5-encoder-bf16"
+ revision = "994f6e4720f69e67bfc8822cbb4063c9149b801b"
+
+ [project.scripts]
+ start_inference = "main:main"
src/main.py ADDED
@@ -0,0 +1,50 @@
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+ import torch
+ from pipeline import load_pipeline, infer
+
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def main():
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+     generator = torch.Generator(pipeline.device)
+     print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+     if exists(SOCKET):
+         remove(SOCKET)
+
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+
+             while True:
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print("Inference socket exiting")
+
+                     return
+
+                 image = infer(request, pipeline, generator.manual_seed(request.seed))
+
+                 # Serialize the generated image as JPEG before replying.
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+
+                 packet = data.getvalue()
+
+                 connection.send_bytes(packet)
+
+
+ if __name__ == '__main__':
+     main()
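
main.py implements a small request/response loop over a Unix domain socket: each incoming message is one TextToImageRequest serialized as JSON, and each reply is the raw JPEG bytes of the generated image. A matching client sketch for local testing, assuming the same pipelines.models pydantic package and a working directory containing inferences.sock:

# Hypothetical client for the socket protocol in src/main.py; the contest
# harness, not this sketch, is what actually drives the server.
from multiprocessing.connection import Client

from pipelines.models import TextToImageRequest

with Client("inferences.sock") as connection:  # socket sits in the repo root
    request = TextToImageRequest(prompt="cat", height=None, width=None, seed=3254)

    # Mirrors connection.recv_bytes().decode("utf-8") on the server side.
    connection.send_bytes(request.model_dump_json().encode("utf-8"))

    # The server answers with the JPEG-encoded image.
    with open("out.jpg", "wb") as f:
        f.write(connection.recv_bytes())
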
src/pipeline-copy.py ADDED
@@ -0,0 +1,93 @@
+ import gc
+ import os
+ from typing import TypeAlias
+
+ import torch
+ from PIL.Image import Image
+ from diffusers import (
+     FluxPipeline,
+     FluxTransformer2DModel,
+     AutoencoderKL,
+     DiffusionPipeline,
+     AutoencoderTiny,
+ )
+ from huggingface_hub.constants import HF_HUB_CACHE
+ from pipelines.models import TextToImageRequest
+ from torch import Generator
+ from transformers import T5EncoderModel, CLIPTextModel
+
+
+ Pipeline: TypeAlias = FluxPipeline
+ torch.backends.cudnn.benchmark = True
+ torch._inductor.config.conv_1x1_as_mm = True
+ torch._inductor.config.coordinate_descent_tuning = True
+ torch._inductor.config.epilogue_fusion = False
+ torch._inductor.config.coordinate_descent_check_all_directions = True
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+
+ id = "black-forest-labs/FLUX.1-schnell"
+ revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+
+ vae_id = "madebyollin/taef1"
+ vae_rev = "2d552378e58c9c94201075708d7de4e1163b2689"
+
+
+ def load_pipeline() -> Pipeline:
+     # Load the pre-quantized int8 transformer straight from the local HF cache snapshot.
+     path = os.path.join(
+         HF_HUB_CACHE,
+         "models--freaky231--flux.1-schnell-int8/snapshots/c33fa7f79751fe42b0a7de7f72edb5d1b86f32a7/transformer",
+     )
+     transformer = FluxTransformer2DModel.from_pretrained(
+         path, use_safetensors=False, local_files_only=True, torch_dtype=torch.bfloat16
+     ).to(memory_format=torch.channels_last)
+     # vae = AutoencoderTiny.from_pretrained(
+     #     vae_id, revision=vae_rev, local_files_only=True, torch_dtype=torch.bfloat16
+     # )
+     text_encoder_2 = T5EncoderModel.from_pretrained(
+         "freaky231/t5-encoder-bf16",
+         revision="994f6e4720f69e67bfc8822cbb4063c9149b801b",
+         torch_dtype=torch.bfloat16,
+     ).to(memory_format=torch.channels_last)
+     pipeline = DiffusionPipeline.from_pretrained(
+         id,
+         revision=revision,
+         transformer=transformer,
+         text_encoder_2=text_encoder_2,
+         # vae=vae,
+         torch_dtype=torch.bfloat16,
+     )
+
+     pipeline.to("cuda")
+     # Warm up CUDA kernels and caches with two throwaway generations.
+     for _ in range(2):
+         pipeline(
+             prompt="satiety, unwitherable, Pygmy, ramlike, Curtis, fingerstone, rewhisper",
+             width=1024,
+             height=1024,
+             guidance_scale=0.0,
+             num_inference_steps=4,
+             max_sequence_length=256,
+         )
+     return pipeline
+
+
+ @torch.inference_mode()
+ def infer(
+     request: TextToImageRequest, pipeline: Pipeline, generator: torch.Generator
+ ) -> Image:
+     return pipeline(
+         request.prompt,
+         generator=generator,
+         guidance_scale=0.0,
+         num_inference_steps=4,
+         max_sequence_length=256,
+         height=request.height,
+         width=request.width,
+     ).images[0]
+
+
+ if __name__ == "__main__":
+     pipe_ = load_pipeline()
+     for _ in range(4):
+         request = TextToImageRequest(prompt="cat", height=None, width=None, seed=3254)
+         infer(request, pipe_, torch.Generator("cuda").manual_seed(request.seed))
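
This file and pipeline.py below set torch._inductor.config flags at import time, but neither ever calls torch.compile, so those flags have no effect on the eager-mode execution used here. A sketch of how the transformer could be compiled so the flags actually apply; the max-autotune mode and fullgraph=True are assumptions, not part of this submission:

import torch

def compile_transformer(pipeline):
    # Hypothetical helper: compile the transformer (the dominant per-step cost)
    # so the inductor flags set at module import take effect. The first warmup
    # call pays the compilation cost; later requests reuse the compiled kernels.
    pipeline.transformer = torch.compile(
        pipeline.transformer, mode="max-autotune", fullgraph=True
    )
    return pipeline

Called between pipeline.to("cuda") and the warmup loop, this would fold the tuning cost into load_pipeline(), which pipeline.py's __main__ block already times.
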
src/pipeline.py ADDED
@@ -0,0 +1,89 @@
+ import gc
+ import os
+ from typing import TypeAlias
+
+ import torch
+ from PIL.Image import Image
+ from diffusers import FluxPipeline, FluxTransformer2DModel, AutoencoderKL, AutoencoderTiny
+ from huggingface_hub.constants import HF_HUB_CACHE
+ from pipelines.models import TextToImageRequest
+ from torch import Generator
+ from torchao.quantization import quantize_, int8_weight_only
+ from transformers import T5EncoderModel, CLIPTextModel
+
+
+ Pipeline: TypeAlias = FluxPipeline
+ torch.backends.cudnn.benchmark = True
+ torch._inductor.config.conv_1x1_as_mm = True
+ torch._inductor.config.coordinate_descent_tuning = True
+ torch._inductor.config.epilogue_fusion = False
+ torch._inductor.config.coordinate_descent_check_all_directions = True
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+
+ CHECKPOINT = "freaky231/flux.1-schnell-int8"
+ REVISION = "5ef0012f11a863e5111ec56540302a023bc8587b"
+
+ TinyVAE = "madebyollin/taef1"
+ TinyVAE_REV = "2d552378e58c9c94201075708d7de4e1163b2689"
+
+
+ def load_pipeline() -> Pipeline:
+     # Load the pre-quantized int8 transformer straight from the local HF cache snapshot.
+     path = os.path.join(HF_HUB_CACHE, "models--freaky231--flux.1-schnell-int8/snapshots/5ef0012f11a863e5111ec56540302a023bc8587b/transformer")
+     transformer = FluxTransformer2DModel.from_pretrained(
+         path,
+         use_safetensors=False,
+         local_files_only=True,
+         torch_dtype=torch.bfloat16,
+     )
+     vae = AutoencoderTiny.from_pretrained(
+         TinyVAE,
+         revision=TinyVAE_REV,
+         local_files_only=True,
+         torch_dtype=torch.bfloat16,
+     )
+     pipeline = FluxPipeline.from_pretrained(
+         CHECKPOINT,
+         revision=REVISION,
+         transformer=transformer,
+         vae=vae,
+         local_files_only=True,
+         torch_dtype=torch.bfloat16,
+     )
+
+     pipeline.to(memory_format=torch.channels_last)
+     pipeline.to("cuda")
+     # Warm up CUDA kernels and caches before serving requests.
+     for _ in range(4):
+         pipeline("cat", num_inference_steps=4)
+
+     return pipeline
+
+
+ @torch.inference_mode()
+ def infer(request: TextToImageRequest, pipeline: Pipeline, generator: torch.Generator) -> Image:
+     return pipeline(
+         request.prompt,
+         generator=generator,
+         guidance_scale=0.0,
+         num_inference_steps=4,
+         max_sequence_length=256,
+         height=request.height,
+         width=request.width,
+     ).images[0]
+
+
+ if __name__ == "__main__":
+     from time import perf_counter
+
+     PROMPT = 'martyr, semiconformity, peregrination, quip, twineless, emotionless, tawa, depickle'
+     request = TextToImageRequest(prompt=PROMPT, height=None, width=None, seed=666)
+
+     start_time = perf_counter()
+     pipe_ = load_pipeline()
+     stop_time = perf_counter()
+     print(f"Pipeline is loaded in {stop_time - start_time}s")
+
+     for _ in range(4):
+         start_time = perf_counter()
+         infer(request, pipe_, torch.Generator("cuda").manual_seed(request.seed))
+         stop_time = perf_counter()
+         print(f"Request in {stop_time - start_time}s")
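
pipeline.py imports quantize_ and int8_weight_only from torchao but never calls them; the transformer arrives pre-quantized from the pinned snapshot. For reference, a sketch of how such a checkpoint could be produced. This is an assumed export script, not the one used for the submission; it also suggests why the loaders pass use_safetensors=False, since torchao's quantized tensor subclasses are pickled rather than stored as safetensors.

# Sketch (assumed, not the submission's actual export script): build an
# int8 weight-only FLUX transformer like the one loaded above.
import torch
from diffusers import FluxTransformer2DModel
from torchao.quantization import quantize_, int8_weight_only

transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",
    subfolder="transformer",
    torch_dtype=torch.bfloat16,
)
# Swap every linear weight for an int8 weight-only quantized version, in place.
quantize_(transformer, int8_weight_only())

# Quantized tensor subclasses don't round-trip through safetensors, hence
# safe_serialization=False here and use_safetensors=False at load time.
transformer.save_pretrained("flux.1-schnell-int8/transformer", safe_serialization=False)
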
uv.lock ADDED
The diff for this file is too large to render. See raw diff