sharper740 committed on
Commit 3f884f3 · verified · 1 Parent(s): 1abc168

Upload folder using huggingface_hub

Files changed (4)
  1. pyproject.toml +44 -0
  2. src/main.py +57 -0
  3. src/pipeline.py +85 -0
  4. uv.lock +0 -0
pyproject.toml ADDED
@@ -0,0 +1,44 @@
+ [build-system]
+ requires = ["setuptools >= 75.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "flux-schnell-edge-inference"
+ description = "An edge-maxxing model submission by RobertML for the 4090 Flux contest"
+ requires-python = ">=3.10,<3.13"
+ version = "8"
+ dependencies = [
+     "diffusers==0.31.0",
+     "transformers==4.46.2",
+     "accelerate==1.1.0",
+     "omegaconf==2.3.0",
+     "torch==2.5.1",
+     "protobuf==5.28.3",
+     "sentencepiece==0.2.0",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+     "gitpython>=3.1.43",
+     "hf_transfer==0.1.8",
+     "torchao==0.6.1",
+     "setuptools>=75.3.0",
+ ]
+
+ [[tool.edge-maxxing.models]]
+ repository = "black-forest-labs/FLUX.1-schnell"
+ revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+
+ [[tool.edge-maxxing.models]]
+ repository = "city96/t5-v1_1-xxl-encoder-bf16"
+ revision = "1b9c856aadb864af93c1dcdc226c2774fa67bc86"
+
+ [[tool.edge-maxxing.models]]
+ repository = "park234/Flux1-schnell-int8"
+ revision = "ba40332fa9ca3e25d439616fca8ac934a96b73f7"
+
+ [[tool.edge-maxxing.models]]
+ repository = "park234/flux1-schnell-vae32"
+ revision = "d7c57ba8ee0d581d67256219c152a8e69da853b5"
+
+ [project.scripts]
+ start_inference = "main:main"
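Each [[tool.edge-maxxing.models]] entry above pins a repository to an exact revision; src/pipeline.py later resolves the int8 transformer through the standard Hugging Face cache layout (models--{org}--{name}/snapshots/{revision}). A minimal sketch of that mapping, where snapshot_path is a hypothetical helper and not part of the submission:

from pathlib import Path
from huggingface_hub.constants import HF_HUB_CACHE

def snapshot_path(repository: str, revision: str) -> Path:
    # HF cache layout: models--{org}--{name}/snapshots/{revision}
    return Path(HF_HUB_CACHE) / f"models--{repository.replace('/', '--')}" / "snapshots" / revision

print(snapshot_path("park234/Flux1-schnell-int8", "ba40332fa9ca3e25d439616fca8ac934a96b73f7"))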
src/main.py ADDED
@@ -0,0 +1,57 @@
+ import atexit
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+ import torch
+
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+ from pipeline import load_pipeline, infer
+
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def at_exit():
+     torch.cuda.empty_cache()
+
+
+ def main():
+     atexit.register(at_exit)
+
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+
+     print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+     if exists(SOCKET):
+         remove(SOCKET)
+
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+             generator = torch.Generator("cuda")
+             while True:
+                 # Protocol: JSON-encoded TextToImageRequest in, raw JPEG bytes out.
+                 try:
+                     request = TextToImageRequest.model_validate_json(
+                         connection.recv_bytes().decode("utf-8")
+                     )
+                 except EOFError:
+                     print("Inference socket exiting")
+                     return
+
+                 image = infer(request, pipeline, generator.manual_seed(request.seed))
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+
+                 packet = data.getvalue()
+                 connection.send_bytes(packet)
+
+
+ if __name__ == "__main__":
+     main()
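src/main.py serves one connection at a time over a Unix socket: each inbound message is a JSON-encoded TextToImageRequest, each outbound message is raw JPEG bytes. For reference, a client sketch under the assumption that the request schema carries exactly the fields the server reads (prompt, seed, height, width); the socket path must match SOCKET in main.py:

import json
from io import BytesIO
from multiprocessing.connection import Client
from PIL import Image

# Hypothetical client for the socket protocol above.
with Client("inferences.sock") as connection:
    request = {"prompt": "a red bicycle at sunset", "seed": 42, "height": 1024, "width": 1024}
    connection.send_bytes(json.dumps(request).encode("utf-8"))
    image = Image.open(BytesIO(connection.recv_bytes()))
    image.save("out.jpg")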
src/pipeline.py ADDED
@@ -0,0 +1,85 @@
+ from diffusers import FluxPipeline, AutoencoderTiny, FluxTransformer2DModel
+ from huggingface_hub.constants import HF_HUB_CACHE
+ from transformers import T5EncoderModel
+ import torch
+ import torch._dynamo
+ import gc
+ import os
+ from PIL.Image import Image
+ from pipelines.models import TextToImageRequest
+ from torch import Generator
+ # Kept for its side effect: registers torchao's quantized tensor types,
+ # which the pickled int8 transformer checkpoint references when deserialized.
+ from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only
+
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+ torch._dynamo.config.suppress_errors = True
+
+ # Placeholder alias used only in the type hints below.
+ Pipeline = None
+
+ ckpt_id = "black-forest-labs/FLUX.1-schnell"
+ ckpt_revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+
+
+ def empty_cache():
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_max_memory_allocated()
+     torch.cuda.reset_peak_memory_stats()
+
+
+ def load_pipeline() -> Pipeline:
+     empty_cache()
+
+     # Pre-quantized int8 transformer, loaded directly from the local HF cache snapshot
+     # pinned in pyproject.toml.
+     transformer_path = os.path.join(
+         HF_HUB_CACHE,
+         "models--park234--Flux1-schnell-int8/snapshots/ba40332fa9ca3e25d439616fca8ac934a96b73f7",
+     )
+     transformer_model = FluxTransformer2DModel.from_pretrained(
+         transformer_path, torch_dtype=torch.bfloat16, use_safetensors=False
+     ).to(memory_format=torch.channels_last)
+
+     # Tiny autoencoder in place of the full VAE.
+     vae_model = AutoencoderTiny.from_pretrained(
+         "park234/flux1-schnell-vae32",
+         revision="d7c57ba8ee0d581d67256219c152a8e69da853b5",
+         torch_dtype=torch.bfloat16,
+     )
+
+     # bf16 T5-XXL text encoder.
+     text_encoder_2 = T5EncoderModel.from_pretrained(
+         "city96/t5-v1_1-xxl-encoder-bf16",
+         revision="1b9c856aadb864af93c1dcdc226c2774fa67bc86",
+         torch_dtype=torch.bfloat16,
+     ).to(memory_format=torch.channels_last)
+
+     pipeline = FluxPipeline.from_pretrained(
+         ckpt_id,
+         vae=vae_model,
+         revision=ckpt_revision,
+         transformer=transformer_model,
+         text_encoder_2=text_encoder_2,
+         torch_dtype=torch.bfloat16,
+     ).to("cuda")
+
+     pipeline.transformer = torch.compile(pipeline.transformer, mode="reduce-overhead")
+
+     empty_cache()
+     return pipeline
+
+
+ @torch.no_grad()
+ def infer(
+     request: TextToImageRequest, pipeline: Pipeline, generator: Generator
+ ) -> Image:
+     return pipeline(
+         request.prompt,
+         generator=generator,
+         guidance_scale=0.0,
+         num_inference_steps=4,
+         max_sequence_length=256,
+         height=request.height,
+         width=request.width,
+         output_type="pil",
+     ).images[0]
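Because pipeline.transformer is wrapped in torch.compile(mode="reduce-overhead"), compilation happens on the first call, so the first request pays that latency. A warmup inference right after load_pipeline() would shift that cost to startup. A minimal sketch, assuming TextToImageRequest can be constructed from the same fields infer() reads (prompt, seed, height, width):

import torch
from pipeline import load_pipeline, infer
from pipelines.models import TextToImageRequest

pipeline = load_pipeline()

# Hypothetical warmup request; the prompt and dimensions are arbitrary.
warmup = TextToImageRequest(prompt="warmup", seed=0, height=1024, width=1024)
infer(warmup, pipeline, torch.Generator("cuda").manual_seed(0))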
uv.lock ADDED
The diff for this file is too large to render. See raw diff