position025 committed
Commit df65fdd · verified · 1 Parent(s): 3ff4ca5

Upload folder using huggingface_hub

Files changed (5):
  1. .gitattributes +1 -0
  2. pyproject.toml +39 -0
  3. src/main.py +53 -0
  4. src/pipeline.py +91 -0
  5. uv.lock +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+
pyproject.toml ADDED
@@ -0,0 +1,39 @@
+[build-system]
+requires = ["setuptools >= 75.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "flux-schnell-edge-inference"
+description = "An edge-maxxing model submission by RobertML for the 4090 Flux contest"
+requires-python = ">=3.10,<3.13"
+version = "8"
+dependencies = [
+    "diffusers==0.31.0",
+    "transformers==4.46.2",
+    "accelerate==1.1.0",
+    "omegaconf==2.3.0",
+    "torch==2.5.1",
+    "protobuf==5.28.3",
+    "sentencepiece==0.2.0",
+    "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+    "gitpython>=3.1.43",
+    "hf_transfer==0.1.8",
+    "torchao==0.6.1",
+]
+
+[[tool.edge-maxxing.models]]
+repository = "city96/t5-v1_1-xxl-encoder-bf16"
+revision = "1b9c856aadb864af93c1dcdc226c2774fa67bc86"
+
+[[tool.edge-maxxing.models]]
+repository = "position025/FLUX.1-schnell-qf8"
+revision = "588785b2be92552014f594e7d0611a4c0e0a5def"
+
+[[tool.edge-maxxing.models]]
+repository = "position025/FLUX.1-schnell-8-pda"
+revision = "cdee1db6a8b1858435ec4397b19ed703265d2153"
+
+
+[project.scripts]
+start_inference = "main:main"
+
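
Note: each [[tool.edge-maxxing.models]] table pins a repository to an exact revision, and src/pipeline.py later resolves one of these snapshots by hash under HF_HUB_CACHE, so the checkpoints are assumed to be pre-downloaded before start_inference runs. A minimal sketch of that prefetch step with huggingface_hub (the prefetch_models helper is illustrative, not part of this commit):

from huggingface_hub import snapshot_download

# Repo/revision pairs mirror the [[tool.edge-maxxing.models]] tables above.
MODELS = [
    ("city96/t5-v1_1-xxl-encoder-bf16", "1b9c856aadb864af93c1dcdc226c2774fa67bc86"),
    ("position025/FLUX.1-schnell-qf8", "588785b2be92552014f594e7d0611a4c0e0a5def"),
    ("position025/FLUX.1-schnell-8-pda", "cdee1db6a8b1858435ec4397b19ed703265d2153"),
]

def prefetch_models() -> None:
    for repo_id, revision in MODELS:
        # snapshot_download caches under HF_HUB_CACHE, the same location
        # pipeline.py builds its snapshot path against.
        snapshot_download(repo_id, revision=revision)

if __name__ == "__main__":
    prefetch_models()
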
src/main.py ADDED
@@ -0,0 +1,53 @@
+import atexit
+from io import BytesIO
+from multiprocessing.connection import Listener
+from os import chmod, remove
+from os.path import abspath, exists
+from pathlib import Path
+import torch
+
+from PIL.JpegImagePlugin import JpegImageFile
+from pipelines.models import TextToImageRequest
+from pipeline import load_pipeline, infer
+SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+def at_exit():
+    torch.cuda.empty_cache()
+
+
+def main():
+    atexit.register(at_exit)
+
+    print("Loading pipeline")
+    pipeline = load_pipeline()
+
+    print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+    if exists(SOCKET):
+        remove(SOCKET)
+
+    with Listener(SOCKET) as listener:
+        chmod(SOCKET, 0o777)
+
+        print("Awaiting connections")
+        with listener.accept() as connection:
+            print("Connected")
+            generator = torch.Generator("cuda")
+            while True:
+                try:
+                    request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                except EOFError:
+                    print("Inference socket exiting")
+
+                    return
+                image = infer(request, pipeline, generator.manual_seed(request.seed))
+                data = BytesIO()
+                image.save(data, format=JpegImageFile.format)
+
+                packet = data.getvalue()
+
+                connection.send_bytes(packet)
+
+if __name__ == '__main__':
+    main()
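
The client side of inferences.sock is not part of this commit, but the loop above implies the protocol: one JSON-encoded TextToImageRequest in, one blob of JPEG bytes out, and a closed connection shuts the server down. A hypothetical client sketch under that assumption, with the request field names inferred from how main() and infer() read them:

from io import BytesIO
from multiprocessing.connection import Client

from PIL import Image

SOCKET = "inferences.sock"  # hypothetical path; main.py resolves it relative to the repo root

with Client(SOCKET) as connection:
    # Field names are inferred from usage in this commit:
    # request.prompt, request.seed, request.width, request.height.
    payload = '{"prompt": "a red bicycle at dusk", "seed": 42, "width": 1024, "height": 1024}'
    connection.send_bytes(payload.encode("utf-8"))

    # main.py replies with raw JPEG bytes for each request.
    image = Image.open(BytesIO(connection.recv_bytes()))
    image.save("out.jpg")
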
src/pipeline.py ADDED
@@ -0,0 +1,91 @@
+from diffusers import FluxPipeline, AutoencoderKL, AutoencoderTiny
+from diffusers.image_processor import VaeImageProcessor
+from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
+from huggingface_hub.constants import HF_HUB_CACHE
+from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
+import torch
+import torch._dynamo
+import gc
+from PIL import Image as img
+from PIL.Image import Image
+from pipelines.models import TextToImageRequest
+from torch import Generator
+import time
+from diffusers import FluxTransformer2DModel, DiffusionPipeline
+from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only
+import os
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+
+Pipeline = None
+torch.backends.cuda.matmul.allow_tf32 = True
+torch.backends.cudnn.enabled = True
+torch.backends.cudnn.benchmark = True
+
+ckpt_id = "position025/FLUX.1-schnell-qf8"
+ckpt_revision = "588785b2be92552014f594e7d0611a4c0e0a5def"
+
+
+def empty_cache():
+    gc.collect()
+    torch.cuda.empty_cache()
+    torch.cuda.reset_max_memory_allocated()
+    torch.cuda.reset_peak_memory_stats()
+
+
+def load_pipeline() -> Pipeline:
+    empty_cache()
+
+    dtype, device = torch.bfloat16, "cuda"
+
+    text_encoder_2 = T5EncoderModel.from_pretrained(
+        "city96/t5-v1_1-xxl-encoder-bf16",
+        revision="1b9c856aadb864af93c1dcdc226c2774fa67bc86",
+        torch_dtype=torch.bfloat16,
+    ).to(memory_format=torch.channels_last)
+
+    path = os.path.join(
+        HF_HUB_CACHE,
+        "models--position025--FLUX.1-schnell-8-pda/snapshots/cdee1db6a8b1858435ec4397b19ed703265d2153",
+    )
+    model = FluxTransformer2DModel.from_pretrained(
+        path, torch_dtype=dtype, use_safetensors=False
+    ).to(memory_format=torch.channels_last)
+    pipeline = DiffusionPipeline.from_pretrained(
+        ckpt_id,
+        revision=ckpt_revision,
+        transformer=model,
+        text_encoder_2=text_encoder_2,
+        torch_dtype=dtype,
+    ).to(device)
+    quantize_(pipeline.vae, int8_weight_only())
+    pipeline.vae = torch.compile(pipeline.vae, mode="max-autotune")
+
+    for _ in range(3):
+        pipeline(
+            prompt="prophecy, consequence, breezy, parasitic, modifier, pulpy, simpleton, existence",
+            width=1024,
+            height=1024,
+            guidance_scale=0.0,
+            num_inference_steps=4,
+            max_sequence_length=256,
+        )
+
+    empty_cache()
+    return pipeline
+
+
+@torch.no_grad()
+def infer(
+    request: TextToImageRequest, pipeline: Pipeline, generator: Generator
+) -> Image:
+    return pipeline(
+        request.prompt,
+        generator=generator,
+        guidance_scale=0.0,
+        num_inference_steps=4,
+        max_sequence_length=256,
+        height=request.height,
+        width=request.width,
+        output_type="pil",
+    ).images[0]
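
The load-time optimization here is the VAE treatment: torchao int8 weight-only quantization, then torch.compile with max-autotune, then three full warmup generations so compilation and autotuning are paid for inside load_pipeline() rather than on the first scored request. A self-contained sketch of that quantize-then-compile pattern on a toy module (the toy model and shapes are ours, not from this commit; assumes a CUDA device):

import torch
from torchao.quantization import quantize_, int8_weight_only

# Toy stand-in for the VAE: any nn.Module containing Linear layers.
model = torch.nn.Sequential(
    torch.nn.Linear(64, 128),
    torch.nn.ReLU(),
    torch.nn.Linear(128, 64),
).to("cuda")

# Swap Linear weights for int8 weight-only quantized tensors in place;
# activations stay in the original dtype.
quantize_(model, int8_weight_only())

# Compile afterwards, as pipeline.py does for the VAE.
model = torch.compile(model, mode="max-autotune")

# First calls trigger compilation/autotuning; this mirrors the warmup
# loop in load_pipeline() above.
with torch.no_grad():
    for _ in range(3):
        model(torch.randn(8, 64, device="cuda"))
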
uv.lock ADDED
The diff for this file is too large to render; see the raw diff.