edgetensor committed
Commit 0a050a1 · verified · 1 Parent(s): 90a170a

Initial commit with folder contents

Files changed (5)
  1. .gitattributes +1 -1
  2. pyproject.toml +28 -0
  3. src/main.py +62 -0
  4. src/pipeline.py +81 -0
  5. uv.lock +0 -0
.gitattributes CHANGED
@@ -32,4 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.xz filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
pyproject.toml ADDED
@@ -0,0 +1,28 @@
+[build-system]
+requires = ["setuptools >= 75.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "flux-schnell-edge-inference"
+description = "An edge-maxxing model submission for the 4090 Flux contest"
+requires-python = ">=3.10,<3.13"
+version = "7"
+dependencies = [
+    "diffusers==0.31.0",
+    "transformers==4.46.2",
+    "accelerate==1.1.0",
+    "omegaconf==2.3.0",
+    "torch==2.5.1",
+    "protobuf==5.28.3",
+    "sentencepiece==0.2.0",
+    "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+    "torchao>=0.6.1",
+    "ipython>=8.29.0",
+    "setuptools >= 75.0",
+]
+
+[tool.edge-maxxing]
+models = ["black-forest-labs/FLUX.1-schnell"]
+
+[project.scripts]
+start_inference = "main:main"
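
The [project.scripts] table maps a start_inference console command to main() in src/main.py, so installing the package exposes the server as a single command. A minimal sketch of what that entry point resolves to, assuming the src/ layout puts the main module on the import path after installation:

    # Hypothetical equivalent of the start_inference console script:
    # "main:main" means import the main module and call its main() function.
    from main import main

    if __name__ == "__main__":
        main()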
src/main.py ADDED
@@ -0,0 +1,62 @@
+import atexit
+from io import BytesIO
+from multiprocessing.connection import Listener
+from os import chmod, remove
+from os.path import abspath, exists
+from pathlib import Path
+
+import torch
+
+from PIL.JpegImagePlugin import JpegImageFile
+from pipelines.models import TextToImageRequest
+
+from pipeline import load_pipeline, infer
+
+SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+def at_exit():
+    torch.cuda.empty_cache()
+
+
+def main():
+    atexit.register(at_exit)
+
+    print("Loading pipeline")
+
+    pipeline = load_pipeline()
+
+    print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+    if exists(SOCKET):
+        remove(SOCKET)
+
+    with Listener(SOCKET) as listener:
+        chmod(SOCKET, 0o777)  # allow any local user to connect
+
+        print("Awaiting connections")
+        with listener.accept() as connection:
+            print("Connected")
+
+            while True:
+                try:
+                    request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                except EOFError:
+                    print("Inference socket exiting")
+
+                    return
+
+                image = infer(request, pipeline)
+
+                data = BytesIO()
+                image.save(data, format=JpegImageFile.format)  # encode as JPEG
+
+                packet = data.getvalue()
+
+                connection.send_bytes(packet)
+
+
+
+
+if __name__ == '__main__':
+    main()
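
The server above defines a simple request/response protocol over a Unix socket: each message received via recv_bytes is a JSON-encoded TextToImageRequest, and each reply sent via send_bytes is the raw JPEG bytes of the generated image. A hypothetical client sketch follows; the payload field names (prompt, width, height, seed) are assumptions about the TextToImageRequest schema, not taken from this commit:

    # Hypothetical client: send one JSON request, read raw JPEG bytes back.
    import json
    from io import BytesIO
    from multiprocessing.connection import Client

    from PIL import Image

    SOCKET = "inferences.sock"  # assumes the server was started from the repo root

    with Client(SOCKET) as connection:
        payload = {"prompt": "a photo of a dog", "width": 1024, "height": 1024, "seed": 42}
        connection.send_bytes(json.dumps(payload).encode("utf-8"))

        image = Image.open(BytesIO(connection.recv_bytes()))
        image.save("output.jpg")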
src/pipeline.py ADDED
@@ -0,0 +1,81 @@
+import os
+from diffusers import FluxPipeline, AutoencoderKL, FluxTransformer2DModel
+from diffusers.image_processor import VaeImageProcessor
+from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel, CLIPTextConfig, T5Config
+import torch
+import gc
+from PIL import Image
+from pipelines.models import TextToImageRequest
+from torch import Generator
+from time import perf_counter
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"  # reduce allocator fragmentation
+
+class EightQuantize:
+    def __init__(self, bits=8):
+        self.bits = bits
+        self.qmax = (1 << bits) - 1
+
+    def __call__(self, x):
+        # Scale-only round trip: quantize to [0, qmax] levels, dequantize back.
+        scale = x.max() / self.qmax
+        x_quant = torch.clip(torch.round(x / scale), 0, self.qmax)
+        return x_quant * scale
+
+
+CHECKPOINT = "black-forest-labs/FLUX.1-schnell"
+DTYPE = torch.bfloat16
+NUM_STEPS = 4
+
+def empty_cache():
+    gc.collect()
+    torch.cuda.empty_cache()
+    torch.cuda.reset_max_memory_allocated()
+    torch.cuda.reset_peak_memory_stats()
+
+def load_pipeline() -> FluxPipeline:
+    empty_cache()
+    is_quantize = 0  # optional 8-bit VAE quantization, left disabled
+    pipe = FluxPipeline.from_pretrained(CHECKPOINT, torch_dtype=DTYPE)
+
+    pipe.text_encoder_2.to(memory_format=torch.channels_last)
+    pipe.transformer.to(memory_format=torch.channels_last)
+
+    pipe.vae.to(memory_format=torch.channels_last)
+    pipe.vae = torch.compile(pipe.vae)
+    pipe._exclude_from_cpu_offload = ["vae"]  # keep the compiled VAE out of CPU offload
+
+    try:
+        if is_quantize:
+            quantizer = EightQuantize()
+            with torch.no_grad():
+                for param in pipe.vae.parameters():
+                    param.data = quantizer(param.data)
+    except Exception as e:
+        print(f"Quantization warning: {e}")
+
+    pipe.enable_sequential_cpu_offload()
+
+    empty_cache()
+    pipe("dog", guidance_scale=0.0, max_sequence_length=256, num_inference_steps=4)  # warm-up/compile run
+    return pipe
+
+@torch.inference_mode()
+def infer(request: TextToImageRequest, _pipeline: FluxPipeline) -> Image.Image:
+    torch.cuda.reset_peak_memory_stats()
+
+    if request.seed is None:
+        generator = None
+    else:
+        generator = Generator(device="cuda").manual_seed(request.seed)
+
+    empty_cache()
+    image = _pipeline(prompt=request.prompt,
+                      width=request.width,
+                      height=request.height,
+                      guidance_scale=0.0,
+                      generator=generator,
+                      output_type="pil",
+                      max_sequence_length=256,
+                      num_inference_steps=NUM_STEPS).images[0]
+    return image
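
EightQuantize above is a plain scale-only 8-bit round trip: one scale per tensor, round to the nearest of 256 levels, clamp to [0, 255], multiply back. Because the clamp floor is 0, negative weights would be zeroed out, which only suits non-negative tensors; the path is left disabled in load_pipeline (is_quantize = 0). A standalone sketch of the same arithmetic on a toy non-negative tensor (values are illustrative):

    # Scale-only 8-bit quantize/dequantize round trip, as in EightQuantize.
    import torch

    bits = 8
    qmax = (1 << bits) - 1                 # 255

    x = torch.rand(4) * 3.0                # toy non-negative tensor
    scale = x.max() / qmax                 # one scale for the whole tensor
    x_quant = torch.clip(torch.round(x / scale), 0, qmax)
    x_dequant = x_quant * scale            # dequantized reconstruction

    # For values in [0, x.max()], the rounding error is bounded by scale / 2.
    assert torch.all((x - x_dequant).abs() <= scale / 2 + 1e-8)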
uv.lock ADDED
The diff for this file is too large to render. See raw diff