Commit c37a2ed · verified · 1 parent: 9657818
Committed by Act

Initial commit with folder contents

Files changed (4)
  1. pyproject.toml +27 -0
  2. src/main.py +59 -0
  3. src/pipeline.py +85 -0
  4. uv.lock +0 -0
pyproject.toml ADDED
@@ -0,0 +1,27 @@
+ [build-system]
+ requires = ["setuptools >= 75.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "flux-schnell-edge-inference"
+ description = "An edge-maxxing model submission for the 4090 Flux contest"
+ requires-python = ">=3.10,<3.13"
+ version = "7"
+ dependencies = [
+     "diffusers==0.31.0",
+     "transformers==4.46.2",
+     "accelerate==1.1.0",
+     "omegaconf==2.3.0",
+     "torch==2.5.1",
+     "protobuf==5.28.3",
+     "sentencepiece==0.2.0",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+     "gitpython>=3.1.43",
+     "torchao>=0.6.1"
+ ]
+
+ [tool.edge-maxxing]
+ models = ["black-forest-labs/FLUX.1-schnell"]
+
+ [project.scripts]
+ start_inference = "main:main"
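Note: the [project.scripts] entry wires the start_inference command to main() in src/main.py. As a rough sketch of what that means at install time (this is not the literal stub setuptools generates, just its effective behavior):

# Sketch: what the installed start_inference console script effectively runs.
import sys

from main import main

if __name__ == "__main__":
    sys.exit(main())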
src/main.py ADDED
@@ -0,0 +1,59 @@
+ import atexit
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+
+ import torch
+
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+
+ from pipeline import load_pipeline, infer
+
+ # Unix socket created at the repository root, one level above src/.
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def at_exit():
+     torch.cuda.empty_cache()
+
+
+ def main():
+     atexit.register(at_exit)
+
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+
+     print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+     if exists(SOCKET):
+         remove(SOCKET)
+
+     with Listener(SOCKET) as listener:
+         # Make the socket connectable by any client process.
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+
+             while True:
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print("Inference socket exiting")
+
+                     return
+
+                 image = infer(request, pipeline)
+
+                 # Serialize the generated image as JPEG and send it back.
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+
+                 packet = data.getvalue()
+
+                 connection.send_bytes(packet)
+
+
+ if __name__ == '__main__':
+     main()
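For reference, a client on the other end of this socket would mirror the framing above: send the request as UTF-8 JSON via send_bytes, then read the JPEG bytes back. A minimal sketch, not part of this commit, assuming TextToImageRequest exposes the prompt/width/height/seed fields that src/pipeline.py reads:

# Hypothetical client for the socket server in src/main.py.
from io import BytesIO
from multiprocessing.connection import Client

from PIL import Image
from pipelines.models import TextToImageRequest

# main.py creates the socket at the repository root; adjust the path as needed.
with Client("inferences.sock") as connection:
    request = TextToImageRequest(prompt="a watercolor fox", width=1024, height=1024, seed=42)
    # The server expects UTF-8 JSON framed by send_bytes/recv_bytes.
    connection.send_bytes(request.model_dump_json().encode("utf-8"))
    image = Image.open(BytesIO(connection.recv_bytes()))
    image.save("result.jpg")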
src/pipeline.py ADDED
@@ -0,0 +1,85 @@
+ import gc
+ import time
+
+ import torch
+ from torch import Generator
+
+ from diffusers import AutoencoderKL, DiffusionPipeline, FluxPipeline
+ from diffusers.image_processor import VaeImageProcessor
+ from PIL import Image
+ from pipelines.models import TextToImageRequest
+ from torchao.quantization import quantize_, int8_weight_only
+
+ # Placeholder alias used in type hints below; the loaded pipeline is an
+ # untyped DiffusionPipeline, so no concrete class is named.
+ Pipeline = None
+
+ MODEL_ID = "black-forest-labs/FLUX.1-schnell"
+ traced_vae_decode_path = "traced_vae_decode.pt"
+
+
+ def empty_cache():
+     start = time.time()
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_max_memory_allocated()
+     torch.cuda.reset_peak_memory_stats()
+     print(f"Flush took: {time.time() - start}")
+
+
+ def load_pipeline() -> Pipeline:
+     empty_cache()
+     dtype, device = torch.bfloat16, "cuda"
+
+     # Load the VAE separately so its weights can be quantized to int8
+     # before the rest of the pipeline is assembled around it.
+     vae = AutoencoderKL.from_pretrained(
+         MODEL_ID, subfolder="vae", torch_dtype=torch.bfloat16
+     )
+     quantize_(vae, int8_weight_only())
+
+     pipeline = DiffusionPipeline.from_pretrained(
+         MODEL_ID,
+         vae=vae,
+         torch_dtype=dtype,
+     )
+
+     pipeline.enable_sequential_cpu_offload()
+
+     # Two warmup passes so CUDA kernels and offload hooks are primed
+     # before timed inference.
+     for _ in range(2):
+         empty_cache()
+         pipeline(prompt="onomancy, aftergo, spirantic, Platyhelmia, modificator, drupaceous, jobbernowl, hereness", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
+
+     return pipeline
+
+
+ def trace_and_save_vae_decoder(vae, latents):
+     try:
+         traced_vae_decode = torch.jit.trace(vae.decode, (latents, True))
+         torch.jit.save(traced_vae_decode, traced_vae_decode_path)
+         return traced_vae_decode
+     except Exception as e:
+         print(f"JIT tracing failed: {e}")
+         return vae.decode  # Fall back to the untraced decoder.
+
+
+ def decode_latents_to_image(latents, height: int, width: int, vae):
+     if not height:
+         height = 1024
+     if not width:
+         width = 1024
+
+     # Flux packs latents into 2x2 patches, so diffusers 0.31's FluxPipeline
+     # uses 2 ** len(block_out_channels) (16 for the Flux VAE) as the scale.
+     if vae.config.block_out_channels:
+         vae_scale_factor = 2 ** (len(vae.config.block_out_channels))
+     else:
+         vae_scale_factor = 1
+     image_processor = VaeImageProcessor(vae_scale_factor=vae_scale_factor)
+
+     traced_vae_decode = vae.decode
+     with torch.no_grad():
+         latents = FluxPipeline._unpack_latents(latents.unsqueeze(0), height, width, vae_scale_factor)
+         latents = (latents / vae.config.scaling_factor) + vae.config.shift_factor
+         image = traced_vae_decode(latents, return_dict=False)[0]
+         decoded_image = image_processor.postprocess(image, output_type="pil")[0]
+
+     return decoded_image
+
+
+ @torch.inference_mode()
+ def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image.Image:
+     empty_cache()
+     generator = Generator("cuda").manual_seed(request.seed)
+     # Run the pipeline only up to latents; decoding is done manually above.
+     latent = pipeline(request.prompt, generator=generator, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256, height=request.height, width=request.width, output_type="latent").images[0]
+     return decode_latents_to_image(latent, request.height, request.width, pipeline.vae)
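The only quantization applied in this submission is torchao's int8 weight-only pass on the VAE. A toy sketch of that call in isolation (the Sequential module here is hypothetical; only quantize_ and int8_weight_only come from the real torchao API):

# Demonstrates quantize_(model, int8_weight_only()) as used in load_pipeline.
import torch
from torchao.quantization import quantize_, int8_weight_only

toy_vae = torch.nn.Sequential(
    torch.nn.Linear(64, 64),
    torch.nn.GELU(),
    torch.nn.Linear(64, 64),
).to(torch.bfloat16)

# Rewrites each nn.Linear weight in place to an int8-backed tensor subclass;
# activations and the module structure stay in bfloat16.
quantize_(toy_vae, int8_weight_only())
print(type(toy_vae[0].weight))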
uv.lock ADDED
The diff for this file is too large to render. See raw diff