fringuant committed on
Commit 090dc6b · verified · 1 Parent(s): 2ed5c39

Initial commit with folder contents

Files changed (4)
  1. pyproject.toml +42 -0
  2. src/main.py +50 -0
  3. src/pipeline.py +50 -0
  4. uv.lock +0 -0
pyproject.toml ADDED
@@ -0,0 +1,42 @@
+ [build-system]
+ requires = ["setuptools >= 75.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "flux-schnell-edge-inference"
+ description = "An edge-maxxing model submission for the 4090 Flux contest"
+ requires-python = ">=3.10,<3.13"
+ version = "8"
+ dependencies = [
+     "diffusers==0.31.0",
+     "transformers==4.46.2",
+     "accelerate==1.1.0",
+     "omegaconf==2.3.0",
+     "torch==2.5.1",
+     "protobuf==5.28.3",
+     "sentencepiece==0.2.0",
+     "torchao==0.6.1",
+     "hf_transfer==0.1.8",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+ ]
+
+ [[tool.edge-maxxing.models]]
+ repository = "black-forest-labs/FLUX.1-schnell"
+ revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+ exclude = ["transformer", "vae", "text_encoder_2"]
+
+ [[tool.edge-maxxing.models]]
+ repository = "simonbaby/int8wo"
+ revision = "ea08d478d1c800affec1dc0ea6442a6fa531bbb9"
+
+ [[tool.edge-maxxing.models]]
+ repository = "simonbaby/bf16"
+ revision = "24a77356026c2b8552488a3381fef097ead3459d"
+
+ [[tool.edge-maxxing.models]]
+ repository = "simonbaby/vae_e3m2"
+ revision = "3e2254259485390a5eb428ba82f98dcec24895f5"
+
+
+ [project.scripts]
+ start_inference = "main:main"
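
The `[[tool.edge-maxxing.models]]` tables declare the repositories and pinned revisions the contest harness is expected to pre-download before inference starts. The harness's actual download mechanism is not part of this commit; a minimal sketch of an equivalent prefetch step, assuming huggingface_hub's snapshot_download and treating the FLUX `exclude` list as ignore patterns, would look like:

    # Hypothetical prefetch: pin each declared model into the local HF cache
    # so the hard-coded snapshot path in src/pipeline.py resolves offline.
    from huggingface_hub import snapshot_download

    MODELS = [
        ("black-forest-labs/FLUX.1-schnell", "741f7c3ce8b383c54771c7003378a50191e9efe9", ["transformer/*", "vae/*", "text_encoder_2/*"]),
        ("simonbaby/int8wo", "ea08d478d1c800affec1dc0ea6442a6fa531bbb9", None),
        ("simonbaby/bf16", "24a77356026c2b8552488a3381fef097ead3459d", None),
        ("simonbaby/vae_e3m2", "3e2254259485390a5eb428ba82f98dcec24895f5", None),
    ]

    for repo, revision, ignore in MODELS:
        snapshot_download(repo, revision=revision, ignore_patterns=ignore)

Pinning exact revisions keeps the cache layout deterministic, which matters because pipeline.py below addresses one snapshot directory by its literal hash.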
src/main.py ADDED
@@ -0,0 +1,50 @@
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+
+ from pipeline import load_pipeline, infer
+
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def main():
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+
+     print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+     # Remove a stale socket left over from a previous run
+     if exists(SOCKET):
+         remove(SOCKET)
+
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+
+             while True:
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print("Inference socket exiting")
+                     return
+
+                 image = infer(request, pipeline)
+
+                 # Encode the result as JPEG and send the raw bytes back
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+                 packet = data.getvalue()
+                 connection.send_bytes(packet)
+
+
+ if __name__ == '__main__':
+     main()
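
main.py serves the model over a Unix socket via multiprocessing.connection: one JSON-encoded TextToImageRequest in, one blob of raw JPEG bytes out. The benchmark client lives in the edge-maxxing harness, not in this commit; a minimal sketch of a compatible client, assuming TextToImageRequest is a pydantic model (the server calls model_validate_json, so model_dump_json should be the matching serializer) and using the field names that infer() reads in src/pipeline.py:

    # Hypothetical client for the inference socket.
    from multiprocessing.connection import Client

    from pipelines.models import TextToImageRequest

    with Client("inferences.sock") as connection:
        # prompt/seed/height/width are the fields consumed by infer() below
        request = TextToImageRequest(prompt="a red bicycle", seed=0, height=1024, width=1024)
        # Mirror the server framing: JSON bytes in, JPEG bytes out
        connection.send_bytes(request.model_dump_json().encode("utf-8"))
        jpeg_bytes = connection.recv_bytes()
        with open("out.jpg", "wb") as f:
            f.write(jpeg_bytes)

Closing the connection raises EOFError in the server's recv loop, which is how main() shuts down cleanly.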
src/pipeline.py ADDED
@@ -0,0 +1,50 @@
+ #3
+ import os
+
+ import torch
+ import torch._dynamo
+ from diffusers import DiffusionPipeline, FluxTransformer2DModel
+ from huggingface_hub.constants import HF_HUB_CACHE
+ from PIL.Image import Image
+ from pipelines.models import TextToImageRequest
+ from torch import Generator
+ from torchao.quantization import int8_weight_only, quantize_
+ from transformers import T5EncoderModel
+
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+ os.environ["TOKENIZERS_PARALLELISM"] = "True"
+ torch._dynamo.config.suppress_errors = True
+
+ Pipeline = None
+
+ ids = "black-forest-labs/FLUX.1-schnell"
+ Revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+
+
+ def load_pipeline() -> Pipeline:
+     # bf16 T5 text encoder from the pinned simonbaby/bf16 snapshot
+     text_encoder_2 = T5EncoderModel.from_pretrained(
+         "simonbaby/bf16",
+         revision="24a77356026c2b8552488a3381fef097ead3459d",
+         torch_dtype=torch.bfloat16,
+     ).to(memory_format=torch.channels_last)
+
+     # int8 weight-only quantized transformer, loaded directly from the hub cache
+     path = os.path.join(HF_HUB_CACHE, "models--simonbaby--int8wo/snapshots/ea08d478d1c800affec1dc0ea6442a6fa531bbb9")
+     model = FluxTransformer2DModel.from_pretrained(path, torch_dtype=torch.bfloat16, use_safetensors=False).to(memory_format=torch.channels_last)
+
+     pipeline = DiffusionPipeline.from_pretrained(
+         ids,
+         revision=Revision,
+         transformer=model,
+         text_encoder_2=text_encoder_2,
+         torch_dtype=torch.bfloat16,
+     )
+     pipeline.to("cuda")
+
+     # Quantize the VAE in place, then warm up with three throwaway generations
+     quantize_(pipeline.vae, int8_weight_only())
+     for _ in range(3):
+         pipeline(prompt="insensible, timbale, pothery, electrovital, actinogram, taxis, intracerebellar, centrodesmus", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
+
+     return pipeline
+
+
+ @torch.no_grad()
+ def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
+     generator = Generator(pipeline.device).manual_seed(request.seed)
+
+     return pipeline(
+         request.prompt,
+         generator=generator,
+         guidance_scale=0.0,
+         num_inference_steps=4,
+         max_sequence_length=256,
+         height=request.height,
+         width=request.width,
+     ).images[0]
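
The transformer arrives already quantized, so load_pipeline() pays no quantization cost for it at startup; only the VAE is quantized in place. The export script for the simonbaby/int8wo checkpoint is not in this commit; a sketch of how such a checkpoint could plausibly be produced with torchao, where safe_serialization=False matches the use_safetensors=False load above (torchao's quantized tensor subclasses don't serialize as plain safetensors):

    # Hypothetical export of the int8 weight-only transformer checkpoint.
    import torch
    from diffusers import FluxTransformer2DModel
    from torchao.quantization import int8_weight_only, quantize_

    transformer = FluxTransformer2DModel.from_pretrained(
        "black-forest-labs/FLUX.1-schnell",
        subfolder="transformer",
        torch_dtype=torch.bfloat16,
    )
    quantize_(transformer, int8_weight_only())
    # Pickle-based serialization, since the quantized weights are
    # tensor subclasses rather than plain tensors.
    transformer.save_pretrained("int8wo", safe_serialization=False)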
uv.lock ADDED
The diff for this file is too large to render. See raw diff