farapart committed · Commit 33c2179 · verified · 1 Parent(s): e835a7e

Initial commit with folder contents
pyproject.toml ADDED
@@ -0,0 +1,29 @@
+ [build-system]
+ requires = ["setuptools >= 75.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "flux-schnell-edge-inference"
+ description = "An edge-maxxing model submission for the 4090 Flux contest"
+ requires-python = ">=3.10,<3.13"
+ version = "8"
+ dependencies = [
+     "diffusers==0.31.0",
+     "transformers==4.46.2",
+     "accelerate==1.1.0",
+     "omegaconf==2.3.0",
+     "torch==2.5.1",
+     "protobuf==5.28.3",
+     "sentencepiece==0.2.0",
+     "torchao==0.6.1",
+     "hf_transfer==0.1.8",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+     "setuptools>=75.3.0",
+ ]
+
+ [[tool.edge-maxxing.models]]
+ repository = "farapart/flow.1-fast"
+ revision = "59ebc4a11e1a6d4fe2085988028c5252f3a07b74"
+
+ [project.scripts]
+ start_inference = "main:main"
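The `[[tool.edge-maxxing.models]]` table pins the exact snapshot that `src/pipeline.py` later loads with `local_files_only=True`, so the weights must already be present in the Hugging Face cache. A minimal prefetch sketch, assuming `huggingface_hub` is available (the contest harness presumably performs this step itself):

```python
from huggingface_hub import snapshot_download

# Fetch the pinned revision into HF_HUB_CACHE so that load_pipeline()
# can run fully offline with local_files_only=True.
snapshot_download(
    "farapart/flow.1-fast",
    revision="59ebc4a11e1a6d4fe2085988028c5252f3a07b74",
)
```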
src/flux_schnell_edge_inference.egg-info/PKG-INFO ADDED
@@ -0,0 +1,15 @@
+ Metadata-Version: 2.1
+ Name: flux-schnell-edge-inference
+ Version: 8
+ Summary: An edge-maxxing model submission for the 4090 Flux contest
+ Requires-Python: <3.13,>=3.10
+ Requires-Dist: diffusers==0.31.0
+ Requires-Dist: transformers==4.46.2
+ Requires-Dist: accelerate==1.1.0
+ Requires-Dist: omegaconf==2.3.0
+ Requires-Dist: torch==2.5.1
+ Requires-Dist: protobuf==5.28.3
+ Requires-Dist: sentencepiece==0.2.0
+ Requires-Dist: torchao==0.6.1
+ Requires-Dist: hf_transfer==0.1.8
+ Requires-Dist: edge-maxxing-pipelines@ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines
src/flux_schnell_edge_inference.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,10 @@
+ README.md
+ pyproject.toml
+ src/main.py
+ src/pipeline.py
+ src/flux_schnell_edge_inference.egg-info/PKG-INFO
+ src/flux_schnell_edge_inference.egg-info/SOURCES.txt
+ src/flux_schnell_edge_inference.egg-info/dependency_links.txt
+ src/flux_schnell_edge_inference.egg-info/entry_points.txt
+ src/flux_schnell_edge_inference.egg-info/requires.txt
+ src/flux_schnell_edge_inference.egg-info/top_level.txt
src/flux_schnell_edge_inference.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
+
src/flux_schnell_edge_inference.egg-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ start_inference = main:main
src/flux_schnell_edge_inference.egg-info/requires.txt ADDED
@@ -0,0 +1,10 @@
+ diffusers==0.31.0
+ transformers==4.46.2
+ accelerate==1.1.0
+ omegaconf==2.3.0
+ torch==2.5.1
+ protobuf==5.28.3
+ sentencepiece==0.2.0
+ torchao==0.6.1
+ hf_transfer==0.1.8
+ edge-maxxing-pipelines@ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines
src/flux_schnell_edge_inference.egg-info/top_level.txt ADDED
@@ -0,0 +1,2 @@
+ main
+ pipeline
src/main.py ADDED
@@ -0,0 +1,49 @@
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+
+ import torch
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+ from pipeline import load_pipeline, infer
+
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def main():
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+     generator = torch.Generator(pipeline.device)
+     print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+     # Remove any stale socket left over from a previous run.
+     if exists(SOCKET):
+         remove(SOCKET)
+
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+
+             while True:
+                 # Each request arrives as a JSON-encoded TextToImageRequest.
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print("Inference socket exiting")
+                     return
+
+                 image = infer(request, pipeline, generator.manual_seed(request.seed))
+
+                 # Reply with the raw JPEG bytes of the generated image.
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+                 connection.send_bytes(data.getvalue())
+
+
+ if __name__ == '__main__':
+     main()
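`main.py` serves a single connection over a Unix-domain socket: each request is a JSON-encoded `TextToImageRequest`, and each reply is the raw bytes of a JPEG. A minimal client sketch under those assumptions (the request fields shown are only the ones the server actually reads; the full schema lives in the edge-maxxing `pipelines` package):

```python
from io import BytesIO
from multiprocessing.connection import Client

from PIL import Image

# Connect to the socket created by main(); the path is assumed to be
# the repo-root "inferences.sock" that SOCKET points at.
with Client("inferences.sock") as connection:
    # Hypothetical request body; field names mirror the attributes
    # accessed in main.py and pipeline.py (prompt, seed, width, height).
    request = '{"prompt": "a red bicycle at dawn", "width": 1024, "height": 1024, "seed": 42}'
    connection.send_bytes(request.encode("utf-8"))

    # The reply is a JPEG payload.
    image = Image.open(BytesIO(connection.recv_bytes()))
    image.save("out.jpg")
```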
src/pipeline.py ADDED
@@ -0,0 +1,50 @@
+ import os
+ from typing import TypeAlias
+
+ import torch
+ from PIL.Image import Image
+ from diffusers import FluxPipeline, FluxTransformer2DModel
+ from huggingface_hub.constants import HF_HUB_CACHE
+ from pipelines.models import TextToImageRequest
+ from torchao.quantization import quantize_, int8_weight_only  # imported but not called in this revision
+
+ Pipeline: TypeAlias = FluxPipeline
+
+ # Inductor and cuDNN tuning knobs for the convolution and matmul paths.
+ torch.backends.cudnn.benchmark = True
+ torch._inductor.config.conv_1x1_as_mm = True
+ torch._inductor.config.coordinate_descent_tuning = True
+ torch._inductor.config.epilogue_fusion = False
+ torch._inductor.config.coordinate_descent_check_all_directions = True
+
+ os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+ os.environ["TOKENIZERS_PARALLELISM"] = "True"
+
+
+ def load_pipeline() -> Pipeline:
+     # Load the pinned transformer snapshot directly from the local HF cache.
+     path = os.path.join(HF_HUB_CACHE, "models--farapart--flow.1-fast/snapshots/59ebc4a11e1a6d4fe2085988028c5252f3a07b74/transformer")
+     transformer = FluxTransformer2DModel.from_pretrained(path, use_safetensors=False, local_files_only=True, torch_dtype=torch.bfloat16)
+
+     pipeline = FluxPipeline.from_pretrained("farapart/flow.1-fast", revision="59ebc4a11e1a6d4fe2085988028c5252f3a07b74", transformer=transformer, local_files_only=True, torch_dtype=torch.bfloat16).to("cuda")
+     pipeline.to(memory_format=torch.channels_last)
+
+     # Warm-up generations so autotuning and allocator growth happen before serving.
+     with torch.inference_mode():
+         for _ in range(4):
+             pipeline(prompt="onomancy, aftergo, spirantic, Platyhelmia, modificator, drupaceous, jobbernowl, hereness", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
+     torch.cuda.empty_cache()
+     return pipeline
+
+
+ @torch.inference_mode()
+ def infer(request: TextToImageRequest, pipeline: Pipeline, generator: torch.Generator) -> Image:
+     return pipeline(
+         request.prompt,
+         generator=generator,
+         guidance_scale=0.0,
+         num_inference_steps=4,
+         max_sequence_length=256,
+         height=request.height,
+         width=request.width,
+     ).images[0]
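`pipeline.py` imports `quantize_` and `int8_weight_only` from torchao but never calls them, which suggests the pinned `farapart/flow.1-fast` checkpoint was quantized ahead of time. For reference, a sketch of how that torchao API applies int8 weight-only quantization to a module (not something this commit actually does at load time):

```python
from torchao.quantization import quantize_, int8_weight_only

# Swap the transformer's nn.Linear weights for int8 weight-only
# quantized versions, in place; activations stay in bfloat16.
quantize_(transformer, int8_weight_only())
```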
uv.lock ADDED
The diff for this file is too large to render. See raw diff