Commit 9fb5895 · verified · 1 parent: ba2a15f

Initial commit with folder contents

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ RobertML.png filter=lfs diff=lfs merge=lfs -text
pyproject.toml ADDED
@@ -0,0 +1,43 @@
+ [build-system]
+ requires = ["setuptools >= 75.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "flux-schnell-edge-inference"
+ description = "An edge-maxxing model submission by RobertML for the 4090 Flux contest"
+ requires-python = ">=3.10,<3.13"
+ version = "8"
+ dependencies = [
+     "diffusers==0.31.0",
+     "transformers==4.46.2",
+     "accelerate==1.1.0",
+     "omegaconf==2.3.0",
+     "torch==2.5.1",
+     "protobuf==5.28.3",
+     "sentencepiece==0.2.0",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+     "gitpython>=3.1.43",
+     "torchao>=0.6.1",
+ ]
+
+ [[tool.edge-maxxing.models]]
+ repository = "black-forest-labs/FLUX.1-schnell"
+ revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+ exclude = ["transformer"]
+
+ [[tool.edge-maxxing.models]]
+ repository = "proact/PRO_FLUX_0load"
+ revision = "22f140d15b1b2f86794cda5ffb7bc52e7b917965"
+
+ [[tool.edge-maxxing.models]]
+ repository = "city96/t5-v1_1-xxl-encoder-bf16"
+ revision = "1b9c856aadb864af93c1dcdc226c2774fa67bc86"
+
+ [[tool.edge-maxxing.models]]
+ repository = "proact/PRO_FLUX_1load"
+ revision = "91741cd838292c87e50d14d1b5d14335e180a961"
+
+ [project.scripts]
+ start_inference = "main:main"
+
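For illustration, a minimal sketch of reading the [[tool.edge-maxxing.models]] tables back out of this pyproject.toml with the standard library. How the actual edge-maxxing harness consumes these entries is not shown in this commit, so treat this only as a demonstration of the table layout (tomllib is in the standard library from Python 3.11; on 3.10 the third-party tomli package provides the same API):

import tomllib  # stdlib on Python 3.11+; on 3.10, use "import tomli as tomllib"

with open("pyproject.toml", "rb") as f:
    config = tomllib.load(f)

# Each [[tool.edge-maxxing.models]] entry pins a repository to a revision.
for model in config["tool"]["edge-maxxing"]["models"]:
    line = f"{model['repository']} @ {model['revision']}"
    if "exclude" in model:
        line += f" (excluding: {', '.join(model['exclude'])})"
    print(line)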
src/flux_schnell_edge_inference.egg-info/PKG-INFO ADDED
@@ -0,0 +1,15 @@
+ Metadata-Version: 2.1
+ Name: flux-schnell-edge-inference
+ Version: 7
+ Summary: An edge-maxxing model submission for the 4090 Flux contest
+ Requires-Python: <3.13,>=3.10
+ Requires-Dist: diffusers==0.31.0
+ Requires-Dist: transformers==4.46.2
+ Requires-Dist: accelerate==1.1.0
+ Requires-Dist: omegaconf==2.3.0
+ Requires-Dist: torch==2.5.1
+ Requires-Dist: protobuf==5.28.3
+ Requires-Dist: sentencepiece==0.2.0
+ Requires-Dist: edge-maxxing-pipelines@ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines
+ Requires-Dist: gitpython>=3.1.43
+ Requires-Dist: torchao>=0.6.1
src/flux_schnell_edge_inference.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,10 @@
+ README.md
+ pyproject.toml
+ src/main.py
+ src/pipeline.py
+ src/flux_schnell_edge_inference.egg-info/PKG-INFO
+ src/flux_schnell_edge_inference.egg-info/SOURCES.txt
+ src/flux_schnell_edge_inference.egg-info/dependency_links.txt
+ src/flux_schnell_edge_inference.egg-info/entry_points.txt
+ src/flux_schnell_edge_inference.egg-info/requires.txt
+ src/flux_schnell_edge_inference.egg-info/top_level.txt
src/flux_schnell_edge_inference.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
+
src/flux_schnell_edge_inference.egg-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ start_inference = main:main
src/flux_schnell_edge_inference.egg-info/requires.txt ADDED
@@ -0,0 +1,10 @@
+ diffusers==0.31.0
+ transformers==4.46.2
+ accelerate==1.1.0
+ omegaconf==2.3.0
+ torch==2.5.1
+ protobuf==5.28.3
+ sentencepiece==0.2.0
+ edge-maxxing-pipelines@ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines
+ gitpython>=3.1.43
+ torchao>=0.6.1
src/flux_schnell_edge_inference.egg-info/top_level.txt ADDED
@@ -0,0 +1,2 @@
+ main
+ pipeline
src/main.py ADDED
@@ -0,0 +1,59 @@
+ import atexit
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+ from git import Repo
+ import torch
+
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+
+ from pipeline import load_pipeline, infer
+
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def at_exit():
+     torch.cuda.empty_cache()
+
+
+ def main():
+     atexit.register(at_exit)
+
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+
+     print(f"Pipeline loaded! Creating socket at '{SOCKET}'")
+
+     if exists(SOCKET):
+         remove(SOCKET)
+
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+
+             while True:
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print("Inference socket exiting")
+
+                     return
+
+                 image = infer(request, pipeline)
+
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+
+                 packet = data.getvalue()
+
+                 connection.send_bytes(packet)
+
+
+ if __name__ == '__main__':
+     main()
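For context, a minimal client sketch for the socket protocol implemented above: the server receives a JSON-encoded TextToImageRequest as raw bytes and replies with the JPEG bytes of the generated image. This sketch assumes the pydantic v2 API (model_dump_json, the counterpart of the server's model_validate_json), that TextToImageRequest accepts the prompt/width/height/seed fields the pipeline reads, and that it runs from the repository root where inferences.sock is created:

from io import BytesIO
from multiprocessing.connection import Client

from PIL import Image
from pipelines.models import TextToImageRequest

request = TextToImageRequest(
    prompt="a red bicycle leaning against a brick wall",
    width=1024,
    height=1024,
    seed=42,
)

with Client("inferences.sock") as connection:
    # Send the JSON-encoded request as raw bytes, mirroring the server loop.
    connection.send_bytes(request.model_dump_json().encode("utf-8"))
    # The reply is the JPEG-encoded image.
    image = Image.open(BytesIO(connection.recv_bytes()))
    image.save("output.jpg")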
src/pipeline.py ADDED
@@ -0,0 +1,95 @@
+ from diffusers import FluxPipeline, AutoencoderKL, AutoencoderTiny
+ from diffusers.image_processor import VaeImageProcessor
+ from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
+
+ from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
+ import torch
+ import torch._dynamo
+ import gc
+ from PIL import Image as img
+ from PIL.Image import Image
+ from pipelines.models import TextToImageRequest
+ from torch import Generator
+ import time
+ from diffusers import FluxTransformer2DModel, DiffusionPipeline
+ from torchao.quantization import quantize_, int8_weight_only
+ # from torchao.quantization import autoquant
+
+ Pipeline = None
+ torch.backends.cudnn.benchmark = True
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.cuda.set_per_process_memory_fraction(0.95)
+
+ ckpt_id = "black-forest-labs/FLUX.1-schnell"
+
+
+ def empty_cache():
+     gc.collect()
+     torch.cuda.empty_cache()
+     torch.cuda.reset_max_memory_allocated()
+     torch.cuda.reset_peak_memory_stats()
+
+
+ def load_pipeline() -> Pipeline:
+     empty_cache()
+
+     dtype, device = torch.bfloat16, "cuda"
+
+     # Prefer the local snapshot; fall back to the Hub if it is missing.
+     try:
+         vae = AutoencoderTiny.from_pretrained(
+             "/home/sandbox/.cache/huggingface/hub/models--proact--PRO_FLUX_1load/snapshots/91741cd838292c87e50d14d1b5d14335e180a961",
+             torch_dtype=dtype,
+         )
+     except Exception:
+         vae = AutoencoderTiny.from_pretrained("proact/PRO_FLUX_1load", torch_dtype=dtype)
+
+     try:
+         vae.enable_slicing()
+         vae.enable_tiling()
+     except Exception:
+         pass
+
+     ############ Text Encoder ############
+     text_encoder = CLIPTextModel.from_pretrained(
+         ckpt_id, subfolder="text_encoder", torch_dtype=torch.bfloat16
+     )
+     ############ Text Encoder 2 ############
+     text_encoder_2 = T5EncoderModel.from_pretrained(
+         "city96/t5-v1_1-xxl-encoder-bf16", torch_dtype=torch.bfloat16
+     )
+
+     model = FluxTransformer2DModel.from_pretrained(
+         "/home/sandbox/.cache/huggingface/hub/models--proact--PRO_FLUX_0load/snapshots/22f140d15b1b2f86794cda5ffb7bc52e7b917965",
+         torch_dtype=dtype,
+         use_safetensors=False,
+     )
+
+     empty_cache()
+     pipeline = DiffusionPipeline.from_pretrained(
+         ckpt_id,
+         transformer=model,
+         text_encoder=text_encoder,
+         text_encoder_2=text_encoder_2,
+         torch_dtype=dtype,
+         vae=vae,
+     ).to(device)
+
+     # Throwaway warm-up generation before serving requests.
+     empty_cache()
+     pipeline(
+         prompt="untenibleness, gynecocracy, overcapitalization, demiplate, shockable",
+         width=1024,
+         height=1024,
+         guidance_scale=0.0,
+         num_inference_steps=4,
+         max_sequence_length=256,
+     )
+
+     return pipeline
+
+
+ @torch.inference_mode()
+ def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
+     empty_cache()
+     if request.seed is None:
+         generator = None
+     else:
+         generator = Generator(device="cuda").manual_seed(request.seed)
+
+     empty_cache()
+     image = pipeline(
+         prompt=request.prompt,
+         width=request.width,
+         height=request.height,
+         guidance_scale=0.0,
+         generator=generator,
+         output_type="pil",
+         max_sequence_length=256,
+         num_inference_steps=4,
+     ).images[0]
+     return image
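A minimal local smoke test sketch for this module, assuming a CUDA GPU with enough memory for FLUX.1-schnell, that the model snapshots resolve from the cache paths or the Hub as coded above, and that TextToImageRequest accepts these fields:

from pipelines.models import TextToImageRequest

from pipeline import load_pipeline, infer

pipeline = load_pipeline()  # includes the warm-up generation
image = infer(
    TextToImageRequest(prompt="a lighthouse at dusk", width=1024, height=1024, seed=0),
    pipeline,
)
image.save("smoke_test.jpg")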
uv.lock ADDED
The diff for this file is too large to render. See raw diff