CarlyCa committed
Commit 9732199 (verified) · Parent: 2aab4d6

Initial commit with folder contents

Files changed (5):
  1. .gitattributes +2 -0
  2. pyproject.toml +34 -0
  3. src/main.py +55 -0
  4. src/pipeline.py +70 -0
  5. uv.lock +0 -0
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+RobertML.png filter=lfs diff=lfs merge=lfs -text
+backup.png filter=lfs diff=lfs merge=lfs -text
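These two additions route the newly committed PNGs through Git LFS, alongside the archive and TensorBoard patterns already tracked. As a loose illustration of the glob matching involved (a sketch only; Git's real attribute matching has extra rules that fnmatch does not model):

# Rough illustration of which paths the new .gitattributes patterns catch.
# Git's actual attribute matching is richer (anchoring, ** semantics), so
# fnmatch here is only an approximation.
from fnmatch import fnmatch

lfs_patterns = ["RobertML.png", "backup.png", "*.zip", "*.zst", "*tfevents*"]
files = ["RobertML.png", "backup.png", "src/main.py", "events.out.tfevents.123"]

for path in files:
    tracked = any(fnmatch(path, pattern) for pattern in lfs_patterns)
    print(f"{path}: {'LFS' if tracked else 'git'}")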
pyproject.toml ADDED
@@ -0,0 +1,34 @@
+[build-system]
+requires = ["setuptools >= 75.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "flux-schnell-edge-inference"
+description = "An edge-maxxing model submission by RobertML for the 4090 Flux contest"
+requires-python = ">=3.10,<3.13"
+version = "8"
+dependencies = [
+    "diffusers==0.31.0",
+    "transformers==4.46.2",
+    "accelerate==1.1.0",
+    "omegaconf==2.3.0",
+    "torch==2.5.1",
+    "protobuf==5.28.3",
+    "sentencepiece==0.2.0",
+    "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
+    "gitpython>=3.1.43",
+    "hf_transfer==0.1.8",
+    "torchao==0.6.1",
+    "setuptools>=75.3.0",
+]
+[[tool.edge-maxxing.models]]
+repository = "CarlyCa/extra0well0"
+revision = "5f1e655eac99b1f5df8fde3ae9d0056a928986ed"
+
+[[tool.edge-maxxing.models]]
+repository = "CarlyCa/extra1well1"
+revision = "d2e0619b6df29fbced501ddf4c0c0de7d6b94063"
+
+[project.scripts]
+start_inference = "main:main"
+
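The two [[tool.edge-maxxing.models]] tables pin each Hugging Face repository to an exact revision, and [project.scripts] exposes main:main as the start_inference entry point. A minimal sketch of how such pins could be read back, assuming Python 3.11+ for the stdlib tomllib module (the actual edge-maxxing harness may consume this table differently):

# Sketch: read the pinned model revisions from pyproject.toml.
# Assumes Python 3.11+ for tomllib; this is not the contest's own loader.
import tomllib

with open("pyproject.toml", "rb") as f:
    config = tomllib.load(f)

for model in config["tool"]["edge-maxxing"]["models"]:
    # Each entry pins a repository to an exact commit for reproducibility.
    print(f"{model['repository']} @ {model['revision']}")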
src/main.py ADDED
@@ -0,0 +1,55 @@
+import atexit
+from io import BytesIO
+from multiprocessing.connection import Listener
+from os import chmod, remove
+from os.path import abspath, exists
+from pathlib import Path
+from git import Repo
+import torch
+
+from PIL.JpegImagePlugin import JpegImageFile
+from pipelines.models import TextToImageRequest
+from pipeline import load_pipeline, infer
+SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+def at_exit():
+    torch.cuda.empty_cache()
+
+
+def main():
+    atexit.register(at_exit)
+
+    print(f"Loading pipeline")
+    pipeline = load_pipeline()
+
+    print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+    if exists(SOCKET):
+        remove(SOCKET)
+
+    with Listener(SOCKET) as listener:
+        chmod(SOCKET, 0o777)
+
+        print(f"Awaiting connections")
+        with listener.accept() as connection:
+            print(f"Connected")
+            generator = torch.Generator("cuda")
+            while True:
+                try:
+                    request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                except EOFError:
+                    print(f"Inference socket exiting")
+
+                    return
+                image = infer(request, pipeline, generator.manual_seed(request.seed))
+                data = BytesIO()
+                image.save(data, format=JpegImageFile.format)
+
+                packet = data.getvalue()
+
+                connection.send_bytes(packet)
+
+
+if __name__ == '__main__':
+    main()
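main.py serves the pipeline over a Unix socket: each incoming message is a JSON-serialized TextToImageRequest, and each reply is the raw JPEG bytes of the generated image. A minimal client sketch under those assumptions; the field names mirror the attributes the server reads (prompt, seed, height, width), but the authoritative schema is pipelines.models.TextToImageRequest:

# Hypothetical client for the inference socket, assuming the JSON-in /
# JPEG-out protocol shown in main.py. Adjust the socket path to wherever
# main.py created inferences.sock.
import json
from io import BytesIO
from multiprocessing.connection import Client

from PIL import Image

with Client("inferences.sock") as connection:
    request = {"prompt": "a red bicycle", "seed": 42, "height": 1024, "width": 1024}
    connection.send_bytes(json.dumps(request).encode("utf-8"))

    # The server replies with the encoded JPEG of the generated image.
    image = Image.open(BytesIO(connection.recv_bytes()))
    image.save("output.jpg")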
src/pipeline.py ADDED
@@ -0,0 +1,70 @@
+from diffusers import (
+    DiffusionPipeline,
+    AutoencoderKL,
+    FluxPipeline,
+    FluxTransformer2DModel
+)
+from diffusers.image_processor import VaeImageProcessor
+from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
+from huggingface_hub.constants import HF_HUB_CACHE
+from transformers import (
+    T5EncoderModel,
+    T5TokenizerFast,
+    CLIPTokenizer,
+    CLIPTextModel
+)
+import torch
+import torch._dynamo
+import gc
+from PIL import Image
+from pipelines.models import TextToImageRequest
+from torch import Generator
+import time
+import math
+from typing import Type, Dict, Any, Tuple, Callable, Optional, Union
+import numpy as np
+import torch.nn as nn
+import torch.nn.functional as F
+from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only
+
+# preconfigs
+import os
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+os.environ["TOKENIZERS_PARALLELISM"] = "True"
+torch._dynamo.config.suppress_errors = True
+torch.backends.cuda.matmul.allow_tf32 = True
+torch.backends.cudnn.enabled = True
+# torch.backends.cudnn.benchmark = True
+
+# globals
+Pipeline = None
+ckpt_id = "CarlyCa/extra0well0"
+ckpt_revision = "5f1e655eac99b1f5df8fde3ae9d0056a928986ed"
+
+def empty_cache():
+    gc.collect()
+    torch.cuda.empty_cache()
+    torch.cuda.reset_max_memory_allocated()
+    torch.cuda.reset_peak_memory_stats()
+
+def load_pipeline() -> Pipeline:
+    vae = AutoencoderKL.from_pretrained(ckpt_id, revision=ckpt_revision, subfolder="vae", local_files_only=True, torch_dtype=torch.bfloat16)
+    quantize_(vae, int8_weight_only())
+    text_encoder_2 = T5EncoderModel.from_pretrained("CarlyCa/extra1well1", revision="d2e0619b6df29fbced501ddf4c0c0de7d6b94063", subfolder="text_encoder_2", torch_dtype=torch.bfloat16)
+    path = os.path.join(HF_HUB_CACHE, "models--CarlyCa--extra1well1/snapshots/d2e0619b6df29fbced501ddf4c0c0de7d6b94063/transformer")
+    transformer = FluxTransformer2DModel.from_pretrained(path, torch_dtype=torch.bfloat16, use_safetensors=False)
+    pipeline = FluxPipeline.from_pretrained(ckpt_id, revision=ckpt_revision, transformer=transformer, text_encoder_2=text_encoder_2, torch_dtype=torch.bfloat16)
+    pipeline.to("cuda")
+    pipeline.to(memory_format=torch.channels_last)
+    for _ in range(1):
+        pipeline(prompt="insensible, timbale, pothery, electrovital, actinogram, taxis, intracerebellar, centrodesmus", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
+    return pipeline
+
+sample = 1
+@torch.no_grad()
+def infer(request: TextToImageRequest, pipeline: Pipeline, generator: Generator) -> Image:
+    global sample
+    if not sample:
+        sample = 1
+        empty_cache()
+    return pipeline(request.prompt, generator=generator, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256, height=request.height, width=request.width, output_type="pil").images[0]
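load_pipeline() applies torchao's int8 weight-only quantization to a separately loaded VAE (note that, as written, the quantized vae is not passed into FluxPipeline.from_pretrained), then runs one warm-up generation so kernels and caches are primed before the first request. A standalone sketch of that quantization call pattern (the toy module and shapes below are illustrative, not from the submission):

# Sketch of torchao int8 weight-only quantization, the same call pattern
# load_pipeline() uses on the VAE. The toy module is illustrative; run on
# the target GPU for behavior representative of the 4090 setup.
import torch
import torch.nn as nn
from torchao.quantization import quantize_, int8_weight_only

device = "cuda" if torch.cuda.is_available() else "cpu"
model = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 8)).to(device, torch.bfloat16)

# quantize_ rewrites the module in place: Linear weights become int8
# (weight-only), while activations stay in bfloat16.
quantize_(model, int8_weight_only())

x = torch.randn(1, 64, device=device, dtype=torch.bfloat16)
print(model(x).shape)  # torch.Size([1, 8])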
uv.lock ADDED
The diff for this file is too large to render.