Upload folder using huggingface_hub

Files changed:
- pyproject.toml +34 -0
- src/main.py +58 -0
- src/pipeline.py +111 -0
- uv.lock +0 -0
pyproject.toml ADDED
@@ -0,0 +1,34 @@
[build-system]
requires = ["setuptools >= 75.0"]
build-backend = "setuptools.build_meta"

[project]
name = "flux-schnell-edge-inference"
description = "An edge-maxxing model submission by RobertML for the 4090 Flux contest"
requires-python = ">=3.10,<3.13"
version = "8"
dependencies = [
    "diffusers==0.31.0",
    "transformers==4.46.2",
    "accelerate==1.1.0",
    "omegaconf==2.3.0",
    "torch==2.5.1",
    "protobuf==5.28.3",
    "sentencepiece==0.2.0",
    "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
    "gitpython>=3.1.43",
    "hf_transfer==0.1.8",
    "torchao==0.6.1",
    "setuptools>=75.3.0",
]
[[tool.edge-maxxing.models]]
repository = "freaky231/t5-encoder-bf16"
revision = "994f6e4720f69e67bfc8822cbb4063c9149b801b"

[[tool.edge-maxxing.models]]
repository = "freaky231/FluxPipeline"
revision = "c5cf4b2fc96d25c81eb0783d2c362689ea9ccf28"

[project.scripts]
start_inference = "main:main"
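The two [[tool.edge-maxxing.models]] entries pin both Hugging Face repositories to exact commit revisions, and pipeline.py later loads the VAE with local_files_only=True, so the weights must already sit in the local HF cache when start_inference runs. This commit does not show how the contest harness fetches them; the sketch below is a hypothetical equivalent pre-fetch, assuming plain huggingface_hub.snapshot_download semantics:

    # Hypothetical pre-fetch of the pinned models; the actual download step is
    # handled outside this commit (assumed snapshot_download semantics).
    from huggingface_hub import snapshot_download

    MODELS = [
        ("freaky231/t5-encoder-bf16", "994f6e4720f69e67bfc8822cbb4063c9149b801b"),
        ("freaky231/FluxPipeline", "c5cf4b2fc96d25c81eb0783d2c362689ea9ccf28"),
    ]

    for repo_id, revision in MODELS:
        # Pins each repo to the revision from pyproject.toml so that
        # pipeline.py can later load with local_files_only=True.
        snapshot_download(repo_id=repo_id, revision=revision)

Pinning to commit hashes rather than branch names keeps the submission reproducible even if the upstream repositories change.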
src/main.py ADDED
@@ -0,0 +1,58 @@
import atexit
from io import BytesIO
from multiprocessing.connection import Listener
from os import chmod, remove
from os.path import abspath, exists
from pathlib import Path
from git import Repo
import torch

from PIL.JpegImagePlugin import JpegImageFile
from pipelines.models import TextToImageRequest
from pipeline import load_pipeline, infer

SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")


def at_exit():
    torch.cuda.empty_cache()


def main():
    atexit.register(at_exit)

    print("Loading pipeline")
    pipeline = load_pipeline()

    print(f"Pipeline loaded, creating socket at '{SOCKET}'")

    if exists(SOCKET):
        remove(SOCKET)

    with Listener(SOCKET) as listener:
        chmod(SOCKET, 0o777)

        print("Awaiting connections")
        with listener.accept() as connection:
            print("Connected")
            generator = torch.Generator("cuda")
            while True:
                try:
                    request = TextToImageRequest.model_validate_json(
                        connection.recv_bytes().decode("utf-8")
                    )
                except EOFError:
                    print("Inference socket exiting")

                    return
                image = infer(request, pipeline, generator.manual_seed(request.seed))
                data = BytesIO()
                image.save(data, format=JpegImageFile.format)

                packet = data.getvalue()

                connection.send_bytes(packet)


if __name__ == "__main__":
    main()
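src/main.py speaks a simple framed protocol over a Unix domain socket: each inbound message is one UTF-8 JSON encoding of a TextToImageRequest, and each outbound message is one JPEG byte blob. The client below is a hypothetical sketch of the other end; the real TextToImageRequest schema lives in the edge-maxxing pipelines package, and the fields shown are only the ones main.py and pipeline.py actually read (prompt, seed, width, height), so treat the exact field names as assumptions.

    # Hypothetical client for the inference socket; not part of this commit.
    import json
    from multiprocessing.connection import Client

    # Must match the absolute path main.py listens on (one directory above src/).
    SOCKET = "/path/to/inferences.sock"

    # Field names assumed from how main.py / pipeline.py read the request.
    request = {
        "prompt": "a watercolor fox in the snow",
        "seed": 42,
        "width": 1024,
        "height": 1024,
    }

    with Client(SOCKET) as connection:
        connection.send_bytes(json.dumps(request).encode("utf-8"))
        jpeg_bytes = connection.recv_bytes()  # one JPEG blob per request

    with open("out.jpg", "wb") as f:
        f.write(jpeg_bytes)

Note that main.py accepts a single connection and exits its loop on EOFError, so a client that disconnects after one request also shuts the server down.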
src/pipeline.py ADDED
@@ -0,0 +1,111 @@
from diffusers import (
    DiffusionPipeline,
    AutoencoderKL,
    FluxPipeline,
    FluxTransformer2DModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
from huggingface_hub.constants import HF_HUB_CACHE
from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
import torch
import torch._dynamo
import gc
from PIL import Image
from pipelines.models import TextToImageRequest
from torch import Generator
import time
import math
from typing import Type, Dict, Any, Tuple, Callable, Optional, Union
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only

# preconfigs
import os

os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
os.environ["TOKENIZERS_PARALLELISM"] = "True"
torch._dynamo.config.suppress_errors = True
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.enabled = True
# torch.backends.cudnn.benchmark = True

# globals
Pipeline = None  # placeholder used only in the type annotations below
ckpt_id = "freaky231/t5-encoder-bf16"
ckpt_revision = "994f6e4720f69e67bfc8822cbb4063c9149b801b"


def empty_cache():
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()


def load_pipeline() -> Pipeline:
    vae = AutoencoderKL.from_pretrained(
        ckpt_id,
        revision=ckpt_revision,
        subfolder="vae",
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
    quantize_(vae, int8_weight_only())
    text_encoder_2 = T5EncoderModel.from_pretrained(
        "freaky231/FluxPipeline",
        revision="c5cf4b2fc96d25c81eb0783d2c362689ea9ccf28",
        subfolder="text_encoder_2",
        torch_dtype=torch.bfloat16,
    )
    # Load the transformer straight from the HF cache snapshot (non-safetensors weights).
    path = os.path.join(
        HF_HUB_CACHE,
        "models--freaky231--FluxPipeline/snapshots/c5cf4b2fc96d25c81eb0783d2c362689ea9ccf28/transformer",
    )
    transformer = FluxTransformer2DModel.from_pretrained(
        path, torch_dtype=torch.bfloat16, use_safetensors=False
    )
    # Note: the quantized vae above is not passed here, so from_pretrained
    # loads its own VAE from ckpt_id.
    pipeline = FluxPipeline.from_pretrained(
        ckpt_id,
        revision=ckpt_revision,
        transformer=transformer,
        text_encoder_2=text_encoder_2,
        torch_dtype=torch.bfloat16,
    )
    pipeline.to("cuda")
    pipeline.to(memory_format=torch.channels_last)
    for _ in range(1):  # single warmup pass before the first real request
        pipeline(
            prompt="unaware, kettledrum, clayey, bioenergetic, radiograph, locomotion, subcortical, microtubule",
            width=1024,
            height=1024,
            guidance_scale=0.0,
            num_inference_steps=4,
            max_sequence_length=256,
        )
    return pipeline


sample = 1


@torch.no_grad()
def infer(
    request: TextToImageRequest, pipeline: Pipeline, generator: Generator
) -> Image.Image:
    global sample
    if not sample:  # with sample initialized to 1, this branch never runs
        sample = 1
        empty_cache()
    return pipeline(
        request.prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
        output_type="pil",
    ).images[0]
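The one quantization step in load_pipeline is quantize_(vae, int8_weight_only()) from torchao: an in-place swap of a module's nn.Linear weights to int8 while activations stay in the original dtype (though, as noted above, the quantized VAE is never wired into the final pipeline as written). As a standalone illustration of the API on a toy module, hypothetical and not part of the submission:

    # Toy stand-in for the real target; module shapes here are illustrative only.
    import torch
    import torch.nn as nn
    from torchao.quantization import quantize_, int8_weight_only

    model = nn.Sequential(
        nn.Linear(64, 128),
        nn.GELU(),
        nn.Linear(128, 64),
    ).to(torch.bfloat16)

    quantize_(model, int8_weight_only())  # mutates the Linear weights in place

    x = torch.randn(2, 64, dtype=torch.bfloat16)
    with torch.no_grad():
        y = model(x)  # inputs/outputs remain bfloat16; only weights are int8
    print(y.shape)  # torch.Size([2, 64])

By default quantize_ only rewrites nn.Linear modules, so a VAE's convolutions would be untouched; weight-only int8 roughly halves the memory of the affected layers relative to bf16.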
uv.lock ADDED
The diff for this file is too large to render; see the raw diff.