import gc
import os

# Let the CUDA caching allocator grow segments instead of fragmenting;
# set before torch touches the GPU.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

import torch
from diffusers import DiffusionPipeline, FluxTransformer2DModel
from huggingface_hub.constants import HF_HUB_CACHE
from PIL.Image import Image
from torch import Generator
from transformers import T5EncoderModel

from pipelines.models import TextToImageRequest

# Type alias used in the annotations below.
Pipeline = DiffusionPipeline

# Base checkpoint, pinned to a specific revision for reproducibility.
ckpt_id = "black-forest-labs/FLUX.1-schnell"
ckpt_revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
def empty_cache():
    """Free cached CUDA memory and reset the allocator's peak statistics."""
    gc.collect()
    torch.cuda.empty_cache()
    # reset_peak_memory_stats() supersedes the deprecated reset_max_memory_allocated().
    torch.cuda.reset_peak_memory_stats()

def load_pipeline() -> Pipeline:
    empty_cache()

    dtype, device = torch.bfloat16, "cuda"

    # Fine-tuned T5 text encoder from the jade012/FLUX.1-schnell1 repo,
    # pinned to a specific snapshot revision.
    text_encoder_2 = T5EncoderModel.from_pretrained(
        "jade012/FLUX.1-schnell1",
        revision="124d80794ed0cf70cac48ecd204dbe04b00dff94",
        subfolder="text_encoder_2",
        torch_dtype=dtype,
    )

    # The transformer is loaded straight from the local Hugging Face cache
    # path for that same snapshot.
    path = os.path.join(
        HF_HUB_CACHE,
        "models--jade012--FLUX.1-schnell1/snapshots/124d80794ed0cf70cac48ecd204dbe04b00dff94/transformer",
    )
    transformer = FluxTransformer2DModel.from_pretrained(path, torch_dtype=dtype, use_safetensors=False)

    pipeline = DiffusionPipeline.from_pretrained(
        ckpt_id,
        revision=ckpt_revision,
        transformer=transformer,
        text_encoder_2=text_encoder_2,
        torch_dtype=dtype,
    ).to(device)

    # Optional int8 weight-only quantization of the VAE (torchao); left disabled.
    # quantize_(pipeline.vae, int8_weight_only())

    # Warm-up pass so the first real request does not pay one-time startup costs.
    pipeline(
        prompt="logomancy, afterglow, aetheric, chondrichthyes, calibrator, pomeous, lackwit, presentness",
        width=1024, height=1024, guidance_scale=0.0,
        num_inference_steps=4, max_sequence_length=256,
    )

    empty_cache()
    return pipeline


@torch.no_grad()
def infer(request: TextToImageRequest, pipeline: Pipeline, generator: Generator) -> Image:
    image = pipeline(
        request.prompt, generator=generator, guidance_scale=0.0,
        num_inference_steps=4, max_sequence_length=256,
        height=request.height, width=request.width, output_type="pil",
    ).images[0]
    return image
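

# Minimal usage sketch (illustrative only, not one of the serving entry points):
# it assumes TextToImageRequest can be constructed with the `prompt`, `width`,
# and `height` fields that infer() reads, and that a CUDA device is available.
if __name__ == "__main__":
    pipe = load_pipeline()
    req = TextToImageRequest(prompt="a lighthouse at dusk, watercolor", width=1024, height=1024)
    gen = Generator("cuda").manual_seed(0)  # fixed seed for a reproducible demo
    infer(req, pipe, gen).save("sample.png")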