slobers committed on
Commit
09bb5e0
·
verified ·
1 Parent(s): 0ad923c

Update src/pipeline.py

Browse files
Files changed (1) hide show
  1. src/pipeline.py +26 -34
src/pipeline.py CHANGED
@@ -1,56 +1,48 @@
1
  #6
 
 
 
 
2
  import gc
3
  import os
4
- from typing import TypeAlias
5
-
6
- import torch
7
  from PIL.Image import Image
8
- from diffusers import FluxPipeline, FluxTransformer2DModel, AutoencoderKL, AutoencoderTiny
9
- from huggingface_hub.constants import HF_HUB_CACHE
10
  from pipelines.models import TextToImageRequest
11
  from torch import Generator
12
- from transformers import T5EncoderModel, CLIPTextModel
13
-
14
- Pipeline: TypeAlias = FluxPipeline
15
 
16
- CHECKPOINT = "black-forest-labs/FLUX.1-schnell"
17
- REVISION = "741f7c3ce8b383c54771c7003378a50191e9efe9"
 
18
 
 
 
 
19
 
20
  def load_pipeline() -> Pipeline:
21
- text_encoder = CLIPTextModel.from_pretrained(CHECKPOINT, revision=REVISION, subfolder="text_encoder", local_files_only=True, torch_dtype=torch.bfloat16,)
22
-
23
- path2 = os.path.join(HF_HUB_CACHE, "models--city96--t5-v1_1-xxl-encoder-bf16/snapshots/1b9c856aadb864af93c1dcdc226c2774fa67bc86")
24
-
25
- text_encoder_2 = T5EncoderModel.from_pretrained(path2, torch_dtype=torch.bfloat16,)
26
-
27
- pathV = os.path.join(HF_HUB_CACHE, "models--madebyollin--taef1/snapshots/5463ee684fd9131a724bea777a2f50d89b0b6b24")
28
-
29
- vae = AutoencoderTiny.from_pretrained(pathV, torch_dtype=torch.bfloat16,)
30
-
31
- pathT = os.path.join(HF_HUB_CACHE, "models--RobertML--FLUX.1-schnell-int8wo/snapshots/307e0777d92df966a3c0f99f31a6ee8957a9857a")
32
-
33
- transformer = FluxTransformer2DModel.from_pretrained(pathT, torch_dtype=torch.bfloat16, use_safetensors=False,)
34
-
35
- pipeline = FluxPipeline.from_pretrained(CHECKPOINT, revision=REVISION, local_files_only=True, text_encoder=text_encoder, text_encoder_2=text_encoder_2, transformer=transformer, vae=vae, torch_dtype=torch.bfloat16,).to("cuda")
36
-
37
- pipeline("")
38
-
39
  return pipeline
40
 
 
41
  def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
42
- gc.collect()
43
- torch.cuda.empty_cache()
44
- torch.cuda.reset_peak_memory_stats()
45
-
46
  generator = Generator(pipeline.device).manual_seed(request.seed)
47
 
48
  return pipeline(
49
  request.prompt,
50
  generator=generator,
51
- guidance_scale=0.5,
52
  num_inference_steps=4,
53
  max_sequence_length=256,
54
  height=request.height,
55
  width=request.width,
56
- ).images[0]
 
1
  #6
2
+ from huggingface_hub.constants import HF_HUB_CACHE
3
+ from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
4
+ import torch
5
+ import torch._dynamo
6
  import gc
7
  import os
8
+ from diffusers import FluxPipeline, AutoencoderKL, AutoencoderTiny
 
 
9
  from PIL.Image import Image
 
 
10
  from pipelines.models import TextToImageRequest
11
  from torch import Generator
12
+ from diffusers import FluxTransformer2DModel, DiffusionPipeline
13
+ from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only
 
14
 
15
# Allocator / tokenizer tuning. NOTE(review): torch is already imported at
# this point; PYTORCH_CUDA_ALLOC_CONF is read lazily at the first CUDA
# allocation, so setting it here should still take effect — confirm.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
os.environ["TOKENIZERS_PARALLELISM"] = "True"
# Fall back to eager execution instead of raising when dynamo cannot compile.
torch._dynamo.config.suppress_errors = True

# FIX: this was `Pipeline = None`, which made the `-> Pipeline` annotations
# on load_pipeline/infer annotations against None. Alias the real class.
Pipeline = DiffusionPipeline

# Base checkpoint and pinned revision used by load_pipeline().
ids = "black-forest-labs/FLUX.1-schnell"
Revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
22
 
23
def load_pipeline() -> Pipeline:
    """Build and warm up the FLUX.1-schnell text-to-image pipeline.

    Loads an int8-quantized VAE and transformer plus a bf16 T5-XXL text
    encoder, assembles the diffusers pipeline on CUDA, and runs three
    warm-up generations so the first real request does not pay one-time
    compile/caching costs.

    Returns:
        The CUDA-resident, warmed-up ``DiffusionPipeline``.
    """
    # VAE: bf16 weights, quantized to int8 weight-only to cut GPU memory.
    vae = AutoencoderKL.from_pretrained(
        ids,
        revision=Revision,
        subfolder="vae",
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
    quantize_(vae, int8_weight_only())

    # Pre-converted bf16 T5-XXL encoder (pinned to an exact snapshot).
    text_encoder_2 = T5EncoderModel.from_pretrained(
        "city96/t5-v1_1-xxl-encoder-bf16",
        revision="1b9c856aadb864af93c1dcdc226c2774fa67bc86",
        torch_dtype=torch.bfloat16,
    ).to(memory_format=torch.channels_last)

    # int8 weight-only FLUX transformer, resolved from the local HF cache.
    path = os.path.join(
        HF_HUB_CACHE,
        "models--RobertML--FLUX.1-schnell-int8wo/snapshots/307e0777d92df966a3c0f99f31a6ee8957a9857a",
    )
    transformer = FluxTransformer2DModel.from_pretrained(
        path,
        torch_dtype=torch.bfloat16,
        use_safetensors=False,
    ).to(memory_format=torch.channels_last)

    # FIX: the quantized VAE above was loaded but never passed to the
    # pipeline, so its load + quantization was dead work and the default
    # full-precision VAE was used instead. Wire it in explicitly (the
    # previous revision of this file also supplied a custom VAE).
    pipeline = DiffusionPipeline.from_pretrained(
        ids,
        revision=Revision,
        vae=vae,
        transformer=transformer,
        text_encoder_2=text_encoder_2,
        torch_dtype=torch.bfloat16,
    )
    pipeline.to("cuda")

    # Warm-up: a throwaway nonsense prompt run three times to populate
    # CUDA/compile caches before serving real requests.
    for _ in range(3):
        pipeline(
            prompt="insensible, timbale, pothery, electrovital, actinogram, taxis, intracerebellar, centrodesmus",
            width=1024,
            height=1024,
            guidance_scale=0.0,
            num_inference_steps=4,
            max_sequence_length=256,
        )

    return pipeline
35
 
36
@torch.no_grad()
def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
    """Generate a single image for *request* with the warmed-up pipeline.

    Sampling is made deterministic by seeding a generator, on the
    pipeline's device, with the request-supplied seed.
    """
    rng = Generator(pipeline.device).manual_seed(request.seed)

    # guidance_scale=0.0: schnell is a distilled model run without CFG.
    result = pipeline(
        request.prompt,
        generator=rng,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
    )
    return result.images[0]