John6666 committed on
Commit
9b4d4c0
·
verified ·
1 Parent(s): 4781298

Upload 2 files

Browse files
Files changed (2) hide show
  1. handler.py +5 -17
  2. requirements.txt +3 -2
handler.py CHANGED
@@ -2,7 +2,6 @@ import os
2
  from typing import Any, Dict
3
 
4
  from diffusers import FluxPipeline, FluxTransformer2DModel, AutoencoderKL, TorchAoConfig
5
- from diffusers.utils.remote_utils import remote_decode
6
  from PIL import Image
7
  import torch
8
 
@@ -27,10 +26,9 @@ class EndpointHandler:
27
  #repo_id = "NoMoreCopyright/FLUX.1-dev-test"
28
  dtype = torch.bfloat16
29
  quantization_config = TorchAoConfig("int8wo")
30
- #vae = AutoencoderKL.from_pretrained(repo_id, subfolder="vae", torch_dtype=dtype)
31
  #transformer = FluxTransformer2DModel.from_pretrained(repo_id, subfolder="transformer", torch_dtype=dtype, quantization_config=quantization_config).to("cuda")
32
- #self.pipeline = FluxPipeline.from_pretrained(repo_id, vae=vae, torch_dtype=dtype, quantization_config=quantization_config)
33
- self.pipeline = FluxPipeline.from_pretrained(repo_id, vae=None, torch_dtype=dtype, quantization_config=quantization_config)
34
  if IS_COMPILE: self.pipeline = compile_pipeline(self.pipeline)
35
  self.pipeline.to("cuda")
36
 
@@ -58,23 +56,13 @@ class EndpointHandler:
58
  seed = parameters.get("seed", 0)
59
  generator = torch.manual_seed(seed)
60
 
61
- latent = self.pipeline( # type: ignore
62
  prompt,
63
  height=height,
64
  width=width,
65
  guidance_scale=guidance_scale,
66
  num_inference_steps=num_inference_steps,
67
  generator=generator,
68
- output_type="latent",
69
- ).images
70
-
71
- image = remote_decode(
72
- endpoint="https://whhx50ex1aryqvw6.us-east-1.aws.endpoints.huggingface.cloud/",
73
- tensor=latent,
74
- height=height,
75
- width=width,
76
- scaling_factor=0.3611,
77
- shift_factor=0.1159,
78
- )
79
 
80
- return image
 
2
  from typing import Any, Dict
3
 
4
  from diffusers import FluxPipeline, FluxTransformer2DModel, AutoencoderKL, TorchAoConfig
 
5
  from PIL import Image
6
  import torch
7
 
 
26
  #repo_id = "NoMoreCopyright/FLUX.1-dev-test"
27
  dtype = torch.bfloat16
28
  quantization_config = TorchAoConfig("int8wo")
29
+ vae = AutoencoderKL.from_pretrained(repo_id, subfolder="vae", torch_dtype=dtype)
30
  #transformer = FluxTransformer2DModel.from_pretrained(repo_id, subfolder="transformer", torch_dtype=dtype, quantization_config=quantization_config).to("cuda")
31
+ self.pipeline = FluxPipeline.from_pretrained(repo_id, vae=vae, torch_dtype=dtype, quantization_config=quantization_config)
 
32
  if IS_COMPILE: self.pipeline = compile_pipeline(self.pipeline)
33
  self.pipeline.to("cuda")
34
 
 
56
  seed = parameters.get("seed", 0)
57
  generator = torch.manual_seed(seed)
58
 
59
+ return self.pipeline( # type: ignore
60
  prompt,
61
  height=height,
62
  width=width,
63
  guidance_scale=guidance_scale,
64
  num_inference_steps=num_inference_steps,
65
  generator=generator,
66
+ output_type="pil",
67
+ ).images[0]
 
 
 
 
 
 
 
 
 
68
 
 
requirements.txt CHANGED
@@ -2,7 +2,7 @@ huggingface_hub
2
  torch
3
  torchvision
4
  torchao
5
- git+https://github.com/huggingface/diffusers
6
  peft
7
  accelerate
8
  transformers
@@ -11,4 +11,5 @@ scipy
11
  Pillow
12
  sentencepiece
13
  protobuf
14
- pytorch-lightning
 
 
2
  torch
3
  torchvision
4
  torchao
5
+ diffusers
6
  peft
7
  accelerate
8
  transformers
 
11
  Pillow
12
  sentencepiece
13
  protobuf
14
+ pytorch-lightning
15
+ xformers