rak-r05 commited on
Commit
9264cac
·
1 Parent(s): 4bf7499

Split the models into 2

Browse files
Files changed (1) hide show
  1. src/pipeline.py +54 -8
src/pipeline.py CHANGED
@@ -1,10 +1,11 @@
1
  import torch
2
  from PIL.Image import Image
3
- from diffusers import StableDiffusionXLPipeline
4
  from pipelines.models import TextToImageRequest
5
  from torch import Generator
6
  from DeepCache import DeepCacheSDHelper
7
 
 
8
  def callback_dynamic_cfg(pipeline, step_index, timestep, callback_kwargs):
9
  if step_index == int(pipeline.num_timesteps * 0.5):
10
  callback_kwargs['prompt_embeds'] = callback_kwargs['prompt_embeds'].chunk(2)[-1]
@@ -13,10 +14,13 @@ def callback_dynamic_cfg(pipeline, step_index, timestep, callback_kwargs):
13
  pipeline._guidance_scale = 0.0
14
 
15
  return callback_kwargs
16
-
17
 
18
  # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
19
- def load_pipeline() -> StableDiffusionXLPipeline:
 
 
 
20
  pipeline = StableDiffusionXLPipeline.from_pretrained(
21
  "./models/newdream-sdxl-20",
22
  torch_dtype=torch.float16,
@@ -25,22 +29,63 @@ def load_pipeline() -> StableDiffusionXLPipeline:
25
  variant='fp16',
26
  ).to("cuda")
27
 
 
 
 
 
 
 
 
 
28
  helper = DeepCacheSDHelper(pipe=pipeline)
29
  helper.set_params(cache_interval=3, cache_branch_id=0)
30
  helper.enable()
31
 
 
 
 
 
 
32
  for _ in range(5):
33
  pipeline(prompt="")
34
 
35
- return pipeline
 
 
 
 
36
 
37
 
38
- def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
39
  if request.seed is None:
40
  generator = None
41
  else:
42
- generator = Generator(pipeline.device).manual_seed(request.seed)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
 
 
44
  return pipeline(
45
  prompt=request.prompt,
46
  negative_prompt=request.negative_prompt,
@@ -48,6 +93,7 @@ def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> I
48
  height=request.height,
49
  generator=generator,
50
  num_inference_steps=27,
51
- callback_on_step_end=callback_dynamic_cfg,
52
- callback_on_step_end_tensor_inputs=['prompt_embeds', 'add_text_embeds', 'add_time_ids'],
53
  ).images[0]
 
 
1
  import torch
2
  from PIL.Image import Image
3
+ from diffusers import StableDiffusionXLPipeline, AutoPipelineForImage2Image
4
  from pipelines.models import TextToImageRequest
5
  from torch import Generator
6
  from DeepCache import DeepCacheSDHelper
7
 
8
# NOTE(review): the dynamic-CFG callback (`callback_dynamic_cfg`, which dropped
# classifier-free guidance halfway through sampling) was retired when generation
# was split into base + refiner stages. The commented-out copy that used to sit
# here has been removed — recover it from git history if it is ever needed again.
18
 
19
  # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
20
+
21
def load_pipeline() -> dict:
    """Load the SDXL base pipeline and the SDXL refiner, then warm up the base.

    Returns:
        dict with two keys:
          'base_pipeline': StableDiffusionXLPipeline (text-to-image, fp16, CUDA)
          'refiner':       image-to-image pipeline from AutoPipelineForImage2Image
        Both have DeepCache enabled (cache_interval=3, cache_branch_id=0).
    """
    pipeline = StableDiffusionXLPipeline.from_pretrained(
        "./models/newdream-sdxl-20",
        torch_dtype=torch.float16,
        # NOTE(review): the diff view elides one or two kwargs here (hidden
        # context between torch_dtype and variant) — preserve whatever the
        # actual file passes in that gap.
        variant='fp16',
    ).to("cuda")

    refiner = AutoPipelineForImage2Image.from_pretrained(
        'stabilityai/stable-diffusion-xl-refiner-1.0',
        use_safetensors=True,
        torch_dtype=torch.float16,
        variant='fp16',
    ).to('cuda')

    # DeepCache on both models: reuse cached U-Net features, refreshing the
    # cache every 3rd step from the shallowest branch.
    helper = DeepCacheSDHelper(pipe=pipeline)
    helper.set_params(cache_interval=3, cache_branch_id=0)
    helper.enable()

    refiner_helper = DeepCacheSDHelper(pipe=refiner)
    refiner_helper.set_params(cache_interval=3, cache_branch_id=0)
    refiner_helper.enable()

    # Warm-up runs so CUDA kernels/allocations are primed before the first
    # real request. NOTE(review): only the base pipeline is warmed here; the
    # refiner's first real call will pay the cold-start cost — confirm this
    # is intentional.
    for _ in range(5):
        pipeline(prompt="")

    # (The dead `pipeline_dict = {}` pre-initialization was removed — it was
    # immediately overwritten by this literal.)
    return {
        'base_pipeline': pipeline,
        'refiner': refiner,
    }
57
 
58
 
59
def infer(request: TextToImageRequest, pipeline_dict: dict) -> Image:
    """Run two-stage SDXL generation for one request.

    Stage 1: the base pipeline denoises the first 80% of the schedule
    (denoising_end=0.8) and emits latents (output_type='latent').
    Stage 2: the refiner consumes those latents and finishes the remaining
    20% (denoising_start=0.8), returning the final PIL image.

    Args:
        request: prompt/negative_prompt/width/height/seed container.
        pipeline_dict: dict produced by load_pipeline() with keys
            'base_pipeline' and 'refiner'.

    Returns:
        The first generated PIL image.
    """
    base = pipeline_dict['base_pipeline']
    refiner = pipeline_dict['refiner']

    if request.seed is None:
        generator = None
    else:
        generator = Generator(base.device).manual_seed(request.seed)

    latents = base(
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        width=request.width,
        height=request.height,
        generator=generator,
        num_inference_steps=27,
        denoising_end=0.8,
        output_type='latent',
    ).images

    # The refiner's resolution is fixed by the incoming latents, so width and
    # height are NOT passed here (the SDXL img2img pipeline derives size from
    # `image`; the official base+refiner recipe passes only image/denoising_start).
    # With denoising_start=0.8 the refiner executes only the last ~20% of the
    # 27-step schedule.
    return refiner(
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        generator=generator,
        num_inference_steps=27,
        denoising_start=0.8,
        image=latents,
    ).images[0]
    # (The commented-out single-stage legacy call that trailed this function
    # has been removed — recover it from git history if needed.)