silencer107 committed on
Commit
d04a589
·
verified ·
1 Parent(s): 838e2d9

Update src/pipeline.py

Browse files
Files changed (1) hide show
  1. src/pipeline.py +5 -6
src/pipeline.py CHANGED
@@ -6,6 +6,8 @@ from onediffx.deep_cache import StableDiffusionXLPipeline
6
  #from diffusers import StableDiffusionXLPipeline
7
  from pipelines.models import TextToImageRequest
8
  from torch import Generator
 
 
9
 
10
  #from onediff.schedulers import EulerDiscreteScheduler
11
  from onediffx import compile_pipe
@@ -16,14 +18,11 @@ def load_pipeline(pipeline=None) -> StableDiffusionXLPipeline:
16
  pipeline = StableDiffusionXLPipeline.from_pretrained(
17
  "stablediffusionapi/newdream-sdxl-20",
18
  torch_dtype=torch.float16,
19
- local_files_only=True,
20
  )
21
- # pipeline.scheduler = EulerDiscreteScheduler.from_config(
22
- # pipeline.scheduler.config,)
23
-
24
  pipeline.to("cuda")
25
-
26
  pipeline = compile_pipe(pipeline)
 
27
  for _ in range(4):
28
  deepcache_output = pipeline(prompt="kamala harris defends my submission", output_type="pil", cache_interval=1, cache_layer_id=1, cache_block_id=0)
29
  return pipeline
@@ -41,7 +40,7 @@ def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> I
41
  height=request.height,
42
  generator=generator,
43
  end_cfg=0.5,
44
- num_inference_steps=20,
45
  cache_interval=1,
46
  cache_layer_id=1,
47
  cache_block_id=0,
 
6
  #from diffusers import StableDiffusionXLPipeline
7
  from pipelines.models import TextToImageRequest
8
  from torch import Generator
9
+ from diffusers import DDIMScheduler
10
+ from loss import SchedulerWrapper
11
 
12
  #from onediff.schedulers import EulerDiscreteScheduler
13
  from onediffx import compile_pipe
 
18
  pipeline = StableDiffusionXLPipeline.from_pretrained(
19
  "stablediffusionapi/newdream-sdxl-20",
20
  torch_dtype=torch.float16,
 
21
  )
 
 
 
22
  pipeline.to("cuda")
23
+ pipeline.scheduler = SchedulerWrapper(DDIMScheduler.from_config(pipeline.scheduler.config))
24
  pipeline = compile_pipe(pipeline)
25
+ pipeline.scheduler.prepare_loss()
26
  for _ in range(4):
27
  deepcache_output = pipeline(prompt="kamala harris defends my submission", output_type="pil", cache_interval=1, cache_layer_id=1, cache_block_id=0)
28
  return pipeline
 
40
  height=request.height,
41
  generator=generator,
42
  end_cfg=0.5,
43
+ num_inference_steps=14,
44
  cache_interval=1,
45
  cache_layer_id=1,
46
  cache_block_id=0,