manbeast3b committed (verified)
Commit 67ae3d5
1 Parent(s): cc0fd4d

Update src/pipeline.py

Files changed (1)
  1. src/pipeline.py  +3 -3
src/pipeline.py CHANGED

@@ -25,14 +25,14 @@ def load_pipeline(pipeline=None) -> StableDiffusionXLPipeline:
         torch_dtype=torch.float16,
     ).to("cuda")
 
-    # pipeline.scheduler = SchedulerWrapper(DDIMScheduler.from_config(pipeline.scheduler.config))
+    pipeline.scheduler = SchedulerWrapper(DDIMScheduler.from_config(pipeline.scheduler.config))
 
     pipeline = compile_pipe(pipeline)
     load_pipe(pipeline, dir="/home/sandbox/.cache/huggingface/hub/models--RobertML--cached-pipe-02/snapshots/58d70deae87034cce351b780b48841f9746d4ad7")
 
     for _ in range(1):
         deepcache_output = pipeline(prompt="telestereography, unstrengthen, preadministrator, copatroness, hyperpersonal, paramountness, paranoid, guaniferous", output_type="pil", num_inference_steps=20)
-    # pipeline.scheduler.prepare_loss()
+    pipeline.scheduler.prepare_loss()
     for _ in range(2):
         pipeline(prompt="telestereography, unstrengthen, preadministrator, copatroness, hyperpersonal, paramountness, paranoid, guaniferous", output_type="pil", num_inference_steps=20)
     return pipeline
@@ -49,7 +49,7 @@ def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> I
         width=request.width,
         height=request.height,
         generator=generator,
-        num_inference_steps=20,
+        num_inference_steps=13,
         cache_interval=1,
         cache_layer_id=1,
         cache_block_id=0,
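
For context, this commit enables the previously commented-out SchedulerWrapper around DDIMScheduler, calls prepare_loss() after the first warm-up generation in load_pipeline, and lowers num_inference_steps in infer from 20 to 13 while keeping the cache_interval / cache_layer_id / cache_block_id arguments. The SchedulerWrapper class is defined elsewhere in the repository and is not part of this diff; the snippet below is only a minimal sketch of the delegation pattern such a wrapper would need. Only the names SchedulerWrapper, DDIMScheduler and prepare_loss() come from the diff; the recorded-output bookkeeping and the body of prepare_loss are assumptions, not the repo's actual logic.

# A minimal sketch, assuming the real SchedulerWrapper follows a plain delegation pattern.
from diffusers import DDIMScheduler


class SchedulerWrapper:
    """Thin proxy around a diffusers scheduler that records step outputs."""

    def __init__(self, scheduler):
        self.scheduler = scheduler
        self._step_outputs = []  # hypothetical bookkeeping filled during warm-up runs

    def step(self, *args, **kwargs):
        # Run the wrapped scheduler's denoising step and keep a reference to the result.
        out = self.scheduler.step(*args, **kwargs)
        self._step_outputs.append(out)
        return out

    def prepare_loss(self):
        # Hypothetical hook invoked after the first warm-up generation in load_pipeline();
        # here it simply discards what was recorded so the later runs start clean.
        self._step_outputs.clear()

    def __getattr__(self, name):
        # Delegate everything else (config, timesteps, set_timesteps, ...) to the
        # wrapped scheduler so the pipeline can use the proxy transparently.
        return getattr(self.scheduler, name)


# Usage mirroring the diff:
# pipeline.scheduler = SchedulerWrapper(DDIMScheduler.from_config(pipeline.scheduler.config))
# ... one warm-up generation ...
# pipeline.scheduler.prepare_loss()

With a proxy like this, the two lines added in the diff (constructing the wrapper from the existing scheduler config, then calling prepare_loss() between the warm-up runs) would work without changing any other call sites in the pipeline.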