Ngene787 committed on
Commit
73c9ba5
·
1 Parent(s): 69057b2

feat: add test for inference and remove the picture.

Browse files
Files changed (1) hide show
  1. stable_diffusion_inference.py +8 -7
stable_diffusion_inference.py CHANGED
@@ -25,7 +25,7 @@ if torch.cuda.is_available():
25
  else:
26
  torch_dtype = torch.float32
27
  pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch_dtype,
28
- # low_cpu_mem_usage=True,
29
  # requires_safety_checker=False
30
  )
31
  pipe = pipe.to(device)
@@ -35,10 +35,10 @@ pipe = accelerator.prepare(pipe)
35
  # pipe.enable_xformers_memory_efficient_attention()
36
 
37
  # Enable attention slicing
38
- # pipe.enable_attention_slicing()
39
 
40
  # Enable VAE slicing
41
- # pipe.enable_vae_slicing()
42
 
43
  MAX_SEED = np.iinfo(np.int32).max
44
  MAX_IMAGE_SIZE = 1024
@@ -46,13 +46,13 @@ MAX_IMAGE_SIZE = 1024
46
 
47
  @spaces.GPU(duration=65)
48
  def inference(prompt,
49
- negative_prompt="",
50
- seed=42,
51
  randomize_seed=False,
52
  width=MAX_IMAGE_SIZE,
53
  height=MAX_IMAGE_SIZE,
54
  guidance_scale=7.5,
55
- num_inference_steps=100,
56
  progress=gr.Progress(track_tqdm=True), ):
57
  if randomize_seed:
58
  seed = random.randint(0, MAX_SEED)
@@ -62,8 +62,9 @@ def inference(prompt,
62
  logger.info('Generating image ...')
63
  image = pipe(
64
  prompt=prompt,
65
- negative_prompt=negative_prompt,
66
  guidance_scale=guidance_scale,
 
67
  num_inference_steps=num_inference_steps,
68
  width=width,
69
  height=height,
 
25
  else:
26
  torch_dtype = torch.float32
27
  pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch_dtype,
28
+ low_cpu_mem_usage=True,
29
  # requires_safety_checker=False
30
  )
31
  pipe = pipe.to(device)
 
35
  # pipe.enable_xformers_memory_efficient_attention()
36
 
37
  # Enable attention slicing
38
+ pipe.enable_attention_slicing()
39
 
40
  # Enable VAE slicing
41
+ pipe.enable_vae_slicing()
42
 
43
  MAX_SEED = np.iinfo(np.int32).max
44
  MAX_IMAGE_SIZE = 1024
 
46
 
47
  @spaces.GPU(duration=65)
48
  def inference(prompt,
49
+ # negative_prompt="",
50
+ seed=0,
51
  randomize_seed=False,
52
  width=MAX_IMAGE_SIZE,
53
  height=MAX_IMAGE_SIZE,
54
  guidance_scale=7.5,
55
+ num_inference_steps=10,
56
  progress=gr.Progress(track_tqdm=True), ):
57
  if randomize_seed:
58
  seed = random.randint(0, MAX_SEED)
 
62
  logger.info('Generating image ...')
63
  image = pipe(
64
  prompt=prompt,
65
+ # negative_prompt=negative_prompt,
66
  guidance_scale=guidance_scale,
67
+ eta=0.0,
68
  num_inference_steps=num_inference_steps,
69
  width=width,
70
  height=height,