Ngene787 committed on
Commit
6994397
·
1 Parent(s): 3355fee

feat: add test for inference and remove the picture.

Browse files
Files changed (1) hide show
  1. stable_diffusion_inference.py +5 -5
stable_diffusion_inference.py CHANGED
@@ -17,7 +17,7 @@ from loguru import logger
17
 
18
  model_path = 'Ngene787/Faice_text2face'
19
 
20
- # accelerator = Accelerator(mixed_precision="fp16", gradient_accumulation_steps=1)
21
  logger.info("Loading model ...")
22
  device = "cuda" if torch.cuda.is_available() else "cpu"
23
  if torch.cuda.is_available():
@@ -30,15 +30,15 @@ pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch_dty
30
  )
31
  pipe = pipe.to(device)
32
 
33
- # pipe = accelerator.prepare(pipe)
34
  # Enable memory-efficient attention
35
- # pipe.enable_xformers_memory_efficient_attention()
36
 
37
  # Enable attention slicing
38
- # pipe.enable_attention_slicing()
39
 
40
  # Enable VAE slicing
41
- # pipe.enable_vae_slicing()
42
 
43
  MAX_SEED = np.iinfo(np.int32).max
44
  MAX_IMAGE_SIZE = 1024
 
17
 
18
  model_path = 'Ngene787/Faice_text2face'
19
 
20
+ accelerator = Accelerator(mixed_precision="fp16", gradient_accumulation_steps=1)
21
  logger.info("Loading model ...")
22
  device = "cuda" if torch.cuda.is_available() else "cpu"
23
  if torch.cuda.is_available():
 
30
  )
31
  pipe = pipe.to(device)
32
 
33
+ pipe = accelerator.prepare(pipe)
34
  # Enable memory-efficient attention
35
+ pipe.enable_xformers_memory_efficient_attention()
36
 
37
  # Enable attention slicing
38
+ pipe.enable_attention_slicing()
39
 
40
  # Enable VAE slicing
41
+ pipe.enable_vae_slicing()
42
 
43
  MAX_SEED = np.iinfo(np.int32).max
44
  MAX_IMAGE_SIZE = 1024