Ngene787 committed on
Commit
3355fee
·
1 Parent(s): 73c9ba5

feat: add test for inference and remove the picture.

Browse files
Files changed (1) hide show
  1. stable_diffusion_inference.py +6 -6
stable_diffusion_inference.py CHANGED
@@ -17,7 +17,7 @@ from loguru import logger
17
 
18
  model_path = 'Ngene787/Faice_text2face'
19
 
20
- accelerator = Accelerator(mixed_precision="fp16", gradient_accumulation_steps=1)
21
  logger.info("Loading model ...")
22
  device = "cuda" if torch.cuda.is_available() else "cpu"
23
  if torch.cuda.is_available():
@@ -30,15 +30,15 @@ pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch_dty
30
  )
31
  pipe = pipe.to(device)
32
 
33
- pipe = accelerator.prepare(pipe)
34
  # Enable memory-efficient attention
35
  # pipe.enable_xformers_memory_efficient_attention()
36
 
37
  # Enable attention slicing
38
- pipe.enable_attention_slicing()
39
 
40
  # Enable VAE slicing
41
- pipe.enable_vae_slicing()
42
 
43
  MAX_SEED = np.iinfo(np.int32).max
44
  MAX_IMAGE_SIZE = 1024
@@ -46,7 +46,7 @@ MAX_IMAGE_SIZE = 1024
46
 
47
  @spaces.GPU(duration=65)
48
  def inference(prompt,
49
- # negative_prompt="",
50
  seed=0,
51
  randomize_seed=False,
52
  width=MAX_IMAGE_SIZE,
@@ -62,7 +62,7 @@ def inference(prompt,
62
  logger.info('Generating image ...')
63
  image = pipe(
64
  prompt=prompt,
65
- # negative_prompt=negative_prompt,
66
  guidance_scale=guidance_scale,
67
  eta=0.0,
68
  num_inference_steps=num_inference_steps,
 
17
 
18
  model_path = 'Ngene787/Faice_text2face'
19
 
20
+ # accelerator = Accelerator(mixed_precision="fp16", gradient_accumulation_steps=1)
21
  logger.info("Loading model ...")
22
  device = "cuda" if torch.cuda.is_available() else "cpu"
23
  if torch.cuda.is_available():
 
30
  )
31
  pipe = pipe.to(device)
32
 
33
+ # pipe = accelerator.prepare(pipe)
34
  # Enable memory-efficient attention
35
  # pipe.enable_xformers_memory_efficient_attention()
36
 
37
  # Enable attention slicing
38
+ # pipe.enable_attention_slicing()
39
 
40
  # Enable VAE slicing
41
+ # pipe.enable_vae_slicing()
42
 
43
  MAX_SEED = np.iinfo(np.int32).max
44
  MAX_IMAGE_SIZE = 1024
 
46
 
47
  @spaces.GPU(duration=65)
48
  def inference(prompt,
49
+ negative_prompt="",
50
  seed=0,
51
  randomize_seed=False,
52
  width=MAX_IMAGE_SIZE,
 
62
  logger.info('Generating image ...')
63
  image = pipe(
64
  prompt=prompt,
65
+ negative_prompt=negative_prompt,
66
  guidance_scale=guidance_scale,
67
  eta=0.0,
68
  num_inference_steps=num_inference_steps,