mochifz committed on
Commit
a546565
·
verified ·
1 Parent(s): a752c0b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -3
app.py CHANGED
@@ -54,10 +54,10 @@ def update_language(new_language):
54
  text2img = None
55
  img2img = None
56
  def Generate(image_input, prompt, negative_prompt, strength, guidance_scale, num_inference_steps, width, height, seed):
 
57
  if seed == -1:
58
  seed = generate_new_seed()
59
  generator = torch.Generator().manual_seed(int(seed))
60
- global text2img, img2img
61
  start_time = time.time()
62
  if image_input is None:
63
  image = text2img(prompt=prompt, negative_prompt=negative_prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, width=width, height=height, num_images_per_prompt=1, generator=generator).images[0]
@@ -67,9 +67,11 @@ def Generate(image_input, prompt, negative_prompt, strength, guidance_scale, num
67
  return image, f"{minutes:02d}:{seconds:02d}"
68
  def Loading(model_name, is_xl, is_cuda):
69
  global text2img, img2img
70
- device = "cuda" if is_xl else "cpu"
 
 
71
  pipeline_class = StableDiffusionXLPipeline if is_xl else StableDiffusionPipeline
72
- if is_xl:
73
  text2img = pipeline_class.from_pretrained(model_name, torch_dtype=torch.float16, variant="fp16", use_safetensors=True).to(device)
74
  else:
75
  text2img = pipeline_class.from_pretrained(model_name, use_safetensors=True).to(device)
 
54
  text2img = None
55
  img2img = None
56
  def Generate(image_input, prompt, negative_prompt, strength, guidance_scale, num_inference_steps, width, height, seed):
57
+ global text2img, img2img
58
  if seed == -1:
59
  seed = generate_new_seed()
60
  generator = torch.Generator().manual_seed(int(seed))
 
61
  start_time = time.time()
62
  if image_input is None:
63
  image = text2img(prompt=prompt, negative_prompt=negative_prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, width=width, height=height, num_images_per_prompt=1, generator=generator).images[0]
 
67
  return image, f"{minutes:02d}:{seconds:02d}"
68
  def Loading(model_name, is_xl, is_cuda):
69
  global text2img, img2img
70
+ if is_xl == False:
71
+ is_xl ='xl' in model_name.lower()
72
+ device = "cuda" if is_cuda else "cpu"
73
  pipeline_class = StableDiffusionXLPipeline if is_xl else StableDiffusionPipeline
74
+ if is_cuda:
75
  text2img = pipeline_class.from_pretrained(model_name, torch_dtype=torch.float16, variant="fp16", use_safetensors=True).to(device)
76
  else:
77
  text2img = pipeline_class.from_pretrained(model_name, use_safetensors=True).to(device)