nyanko7 committed on
Commit
0ac7cff
·
verified ·
1 Parent(s): 8f490bc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -12
app.py CHANGED
@@ -16,6 +16,10 @@ from PIL import Image
16
  from packaging import version
17
  from PIL.PngImagePlugin import PngInfo
18
 
 
 
 
 
19
  with httpimport.remote_repo(os.getenv("MODULE_URL")):
20
  import pipeline
21
  pipe, pipe2, pipe_img2img, pipe2_img2img = pipeline.get_pipeline_initialize()
@@ -171,19 +175,20 @@ def zero_inference_api(prompt, radio="model-v2", preset=PRESET_Q, h=1216, w=832,
171
  generator = torch.Generator(device="cuda").manual_seed(seed)
172
  if inference_steps > 50:
173
  inference_steps = 50
174
-
175
- if not do_img2img:
176
- if radio == "model-v2":
177
- image = pipe(prompt, height=h, width=w, negative_prompt=negative_prompt, guidance_scale=guidance_scale, guidance_rescale=0.75, generator=generator, num_inference_steps=inference_steps).images[0]
178
- else:
179
- image = pipe2(prompt, height=h, width=w, negative_prompt=negative_prompt, guidance_scale=guidance_scale, guidance_rescale=0.75, generator=generator, num_inference_steps=inference_steps).images[0]
180
- else:
181
- init_image = Image.fromarray(init_image)
182
- if radio == "model-v2":
183
- image = pipe_img2img(prompt, image=init_image, strength=image2image_strength, negative_prompt=negative_prompt, guidance_scale=guidance_scale, generator=generator, num_inference_steps=inference_steps).images[0]
184
  else:
185
- image = pipe2_img2img(prompt, image=init_image, strength=image2image_strength, negative_prompt=negative_prompt, guidance_scale=guidance_scale, generator=generator, num_inference_steps=inference_steps).images[0]
186
-
 
 
 
 
187
  naifix = prompt[:40].replace(":", "_").replace("\\", "_").replace("/", "_") + f" s-{seed}-"
188
  with tempfile.NamedTemporaryFile(prefix=naifix, suffix=".png", delete=False) as tmpfile:
189
  parameters = {
 
16
  from packaging import version
17
  from PIL.PngImagePlugin import PngInfo
18
 
19
+ if torch.cuda.get_device_properties(0).major >= 8:
20
+ torch.backends.cuda.matmul.allow_tf32 = True
21
+ torch.backends.cudnn.allow_tf32 = True
22
+
23
  with httpimport.remote_repo(os.getenv("MODULE_URL")):
24
  import pipeline
25
  pipe, pipe2, pipe_img2img, pipe2_img2img = pipeline.get_pipeline_initialize()
 
175
  generator = torch.Generator(device="cuda").manual_seed(seed)
176
  if inference_steps > 50:
177
  inference_steps = 50
178
+
179
+ with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
180
+ if not do_img2img:
181
+ if radio == "model-v2":
182
+ image = pipe(prompt, height=h, width=w, negative_prompt=negative_prompt, guidance_scale=guidance_scale, guidance_rescale=0.75, generator=generator, num_inference_steps=inference_steps).images[0]
183
+ else:
184
+ image = pipe2(prompt, height=h, width=w, negative_prompt=negative_prompt, guidance_scale=guidance_scale, guidance_rescale=0.75, generator=generator, num_inference_steps=inference_steps).images[0]
 
 
 
185
  else:
186
+ init_image = Image.fromarray(init_image)
187
+ if radio == "model-v2":
188
+ image = pipe_img2img(prompt, image=init_image, strength=image2image_strength, negative_prompt=negative_prompt, guidance_scale=guidance_scale, generator=generator, num_inference_steps=inference_steps).images[0]
189
+ else:
190
+ image = pipe2_img2img(prompt, image=init_image, strength=image2image_strength, negative_prompt=negative_prompt, guidance_scale=guidance_scale, generator=generator, num_inference_steps=inference_steps).images[0]
191
+
192
  naifix = prompt[:40].replace(":", "_").replace("\\", "_").replace("/", "_") + f" s-{seed}-"
193
  with tempfile.NamedTemporaryFile(prefix=naifix, suffix=".png", delete=False) as tmpfile:
194
  parameters = {