bariscal committed on
Commit
f4e4388
·
1 Parent(s): 8fd2403

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -3
app.py CHANGED
@@ -2,20 +2,24 @@ import gradio as gr
2
  import torch
3
 
4
 
5
- gr.Interface.load("models/bariscal/cbst_style")
 
6
 
7
 
8
  def inference(prompt, negative_prompt, num_samples, height=512, width=512, num_inference_steps=50, guidance_scale=7.5):
9
- with torch.Generator('cpu'), torch.inference_mode():
 
10
  return pipe(
11
  prompt, height=int(height), width=int(width),
12
  negative_prompt=negative_prompt,
13
  num_images_per_prompt=int(num_samples),
14
  num_inference_steps=int(num_inference_steps), guidance_scale=guidance_scale,
15
- generator=g_cuda
 
16
  ).images
17
 
18
 
 
19
  with gr.Blocks() as demo:
20
  with gr.Row():
21
  with gr.Column():
 
2
  import torch
3
 
4
 
5
# gr.Interface.load("models/bariscal/cbst_style")  # replaced by an explicit diffusers pipeline
# NOTE(review): assumes `from diffusers import StableDiffusionPipeline` appears earlier
# in the file — confirm, no import is visible in this chunk.
# Fix 1: from_pretrained takes a Hub repo id ("bariscal/cbst_style"); the "models/"
# prefix is the gr.Interface.load convention (see the commented line above), not a
# repo id — confirm unless a local "models/bariscal/cbst_style" directory exists.
# Fix 2: inference() below runs the pipeline on CPU; float16 weights are unsupported
# by most CPU ops and would raise at generation time, so load in float32.
pipe = StableDiffusionPipeline.from_pretrained(
    "bariscal/cbst_style",
    safety_checker=None,       # disabled deliberately in the original; kept as-is
    torch_dtype=torch.float32,
)
7
 
8
 
9
def inference(prompt, negative_prompt, num_samples, height=512, width=512, num_inference_steps=50, guidance_scale=7.5):
    """Generate images from a text prompt using the module-level ``pipe``.

    Args:
        prompt: Text prompt describing the desired image(s).
        negative_prompt: Text describing content to steer away from.
        num_samples: Number of images to generate per prompt (coerced to int).
        height: Output image height in pixels (coerced to int).
        width: Output image width in pixels (coerced to int).
        num_inference_steps: Number of denoising steps (coerced to int).
        guidance_scale: Classifier-free guidance strength.

    Returns:
        The list of PIL images produced by the pipeline (``.images``).
    """
    # BUG FIX: the original passed generator="cpu". The diffusers pipeline
    # expects a torch.Generator instance (or None), not a device string, so
    # that call raised at runtime. Build a CPU generator explicitly instead.
    rng = torch.Generator("cpu")
    # inference_mode disables autograd bookkeeping for the whole forward pass.
    with torch.inference_mode():
        return pipe(
            prompt,
            height=int(height),
            width=int(width),
            negative_prompt=negative_prompt,
            num_images_per_prompt=int(num_samples),
            num_inference_steps=int(num_inference_steps),
            guidance_scale=guidance_scale,
            generator=rng,
        ).images
20
 
21
 
22
+
23
  with gr.Blocks() as demo:
24
  with gr.Row():
25
  with gr.Column():