aayushmnit committed on
Commit
f9fb4bc
·
1 Parent(s): cb16212

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -4
app.py CHANGED
@@ -7,16 +7,19 @@ from transformers import CLIPTextModel, CLIPTokenizer
7
  from diffusers import AutoencoderKL, UNet2DConditionModel, DDIMScheduler
8
  from diffusers import StableDiffusionInpaintPipeline
9
  import gradio as gr
 
 
 
10
 
11
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
12
  def load_artifacts():
13
  '''
14
  A function to load all diffusion artifacts
15
  '''
16
- vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae", torch_dtype=torch.float16).to(device)
17
- unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet", torch_dtype=torch.float16).to(device)
18
- tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=torch.float16)
19
- text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=torch.float16).to(device)
20
  scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
21
  return vae, unet, tokenizer, text_encoder, scheduler
22
 
@@ -139,6 +142,7 @@ pipe = StableDiffusionInpaintPipeline.from_pretrained(
139
  "runwayml/stable-diffusion-inpainting",
140
  revision="fp16",
141
  torch_dtype=torch.float16,
 
142
  ).to(device)
143
 
144
  def fastDiffEdit(init_img, reference_prompt , query_prompt, g=7.5, seed=100, strength =0.7, steps=20, dim=512):
 
7
  from diffusers import AutoencoderKL, UNet2DConditionModel, DDIMScheduler
8
  from diffusers import StableDiffusionInpaintPipeline
9
  import gradio as gr
10
+ import os
11
+
12
+ auth_token = os.environ.get("API_TOKEN")
13
 
14
# Select the compute device once at import time: prefer the GPU when available.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
15
def load_artifacts():
    '''
    Load all Stable Diffusion artifacts needed by the app.

    Weights are fetched in fp16 to halve GPU memory use; `auth_token` (read
    from the API_TOKEN environment variable at module level) authenticates
    against gated Hugging Face repositories.

    Returns:
        tuple: (vae, unet, tokenizer, text_encoder, scheduler) where the
        vae/unet/text_encoder modules are already moved to `device`.
    '''
    vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae", torch_dtype=torch.float16, use_auth_token=auth_token).to(device)
    unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet", torch_dtype=torch.float16, use_auth_token=auth_token).to(device)
    # A tokenizer holds no model weights, so torch_dtype is meaningless for it
    # and has been dropped from this call.
    tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", use_auth_token=auth_token)
    text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=torch.float16, use_auth_token=auth_token).to(device)
    # DDIM scheduler configured with the standard SD v1 beta schedule.
    scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
    return vae, unet, tokenizer, text_encoder, scheduler
25
 
 
142
  "runwayml/stable-diffusion-inpainting",
143
  revision="fp16",
144
  torch_dtype=torch.float16,
145
+ use_auth_token=auth_token
146
  ).to(device)
147
 
148
  def fastDiffEdit(init_img, reference_prompt , query_prompt, g=7.5, seed=100, strength =0.7, steps=20, dim=512):