yonishafir committed · Commit 9566feb (verified) · Parent(s): e035438

Update README.md

Files changed (1): README.md (+28 -27)
README.md CHANGED
````diff
@@ -74,18 +74,17 @@ By submitting the form above, you agree to BRIA’s [Privacy policy](https://bri
 ```python
 from diffusers import (
     AutoencoderKL,
-    StableDiffusionXLControlNetInpaintPipeline,
     LCMScheduler,
 )
 from pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline
 from controlnet import ControlNetModel, ControlNetConditioningEmbedding
-import os
-from torchvision import transforms
 import torch
-from tqdm import tqdm
 import numpy as np
-import pandas as pd
 from PIL import Image
+import requests
+import PIL
+from io import BytesIO
+from torchvision import transforms
 
 
 def download_image(url):
````
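The new imports support `download_image` (`requests`, `PIL`, `BytesIO`) and the `image_transforms` pipeline (`torchvision.transforms`) added later in the diff. The hunks only ever show the `return` line of `download_image`; a minimal sketch of the full helper, assuming a plain unauthenticated GET with no error handling, would be:

```python
import requests
import PIL
from io import BytesIO

def download_image(url):
    # Plain HTTP GET; assumes no auth, retries, or status checks are needed.
    response = requests.get(url)
    return PIL.Image.open(BytesIO(response.content)).convert("RGB")
```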
````diff
@@ -93,12 +92,10 @@ def download_image(url):
     return PIL.Image.open(BytesIO(response.content)).convert("RGB")
 
 
-def get_masked_image(path_to_images_dir, image_name, image, image_mask, width, height):
+def get_masked_image(image, image_mask, width, height):
     image_mask = image_mask # inpaint area is white
-    image_mask = add_margins_to_ratio(image_mask, 1.5)
     image_mask = image_mask.resize((width, height)) # object to remove is white (1)
     image_mask_pil = image_mask
-    orig_image = np.array(image.convert("RGB")).astype(np.uint8)
     image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
     image_mask = np.array(image_mask_pil.convert("L")).astype(np.float32) / 255.0
     assert image.shape[0:1] == image_mask.shape[0:1], "image and image_mask must have the same image size"
````
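The hunk cuts off at the `assert`; the body between it and the `Image.fromarray` lines in the next hunk is elided context, not part of the change. One plausible reconstruction of that gap (an assumption for illustration, not the committed code) greys out the inpaint region in both the control image and the preview:

```python
# Hypothetical middle of get_masked_image, elided by the diff:
masked_image_to_present = image.copy()
masked_image_to_present[image_mask > 0.5] = (0.5, 0.5, 0.5)  # grey out inpaint area for preview
image[image_mask > 0.5] = 0.5                                # blank masked pixels in the control image
image = Image.fromarray((image * 255.0).astype(np.uint8))    # back to PIL for image_transforms
```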
````diff
@@ -109,6 +106,14 @@ def get_masked_image(path_to_images_dir, image_name, image, image_mask, width, h
     masked_image_to_present = Image.fromarray((masked_image_to_present * 255.0).astype(np.uint8))
     return image, image_mask_pil, masked_image_to_present
 
+image_transforms = transforms.Compose(
+    [
+        transforms.ToTensor(),
+        # transforms.Normalize([0.5], [0.5]),
+    ]
+)
+
+default_negative_prompt = "Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers"
 
 img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
 mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
````
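`transforms.ToTensor()` already maps pixels to [0, 1]; `Normalize([0.5], [0.5])` stays commented out because the script applies the same rescaling by hand later (`(masked_image_tensor - 0.5) / 0.5`). A quick check that the two are equivalent:

```python
import numpy as np
import torch
from PIL import Image
from torchvision import transforms

img = Image.fromarray(np.full((8, 8, 3), 128, dtype=np.uint8))
t = transforms.ToTensor()(img)                    # floats in [0, 1]
manual = (t - 0.5) / 0.5                          # the script's explicit normalization
built_in = transforms.Normalize([0.5], [0.5])(t)  # the commented-out alternative
assert torch.allclose(manual, built_in)
```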
````diff
@@ -116,6 +121,11 @@ mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data
 init_image = download_image(img_url).resize((1024, 1024))
 mask_image = download_image(mask_url).resize((1024, 1024))
 
+mask_image = mask_image.convert("L")
+
+width, height = init_image.size
+
+
 # Load, init model
 controlnet = ControlNetModel().from_config('briaai/DEV-ControlNetInpaintingFast', torch_dtype=torch.float16)
 controlnet.controlnet_cond_embedding = ControlNetConditioningEmbedding(
````
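The replacement conditioning embedding is built with `conditioning_channels = 5` (the first line of the next hunk) because the control input assembled at the end of the diff concatenates the 4-channel VAE latents of the masked image with a 1-channel resized mask. A shape-only illustration with dummy tensors (not the committed code):

```python
import torch

# A 1024x1024 input gives 128x128 SDXL latents (1/8 resolution).
control_latents = torch.randn(1, 4, 128, 128)  # stand-in for the VAE-encoded masked image
mask_resized = torch.zeros(1, 1, 128, 128)     # stand-in for the nearest-resized binary mask
cond = torch.cat([control_latents, mask_resized], dim=1)
print(cond.shape)  # torch.Size([1, 5, 128, 128]), matching conditioning_channels=5
```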
````diff
@@ -123,25 +133,22 @@ controlnet.controlnet_cond_embedding = ControlNetConditioningEmbedding(
     conditioning_channels = 5
 )
 
-controlnet.load_state_dict(torch.load(local_ckpt_dir + local_ckpt_dir_suffix))
+# controlnet.load_state_dict(torch.load('briaai/DEV-ControlNetInpaintingFast'))
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 pipe = StableDiffusionXLControlNetPipeline.from_pretrained("briaai/BRIA-2.3", controlnet=controlnet.to(dtype=torch.float16), torch_dtype=torch.float16, vae=vae) #force_zeros_for_empty_prompt=False, # vae=vae)
 
 pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
-pipe.load_lora_weights("briaai/BRIA-2.3-FAST-LORA")
-pipe.fuse_lora()
+pipe.load_lora_weights("briaai/BRIA-2.3-FAST-LORA")
+pipe.fuse_lora()
 
 pipe = pipe.to('cuda:0')
 pipe.enable_xformers_memory_efficient_attention()
 
 generator = torch.Generator(device='cuda:0').manual_seed(123456)
 
-
-
-
 vae = pipe.vae
 
-masked_image, image_mask, masked_image_to_present = get_masked_image(path_to_images_dir, image_name, image_mask, img, width, height)
+masked_image, image_mask, masked_image_to_present = get_masked_image(init_image, mask_image, width, height)
 masked_image_tensor = image_transforms(masked_image)
 masked_image_tensor = (masked_image_tensor - 0.5) / 0.5
 
````
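The later hunks read from a `control_latents` tensor that never appears in the visible context; it is presumably the VAE encoding of `masked_image_tensor`. A hedged sketch of that elided step, assuming the standard diffusers `AutoencoderKL` API:

```python
# Assumption: control_latents is the fp16 VAE encoding of the masked image.
with torch.no_grad():
    control_latents = vae.encode(
        masked_image_tensor[None, ...].to(device="cuda", dtype=torch.float16)
    ).latent_dist.sample() * vae.config.scaling_factor
```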
 
````diff
@@ -158,32 +165,26 @@ mask_tensor = torch.tensor(image_mask, dtype=torch.float32)[None, ...]
 # binarize the mask
 mask_tensor = torch.where(mask_tensor > 128.0, 255.0, 0)
 
-if normalize_mask_to_0_1:
-    mask_tensor = mask_tensor / 255.0
+mask_tensor = mask_tensor / 255.0
 
 mask_tensor = mask_tensor.to(device="cuda")
 mask_resized = torch.nn.functional.interpolate(mask_tensor[None, ...], size=(control_latents.shape[2], control_latents.shape[3]), mode='nearest')
 # mask_resized = mask_resized.to(torch.float16)
 masked_image = torch.cat([control_latents, mask_resized], dim=1)
 
-gen_img = pipe(negative_prompt=default_negative_prompt, prompt=caption,
+prompt = "A park bench"
+
+gen_img = pipe(negative_prompt=default_negative_prompt, prompt=prompt,
                controlnet_conditioning_sale=1.0,
                num_inference_steps=12,
                height=height, width=width,
                image = masked_image, # control image
-               init_image = img,
+               init_image = init_image,
                mask_image = mask_tensor,
                guidance_scale = 1.2,
                generator=generator).images[0]
 
 
-
-
-
-
-prompt = "A park bench"
-generator = torch.Generator(device='cuda:0').manual_seed(123456)
-image = pipe(prompt=prompt, image=init_image, mask_image=mask_image,generator=generator,guidance_scale=5,strength=1).images[0]
-image.save("./a_park_bench.png")
+gen_img.save("./a_park_bench.png")
 ```
 
````
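Two details in the final call deserve a flag. `controlnet_conditioning_sale` looks like a typo for the standard diffusers keyword `controlnet_conditioning_scale`, and `init_image` is not an argument of the stock SDXL ControlNet pipeline, so it presumably belongs to the repo's local `pipeline_controlnet_sd_xl`. A corrected sketch under those assumptions:

```python
prompt = "A park bench"

gen_img = pipe(
    prompt=prompt,
    negative_prompt=default_negative_prompt,
    controlnet_conditioning_scale=1.0,  # assumed fix for "controlnet_conditioning_sale"
    num_inference_steps=12,
    height=height, width=width,
    image=masked_image,      # 5-channel control input: latents + mask
    init_image=init_image,   # custom-pipeline argument (assumption)
    mask_image=mask_tensor,
    guidance_scale=1.2,
    generator=generator,
).images[0]
gen_img.save("./a_park_bench.png")
```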