import os
import zipfile
from glob import glob
from io import BytesIO
from typing import Tuple

import cv2
import numpy as np
import requests
import torch
import gradio as gr
from PIL import Image
from torchvision import transforms
from transformers import AutoModelForImageSegmentation

torch.set_float32_matmul_precision('high')

device = "cuda" if torch.cuda.is_available() else "cpu"
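
# Foreground refinement: the predicted mask is treated as an alpha matte and fed to
# the blur-fusion estimators below to recover clean foreground colours around the
# object boundary before the alpha channel is attached.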
def refine_foreground(image, mask, r=90):
    if mask.size != image.size:
        mask = mask.resize(image.size)
    image = np.array(image) / 255.0
    mask = np.array(mask) / 255.0
    estimated_foreground = FB_blur_fusion_foreground_estimator_2(image, mask, r=r)
    image_masked = Image.fromarray((estimated_foreground * 255.0).astype(np.uint8))
    return image_masked
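
# Two-pass blur-fusion estimate: a first pass with a large blur radius produces a
# coarse foreground guess, which a second pass with a small radius (r=6) refines.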
def FB_blur_fusion_foreground_estimator_2(image, alpha, r=90):
    alpha = alpha[:, :, None]
    F, blur_B = FB_blur_fusion_foreground_estimator(image, image, image, alpha, r)
    return FB_blur_fusion_foreground_estimator(image, F, blur_B, alpha, r=6)[0]
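
# Single blur-fusion pass: local (blurred) means of foreground and background are
# weighted by alpha, then a residual correction pulls F toward satisfying the
# compositing equation image = alpha * F + (1 - alpha) * B.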
def FB_blur_fusion_foreground_estimator(image, F, B, alpha, r=90):
    if isinstance(image, Image.Image):
        image = np.array(image) / 255.0
    blurred_alpha = cv2.blur(alpha, (r, r))[:, :, None]

    blurred_FA = cv2.blur(F * alpha, (r, r))
    blurred_F = blurred_FA / (blurred_alpha + 1e-5)

    blurred_B1A = cv2.blur(B * (1 - alpha), (r, r))
    blurred_B = blurred_B1A / ((1 - blurred_alpha) + 1e-5)
    F = blurred_F + alpha * (image - alpha * blurred_F - (1 - alpha) * blurred_B)
    F = np.clip(F, 0, 1)
    return F, blurred_B
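
# Resize + ImageNet normalization applied to every input before inference.
# Note that torchvision's Resize interprets the size tuple as (height, width).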
class ImagePreprocessor():
    def __init__(self, resolution: Tuple[int, int] = (1024, 1024)) -> None:
        self.transform_image = transforms.Compose([
            transforms.Resize(resolution),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])

    def proc(self, image: Image.Image) -> torch.Tensor:
        image = self.transform_image(image)
        return image
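
# Map the task-oriented options shown in the UI to the corresponding
# Hugging Face model repositories under 'zhengpeng7'.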
usage_to_weights_file = {
    'General': 'BiRefNet',
    'General-Lite': 'BiRefNet_lite',
    'General-Lite-2K': 'BiRefNet_lite-2K',
    'Matting': 'BiRefNet-matting',
    'Portrait': 'BiRefNet-portrait',
    'DIS': 'BiRefNet-DIS5K',
    'HRSOD': 'BiRefNet-HRSOD',
    'COD': 'BiRefNet-COD',
    'DIS-TR_TEs': 'BiRefNet-DIS5K-TR_TEs',
    'General-legacy': 'BiRefNet-legacy'
}
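
# Load the default 'General' weights once at startup; predict() reloads weights
# according to the option chosen in the UI.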
birefnet = AutoModelForImageSegmentation.from_pretrained('/'.join(('zhengpeng7', usage_to_weights_file['General'])), trust_remote_code=True)
birefnet.to(device)
birefnet.eval()
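
# Shared inference entrypoint for all three Gradio tabs: a single uploaded image,
# an image URL, or a batch of file paths.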
def predict(images, resolution, weights_file):
    assert images is not None, 'images cannot be None.'

    global birefnet
    # Load the requested weights (fall back to 'General' when none are given).
    _weights_file = '/'.join(('zhengpeng7', usage_to_weights_file[weights_file] if weights_file is not None else usage_to_weights_file['General']))
    print('Using weights: {}.'.format(_weights_file))
    birefnet = AutoModelForImageSegmentation.from_pretrained(_weights_file, trust_remote_code=True)
    birefnet.to(device)
    birefnet.eval()

    # Parse 'WxH' and snap each side down to a multiple of 32.
    try:
        resolution = [int(int(reso) // 32 * 32) for reso in resolution.strip().split('x')]
    except Exception:
        resolution = (1024, 1024) if weights_file not in ['General-Lite-2K'] else (2560, 1440)
        print('Invalid resolution input. Falling back to 1024x1024 (or 2560x1440 for General-Lite-2K).')

    if isinstance(images, list):
        # Batch tab: save each prediction to disk and zip them up at the end.
        save_paths = []
        save_dir = 'preds-BiRefNet'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        tab_is_batch = True
    else:
        images = [images]
        tab_is_batch = False

    for idx_image, image_src in enumerate(images):
        if isinstance(image_src, str):
            if os.path.isfile(image_src):
                image_ori = Image.open(image_src)
            else:
                # Treat non-file strings as URLs.
                response = requests.get(image_src)
                image_data = BytesIO(response.content)
                image_ori = Image.open(image_data)
        else:
            image_ori = Image.fromarray(image_src)

        image = image_ori.convert('RGB')
        image_preprocessor = ImagePreprocessor(resolution=tuple(resolution))
        image_proc = image_preprocessor.proc(image)
        image_proc = image_proc.unsqueeze(0)

        with torch.no_grad():
            preds = birefnet(image_proc.to(device))[-1].sigmoid().cpu()
        pred = preds[0].squeeze()

        # Refine the foreground with the predicted mask and attach it as the alpha channel.
        pred_pil = transforms.ToPILImage()(pred)
        image_masked = refine_foreground(image, pred_pil)
        image_masked.putalpha(pred_pil.resize(image.size))

        torch.cuda.empty_cache()

        if tab_is_batch:
            save_file_path = os.path.join(save_dir, "{}.png".format(os.path.splitext(os.path.basename(image_src))[0]))
            image_masked.save(save_file_path)
            save_paths.append(save_file_path)

    if tab_is_batch:
        zip_file_path = os.path.join(save_dir, "{}.zip".format(save_dir))
        with zipfile.ZipFile(zip_file_path, 'w') as zipf:
            for file in save_paths:
                zipf.write(file, os.path.basename(file))
        return save_paths, zip_file_path
    else:
        return image_masked
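
# Local example images (each entry becomes [image_path, resolution]); the last one
# is duplicated at 512x512 to illustrate a non-default resolution.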
examples = [[_] for _ in glob('/content/birefnet_background_remover/examples/*')]
for idx_example, example in enumerate(examples):
    examples[idx_example].append('1024x1024')
examples.append(examples[-1].copy())
examples[-1][1] = '512x512'
examples_url = [
    ['https://hips.hearstapps.com/hmg-prod/images/gettyimages-1229892983-square.jpg'],
]
for idx_example_url, example_url in enumerate(examples_url):
    examples_url[idx_example_url].append('1024x1024')

descriptions = ('Upload a picture and our model will extract a highly accurate segmentation of its subject.\n'
                ' The model was trained at `1024x1024`, so that is the suggested resolution for good results.\n'
                ' Our code can be found at https://github.com/ZhengPeng7/BiRefNet.\n'
                ' We also maintain the HF model of BiRefNet at https://huggingface.co/ZhengPeng7/BiRefNet for easier access.')
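
# Three Gradio tabs (upload, URL, batch) share the same predict() function and
# only differ in how the input image(s) are provided.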
tab_image = gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(label='Upload an image'),
        gr.Textbox(lines=1, placeholder="Type the resolution (`WxH`) you want, e.g., `1024x1024`.", label="Resolution"),
        gr.Radio(list(usage_to_weights_file.keys()), value='General', label="Weights", info="Choose the weights you want.")
    ],
    outputs=gr.Image(label="BiRefNet's prediction", type="pil", format='png'),
    examples=examples,
    api_name="image",
    description=descriptions,
)

tab_text = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(label="Paste an image URL"),
        gr.Textbox(lines=1, placeholder="Type the resolution (`WxH`) you want, e.g., `1024x1024`.", label="Resolution"),
        gr.Radio(list(usage_to_weights_file.keys()), value='General', label="Weights", info="Choose the weights you want.")
    ],
    outputs=gr.Image(label="BiRefNet's prediction", type="pil", format='png'),
    examples=examples_url,
    api_name="text",
    description=descriptions + '\nTab-URL is partially modified from https://huggingface.co/spaces/not-lain/background-removal, thanks to this great work!',
)

tab_batch = gr.Interface(
    fn=predict,
    inputs=[
        gr.File(label="Upload multiple images", type="filepath", file_count="multiple"),
        gr.Textbox(lines=1, placeholder="Type the resolution (`WxH`) you want, e.g., `1024x1024`.", label="Resolution"),
        gr.Radio(list(usage_to_weights_file.keys()), value='General', label="Weights", info="Choose the weights you want.")
    ],
    outputs=[gr.Gallery(label="BiRefNet's predictions"), gr.File(label="Download masked images.")],
    api_name="batch",
    description=descriptions + '\nTab-batch is partially modified from https://huggingface.co/spaces/NegiTurkey/Multi_Birefnetfor_Background_Removal, thanks to this great work!',
)

demo = gr.TabbedInterface(
    [tab_image, tab_text, tab_batch],
    ['image', 'text', 'batch'],
    title="BiRefNet demo for subject extraction (general / matting / salient / camouflaged / portrait).",
)
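
# share=True asks Gradio to create a temporary public link, which is convenient
# when running in an environment such as Colab.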
if __name__ == "__main__":
    demo.launch(share=True)