Duplicate from ysr/blurryAI
Co-authored-by: Samin Yasar <ysr@users.noreply.huggingface.co>
- .gitattributes +31 -0
- README.md +13 -0
- app.py +138 -0
- requirements.txt +3 -0
.gitattributes
ADDED
@@ -0,0 +1,31 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: BlurryAI
+emoji: 👓
+colorFrom: purple
+colorTo: pink
+sdk: gradio
+sdk_version: 3.1.1
+app_file: app.py
+pinned: true
+duplicated_from: ysr/blurryAI
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,138 @@
+from torchvision.models.detection import maskrcnn_resnet50_fpn_v2, MaskRCNN_ResNet50_FPN_V2_Weights
+import torch
+import torchvision.transforms as T
+from torchvision.utils import draw_segmentation_masks, draw_bounding_boxes
+import random
+import gradio as gr
+import numpy as np
+
+output_dict = {}  # shared between segment() and the two blur functions
+
+
+def random_color_gen(n):
+    # n random RGB tuples, one colour per detected instance
+    return [tuple(random.randint(0, 255) for _ in range(3)) for _ in range(n)]
+
+
+def segment(input_image):
+
+    # Prepare the image for display: HWC uint8 array -> CHW uint8 tensor
+    display_img = torch.tensor(np.asarray(input_image)).unsqueeze(0)
+    display_img = display_img.permute(0, 3, 1, 2).squeeze(0)
+
+    # Prepare the Mask R-CNN model
+    weights = MaskRCNN_ResNet50_FPN_V2_Weights.COCO_V1
+    transforms = weights.transforms()
+    model = maskrcnn_resnet50_fpn_v2(weights=weights)
+    model.eval()
+
+    # Prepare the input image
+    input_tensor = transforms(input_image).unsqueeze(0)
+
+    # Get the predictions; index 0 picks the dict for the only image in the batch
+    with torch.no_grad():
+        output = model(input_tensor)[0]
+
+    # Keep detections above the score threshold, then binarise the soft masks
+    score_threshold = 0.75
+    mask_threshold = 0.5
+    keep = output['scores'] > score_threshold
+    masks = output['masks'][keep] > mask_threshold
+    boxes = output['boxes'][keep]
+    masks = masks.squeeze(1)  # [N, 1, H, W] -> [N, H, W]
+
+    pred_labels = [weights.meta["categories"][label] for label in output['labels'][keep]]
+    n_pred = len(pred_labels)
+
+    # Give a unique id to each prediction (person, person -> person1, person2)
+    pred_label_unq = [pred_labels[i] + str(pred_labels[:i].count(pred_labels[i]) + 1) for i in range(n_pred)]
+
+    colors = random_color_gen(n_pred)
+
+    # Store each instance mask under its unique label for the blur functions
+    for i in range(n_pred):
+        output_dict[pred_label_unq[i]] = {'mask': masks[i].tolist(), 'color': colors[i]}
+
+    # Overlay the masks, then draw the labelled boxes on top
+    masked_img = draw_segmentation_masks(display_img, masks, alpha=0.9, colors=colors)
+    bounding_box_img = draw_bounding_boxes(masked_img, boxes, labels=pred_label_unq, colors='white')
+    bounding_box_img = T.ToPILImage()(bounding_box_img)
+
+    return bounding_box_img
+
+
+def blur_object(input_image, label_name):
+
+    label_names = label_name.split()
+
+    input_tensor = T.ToTensor()(input_image).unsqueeze(0)
+    blur = T.GaussianBlur(15, 20)
+    blurred_tensor = blur(input_tensor)
+
+    # Start from the sharp image; clone so the writes below don't alias input_tensor
+    final_img = input_tensor.clone()
+
+    for name in label_names:
+        mask = output_dict[name.strip()]['mask']
+        mask = torch.tensor(mask)  # [H, W] boolean mask
+
+        # Boolean indexing over the last two dims swaps in the blurred pixels
+        final_img[:, :, mask] = blurred_tensor[:, :, mask]
+
+    final_img = T.ToPILImage()(final_img.squeeze(0))
+
+    return final_img
+
+
+def blur_background(input_image, label_name):
+
+    label_names = label_name.split()
+
+    input_tensor = T.ToTensor()(input_image).unsqueeze(0)
+    blur = T.GaussianBlur(15, 20)
+    blurred_tensor = blur(input_tensor)
+
+    # Start from the blurred image and restore the selected objects
+    final_img = blurred_tensor
+
+    for name in label_names:
+        mask = output_dict[name.strip()]['mask']
+        mask = torch.tensor(mask)  # [H, W] boolean mask
+
+        final_img[:, :, mask] = input_tensor[:, :, mask]
+
+    final_img = T.ToPILImage()(final_img.squeeze(0))
+
+    return final_img
+
+
+############################
+""" User Interface """
+############################
+
+with gr.Blocks() as app:
+
+    gr.Markdown("# Blur an object's background with AI")
+
+    gr.Markdown("First segment the image and create bounding boxes")
+    with gr.Column():
+        input_image = gr.Image(type='pil')
+        b1 = gr.Button("Segment Image")
+
+        with gr.Row():
+            bounding_box_image = gr.Image()
+
+    gr.Markdown("Now choose the label of your desired object (e.g. person1) from the image above and enter it below")
+    gr.Markdown("You can also enter multiple labels separated by spaces (e.g. person1 car1 handbag1)")
+    with gr.Column():
+        label_name = gr.Textbox()
+        with gr.Row():
+            b2 = gr.Button("Blur Background")
+            b3 = gr.Button("Blur Object")
+        result = gr.Image()
+
+    b1.click(segment, inputs=input_image, outputs=bounding_box_image)
+    b2.click(blur_background, inputs=[input_image, label_name], outputs=result)
+    b3.click(blur_object, inputs=[input_image, label_name], outputs=result)
+
+app.launch(debug=True)
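The core trick in both blur functions is boolean-mask indexing over the last two tensor dimensions. A minimal standalone sketch of just that step, using a made-up tensor and mask in place of a real photo and a real detection:

import torch
import torchvision.transforms as T

# Hypothetical stand-ins for a real image batch and a real instance mask
img = torch.rand(1, 3, 4, 4)               # [batch, channels, H, W]
blurred = T.GaussianBlur(3, 2)(img)        # tiny kernel to suit the tiny example
mask = torch.zeros(4, 4, dtype=torch.bool)
mask[1:3, 1:3] = True                      # pretend this is the stored "person1" mask

out = img.clone()
out[:, :, mask] = blurred[:, :, mask]      # blur only the masked pixels
print(out.shape)                           # torch.Size([1, 3, 4, 4])

Indexing a [1, 3, H, W] tensor with an [H, W] boolean mask applies the mask jointly to the height and width dimensions, which is why the assignment lines in blur_object and blur_background touch only the selected object's pixels.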
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+torch
+torchvision
+gradio
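One caveat on the dependency list: all three packages are unpinned, so a rebuild of the Space may pull newer, possibly incompatible releases (note that maskrcnn_resnet50_fpn_v2 requires torchvision >= 0.13). Pinning versions would make rebuilds reproducible; the gradio pin below matches the sdk_version declared in README.md, while the torch and torchvision pins are only illustrative:

torch==1.12.0
torchvision==0.13.0
gradio==3.1.1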