Upload app.py
app.py
ADDED
@@ -0,0 +1,245 @@
import json
import random
import spaces

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import onnxruntime
import torch
import torchvision.transforms.functional as F
from huggingface_hub import hf_hub_download
from PIL import Image, ImageColor
from torchvision.io import read_image
from torchvision.models.detection import MaskRCNN_ResNet50_FPN_Weights
from torchvision.utils import draw_bounding_boxes, draw_segmentation_masks

# Load the pre-trained model's weights metadata and its preprocessing transforms.
weights = MaskRCNN_ResNet50_FPN_Weights.DEFAULT
transforms = weights.transforms()

def fix_category_id(cat_ids: list):
    # Define the excluded category IDs; keep the remaining ones in ascending order
    # (sorted() makes the mapping deterministic, unlike plain set iteration order).
    excluded_indices = {2, 12, 16, 19, 20}
    remaining_categories = sorted(set(range(27)) - excluded_indices)

    # Create a dictionary that maps new IDs to the old (original) IDs.
    new_id_to_org_id = dict(zip(range(len(remaining_categories)), remaining_categories))

    # Model outputs are 1-based, hence the -1/+1 shifts.
    return [new_id_to_org_id[i - 1] + 1 for i in cat_ids]
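
# Sanity-check sketch (illustrative, not part of the original app): the kept
# original IDs begin [0, 1, 3, 4, ...], so a "facere+" prediction of 3 (1-based)
# points at new ID 2, which maps back to original ID 3, returned 1-based as 4:
#   fix_category_id([3]) == [4]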


def process_categories() -> tuple:
    """
    Load and process category information from a JSON file.

    Returns:
        tuple: A tuple containing two dictionaries:
            - `category_id_to_name`: maps category IDs to their names.
            - `category_id_to_color`: maps category IDs to a randomly sampled RGB color.
    """
    # Load raw categories from the JSON file.
    with open("categories.json") as fp:
        categories = json.load(fp)

    # Map category IDs to names.
    category_id_to_name = {d["id"]: d["name"] for d in categories}

    # Seed the sampling so the colors are reproducible across runs.
    random.seed(42)

    # Get a list of all the color names in the PIL colormap.
    color_names = list(ImageColor.colormap.keys())

    # Sample 46 unique colors, one per category.
    sampled_colors = random.sample(color_names, 46)

    # Convert the color names to RGB values.
    rgb_colors = [ImageColor.getrgb(color_name) for color_name in sampled_colors]

    # Map category IDs to colors.
    category_id_to_color = {
        category["id"]: color for category, color in zip(categories, rgb_colors)
    }

    return category_id_to_name, category_id_to_color
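
# Illustrative output shapes (hypothetical entries; the real names come from categories.json):
#   category_id_to_name  -> {0: "shirt, blouse", 1: "top, t-shirt, sweatshirt", ...}
#   category_id_to_color -> {0: (240, 248, 255), 1: (250, 235, 215), ...}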


def draw_predictions(
    boxes, labels, scores, masks, img, model_name, score_threshold, proba_threshold
):
    """
    Draw predictions on the input image based on the provided boxes, labels, scores, and masks.
    Only predictions with scores above `score_threshold` are included, and only mask pixels with
    probabilities above `proba_threshold` are displayed.

    Args:
        boxes (numpy.ndarray): An array of bounding box coordinates.
        labels (numpy.ndarray): An array of integers representing the predicted class of each box.
        scores (numpy.ndarray): An array of confidence scores, one per box.
        masks (numpy.ndarray): An array of probability masks, one per box.
        img (torch.Tensor): The input image.
        model_name (str): Model name from the dropdown menu, either "facere" or "facere+".
        score_threshold (float): Confidence threshold for filtering out low-scoring bbox predictions.
        proba_threshold (float): Threshold for filtering out low-probability (pixel-wise) mask predictions.

    Returns:
        list: Paths to image files, each showing the input image with a different set of
        predictions drawn (masks, bounding boxes, individual masks with bbox labels and scores).
    """
    imgs_list = []

    # Map label IDs to names and colors.
    label_id_to_name, label_id_to_color = process_categories()

    # Filter out predictions using the thresholds; filter `scores` as well (last, so the
    # earlier filters still see the full array) to keep the per-mask loop below aligned.
    labels_id = labels[scores > score_threshold].tolist()
    if model_name == "facere+":
        labels_id = fix_category_id(labels_id)
    # Model output is in range [1, num_classes], hence re-map to the 0-based JSON IDs.
    labels = [label_id_to_name[int(i) - 1] for i in labels_id]
    masks = (masks[scores > score_threshold] > proba_threshold).astype(np.uint8)
    boxes = boxes[scores > score_threshold]
    scores = scores[scores > score_threshold]

    # Draw masks onto the input image and save it.
    img_masks = draw_segmentation_masks(
        image=img,
        masks=torch.from_numpy(masks.squeeze(1).astype(bool)),
        alpha=0.9,
        colors=[label_id_to_color[int(i) - 1] for i in labels_id],
    )
    img_masks = F.to_pil_image(img_masks)
    img_masks.save("img_masks.png")
    imgs_list.append("img_masks.png")

    # Draw bounding boxes onto the input image and save it.
    img_bbox = draw_bounding_boxes(img, boxes=torch.from_numpy(boxes), width=4)
    img_bbox = F.to_pil_image(img_bbox)
    img_bbox.save("img_bbox.png")
    imgs_list.append("img_bbox.png")

    # Save each mask individually, with its bbox label & score as the title.
    for col, (mask, label, score) in enumerate(zip(masks, labels, scores)):
        mask = Image.fromarray(mask.squeeze())
        plt.imshow(mask)
        plt.axis("off")
        plt.title(f"{label}: {score:.2f}", fontsize=9)
        plt.savefig(f"mask-{col}.png")
        plt.close()
        imgs_list.append(f"mask-{col}.png")

    return imgs_list
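
# For two detections above the thresholds, the returned list would look like
# (hypothetical): ["img_masks.png", "img_bbox.png", "mask-0.png", "mask-1.png"],
# which the Gradio gallery then displays.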


@spaces.GPU(duration=20)
def inference(image, model_name, mask_threshold, bbox_threshold):
    """
    Load the ONNX model, run inference on the input `image`, and visualize the predictions in
    figures, which are shown in the Gradio app.
    """
    # Load the image.
    img = read_image(image)
    # Apply the model's original preprocessing to the image.
    img_transformed = transforms(img)

    # Download the model.
    path_onnx = hf_hub_download(
        repo_id="rizavelioglu/fashionfail",
        filename="facere_plus.onnx" if model_name == "facere+" else "facere_base.onnx",
    )
    # Session options (see https://github.com/microsoft/onnxruntime/issues/14694#issuecomment-1598429295).
    sess_options = onnxruntime.SessionOptions()
    sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
    # Create an inference session.
    ort_session = onnxruntime.InferenceSession(
        path_onnx,
        providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
        sess_options=sess_options,
    )

    # Compute the ONNX Runtime output prediction.
    ort_inputs = {
        ort_session.get_inputs()[0].name: img_transformed.unsqueeze(dim=0).numpy()
    }
    ort_outs = ort_session.run(None, ort_inputs)

    boxes, labels, scores, masks = ort_outs
    imgs_list = draw_predictions(
        boxes, labels, scores, masks, img, model_name,
        score_threshold=bbox_threshold, proba_threshold=mask_threshold,
    )

    return imgs_list
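
# Minimal local smoke test (a sketch, not part of the original app; assumes the
# example image exists and that `spaces.GPU` degrades to a no-op outside HF Spaces):
#   outputs = inference("examples/adi_9086_5.jpg", "facere", 0.5, 0.7)
#   print(outputs)  # paths of the saved prediction figures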


title = "Facere - Demo"
description = r"""This is the demo of the paper <a href="https://arxiv.org/abs/2404.08582">FashionFail: Addressing
Failure Cases in Fashion Object Detection and Segmentation</a>. <br>Upload your image and choose the model for
inference from the dropdown menu, either `Facere` or `Facere+`. <br>Check out the <a
href="https://rizavelioglu.github.io/fashionfail/">project page</a> for more information."""
article = r"""
Example images are sampled from the `Fashionpedia-test` and `FashionFail-test` sets, which the models did not see
during training.

<br>**Citation** <br>If you find our work useful in your research, please consider giving a star ⭐ and a citation:
```
@inproceedings{velioglu2024fashionfail,
    author    = {Velioglu, Riza and Chan, Robin and Hammer, Barbara},
    title     = {FashionFail: Addressing Failure Cases in Fashion Object Detection and Segmentation},
    booktitle = {IJCNN},
    eprint    = {2404.08582},
    year      = {2024},
}
```
"""

examples = [
    ["examples/0a4f8205a3b58e70eec99fbbb9422d08.jpg", "facere", 0.5, 0.7],
    ["examples/0a72e0f76ab9b75945f5d610508f9336.jpg", "facere", 0.5, 0.7],
    ["examples/0a939e0e67011aecf7195c17ecb9733c.jpg", "facere", 0.5, 0.7],
    ["examples/adi_9086_5.jpg", "facere", 0.5, 0.5],
    ["examples/adi_9086_5.jpg", "facere+", 0.5, 0.5],
    ["examples/adi_9704_1.jpg", "facere", 0.5, 0.5],
    ["examples/adi_9704_1.jpg", "facere+", 0.5, 0.5],
    ["examples/adi_10266_5.jpg", "facere", 0.5, 0.5],
    ["examples/adi_10266_5.jpg", "facere+", 0.5, 0.5],
    ["examples/adi_103_6.jpg", "facere", 0.5, 0.5],
    ["examples/adi_103_6.jpg", "facere+", 0.5, 0.5],
    ["examples/adi_1201_2.jpg", "facere", 0.5, 0.7],
    ["examples/adi_1201_2.jpg", "facere+", 0.5, 0.7],
    ["examples/adi_2149_5.jpg", "facere", 0.5, 0.7],
    ["examples/adi_2149_5.jpg", "facere+", 0.5, 0.7],
    ["examples/adi_5476_3.jpg", "facere", 0.5, 0.7],
    ["examples/adi_5476_3.jpg", "facere+", 0.5, 0.7],
    ["examples/adi_5641_4.jpg", "facere", 0.5, 0.7],
    ["examples/adi_5641_4.jpg", "facere+", 0.5, 0.7],
]

demo = gr.Interface(
    fn=inference,
    inputs=[
        gr.Image(type="filepath", label="input"),
        gr.Dropdown(["facere", "facere+"], value="facere", label="Models"),
        gr.Slider(
            value=0.5, minimum=0.0, maximum=0.9, step=0.05, label="Mask threshold",
            info="a threshold for filtering out low-probability (pixel-wise) mask predictions",
        ),
        gr.Slider(
            value=0.7, minimum=0.0, maximum=0.9, step=0.05, label="BBox threshold",
            info="a threshold for filtering out low-scoring bbox predictions",
        ),
    ],
    outputs=gr.Gallery(label="output", preview=True, height=500),
    title=title,
    description=description,
    article=article,
    examples=examples,
    cache_examples=True,
    examples_per_page=6,
)


if __name__ == "__main__":
    demo.launch()
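
# Run note (hedged): `python app.py` launches the Gradio server locally and prints a
# URL to open in a browser; on HF Spaces the runtime serves `demo` directly, and
# @spaces.GPU(duration=20) requests a GPU allocation of up to 20 seconds per call.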