import torch
import pandas as pd
import numpy as np
import gradio as gr
from PIL import Image
from torch.nn import functional as F
from collections import OrderedDict
from torchvision import transforms
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from pytorch_lightning import LightningModule, Trainer, seed_everything
import albumentations as A
from albumentations.pytorch import ToTensorV2
import torchvision.transforms as T
from custom_resnet import LitResnet
# CIFAR-10 class names, ordered to match the model's 10 output logits.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# CSV of misclassified test images; each row holds "actual" and "predicted"
# class indices (row i-1 is read for image file i, see show_incorrect below).
wrong_img = pd.read_csv('misclassified_images.csv')
wrong_img_no = len(wrong_img)
# Load trained weights on CPU; strict=False tolerates key mismatches between
# the checkpoint and the current LitResnet definition.
model = LitResnet()
model.load_state_dict(torch.load("model.pth", map_location=torch.device('cpu')), strict=False)
model.eval()
# Preprocessing for every input image (ImageNet mean/std normalization).
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
# Inverse of a ~(0.5, 0.23) normalization. NOTE(review): defined but never
# used in this file — presumably kept for displaying denormalized tensors.
inv_normalize = T.Normalize(
mean=[-0.50/0.23, -0.50/0.23, -0.50/0.23],
std=[1/0.23, 1/0.23, 1/0.23])
# One GradCAM instance per sub-layer of convblock3; index i corresponds to the
# "-5".."-1" layer choices in the UI. Assumes convblock3 has >= 5 children —
# TODO confirm against custom_resnet.LitResnet.
grad_cams = [GradCAM(model=model, target_layers=[model.convblock3[i]], use_cuda=False) for i in range(5)]
def get_gradcam_image(input_tensor, label, target_layer):
    """Return the GradCAM heatmap for class `label` at layer index `target_layer`.

    `target_layer` indexes the pre-built `grad_cams` list (0-4); the result is
    a 2-D grayscale activation map for the single image in `input_tensor`.
    """
    cam = grad_cams[target_layer]
    cam_targets = [ClassifierOutputTarget(label)]
    heatmaps = cam(input_tensor=input_tensor, targets=cam_targets)
    # cam() returns one map per batch element; take the only one.
    return heatmaps[0, :]
def image_classifier(input_image, top_classes=3, show_cam=True, target_layers=(2, 3), transparency=0.5):
    """Classify an image with the CIFAR-10 ResNet and optionally add GradCAM overlays.

    Args:
        input_image: H x W x 3 uint8 numpy array (Gradio image input).
        top_classes: how many of the highest-probability classes to return.
        show_cam: when True, compute one GradCAM overlay per requested layer.
        target_layers: iterable of indices (0-4) into ``grad_cams``; the UI
            CheckboxGroup supplies these as option indices. (Tuple default
            replaces the original mutable-list default argument.)
        transparency: image weight used when blending the CAM with the image.

    Returns:
        (outputs, confidences): a list of (overlay_image, caption) tuples
        (empty when show_cam is False) and a dict mapping the top-k class
        names to their softmax probabilities.
    """
    orig_image = input_image
    input_tensor = transform(input_image).unsqueeze(0)  # add batch dim -> (1, 3, H, W)
    output = model(input_tensor)
    # Softmax over the 10 logits of the single batch element.
    probs = F.softmax(output.flatten(), dim=0)
    confidences = {classes[i]: float(probs[i]) for i in range(len(classes))}
    confidences = dict(
        sorted(confidences.items(), key=lambda kv: kv[1], reverse=True)[:top_classes]
    )
    # Fix: extract a plain int — ClassifierOutputTarget expects a class index,
    # not the 1-element tensor torch.max used to produce.
    label = int(output.argmax(dim=1).item())
    outputs = []
    if show_cam:
        for layer in target_layers:
            grayscale_cam = get_gradcam_image(input_tensor, label, layer)
            overlay = show_cam_on_image(orig_image / 255, grayscale_cam,
                                        use_rgb=True, image_weight=transparency)
            # CheckboxGroup indices 0..4 map back to layer names "-5".."-1".
            outputs.append((overlay, f"Layer {layer - 5}"))
    return outputs, confidences
# One example row per CIFAR-10 class, mirroring the Interface input order:
# (image path, top_classes, show_cam, selected layers, transparency).
examples = [
    [f'examples/{name}.jpg', 3, True, ["-2", "-1"], 0.5]
    for name in classes
]
# Tab 1: classifier UI. The CheckboxGroup uses type='index', so the selected
# "-5".."-1" labels reach image_classifier as integer indices 0..4.
# NOTE(review): gr.Image(shape=...) and .style(...) are Gradio 3.x APIs,
# removed in Gradio 4 — pin the gradio version accordingly.
demo_1 = gr.Interface(
fn=image_classifier,
inputs=[
gr.Image(shape=(32, 32), label="Input Image").style(width=128, height=128),
gr.Slider(1, 10, value=3, step=1, label="Top Classes",
info="How many top classes do you want to see?"),
gr.Checkbox(label="Enable GradCAM", value=True, info="Do you want to see GradCAM Images?"),
gr.CheckboxGroup(["-5","-4", "-3", "-2", "-1"], value=["-2", "-1"], label="Network Layers", type='index',
info="On which layer do you want to see GradCAM?",),
gr.Slider(0, 1, value=0.5, label="Transparency", step=0.1,
info="Set Transparency of CAMs")
],
outputs=[gr.Gallery(label="Output Images", columns=2, rows=2), gr.Label(label='Top Classes')],
examples=examples
)
def show_incorrect(num_examples=10):
    """Randomly sample misclassified images with actual/predicted captions.

    Args:
        num_examples: number of images to sample (with replacement, so
            duplicates are possible — matching the original behavior).

    Returns:
        List of (image_array, "Actual:x / Predicted:y") tuples for a
        gr.Gallery, drawn from misclassified-images/1.jpg .. {wrong_img_no}.jpg.
    """
    result = []
    for _ in range(int(num_examples)):
        # Fix: numpy's randint upper bound is exclusive, so the original
        # randint(1, wrong_img_no) could never sample the last image.
        # Files appear to be named 1..wrong_img_no with CSV row j-1 — the
        # j-1 indexing below supports this; confirm against the dataset.
        j = np.random.randint(1, wrong_img_no + 1)
        image = np.asarray(Image.open(f'misclassified-images/{j}.jpg'))
        row = wrong_img.loc[j - 1]
        actual = classes[row.at["actual"]]
        predicted = classes[row.at["predicted"]]
        result.append((image, f"Actual:{actual} / Predicted:{predicted}"))
    return result
# Tab 2: browse randomly sampled misclassified images.
demo_2 = gr.Interface(
fn=show_incorrect,
inputs=[
gr.Number(value=10, minimum=1, maximum=50, label="Number of images", precision=0,
info="How many misclassified examples do you want to view? (max 50)")
],
outputs=[gr.Gallery(label="Misclassified Images (Actual / Predicted)", columns=5)]
)
# Combine both tabs and start the Gradio server (blocks until stopped).
demo = gr.TabbedInterface([demo_1, demo_2], ["Image Classifier", "Misclassified Images"])
demo.launch()