"""Gradio demo: YOLOv3 object detection on the PASCAL VOC classes.

Loads a trained YOLOv3 checkpoint and serves a Gradio interface that draws
predicted bounding boxes on an uploaded image.

(Removed copy/paste artifacts — a file-size line, git commit hashes, and
gutter line numbers from a web code viewer — that preceded the code and made
the module un-importable.)
"""
# Third-party imports (deduplicated: gradio, config, and torchvision.transforms
# were each imported twice in the original).
import torch
import torch.optim as optim
import torchvision
from torchvision import transforms
import numpy as np
import gradio as gr
from PIL import Image
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image
import albumentations as A
from albumentations.pytorch import ToTensorV2

# Project-local imports.
import config
import utils
from model import YOLOv3

# Anchor boxes rescaled to each prediction grid resolution.
# NOTE(review): assumes config.ANCHORS is (scales, 3, 2) and config.S lists one
# grid size per scale — confirm against config module.
_grid_sizes = torch.tensor(config.S)
_anchors = torch.tensor(config.ANCHORS)
scaled_anchors = (
    _anchors * _grid_sizes.unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)
).to('cpu')

# Build the network and its optimizer, then restore trained weights from the
# checkpoint file shipped alongside this script.
model = YOLOv3(num_classes=config.NUM_CLASSES)
optimizer = optim.Adam(
    model.parameters(),
    lr=config.LEARNING_RATE,
    weight_decay=config.WEIGHT_DECAY,
)
utils.load_checkpoint("checkpoint.pth[1].tar", model, optimizer, config.LEARNING_RATE)


def inference(input_img, show_gradcam="yes", transparency=0.5, target_layer_number=-1,
              iou_thresh=0.6, conf_thresh=0.5):
    """Run YOLOv3 detection on a single image and return a matplotlib figure.

    Args:
        input_img: image from the Gradio Image component (numpy array).
        show_gradcam, transparency, target_layer_number: accepted for interface
            compatibility but currently unused — no Grad-CAM overlay is wired
            up in this function.
        iou_thresh: third argument forwarded to ``utils.plot_single_image``
            (was hard-coded to 0.6).
        conf_thresh: fourth argument forwarded to ``utils.plot_single_image``
            (was hard-coded to 0.5).
            NOTE(review): assumed these are the NMS-IoU and confidence
            thresholds — confirm against utils.plot_single_image's signature.

    Returns:
        The figure produced by ``utils.plot_single_image`` with predicted
        bounding boxes drawn on the input image.
    """
    out_fig = utils.plot_single_image(model, input_img, iou_thresh, conf_thresh, scaled_anchors)
    return out_fig

title = "TSAI S13 Assignment: YOLO V3 trained on PASCAL VOC Dataset"
description = "A simple Gradio interface for object detection using YOLO V3 algorithm. Bounding boxes should be shown around these objects - aeroplane, bicycle, bird, boat, bottle, bus, car, cat, chair, cow, diningtable, dog, horse, motorbike, person, pottedplant, sheep, sofa, train, tvmonitor "

# BUG FIX: each example must supply exactly one value per input component.
# The interface below has a single Image input, but the original examples
# carried four values each (["000002.jpg", "yes", 0.5, -1]), which makes
# Gradio raise a length-mismatch error when rendering the examples gallery.
examples = [["000002.jpg"],
            ["000004.jpg"],
            ["000006.jpg"],
            ["000058.jpg"]]

demo = gr.Interface(
    inference,
    # NOTE(review): gr.Image(shape=...) is a Gradio 3.x argument, removed in
    # Gradio 4.x — confirm the pinned gradio version before upgrading.
    inputs=[gr.Image(shape=(416, 416), label="Input Image")],
    outputs=[gr.Plot(label="Plot")],
    title=title,
    description=description,
    examples=examples,
)

# Guard the launch so importing this module (e.g. for testing) does not start
# the web server as a side effect.
if __name__ == "__main__":
    demo.launch()