File size: 911 Bytes
72f3dc8
 
 
 
 
b8f5ef0
72f3dc8
b8f5ef0
c4b1a94
b8f5ef0
70d8d45
88bddca
72f3dc8
 
 
 
 
 
 
 
 
88bddca
 
 
72f3dc8
 
 
 
 
 
 
88bddca
 
72f3dc8
 
 
 
 
88bddca
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
import cv2
import gradio as gr
import torch
from torchvision.transforms import Resize, ToTensor

from autoencoder import Autoencoder

# Pick GPU if available; used to remap the checkpoint onto the local machine.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

# Load the trained autoencoder weights and switch to inference mode.
model = Autoencoder()
model.load_state_dict(torch.load('model.pt', map_location=device))
model = model.eval()

# Preprocessing pipeline: numpy HWC image -> CHW float tensor -> fixed size.
# BUG FIX: `Resize((224))` is the same as `Resize(224)` (resizes only the
# shortest side, keeping aspect ratio, so output size varies per image).
# A square (224, 224) target is what the redundant parentheses intended.
resize = Resize((224, 224))
to_tensor = ToTensor()
transforms = [to_tensor, resize]


def test(image):
    """Reconstruct *image* with the autoencoder.

    Parameters
    ----------
    image : numpy.ndarray
        HWC image as delivered by the gradio ``Image`` input
        (``type='numpy'``).

    Returns
    -------
    numpy.ndarray
        The reconstruction as an HWC float array.
    """
    # Apply the module-level preprocessing pipeline (ToTensor, then Resize).
    for transform in transforms:
        image = transform(image)
    image = image.unsqueeze(0)  # add batch dimension: CHW -> 1xCHW
    # Inference only: no_grad skips building the autograd graph instead of
    # discarding it after the fact with .detach(), saving time and memory.
    with torch.no_grad():
        output = model(image)
    return output.squeeze(0).permute(1, 2, 0).cpu().numpy()


# Build and launch the gradio demo: one image in, one reconstruction out.
interface = gr.Interface(
    title="OAM Autoencoder",
    description="Select an image",  # fixed typo: "a image" -> "an image"
    allow_flagging="never",
    fn=test,
    inputs=gr.Image(label="x", type='numpy'),
    outputs=gr.Image(label="pred"),
    examples=[
        ["img.jpg"],
    ],
)

interface.launch()