Work committed on
Commit
72f3dc8
·
1 Parent(s): 405e945
Files changed (2) hide show
  1. app.py +38 -0
  2. model.pt +3 -0
app.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import gradio as gr
3
+ import torch
4
+ from torchvision.transforms import Resize, ToTensor
5
+
6
# Pick GPU when available, otherwise fall back to CPU.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

# BUG FIX: the original called model.load_state_dict('model.pt', map_location=device)
# on a bare torch.nn.Module(). That is wrong twice over: load_state_dict() expects a
# state-dict mapping (not a file path, and it has no map_location parameter), and a
# bare nn.Module has no parameters to populate, so any non-empty state dict would
# raise. torch.load() restores the serialized object from the checkpoint directly.
# NOTE(review): this assumes model.pt holds a pickled nn.Module rather than a plain
# state_dict — confirm against how the checkpoint was produced; if it is a state
# dict, the real architecture class must be instantiated here instead.
model = torch.load('model.pt', map_location=device)
model.eval()  # inference-only app: freeze dropout/batch-norm behavior

# BUG FIX: Resize((224)) is identical to Resize(224) — (224) is not a tuple — so
# only the shorter edge was resized. The UI advertises 224x224, so fix both dims.
resize = Resize((224, 224))
to_tensor = ToTensor()
# Preprocessing pipeline, applied in order: PIL/ndarray -> tensor, then resize.
transforms = [to_tensor, resize]
14
+
15
+
16
def test(image):
    """Run the preprocessing pipeline on one uploaded image.

    Applies each module-level transform (ToTensor, then Resize) in order and
    returns the result wrapped in a list, matching Gradio's outputs list.

    NOTE(review): the loaded ``model`` is never invoked here, so the "pred"
    output is just the preprocessed input — confirm whether the autoencoder
    forward pass was meant to run in this function.
    """
    result = image
    for apply_transform in transforms:
        result = apply_transform(result)
    return [result]
20
+
21
+
22
# Gradio UI: a single image input fed through `test`, one image output.
interface = gr.Interface(
    title="OAM Autoencoder",
    # Typo fix in user-facing text: "a image" -> "an image".
    description="Select an image",
    allow_flagging="never",
    fn=test,
    inputs=[
        # NOTE(review): the `shape=` kwarg exists only in Gradio 3.x (it was
        # removed in 4.x) — confirm the pinned gradio version before upgrading.
        gr.Image(label="x", shape=[224, 224]),
    ],
    outputs=[
        gr.Image(label="pred"),
    ],
    examples=[
        ["img.jpg"],
    ],
)

# Serve locally only; share=False keeps the app off the public gradio.live tunnel.
interface.launch(share=False)
model.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:414ba68af052135ea99bf7ad45d4bd4f9f8e2a215a95dae4de489a46e11d7f7b
3
+ size 323870496