KurtLin committed
Commit cd59216 · 1 parent: 4ab5b0e

Initial Submit

.gitattributes CHANGED
@@ -32,3 +32,11 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ model/RegSAUnetpp.pt filter=lfs diff=lfs merge=lfs -text
+ model/SERegUNet16GF.pt filter=lfs diff=lfs merge=lfs -text
+ model/SERegUNet4GF.pt filter=lfs diff=lfs merge=lfs -text
+ model/UNet3plus.pt filter=lfs diff=lfs merge=lfs -text
+ model/AngioNet.pt filter=lfs diff=lfs merge=lfs -text
+ model/EffUNetppb5.pt filter=lfs diff=lfs merge=lfs -text
+ example/angio.png filter=lfs diff=lfs merge=lfs -text
+ example/@eaDir filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,120 @@
+ import os
+ os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
+
+ import gradio as gr
+ import torch
+ import cv2
+ import numpy as np
+ from preprocess import unsharp_masking
+ import time
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ print("torch: ", torch.__version__)
+
+ def filesort(img, model):
+     # Keep a colour copy for the overlay; preprocess the grayscale view.
+     ori = img.copy()
+     img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)  # Gradio supplies RGB arrays.
+     h, w = img.shape
+     img_out = preprocessing(img, model)
+     return img_out, h, w, img, ori
+
+ def preprocessing(img, model='SE-RegUNet 4GF'):
+     img = unsharp_masking(img).astype(np.uint8)
+     if model in ('AngioNet', 'UNet3+'):
+         # Single channel, min-max normalised.
+         img = np.float32((img - img.min()) / (img.max() - img.min() + 1e-6))
+         img_out = np.expand_dims(img, axis=0)
+     elif model == 'SE-RegUNet 4GF':
+         # Three channels: the raw image plus two CLAHE-enhanced views.
+         clahe1 = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
+         clahe2 = cv2.createCLAHE(clipLimit=8.0, tileGridSize=(8, 8))
+         image1 = clahe1.apply(img)
+         image2 = clahe2.apply(img)
+         img = np.float32((img - img.min()) / (img.max() - img.min() + 1e-6))
+         image1 = np.float32((image1 - image1.min()) / (image1.max() - image1.min() + 1e-6))
+         image2 = np.float32((image2 - image2.min()) / (image2.max() - image2.min() + 1e-6))
+         img_out = np.stack((img, image1, image2), axis=0)
+     else:
+         # Remaining models: one CLAHE view replicated across three channels.
+         clahe1 = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
+         image1 = clahe1.apply(img)
+         image1 = np.float32((image1 - image1.min()) / (image1.max() - image1.min() + 1e-6))
+         img_out = np.stack((image1,) * 3, axis=0)
+     return img_out
+
+ def process_input_image(img, model, rescale):
+     ori_img = img.copy()
+     h, w, _ = ori_img.shape
+     # Crop to a multiple of 32 so the encoder strides divide the input evenly.
+     # Explicit upper bounds guard the pad == 0 case, where [0:-0] would be empty.
+     pad_h, pad_w = h % 32, w % 32
+     y0, y1 = pad_h // 2, h - (pad_h - pad_h // 2)
+     x0, x1 = pad_w // 2, w - (pad_w - pad_w // 2)
+     img = ori_img[y0:y1, x0:x1]
+
+     if model == 'SE-RegUNet 4GF':
+         pipe = torch.jit.load('./model/SERegUNet4GF.pt')
+     elif model == 'SE-RegUNet 16GF':
+         pipe = torch.jit.load('./model/SERegUNet16GF.pt')
+     elif model == 'AngioNet':
+         pipe = torch.jit.load('./model/AngioNet.pt')
+     elif model == 'EffUNet++ B5':
+         pipe = torch.jit.load('./model/EffUNetppb5.pt')
+     elif model == 'Reg-SA-UNet++':
+         pipe = torch.jit.load('./model/RegSAUnetpp.pt')
+     elif model == 'UNet3+':
+         pipe = torch.jit.load('./model/UNet3plus.pt')
+     pipe = pipe.to(device).eval()
+
+     start = time.time()
+     img, h, w, ori_gray, ori = filesort(img, model)
+     img = torch.FloatTensor(img).unsqueeze(0).to(device)
+     with torch.no_grad():
+         if model == 'AngioNet':
+             # Duplicate the input so the scripted AngioNet graph sees a batch of two.
+             img = torch.cat([img, img], dim=0)
+         logit = np.round(torch.softmax(pipe.forward(img), dim=1).cpu().numpy()[0, 0]).astype(np.uint8)
+     spent = time.time() - start
+     spent = f"{spent:.3f} seconds"
+
+     # Overlay: paint predicted vessel pixels into the red channel.
+     logit = logit.astype(bool)
+     img_out = ori.copy()
+     img_out[logit, 0] = 255
+     ori_img[y0:y1, x0:x1] = img_out
+     return spent, ori_img
+
+
+ my_app = gr.Blocks()
+ with my_app:
+     gr.Markdown("Coronary Angiogram Segmentation with Gradio.")
+     gr.Markdown("Author: Ching-Ting Lin, Artificial Intelligence Center, China Medical University Hospital, Taichung City, Taiwan.")
+     with gr.Tabs():
+         with gr.TabItem("Select your image"):
+             with gr.Row():
+                 with gr.Column():
+                     img_source = gr.Image(label="Please select an angiogram.", value='./example/angio.png', shape=(512, 512))
+                     model_choice = gr.Dropdown(['SE-RegUNet 4GF', 'SE-RegUNet 16GF', 'AngioNet', 'EffUNet++ B5',
+                                                 'Reg-SA-UNet++', 'UNet3+'], label='Model', info='Which model to run?')
+                     model_rescale = gr.Dropdown(['2x2', '4x4', '8x8', '16x16'], label='Rescale', info='How many batches?')
+                     source_image_loader = gr.Button("Vessel Segment")
+                 with gr.Column():
+                     time_spent = gr.Label(label="Time Spent (Preprocessing + Inference)")
+                     img_output = gr.Image(label="Output Mask")
+
+             source_image_loader.click(
+                 process_input_image,
+                 [
+                     img_source,
+                     model_choice,
+                     model_rescale
+                 ],
+                 [
+                     time_spent,
+                     img_output
+                 ]
+             )
+
+ my_app.launch(debug=True)
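
For quick local testing, process_input_image can also be driven without the Gradio UI. A minimal sketch, assuming the checkout layout from this commit (the ./model checkpoints and ./example/angio.png) and assuming the final my_app.launch(debug=True) call is first moved under an if __name__ == "__main__": guard so that importing app does not start the server; the script itself is not part of the commit:

import cv2
from app import process_input_image

# Load the bundled example as an RGB array, matching what gr.Image supplies.
img = cv2.cvtColor(cv2.imread('./example/angio.png'), cv2.COLOR_BGR2RGB)

# The rescale argument is accepted but currently unused by process_input_image.
spent, overlay = process_input_image(img, 'SE-RegUNet 4GF', '2x2')
print(spent)

# Convert back to BGR for OpenCV before saving the red-channel overlay.
cv2.imwrite('overlay.png', cv2.cvtColor(overlay, cv2.COLOR_RGB2BGR))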
example/@eaDir/angio.png@SynoEAStream ADDED
Binary file (235 Bytes).
example/angio.png ADDED
Git LFS Details
  • SHA256: df1b9c2b1eedd4edbca6d807730a9aa511ffaa4f8722962d3f703f4fd147e7bb
  • Pointer size: 131 Bytes
  • Size of remote file: 221 kB
model/AngioNet.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60f54fb6481e59b61e421df5a0e59842f07fbbd89f0f7f2535ee5a4c0a104cdb
+ size 148997984
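
Each checkpoint in this commit is stored as a Git LFS pointer like the three lines above (version, oid, size) rather than the raw weights; Git LFS swaps the pointer for the real file on checkout. A minimal sketch of parsing such a pointer, for example to check the advertised size before pulling the weights; the helper name read_lfs_pointer is ours:

def read_lfs_pointer(path):
    # An LFS pointer is a small text file of "key value" lines:
    #   version https://git-lfs.github.com/spec/v1
    #   oid sha256:<hex digest>
    #   size <bytes>
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(' ')
            fields[key] = value
    return fields

ptr = read_lfs_pointer('model/AngioNet.pt')
print(ptr['oid'], ptr['size'])  # sha256:60f54fb6... 148997984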
model/EffUNetppb5.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1501967cb37b8d3ec025484eedccb9ac8d343dc75886a978acd3ada22a9e1f7
+ size 120253012
model/RegSAUnetpp.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0fee28a388c9481c1c32c9eb04247a35474d468e4fbab562d0781112ca7f12ed
+ size 128479489
model/SERegUNet16GF.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42421187a29f42d0007d686d575e86ae5a0a72c5059fbaecd142af04765f9d7e
+ size 811937473
model/SERegUNet4GF.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:758b4d71740d4fa9b692532a71f99db5a8095abd40eb818b90b0eab6641e191b
+ size 125795146
model/UNet3plus.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:331e34c0cd21c4e84fe4e06fb31267c3c0612cae69f4de350d7563151577d6d7
+ size 108264749
preprocess.py ADDED
@@ -0,0 +1,7 @@
+ import cv2
+
+ def unsharp_masking(img):
+     # Classic unsharp mask: img + (img - blur) == 2*img - blur, with sigma = 2.
+     gaussian = cv2.GaussianBlur(img, (0, 0), 2.0)
+     img = cv2.addWeighted(img, 2.0, gaussian, -1.0, 0)
+     return img
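
unsharp_masking adds the difference between the image and its Gaussian-blurred copy back onto the image, which accentuates vessel edges before normalisation. A quick usage sketch; the output filename is illustrative:

import cv2
from preprocess import unsharp_masking

# Read the example angiogram in grayscale, as the app's pipeline does.
gray = cv2.imread('./example/angio.png', cv2.IMREAD_GRAYSCALE)
sharp = unsharp_masking(gray)  # uint8 in, uint8 out (addWeighted saturates)
cv2.imwrite('angio_sharp.png', sharp)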
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ gradio  # imported by app.py
+ timm == 0.8.0.dev0
+ torch == 1.13.1
+ numpy == 1.23.5
+ opencv-python == 4.5.5.64
+ matplotlib == 3.5.1