lihongjie commited on
Commit
0b80cd8
·
1 Parent(s): 97345f8

add ax637 models

Browse files
.gitattributes CHANGED
@@ -39,3 +39,9 @@ demo-imgs/im0.png filter=lfs diff=lfs merge=lfs -text
39
  demo-imgs/im1.png filter=lfs diff=lfs merge=lfs -text
40
  AXModel-Disparity-Map-rt.png filter=lfs diff=lfs merge=lfs -text
41
  ONNX-Disparity-Map-rt.png filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
39
  demo-imgs/im1.png filter=lfs diff=lfs merge=lfs -text
40
  AXModel-Disparity-Map-rt.png filter=lfs diff=lfs merge=lfs -text
41
  ONNX-Disparity-Map-rt.png filter=lfs diff=lfs merge=lfs -text
42
+ models-ax637/rt_p1.onnx filter=lfs diff=lfs merge=lfs -text
43
+ models-ax637/rt_up.axmodel filter=lfs diff=lfs merge=lfs -text
44
+ models-ax637/rt_up.onnx filter=lfs diff=lfs merge=lfs -text
45
+ models-ax637/rt_iterfn.axmodel filter=lfs diff=lfs merge=lfs -text
46
+ models-ax637/rt_iterfn.onnx filter=lfs diff=lfs merge=lfs -text
47
+ models-ax637/rt_p1.axmodel filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -32,7 +32,7 @@ For those who are interested in model conversion, you can try to export axmodel
32
  |Chips|Models |Time|
33
  |--|--|--|
34
  |AX650|rt_sceneflow|142.0 ms |
35
-
36
 
37
 
38
  ## How to use
 
32
  |Chips|Models |Time|
33
  |--|--|--|
34
  |AX650|rt_sceneflow|142.0 ms |
35
+ |AX637|rt_sceneflow(分成三段 rt_p1 + rt_iterfn + rt_up)|123.0 + 29.3*18 + 9.1 = 659.5 ms|
36
 
37
 
38
  ## How to use
infer_ax637.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import argparse
3
+ import glob
4
+ import numpy as np
5
+ import torch
6
+ from tqdm import tqdm
7
+ from pathlib import Path
8
+ from PIL import Image
9
+ from matplotlib import pyplot as plt
10
+ import os
11
+ import onnxruntime as ort
12
+ import axengine as axe
13
+
14
+
15
+
16
def load_image(imfile):
    """Read *imfile*, resize to 512x384, and return a (1, 3, H, W) float tensor.

    Any alpha channel is dropped; pixel values stay in the raw 0-255 range
    (normalization happens later, per backend, in the caller).
    """
    pil_img = Image.open(imfile).resize((512, 384))
    rgb = np.array(pil_img).astype(np.uint8)[..., :3]   # HWC, keep first 3 channels
    tensor = torch.from_numpy(rgb).permute(2, 0, 1).float()  # HWC -> CHW
    return tensor.unsqueeze(0)                               # add batch dim
20
+
21
+
22
def visualize_disparity(disparity_map, title):
    """Render *disparity_map* with a jet colormap and save it as '<title>-rt.png'.

    The figure is saved to disk, not shown interactively.
    """
    plt.figure(figsize=(10, 6))
    plt.imshow(disparity_map, cmap='jet')
    plt.colorbar(label="Disparity")
    plt.title(title)
    plt.axis('off')
    # plt.show()
    plt.savefig(f"{title}-rt.png")
    # Close the figure explicitly: this function is called inside demo()'s
    # per-image loop, and unclosed matplotlib figures accumulate memory.
    plt.close()
30
+
31
+
32
class Session:
    """Three-stage stereo-disparity pipeline: p1 -> iterfn (x iters) -> up.

    Each stage is loaded independently from either an ONNX model
    (onnxruntime backend) or an .axmodel (axengine backend); the backend
    is selected by file extension.
    """

    def __init__(self, path_p1, path_iterfn, path_up, iters=18):
        # Number of refinement iterations for the iterfn stage.
        self.iters = iters
        self.session_p1 = self._load_session(path_p1)
        self.session_iterfn = self._load_session(path_iterfn)
        self.session_up = self._load_session(path_up)

    @staticmethod
    def _load_session(path):
        """Create an inference session for *path*, dispatching on extension.

        Raises NotImplementedError for any extension other than
        '.onnx' or '.axmodel'.
        """
        ext = os.path.splitext(path)[1]
        if ext == ".onnx":
            return ort.InferenceSession(path)
        if ext == ".axmodel":
            return axe.InferenceSession(path)
        raise NotImplementedError(f"unsupported model extension: {ext!r}")

    def infer(self, img_left, img_right):
        """Run the full pipeline and return the squeezed disparity map.

        NOTE(review): img_left/img_right layouts must match the loaded
        models' expected inputs (the caller feeds NCHW float to the ONNX
        variant and NHWC uint8 to the axmodel variant) — confirm against
        the exported models.
        """
        # Stage 1: initial disparity plus the state needed by later stages.
        disp, net, stem_2x, c0, c1, c2 = self.session_p1.run(
            ["disp", "net", "stem_2x", "c0", "c1", "c2"],
            {"left": img_left, "right": img_right},
        )

        # Stage 2: iterative refinement. disp/net are carried across
        # iterations; the context features c0/c1/c2 stay fixed.
        for _ in range(self.iters):
            disp, mask_feat_4, net = self.session_iterfn.run(
                ["disp", "mask_feat_4", "net"],
                {"disp_in": disp, "net_in": net, "c0": c0, "c1": c1, "c2": c2},
            )

        # Stage 3: upsample using the final iteration's mask features.
        outputs = self.session_up.run(
            None,
            {"disp": disp, "mask_feat_4": mask_feat_4, "stem_2x": stem_2x},
        )[0]
        return outputs.squeeze()
81
+
82
def demo(args):
    """Run ONNX vs AXModel inference on matching left/right image pairs.

    For each pair, prints both disparity maps and saves their
    visualizations as PNGs.
    """
    left_images = sorted(glob.glob(args.left_imgs, recursive=True))
    right_images = sorted(glob.glob(args.right_imgs, recursive=True))

    # Sessions are loop-invariant (each loads three model files); build
    # them once instead of re-loading all six models per image pair.
    ort_session = Session("models-ax637/rt_p1.onnx",
                          "models-ax637/rt_iterfn.onnx",
                          "models-ax637/rt_up.onnx",
                          args.valid_iters)
    ax_session = Session("models-ax637/rt_p1.axmodel",
                         "models-ax637/rt_iterfn.axmodel",
                         "models-ax637/rt_up.axmodel",
                         args.valid_iters)

    for imfile1, imfile2 in tqdm(list(zip(left_images, right_images))):
        image1 = load_image(imfile1)
        image2 = load_image(imfile2)

        input_l_np = image1.cpu().numpy()
        input_r_np = image2.cpu().numpy()

        # axmodel path: NHWC uint8 raw pixels (quantized model input).
        disp_ax = ax_session.infer(
            input_l_np.transpose(0, 2, 3, 1).astype(np.uint8),
            input_r_np.transpose(0, 2, 3, 1).astype(np.uint8),
        )

        # ONNX path: NCHW float normalized to [-1, 1].
        input_l_np = 2 * (input_l_np / 255.0) - 1.0
        input_r_np = 2 * (input_r_np / 255.0) - 1.0
        disp_onnx = ort_session.infer(input_l_np, input_r_np)

        print("disp_onnx", disp_onnx)
        print("disp_ax", disp_ax)
        visualize_disparity(disp_onnx, title="ONNX Disparity Map")
        visualize_disparity(disp_ax, title="AXModel Disparity Map")
114
+
115
+
116
+
117
+
118
if __name__ == '__main__':
    # CLI entry point: glob patterns for the stereo pair plus the number
    # of refinement iterations forwarded to the iterfn stage.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-l', '--left_imgs',
                            default="demo-imgs/im0.png",
                            help="path to all first (left) frames")
    arg_parser.add_argument('-r', '--right_imgs',
                            default="demo-imgs/im1.png",
                            help="path to all second (right) frames")
    arg_parser.add_argument('--valid_iters', type=int, default=18,
                            help='number of flow-field updates during forward pass')
    demo(arg_parser.parse_args())
infer_ax.py → infer_ax650.py RENAMED
File without changes
models-ax637/rt_iterfn.axmodel ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:efc21c9a21a9c781f9e2177009cdb81634f00fbf699cefa7ccb8e726298180ec
3
+ size 17108207
models-ax637/rt_iterfn.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f26ea3229aa7c7824a44223abadb93d9b498ec3749f361ee9bd7efc6f5dc814
3
+ size 32100279
models-ax637/rt_p1.axmodel ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:02381fc24f51a4e4d8c033f3879a878dae83a1dac9584af26e96f675f25d9d15
3
+ size 10595689
models-ax637/rt_p1.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:559ca153da0c514c7b5394436c1d3a05dfd8a4c031d3de03e0900dc083cc924d
3
+ size 16331961
models-ax637/rt_up.axmodel ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0dd1062856c2ebcb099e0536e107bf2f43e778e384c7bf50af347562b81f8976
3
+ size 206541
models-ax637/rt_up.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bae40d112487f6bb5271bf2268744279926d4b9d1c11a09a86cda3790da5449d
3
+ size 260200