JohnChiu commited on
Commit
e1728fa
·
1 Parent(s): ec63f19

add elegant algorithm

Browse files
Files changed (5) hide show
  1. .gitignore +1 -0
  2. app.py +610 -101
  3. app_.py +483 -0
  4. requirements.txt +0 -0
  5. z_tof2pointcloud.cpp +174 -0
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .*/
app.py CHANGED
@@ -1,13 +1,89 @@
1
  import gradio as gr
 
 
2
  import numpy as np
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  import plotly.graph_objs as go
 
 
4
  from scipy.ndimage import convolve
5
- import os
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
  def readRAW(path):
8
 
9
  filesize = os.path.getsize(path)
10
- print(filesize)
11
  if filesize == 31*40*64*2:
12
  output = np.fromfile(path, dtype=np.int16)
13
  else:
@@ -32,142 +108,575 @@ def readRAW(path):
32
  p3 = (B3 << 2) | ((B4 >> 6) & 0x03)
33
 
34
  output = np.stack([p0, p1, p2, p3], axis=1).flatten()
35
- # output = np.fromfile(path, dtype=np.int16).reshape(31,40,64*2)
36
- # output = np.fromfile(path, dtype=np.int16).reshape(30,40,64)
37
-
38
  return output.reshape(31,40,64)
39
 
 
 
 
 
40
 
41
- def load_bin(file):
 
42
 
43
-
44
-
45
 
46
- # raw_hist = readRAW(file.name)[1:,...].astype(np.float32)
47
- raw_hist = readRAW(file.name).astype(np.float32)
 
 
 
 
48
 
49
- print("raw_hist shape:", raw_hist[0,0,:])
50
- # raw_hist = raw_hist[::-1, ::-1, :]
 
51
 
52
- print("raw_hist shape:", raw_hist[0,0,:])
 
53
 
54
- # raw_hist = readRAW(file.name)
55
- # 默认显示一张 sum 图像
56
 
 
57
  multishot = (raw_hist[...,62]*1024 + raw_hist[...,63])
58
- # multishot[multishot==0] = 20e3
59
- normalize_data = 1 / multishot * 20e3
60
-
61
- nor_hist = (raw_hist) * normalize_data[...,np.newaxis]
62
 
63
- # nor_hist = (raw_hist)
64
 
65
- img = np.sum(nor_hist[1:, :, :-2], axis=2)
66
- img = np.log(img +1)
67
- norm_img = (img - img.min()) / (img.max())
68
- img_uint8 = (norm_img * 255).astype(np.uint8)
69
 
70
- img_tc_zoomed = np.repeat(np.repeat(img_uint8, 16, axis=0), 16, axis=1)
71
 
 
72
 
73
- img = np.argmax(nor_hist[1:, :, 5:-2], axis=2)
74
- norm_img = (img - img.min()) / (img.max() + 1e-8)
75
- img_uint8 = (norm_img * 255).astype(np.uint8)
76
- img_tof_zoomed = np.repeat(np.repeat(img_uint8, 16, axis=0), 16, axis=1)
77
-
78
- return img_tc_zoomed,img_tof_zoomed, raw_hist, nor_hist
79
 
 
 
80
 
81
- def plot_pixel_histogram(evt: gr.SelectData, raw_hist, nor_hist):
82
- # print("evt:", evt)
83
- x, y = evt.index # Gradio SelectData 对象
84
- x = x // 16
85
- y = y // 16
86
- raw_values = raw_hist[y+1, x, :]
87
-
88
- tof = np.argmax(nor_hist[y+1, x, :-2])
89
- range = 5
90
- sim_values = nor_hist[y+1, x, tof-range:tof+range+1]
91
- histogram_sim = raw_hist[1:, :, tof-range:tof+range+1]
92
- print(sim_values.shape, histogram_sim.shape)
93
 
94
- img = np.tensordot(sim_values,histogram_sim, axes=(0, 2))
 
 
 
95
 
96
- # img = np.log(img +1)
97
- norm_img = (img - img.min()) / (img.max() + 1e-8)
98
- img_uint8 = (norm_img * 255).astype(np.uint8)
99
- img_tof_zoomed = np.repeat(np.repeat(img_uint8, 16, axis=0), 16, axis=1)
100
 
101
- vctEmbd = raw_hist[:1,:,:].flatten().astype(np.int32) >> 2
102
- fRX_Temp = (vctEmbd[15] << 3) + vctEmbd[14]
 
 
103
 
104
- LDVCC = (((((vctEmbd[65] << 8) + vctEmbd[64])) - 1024) / 1024 * 1.7 * 0.9638 + 1.42) * 6
105
- fTx_Temp = (((vctEmbd[67] << 8) + vctEmbd[66] - 1024) / 5.34 + 30)
106
- BVD = vctEmbd[23]
 
107
 
108
- # fTx_Temp = float(vctEmbd[61]+((vctEmbd[63] & 0xc0) << 2)) * 0.178 - 38.18
109
- # LDVCC = ((((vctEmbd[63]&0x30)<<4) + vctEmbd[60] - 110) * 13.7 + 5000) / 1000
110
- y_min = np.min(raw_values[:-2]) - 10
111
- y_max = np.max(raw_values[:-2]) + 10
112
- fig = go.Figure()
113
- fig.add_trace(go.Scatter(y=raw_values, mode="lines+markers"))
114
- fig.update_layout(
115
- title=f"Pixel ({x}, {y}) 在所有 {raw_values.shape[0]} 帧的强度变化 {f'RX: {fRX_Temp} °C'} {f'TX: {fTx_Temp:.2f} °C'} {f'LDVCC: {LDVCC:.2f} V'} {f'BVD: {BVD} V'}",
116
- xaxis_title="帧索引 (T)",
117
- yaxis_title="强度值",
118
- yaxis=dict(
119
- range=[y_min, y_max]) # Set the min and max for y-axis
120
- )
 
 
 
 
 
121
 
 
 
122
 
123
- return fig, img_tof_zoomed
 
 
 
124
 
125
- # def plot_depth(nor_hist):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
126
 
127
- # kernel = np.array([[1,1,1],[1,1,1],[1,1,1]])
 
128
 
129
- # # Create an empty array to store the results
130
- # output = np.zeros((96, 240, 254))
 
 
131
 
132
- # # Perform the convolution along the first two axes (height and width)
133
- # for i in range(254):
134
- # output[:, :, i] = convolve(nor_hist[:, :, i], kernel, mode='constant', cval=0)
135
 
136
- # modulate1 = np.arange(1,181,1)
137
- # modulate = modulate1 * modulate1 /(180*180)
138
- # arr = output[...,:180] * modulate
 
139
 
140
- # tc_bin = np.sum(arr,axis=(0,1))
141
- # max_id = np.argmax(tc_bin[:-2])
 
 
 
 
142
 
143
- # # modulate = np.concatenate([a, b,c])
144
- # pad_head = np.ones(max_id-4)
145
- # expand_kernel = np.arange(1,13,1) * 0.01
146
- # pad_tail = np.ones((180-len(pad_head)-len(expand_kernel)))
147
- # expand_filter = np.concatenate([pad_head, expand_kernel,pad_tail])
148
 
 
149
 
150
- # arr_expandfilter = arr * expand_filter
151
- # tof = np.argmax(arr,axis=2)
152
- # tof_filter = np.argmax(arr_expandfilter,axis=2)
 
 
 
153
 
154
- # return tof, tof_filter
155
 
156
- with gr.Blocks() as demo:
157
- gr.Markdown("## 上传 31,40,64 int16 `.bin/.raw` 文件,点击图像像素查看该像素的 64 帧直方图")
 
 
158
 
159
- file_input = gr.File(label="上传 .raw/.bin 文件", file_types=[".raw", ".bin"])
160
- image_tc_display = gr.Image(interactive=True, label="tc")
161
- image_tof_display = gr.Image(interactive=True, label="tof")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
162
 
163
- histogram = gr.Plot(label="像素强度曲线")
164
- raw_hist = gr.State()
165
- nor_hist = gr.State()
166
- image_sim_display = gr.Image(interactive=True, label="sim")
167
 
168
- file_input.change(load_bin, inputs=file_input, outputs=[image_tc_display, image_tof_display, raw_hist, nor_hist])
169
 
170
- image_tof_display.select(plot_pixel_histogram, inputs=[ raw_hist, nor_hist], outputs=[histogram,image_sim_display])
 
 
 
171
 
172
- # demo.launch(share=True)
173
- demo.launch(share=False)
 
1
  import gradio as gr
2
+ import cv2
3
+ import matplotlib
4
  import numpy as np
5
+ import os
6
+ import time
7
+ from PIL import Image
8
+ import torch
9
+ import torch.nn.functional as F
10
+ import open3d as o3d
11
+ import trimesh
12
+ import tempfile
13
+ import shutil
14
+ from pathlib import Path
15
+ from concurrent.futures import ThreadPoolExecutor
16
+ from gradio_imageslider import ImageSlider
17
+ from huggingface_hub import hf_hub_download
18
  import plotly.graph_objs as go
19
+ from plotly.subplots import make_subplots
20
+ from collections import deque
21
  from scipy.ndimage import convolve
22
+
23
+
24
+ # from ppd.utils.set_seed import set_seed
25
+ # from ppd.utils.align_depth_func import recover_metric_depth_ransac
26
+ # from ppd.utils.depth2pcd import depth2pcd
27
+ # from moge.model.v2 import MoGeModel
28
+ # from ppd.models.ppd import PixelPerfectDepth
29
+
30
+ # try:
31
+ # import spaces
32
+ # HUGGINFACE_SPACES_INSTALLED = True
33
+ # except ImportError:
34
+ # HUGGINFACE_SPACES_INSTALLED = False
35
+
36
+ # css = """
37
+ # #img-display-container {
38
+ # max-height: 100vh;
39
+ # }
40
+ # #img-display-input {
41
+ # max-height: 100vh;
42
+ # }
43
+ # #img-display-output {
44
+ # max-height: 100vh;
45
+ # }
46
+ # #download {
47
+ # height: 62px;
48
+ # }
49
+
50
+ # #img-display-output .image-slider-image {
51
+ # object-fit: contain !important;
52
+ # width: 100% !important;
53
+ # height: 100% !important;
54
+ # }
55
+ # # """
56
+
57
+ # set_seed(666)
58
+
59
+ # DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
60
+
61
+ # default_steps = 20
62
+ # model = PixelPerfectDepth(sampling_steps=default_steps)
63
+ # ckpt_path = hf_hub_download(
64
+ # repo_id="gangweix/Pixel-Perfect-Depth",
65
+ # filename="ppd.pth",
66
+ # repo_type="model"
67
+ # )
68
+ # state_dict = torch.load(ckpt_path, map_location="cpu")
69
+ # model.load_state_dict(state_dict, strict=False)
70
+ # model = model.eval()
71
+ # model = model.to(DEVICE)
72
+
73
+ # moge_model = MoGeModel.from_pretrained("Ruicheng/moge-2-vitl-normal").eval()
74
+ # moge_model = moge_model.to(DEVICE)
75
+
76
+
77
+ # 用来保存当前显示的图(全局缓存一份)
78
+ current_img = None
79
+ normalize_hist = None
80
+ g_est_nosie = None
81
+
82
 
83
  def readRAW(path):
84
 
85
  filesize = os.path.getsize(path)
86
+ print('filesize: ',filesize)
87
  if filesize == 31*40*64*2:
88
  output = np.fromfile(path, dtype=np.int16)
89
  else:
 
108
  p3 = (B3 << 2) | ((B4 >> 6) & 0x03)
109
 
110
  output = np.stack([p0, p1, p2, p3], axis=1).flatten()
 
 
 
111
  return output.reshape(31,40,64)
112
 
113
+ def on_image_click(evt: gr.SelectData):
114
+ global current_img
115
+ if current_img is None:
116
+ return None
117
 
118
+ x, y = evt.index # 注意:是 (x, y)
119
+ img = current_img.copy()
120
 
121
+ # 画一个红点
122
+ cv2.circle(img, (x, y), 5, (255, 0, 0), -1)
123
 
124
+ # current_img = img
125
+ return img
126
+
127
+ def update_image(img):
128
+ global current_img
129
+ current_img = img
130
 
131
+ def update_hist(hist):
132
+ global normalize_hist
133
+ normalize_hist = hist
134
 
135
+ def load_bin(file):
136
+ raw_hist = readRAW(file.name).astype(np.float32)
137
 
138
+ # multishot = raw_hist[..., 62] * 1024 + raw_hist[..., 63]
139
+ # raw_hist = image[1:,...].copy()
140
 
141
+
142
  multishot = (raw_hist[...,62]*1024 + raw_hist[...,63])
143
+ normalize_data = 1 / multishot * 1/1024
144
+ nor_hist = (raw_hist[...,:-2]) * normalize_data[...,np.newaxis]
145
+
 
146
 
 
147
 
148
+ # nor_hist = raw_hist / (multishot[..., None] + 1e-6) # 防止除0
 
 
 
149
 
150
+ img = np.log1p(np.sum(nor_hist[1:, :, :-2], axis=2))
151
 
152
+ norm_img = (img - img.min()) / (img.max() - img.min() + 1e-6)
153
 
154
+ img_tc_zoomed = np.kron((norm_img * 255).astype(np.uint8),
155
+ np.ones((16, 16), dtype=np.uint8))
 
 
 
 
156
 
157
+ update_image(img_tc_zoomed)
158
+ update_hist(nor_hist)
159
 
160
+ return raw_hist,img_tc_zoomed
 
 
 
 
 
 
 
 
 
 
 
161
 
162
+ cmap = matplotlib.colormaps.get_cmap('viridis')
163
+ def gray_to_color_zoom(img_1ch):
164
+ # img_1ch: H×W, uint8 or float
165
+ img_norm = (img_1ch - img_1ch.min()) / (img_1ch.max() - img_1ch.min() + 1e-6)
166
 
167
+ color_img = cmap(img_norm)[..., :3] # 取 RGB,去掉 alpha
168
+ color_img = (color_img * 255).astype(np.uint8)
169
+ color_img = np.repeat(np.repeat(color_img, 16, axis=0), 16, axis=1)
170
+ return color_img
171
 
172
+ def main(share=True):
173
+ print("Initializing Demo...")
174
+ title = "# VisionICs 3D DEMO"
175
+ description = """ 上传 `.bin/.raw` 文件,点击图像像素查看该像素的直方图 """
176
 
177
+ # @(spaces.GPU if HUGGINFACE_SPACES_INSTALLED else (lambda x: x))
178
+ # def predict_depth(image, denoise_steps):
179
+ # depth, resize_image = model.infer_image(image, sampling_steps=denoise_steps)
180
+ # return depth, resize_image
181
 
182
+ # @(spaces.GPU if HUGGINFACE_SPACES_INSTALLED else (lambda x: x))
183
+ # def predict_moge_depth(image):
184
+ # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
185
+ # image = torch.tensor(image / 255, dtype=torch.float32, device=DEVICE).permute(2, 0, 1)
186
+ # metric_depth, mask, intrinsics = moge_model.infer(image)
187
+ # metric_depth[~mask] = metric_depth[mask].max()
188
+ # return metric_depth, mask, intrinsics
189
+ def estimate_noise(hist,noise_filter_steps):
190
+ noise_hist = np.sort(hist, axis=2)[..., ::-1][...,32:]
191
+ lower_bound = np.median(noise_hist, axis=2)
192
+ est_nosie = (lower_bound + noise_filter_steps * np.std(noise_hist,axis=2))
193
+ return est_nosie
194
+
195
+
196
+ def mean_pool_same_axis2(arr, k=3):
197
+ pad = k // 2
198
+ # reflect padding,最像真实数据
199
+ arr_pad = np.pad(arr, ((0,0),(0,0),(pad,pad)), mode='median')
200
 
201
+ H, W, C = arr.shape
202
+ out = np.zeros_like(arr)
203
 
204
+ for i in range(C):
205
+ window = arr_pad[:, :, i : i + k]
206
+ # out[:, :, i] = np.median(window, axis=2)
207
+ out[:, :, i] = np.mean(window, axis=2)
208
 
209
+ return out
210
+
211
+ def median_pool_same_axis2(arr, k=12):
212
+ pad = k // 2
213
+ # reflect padding,最像真实数据
214
+ arr_pad = np.pad(arr, ((0,0),(0,0),(pad,pad)), mode='median')
215
+
216
+ H, W, C = arr.shape
217
+ out = np.zeros_like(arr)
218
+
219
+ for i in range(C):
220
+ window = arr_pad[:, :, i : i + k]
221
+ out[:, :, i] = np.median(window, axis=2)
222
+
223
+ return out
224
+ def min_pool_same_axis2(arr, k=12):
225
+ pad = k // 2
226
+ # reflect padding,最像真实数据
227
+ arr_pad = np.pad(arr, ((0,0),(0,0),(pad,pad)), mode='median')
228
+
229
+ H, W, C = arr.shape
230
+ out = np.zeros_like(arr)
231
+
232
+ for i in range(C):
233
+ window = arr_pad[:, :, i : i + k]
234
+ out[:, :, i] = np.min(window, axis=2)
235
+
236
+ return out
237
+ def plot_pixel_histogram(evt: gr.SelectData, raw_hist, show_filter_hist):
238
+ CUSTOM_COLORS = [
239
+ "#1f77b4", # 蓝
240
+ "#ff7f0e", # 橙
241
+ "#2ca02c", # 绿
242
+ "#d62728", # 红
243
+ "#9467bd", # 紫
244
+ ]
245
+
246
+ # print("evt:", evt)
247
+ x, y = evt.index # Gradio SelectData 对象
248
+ x = x // 16
249
+ y = y // 16
250
+ # multishot = (raw_hist[...,62]*1024 + raw_hist[...,63])
251
+ # normalize_data = 1 / multishot *25e4 * 1/1024
252
+ # nor_hist = (raw_hist[...,:-2]) * normalize_data[...,np.newaxis]
253
+
254
+ ego_tof_hist = raw_hist[y+1, x, :]
255
+ # fig = go.Figure()
256
+ fig = make_subplots(specs=[[{"secondary_y": True}]])
257
+
258
+
259
+ fig.add_trace(go.Scatter(y=ego_tof_hist, mode="lines+markers",name="Raw"),secondary_y=False)
260
+
261
+ if normalize_hist is not None and show_filter_hist:
262
+ global g_est_nosie
263
+ ego_normalize_hist = normalize_hist[y, x, :]
264
+ ego_tof = np.argmax(ego_normalize_hist)
265
+ fig.add_trace(go.Scatter(y=ego_normalize_hist, mode="lines+markers",name="Filtered"),secondary_y=True)
266
+ fig.add_vline(
267
+ x=ego_tof,
268
+ line_dash="dash",
269
+ line_width=2
270
+ )
271
+ # fig.add_hline(
272
+ # y = g_est_nosie[y, x],
273
+ # line_dash="dash",
274
+ # line_width=2,secondary_y=True
275
+ # )
276
+ # print('est nosie ', g_est_nosie[y, x])
277
+
278
+
279
+ fig.update_layout(
280
+ title=f"Pixel ({x}, {y}) 在所有 {ego_tof} ",
281
+ xaxis_title="帧索引 (T)",
282
+ yaxis_title="强度值",
283
+ # yaxis=dict(
284
+ # range=[y_min, y_max]) # Set the min and max for y-axis
285
+ )
286
+
287
+
288
+ return fig
289
+
290
+
291
+
292
+
293
+ def on_submit(image,cycle_steps, neighbor_filter_steps, noise_filter_steps, apply_scatter_filter,apply_ref_filter,apply_noise_filter,tof_range_min_steps,tof_range_max_steps, request: gr.Request = None):
294
+ global g_est_nosie
295
+ raw_hist = image[1:,...].copy()
296
+
297
+
298
+ low, high = [tof_range_min_steps,tof_range_max_steps]
299
+
300
+ t0 = time.perf_counter()
301
+ multishot = (raw_hist[...,62]*1024 + raw_hist[...,63])
302
+ normalize_data = 1 / multishot *cycle_steps * 1/1024
303
+ nor_hist = (raw_hist[...,:-2]) * normalize_data[...,np.newaxis]
304
+
305
+ dcr_cps = 4000
306
+ bin_size_ns = 0.25
307
+ total_bin = 62
308
+ integ_time_s = total_bin * bin_size_ns * cycle_steps * 1e-9
309
+
310
+ count_per_bin = dcr_cps * integ_time_s *normalize_data
311
+ nor_hist = nor_hist - (count_per_bin)[...,np.newaxis]
312
+ nor_hist[nor_hist<0] = 0
313
+
314
+ filter_hist = np.zeros_like(nor_hist)
315
+
316
+ nor_hist[...,:low] = 0
317
+ nor_hist[...,high:] = 0
318
+
319
+ if apply_scatter_filter:
320
+ est_nosie = median_pool_same_axis2(nor_hist,k=12)
321
+ # est_nosie = min_pool_same_axis2(nor_hist,k=12)
322
+ sqrt_nosie = np.sqrt(est_nosie)
323
+ est_nosie = est_nosie + noise_filter_steps * sqrt_nosie
324
+
325
+ g_est_nosie = est_nosie
326
+ nor_hist = nor_hist - est_nosie
327
+ nor_hist[nor_hist<0] = 0
328
+ bin_range = 3
329
+ for i in range(0,62,bin_range):
330
+ map = (nor_hist[...,i:i+bin_range])
331
+ ratio = 1/(np.max(map)-np.min(map))*255
332
+ data = (map-np.min(map)) * ratio
333
+ # _, otsu_thresh = cv2.threshold(data.flatten().astype(np.uint8), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
334
+ _, otsu_thresh = cv2.threshold(data.flatten().astype(np.uint8), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_TRIANGLE)
335
+ mask = map > ( _ ) / ratio
336
+ filter_map = map * mask
337
+ filter_hist[...,i:i+bin_range] = filter_map
338
+
339
+
340
+
341
+ t1 = time.perf_counter()
342
+ print("elapsed:", (t1 - t0)*1e3, "miliseconds")
343
+
344
+ mask_peak_filter = 1
345
+
346
+ filter_hist = filter_hist * 25e3
347
+
348
+ kernel = np.ones((3, 3, 3), dtype=np.float32)
349
+ kernel[1, 1, 1] = 0
350
+
351
+ mask_filter = (filter_hist > 1).astype(np.uint8)
352
+ out = convolve(mask_filter, kernel, mode='nearest', cval=0.0)
353
+ mask = out >= neighbor_filter_steps
354
+ print(filter_hist,out,mask)
355
+
356
+ # filter_hist = mask * filter_hist
357
+ filter_hist = np.where(mask, filter_hist, 0)
358
+ edge_range = 3
359
+ filter_hist[...,:edge_range] = 0
360
+ nor_hist[...,:edge_range] = 0
361
+ filter_hist[...,-edge_range:] = 0
362
+ nor_hist[...,-edge_range:] = 0
363
+ if apply_scatter_filter:
364
+ tof = np.argmax(filter_hist,axis=2)
365
+ else:
366
+ tof = np.argmax(nor_hist,axis=2)
367
+
368
+ peak = np.take_along_axis(nor_hist, tof[..., None], axis=2)[..., 0]
369
+
370
+ update_hist(filter_hist)
371
+
372
+ if apply_noise_filter:
373
+ # th = est_nosie
374
+ est_nosie = mean_pool_same_axis2(nor_hist,8)
375
+ sqrt_nosie = np.sqrt(est_nosie)
376
+ est_nosie = est_nosie + noise_filter_steps * sqrt_nosie
377
+ th = np.take_along_axis(est_nosie, tof[..., None], axis=2)[..., 0]
378
+
379
+ else:
380
+ th = 0
381
+
382
+ mask_nosie = peak > th
383
+ tof = tof * mask_nosie * mask_peak_filter
384
+ img_tof_zoomed= gray_to_color_zoom(tof)
385
+ peak = np.log1p(peak)
386
+ img_peak_zoomed= gray_to_color_zoom(peak)
387
+
388
+ # tof = tof/np.max(tof+1e-7)*255
389
+ # norm_tof = (tof - tof.min()) / (tof.max() + 1e-8)
390
+ # norm_tof_uint8 = (norm_tof * 255).astype(np.uint8)
391
+ # img_tof_zoomed = np.repeat(np.repeat(norm_tof_uint8, 16, axis=0), 16, axis=1)
392
+
393
+ # peak = peak/np.max(peak+1e-7)*255
394
+ # norm_peak = (peak - peak.min()) / (peak.max() + 1e-8)
395
+ # = (norm_peak * 255).astype(np.uint8)
396
+ # = np.repeat(np.repeat(norm_peak_uint8, 16, axis=0), 16, axis=1)
397
+
398
+ # H, W = image.shape[:2]
399
+ # ppd_depth, resize_image = predict_depth(image[:, :, ::-1], denoise_steps)
400
+ # resize_H, resize_W = resize_image.shape[:2]
401
+
402
+ # # moge provide metric depth and intrinsics
403
+ # moge_depth, mask, intrinsics = predict_moge_depth(resize_image)
404
+
405
+ # # relative depth -> metric depth
406
+ # metric_depth = recover_metric_depth_ransac(ppd_depth, moge_depth, mask)
407
+ # intrinsics[0, 0] *= resize_W
408
+ # intrinsics[1, 1] *= resize_H
409
+ # intrinsics[0, 2] *= resize_W
410
+ # intrinsics[1, 2] *= resize_H
411
+
412
+ # # metric depth -> point cloud
413
+ # pcd = depth2pcd(metric_depth, intrinsics, color=cv2.cvtColor(resize_image, cv2.COLOR_BGR2RGB), input_mask=mask, ret_pcd=True)
414
+ # if apply_filter:
415
+ # cl, ind = pcd.remove_statistical_outlier(nb_neighbors=20, std_ratio=2.0)
416
+ # pcd = pcd.select_by_index(ind)
417
+
418
+ # tempdir = Path(tempfile.gettempdir(), 'ppd')
419
+ # tempdir.mkdir(exist_ok=True)
420
+ # output_path = Path(tempdir, request.session_hash)
421
+ # shutil.rmtree(output_path, ignore_errors=True)
422
+ # output_path.mkdir(exist_ok=True, parents=True)
423
+
424
+ # ply_path = os.path.join(output_path, 'pointcloud.ply')
425
+
426
+ # # save pcd to temporary .ply
427
+ # pcd.points = o3d.utility.Vector3dVector(
428
+ # np.asarray(pcd.points) * np.array([1, -1, -1], dtype=np.float32)
429
+ # )
430
+ # o3d.io.write_point_cloud(ply_path, pcd)
431
+ # vertices = np.asarray(pcd.points)
432
+ # vertex_colors = (np.asarray(pcd.colors) * 255).astype(np.uint8)
433
+ # mesh = trimesh.PointCloud(vertices=vertices, colors=vertex_colors)
434
+ # glb_path = os.path.join(output_path, 'pointcloud.glb')
435
+ # mesh.export(glb_path)
436
+
437
+
438
+ # # save raw depth (npy)
439
+ # depth = cv2.resize(ppd_depth, (W, H), interpolation=cv2.INTER_LINEAR)
440
+ # raw_depth_path = os.path.join(output_path, 'raw_depth.npy')
441
+ # np.save(raw_depth_path, depth)
442
+
443
+ # depth_vis = (depth - depth.min()) / (depth.max() - depth.min() + 1e-5) * 255.0
444
+ # depth_vis = depth_vis.astype(np.uint8)
445
+ # colored_depth = (cmap(depth_vis)[:, :, :3] * 255).astype(np.uint8)
446
+
447
+ # split_region = np.ones((image.shape[0], 50, 3), dtype=np.uint8) * 255
448
+ # combined_result = cv2.hconcat([image[:, :, ::-1], split_region, colored_depth[:, :, ::-1]])
449
+
450
+ # vis_path = os.path.join(output_path, 'image_depth_vis.png')
451
+ # cv2.imwrite(vis_path, combined_result)
452
+
453
+ # file_names = ["image_depth_vis.png", "raw_depth.npy", "pointcloud.ply"]
454
+
455
+ # download_files = [
456
+ # (output_path / name).as_posix()
457
+ # for name in file_names
458
+ # if (output_path / name).exists()
459
+ # ]
460
 
461
+ # return [(image, colored_depth), glb_path, download_files]
462
+ return [img_tof_zoomed,img_peak_zoomed]
463
 
464
+
465
+ def draw_slice(input_image,slice_steps):
466
+
467
+ raw_hist = input_image[1:,...] #remove embd
468
 
469
+
470
+
 
471
 
472
+ print(slice_steps)
473
+ multishot = (raw_hist[...,62]*1024 + raw_hist[...,63])
474
+ normalize_data = 1 / multishot *25e4
475
+ nor_hist = (raw_hist) * normalize_data[...,np.newaxis]
476
 
477
+ est_nosie = median_pool_same_axis2(nor_hist,k=12)
478
+ sqrt_nosie = np.sqrt(est_nosie)
479
+ est_nosie = est_nosie + 0.4 * sqrt_nosie
480
+
481
+ nor_hist = nor_hist - est_nosie
482
+ nor_hist[nor_hist<0] = 0
483
 
484
+ slice_img = nor_hist[...,slice_steps]
 
 
 
 
485
 
486
+ map = (nor_hist[...,slice_steps])
487
 
488
+ ratio = 1/(np.max(map)-np.min(map))*255
489
+ data = (map-np.min(map)) * ratio
490
+ nonzero = data[data > 0] # 排除 0
491
+ # _, otsu_thresh = cv2.threshold(data.flatten().astype(np.uint8), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
492
+ _, otsu_thresh = cv2.threshold(nonzero.flatten().astype(np.uint8), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_TRIANGLE)
493
+ mask = map > ( _ ) / ratio
494
 
 
495
 
496
+ rgb_img = gray_to_color_zoom(slice_img)
497
+ rgb_mask_img = gray_to_color_zoom(mask.astype(np.float32))
498
+ print(mask.shape)
499
+ return rgb_img,rgb_mask_img
500
 
501
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
502
+ gr.Markdown(title)
503
+ gr.Markdown(description)
504
+ gr.Markdown("### Simple Elegant Algorithm")
505
+
506
+ file_input = gr.File(label="上传 .raw/.bin/.txt 文件", file_types=[".raw", ".bin", ".txt"])
507
+ input_image = gr.State()
508
+
509
+ with gr.Row():
510
+ # Left: input image + settings
511
+ with gr.Column():
512
+ total_count_image = gr.Image(label="Total Count Image", image_mode="RGB", type='numpy', elem_id='img-display-input')
513
+ with gr.Column():
514
+ histogram = gr.Plot(label="像素直方图")
515
+ with gr.Row():
516
+ # Right: 3D point cloud + depth
517
+ with gr.Column():
518
+ tof_image = gr.Image(label="ToF Image", image_mode="RGB", type='numpy', elem_id='img-display-input')
519
+ with gr.Column():
520
+ peak_image = gr.Image(label="Peak Image", image_mode="RGB", type='numpy', elem_id='img-display-input')
521
+
522
+ with gr.Row():
523
+ with gr.Column():
524
+ submit_btn = gr.Button(value="Predict")
525
+ with gr.Accordion(label="Settings", open=False):
526
+ show_filter_hist = gr.Checkbox(label="Show Filter HIST", value=False)
527
+
528
+ cycle_steps = gr.Slider(label="reflect filter Steps", minimum=1, maximum=262144, value=25e4, step=1)
529
+
530
+ tof_range_min_steps = gr.Slider(label="ToF Range Max Steps", minimum=0, maximum=62, value=5, step=1)
531
+ tof_range_max_steps = gr.Slider(label="ToF Range Min Steps", minimum=0, maximum=62, value=60, step=1)
532
+
533
+
534
+
535
+ apply_scatter_filter = gr.Checkbox(label="Apply scatter filter points", value=True)
536
+
537
+ apply_ref_filter = gr.Checkbox(label="Apply reflect filter points", value=False)
538
+ neighbor_filter_steps = gr.Slider(label="reflect filter Steps", minimum=1 , maximum=26, value=12, step=1)
539
+
540
+ apply_noise_filter = gr.Checkbox(label="Apply noise filter points", value=False)
541
+ noise_filter_steps = gr.Slider(label="noise filter Steps (STD)", minimum=0, maximum=1, value=0.3, step=0.01)
542
+
543
+ # with gr.Accordion(label="Settings", open=False):
544
+
545
+ with gr.Row():
546
+
547
+ with gr.Column():
548
+ slice_steps = gr.Slider(label="Slice Steps", minimum=0, maximum=63, value=0, step=1)
549
+ slice_image = gr.Image(label="Slice Image", image_mode="RGB", type='numpy')
550
+
551
+
552
+ with gr.Column():
553
+ binary_th_steps = gr.Slider(label="Binary Steps", minimum=0, maximum=256, value=128, step=1)
554
+ slice_histogram = gr.Image(label="Slice Image", image_mode="RGB", type='numpy')
555
+ mask_image = gr.Image(label="Mask Image", image_mode="RGB", type='numpy')
556
+
557
+ # with gr.Column():
558
+ # noise_image = gr.Image(label="Nosie Image", image_mode="RGB", type='numpy', elem_id='img-display-input')
559
+ # with gr.Column():
560
+ # multishot_image = gr.Image(label="Multishot Image", image_mode="RGB", type='numpy', elem_id='img-display-input')
561
+
562
+ # with gr.Tabs():
563
+ # with gr.Tab("3D View"):
564
+ # model_3d = gr.Model3D(display_mode="solid", label="3D Point Map", clear_color=[1,1,1,1], height="60vh")
565
+ # with gr.Tab("Depth"):
566
+ # depth_map = ImageSlider(label="Depth Map with Slider View", elem_id='img-display-output', position=0.5)
567
+ # with gr.Tab("Download"):
568
+ # download_files = gr.File(type='filepath', label="Download Files")
569
+
570
+
571
+ file_input.change(load_bin, inputs=file_input, outputs=[input_image,total_count_image])
572
+
573
+ # total_count_image.change(
574
+ # fn=update_image,
575
+ # inputs=total_count_image,
576
+ # outputs=total_count_image
577
+ # )
578
+
579
+
580
+ total_count_image.select(plot_pixel_histogram, inputs=[ input_image ,show_filter_hist], outputs=[histogram])
581
+
582
+ total_count_image.select(
583
+ fn=on_image_click,
584
+ outputs=total_count_image
585
+ )
586
+
587
+ submit_btn.click(
588
+ fn=lambda: [None, None, None, "", "", ""]
589
+ ,outputs=[tof_image,peak_image]
590
+ ).then(
591
+ fn=on_submit,
592
+ inputs=[input_image,cycle_steps, neighbor_filter_steps, noise_filter_steps, apply_scatter_filter,apply_ref_filter,apply_noise_filter,tof_range_min_steps,tof_range_max_steps]
593
+ ,outputs=[tof_image,peak_image]
594
+ )
595
+
596
+ slice_steps.change(
597
+ draw_slice,
598
+ inputs=[input_image,slice_steps],
599
+ outputs=[slice_image,mask_image]
600
+ )
601
+
602
+ # example_files = os.listdir('assets/examples')
603
+ # example_files.sort()
604
+ # example_files = [os.path.join('assets/examples', filename) for filename in example_files]
605
+ # examples = gr.Examples(
606
+ # examples=example_files,
607
+ # inputs=input_image,
608
+ # outputs=[depth_map, model_3d, download_files],
609
+ # fn=on_submit,
610
+ # cache_examples=False
611
+ # )
612
+
613
+ demo.queue().launch(share=share)
614
+
615
+ if __name__ == '__main__':
616
+ main(share=False)
617
+
618
+
619
+
620
+ def mean_pool_same_axis2(arr, k=3):
621
+ pad = k // 2
622
+ # reflect padding,最像真实数据
623
+ arr_pad = np.pad(arr, ((0,0),(0,0),(pad,pad)), mode='median')
624
+
625
+ H, W, C = arr.shape
626
+ out = np.zeros_like(arr)
627
+
628
+ for i in range(C):
629
+ window = arr_pad[:, :, i : i + k]
630
+ # out[:, :, i] = np.median(window, axis=2)
631
+ out[:, :, i] = np.mean(window, axis=2)
632
+
633
+ return out
634
+
635
+ def median_pool_same_axis2(arr, k=12):
636
+ pad = k // 2
637
+ # reflect padding,最像真实数据
638
+ arr_pad = np.pad(arr, ((0,0),(0,0),(pad,pad)), mode='median')
639
+
640
+ H, W, C = arr.shape
641
+ out = np.zeros_like(arr)
642
+
643
+ for i in range(C):
644
+ window = arr_pad[:, :, i : i + k]
645
+ out[:, :, i] = np.median(window, axis=2)
646
+
647
+ return out
648
+
649
+ # raw_hist (30,40,64)
650
+ # bin_range = 3
651
+
652
+
653
+ # multishot = (raw_hist[...,62]*1024 + raw_hist[...,63])
654
+ # normalize_data = 1 / multishot *cycle_steps * 1/1024
655
+ # nor_hist = (raw_hist[...,:-2]) * normalize_data[...,np.newaxis]
656
+
657
+ # filter_hist = np.zeros_like(nor_hist)
658
+ # est_nosie = median_pool_same_axis2(nor_hist,k=12)
659
+ # sqrt_nosie = np.sqrt(est_nosie)
660
+ # est_nosie = est_nosie + noise_filter_steps * sqrt_nosie
661
+
662
+ # g_est_nosie = est_nosie
663
+ # nor_hist = nor_hist - est_nosie
664
+ # nor_hist[nor_hist<0] = 0
665
+
666
+ # for i in range(0,62,bin_range):
667
+ # map = (nor_hist[...,i:i+bin_range])
668
+
669
+
670
 
671
+ # ratio = 1/(np.max(map)-np.min(map))*255
672
+ # data = (map-np.min(map)) * ratio
 
 
673
 
674
+ # _, otsu_thresh = cv2.threshold(data.flatten().astype(np.uint8), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_TRIANGLE)
675
 
676
+ # mask = map > ( _ ) / ratio
677
+ # filter_map = map * mask
678
+ # # filter_map = map * mask
679
+ # filter_hist[...,i:i+bin_range] = filter_map
680
 
681
+ # tof = np.argmax(filter_hist,axis=2)
682
+ # peak = np.take_along_axis(nor_hist, tof[..., None], axis=2)[..., 0]
app_.py ADDED
@@ -0,0 +1,483 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import numpy as np
3
+ import plotly.graph_objs as go
4
+ from scipy.ndimage import convolve
5
+ import os
6
+ import matplotlib.pyplot as plt
7
+ from scipy.signal import find_peaks
8
+ from scipy.ndimage import gaussian_filter1d
9
+ import cv2
10
+
11
def readRAW(path):
    """Load a ToF histogram capture into a (31, 40, 64) array.

    Two on-disk layouts are supported, distinguished by file size:
      * plain int16: exactly 31*40*64 16-bit samples;
      * packed RAW10: groups of 5 bytes encode 4 ten-bit samples each
        (4 MSB bytes followed by one byte carrying the four 2-bit LSB pairs).

    Parameters
    ----------
    path : str
        Path to the ``.raw``/``.bin`` capture file.

    Returns
    -------
    numpy.ndarray
        Array of shape (31, 40, 64). By convention frame 0 carries
        embedded metadata (see the callers in this file).

    Raises
    ------
    ValueError
        If the decoded sample count cannot be reshaped to (31, 40, 64).
    """
    filesize = os.path.getsize(path)
    if filesize == 31 * 40 * 64 * 2:
        # Plain int16 layout: read samples directly.
        output = np.fromfile(path, dtype=np.int16)
    else:
        # Packed RAW10 layout: every 5-byte block -> 4 ten-bit samples.
        with open(path, "rb") as f:
            raw_data = f.read()

        raw10 = np.frombuffer(raw_data, dtype=np.uint8)
        n_blocks = raw10.shape[0] // 5
        # Drop any trailing partial block before unpacking.
        raw10 = raw10[:n_blocks * 5].reshape(-1, 5)

        B0 = raw10[:, 0].astype(np.uint16)
        B1 = raw10[:, 1].astype(np.uint16)
        B2 = raw10[:, 2].astype(np.uint16)
        B3 = raw10[:, 3].astype(np.uint16)
        B4 = raw10[:, 4]

        # Each sample = 8 MSBs from its own byte + 2 LSBs from the 5th byte.
        p0 = (B0 << 2) | ((B4 >> 0) & 0x03)
        p1 = (B1 << 2) | ((B4 >> 2) & 0x03)
        p2 = (B2 << 2) | ((B4 >> 4) & 0x03)
        p3 = (B3 << 2) | ((B4 >> 6) & 0x03)

        output = np.stack([p0, p1, p2, p3], axis=1).flatten()

    # Validate explicitly so a truncated/corrupt capture fails with a
    # clear message instead of an opaque reshape error.
    expected = 31 * 40 * 64
    if output.size != expected:
        raise ValueError(
            f"unexpected sample count {output.size} in {path!r}; "
            f"expected {expected}"
        )
    return output.reshape(31, 40, 64)
43
+
44
+
45
def load_bin(file):
    """Load a capture and build the preview images shown in the UI.

    Parameters
    ----------
    file : file-like
        Gradio file object; only ``file.name`` (the path) is used.

    Returns
    -------
    tuple
        ``(img_tc_zoomed, img_tof_zoomed, raw_hist, nor_hist)`` where the
        first two are uint8 preview images upscaled 16x for display,
        ``raw_hist`` is the raw (31, 40, 64) histogram and ``nor_hist``
        the shot-count-normalized histogram.
    """
    raw_hist = readRAW(file.name).astype(np.float32)

    # Bins 62/63 of every pixel encode its shot count (high*1024 + low);
    # normalize all histograms to a common 4e4-shot budget.
    multishot = raw_hist[..., 62] * 1024 + raw_hist[..., 63]
    # Guard against a zero shot count, which would propagate inf/NaN.
    multishot = np.where(multishot == 0, 1, multishot)
    normalize_data = 1 / multishot * 4e4 * 1 / 1023
    nor_hist = raw_hist * normalize_data[..., np.newaxis]

    def _to_u8(img):
        # Proper min-max normalization to [0, 255]; the epsilon avoids
        # division by zero on a flat image.
        norm = (img - img.min()) / (img.max() - img.min() + 1e-8)
        return (norm * 255).astype(np.uint8)

    # --- total-counts preview: log-sum over the signal bins ---
    # Row 0 carries embedded metadata, bins 62/63 the shot count -> skip.
    tc = np.log(np.sum(nor_hist[1:, :, :-2], axis=2) + 1)
    img_tc_zoomed = np.repeat(np.repeat(_to_u8(tc), 16, axis=0), 16, axis=1)

    # --- ToF preview: per-pixel argmax bin (offset 15 skips near bins) ---
    tof_img = np.argmax(nor_hist[1:, :, 15:-2], axis=2) + 15

    # Noise estimate from the trailing bins; threshold = mean + 3*sigma
    # (Poisson assumption). Mask currently kept only for diagnostics.
    nosie_est = np.mean(nor_hist[1:, :, -6:-3], axis=2)
    th = nosie_est + 3 * np.sqrt(nosie_est)
    peak = np.max(nor_hist[1:, :, 5:-2], axis=2)
    mask = peak > th  # intentionally unused for display
    print('std of tof', np.std(tof_img.flatten()),
          'std of peak', np.std(peak.flatten()))

    img_tof_zoomed = np.repeat(np.repeat(_to_u8(tof_img), 16, axis=0), 16, axis=1)

    return img_tc_zoomed, img_tof_zoomed, raw_hist, nor_hist
90
+
91
+
92
def plot_pixel_histogram(evt: gr.SelectData, raw_hist, nor_hist):
    """Handle a click on the ToF image: plot the clicked pixel's histogram
    and rebuild the similarity map against that pixel's peak neighborhood.

    Parameters
    ----------
    evt : gr.SelectData
        Click event; ``evt.index`` holds display coordinates (16x zoom).
    raw_hist : numpy.ndarray
        Raw (31, 40, 64) histogram; row 0 carries embedded telemetry.
    nor_hist : numpy.ndarray
        Shot-count-normalized histogram (same shape). Mutated in place
        below (clipped at 3e3) -- NOTE(review): this mutates Gradio state.

    Returns
    -------
    tuple
        (plotly figure, 16x-zoomed similarity image, raw similarity map).
    """
    # Map display coordinates (16x zoom) back to sensor pixel coordinates.
    x, y = evt.index # Gradio SelectData object
    x = x // 16
    y = y // 16

    # Per-pixel baseline removal: subtract each pixel's minimum over the
    # signal bins, then clamp negatives to zero.
    raw_hist = raw_hist - np.min(raw_hist[...,:-5],axis=2)[...,np.newaxis]
    raw_hist[raw_hist<0] = 0

    # Unused; left over from a removed per-bin Otsu scatter-rejection experiment.
    rm_scatter_hist = np.zeros_like(raw_hist)

    # Clicked pixel and its 4-neighborhood. Note the +1 row offset: row 0
    # of raw_hist is telemetry, so display row y maps to raw row y+1.
    # NOTE(review): x-1 wraps to the last column when x == 0, and y+2 can
    # go out of range on the bottom row -- confirm intended behavior.
    raw_values = raw_hist[y+1, x, :]
    raw_values1 = raw_hist[y+2, x, :]
    raw_values2 = raw_hist[y, x, :]
    raw_values3 = raw_hist[y+1, x+1, :]
    raw_values4 = raw_hist[y+1, x-1, :]

    # Coarse ToF of the clicked pixel: argmax over bins 10..58.
    tof = np.argmax(nor_hist[y+1, x, 10:-5]) + 10

    # Unused; kept for masking experiments.
    tof_map = np.argmax(nor_hist[1:, :, 5:-5], axis=2)

    # Sub-bin refinement: smooth with a [1,2,1] kernel, then apply a
    # three-point parabolic peak interpolation around the argmax bin.
    kernel = np.array(([1,2,1]), dtype=np.int32)

    result_conv = convolve(raw_values, kernel, mode='constant', cval=0)
    I4 = tof

    # NOTE(review): I3/I5 can index out of range when the peak sits at a
    # histogram edge -- confirm the argmax window guarantees margin.
    I3 = I4-1
    I5 = I4+1
    C3 = result_conv[I3]
    C4 = result_conv[I4]
    C5 = result_conv[I5]
    # Fractional bin shift from the parabola through (C3, C4, C5).
    shift_mat = (C5-C3)/(4.0 * C4 -2.0 * C3 - 2.0 * C5)

    # Convert (bin + fraction) to millimeters.
    # NOTE(review): assumes 500 ps bins and 0.15 mm/ps -- confirm constants.
    sr_tof = (tof + shift_mat ) * 500 * 0.15

    # Per-pixel noise floor, estimated from the first three bins.
    noise = np.mean(nor_hist[1:,...,:3],axis=2)

    range_hist = 3  # half-width (bins) of the window around the peak

    nor_hist[nor_hist>3e3] = 3e3  # clip outliers, in place
    epsilon=1e-10
    # Noise-subtracted window of the clicked pixel ...
    array = (nor_hist[y+1, x, tof-range_hist:tof+range_hist+1]) - noise[y,x]
    safe_array = np.where(array <= 0, epsilon, array)
    sim_values = (safe_array)
    # ... and the same window for every pixel.
    array = (nor_hist[1:, :, tof-range_hist:tof+range_hist+1]) - noise[...,np.newaxis]
    safe_array = np.where(array <= 0, epsilon, array)
    histogram_sim = (safe_array)
    print(sim_values.shape, histogram_sim.shape,noise.shape)

    # Similarity map: inner product of every pixel's window with the
    # clicked pixel's window, then scaled to [0, 255].
    img = np.tensordot(sim_values,histogram_sim, axes=(0, 2))
    print(np.max(img))

    img = img/np.max(img+1e-7)*255

    print('selected value: ',img[y,x],img.shape)

    # (removed: large commented-out per-pixel Otsu-thresholding experiment
    # over the similarity map)

    norm_img = (img - img.min()) / (img.max() + 1e-8)
    img_uint8 = (norm_img * 255).astype(np.uint8)
    img_tof_zoomed = np.repeat(np.repeat(img_uint8, 16, axis=0), 16, axis=1)

    # Decode embedded telemetry from row 0 (samples carry 2 LSB padding).
    # NOTE(review): register indices and conversion constants below appear
    # to come from the sensor datasheet -- confirm against the spec.
    vctEmbd = raw_hist[:1,:,:].flatten().astype(np.int32) >> 2
    fRX_Temp = (vctEmbd[15] << 3) + vctEmbd[14]

    LDVCC = (((((vctEmbd[65] << 8) + vctEmbd[64])) - 1024) / 1024 * 1.7 * 0.9638 + 1.42) * 6
    fTx_Temp = (((vctEmbd[67] << 8) + vctEmbd[66] - 1024) / 5.34 + 30)
    BVD = vctEmbd[23]

    # Y-axis limits derived from the clicked pixel's signal bins.
    y_min = np.min(raw_values[:-2]) - 10
    y_max = np.max(raw_values[:-2]) + 10

    CUSTOM_COLORS = [
        "#1f77b4", # blue
        "#ff7f0e", # orange
        "#2ca02c", # green
        "#d62728", # red
        "#9467bd", # purple
    ]
    # Dash styles for the per-neighbor vlines (drawing currently disabled).
    dash_styles = ["solid", "dash", "dot", "dashdot"]
    fig = go.Figure()
    start = 5      # skip the first bins when searching for the peak
    range_num = 4  # half-width (bins) of the comparison window

    hist_list = [raw_values1,raw_values2,raw_values3,raw_values4]
    ego_tof = np.argmax(raw_values[start:-5 ]) +start

    # Plot the clicked ("ego") pixel's histogram and mark its peak bin.
    color = CUSTOM_COLORS[0]
    fig.add_trace(go.Scatter(y=raw_values, mode="lines+markers",line_color=color))

    fig.add_vline(
        x=ego_tof,
        line_color=color,
        line_dash="solid",
        line_width=2
    )

    # Min-removed, L2-normalized window of the ego pixel around its peak.
    ego_tof_hist = raw_values[ego_tof-range_num:ego_tof+range_num+1]
    ego_tof_hist = ego_tof_hist - np.min(ego_tof_hist)
    ego_tof_hist = ego_tof_hist/np.linalg.norm(ego_tof_hist)

    # For each neighbor, compare window shapes via normalized dot products:
    #  - ego_tof_neighbor_proj: neighbor's window at the EGO peak vs ego;
    #  - neighbor_tof_ego_proj: ego's window at the NEIGHBOR peak vs the
    #    neighbor's own window there.
    ego_tof_neighbor_hist =[]
    ego_tof_neighbor_proj = []
    neighbor_tof_ego_proj = []
    for i,v in enumerate(hist_list):

        neighbor_tof = np.argmax(v[start:-5])+start

        # Neighbor's window around the ego peak, min-removed, normalized.
        neighbor_hist = v[ego_tof-range_num:ego_tof+range_num+1]
        neighbor_hist = neighbor_hist - np.min(neighbor_hist)

        neighbor_hist = neighbor_hist/np.linalg.norm(neighbor_hist)
        ego_tof_neighbor_hist.append(neighbor_hist)

        # Ego's window around the neighbor's peak.
        neighbor_tof_ego_hist = raw_values[neighbor_tof-range_num:neighbor_tof+range_num+1]
        neighbor_tof_ego_hist = neighbor_tof_ego_hist - np.min(neighbor_tof_ego_hist)

        neighbor_tof_ego_hist = neighbor_tof_ego_hist/np.linalg.norm(neighbor_tof_ego_hist)

        # Neighbor's window around its own peak.
        neighbor_tof_neighbor_hist = v[neighbor_tof-range_num:neighbor_tof+range_num+1]
        neighbor_tof_neighbor_hist = neighbor_tof_neighbor_hist - np.min(neighbor_tof_neighbor_hist)

        neighbor_tof_neighbor_hist = neighbor_tof_neighbor_hist/np.linalg.norm(neighbor_tof_neighbor_hist)

        ego_tof_neighbor_proj.append(np.dot(neighbor_hist,ego_tof_hist))
        neighbor_tof_ego_proj.append(np.dot(neighbor_tof_ego_hist,neighbor_tof_neighbor_hist))
        # Color reserved for the per-neighbor traces (drawing disabled).
        color = CUSTOM_COLORS[i % len(CUSTOM_COLORS)+1]

    # Title bundles the refined ToF and the decoded telemetry readings.
    fig.update_layout(
        title=f"Pixel ({x}, {y}) 在所有 {raw_values.shape[0]} 帧的强度变化 {f'ToF: {sr_tof:.1f} mm'} {f'RX: {fRX_Temp} °C'} {f'TX: {fTx_Temp:.2f} °C'} {f'LDVCC: {LDVCC:.2f} V'} {f'BVD: {BVD} V'}",
        xaxis_title="帧索引 (T)",
        yaxis_title="强度值",
        yaxis=dict(
            range=[y_min, y_max]) # Set the min and max for y-axis
    )
    print('ego_tof_neighbor_proj',ego_tof_neighbor_proj)
    print('neighbor_tof_ego_proj',neighbor_tof_ego_proj)

    # Mean neighbor window (re-normalized) compared against the ego window.
    ego_tof_neighbor_hist = np.mean(np.array(ego_tof_neighbor_hist),axis=0)
    print(ego_tof_neighbor_hist)
    ego_tof_neighbor_hist = ego_tof_neighbor_hist/np.linalg.norm(ego_tof_neighbor_hist)
    print('mean ',np.dot(ego_tof_neighbor_hist,ego_tof_hist))
    fig.add_trace(go.Scatter(y=ego_tof_neighbor_hist, mode="lines"))
    fig.add_trace(go.Scatter(y=ego_tof_hist, mode="lines+markers"))
    return fig, img_tof_zoomed,img
314
+
315
+ # def plot_depth(nor_hist):
316
+
317
+ # kernel = np.array([[1,1,1],[1,1,1],[1,1,1]])
318
+
319
+ # # Create an empty array to store the results
320
+ # output = np.zeros((96, 240, 254))
321
+
322
+ # # Perform the convolution along the first two axes (height and width)
323
+ # for i in range(254):
324
+ # output[:, :, i] = convolve(nor_hist[:, :, i], kernel, mode='constant', cval=0)
325
+
326
+ # modulate1 = np.arange(1,181,1)
327
+ # modulate = modulate1 * modulate1 /(180*180)
328
+ # arr = output[...,:180] * modulate
329
+
330
+ # tc_bin = np.sum(arr,axis=(0,1))
331
+ # max_id = np.argmax(tc_bin[:-2])
332
+
333
+ # # modulate = np.concatenate([a, b,c])
334
+ # pad_head = np.ones(max_id-4)
335
+ # expand_kernel = np.arange(1,13,1) * 0.01
336
+ # pad_tail = np.ones((180-len(pad_head)-len(expand_kernel)))
337
+ # expand_filter = np.concatenate([pad_head, expand_kernel,pad_tail])
338
+
339
+
340
+ # arr_expandfilter = arr * expand_filter
341
+ # tof = np.argmax(arr,axis=2)
342
+ # tof_filter = np.argmax(arr_expandfilter,axis=2)
343
+
344
+ # return tof, tof_filter
345
+
346
+
347
+
348
def find_bimodal_threshold(data, bins=50, sigma=2):
    """Estimate a separating threshold for bimodally distributed data.

    Builds a density histogram, smooths it with a Gaussian kernel, locates
    the two dominant peaks and returns the histogram value at the valley
    between them. Falls back to the median when fewer than two peaks are
    detected.

    Args:
        data: 1-D array-like of samples.
        bins: number of histogram bins.
        sigma: standard deviation (in bins) of the smoothing kernel.

    Returns:
        tuple: ``(threshold, peak_indices, hist, bin_edges)`` where
        ``peak_indices`` are all detected peak positions (bin indices),
        ``hist`` is the raw (unsmoothed) density histogram and
        ``bin_edges`` its bin boundaries.
    """
    hist, bin_edges = np.histogram(data, bins=bins, density=True)
    centers = (bin_edges[:-1] + bin_edges[1:]) / 2

    smoothed = gaussian_filter1d(hist, sigma=sigma)
    peak_indices, _props = find_peaks(smoothed, height=0.01, distance=10)

    if len(peak_indices) < 2:
        # Not clearly bimodal -- fall back to the median of the data.
        print("警告: 未检测到明显的双峰分布")
        return np.median(data), peak_indices, hist, bin_edges

    # Keep the two tallest peaks, ordered left to right.
    by_height = np.argsort(smoothed[peak_indices])
    lo, hi = np.sort(peak_indices[by_height[-2:]])

    # The deepest point between the peaks separates the two modes.
    valley = smoothed[lo:hi]
    if valley.size > 0:
        threshold = centers[lo + int(np.argmin(valley))]
    else:
        threshold = centers[lo]

    return threshold, peak_indices, hist, bin_edges
391
+
392
def draw_histogram(evt: gr.SelectData, text, bins):
    """Draw the histogram of the similarity map with Otsu/selection markers.

    Parameters
    ----------
    evt : gr.SelectData
        Click event on the zoomed (16x) image; used to recover the pixel.
    text : numpy.ndarray or None
        Similarity map produced by ``plot_pixel_histogram`` (from state).
    bins : int
        Number of histogram bins.

    Returns
    -------
    matplotlib.pyplot or None
        The pyplot module carrying the drawn figure, or ``None`` when no
        similarity map has been computed yet.
    """
    # State may still be empty (no prior click on the ToF image).
    try:
        data = text.flatten()
    except AttributeError:
        return None

    # Map display coordinates (16x zoom) back to sensor pixel coordinates.
    x, y = evt.index
    x = x // 16
    y = y // 16

    # cv2.threshold returns (threshold, binarized image); only the Otsu
    # threshold value is needed here. The map is already scaled to
    # [0, 255] upstream, so the uint8 cast is lossless apart from rounding.
    otsu_thresh, _binary = cv2.threshold(data.astype(np.uint8), 0, 255,
                                         cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # Histogram centroid, logged as an alternative threshold diagnostic.
    hist, bin_edges = np.histogram(data, bins=bins)
    indices = np.arange(len(hist))
    total_weight = np.sum(hist)
    centroid = np.sum(indices * hist) / total_weight

    print('ostu threshold: ', otsu_thresh, 'data std', np.std(data),
          ' centroid, ', centroid, 'diff ', np.abs(otsu_thresh - centroid))

    plt.figure()
    plt.hist(data, bins=bins, density=False)
    plt.xlabel("Value")
    plt.ylabel("Count")
    plt.title("Histogram")
    # Otsu threshold marker.
    plt.axvline(x=otsu_thresh, color='red', linestyle='--', linewidth=3,
                label=f'双峰阈值: {otsu_thresh:.2f}')
    # Value of the clicked pixel (row-major index into the 30x40 map).
    sel_val = data[y * 40 + x]
    plt.axvline(x=sel_val, color='green', linestyle='--', linewidth=3,
                label=f'Selected: {sel_val:.2f}')
    return plt
436
+
437
# --- Gradio UI wiring ---
with gr.Blocks() as demo:
    # Page title: upload a 31x40x64 int16 .bin/.raw file, then click an
    # image pixel to inspect that pixel's 64-bin histogram.
    gr.Markdown("## 上传 31,40,64 int16 `.bin/.raw` 文件,点击图像像素查看该像素的 64 帧直方图")

    file_input = gr.File(label="上传 .raw/.bin 文件", file_types=[".raw", ".bin"])
    image_tc_display = gr.Image(interactive=True, label="tc")    # total-counts preview
    image_tof_display = gr.Image(interactive=True, label="tof")  # argmax-ToF preview

    histogram = gr.Plot(label="像素强度曲线")  # clicked pixel's histogram
    raw_hist = gr.State()   # raw (31, 40, 64) histogram
    nor_hist = gr.State()   # shot-count-normalized histogram
    img_state = gr.State() # similarity map from the last click (replaces the old module-level img = [])

    # Currently not wired up (see the note below about the removed handler).
    bins_slider = gr.Slider(5, 200, value=64, step=1, label="Bins")

    image_sim_display = gr.Image(interactive=True, label="sim")  # similarity map
    sim_histogram = gr.Plot(label="相似性直方图")  # histogram of the similarity map

    # Upload -> decode the file, refresh both previews and both states.
    file_input.change(load_bin, inputs=file_input, outputs=[image_tc_display, image_tof_display, raw_hist, nor_hist])

    # Click on the ToF image -> pixel histogram + similarity map.
    image_tof_display.select(plot_pixel_histogram, inputs=[ raw_hist, nor_hist], outputs=[histogram,image_sim_display,img_state])

    # Same click -> redraw the similarity-map histogram with 16 bins.
    image_tof_display.select(
        draw_histogram,
        inputs=[img_state, gr.State(16)],
        outputs=sim_histogram
    )

    # (removed: commented-out bins_slider.change handler and a gr.Interface
    # alternative for redrawing the histogram with a variable bin count)

# demo.launch(share=True)
demo.launch(share=False)
requirements.txt CHANGED
Binary files a/requirements.txt and b/requirements.txt differ
 
z_tof2pointcloud.cpp ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
constexpr float PI = 3.14159265358979323846f;

// Undistort the pixel grid of a pinhole camera with Brown-Conrady
// distortion and emit, for every pixel, the unit-length viewing ray in
// camera coordinates.
//
// Parameters:
//   K            - 3x3 row-major intrinsics {fx, 0, cx, 0, fy, cy, 0, 0, 1}.
//   D            - distortion coefficients {k1, k2, p1, p2, k3}.
//   num_points   - number of pixels to process, row-major from the origin.
//   unit_vector  - output; resized to num_points * 3 (x, y, z interleaved).
//   grid_width   - width of the sampled pixel grid (default: historical 120).
//   grid_height  - height of the sampled pixel grid (default: 48).
//
// Bug fix: the original version unconditionally rasterized the full
// 120x48 grid into a buffer sized num_points * 2, writing out of bounds
// whenever num_points < 120*48 (as in main(), which passes 40*30).
// Grid generation is now bounded by both num_points and the grid size;
// the grid dimensions are parameterized, backward compatible via defaults.
void undistort_points_pinhole_cpp(
    const std::vector<float> &K,
    const std::vector<float> &D,
    int num_points,
    std::vector<float> &unit_vector,
    int grid_width = 120,
    int grid_height = 48)
{
    std::vector<float> distorted(num_points * 2, 0.f);
    std::vector<float> undistorted(num_points * 2, 0.f);

    // Generate distorted pixel-center coordinates, never past num_points
    // (points beyond the grid, if any, keep the zero-initialized value).
    const int grid_limit = std::min(num_points, grid_width * grid_height);
    for (int idx = 0; idx < grid_limit; ++idx)
    {
        const int px = idx % grid_width;
        const int py = idx / grid_width;
        distorted[idx * 2 + 0] = px + 0.5f;
        distorted[idx * 2 + 1] = py + 0.5f;
    }

    unit_vector.resize(num_points * 3);

    float fx = K[0];
    float fy = K[4];
    float cx = K[2];
    float cy = K[5];

    float k1 = D[0], k2 = D[1], p1 = D[2], p2 = D[3], k3 = D[4];

    for (int idx = 0; idx < num_points; ++idx)
    {
        int idx_two = idx * 2;
        int idx_three = idx * 3;

        float u = distorted[idx_two + 0];
        float v = distorted[idx_two + 1];

        //---------------------------------------------------------
        // Step 1: Normalize distorted pixel coordinates.
        //---------------------------------------------------------
        float xd = (u - cx) / fx;
        float yd = (v - cy) / fy;

        //---------------------------------------------------------
        // Step 2: Iteratively invert the Brown-Conrady model.
        // Fixed-point iteration; 5 rounds suffice for mild distortion.
        //---------------------------------------------------------
        float x = xd;
        float y = yd;

        for (int iter = 0; iter < 5; iter++)
        {
            float r2 = x * x + y * y;
            float r4 = r2 * r2;
            float r6 = r4 * r2;

            float radial = 1 + k1 * r2 + k2 * r4 + k3 * r6;

            float x_tangential = 2 * p1 * x * y + p2 * (r2 + 2 * x * x);
            float y_tangential = p1 * (r2 + 2 * y * y) + 2 * p2 * x * y;

            x = (xd - x_tangential) / radial;
            y = (yd - y_tangential) / radial;
        }

        //---------------------------------------------------------
        // Step 3: Undistorted pixel coordinates (kept for debugging).
        //---------------------------------------------------------
        undistorted[idx_two + 0] = fx * x + cx;
        undistorted[idx_two + 1] = fy * y + cy;

        //---------------------------------------------------------
        // Step 4: Camera-frame unit ray through the undistorted point.
        //---------------------------------------------------------
        float X = x;
        float Y = y;
        float Z = 1.0f;

        float norm = std::sqrt(X * X + Y * Y + Z * Z);

        // Optional elliptical field-of-view mask, currently disabled.
        float mask = 1;
        unit_vector[idx_three + 0] = mask * (X / norm);
        unit_vector[idx_three + 1] = mask * (Y / norm);
        unit_vector[idx_three + 2] = mask * (Z / norm);
    }
}
102
+
103
// Scale each per-pixel unit viewing ray by its measured time-of-flight
// range, producing 3-D points in the camera frame.
//
// Parameters:
//   unit_vector - num_points * 3 unit rays (x, y, z interleaved).
//   tof         - num_points range values (same unit as the output).
//   points3D    - output; resized to num_points * 3.
void tof_to_xyz(
    const std::vector<float> &unit_vector,
    const std::vector<float> &tof,
    int num_points,
    std::vector<float> &points3D)
{
    points3D.resize(num_points * 3);

    for (int i = 0; i < num_points; ++i)
    {
        const int base = i * 3;
        const float range = tof[i];

        points3D[base + 0] = unit_vector[base + 0] * range;
        points3D[base + 1] = unit_vector[base + 1] * range;
        points3D[base + 2] = unit_vector[base + 2] * range;
    }
}
121
+
122
// Back-project per-pixel depth (distance along the optical axis) into
// 3-D camera-frame points using the precomputed unit rays: the ray is
// rescaled so its z-component equals the depth value.
//
// NOTE(review): assumes every ray z-component is non-zero (true for rays
// built from the pinhole model with Z = 1) -- confirm for masked pixels.
//
// Parameters:
//   unit_vector - num_points * 3 unit rays (x, y, z interleaved).
//   depth       - num_points depth values.
//   points3D    - output; resized to num_points * 3.
void depth_to_xyz(
    const std::vector<float> &unit_vector,
    const std::vector<float> &depth,
    int num_points,
    std::vector<float> &points3D)
{
    points3D.resize(num_points * 3);

    for (int i = 0; i < num_points; ++i)
    {
        const int base = i * 3;
        const float z = depth[i];

        points3D[base + 0] = unit_vector[base + 0] / unit_vector[base + 2] * z;
        points3D[base + 1] = unit_vector[base + 1] / unit_vector[base + 2] * z;
        points3D[base + 2] = z;
    }
}
140
+
141
// Demo entry point: builds per-pixel viewing rays for a 40x30 sensor and
// back-projects an (all-zero) depth map into camera-frame 3-D points.
int main()
{
    int WIDTH = 40;
    int HEIGHT = 30;
    int num_points = WIDTH * HEIGHT;

    std::vector<float> unit_vector(num_points * 3, 0.f);
    std::vector<float> depth(num_points , 0.f);
    std::vector<float> points3D(num_points * 3 , 0.f); // changed to *3

    // Camera intrinsics.
    float fx = 37;
    float fy = 37;
    float cx = 20;
    float cy = 15;

    // Distortion coefficients (all zero -> ideal pinhole).
    float k1 = 0.f;
    float k2 = 0.f;
    float p1 = 0.f;
    float p2 = 0.f;
    float k3 = 0.f;

    std::vector<float> K = {fx , 0.f, cx , 0.f, fy , cy , 0.f, 0.f, 1.f};
    std::vector<float> D = {k1, k2, p1, p2, k3};

    // Generate the per-pixel unit rays.
    // NOTE(review): undistort_points_pinhole_cpp internally samples a
    // 120x48 grid while num_points here is 40x30 -- confirm the intended
    // grid size matches the sensor resolution.
    undistort_points_pinhole_cpp(K, D, num_points, unit_vector);

    // Convert the depth map into camera-frame 3-D points.
    depth_to_xyz(unit_vector, depth, num_points, points3D);

    return 0;
}