robomaster2025 committed on
Commit
3c4a781
·
verified ·
1 Parent(s): 005ccbc

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. utils.py +276 -0
utils.py ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ from PIL import Image, ImageDraw
3
+ from torch.utils.data import RandomSampler
4
+ from io import BytesIO
5
+ import imageio.v2 as imageio
6
+ import numpy as np
7
+
8
+ from torchvision import transforms
9
+ from torchvision.utils import flow_to_image
10
+ import cv2
11
+ import torch
12
+ import os
13
+
14
def process_points(points, frames):
    """Resample a 2-D point trajectory to exactly `frames` points.

    If the trajectory is too long it is downsampled at evenly spaced
    indices; if too short, extra points are linearly interpolated between
    consecutive points.

    Args:
        points: sequence of (x, y) pairs.
        frames: desired number of output points.

    Returns:
        list of `frames` points. Original points keep their input type;
        interpolated points are `[int, int]` lists.
    """
    if len(points) >= frames:
        # Downsample: keep `frames` evenly spaced samples.
        keep = np.linspace(0, len(points) - 1, frames, dtype=int)
        return [points[i] for i in keep]

    if len(points) <= 1:
        # No segment to interpolate on; repeat the single point.
        # (The original code divided by `interval == 0` here and crashed.)
        return list(points) * frames

    insert_num = frames - len(points)
    interval = len(points) - 1  # number of segments between points
    insert_num_dict = {i: insert_num // interval for i in range(interval)}

    # Spread the remainder over roughly evenly spaced segments.
    m = insert_num % interval
    if m > 0:
        extra = np.linspace(0, len(points) - 1, m, dtype=int)
        if extra[-1] > 0:
            extra[-1] -= 1  # keep the last bump off the final index
        for i in range(interval):
            if i in extra:
                insert_num_dict[i] += 1

    res = []
    for i in range(interval):
        x0, y0 = points[i]
        x1, y1 = points[i + 1]
        k = insert_num_dict[i]
        inserted = []
        for j in range(k):
            # Parametric position of the j-th inserted point on the segment.
            t = (j + 1) / (k + 1)
            inserted.append([int(x0 + t * (x1 - x0)), int(y0 + t * (y1 - y0))])
        res += points[i : i + 1] + inserted
    res += points[-1:]

    return res
57
+
58
+
59
def get_flow(points, optical_flow, video_len):
    """Write per-step point displacements into a sparse flow volume.

    For each consecutive pair of trajectory points, stores the (dx, dy)
    displacement at frame i+1, at the pixel location of the i-th point.

    Args:
        points: sequence of (x, y) positions, one per frame.
        optical_flow: array indexed as [frame, y, x, 2]; modified in place.
        video_len: number of frames to process.

    Returns:
        The same `optical_flow` array.
    """
    for frame in range(1, video_len):
        cur_x, cur_y = points[frame - 1]
        nxt_x, nxt_y = points[frame]
        optical_flow[frame, cur_y, cur_x, 0] = nxt_x - cur_x
        optical_flow[frame, cur_y, cur_x, 1] = nxt_y - cur_y
    return optical_flow
67
+
68
+
69
def sigma_matrix2(sig_x, sig_y, theta):
    """Build the covariance matrix of a rotated 2-D Gaussian.

    Args:
        sig_x (float): standard deviation along the first principal axis.
        sig_y (float): standard deviation along the second principal axis.
        theta (float): rotation angle in radians.

    Returns:
        ndarray: the 2x2 sigma matrix U @ D @ U.T.
    """
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    rotation = np.array([[cos_t, -sin_t], [sin_t, cos_t]])
    diagonal = np.diag([sig_x ** 2, sig_y ** 2])
    return rotation @ diagonal @ rotation.T
81
+
82
+
83
def mesh_grid(kernel_size):
    """Build a zero-centered coordinate grid for a square kernel.

    Args:
        kernel_size (int): side length of the kernel.

    Returns:
        tuple:
            xy (ndarray): (kernel_size, kernel_size, 2) stacked (x, y) coords.
            xx (ndarray): (kernel_size, kernel_size) x coordinates.
            yy (ndarray): (kernel_size, kernel_size) y coordinates.
    """
    # Note: `-kernel_size // 2` floors the negated value, which centers both
    # odd and even kernel sizes the way the rest of the file expects.
    axis = np.arange(-kernel_size // 2 + 1.0, kernel_size // 2 + 1.0)
    xx, yy = np.meshgrid(axis, axis)
    xy = np.stack((xx, yy), axis=-1)
    return xy, xx, yy
101
+
102
+
103
def pdf2(sigma_matrix, grid):
    """Evaluate the un-normalized bivariate Gaussian density on a grid.

    Args:
        sigma_matrix (ndarray): 2x2 covariance matrix.
        grid (ndarray): (K, K, 2) coordinate grid from :func:`mesh_grid`.

    Returns:
        ndarray: (K, K) values of exp(-0.5 * x.T @ inv(sigma) @ x).
    """
    precision = np.linalg.inv(sigma_matrix)
    quad_form = np.sum((grid @ precision) * grid, axis=2)
    return np.exp(-0.5 * quad_form)
115
+
116
+
117
def bivariate_Gaussian(kernel_size, sig_x, sig_y, theta, grid=None, isotropic=True):
    """Generate a normalized isotropic or anisotropic Gaussian kernel.

    In isotropic mode only ``sig_x`` is used; ``sig_y`` and ``theta`` are
    ignored.

    Args:
        kernel_size (int): side length of the square kernel.
        sig_x (float): standard deviation (first axis).
        sig_y (float): standard deviation (second axis, anisotropic only).
        theta (float): rotation in radians (anisotropic only).
        grid (ndarray, optional): (K, K, 2) grid from :func:`mesh_grid`;
            built on demand when None. Default: None.
        isotropic (bool): choose the circular covariance. Default: True.

    Returns:
        ndarray: (kernel_size, kernel_size) kernel summing to 1.
    """
    if grid is None:
        grid = mesh_grid(kernel_size)[0]
    if isotropic:
        cov = np.diag([sig_x ** 2, sig_x ** 2])
    else:
        cov = sigma_matrix2(sig_x, sig_y, theta)
    raw = pdf2(cov, grid)
    return raw / raw.sum()
140
+
141
def read_points(file, video_len=16, reverse=False):
    """Read an (x, y) trajectory from a comma-separated text file.

    Args:
        file: path to a text file with one "x,y" pair per line.
        video_len: maximum number of points to keep; longer trajectories
            are strided down and truncated to this length.
        reverse: if True, reverse the trajectory before subsampling.

    Returns:
        list[tuple[int, int]]: parsed (and possibly subsampled) points.
    """
    with open(file, "r") as handle:
        raw = handle.readlines()

    points = [
        (int(x_str), int(y_str))
        for x_str, y_str in (line.strip().split(",") for line in raw)
    ]

    if reverse:
        points = points[::-1]

    if len(points) > video_len:
        stride = len(points) // video_len
        points = points[::stride][:video_len]

    return points
157
+
158
def process_traj(point_path, num_frames, video_size, device="cpu"):
    """Load a trajectory from a .npy file and rescale it to the video frame.

    Args:
        point_path: path to an (N, 2) .npy array of (x, y) points expressed
            in a 640x480 reference canvas.
        num_frames: number of trajectory points to resample to.
        video_size: (height, width) of the target video.
        device: unused here; kept so the signature matches process_traj_v2.

    Returns:
        list containing one list of `num_frames` [x, y] points clamped to
        the target frame.
    """
    processed_points = []
    points = np.load(point_path)
    points = [tuple(p) for p in points.tolist()]

    h, w = video_size
    points = process_points(points, num_frames)
    # Source annotations live on a 640x480 canvas; rescale into (w, h).
    xy_range = [640, 480]
    points = [[int(w * x / xy_range[0]), int(h * y / xy_range[1])] for x, y in points]

    points_resized = []
    for point in points:
        # Clamp each axis independently to the *target* frame.
        # (The original used an elif chain — so y was never clamped when x
        # was — and clamped against the 640x480 source range instead of the
        # rescaled w/h bounds.)
        point[0] = min(max(point[0], 0), w - 1)
        point[1] = min(max(point[1], 0), h - 1)
        points_resized.append(point)
    processed_points.append(points_resized)

    return processed_points
182
+
183
def process_traj_v2(point_path, num_frames, video_size, device="cpu"):
    """Load a trajectory and convert it to a dense per-frame flow field.

    Args:
        point_path: path to an (N, 2) .npy array of (x, y) points expressed
            in a 640x480 reference canvas.
        num_frames: number of frames / resampled trajectory points.
        video_size: (height, width) of the target video.
        device: torch device the returned flow tensor is moved to.

    Returns:
        tuple:
            optical_flow (torch.Tensor): (num_frames, H, W, 2) blurred flow.
            processed_points: list containing one list of clamped [x, y]
                points.
    """
    # Flow volume indexed as [frame, y, x, (dx, dy)].
    optical_flow = np.zeros((num_frames, video_size[0], video_size[1], 2), dtype=np.float32)
    processed_points = []

    points = np.load(point_path)
    points = [tuple(p) for p in points.tolist()]
    h, w = video_size
    points = process_points(points, num_frames)
    # Source annotations live on a 640x480 canvas; rescale into (w, h).
    xy_range = [640, 480]
    points = [[int(w * x / xy_range[0]), int(h * y / xy_range[1])] for x, y in points]

    points_resized = []
    for point in points:
        # Clamp each axis independently to the target frame so get_flow's
        # optical_flow[frame, y, x] indexing cannot go out of bounds.
        # (The original elif chain skipped the y clamp whenever x was
        # clamped, and compared against the 640x480 source range rather
        # than the rescaled w/h bounds.)
        point[0] = min(max(point[0], 0), w - 1)
        point[1] = min(max(point[1], 0), h - 1)
        points_resized.append(point)
    optical_flow = get_flow(points_resized, optical_flow, video_len=num_frames)
    processed_points.append(points_resized)

    # Spread each sparse one-pixel displacement with a wide Gaussian so the
    # resulting flow field is dense and smooth; normalize so the kernel
    # center has weight 1 (preserves the peak displacement magnitude).
    size = 99
    sigma = 10
    blur_kernel = bivariate_Gaussian(size, sigma, sigma, 0, grid=None, isotropic=True)
    blur_kernel = blur_kernel / blur_kernel[size // 2, size // 2]

    assert len(optical_flow) == num_frames
    for i in range(1, num_frames):
        optical_flow[i] = cv2.filter2D(optical_flow[i], -1, blur_kernel)
    optical_flow = torch.tensor(optical_flow).to(device)

    return optical_flow, processed_points
218
+
219
def draw_circle(rgb, coord, radius, color=(255, 0, 0), visible=True, color_alpha=None):
    """Draw a circle onto a PIL image in place.

    Args:
        rgb: PIL image to draw on (modified in place).
        coord: (x, y) center of the circle.
        radius: circle radius in pixels.
        color: RGB color used for fill and outline.
        visible: when False, draw only the outline (no fill).
        color_alpha: optional alpha appended to `color`; 255 when None.

    Returns:
        The same image object, for chaining.
    """
    alpha = 255 if color_alpha is None else color_alpha
    rgba = (*color, alpha)
    # Bounding box of the circle: top-left and bottom-right corners.
    bbox = [
        (coord[0] - radius, coord[1] - radius),
        (coord[0] + radius, coord[1] + radius),
    ]
    painter = ImageDraw.Draw(rgb)
    painter.ellipse(bbox, fill=rgba if visible else None, outline=rgba)
    return rgb
233
+
234
def save_images2video(images, video_name, fps):
    """Encode a sequence of frames to H.264 and write `<video_name>.mp4`.

    Args:
        images: sequence of HxWxC uint8 frames.
        video_name: output path without the ".mp4" extension.
        fps: frames per second of the output video.
    """
    buffer = BytesIO()
    writer_kwargs = dict(
        fps=fps,
        format="mp4",
        codec="libx264",
        ffmpeg_params=["-crf", "12"],  # near-lossless quality setting
        pixelformat="yuv420p",         # widest player compatibility
    )
    # Encode fully in memory, then flush the container to disk in one write.
    with imageio.get_writer(buffer, **writer_kwargs) as writer:
        for frame in images:
            writer.append_data(frame)

    with open(video_name + ".mp4", "wb") as out_file:
        out_file.write(buffer.getvalue())
256
+
257
def sample_flowlatents(latents, flow_latents, mask, points, diameter, transit_start, transit_end):
    """Paint masked-region latent means along a trajectory into flow latents.

    For each channel, computes the mean latent value over the masked region
    and stamps it (plus a small epsilon) into a disk of `diameter` centered
    on the trajectory point of every frame in [transit_start, transit_end).
    Before each stamp, frame f (> 0) is re-initialized from frame f-1 across
    ALL channels — this per-channel copy order is intentional and preserved.

    Args:
        latents: tensor indexed as [batch, channel, position].
        flow_latents: tensor indexed as [batch, channel, frame, h, w];
            modified in place.
        mask: boolean-compatible tensor selecting positions in `latents`
            (broadcast against latents[:, c, :].unsqueeze(2)).
        points: trajectory tensor; every 4th entry along dim 1 is used,
            indexed per frame as (x, y).
        diameter: diameter in latent pixels of the stamped disk.
        transit_start: first frame (inclusive) to stamp.
        transit_end: last frame (exclusive) to stamp.

    Returns:
        The same `flow_latents` tensor.
    """
    points = points[:, ::4, :]  # latent grid is 4x coarser than the trajectory
    r = diameter // 2
    n_channels = latents.shape[1]
    grid_h = flow_latents.shape[-2]
    grid_w = flow_latents.shape[-1]

    for ch in range(n_channels):
        fill_value = latents[:, ch, :].unsqueeze(2)[mask > 0.].mean() + 1e-4
        for t in range(transit_start, transit_end):
            if t > 0:
                # Carry the previous frame forward (all channels) before
                # stamping this channel's disk.
                flow_latents[0, :, t] = flow_latents[0, :, t - 1]
            cx = int(points[0, t, 0])
            cy = int(points[0, t, 1])
            for row in range(cy - r, cy + r + 1):
                if not 0 <= row < grid_h:
                    continue
                for col in range(cx - r, cx + r + 1):
                    if not 0 <= col < grid_w:
                        continue
                    if (row - cy) ** 2 + (col - cx) ** 2 <= r ** 2:
                        flow_latents[0, ch, t, row, col] = fill_value

    return flow_latents