| |
|
| | from PIL import Image, ImageDraw |
| | from torch.utils.data import RandomSampler |
| | from io import BytesIO |
| | import imageio.v2 as imageio |
| | import numpy as np |
| |
|
| | from torchvision import transforms |
| | from torchvision.utils import flow_to_image |
| | import cv2 |
| | import torch |
| | import os |
| |
|
def process_points(points, frames):
    """Resample a point trajectory to exactly ``frames`` points.

    When there are at least ``frames`` points, uniformly subsample them.
    Otherwise, linearly interpolate extra points between consecutive
    originals, distributing the insertions as evenly as possible over
    the gaps.

    Args:
        points (list): sequence of (x, y) pairs.
        frames (int): desired number of output points.

    Returns:
        list: trajectory of length ``frames`` (empty input returns []).
            Original points keep their type; interpolated points are
            ``[int, int]`` lists.
    """
    # Guard degenerate inputs: the interpolation branch below divides by
    # len(points) - 1, which raised ZeroDivisionError for a single point,
    # and np.linspace on an empty list is meaningless.
    if not points:
        return []

    if len(points) >= frames:
        # Enough points already: keep `frames` evenly spaced ones.
        keep = np.linspace(0, len(points) - 1, frames, dtype=int)
        return [points[i] for i in keep]

    if len(points) == 1:
        # Single point: repeat it for every frame.
        return list(points) * frames

    insert_num = frames - len(points)
    interval = len(points) - 1

    # Base number of insertions per gap, plus a remainder spread below.
    n = insert_num // interval
    insert_num_dict = {i: n for i in range(interval)}

    m = insert_num % interval
    if m > 0:
        # Pick m roughly evenly spaced gaps to receive one extra insertion.
        frames_interval = np.linspace(0, len(points) - 1, m, dtype=int)
        # linspace's last index can equal `interval`, one past the valid
        # gap indices [0, interval); pull it back into range.
        if frames_interval[-1] > 0:
            frames_interval[-1] -= 1
        for i in range(interval):
            if i in frames_interval:
                insert_num_dict[i] += 1

    res = []
    for i in range(interval):
        x0, y0 = points[i]
        x1, y1 = points[i + 1]
        delta_x = x1 - x0
        delta_y = y1 - y0

        insert_points = []
        for j in range(insert_num_dict[i]):
            # Parameter t walks the open interval (0, 1) between the two
            # endpoints; endpoints themselves are the original points.
            t = (j + 1) / (insert_num_dict[i] + 1)
            insert_points.append([int(x0 + t * delta_x), int(y0 + t * delta_y)])

        res += points[i : i + 1] + insert_points
    res += points[-1:]

    return res
| |
|
| |
|
def get_flow(points, optical_flow, video_len):
    """Write per-step displacement vectors into a dense flow volume.

    For each consecutive pair of trajectory points, the (dx, dy) step is
    stored in frame i+1 at the location of the frame-i point; all other
    cells are left untouched.

    Args:
        points: sequence of (x, y) positions, one per frame.
        optical_flow: array of shape (frames, H, W, 2), modified in place.
        video_len (int): number of frames to process.

    Returns:
        The same ``optical_flow`` array, for convenience.
    """
    for idx in range(video_len - 1):
        x_cur, y_cur = points[idx]
        x_nxt, y_nxt = points[idx + 1]
        optical_flow[idx + 1, y_cur, x_cur, 0] = x_nxt - x_cur
        optical_flow[idx + 1, y_cur, x_cur, 1] = y_nxt - y_cur

    return optical_flow
| |
|
| |
|
def sigma_matrix2(sig_x, sig_y, theta):
    """Calculate the rotated sigma matrix (two dimensional matrix).

    Builds U @ D @ U.T where D = diag(sig_x^2, sig_y^2) and U is the
    2-D rotation by ``theta``.

    Args:
        sig_x (float):
        sig_y (float):
        theta (float): Radian measurement.

    Returns:
        ndarray: Rotated sigma matrix.
    """
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    rotation = np.array([[cos_t, -sin_t], [sin_t, cos_t]])
    diagonal = np.array([[sig_x ** 2, 0], [0, sig_y ** 2]])
    return rotation @ diagonal @ rotation.T
| |
|
| |
|
def mesh_grid(kernel_size):
    """Generate the mesh grid, centering at zero.

    Args:
        kernel_size (int):

    Returns:
        xy (ndarray): with the shape (kernel_size, kernel_size, 2)
        xx (ndarray): with the shape (kernel_size, kernel_size)
        yy (ndarray): with the shape (kernel_size, kernel_size)
    """
    # Symmetric coordinate axis around zero, e.g. size 3 -> [-1, 0, 1].
    coords = np.arange(-kernel_size // 2 + 1.0, kernel_size // 2 + 1.0)
    xx, yy = np.meshgrid(coords, coords)
    # Pair up the two coordinate planes into (K, K, 2) points.
    xy = np.stack((xx, yy), axis=-1)
    return xy, xx, yy
| |
|
| |
|
def pdf2(sigma_matrix, grid):
    """Calculate PDF of the bivariate Gaussian distribution.

    Args:
        sigma_matrix (ndarray): with the shape (2, 2)
        grid (ndarray): generated by :func:`mesh_grid`,
            with the shape (K, K, 2), K is the kernel size.

    Returns:
        kernel (ndarray): un-normalized kernel (no 1/(2*pi*|Sigma|^0.5)
            factor applied).
    """
    precision = np.linalg.inv(sigma_matrix)
    # Quadratic form x^T Sigma^{-1} x evaluated at every grid point.
    quad_form = np.sum(np.dot(grid, precision) * grid, axis=2)
    return np.exp(-0.5 * quad_form)
| |
|
| |
|
def bivariate_Gaussian(kernel_size, sig_x, sig_y, theta, grid=None, isotropic=True):
    """Generate a bivariate isotropic or anisotropic Gaussian kernel.

    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` is
    ignored.

    Args:
        kernel_size (int):
        sig_x (float):
        sig_y (float):
        theta (float): Radian measurement.
        grid (ndarray, optional): generated by :func:`mesh_grid`,
            with the shape (K, K, 2), K is the kernel size. Default: None
        isotropic (bool):

    Returns:
        kernel (ndarray): normalized kernel (sums to 1).
    """
    if grid is None:
        grid, _, _ = mesh_grid(kernel_size)

    if isotropic:
        variance = sig_x ** 2
        sigma_matrix = np.array([[variance, 0], [0, variance]])
    else:
        sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)

    raw = pdf2(sigma_matrix, grid)
    return raw / np.sum(raw)
| |
|
def read_points(file, video_len=16, reverse=False):
    """Read "x,y" lines from a text file as an (x, y) tuple trajectory.

    Args:
        file (str): path to a text file, one "x,y" pair per line.
        video_len (int): maximum number of points to return; longer
            trajectories are strided down and truncated to this length.
        reverse (bool): if True, reverse the trajectory before sampling.

    Returns:
        list[tuple[int, int]]: the (possibly subsampled) trajectory.
    """
    with open(file, "r") as fh:
        points = [
            (int(x_str), int(y_str))
            for x_str, y_str in (line.strip().split(",") for line in fh)
        ]

    if reverse:
        points = points[::-1]

    if len(points) > video_len:
        # Stride first, then truncate, so the samples stay ordered.
        stride = len(points) // video_len
        points = points[::stride][:video_len]

    return points
| |
|
def process_traj(point_path, num_frames, video_size, device="cpu"):
    """Load a trajectory from a .npy file and resample it to `num_frames`
    points scaled to the target video size.

    Args:
        point_path (str): path to a .npy array of (x, y) points, authored
            in a fixed 640x480 canvas.
        num_frames (int): number of video frames.
        video_size (tuple): (h, w) of the target video.
        device (str): unused here; kept for signature compatibility with
            :func:`process_traj_v2`.

    Returns:
        list: a single-element list containing the resampled, clamped
            [[x, y], ...] trajectory.
    """
    processed_points = []
    points = np.load(point_path)

    points = [tuple(x) for x in points.tolist()]
    h, w = video_size
    points = process_points(points, num_frames)
    # Source trajectories are authored in a 640x480 canvas; rescale to (w, h).
    xy_range = [640, 480]
    points = [[int(w * x / xy_range[0]), int(h * y / xy_range[1])] for x, y in points]
    points_resized = []
    for point in points:
        # Clamp x and y independently to the actual frame bounds.
        # (The previous elif-chain skipped the y check whenever x was
        # clamped, and clamped against the 640x480 source canvas rather
        # than the target video size.)
        point[0] = min(max(point[0], 0), w - 1)
        point[1] = min(max(point[1], 0), h - 1)
        points_resized.append(point)
    processed_points.append(points_resized)

    return processed_points
| |
|
def process_traj_v2(point_path, num_frames, video_size, device="cpu"):
    """Load a trajectory, resample it to `num_frames` points, and build a
    Gaussian-blurred sparse optical-flow volume from it.

    Args:
        point_path (str): path to a .npy array of (x, y) points, authored
            in a fixed 640x480 canvas.
        num_frames (int): number of video frames.
        video_size (tuple): (h, w) of the target video.
        device (str): device for the returned flow tensor.

    Returns:
        tuple: (flow tensor of shape (num_frames, h, w, 2) on `device`,
            single-element list containing the resampled, clamped
            trajectory).
    """
    # Flow volume is indexed (frame, y, x, component); video_size is (h, w).
    optical_flow = np.zeros((num_frames, video_size[0], video_size[1], 2), dtype=np.float32)
    processed_points = []

    points = np.load(point_path)
    points = [tuple(x) for x in points.tolist()]
    h, w = video_size
    points = process_points(points, num_frames)
    # Source trajectories are authored in a 640x480 canvas; rescale to (w, h).
    xy_range = [640, 480]
    points = [[int(w * x / xy_range[0]), int(h * y / xy_range[1])] for x, y in points]
    points_resized = []
    for point in points:
        # Clamp x and y independently to the actual frame bounds.
        # (The previous elif-chain skipped the y check whenever x was
        # clamped, and clamped against the 640x480 source canvas rather
        # than the video size, allowing out-of-range indices into
        # optical_flow inside get_flow.)
        point[0] = min(max(point[0], 0), w - 1)
        point[1] = min(max(point[1], 0), h - 1)
        points_resized.append(point)
    optical_flow = get_flow(points_resized, optical_flow, video_len=num_frames)
    processed_points.append(points_resized)

    # Spread each sparse displacement over a neighborhood with a Gaussian
    # kernel renormalized to 1 at its center, so the peak displacement
    # magnitude is preserved after filtering.
    size = 99
    sigma = 10
    blur_kernel = bivariate_Gaussian(size, sigma, sigma, 0, grid=None, isotropic=True)
    blur_kernel = blur_kernel / blur_kernel[size // 2, size // 2]

    assert len(optical_flow) == num_frames
    # Frame 0 carries no displacement (flow is stored at the *target* frame).
    for i in range(1, num_frames):
        optical_flow[i] = cv2.filter2D(optical_flow[i], -1, blur_kernel)
    optical_flow = torch.tensor(optical_flow).to(device)

    return optical_flow, processed_points
| |
|
def draw_circle(rgb, coord, radius, color=(255, 0, 0), visible=True, color_alpha=None):
    """Draw a circle on a PIL image, in place.

    Args:
        rgb: PIL image to draw on (modified in place).
        coord: (x, y) center of the circle.
        radius (int): circle radius in pixels.
        color (tuple): RGB fill/outline color.
        visible (bool): if False, draw only the outline (no fill).
        color_alpha (int, optional): alpha channel value; defaults to 255.

    Returns:
        The same image object, for chaining.
    """
    drawer = ImageDraw.Draw(rgb)

    bounding_box = [
        (coord[0] - radius, coord[1] - radius),
        (coord[0] + radius, coord[1] + radius),
    ]

    alpha = 255 if color_alpha is None else color_alpha
    rgba = tuple(list(color) + [alpha])
    drawer.ellipse(
        bounding_box,
        fill=rgba if visible else None,
        outline=rgba,
    )
    return rgb
| |
|
def save_images2video(images, video_name, fps):
    """Encode a sequence of frames as H.264 MP4 and write it to
    ``<video_name>.mp4``.

    Args:
        images: sequence of frame arrays accepted by imageio.
        video_name (str): output path without the ".mp4" extension.
        fps: frames per second for the encoded video.
    """
    # Encode into an in-memory buffer first, then dump it to disk in one go.
    buffer = BytesIO()

    with imageio.get_writer(
        buffer,
        fps=fps,
        format="mp4",
        codec="libx264",
        ffmpeg_params=["-crf", "12"],  # near-lossless quality
        pixelformat="yuv420p",  # widest player compatibility
    ) as writer:
        for frame in images:
            writer.append_data(frame)

    output_path = os.path.join(video_name + ".mp4")
    with open(output_path, "wb") as f:
        f.write(buffer.getvalue())
| |
|
def sample_flowlatents(latents, flow_latents, mask, points, diameter, transit_start, transit_end):
    """Stamp per-channel mean latent values into `flow_latents` along a
    trajectory, propagating each frame from its predecessor.

    Args:
        latents: tensor of shape (1, C, N); per-channel mean over the
            masked region supplies the stamped value.
        flow_latents: tensor of shape (1, C, F, H, W), modified in place.
        mask: boolean-compatible tensor broadcastable against a
            (1, N, 1) latent slice — assumed to select the region of
            interest (TODO confirm against caller).
        points: tensor (1, 4*F, 2) of trajectory points; every 4th point
            is used as the per-frame circle centroid.
        diameter (int): diameter of the stamped disc in latent pixels.
        transit_start (int): first frame (inclusive) to modify.
        transit_end (int): last frame (exclusive) to modify.

    Returns:
        The same `flow_latents` tensor.
    """
    # Latent frames are temporally 4x downsampled relative to the points.
    points = points[:, ::4, :]
    radius = diameter // 2
    channels = latents.shape[1]

    # Mean latent value per channel over the masked region, computed once.
    latent_values = [
        latents[:, channel, :].unsqueeze(2)[mask > 0.].mean()
        for channel in range(channels)
    ]

    # Propagate each frame from its predecessor exactly once, THEN stamp
    # every channel's disc value.  (Previously the copy sat inside a
    # per-channel loop, so each later channel pass overwrote the discs
    # already written by earlier channels with stale previous-frame data.)
    for frame in range(transit_start, transit_end):
        if frame > 0:
            flow_latents[0, :, frame, :, :] = flow_latents[0, :, frame - 1, :, :]
        centroid_x, centroid_y = points[0, frame]
        centroid_x, centroid_y = int(centroid_x), int(centroid_y)
        for i in range(centroid_y - radius, centroid_y + radius + 1):
            for j in range(centroid_x - radius, centroid_x + radius + 1):
                if 0 <= i < flow_latents.shape[-2] and 0 <= j < flow_latents.shape[-1]:
                    if (i - centroid_y) ** 2 + (j - centroid_x) ** 2 <= radius ** 2:
                        for channel in range(channels):
                            # 1e-4 offset keeps the stamped value strictly
                            # distinct from the raw mean — TODO confirm why.
                            flow_latents[0, channel, frame, i, j] = latent_values[channel] + 1e-4

    return flow_latents