# NOTE: stripped non-code export artifacts (file-size banner, git-blame
# commit hashes, and a line-number gutter) that preceded the code.
import numpy as np
from PIL import Image
import torch
from decord import cpu, VideoReader
from transformers import BaseImageProcessor
from typing import List, Union, Tuple
import time
from constants import *
def expand2square(pil_img, background_color):
    """Pad *pil_img* to a square canvas filled with *background_color*.

    The image is centered along its shorter axis; a square image is
    returned unchanged (same object, no copy).
    """
    width, height = pil_img.size
    if width == height:
        return pil_img
    side = max(width, height)
    canvas = Image.new(pil_img.mode, (side, side), background_color)
    # One of the two offsets is always 0 (the longer axis fills the canvas).
    canvas.paste(pil_img, ((side - width) // 2, (side - height) // 2))
    return canvas
def process_images(
    images: "Union[torch.Tensor, np.ndarray]",
    image_processor: "Union[BaseImageProcessor, List[BaseImageProcessor]]",
    device: str,
) -> "Union[torch.Tensor, List[torch.Tensor]]":
    """Preprocess a batch of frames with one or several image processors.

    Args:
        images: iterable of frames, each an (H, W, C) ndarray (converted to
            PIL) or an object the processor's ``preprocess`` accepts directly.
        image_processor: a single processor, or a list of processors whose
            outputs are returned side by side.
        device: torch device string; the stacked half-precision tensors are
            moved there (multi-processor path only).

    Returns:
        When ``image_processor`` is a list: a list with one tensor of shape
        (num_frames, C, H, W) per processor. Otherwise: a stacked tensor, or
        a plain list of tensors when frame shapes differ.
    """
    if isinstance(image_processor, list):
        processor_aux_list = image_processor
        new_images_aux_list = []
        for image in images:
            # Frames arrive as (H, W, C) arrays; processors expect PIL images.
            if isinstance(image, np.ndarray):
                image = Image.fromarray(image)
            image_aux_list = []
            for processor_aux in processor_aux_list:
                image_aux = image  # PIL.Image
                if hasattr(processor_aux, "image_mean"):
                    # Pad to a square filled with the processor's mean color,
                    # then resize to the processor's native resolution.
                    # Some processors expose `size` instead of `crop_size`.
                    try:
                        target_resolution = processor_aux.crop_size["height"]
                    except (AttributeError, KeyError, TypeError):
                        target_resolution = processor_aux.size["height"]
                    image_aux = expand2square(
                        image_aux, tuple(int(x * 255) for x in processor_aux.image_mean)
                    ).resize((target_resolution, target_resolution))
                image_aux = processor_aux.preprocess(image_aux, return_tensors="pt")[
                    "pixel_values"
                ][0]  # (C, H, W)
                image_aux_list.append(image_aux)
            new_images_aux_list.append(image_aux_list)  # [num_frames][num_processor]
        # Transpose to [num_processor][num_frames] ...
        new_images_aux_list = [
            list(batch_image_aux) for batch_image_aux in zip(*new_images_aux_list)
        ]
        # ... and stack each processor's frames into (num_frames, C, H, W).
        return [
            torch.stack(image_aux).half().to(device)
            for image_aux in new_images_aux_list
        ]

    # Single-processor path.
    image_aspect_ratio = "pad"
    if image_aspect_ratio != "pad":
        # Unreachable with the hard-coded ratio above; kept for parity.
        return image_processor(images, return_tensors="pt")["pixel_values"]
    new_images = []
    for image in images:
        # Convert raw frames here too (the original only did so above).
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        image = expand2square(
            image, tuple(int(x * 255) for x in image_processor.image_mean)
        )
        image = image_processor.preprocess(image, return_tensors="pt")[
            "pixel_values"
        ][0]
        new_images.append(image)
    # Stack only when non-empty and uniformly shaped; torch.stack([]) raises.
    if new_images and all(x.shape == new_images[0].shape for x in new_images):
        new_images = torch.stack(new_images, dim=0)
    return new_images
def process_video_frames(
    video_path: str,
    image_processors: "List[BaseImageProcessor]",
    device: str,
) -> "Tuple[List[torch.Tensor], List[Tuple[int, int]]]":
    """Decode a video at roughly one frame per second and preprocess it.

    Frames are decoded and preprocessed in chunks of ``CHUNK_SIZE`` to bound
    peak memory.

    Args:
        video_path: path to a video file readable by decord.
        image_processors: processors handed through to ``process_images``.
        device: torch device string for the processed tensors.

    Returns:
        ``(video, image_sizes)`` where ``video`` holds one tensor of shape
        (1, num_frames, C, H, W) per processor and ``image_sizes`` holds the
        (height, width) of the raw decoded frames.
    """
    vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
    fps = float(vr.get_avg_fps())
    # Sample ~1 frame/second; clamp the step so fps < 0.5 cannot round to a
    # zero step (range() would raise ValueError).
    step = max(1, round(fps))
    frame_indices = np.arange(0, len(vr), step)
    print(f'@tcm: In process_video_frames(): # frames = {len(frame_indices)}')
    image_sizes = [vr[0].shape[:2]]  # (height, width) of the source frames
    video = [[] for _ in range(len(image_processors))]
    for start in range(0, len(frame_indices), CHUNK_SIZE):
        print(f'@tcm: In process_video_frames(): segment {start // CHUNK_SIZE}')
        sub_frame_indices = frame_indices[start:start + CHUNK_SIZE]
        process_time = time.time()
        sub_videos = np.stack(
            [vr[frame_index].asnumpy() for frame_index in sub_frame_indices]
        )  # (num_frames, height, width, channels)
        sub_videos = process_images(sub_videos, image_processors, device)
        print(f'@tcm: In process_video_frames(): process_time={time.time()-process_time:.4f}')
        assert len(sub_videos) == len(video)
        for j, sub_video in enumerate(sub_videos):
            video[j].append(sub_video)
        del sub_videos
        if 'cuda' in device:
            # Release the chunk's GPU scratch memory before the next decode.
            torch.cuda.empty_cache()
    # Concatenate chunks per processor, then add a leading batch dimension.
    video = [torch.cat(chunks, dim=0).unsqueeze(0) for chunks in video]
    return video, image_sizes