Fabrice-TIERCELIN committed
Commit f2ccc6a · verified · Parent: 1674d1b

Delete inference_utils.py

Files changed (1)
  1. inference_utils.py (+0 -148)
inference_utils.py DELETED
@@ -1,148 +0,0 @@
- import os
- import subprocess
- import tempfile
- import cv2
- import torch
- from PIL import Image
- from typing import Mapping
- from einops import rearrange
- import numpy as np
- import torchvision.transforms.functional as transforms_F
- from video_to_video.utils.logger import get_logger
-
- logger = get_logger()
-
-
- def tensor2vid(video, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]):
-     mean = torch.tensor(mean, device=video.device).reshape(1, -1, 1, 1, 1)
-     std = torch.tensor(std, device=video.device).reshape(1, -1, 1, 1, 1)
-     video = video.mul_(std).add_(mean)
-     video.clamp_(0, 1)
-     video = video * 255.0
-     images = rearrange(video, 'b c f h w -> b f h w c')[0]
-     return images
-
-
- def preprocess(input_frames):
-     out_frame_list = []
-     for pointer in range(len(input_frames)):
-         frame = input_frames[pointer]
-         frame = frame[:, :, ::-1]
-         frame = Image.fromarray(frame.astype('uint8')).convert('RGB')
-         frame = transforms_F.to_tensor(frame)
-         out_frame_list.append(frame)
-     out_frames = torch.stack(out_frame_list, dim=0)
-     out_frames.clamp_(0, 1)
-     mean = out_frames.new_tensor([0.5, 0.5, 0.5]).view(-1)
-     std = out_frames.new_tensor([0.5, 0.5, 0.5]).view(-1)
-     out_frames.sub_(mean.view(1, -1, 1, 1)).div_(std.view(1, -1, 1, 1))
-     return out_frames
-
-
- def adjust_resolution(h, w, up_scale):
-     if h*up_scale < 720:
-         up_s = 720/h
-         target_h = int(up_s*h//2*2)
-         target_w = int(up_s*w//2*2)
-     elif h*w*up_scale*up_scale > 1280*2048:
-         up_s = np.sqrt(1280*2048/(h*w))
-         target_h = int(up_s*h//2*2)
-         target_w = int(up_s*w//2*2)
-     else:
-         target_h = int(up_scale*h//2*2)
-         target_w = int(up_scale*w//2*2)
-     return (target_h, target_w)
-
-
- def make_mask_cond(in_f_num, interp_f_num):
-     mask_cond = []
-     interp_cond = [-1 for _ in range(interp_f_num)]
-     for i in range(in_f_num):
-         mask_cond.append(i)
-         if i != in_f_num - 1:
-             mask_cond += interp_cond
-     return mask_cond
-
-
- def load_video(vid_path):
-     capture = cv2.VideoCapture(vid_path)
-     _fps = capture.get(cv2.CAP_PROP_FPS)
-     _total_frame_num = capture.get(cv2.CAP_PROP_FRAME_COUNT)
-     pointer = 0
-     frame_list = []
-     stride = 1
-     while len(frame_list) < _total_frame_num:
-         ret, frame = capture.read()
-         pointer += 1
-         if (not ret) or (frame is None):
-             break
-         if pointer >= _total_frame_num + 1:
-             break
-         if pointer % stride == 0:
-             frame_list.append(frame)
-     capture.release()
-     return frame_list, _fps
-
-
- def save_video(video, save_dir, file_name, fps=16.0):
-     output_path = os.path.join(save_dir, file_name)
-     images = [(img.numpy()).astype('uint8') for img in video]
-     temp_dir = tempfile.mkdtemp()
-
-     for fid, frame in enumerate(images):
-         tpth = os.path.join(temp_dir, '%06d.png' % (fid + 1))
-         cv2.imwrite(tpth, frame[:, :, ::-1])
-
-     tmp_path = os.path.join(save_dir, 'tmp.mp4')
-     cmd = f'ffmpeg -y -f image2 -framerate {fps} -i {temp_dir}/%06d.png \
-         -vcodec libx264 -preset ultrafast -crf 0 -pix_fmt yuv420p {tmp_path}'
-
-     status, output = subprocess.getstatusoutput(cmd)
-     if status != 0:
-         logger.error('Save Video Error with {}'.format(output))
-
-     os.system(f'rm -rf {temp_dir}')
-     os.rename(tmp_path, output_path)
-
-
-
- def collate_fn(data, device):
-     """Prepare the input just before the forward function.
-     This method will move the tensors to the right device.
-     Usually this method does not need to be overridden.
-
-     Args:
-         data: The data out of the dataloader.
-         device: The device to move data to.
-
-     Returns: The processed data.
-
-     """
-     from torch.utils.data.dataloader import default_collate
-
-     def get_class_name(obj):
-         return obj.__class__.__name__
-
-     if isinstance(data, dict) or isinstance(data, Mapping):
-         return type(data)({
-             k: collate_fn(v, device) if k != 'img_metas' else v
-             for k, v in data.items()
-         })
-     elif isinstance(data, (tuple, list)):
-         if 0 == len(data):
-             return torch.Tensor([])
-         if isinstance(data[0], (int, float)):
-             return default_collate(data).to(device)
-         else:
-             return type(data)(collate_fn(v, device) for v in data)
-     elif isinstance(data, np.ndarray):
-         if data.dtype.type is np.str_:
-             return data
-         else:
-             return collate_fn(torch.from_numpy(data), device)
-     elif isinstance(data, torch.Tensor):
-         return data.to(device)
-     elif isinstance(data, (bytes, str, int, float, bool, type(None))):
-         return data
-     else:
-         raise ValueError(f'Unsupported data type {type(data)}')
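
For reference, the deleted helpers formed a simple load → preprocess → generate → save pipeline. The sketch below shows how they fit together; it assumes ffmpeg is on PATH, uses a placeholder input path ('input.mp4'), and substitutes an identity pass-through for the repo's actual video-to-video model, which was never part of this file:

import torch
from inference_utils import (adjust_resolution, load_video, preprocess,
                             save_video, tensor2vid)

# Read all frames (BGR, H x W x C numpy arrays) and the source frame rate.
frames, fps = load_video('input.mp4')
h, w = frames[0].shape[:2]

# Even target dimensions for a hypothetical 4x upscale, subject to the
# 720-pixel height floor and 1280*2048 pixel-count ceiling that
# adjust_resolution enforces.
target_h, target_w = adjust_resolution(h, w, up_scale=4)

# BGR -> RGB, to tensor, normalize with mean/std 0.5 -> (f, c, h, w),
# then reshape to the (b, c, f, h, w) layout that tensor2vid expects.
video = preprocess(frames).permute(1, 0, 2, 3).unsqueeze(0)

# The real pipeline would run its generator here, e.g. something like
# output = model(video, target_size=(target_h, target_w)); an identity
# pass-through keeps this sketch self-contained and runnable.
output = video

# De-normalize back to uint8-range RGB frames (f, h, w, c), then encode.
images = tensor2vid(output)
save_video(images, save_dir='.', file_name='output.mp4', fps=fps)

Note that save_video shells out to ffmpeg and writes a temporary tmp.mp4 inside save_dir before renaming it to the final file, so save_dir must be writable and ffmpeg must be installed.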