jiosephlee commited on
Commit
2a98c42
·
verified ·
1 Parent(s): c40e14d

Upload video_processing_interns1.py

Browse files
Files changed (1) hide show
  1. video_processing_interns1.py +197 -0
video_processing_interns1.py ADDED
@@ -0,0 +1,197 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Fast Video processor class for InternS1."""
16
+
17
+ from typing import Optional, Union
18
+
19
+ from transformers.image_processing_utils import BatchFeature
20
+ from transformers.image_utils import (
21
+ OPENAI_CLIP_MEAN,
22
+ OPENAI_CLIP_STD,
23
+ SizeDict,
24
+ )
25
+ from transformers.processing_utils import Unpack, VideosKwargs
26
+ from transformers.utils import (
27
+ TensorType,
28
+ is_torch_available,
29
+ is_torchvision_available,
30
+ is_torchvision_v2_available,
31
+ is_vision_available,
32
+ )
33
+ from transformers.utils.import_utils import requires
34
+ from transformers.video_processing_utils import BaseVideoProcessor
35
+ from transformers.video_utils import VideoMetadata, group_videos_by_shape, reorder_videos
36
+
37
+
38
+ if is_torchvision_available():
39
+ if is_torchvision_v2_available():
40
+ from torchvision.transforms.v2 import functional as F
41
+ else:
42
+ from torchvision.transforms import functional as F
43
+
44
+
45
+ if is_torch_available():
46
+ import torch
47
+
48
+ if is_vision_available():
49
+ from transformers.image_utils import PILImageResampling
50
+
51
+
52
class InternS1VideoProcessorInitKwargs(VideosKwargs):
    # Extra kwarg accepted on top of the common `VideosKwargs`:
    # `initial_shift` controls where uniform frame sampling starts.
    # `True` centers the sampling grid within the video; a numeric value
    # is used directly as the starting offset of the sampled indices.
    initial_shift: Union[bool, float, int]
54
+
55
+
56
@requires(backends=("torchvision",))
class InternS1VideoProcessor(BaseVideoProcessor):
    """Fast (torchvision-backed) video processor for InternS1.

    Samples frames from input videos (optionally), then resizes, rescales and
    normalizes them with CLIP statistics, returning a `BatchFeature` with
    `pixel_values_videos`.
    """

    resample = PILImageResampling.BICUBIC
    image_mean = OPENAI_CLIP_MEAN
    image_std = OPENAI_CLIP_STD
    size = {"height": 384, "width": 384}
    do_resize = True
    do_rescale = True
    do_normalize = True
    do_convert_rgb = True
    initial_shift = True
    do_sample_frames = False  # Set to False for BC, recommended to set `True` in new models
    valid_kwargs = InternS1VideoProcessorInitKwargs
    model_input_names = ["pixel_values_videos"]

    def __init__(self, **kwargs: Unpack[InternS1VideoProcessorInitKwargs]):
        super().__init__(**kwargs)

    def sample_frames(
        self,
        video: "torch.Tensor",
        metadata: Optional[Union[VideoMetadata, dict]] = None,
        num_frames: Optional[int] = None,
        fps: Optional[Union[int, float]] = None,
        initial_shift: Optional[Union[bool, float, int]] = None,
    ):
        """
        Default sampling function which uniformly samples the desired number of frames between 0 and total number of frames.
        If `fps` is passed along with metadata, `fps` frames per second are sampled uniformly. Arguments `num_frames`
        and `fps` are mutually exclusive.

        Args:
            video (`torch.Tensor`):
                Video that needs to be sampled.
            metadata (`VideoMetadata`, *optional*):
                Metadata of the video containing information about total duration, fps and total number of frames.
            num_frames (`int`, *optional*):
                Maximum number of frames to sample. Defaults to `self.num_frames`.
            fps (`int` or `float`, *optional*):
                Target frames to sample per second. Defaults to `self.fps`.
            initial_shift (`bool`, `float` or `int`, defaults to `self.initial_shift`):
                The initial shift to apply when sampling frames. If `True`, the shift is set so that frames are sampled from the middle of the video.

        Returns:
            torch.Tensor:
                Sampled video frames.

        Raises:
            ValueError: If neither `num_frames` (explicit or class default) nor a usable
                `fps` + `metadata` pair is available, or if more frames are requested
                than the video contains.
        """
        num_frames = num_frames if num_frames is not None else self.num_frames
        initial_shift = initial_shift if initial_shift is not None else self.initial_shift
        total_num_frames = video.shape[0]

        # If num_frames is not given but fps is, calculate num_frames from fps.
        # NOTE: an explicit `num_frames` (or class default) takes precedence over `fps`.
        if num_frames is None and fps is not None:
            if metadata is None:
                raise ValueError(
                    "Asked to sample `fps` frames per second but no video metadata was provided which is required when sampling with `fps`. "
                    "Please pass in `VideoMetadata` object or use a fixed `num_frames` per input video"
                )
            num_frames = int(total_num_frames / metadata["fps"] * fps)

        # Guard against an unresolved frame count: without it, the arithmetic below
        # would fail with an opaque `TypeError` (division by None).
        if num_frames is None:
            raise ValueError(
                "Could not determine how many frames to sample. Pass `num_frames`, or pass `fps` together with video metadata."
            )

        # `True` centers the sampling grid: start half a sampling interval into the video.
        if initial_shift is True:
            initial_shift = total_num_frames / num_frames / 2

        if num_frames > total_num_frames:
            raise ValueError(
                f"Video can't be sampled. The `num_frames={num_frames}` exceeds `total_num_frames={total_num_frames}`. "
            )

        # Uniformly spaced (possibly fractional) positions, truncated to integer indices.
        indices = torch.arange(initial_shift, total_num_frames, total_num_frames / num_frames).int()
        video = video[indices].contiguous()
        return video

    def _preprocess(
        self,
        videos: list["torch.Tensor"],
        video_metadata: Union[list[VideoMetadata], list[dict]],
        do_convert_rgb: bool,
        do_resize: bool,
        size: SizeDict,
        size_divisor: Optional[int],
        interpolation: Optional["F.InterpolationMode"],
        do_center_crop: bool,
        crop_size: SizeDict,
        do_rescale: bool,
        do_pad: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: Optional[Union[float, list[float]]],
        image_std: Optional[Union[float, list[float]]],
        do_sample_frames: Optional[bool] = None,
        fps: Optional[Union[int, float]] = None,
        num_frames: Optional[int] = None,
        initial_shift: Optional[Union[bool, float, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        # Fixed annotation: this is a device passed to `video.to(device)`, not a tensor.
        device: Optional["torch.device"] = None,
        **kwargs,
    ) -> BatchFeature:
        """Transform a list of videos into a `BatchFeature` of `pixel_values_videos`.

        Pipeline: optional frame sampling -> optional device move -> (per shape group)
        RGB conversion and resize -> (per shape group) center crop, rescale, normalize.
        """
        if do_sample_frames:
            # Sample video frames
            videos = [
                self.sample_frames(video, metadata, fps=fps, num_frames=num_frames, initial_shift=initial_shift)
                for video, metadata in zip(videos, video_metadata)
            ]

        # We need to sample frames first before moving to device, if `do_sample_frames=True`. Otherwise
        # moving the whole video incurs high GPU mem usage for long videos
        if device is not None:
            videos = [video.to(device) for video in videos]

        # Group videos by size for batched resizing
        grouped_videos, grouped_videos_index = group_videos_by_shape(videos)
        resized_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            if do_convert_rgb:
                stacked_videos = self.convert_to_rgb(stacked_videos)
            if do_resize:
                stacked_videos = self.resize(
                    stacked_videos, size=size, size_divisor=size_divisor, interpolation=interpolation
                )
            resized_videos_grouped[shape] = stacked_videos
        resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index)

        # Group videos by size for further processing
        # Needed in case do_resize is False, or resize returns videos with different sizes
        grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos)
        processed_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            if do_center_crop:
                stacked_videos = self.center_crop(stacked_videos, crop_size)
            # Fused rescale and normalize
            stacked_videos = self.rescale_and_normalize(
                stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_videos_grouped[shape] = stacked_videos

        processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)
        # Only stack into a single tensor when a tensor return type was requested;
        # otherwise keep the list of per-video tensors.
        processed_videos = torch.stack(processed_videos, dim=0) if return_tensors else processed_videos

        return BatchFeature(data={"pixel_values_videos": processed_videos}, tensor_type=return_tensors)
195
+
196
+
197
# Public API of this module.
__all__ = ["InternS1VideoProcessor"]