bpiyush committed
Commit 3bec7e0 · verified · 1 Parent(s): 0be2930

Upload modeling_tara.py with huggingface_hub

Files changed (1)
  1. modeling_tara.py +383 -0
modeling_tara.py ADDED
@@ -0,0 +1,383 @@
+ import os
+ from abc import ABCMeta, abstractmethod
+ from typing import Optional, Union, Dict, List
+ from termcolor import colored
+ import random
+
+
+ import numpy as np
+ import torch
+ from transformers import (
+     AutoProcessor,
+     AutoTokenizer,
+     LlavaConfig,
+     LlamaForCausalLM,
+ )
+ from torchvision.transforms.v2 import (
+     ToPILImage,
+ )
+ import decord
+ from decord import VideoReader
+
+ # TODO: need to use these directly
+ from tarsier.modeling_tarsier import TarsierForConditionalGeneration
+ from tarsier.processor import Processor
+ # from utils.model import transform_pixel_values
+
+ decord.bridge.set_bridge("torch")
+
+
+ EOL_PROMPTS = {
+     'text': '<sent>\nSummary above sentence in one word:',
+     'image': '<image>\nSummary above image in one word:',
+     'video': '<video>\nSummary above video in one word:',
+ }
+
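+ # Illustrative note (not part of the original file): once wrapped in the chat template
+ # defined on BaseModelForTARA below, the video prompt the model actually sees is
+ #   'USER: <video>\nSummary above video in one word: ASSISTANT: '
+ # and encode_vision later expands '<video>' into one '<image>' placeholder per frame.
+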
+
+ def transform_pixel_values(pixel_values: torch.Tensor | List[torch.Tensor]) -> torch.Tensor:
+     # NOTE: this function doesn't accept unbatched inputs
+     # pixel_values should be uint8 of (B, T, C, H, W)
+     if isinstance(pixel_values, list):
+         pixel_values = torch.stack(pixel_values)
+
+     if pixel_values.ndim == 4:
+         # pixel_values is (B, C, H, W)
+         # (B, C, H, W) -> (B, 1, C, H, W)
+         pixel_values = pixel_values.unsqueeze(1)
+     elif pixel_values.ndim == 5:
+         # pixel_values is (B, T, C, H, W)
+         pass
+     else:
+         raise ValueError(f"pixel_values should be 4D or 5D, got {pixel_values.ndim}D")
+     return pixel_values
+
+
+ base_registry = {}
+ class BaseModel(metaclass=ABCMeta):
+     def __init_subclass__(cls, **kwargs):
+         super().__init_subclass__(**kwargs)
+         # register model architecture
+         if hasattr(cls, 'ARCHITECTURE'):
+             base_registry[cls.ARCHITECTURE] = cls
+
+     @classmethod
+     def from_pretrained(
+         cls,
+         model_name_or_path: str,
+         load_llm: bool = False,
+         device_map: Optional[Union[str, Dict[str, int]]] = None,
+         **kwargs):
+         print(colored(f'[ MODEL ] Loading {cls.__name__} from {model_name_or_path} [..............]', 'yellow'))
+
+         return cls(model_name_or_path, load_llm=load_llm, device_map=device_map, **kwargs)
+
+
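+ # Illustrative note (not part of the original file): `base_registry` maps an ARCHITECTURE
+ # string to the wrapper class that declares it, so a checkpoint could in principle be
+ # resolved generically, e.g.:
+ #   wrapper_cls = base_registry["TarsierForConditionalGeneration"]
+ #   model = wrapper_cls.from_pretrained(model_name_or_path)
+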
+ class BaseModelForTARA(BaseModel):
+
+     ARCHITECTURE = "TarsierForConditionalGeneration"
+     LLM_CLASS = LlamaForCausalLM
+     MLLM_CLASS = TarsierForConditionalGeneration
+
+     @property
+     def describe_prompt(self):
+         return "Describe the video in detail."
+
+     @property
+     def text_eol_prompt(self):
+         prompt = f'USER: {EOL_PROMPTS["text"]} ASSISTANT: '
+         return prompt
+
+     @property
+     def image_eol_prompt(self):
+         prompt = f'USER: {EOL_PROMPTS["image"]} ASSISTANT: '
+         return prompt
+
+     @property
+     def video_eol_prompt(self):
+         prompt = f'USER: {EOL_PROMPTS["video"]} ASSISTANT: '
+         return prompt
+
+     def __init__(
+         self,
+         model_name_or_path: str,
+         load_llm: Optional[bool] = None,
+         device_map: Optional[Union[str, Dict[str, int]]] = None,
+         **kwargs,
+     ):
+
+         MODEL_CLASS = self.LLM_CLASS if load_llm else self.MLLM_CLASS
+
+         if load_llm:
+             self.split_weights(model_name_or_path, model_name_or_path + '-llm')
+             model_name_or_path += '-llm'
+             model_config = None
+             self.processor = AutoProcessor.from_pretrained(model_name_or_path, use_fast=False)
+         else:
+             model_config = LlavaConfig.from_pretrained(
+                 model_name_or_path,
+                 # trust_remote_code=True,
+             )
+             self.processor = Processor(
+                 model_name_or_path,
+                 max_n_frames=32,
+             )
+
+         self.tokenizer = self.processor.tokenizer
+
+         self.model = MODEL_CLASS.from_pretrained(
+             model_name_or_path,
+             config=model_config,
+             torch_dtype=kwargs.get("torch_dtype", torch.bfloat16),
+             device_map=device_map,
+             # trust_remote_code=True
+         )
+
+         self.model.eval()
+
+     def split_weights(self, mllm_path, llm_path):
+         if os.path.exists(llm_path):
+             print(f'{llm_path} already exists. Skip splitting weights.')
+             return
+         print('Splitting LLM weights from MLLM.')
+         model = self.MLLM_CLASS.from_pretrained(mllm_path)
+         llm = model.language_model
+         processor = AutoProcessor.from_pretrained(mllm_path)
+         tokenizer = AutoTokenizer.from_pretrained(mllm_path)
+         llm.save_pretrained(llm_path)
+         processor.save_pretrained(llm_path)
+         tokenizer.save_pretrained(llm_path)
+
+
+ encoder_registry = {}
+ class EncodeMixin(metaclass=ABCMeta):
+     def __init_subclass__(cls, **kwargs):
+         super().__init_subclass__(**kwargs)
+         # register model architecture
+         if hasattr(cls, 'ARCHITECTURE'):
+             encoder_registry[cls.ARCHITECTURE] = cls
+
+     @abstractmethod
+     def encode_vision(self, pixel_values: torch.Tensor | List[torch.Tensor]) -> torch.Tensor:
+         """
+         Encodes vision data (images or videos) into a tensor representation.
+
+         Args:
+             pixel_values (torch.Tensor | List[torch.Tensor]): The input pixel values.
+                 - If a tensor, it should be of shape (B, C, H, W) for images or (B, T, C, H, W) for videos.
+                 - If a list, it will be stacked into a tensor.
+
+         Returns:
+             torch.Tensor: The encoded tensor representation of the input vision data.
+
+         Raises:
+             ValueError: If `pixel_values` is not 4D or 5D.
+
+         Notes:
+             - This function does not accept unbatched inputs.
+             - `pixel_values` should be of type uint8.
+         """
+         raise NotImplementedError
+
+     @abstractmethod
+     def encode_text(self, text: str | List[str]) -> torch.Tensor:
+         """
+         Encodes the given text(s) into a tensor representation using the model.
+
+         Args:
+             text (str | List[str]): A single string or a list of strings to be encoded.
+
+         Returns:
+             torch.Tensor: The tensor representation of the encoded text(s).
+
+         Notes:
+             - The method uses a prompt to encode the text.
+             - If a single string is provided, it is converted into a list containing that string.
+             - The method processes the prompts and generates the tensor representation using the model.
+             - The output tensor contains the hidden states of the last token for each input text.
+         """
+         raise NotImplementedError
+
+
+ class TARA(BaseModelForTARA, EncodeMixin):
+
+     def encode_vision(self, pixel_values: torch.Tensor | List[torch.Tensor]) -> torch.Tensor:
+
+         pixel_values = transform_pixel_values(pixel_values)  # [B, T, C, H, W]
+         nframes = pixel_values.shape[1]
+         prompt = self.image_eol_prompt if nframes == 1 else self.video_eol_prompt
+
+         to_image = ToPILImage()
+         batched_frames = []
+         for batch in pixel_values:
+             frames = [to_image(v) for v in batch]
+             batched_frames.append(frames)
+
+         generate_kwargs = {
+             "max_new_tokens": 1,
+             "output_hidden_states": True,
+             "return_dict_in_generate": True,
+         }
+
+         vision_embs = []
+
+         for frames in batched_frames:
+             input_prompt = prompt.replace("<video>", "<image>" * len(frames))
+             input_ids = self.processor.get_text_inputs(input_prompt)
+             frames = self.processor.get_pixel_values(frames)
+             inputs = {
+                 "input_ids": input_ids,
+                 "pixel_values": frames
+             }
+             inputs = {k: v.to(self.model.device) for k, v in inputs.items() if v is not None}
+             outputs = self.model.generate(
+                 **inputs,
+                 **generate_kwargs,
+             )
+             # last-layer hidden state of the final prompt token at the first generation step
+             vision_embs.append(outputs.hidden_states[0][-1][:, -1, :])
+
+         vision_embs = torch.cat(vision_embs)
+         return vision_embs
+
+     def encode_text(self, text: str | List[str]) -> torch.Tensor:
+
+         prompt = self.text_eol_prompt
+
+         if isinstance(text, str):
+             text = [text]
+
+         prompts = [prompt.replace('<sent>', t) for t in text]
+
+         generate_kwargs = {
+             "max_new_tokens": 1,
+             "output_hidden_states": True,
+             "return_dict_in_generate": True,
+         }
+
+         text_embs = []
+
+         for p in prompts:
+             text_inputs = self.processor.get_text_inputs(p)
+             inputs = {
+                 "input_ids": text_inputs,
+             }
+             inputs = {k: v.to(self.model.device) for k, v in inputs.items() if v is not None}
+             outputs = self.model.generate(
+                 **inputs,
+                 **generate_kwargs,
+             )
+             # last-layer hidden state of the final prompt token at the first generation step
+             text_embs.append(outputs.hidden_states[0][-1][:, -1, :])
+
+         text_embs = torch.cat(text_embs)
+         return text_embs
+
+
+ def get_frame_indices(num_frames, vlen, sample='rand', fix_start=None, input_fps=1, max_num_frames=-1):
+     if sample in ["rand", "middle"]:  # uniform sampling
+         acc_samples = min(num_frames, vlen)
+         # split the video into `acc_samples` intervals, and sample from each interval.
+         intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int)
+         ranges = []
+         for idx, interv in enumerate(intervals[:-1]):
+             ranges.append((interv, intervals[idx + 1] - 1))
+         if sample == 'rand':
+             try:
+                 frame_indices = [random.choice(range(x[0], x[1])) for x in ranges]
+             except:
+                 frame_indices = np.random.permutation(vlen)[:acc_samples]
+                 frame_indices.sort()
+                 frame_indices = list(frame_indices)
+         elif fix_start is not None:
+             frame_indices = [x[0] + fix_start for x in ranges]
+         elif sample == 'middle':
+             frame_indices = [(x[0] + x[1]) // 2 for x in ranges]
+         else:
+             raise NotImplementedError
+
+         if len(frame_indices) < num_frames:  # pad with the last frame
+             padded_frame_indices = [frame_indices[-1]] * num_frames
+             padded_frame_indices[:len(frame_indices)] = frame_indices
+             frame_indices = padded_frame_indices
+     elif "fps" in sample:  # e.g. fps0.5: sequentially sample frames at 0.5 fps
+         output_fps = float(sample[3:])
+         duration = float(vlen) / input_fps
+         delta = 1 / output_fps  # gap between frames; this is also the clip length each frame represents
+         frame_seconds = np.arange(0 + delta / 2, duration + delta / 2, delta)
+         frame_indices = np.around(frame_seconds * input_fps).astype(int)
+         frame_indices = [e for e in frame_indices if e < vlen]
+         if max_num_frames > 0 and len(frame_indices) > max_num_frames:
+             frame_indices = frame_indices[:max_num_frames]
+             # frame_indices = np.linspace(0 + delta / 2, duration + delta / 2, endpoint=False, num=max_num_frames)
+     else:
+         raise ValueError
+     return frame_indices
+
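+ # Worked example (illustrative, not part of the original file): with vlen=100 and
+ # sample='middle', num_frames=4 gives intervals [0, 25, 50, 75, 100], per-interval
+ # ranges [(0, 24), (25, 49), (50, 74), (75, 99)], and frame_indices [12, 37, 62, 87].
+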
+ def read_frames_decord(
+     video_path, num_frames, sample='middle', fix_start=None,
+     max_num_frames=-1, trimmed30=False, height=-1, width=-1
+ ):
+     decord.bridge.set_bridge('torch')
+
+     # num_threads = 1 if video_path.endswith('.webm') else 0  # make ssv2 happy
+     num_threads = 1
+     video_reader = VideoReader(video_path, num_threads=num_threads, height=height, width=width)
+     try:
+         vlen = len(video_reader)
+
+         fps = video_reader.get_avg_fps()
+         duration = vlen / float(fps)
+
+         # only use the first 30 seconds
+         if trimmed30 and duration > 30:
+             duration = 30
+             vlen = int(30 * float(fps))
+
+         frame_indices = get_frame_indices(
+             num_frames, vlen, sample=sample, fix_start=fix_start,
+             input_fps=fps, max_num_frames=max_num_frames
+         )
+
+         frames = video_reader.get_batch(frame_indices)  # (T, H, W, C), torch.uint8
+         if not isinstance(frames, torch.Tensor):
+             frames = torch.from_numpy(frames.asnumpy())
+         frames = frames.permute(0, 3, 1, 2)  # (T, C, H, W), torch.uint8
+         return frames
+     finally:
+         # Explicitly release underlying resources to avoid file descriptor leaks
+         del video_reader
+
+
+ if __name__ == "__main__":
+
+     # Load model
+     model = TARA.from_pretrained(
+         "/work/piyush/experiments/CaRe/Tarsier-7b/final-10112025/nli_9000+ego_1000+subj_replaced-seed_42/merged_checkpoint",
+         device_map='auto',
+         torch_dtype=torch.bfloat16,
+     )
+     n_params = sum(p.numel() for p in model.model.parameters())
+     print(f"Number of parameters: {round(n_params / 1e9, 3)}B")
+
+     # Encode a sample video
+     print(colored("Testing video encoding...", 'cyan'))
+     video_path = "./assets/folding_paper.mp4"
+     video_tensor = read_frames_decord(video_path, num_frames=16)
+     video_tensor = video_tensor.unsqueeze(0)
+     video_tensor = video_tensor.to(model.model.device)
+     with torch.no_grad():
+         video_emb = model.encode_vision(video_tensor).cpu().squeeze(0).float()
+     print("Video shape:", video_tensor.shape)  # torch.Size([1, 16, 3, 240, 426])
+     print("Video embedding shape:", video_emb.shape)  # torch.Size([4096])
+
+     # Encode sample texts
+     print(colored("Testing text encoding...", 'cyan'))
+     text = ['someone is folding a paper', 'cutting a paper', 'someone is folding a paper']
+     # NOTE: encode_text also accepts a single string
+     with torch.no_grad():
+         text_emb = model.encode_text(text).cpu().float()
+     print("Text:", text)
+     print("Text embedding shape:", text_emb.shape)  # torch.Size([3, 4096])
+
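+     # Illustrative extension (not part of the original script): score video-text similarity
+     # with cosine similarity between the embeddings computed above.
+     sim = torch.nn.functional.cosine_similarity(video_emb.unsqueeze(0), text_emb, dim=-1)
+     print("Video-text cosine similarities:", sim.tolist())  # higher = closer match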