Repoaner committed on
Commit
982f7e0
·
verified ·
1 Parent(s): 99d6453

Upload LLaVA-Next-3D/data_precessing/sam2_mask.py with huggingface_hub

Browse files
LLaVA-Next-3D/data_precessing/sam2_mask.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pycocotools import mask as maskUtils
2
+ import numpy as np
3
+ import matplotlib.pyplot as plt
4
+ import glob
5
+ import pdb
6
+ import json
7
+ import cv2
8
+ import os
9
+ from tqdm import tqdm
10
+ from torch.utils.data import DataLoader, Dataset
11
+ from decord import VideoReader
12
+ from PIL import Image
13
+
14
def decode_video_every_4_frame(video_path):
    """Decode *video_path* with decord and return every 4th frame.

    Args:
        video_path: path to a video file readable by ``decord.VideoReader``.

    Returns:
        list[PIL.Image.Image]: frames 0, 4, 8, ... converted to PIL images.
    """
    vr = VideoReader(video_path, num_threads=1)
    # range() with a step replaces the original filtered comprehension
    # `[i for i in range(total_frames) if i % 4 == 0]`; the original also
    # initialized `frames = []` and immediately overwrote it (dead code).
    indices = list(range(0, len(vr), 4))
    batch = vr.get_batch(indices).asnumpy()  # (n, H, W, C) uint8 array
    return [Image.fromarray(frame) for frame in batch]
23
+
24
def decode_video_at_fps(video_path, target_fps=4):
    """Decode *video_path* with OpenCV, keeping every 4th frame.

    NOTE(review): despite the name and the `target_fps` parameter, the
    sampling is a fixed `frame_idx % 4 == 0` stride and never consults
    the video's native FPS — `target_fps` is currently unused. Kept
    as-is to preserve behavior; confirm intent with the author.

    Args:
        video_path: path to a video file readable by cv2.VideoCapture.
        target_fps: unused (see note above); kept for interface
            compatibility.

    Returns:
        list of BGR ndarray frames. Returns an empty list when the video
        cannot be opened (the original implicitly returned None here,
        which would crash any caller iterating the result).
    """
    cap = cv2.VideoCapture(video_path)

    if not cap.isOpened():
        print("Error: Couldn't open video.")
        return []  # consistent return type instead of implicit None

    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frames = []
    for frame_idx in range(total_frames):
        ret, frame = cap.read()
        if not ret:
            break  # stream ended early; metadata frame count can overshoot
        if frame_idx % 4 == 0:
            frames.append(frame)
    cap.release()
    return frames
42
+
43
+
44
# NOTE(review): removed dead module-level assignment `a = 0` — the name
# is never read anywhere in this file (likely a leftover debug marker).
45
+
46
class SAVDataset(Dataset):
    """Dataset over SA-V annotation JSON files.

    Each item decodes the RLE masklets of one video and writes one
    per-object bounding-box trajectory JSON into that video's folder
    under ./extra_data/SA-V/sav_train/. ``__getitem__`` is used purely
    for its side effects (a DataLoader drives the processing) and
    always returns 0.
    """

    def __init__(self, data_paths):
        # data_paths: list of *_manual.json annotation file paths.
        self.data_paths = data_paths

    def __len__(self):
        return len(self.data_paths)

    def __getitem__(self, idx):
        path = self.data_paths[idx]
        with open(path, 'r') as f:
            item = json.load(f)

        masks = item['masklet']
        video_id = item['video_id']

        # e.g. video_id "sav_012345" -> folder ".../sav_012/"
        output_folder = './extra_data/SA-V/sav_train/' + video_id[0:7] + '/'
        # if os.path.exists(output_folder + f'{video_id}_object_0.json'):
        #     return 0

        # One dict per object, keyed by masklet id, seeded with the
        # video's pixel size; per-frame bboxes are added below.
        object_info = {}
        for object_id in item['masklet_id']:
            object_info[object_id] = {
                'size': [int(item['video_width']), int(item['video_height'])]
            }

        # Renamed from `idx` (original shadowed the method parameter).
        for frame_idx, rle in enumerate(masks):
            # decode -> (H, W, num); transpose to [num, width, height]
            binary_mask = maskUtils.decode(rle).transpose((2, 1, 0))
            # NOTE(review): keys here come from enumerate(), while
            # object_info was keyed by item['masklet_id'] values — this
            # only works if masklet_id is exactly 0..num-1. Confirm
            # against the SA-V annotation format.
            for object_id, mask in enumerate(binary_mask):
                if np.sum(mask) > 0:  # object absent in this frame -> skip
                    # Compute the foreground coordinates once (the
                    # original re-ran np.where four times per object).
                    xs, ys = np.where(mask == 1)
                    x1, x2 = int(xs.min()), int(xs.max())
                    y1, y2 = int(ys.min()), int(ys.max())
                    object_info[object_id][frame_idx] = [x1, y1, x2, y2]

        for object_id, info in object_info.items():
            # The original leaked the handle via json.dump(info, open(...));
            # a context manager guarantees the file is flushed and closed.
            out_path = output_folder + f'{video_id}_object_{object_id}.json'
            with open(out_path, 'w') as out_f:
                json.dump(info, out_f, indent=4)

        return 0
85
+
86
def _collect_annotation_paths():
    """Gather and sort every *_manual.json path under the SA-V shards."""
    all_paths = []
    for data_path in glob.glob('./extra_data/SA-V/sav_train/sav_*'):
        all_paths += glob.glob(data_path + '/*_manual.json')
    all_paths.sort()  # deterministic processing order across runs
    return all_paths


def main():
    """Drive SAVDataset over all annotations via a multi-worker loader."""
    all_paths = _collect_annotation_paths()

    # Usage example:
    dataset = SAVDataset(all_paths)
    loader = DataLoader(dataset, batch_size=16, shuffle=False,
                        num_workers=16, drop_last=False)

    print(f"Number of dataset samples: {len(dataset)}")
    for idx, batch in enumerate(loader):
        print(f'processing {idx}-th batch of {len(loader)}')


if __name__ == '__main__':
    # Guard is required: num_workers=16 spawns worker processes that
    # re-import this module; without the guard, spawn-based platforms
    # would re-launch the whole pipeline in every worker.
    main()