import os
import os.path as osp
import pathlib
import open3d as o3d
import numpy as np
import pandas as pd
import zarr
import pickle
import tqdm

# helper functions
# ================
def _get_groups_df(samples_group):
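    """Gather each sample group's attribute dict into a DataFrame indexed by group key."""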
    rows = dict()
    for key, group in samples_group.items():
        rows[key] = group.attrs.asdict()
    groups_df = pd.DataFrame(data=list(rows.values()), index=rows.keys())
    groups_df.drop_duplicates(inplace=True)
    groups_df['group_key'] = groups_df.index
    return groups_df


class VRFoldingDatasetExample:
    def __init__(self,
                 # zarr
                 zarr_path: str,
                 num_pc_sample: int = 4000,
                 num_views: int = 4,
                 only_foreground_pc: bool = True,
                 vis=False,
                 # catch all
                 **kwargs):
        """
        Simple dataset class for loading VR-Folding samples stored in .zarr format.
        """
        super().__init__()
        path = pathlib.Path(os.path.expanduser(zarr_path))
        assert path.exists()
        self.zarr_path = str(path.absolute())
        root = zarr.open(self.zarr_path, mode='r')
        samples_group = root['samples']

        # peek at one sample group to show the data layout
        _, sample_group = next(samples_group.groups())
        print(sample_group.tree())

        # load group metadata
        groups_df = _get_groups_df(samples_group)
        # check if index is sorted
        assert groups_df.index.is_monotonic_increasing
        groups_df['idx'] = np.arange(len(groups_df))

        # global state
        self.samples_group = samples_group
        self.groups_df = groups_df
        # params
        self.num_pc_sample = num_pc_sample
        self.num_views = num_views
        self.only_foreground_pc = only_foreground_pc
        self.vis = vis

        video_info_path = self.zarr_path
        # find all video sequences
        self.find_video_idxs(video_info_path)
        # find all valid grip intervals
        self.find_valid_grip_intervals(video_info_path)

    def find_video_idxs(self, video_seq_cache_dir: str):
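        """Build video_id -> sample-index and sample-index -> video_id mappings, cached as a pickle."""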
        os.makedirs(video_seq_cache_dir, exist_ok=True)
        cache_path = os.path.join(video_seq_cache_dir, 'video_seq.pkl')
        if os.path.exists(cache_path):
            print('Loading video sequences cache from {}'.format(cache_path))
            with open(cache_path, 'rb') as f:
                self.video_to_idxs_dict, self.idx_to_video_list = pickle.load(f)
        else:
            data_length = len(self)
            self.video_to_idxs_dict = dict()
            self.idx_to_video_list = []
            print('Finding video sequences...')
            for idx in tqdm.tqdm(range(data_length), ncols=0):
                row = self.groups_df.iloc[idx]
                group = self.samples_group[row.group_key]
                attrs = group.attrs.asdict()
                video_id = attrs['video_id']
                if video_id not in self.video_to_idxs_dict:
                    self.video_to_idxs_dict[video_id] = []
                self.video_to_idxs_dict[video_id].append(idx)
                self.idx_to_video_list.append(video_id)
            print('Finished finding video sequences!')
            print('Saving video sequences cache to {}'.format(cache_path))
            with open(cache_path, 'wb') as f:
                pickle.dump((self.video_to_idxs_dict, self.idx_to_video_list), f)

    def find_valid_grip_intervals(self, video_seq_cache_dir: str):
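        """Find index intervals during which at least one hand grasps the garment.
        Results are cached as a pickle; frames outside every interval map to -1."""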
        os.makedirs(video_seq_cache_dir, exist_ok=True)
        def is_valid_grip(grip_vertex_ids):
            return grip_vertex_ids[0] != -1
        cache_path = os.path.join(video_seq_cache_dir, 'video_grip_interval_v2.pkl')
        if os.path.exists(cache_path):
            print('Loading video grip interval cache from {}'.format(cache_path))
            with open(cache_path, 'rb') as f:
                self.interval_to_idxs_dict, self.idx_to_interval_list = pickle.load(f)
        else:
            data_length = len(self)
            self.interval_to_idxs_dict = dict()
            self.idx_to_interval_list = []
            assert self.video_to_idxs_dict is not None
            print('Finding video valid grip intervals...')
            in_interval = False
            interval_count = 0
            for idx in tqdm.tqdm(range(data_length), ncols=0):
                row = self.groups_df.iloc[idx]
                group = self.samples_group[row.group_key]
                attrs = group.attrs.asdict()
                video_id = attrs['video_id']
                grip_point_group = group['grip_vertex_id']
                left_grip_vertex_ids = grip_point_group['left_grip_vertex_id'][:]
                right_grip_vertex_ids = grip_point_group['right_grip_vertex_id'][:]
                if not in_interval and (is_valid_grip(left_grip_vertex_ids) or is_valid_grip(right_grip_vertex_ids)):
                    # an interval starts when either hand grasps the garment
                    self.interval_to_idxs_dict[interval_count] = []
                    in_interval = True

                if in_interval:
                    self.interval_to_idxs_dict[interval_count].append(idx)
                    self.idx_to_interval_list.append(interval_count)
                else:
                    self.idx_to_interval_list.append(-1)

                if (in_interval and not is_valid_grip(left_grip_vertex_ids) and not is_valid_grip(right_grip_vertex_ids)) \
                        or self.video_to_idxs_dict[video_id][-1] == idx:
                    # the interval ends when both hands release the garment, or when the video ends
                    in_interval = False
                    interval_count += 1
            print('Finished finding {} valid grip intervals!'.format(interval_count))
            print('Saving grip interval cache to {}'.format(cache_path))
            with open(cache_path, 'wb') as f:
                pickle.dump((self.interval_to_idxs_dict, self.idx_to_interval_list), f)

    def __len__(self):
        return len(self.groups_df)

    def data_io(self, idx: int) -> dict:
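        """Read the raw arrays and attributes of sample `idx` from the zarr store."""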
        row = self.groups_df.iloc[idx]
        group = self.samples_group[row.group_key]

        # io
        attrs = group.attrs.asdict()
        instance_id = attrs['instance_id']
        scale = attrs['scale']
        pc_group = group['point_cloud']
        mesh_group = group['mesh']
        grip_point_group = group['grip_vertex_id']
        hand_pose_group = group['hand_pose']
        if 'cls' in pc_group:
            pc_cls = pc_group['cls'][:]
            pc_cls[pc_cls > 0] = 1  # only two classes (foreground + background)
        else:
            pc_cls = np.zeros(pc_group['point'].shape[0], dtype=np.uint8)
        data = {
            'cloth_sim_verts': mesh_group['cloth_verts'][:],  # complete mesh vertices in task space
            'cloth_nocs_verts': mesh_group['cloth_nocs_verts'][:],  # complete mesh vertices in NOCS space
            'cloth_faces_tri': mesh_group['cloth_faces_tri'][:],  # mesh faces triangles
            'pc_nocs': pc_group['nocs'][:],  # NOCS coordinates of input partial point cloud
            'pc_sim': pc_group['point'][:],  # XYZ of input partial point cloud
            'pc_sim_rgb': pc_group['rgb'][:],  # RGB of input partial point cloud
            'pc_sizes': pc_group['sizes'][:],  # per-view number of points in input partial point cloud
            'pc_cls': pc_cls,  # classification label of input partial point cloud
            'left_grip_vertex_ids': grip_point_group['left_grip_vertex_id'][:],  # left-hand grasped mesh vertex id
            'right_grip_vertex_ids': grip_point_group['right_grip_vertex_id'][:],  # right-hand grasped mesh vertex id
            'left_hand_pos': hand_pose_group['left_hand_pos'][:],  # the positions of 25 finger bones in left hand
            'right_hand_pos': hand_pose_group['right_hand_pos'][:],  # the positions of 25 finger bones in right hand
            'left_hand_euler': hand_pose_group['left_hand_euler'][:],  # the Euler angles of 25 finger bones in left hand
            'right_hand_euler': hand_pose_group['right_hand_euler'][:],  # the Euler angles of 25 finger bones in right hand
            'video_id': attrs['video_id'],  # video id
            'scale': scale
        }

        return data

    def get_base_data(self, idx: int, seed: int, data_in: dict) -> dict:
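        """Subsample the partial point cloud (optionally foreground-only, from a random
        subset of views) and compute the mean grasp point of each hand."""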
        num_pc_sample = self.num_pc_sample
        num_views = self.num_views
        if self.only_foreground_pc:
            foreground_idxs = data_in['pc_cls'] == 0
            if data_in['pc_cls'].shape[0] != data_in['pc_sim_rgb'].shape[0]:
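                # label count does not match the point count; fall back to keeping every point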
                foreground_idxs = np.arange(data_in['pc_sim_rgb'].shape[0])
            data_in['pc_sim_rgb'] = data_in['pc_sim_rgb'][foreground_idxs]
            data_in['pc_sim'] = data_in['pc_sim'][foreground_idxs]
            data_in['pc_nocs'] = data_in['pc_nocs'][foreground_idxs]
            data_in['pc_cls'] = data_in['pc_cls'][foreground_idxs]

        rs = np.random.RandomState(seed=seed)
        all_idxs = np.arange(len(data_in['pc_sim']))
        all_num_views = len(data_in['pc_sizes'])
        if num_views < all_num_views:
            idxs_mask = np.zeros_like(all_idxs, dtype=bool)  # np.bool was removed in NumPy >= 1.24
            selected_view_idxs = np.sort(rs.choice(all_num_views, size=num_views, replace=False))
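            # prefix sum of per-view point counts gives each view's start/end offsets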
            view_idxs = np.concatenate([[0], np.cumsum(data_in['pc_sizes'])])
            for i in selected_view_idxs:
                idxs_mask[view_idxs[i]: view_idxs[i+1]] = True
            all_idxs = all_idxs[idxs_mask]

        if all_idxs.shape[0] >= num_pc_sample:
            selected_idxs = rs.choice(all_idxs, size=num_pc_sample, replace=False)
        else:
            # fewer points than requested: shuffle, then pad by repeating indices from the front
            np.random.seed(seed)
            np.random.shuffle(all_idxs)
            res_num = num_pc_sample - len(all_idxs)
            selected_idxs = np.concatenate([all_idxs, all_idxs[:res_num]], axis=0)

        pc_sim_rgb = data_in['pc_sim_rgb'][selected_idxs].astype(np.float32) / 255
        pc_sim = data_in['pc_sim'][selected_idxs].astype(np.float32)
        pc_nocs = data_in['pc_nocs'][selected_idxs].astype(np.float32)
        pc_cls = data_in['pc_cls'][selected_idxs].astype(np.int64)
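        # invalidate NOCS targets for background points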
        pc_nocs[pc_cls != 0, :] = -1.0

        dataset_idx = np.array([idx])
        video_id = np.array([int(data_in['video_id'])])
        scale = np.array([data_in['scale']])

        cloth_sim_verts = data_in['cloth_sim_verts']
        cloth_nocs_verts = data_in['cloth_nocs_verts']
        left_grip_vertex_ids = data_in['left_grip_vertex_ids']
        right_grip_vertex_ids = data_in['right_grip_vertex_ids']
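        # sentinel values that mean "no grasp" for the corresponding hand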
        left_grip_point_sim = np.array([-10., -10., -10.], dtype=np.float32)
        right_grip_point_sim = np.array([-10., -10., -10.], dtype=np.float32)
        left_grip_point_nocs = np.array([-2., -2., -2.], dtype=np.float32)
        right_grip_point_nocs = np.array([-2., -2., -2.], dtype=np.float32)
        is_left_hand_valid_grasp = False
        is_right_hand_valid_grasp = False
        for hand_id, grip_vertex_ids in enumerate((left_grip_vertex_ids, right_grip_vertex_ids)):
            if grip_vertex_ids[0] != -1:
                # valid grasp point on the garment
                grip_vertices_sim = cloth_sim_verts[grip_vertex_ids, :]
                mean_grip_point_sim = np.mean(grip_vertices_sim, axis=0)
                grip_vertices_nocs = cloth_nocs_verts[grip_vertex_ids, :]
                mean_grip_point_nocs = np.mean(grip_vertices_nocs, axis=0)
                if hand_id == 0:
                    left_grip_point_sim = mean_grip_point_sim.astype(np.float32)
                    left_grip_point_nocs = mean_grip_point_nocs.astype(np.float32)
                    is_left_hand_valid_grasp = True
                else:
                    right_grip_point_sim = mean_grip_point_sim.astype(np.float32)
                    right_grip_point_nocs = mean_grip_point_nocs.astype(np.float32)
                    is_right_hand_valid_grasp = True

        data = {
            'x': pc_sim_rgb, # RGB of input partial point cloud
            'y': pc_nocs,  # NOCS coordinates of input partial point cloud
            'pos': pc_sim,  # XYZ of input partial point cloud
            'cls': pc_cls,  # classification label of input partial point cloud
            'dataset_idx': dataset_idx, # dataset index
            'video_id': video_id,  # video id
            'left_grip_point_sim': left_grip_point_sim,  # left hand grasp-point in task space
            'left_grip_point_nocs': left_grip_point_nocs,  # left hand grasp-point in NOCS space
            'right_grip_point_sim': right_grip_point_sim,  # right hand grasp-point in task space
            'right_grip_point_nocs': right_grip_point_nocs,  # right hand grasp-point in NOCS space
            'scale': scale
        }

        if self.vis:
            vis_list = []
            pc_rgb_sim_pcd = o3d.geometry.PointCloud()
            pc_rgb_sim_pcd.points = o3d.utility.Vector3dVector(pc_sim)
            pc_rgb_sim_pcd.colors = o3d.utility.Vector3dVector(pc_sim_rgb)
            vis_list.append(pc_rgb_sim_pcd)

            mesh_sim_pcd = o3d.geometry.PointCloud()
            mesh_sim_pcd.points = o3d.utility.Vector3dVector(cloth_sim_verts)
            mesh_sim_pcd.colors = o3d.utility.Vector3dVector(cloth_nocs_verts)
            vis_list.append(mesh_sim_pcd.translate((0.8, 0., 0.)))

            if is_left_hand_valid_grasp:
                left_grasp_sphere = o3d.geometry.TriangleMesh.create_sphere(radius=0.05)
                left_grasp_sphere.paint_uniform_color([0.9, 0.1, 0.1]) # red
                left_grasp_sphere = left_grasp_sphere.translate(left_grip_point_sim)
                vis_list.append(left_grasp_sphere)

            if is_right_hand_valid_grasp:
                right_grasp_sphere = o3d.geometry.TriangleMesh.create_sphere(radius=0.05)
                right_grasp_sphere.paint_uniform_color([0.1, 0.1, 0.9])  # blue
                right_grasp_sphere = right_grasp_sphere.translate(right_grip_point_sim)
                vis_list.append(right_grasp_sphere)

            # visualization
            vis = o3d.visualization.Visualizer()
            vis.create_window(window_name='Press q or Esc to quit', width=1640, height=1080)
            for item in vis_list:
                vis.add_geometry(item)
            vis.get_render_option().load_from_json(osp.join(osp.curdir, 'render_option.json'))
            param = o3d.io.read_pinhole_camera_parameters(osp.join(os.getcwd(), 'view_point.json'))
            vis.get_view_control().convert_from_pinhole_camera_parameters(param)
            vis.run()
            param = vis.get_view_control().convert_to_pinhole_camera_parameters()
            o3d.io.write_pinhole_camera_parameters(osp.join(os.getcwd(), 'view_point.json'), param)
            vis.close()

        return data

    def __getitem__(self, idx: int) -> dict:
        raw_data = self.data_io(idx)
        input_data = self.get_base_data(idx, seed=idx, data_in=raw_data)
        return input_data

if __name__ == '__main__':
    os.chdir(osp.dirname(osp.realpath(__file__)))
    zarr_path = osp.join(osp.dirname(osp.realpath(__file__)), 'VR_Folding', 'vr_simulation_folding_dataset_example.zarr', 'Tshirt')
    print(zarr_path)
    dataset = VRFoldingDatasetExample(zarr_path=zarr_path, vis=True)
    for i in range(len(dataset)):
        if dataset.idx_to_interval_list[i] == -1:
            # skip static frames that lie outside any valid grasp interval
            continue
        data = dataset[i]
        video_id = data['video_id']
        print(f'Reading sample {i}, video {video_id}! Press q or Esc in the window to close the visualization of the current frame.')
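
# Usage sketch (assumption: PyTorch is installed; it is not required by the code above).
# Since VRFoldingDatasetExample implements __len__ and __getitem__ and returns dicts of
# fixed-size numpy arrays, it can be wrapped directly in a torch DataLoader:
#
#   from torch.utils.data import DataLoader
#   loader = DataLoader(VRFoldingDatasetExample(zarr_path=zarr_path, vis=False),
#                       batch_size=8, shuffle=True, num_workers=0)
#   for batch in loader:
#       print(batch['pos'].shape)  # e.g. (8, 4000, 3): batched point-cloud XYZ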