| import os |
| import sys |
| import cv2 |
| import json |
| import numpy as np |
| from scipy.spatial.transform import Rotation as R |
| from tqdm import tqdm |
| from multiprocessing import Pool, cpu_count |
|
|
|
|
| |
| |
| |
# --- Dataset-construction hyperparameters ---
STEP = 5  # frame gap between consecutive actions within one action sequence
ACTION_SEQ_LEN = 16  # number of relative actions per training sample
# Camera serials excluded from processing (known-bad views).
SKIP_CAMS = {"104422070044", "104422070042", "135122079702", "104122061850", "104122063678", "105422061350", "f0461559"}
WINDOW_STRIDE = 5  # frame stride between consecutive sample windows
IMAGE_EXT = ".png"  # extension used for extracted frame images
FLOW_STEP = 3  # compute optical flow between every FLOW_STEP-th frame
FLOW_THRESHOLD = 0.005  # mean flow magnitude above which a sample counts as "active"
MIN_CONSECUTIVE = 15  # consecutive active flow samples required to anchor the motion segment
ACTION_THRESHOLD = 1e-3  # minimum 6-DoF action norm; lower-motion ends are trimmed
FLOW_RESIZE_WIDTH = 320  # frames are downscaled to this width before flow computation
|
|
| |
|
|
|
|
| |
| |
| |
def pose7d_to_matrix(pose7d):
    """Convert a 7-DoF pose (x, y, z, qx, qy, qz, qw) into a 4x4 homogeneous matrix."""
    px, py, pz, a, b, c, d = pose7d
    mat = np.eye(4, dtype=np.float64)
    mat[:3, :3] = R.from_quat([a, b, c, d]).as_matrix()
    mat[:3, 3] = [px, py, pz]
    return mat


def compute_action(pose_t, pose_tp1, grip_tp1):
    """Relative end-effector motion from pose_t to pose_tp1, plus next-step gripper.

    Returns:
        7-element list: [dx, dy, dz, droll, dpitch, dyaw, gripper], expressed in
        the frame of pose_t (xyz Euler convention, radians).
    """
    rel = np.linalg.inv(pose7d_to_matrix(pose_t)) @ pose7d_to_matrix(pose_tp1)
    translation = rel[:3, 3]
    euler = R.from_matrix(rel[:3, :3]).as_euler("xyz", degrees=False)
    return np.concatenate([translation, euler, [grip_tp1]], axis=0).astype(float).tolist()
|
|
|
|
def get_eef_state_from_pose7d(pose7d):
    """Convert a 7-DoF pose to a 6-DoF state [x, y, z, roll, pitch, yaw] (radians)."""
    arr = np.asarray(pose7d, dtype=float)
    euler = R.from_quat(arr[3:]).as_euler("xyz", degrees=False)
    return np.concatenate([arr[:3], euler], axis=0).astype(float).tolist()
|
|
|
|
def load_metadata(task_dir):
    """Load task metadata, trying JSON candidates first, then a pickled .npy.

    Raises:
        FileNotFoundError: when no readable metadata file exists in task_dir.
    """
    for path in (os.path.join(task_dir, "metadata.json"),
                 os.path.join(task_dir, "metadata")):
        if not os.path.exists(path):
            continue
        try:
            with open(path, "r") as fh:
                return json.load(fh)
        except Exception:
            # Unreadable or invalid JSON: fall through to the next candidate.
            continue

    npy_path = os.path.join(task_dir, "metadata.npy")
    if os.path.exists(npy_path):
        loaded = np.load(npy_path, allow_pickle=True)
        if hasattr(loaded, "item"):
            try:
                return loaded.item()
            except Exception:
                pass

    raise FileNotFoundError(f"Cannot find readable metadata in {task_dir}")
|
|
|
|
# Cache of loaded calibrations, keyed by str(calib_id).
_CALIB_CACHE = {}


def load_calibration(calib_root, calib_id):
    """Load calibration data for `calib_id`, memoized across calls.

    Returns:
        (calib_dir, extrinsics, intrinsics, devices); intrinsics/devices are
        None when their .npy files are absent.

    Raises:
        FileNotFoundError: when the calibration folder does not exist.
    """
    key = str(calib_id)
    cached = _CALIB_CACHE.get(key)
    if cached is not None:
        return cached

    calib_dir = os.path.join(calib_root, key)
    if not os.path.isdir(calib_dir):
        raise FileNotFoundError(f"Calibration folder not found: {calib_dir}")

    # Extrinsics are mandatory; intrinsics/devices are optional extras.
    extrinsics = np.load(os.path.join(calib_dir, "extrinsics.npy"),
                         allow_pickle=True).item()

    intrinsics_path = os.path.join(calib_dir, "intrinsics.npy")
    intrinsics = (np.load(intrinsics_path, allow_pickle=True).item()
                  if os.path.exists(intrinsics_path) else None)

    devices_path = os.path.join(calib_dir, "devices.npy")
    devices = (np.load(devices_path, allow_pickle=True)
               if os.path.exists(devices_path) else None)

    result = (calib_dir, extrinsics, intrinsics, devices)
    _CALIB_CACHE[key] = result
    return result
|
|
|
|
def normalize_tcp_stream(tcp_stream):
    """Normalize the tcp stream into a list of dicts, each with a 'timestamp' key.

    A list input is returned unchanged; a dict input is flattened in ascending
    integer-key order, with the key injected as 'timestamp' when absent.

    Raises:
        ValueError: on unsupported container or value types.
    """
    if isinstance(tcp_stream, list):
        return tcp_stream

    if isinstance(tcp_stream, dict):
        normalized = []
        for key in sorted(tcp_stream, key=int):
            entry = tcp_stream[key]
            if not isinstance(entry, dict):
                raise ValueError("Unsupported tcp stream dict value format.")
            entry = dict(entry)  # shallow copy: never mutate the caller's data
            entry.setdefault("timestamp", int(key))
            normalized.append(entry)
        return normalized

    raise ValueError(f"Unsupported tcp stream format: {type(tcp_stream)}")
|
|
|
|
def normalize_gripper_stream(grip_stream):
    """Normalize gripper data into a {int_timestamp: entry} mapping.

    Raises:
        ValueError: on unsupported container types.
    """
    if isinstance(grip_stream, dict):
        return {int(ts): entry for ts, entry in grip_stream.items()}

    if isinstance(grip_stream, list):
        return {int(entry["timestamp"]): entry for entry in grip_stream}

    raise ValueError(f"Unsupported gripper stream format: {type(grip_stream)}")
|
|
|
|
def get_gripper_value(grip_dict, timestamp):
    """Extract a scalar gripper value for `timestamp` from the gripper stream.

    Dict entries are searched in priority order: 'gripper_info', then
    'gripper_command', then 'gripper'. Non-empty sequences yield their first
    element; scalar payloads are used directly; empty sequences fall through
    to the next key. Missing timestamps (or dicts with no usable key) return
    the 0.0 default.

    Fixes two crashes in the original: a scalar 'gripper_info' payload fell
    through every branch and hit float(dict), and an empty-sequence
    'gripper_command' hit float([]).
    """
    if timestamp not in grip_dict:
        return 0.0

    g = grip_dict[timestamp]

    if isinstance(g, dict):
        for key in ("gripper_info", "gripper_command", "gripper"):
            if key not in g:
                continue
            val = g[key]
            if isinstance(val, (list, tuple, np.ndarray)):
                if len(val) > 0:
                    return float(val[0])
                continue  # empty sequence: try the next key
            return float(val)
        # Dict without any usable gripper key: same default as a missing
        # timestamp (the original raised TypeError here).
        return 0.0

    if isinstance(g, (list, tuple, np.ndarray)):
        return float(g[0])

    return float(g)
|
|
|
|
| |
| |
| |
def read_video_extract_and_flow(video_path, images_dir, flow_step, flow_resize_width=None):
    """
    Single-pass video reader that simultaneously:
      1) dumps every frame as frame_XXXXXX<IMAGE_EXT> into `images_dir`, and
      2) accumulates Farneback optical-flow magnitudes on a subsampled
         (every `flow_step`-th frame), optionally downscaled grayscale stream.

    Args:
        video_path: path to the video file.
        images_dir: directory receiving the extracted frames.
        flow_step: flow is computed between every `flow_step`-th frame.
        flow_resize_width: if set and smaller than the frame width, frames are
            resized to this width (aspect ratio preserved) before flow.

    Returns:
        (frame_count, magnitudes, sampled) where `magnitudes` is an np.array of
        mean flow magnitudes between consecutive sampled frames and `sampled`
        lists the original frame indices used for flow.

    Raises:
        ValueError: when the video cannot be opened.
    """
    os.makedirs(images_dir, exist_ok=True)

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError(f"Cannot open video: {video_path}")

    frame_count = 0
    prev_small = None
    magnitudes = []
    sampled = []

    while True:
        ok, frame = cap.read()
        if not ok:
            break

        frame_idx = frame_count
        frame_count += 1

        # Write the frame to disk unless a previous run already extracted it.
        img_path = os.path.join(images_dir, f"frame_{frame_idx:06d}{IMAGE_EXT}")
        if not os.path.exists(img_path):
            cv2.imwrite(img_path, frame)

        # Only every flow_step-th frame participates in flow computation.
        if frame_idx % flow_step != 0:
            continue

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Downscale before flow to bound compute cost; keep the aspect ratio.
        if flow_resize_width is not None and gray.shape[1] > flow_resize_width:
            ratio = flow_resize_width / gray.shape[1]
            small = cv2.resize(
                gray,
                (flow_resize_width, int(gray.shape[0] * ratio)),
                interpolation=cv2.INTER_AREA,
            )
        else:
            small = gray

        sampled.append(frame_idx)

        if prev_small is not None:
            flow = cv2.calcOpticalFlowFarneback(
                prev_small, small, None, 0.5, 3, 15, 3, 5, 1.2, 0
            )
            magnitudes.append(
                np.mean(np.sqrt(flow[..., 0] ** 2 + flow[..., 1] ** 2))
            )

        prev_small = small

    cap.release()
    return frame_count, np.array(magnitudes), sampled
|
|
|
|
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
|
|
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
|
|
|
|
def find_active_segment(flow_mags, threshold, min_consecutive):
    """Locate the motion segment inside a flow-magnitude sequence.

    Scans forward for the first index starting a run of `min_consecutive`
    above-threshold samples, and backward for the last index ending such a run
    (runs may be truncated at the array start). Falls back to the full range
    (0, len-1) when no qualifying run exists.
    """
    active = flow_mags > threshold
    n = len(active)

    first = 0
    for i in range(n - min_consecutive + 1):
        if active[i:i + min_consecutive].all():
            first = i
            break

    last = n - 1
    for j in range(n - 1, min_consecutive - 2, -1):
        lo = max(0, j - min_consecutive + 1)
        if active[lo:j + 1].all():
            last = j
            break

    return first, last
|
|
|
|
def compute_action_norms_for_range(tcp_list, grip_dict, start_frame, end_frame, step):
    """
    For every frame i in [start_frame, end_frame - step], compute the
    i -> i+step action and record the L2 norm of its 6-DoF motion component
    (the gripper dimension is excluded).

    Returns:
        (frame_indices, norms) as parallel lists.
    """
    frame_indices, norms = [], []

    for i in range(start_frame, end_frame - step + 1):
        src = np.asarray(tcp_list[i]["tcp"], dtype=float)
        dst = np.asarray(tcp_list[i + step]["tcp"], dtype=float)
        grip = get_gripper_value(grip_dict, int(tcp_list[i + step]["timestamp"]))

        motion = compute_action(src, dst, grip)[:6]
        frame_indices.append(i)
        norms.append(np.linalg.norm(np.asarray(motion, dtype=float)))

    return frame_indices, norms
|
|
|
|
def trim_by_action_threshold(frame_indices, norms, threshold):
    """
    Strip leading and trailing samples whose action norm is <= threshold.

    Returns:
        (valid_start, valid_end) in original frame indices; valid_end points
        STEP frames past the last above-threshold action so it covers that
        action's end frame. (None, None) when nothing survives the trim.
    """
    if not frame_indices:
        return None, None

    lo = 0
    while lo < len(norms) and norms[lo] <= threshold:
        lo += 1

    hi = len(norms) - 1
    while hi >= 0 and norms[hi] <= threshold:
        hi -= 1

    if lo > hi:
        return None, None

    return frame_indices[lo], frame_indices[hi] + STEP
|
|
|
|
def find_video_file(cam_dir):
    """Return the first recognized color-video path inside cam_dir, else None."""
    for candidate in ("color.mp4", "color.avi", "color.video", "rgb.mp4"):
        path = os.path.join(cam_dir, candidate)
        if os.path.exists(path):
            return path
    return None
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
|
|
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
|
|
| |
| |
|
|
|
|
def build_one_sample(
    task_id,
    cam_id,
    start_idx,
    tcp_list,
    grip_dict,
    camera_pose,
    cam_dir,
    dataset_root,
):
    """
    Assemble a single training sample starting at `start_idx`: five evenly
    spaced context images, the initial EEF state, and ACTION_SEQ_LEN relative
    actions each spanning STEP frames.

    Returns:
        Sample dict, or None when the action window runs past the trajectory
        end or a required frame image is missing on disk.
    """
    window_end = start_idx + ACTION_SEQ_LEN * STEP
    if window_end >= len(tcp_list):
        return None

    images_dir = os.path.join(cam_dir, "images")

    # Five keyframes spread evenly across the action window.
    num_frames = 5
    spacing = (ACTION_SEQ_LEN * STEP) // (num_frames - 1)

    image_rels = []
    for j in range(num_frames):
        fidx = start_idx + j * spacing
        abs_path = os.path.join(images_dir, f"frame_{fidx:06d}{IMAGE_EXT}")
        if not os.path.exists(abs_path):
            return None
        image_rels.append(
            os.path.relpath(abs_path, dataset_root).replace("\\", "/")
        )

    eef_state = get_eef_state_from_pose7d(
        np.asarray(tcp_list[start_idx]["tcp"], dtype=float)
    )

    action_seq = []
    for k in range(ACTION_SEQ_LEN):
        idx_t = start_idx + k * STEP
        idx_tp1 = idx_t + STEP
        # Defensive re-check; the window_end guard above should already cover this.
        if idx_tp1 >= len(tcp_list):
            return None

        pose_t = np.asarray(tcp_list[idx_t]["tcp"], dtype=float)
        pose_tp1 = np.asarray(tcp_list[idx_tp1]["tcp"], dtype=float)
        grip_value = get_gripper_value(grip_dict, int(tcp_list[idx_tp1]["timestamp"]))
        action_seq.append(compute_action(pose_t, pose_tp1, grip_value))

    return {
        "id": f"{task_id}/{cam_id}/{start_idx:06d}",
        "image_0": image_rels[0],
        "image_1": image_rels[1],
        "image_2": image_rels[2],
        "image_3": image_rels[3],
        "image_4": image_rels[4],
        "camera_pose": np.asarray(camera_pose, dtype=float).tolist(),
        "eef_state": eef_state,
        "action": action_seq,
    }
|
|
|
|
def process_camera(
    task_dir,
    task_id,
    cam_dir_name,
    tcp_all,
    grip_all,
    extrinsics,
    metadata,
    dataset_root,
):
    """Build all training samples for one camera of a task.

    Pipeline:
      1. Validity checks (skip list, bad calibration, missing data/video).
      2. Single-pass video read: extract frames to disk + optical flow.
      3. Flow-based active-segment detection, refined by action-norm trimming.
      4. Sliding-window sample generation over the surviving frame range.

    Returns:
        List of sample dicts; empty when the camera is skipped.

    Raises:
        ValueError: when the video frame count and tcp stream length disagree.
    """
    cam_id = cam_dir_name.replace("cam_", "")
    cam_dir = os.path.join(task_dir, cam_dir_name)

    # Hard-coded exclusion list of known-bad camera serials.
    if cam_id in SKIP_CAMS:
        print(f" [{cam_id}] skip: in SKIP_CAMS")
        return []

    # Views flagged as badly calibrated in the task metadata.
    bad_views = set(metadata.get("bad_calib_view", []))
    if cam_id in bad_views:
        print(f" [{cam_id}] skip: in metadata.bad_calib_view")
        return []

    # All three data sources (tcp, gripper, extrinsics) must exist for this camera.
    if cam_id not in tcp_all:
        print(f" [{cam_id}] skip: missing in tcp_base.npy")
        return []
    if cam_id not in grip_all:
        print(f" [{cam_id}] skip: missing in gripper.npy")
        return []
    if cam_id not in extrinsics:
        print(f" [{cam_id}] skip: missing in extrinsics")
        return []
    if extrinsics[cam_id] is None:
        print(f" [{cam_id}] skip: extrinsics is None")
        return []

    video_path = find_video_file(cam_dir)
    if video_path is None:
        print(f" [{cam_id}] skip: no color video")
        return []

    tcp_list = normalize_tcp_stream(tcp_all[cam_id])
    grip_dict = normalize_gripper_stream(grip_all[cam_id])
    images_dir = os.path.join(cam_dir, "images")
    print(f" [{cam_id}] reading video (extract frames + optical flow) -> {images_dir}")
    n_frames, flow_mags, sampled_indices = read_video_extract_and_flow(
        video_path=video_path,
        images_dir=images_dir,
        flow_step=FLOW_STEP,
        flow_resize_width=FLOW_RESIZE_WIDTH,
    )

    # The video and tcp stream must be frame-aligned one-to-one.
    n_tcp = len(tcp_list)
    if n_frames != n_tcp:
        raise ValueError(
            f"{task_id}/{cam_id}: extracted video frames ({n_frames}) != tcp length ({n_tcp})"
        )

    # Need at least one full action window (ACTION_SEQ_LEN actions of STEP frames).
    if n_frames < ACTION_SEQ_LEN * STEP + 1:
        print(f" [{cam_id}] skip: too short, video/tcp={n_frames}")
        return []

    # Coarse motion segmentation on the subsampled flow sequence.
    flow_start_idx, flow_end_idx = find_active_segment(
        flow_mags, FLOW_THRESHOLD, MIN_CONSECUTIVE
    )

    # Map flow-sample indices back to original frame indices. flow_mags[i]
    # measures motion between sampled frames i and i+1, hence the +1 (clamped)
    # on the end index.
    flow_start_frame = sampled_indices[flow_start_idx]
    flow_end_frame = sampled_indices[min(flow_end_idx + 1, len(sampled_indices) - 1)]

    print(f" [{cam_id}] flow valid range: [{flow_start_frame}, {flow_end_frame}]")

    # Refine the segment using end-effector action magnitudes.
    frame_indices, norms = compute_action_norms_for_range(
        tcp_list=tcp_list,
        grip_dict=grip_dict,
        start_frame=flow_start_frame,
        end_frame=flow_end_frame,
        step=STEP,
    )

    valid_start, valid_end = trim_by_action_threshold(
        frame_indices, norms, ACTION_THRESHOLD
    )

    if valid_start is None or valid_end is None:
        print(f" [{cam_id}] skip: no valid segment after action filtering")
        return []

    print(f" [{cam_id}] final valid range after action filter: [{valid_start}, {valid_end}]")

    # The trimmed segment must still fit at least one full action window.
    if valid_start + ACTION_SEQ_LEN * STEP > valid_end:
        print(f" [{cam_id}] skip: filtered segment too short")
        return []

    # Slide a window across the valid range; each position yields one sample.
    camera_pose = extrinsics[cam_id]
    items = []

    start_idx = valid_start
    while start_idx + ACTION_SEQ_LEN * STEP <= valid_end:
        sample = build_one_sample(
            task_id=task_id,
            cam_id=cam_id,
            start_idx=start_idx,
            tcp_list=tcp_list,
            grip_dict=grip_dict,
            camera_pose=camera_pose,
            cam_dir=cam_dir,
            dataset_root=dataset_root,
        )
        if sample is not None:
            items.append(sample)
        start_idx += WINDOW_STRIDE

    print(f" [{cam_id}] generated {len(items)} samples")
    return items
|
|
|
|
def process_task(task_dir, dataset_root, calib_root):
    """Process one task folder end to end.

    Loads metadata and calibration, reads the transformed tcp/gripper streams,
    then generates samples for every cam_* subfolder.

    Returns:
        List of sample dicts across all cameras (possibly empty).
    """
    task_id = os.path.basename(task_dir.rstrip("/"))
    print(f"\nProcessing task: {task_id}")

    metadata = load_metadata(task_dir)

    calib_id = metadata["calib"]
    calib_dir, extrinsics, intrinsics, devices = load_calibration(calib_root, calib_id)

    print(f" calib_id: {calib_id}")
    print(f" calib_dir: {calib_dir}")
    print(f" calib_quality: {metadata.get('calib_quality', 'N/A')}")

    transform_dir = os.path.join(task_dir, "transformed")
    tcp_path = os.path.join(transform_dir, "tcp.npy")
    grip_path = os.path.join(transform_dir, "gripper.npy")

    if not os.path.exists(tcp_path) or not os.path.exists(grip_path):
        # Message fixed: the files actually checked are tcp.npy and gripper.npy
        # (the old text claimed tcp_base.npy, which is never read here).
        print(" skip task: missing transformed/tcp.npy or gripper.npy")
        return []

    tcp_all = np.load(tcp_path, allow_pickle=True).item()
    grip_all = np.load(grip_path, allow_pickle=True).item()

    cam_dirs = sorted(
        d for d in os.listdir(task_dir)
        if d.startswith("cam_") and os.path.isdir(os.path.join(task_dir, d))
    )

    task_items = []
    for cam_dir_name in cam_dirs:
        cam_items = process_camera(
            task_dir=task_dir,
            task_id=task_id,
            cam_dir_name=cam_dir_name,
            tcp_all=tcp_all,
            grip_all=grip_all,
            extrinsics=extrinsics,
            metadata=metadata,
            dataset_root=dataset_root,
        )
        task_items.extend(cam_items)

    print(f" task total samples: {len(task_items)}")
    return task_items
|
|
|
|
def find_task_dirs(dataset_root):
    """Return sorted task_* directories under dataset_root, excluding human demos."""
    found = []
    for entry in sorted(os.listdir(dataset_root)):
        if "human" in entry or "task_" not in entry:
            continue
        full_path = os.path.join(dataset_root, entry)
        if os.path.isdir(full_path):
            found.append(full_path)
    return found
|
|
|
|
def process_task_wrapper(args):
    """Pool-friendly adapter: unpack one args tuple and shield the worker
    process from any exception (a failed task yields an empty list)."""
    task_dir, dataset_root, calib_root = args
    try:
        return process_task(task_dir, dataset_root, calib_root)
    except Exception as exc:
        print(f"Error processing {task_dir}: {exc}")
        return []
|
|
|
|
def main(dataset_root, calib_root, output_root):
    """Build the dataset: subsample up to 60 task folders, process them in a
    worker pool, and append the resulting samples to <output_root>/data.jsonl.
    """
    os.makedirs(output_root, exist_ok=True)

    task_dirs = find_task_dirs(dataset_root)
    print(f"Found {len(task_dirs)} task folders")

    # Subsample BEFORE building worker args. Previously the sampling happened
    # after `args` was constructed, so every task was still processed and the
    # subsample only (incorrectly) shrank the tqdm total.
    import random
    random.seed(42)
    task_dirs = random.sample(task_dirs, min(60, len(task_dirs)))

    n_workers = max(1, min(cpu_count(), len(task_dirs)))  # Pool requires >= 1 worker
    args = [(t, dataset_root, calib_root) for t in task_dirs]

    data_jsonl_path = os.path.join(output_root, "data.jsonl")
    total = 0

    # Append mode: repeated runs accumulate into the same jsonl file.
    with open(data_jsonl_path, "a") as f:
        with Pool(processes=n_workers) as pool:
            for task_items in tqdm(pool.imap_unordered(process_task_wrapper, args),
                                   total=len(task_dirs)):
                for item in task_items:
                    f.write(json.dumps(item) + "\n")
                f.flush()  # keep partial progress on disk if a later task crashes
                total += len(task_items)

    print("\n" + "=" * 80)
    print(f"\nDone. Total samples: {total}")
    print(f"Saved: {data_jsonl_path}")
    print("=" * 80)
|
|
|
|
if __name__ == "__main__":
    # CLI entry point: exactly three positional arguments are required.
    argv = sys.argv
    if len(argv) != 4:
        print("Usage:")
        print(" python build_idm_data.py <dataset_root> <calib_root> <output_root>")
        sys.exit(1)

    dataset_root, calib_root, output_root = argv[1], argv[2], argv[3]

    # Input roots must already exist; the output root is created by main().
    for label, path in (("dataset_root", dataset_root), ("calib_root", calib_root)):
        if not os.path.isdir(path):
            print(f"{label} does not exist: {path}")
            sys.exit(1)

    main(dataset_root, calib_root, output_root)
|
|