import json
import os
import re

import mediapy
import numpy as np
import pandas as pd
import torch
from accelerate import Accelerator
from torch.utils.data import Dataset

# diffusers is only needed when latents are encoded; keep the import optional
# so --skip_latent runs without it.
try:
    from diffusers.models import AutoencoderKLTemporalDecoder
except ModuleNotFoundError:
    AutoencoderKLTemporalDecoder = None

# tqdm is optional; fall back to periodic prints when it is unavailable.
try:
    from tqdm import tqdm
except ModuleNotFoundError:
    tqdm = None


class EncodeLatentDataset(Dataset):
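    """Export DROID episodes as resized mp4s, optional SVD VAE latents, and
    per-episode JSON annotations under `new_path`.

    `__getitem__` performs the export as a side effect and returns a dummy
    value, so the dataset is driven through a DataLoader purely for worker
    parallelism rather than to yield training samples.
    """
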
    def __init__(self, old_path, new_path, svd_path, device, size=(192, 320), rgb_skip=3, skip_latent: bool = False):
        self.old_path = old_path
        self.new_path = new_path
        self.size = size
        self.skip = rgb_skip
        self.vae = None
        if not skip_latent:
            if not svd_path:
                raise ValueError("svd_path is required unless --skip_latent is set.")
            if AutoencoderKLTemporalDecoder is None:
                raise ModuleNotFoundError(
                    "diffusers is not installed, but latent encoding is enabled. "
                    "Install diffusers or run with --skip_latent."
                )
            # Load only the VAE subfolder of the SVD checkpoint.
            self.vae = AutoencoderKLTemporalDecoder.from_pretrained(svd_path, subfolder="vae").to(device)

        # Map episode_index -> first task string from meta/episodes.jsonl,
        # skipping malformed lines.
        self.instruction_by_episode = {}
        episodes_jsonl = os.path.join(old_path, "meta", "episodes.jsonl")
        if os.path.exists(episodes_jsonl):
            with open(episodes_jsonl, "r") as f:
                for line in f:
                    try:
                        obj = json.loads(line)
                    except json.JSONDecodeError:
                        continue
                    eid = obj.get("episode_index", None)
                    if isinstance(eid, int):
                        tasks = obj.get("tasks", [])
                        if isinstance(tasks, list) and len(tasks) > 0:
                            self.instruction_by_episode[eid] = tasks[0]

        # Enumerate every episode parquet under data/chunk-*/.
        self.items = []
        data_root = os.path.join(old_path, "data")
        if not os.path.isdir(data_root):
            raise FileNotFoundError(f"Missing data directory: {data_root}")

        chunk_re = re.compile(r"^chunk-(\d+)$")
        ep_re = re.compile(r"^episode_(\d+)\.parquet$")
        for entry in sorted(os.listdir(data_root)):
            m = chunk_re.match(entry)
            if not m:
                continue
            chunk_id = int(m.group(1))
            chunk_dir = os.path.join(data_root, entry)
            if not os.path.isdir(chunk_dir):
                continue
            for fn in sorted(os.listdir(chunk_dir)):
                m2 = ep_re.match(fn)
                if not m2:
                    continue
                traj_id = int(m2.group(1))
                file_path = os.path.join(chunk_dir, fn)
                self.items.append(
                    {
                        "traj_id": traj_id,
                        "chunk_id": chunk_id,
                        "file_path": file_path,
                    }
                )

    def __len__(self):
        return len(self.items)

    def __getitem__(self, idx):
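        """Export one episode; returns 0 (the real outputs are files on disk)."""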
        item = self.items[idx]
        traj_id = item["traj_id"]
        chunk_id = item["chunk_id"]
        file_path = item["file_path"]
        instruction = self.instruction_by_episode.get(traj_id, "")

        # Hold out every 100th episode (index % 100 == 99) for validation.
        data_type = "val" if traj_id % 100 == 99 else "train"

        df = pd.read_parquet(file_path)
        length = len(df["observation.state.cartesian_position"])

        obs_car = []
        obs_joint = []
        obs_gripper = []
        action_car = []
        action_joint = []
        action_gripper = []
        action_joint_vel = []

        for i in range(length):
            obs_car.append(df["observation.state.cartesian_position"][i].tolist())
            obs_joint.append(df["observation.state.joint_position"][i].tolist())
            obs_gripper.append(df["observation.state.gripper_position"][i].tolist())
            action_car.append(df["action.cartesian_position"][i].tolist())
            action_joint.append(df["action.joint_position"][i].tolist())
            action_gripper.append(df["action.gripper_position"][i].tolist())
            action_joint_vel.append(df["action.joint_velocity"][i].tolist())
        success = df["is_episode_successful"][0]

        # Two exterior camera streams plus the wrist camera.
        video_paths = [
            f"{self.old_path}/videos/chunk-{chunk_id:03d}/observation.images.exterior_1_left/episode_{traj_id:06d}.mp4",
            f"{self.old_path}/videos/chunk-{chunk_id:03d}/observation.images.exterior_2_left/episode_{traj_id:06d}.mp4",
            f"{self.old_path}/videos/chunk-{chunk_id:03d}/observation.images.wrist_left/episode_{traj_id:06d}.mp4",
        ]
        traj_info = {
            "success": success,
            "observation.state.cartesian_position": obs_car,
            "observation.state.joint_position": obs_joint,
            "observation.state.gripper_position": obs_gripper,
            "action.cartesian_position": action_car,
            "action.joint_position": action_joint,
            "action.gripper_position": action_gripper,
            "action.joint_velocity": action_joint_vel,
        }

        try:
            device = self.vae.device if self.vae is not None else "cpu"
            self.process_traj(
                video_paths,
                traj_info,
                instruction,
                self.new_path,
                traj_id=traj_id,
                data_type=data_type,
                size=self.size,
                rgb_skip=self.skip,
                device=device,
            )
        except Exception as e:
            # Log and skip unreadable episodes instead of crashing the run.
            import traceback

            print(f"Error processing trajectory {traj_id}, skipping...")
            print(f"Exception type: {type(e).__name__}")
            print(f"Exception message: {e}")
            print(f"Parquet path: {file_path}")
            print(f"Video paths: {video_paths}")
            traceback.print_exc()
            return 0

        return 0

    def process_traj(self, video_paths, traj_info, instruction, save_root, traj_id=0, data_type="val", size=(192, 320), rgb_skip=3, device="cuda"):
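        """Write resized mp4s (and optional SVD VAE latents) for each camera,
        then dump a JSON annotation with the instruction, success flag, and
        state/action streams for this trajectory.
        """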
        frames = None
        for video_id, video_path in enumerate(video_paths):
            # Read, temporally subsample by rgb_skip, and scale pixels to [-1, 1].
            video = mediapy.read_video(video_path)
            frames = torch.tensor(video).permute(0, 3, 1, 2).float() / 255.0 * 2 - 1
            frames = frames[::rgb_skip]
            x = torch.nn.functional.interpolate(frames, size=size, mode="bilinear", align_corners=False)
            resize_video = (x / 2.0 + 0.5).clamp(0, 1) * 255
            resize_video = resize_video.permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)
            os.makedirs(f"{save_root}/videos/{data_type}/{traj_id}", exist_ok=True)
            mediapy.write_video(f"{save_root}/videos/{data_type}/{traj_id}/{video_id}.mp4", resize_video, fps=5)

            if self.vae is not None:
                x = x.to(device)
                with torch.no_grad():
                    # Encode in fixed-size batches to bound VAE memory usage.
                    batch_size = 64
                    latents = []
                    for i in range(0, len(x), batch_size):
                        batch = x[i:i + batch_size]
                        latent = self.vae.encode(batch).latent_dist.sample().mul_(self.vae.config.scaling_factor).cpu()
                        latents.append(latent)
                    x = torch.cat(latents, dim=0)
                os.makedirs(f"{save_root}/latent_videos/{data_type}/{traj_id}", exist_ok=True)
                torch.save(x, f"{save_root}/latent_videos/{data_type}/{traj_id}/{video_id}.pt")

        # Per-frame state = cartesian pose + gripper, subsampled with the same
        # stride as the video so states and frames stay aligned.
        cartesian_pose = np.array(traj_info["observation.state.cartesian_position"])
        cartesian_gripper = np.array(traj_info["observation.state.gripper_position"])[:, None]
        cartesian_states = np.concatenate((cartesian_pose, cartesian_gripper), axis=-1)[::rgb_skip].tolist()

        info = {
            "texts": [instruction],
            "episode_id": traj_id,
            "success": int(traj_info["success"]),
            "video_length": frames.shape[0],  # frames holds the last camera's subsampled stream
            "state_length": len(cartesian_states),
            "raw_length": len(traj_info["observation.state.cartesian_position"]),
            "videos": [
                {"video_path": f"videos/{data_type}/{traj_id}/0.mp4"},
                {"video_path": f"videos/{data_type}/{traj_id}/1.mp4"},
                {"video_path": f"videos/{data_type}/{traj_id}/2.mp4"},
            ],
            "latent_videos": (
                [
                    {"latent_video_path": f"latent_videos/{data_type}/{traj_id}/0.pt"},
                    {"latent_video_path": f"latent_videos/{data_type}/{traj_id}/1.pt"},
                    {"latent_video_path": f"latent_videos/{data_type}/{traj_id}/2.pt"},
                ]
                if self.vae is not None
                else []
            ),
            "states": cartesian_states,
            "observation.state.cartesian_position": traj_info["observation.state.cartesian_position"],
            "observation.state.joint_position": traj_info["observation.state.joint_position"],
            "observation.state.gripper_position": traj_info["observation.state.gripper_position"],
            "action.cartesian_position": traj_info["action.cartesian_position"],
            "action.joint_position": traj_info["action.joint_position"],
            "action.gripper_position": traj_info["action.gripper_position"],
            "action.joint_velocity": traj_info["action.joint_velocity"],
        }
        os.makedirs(f"{save_root}/annotation/{data_type}", exist_ok=True)
        with open(f"{save_root}/annotation/{data_type}/{traj_id}.json", "w") as f:
            json.dump(info, f, indent=2)


if __name__ == "__main__":
    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument("--droid_hf_path", type=str, default="/cephfs/shared/droid_hf/droid_1.0.1")
    parser.add_argument("--droid_output_path", type=str, default="dataset_example/droid_subset")
    parser.add_argument("--svd_path", type=str, default="/cephfs/shared/llm/stable-video-diffusion-img2vid")
    parser.add_argument("--skip_latent", action="store_true", help="Skip SVD VAE encoding; only write resized mp4 + annotation.")
    parser.add_argument("--num_workers", type=int, default=8, help="DataLoader workers for parallel data processing (recommended with --skip_latent).")
    parser.add_argument("--prefetch_factor", type=int, default=2, help="Number of batches prefetched per worker.")
    parser.add_argument("--persistent_workers", action="store_true", help="Keep workers alive across iterations when num_workers > 0.")
    parser.add_argument("--debug", action="store_true", help="Process only a few samples, then stop.")
    args = parser.parse_args()

    accelerator = Accelerator()
    dataset = EncodeLatentDataset(
        old_path=args.droid_hf_path,
        new_path=args.droid_output_path,
        svd_path=args.svd_path,
        device=accelerator.device,
        size=(192, 320),
        rgb_skip=3,
        skip_latent=args.skip_latent,
    )

    # When the VAE is loaded, keep everything in the main process so the model
    # is not duplicated across DataLoader workers.
    num_workers = max(0, int(args.num_workers))
    if (not args.skip_latent) and num_workers > 0:
        print("[warn] --skip_latent is not set; forcing num_workers=0 to avoid duplicating VAE across workers.")
        num_workers = 0

    dl_kwargs = {
        "batch_size": 1,
        "num_workers": num_workers,
        "pin_memory": True,
    }
    if num_workers > 0:
        # These DataLoader options are only valid when workers exist.
        dl_kwargs["persistent_workers"] = bool(args.persistent_workers)
        dl_kwargs["prefetch_factor"] = max(1, int(args.prefetch_factor))

    tmp_data_loader = torch.utils.data.DataLoader(
        dataset,
        **dl_kwargs,
    )
    # Shard the loader across Accelerate processes so each rank handles a slice.
    tmp_data_loader = accelerator.prepare_data_loader(tmp_data_loader)

    it = enumerate(tmp_data_loader)
    if tqdm is not None:
        it = tqdm(
            it,
            total=len(tmp_data_loader),
            disable=not accelerator.is_main_process,
            desc="Precomputing",
        )

    for idx, _ in it:
        if idx == 5 and args.debug:
            break
        if tqdm is None and idx % 100 == 0 and accelerator.is_main_process:
            print(f"Precomputed {idx} samples")
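
# Usage sketch (the script filename below is assumed; adjust paths to your setup):
#   No latents, parallel CPU workers:
#       accelerate launch encode_latent_dataset.py --skip_latent --num_workers 8
#   With SVD VAE latents (num_workers is forced to 0 automatically):
#       accelerate launch encode_latent_dataset.py --svd_path /path/to/stable-video-diffusion-img2vid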