# Provenance: Hugging Face dataset file (commit da5a206, verified),
# "Upload bimanual bone packing dataset with so101 folder structure", 5.56 kB.
# NOTE(review): the lines above were web-page chrome ("raw / history / blame")
# captured by the scrape and would be a SyntaxError; preserved here as a comment.
#!/usr/bin/env python3
"""
Simple dataset visualization script for LeRobot datasets
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cv2
from pathlib import Path
import json
import argparse
def load_dataset_info(dataset_path):
    """Return the parsed ``meta/info.json`` for a dataset, or ``None`` if absent.

    Args:
        dataset_path: Root directory of the LeRobot dataset (str or Path).

    Returns:
        dict with the dataset metadata, or None when the file does not exist.
    """
    meta_path = Path(dataset_path, "meta", "info.json")
    if not meta_path.exists():
        return None
    with meta_path.open("r") as fh:
        return json.load(fh)
def _print_sample_frames(df, count=3):
    """Print up to ``count`` leading frames, summarizing long array columns."""
    print("\nSample data (first 3 rows):")
    for i in range(min(count, len(df))):
        print(f"\nFrame {i}:")
        for col in df.columns:
            val = df.iloc[i][col]
            # Long vectors (joint states, actions) are summarized, not dumped.
            if isinstance(val, (list, np.ndarray)) and len(val) > 10:
                print(f"  {col}: {type(val)} with {len(val)} elements")
            else:
                print(f"  {col}: {val}")


def _report_videos(videos_dir):
    """Print per-camera video files and their resolution/fps/frame count."""
    if not videos_dir.exists():
        print(f"\nNo video files found in: {videos_dir}")
        return
    print(f"\nVideo files found in: {videos_dir}")
    for video_dir in videos_dir.iterdir():
        if not video_dir.is_dir():
            continue
        print(f"  Camera: {video_dir.name}")
        for video_file in video_dir.glob("*.mp4"):
            print(f"    Video: {video_file.name}")
            cap = cv2.VideoCapture(str(video_file))
            if cap.isOpened():
                frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                fps = cap.get(cv2.CAP_PROP_FPS)
                width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                print(f"      Resolution: {width}x{height}")
                print(f"      FPS: {fps}")
                print(f"      Frames: {frame_count}")
                cap.release()
            else:
                print(f"      Could not open video file")


def _plot_series(values, label, out_path):
    """Plot each dimension of a per-frame vector column and save a PNG.

    Fixes two bugs in the original inline version:
    - the subplot grid was hard-coded to 2x3, so any vector with more than
      6 dimensions (e.g. a 12-dim bimanual action) raised ValueError; the
      grid is now sized from the data.
    - elements stored as np.ndarray (rather than list) were never converted
      and plotting was silently skipped; np.asarray handles both.

    Args:
        values: per-frame sequence of equal-length vectors.
        label: human-readable series name ("Action" / "State").
        out_path: PNG filename to write.
    """
    data = np.asarray(values)
    if data.ndim != 2 or data.shape[1] == 0:
        return
    ndim = data.shape[1]
    ncols = 3
    nrows = (ndim + ncols - 1) // ncols  # ceil division: enough rows for every dim
    plt.figure(figsize=(12, max(8, 3 * nrows)))
    for i in range(ndim):
        plt.subplot(nrows, ncols, i + 1)
        plt.plot(data[:, i])
        plt.title(f'{label} Dimension {i}')
        plt.xlabel('Frame')
        plt.ylabel('Value')
    plt.tight_layout()
    plt.savefig(out_path, dpi=150, bbox_inches='tight')
    print(f"✅ {label} visualization saved as '{out_path}'")
    plt.close()


def visualize_dataset(dataset_path, repo_id=None):
    """Visualize a LeRobot dataset: metadata, sample frames, videos, plots.

    Args:
        dataset_path: Root directory of the dataset (str or Path).
        repo_id: Optional repository ID, accepted for reference only (unused).

    Side effects:
        Prints a report to stdout; may write 'action_visualization.png'
        and 'state_visualization.png' to the current directory.
    """
    dataset_path = Path(dataset_path)
    print(f"Visualizing dataset at: {dataset_path}")
    info = load_dataset_info(dataset_path)
    if info:
        print(f"Dataset: {info.get('name', 'Unknown')}")
        print(f"Version: {info.get('version', 'Unknown')}")
        print(f"Total frames: {info.get('total_frames', 'Unknown')}")
        print(f"FPS: {info.get('fps', 'Unknown')}")
        print(f"Features: {list(info.get('features', {}).keys())}")
    # Only the first episode of the first chunk is inspected.
    parquet_file = dataset_path / "data" / "chunk-000" / "episode_000000.parquet"
    if not parquet_file.exists():
        print(f"❌ Parquet file not found: {parquet_file}")
        return
    print(f"Loading data from: {parquet_file}")
    df = pd.read_parquet(parquet_file)
    print(f"Loaded {len(df)} frames")
    print(f"Columns: {list(df.columns)}")
    _print_sample_frames(df)
    _report_videos(dataset_path / "videos" / "chunk-000")
    if 'action' in df.columns:
        print("\nCreating action data visualization...")
        _plot_series(df['action'].tolist(), 'Action', 'action_visualization.png')
    if 'observation.state' in df.columns:
        print("\nCreating observation state visualization...")
        _plot_series(df['observation.state'].tolist(), 'State', 'state_visualization.png')
    print("\n✅ Dataset visualization complete!")
def main():
    """Parse command-line arguments and run the dataset visualizer."""
    arg_parser = argparse.ArgumentParser(description="Visualize LeRobot dataset")
    arg_parser.add_argument("--dataset-path", type=str, required=True, help="Path to dataset directory")
    arg_parser.add_argument("--repo-id", type=str, help="Repository ID (for reference)")
    opts = arg_parser.parse_args()
    visualize_dataset(opts.dataset_path, opts.repo_id)
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()