File size: 5,556 Bytes
da5a206
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
#!/usr/bin/env python3
"""
Simple dataset visualization script for LeRobot datasets
"""
import argparse
import json
import math
from pathlib import Path

import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

def load_dataset_info(dataset_path):
    """Load dataset metadata from ``meta/info.json``.

    Args:
        dataset_path: Root directory of the LeRobot dataset (str or Path).

    Returns:
        The parsed metadata dict, or ``None`` if no ``info.json`` exists.
    """
    info_file = Path(dataset_path) / "meta" / "info.json"
    if not info_file.exists():
        return None
    # Explicit encoding: JSON is UTF-8 by spec; relying on the platform
    # default encoding can fail on Windows.
    with open(info_file, 'r', encoding='utf-8') as f:
        return json.load(f)

def _print_sample_frames(df, max_rows=3):
    """Print the first few rows of the frame table, summarizing long vectors."""
    print("\nSample data (first 3 rows):")
    for i in range(min(max_rows, len(df))):
        print(f"\nFrame {i}:")
        for col in df.columns:
            val = df.iloc[i][col]
            # Long vectors (state/action arrays) are summarized, not dumped.
            if isinstance(val, (list, np.ndarray)) and len(val) > 10:
                print(f"  {col}: {type(val)} with {len(val)} elements")
            else:
                print(f"  {col}: {val}")


def _print_video_info(videos_dir):
    """Print resolution, FPS and frame count for every MP4 under videos_dir."""
    print(f"\nVideo files found in: {videos_dir}")
    for camera_dir in sorted(videos_dir.iterdir()):
        if not camera_dir.is_dir():
            continue
        print(f"  Camera: {camera_dir.name}")
        for video_file in sorted(camera_dir.glob("*.mp4")):
            print(f"    Video: {video_file.name}")
            cap = cv2.VideoCapture(str(video_file))
            try:
                if cap.isOpened():
                    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                    print(f"      Resolution: {width}x{height}")
                    print(f"      FPS: {cap.get(cv2.CAP_PROP_FPS)}")
                    print(f"      Frames: {int(cap.get(cv2.CAP_PROP_FRAME_COUNT))}")
                else:
                    print(f"      Could not open video file")
            finally:
                # Release the capture even if a property read raises, and also
                # in the not-opened case (the original leaked it there).
                cap.release()


def _plot_vector_column(df, column, label, output_file):
    """Plot each dimension of a per-frame vector column and save it as a PNG.

    Args:
        df: Frame table containing ``column``.
        column: Name of the column whose rows are fixed-length vectors.
        label: Human-readable name used in subplot titles.
        output_file: Path of the PNG to write.

    Returns:
        True if a plot was written, False if the column could not be
        interpreted as a 2-D numeric series (empty or ragged rows).
    """
    values = df[column].tolist()
    if not values:
        # Empty dataframe: the original code IndexError'd on values[0] here.
        return False
    # Rows may be stored either as Python lists or as per-row numpy arrays;
    # normalize both forms into one (frames, dims) array. Ragged rows make
    # asarray produce an object array, caught by the ndim check below.
    data = np.asarray([np.asarray(v) for v in values])
    if data.ndim != 2 or data.shape[1] == 0:
        return False
    n_dims = data.shape[1]
    # Near-square grid sized to the data: the original hard-coded a 2x3 grid,
    # which raised ValueError for vectors with more than 6 dimensions.
    n_cols = math.ceil(math.sqrt(n_dims))
    n_rows = math.ceil(n_dims / n_cols)
    plt.figure(figsize=(12, 8))
    for i in range(n_dims):
        plt.subplot(n_rows, n_cols, i + 1)
        plt.plot(data[:, i])
        plt.title(f'{label} Dimension {i}')
        plt.xlabel('Frame')
        plt.ylabel('Value')
    plt.tight_layout()
    plt.savefig(output_file, dpi=150, bbox_inches='tight')
    plt.close()
    return True


def visualize_dataset(dataset_path, repo_id=None):
    """Visualize a LeRobot dataset.

    Prints the metadata, a few sample frames, and video stream info, then
    saves per-dimension plots of the ``action`` and ``observation.state``
    columns as PNGs in the current directory.

    Args:
        dataset_path: Root directory of the dataset.
        repo_id: Optional repository ID, printed for reference only.
    """
    dataset_path = Path(dataset_path)

    print(f"Visualizing dataset at: {dataset_path}")
    if repo_id:
        print(f"Repo ID: {repo_id}")

    # Metadata is optional; keep going without it.
    info = load_dataset_info(dataset_path)
    if info:
        print(f"Dataset: {info.get('name', 'Unknown')}")
        print(f"Version: {info.get('version', 'Unknown')}")
        print(f"Total frames: {info.get('total_frames', 'Unknown')}")
        print(f"FPS: {info.get('fps', 'Unknown')}")
        print(f"Features: {list(info.get('features', {}).keys())}")

    # Frame data is required; bail out with a clear message if missing.
    parquet_file = dataset_path / "data" / "chunk-000" / "episode_000000.parquet"
    if not parquet_file.exists():
        print(f"❌ Parquet file not found: {parquet_file}")
        return

    print(f"Loading data from: {parquet_file}")
    df = pd.read_parquet(parquet_file)
    print(f"Loaded {len(df)} frames")
    print(f"Columns: {list(df.columns)}")

    _print_sample_frames(df)

    videos_dir = dataset_path / "videos" / "chunk-000"
    if videos_dir.exists():
        _print_video_info(videos_dir)
    else:
        print(f"\nNo video files found in: {videos_dir}")

    if 'action' in df.columns:
        print("\nCreating action data visualization...")
        if _plot_vector_column(df, 'action', 'Action', 'action_visualization.png'):
            print("✅ Action visualization saved as 'action_visualization.png'")

    if 'observation.state' in df.columns:
        print("\nCreating observation state visualization...")
        if _plot_vector_column(df, 'observation.state', 'State', 'state_visualization.png'):
            print("✅ State visualization saved as 'state_visualization.png'")

    print("\n✅ Dataset visualization complete!")

def main():
    """Parse command-line arguments and run the dataset visualization."""
    arg_parser = argparse.ArgumentParser(description="Visualize LeRobot dataset")
    arg_parser.add_argument(
        "--dataset-path",
        type=str,
        required=True,
        help="Path to dataset directory",
    )
    arg_parser.add_argument(
        "--repo-id",
        type=str,
        help="Repository ID (for reference)",
    )
    cli_args = arg_parser.parse_args()
    visualize_dataset(cli_args.dataset_path, cli_args.repo_id)

if __name__ == "__main__":
    main()