|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| """This script demonstrates how to slice a dataset and calculate the loss on a subset of the data.
|
|
|
| This technique can be useful for debugging and testing purposes, as well as identifying whether a policy
|
| is learning effectively.
|
|
|
| Furthermore, relying on validation loss to evaluate performance is generally not considered a good practice,
|
| especially in the context of imitation learning. The most reliable approach is to evaluate the policy directly
|
| on the target environment, whether that be in simulation or the real world.
|
| """
|
|
|
| import math
|
|
|
| import torch
|
|
|
| from lerobot.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
|
| from lerobot.policies.diffusion.modeling_diffusion import DiffusionPolicy
|
|
|
|
|
def main():
    """Load a pretrained diffusion policy, split the PushT dataset into 90/10
    train/validation episode subsets, and report the example-weighted average
    loss over the validation split.
    """
    # Prefer GPU but fall back to CPU so the script runs on any machine.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    pretrained_policy_path = "lerobot/diffusion_pusht"

    policy = DiffusionPolicy.from_pretrained(pretrained_policy_path)
    policy.eval()
    policy.to(device)

    # Relative timestamps (seconds) for the temporal window each sample carries:
    # two observation steps (previous + current) and a 16-step action horizon.
    delta_timestamps = {
        "observation.image": [-0.1, 0.0],
        "observation.state": [-0.1, 0.0],
        "action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4],
    }

    # Load only the metadata first: episode counts are enough to compute the
    # split, and this avoids downloading/decoding any frames yet.
    dataset_metadata = LeRobotDatasetMetadata("lerobot/pusht")
    total_episodes = dataset_metadata.total_episodes
    episodes = list(range(total_episodes))
    num_train_episodes = math.floor(total_episodes * 90 / 100)
    train_episodes = episodes[:num_train_episodes]
    val_episodes = episodes[num_train_episodes:]
    print(f"Number of episodes in full dataset: {total_episodes}")
    print(f"Number of episodes in training dataset (90% subset): {len(train_episodes)}")
    print(f"Number of episodes in validation dataset (10% subset): {len(val_episodes)}")

    train_dataset = LeRobotDataset(
        "lerobot/pusht", episodes=train_episodes, delta_timestamps=delta_timestamps
    )
    val_dataset = LeRobotDataset("lerobot/pusht", episodes=val_episodes, delta_timestamps=delta_timestamps)
    print(f"Number of frames in training dataset (90% subset): {len(train_dataset)}")
    print(f"Number of frames in validation dataset (10% subset): {len(val_dataset)}")

    val_dataloader = torch.utils.data.DataLoader(
        val_dataset,
        num_workers=4,
        batch_size=64,
        shuffle=False,
        # Pinned host memory speeds up H2D copies; pointless on CPU.
        pin_memory=device.type != "cpu",
        drop_last=False,
    )

    loss_cumsum = 0.0
    n_examples_evaluated = 0
    # inference_mode: no autograd graph is built during this eval-only pass,
    # saving memory and compute.
    with torch.inference_mode():
        for batch in val_dataloader:
            # Move only tensors to the device; batches may also carry
            # non-tensor entries (e.g. string fields).
            batch = {
                k: (v.to(device, non_blocking=True) if isinstance(v, torch.Tensor) else v)
                for k, v in batch.items()
            }
            loss, _ = policy.forward(batch)

            # `loss` is a per-batch MEAN, so weight it by the batch size before
            # accumulating. Summing raw batch means and dividing by the number
            # of examples (as a naive version would) is off by roughly a factor
            # of the batch size, and additionally skewed by the smaller final
            # batch since drop_last=False.
            batch_size = batch["index"].shape[0]
            loss_cumsum += loss.item() * batch_size
            n_examples_evaluated += batch_size

    # Example-weighted mean loss over the whole validation split.
    average_loss = loss_cumsum / n_examples_evaluated

    print(f"Average loss on validation set: {average_loss:.4f}")
|
|
|
|
|
# Entry point: run the validation-loss demonstration when executed as a script.
if __name__ == "__main__":
    main()
|
|
|