---
# Nova-Sim Observation Configuration
#
# This file defines which parameters from the Nova-Sim state stream
# are mapped to robot observations for recording, replay, and inference.
#
# To disable a feature, comment out the entire section or set enabled: false
#
# NOTE(review): this file was recovered from a pipe-garbled (table-extracted)
# copy; indentation was reconstructed from key semantics. Verify the nesting
# of success_reward_min and pose_restrictions (top-level vs. under defaults)
# against the consuming code.

repositories:
  # Dataset repo used for upload/download and dataset metadata.
  dataset: "${HF_USER}/nova_ur5_demo"
  dataset_branch: "main"
  # Policy repo used for model upload/download.
  policy: "${HF_USER}/nova_ur5_demo-policy"
  policy_branch: "main"

defaults:
  robot:
    type: "nova_sim"
    id: "ur5_robot"
  recording:
    task_name: "pick_and_place"
    fps: 10
    episode_time_s: 60
    reset_time_s: 10
    num_episodes: 10
    # Action horizon: how many timesteps ahead for target position actions
    # Time horizon = n_action_steps / fps (e.g., 20 steps @ 10 FPS = 2.0s)
    n_action_steps: 20
  training:
    policy_type: "act"
    training_steps: 100000
    policy_private: false
  paths:
    data_dir: "./data"
    checkpoint_dir: "./outputs"
  ui:
    trainer_host: "127.0.0.1"
    trainer_port: 3005

# Minimum reward that marks a step as success for next.done/next.success.
success_reward_min: -0.05

# Pose restrictions applied during replay/inference when sending actions.
pose_restrictions:
  cartesian:
    enabled: true
    x:
      min: 0.3
      max: 0.9
    y:
      min: -0.6
      max: 0.6
    z:
      min: 0.7
      max: 1.0

observations:
  # State observations (non-visual sensor data)
  state:
    # End-effector Cartesian position (in meters)
    # Source: end_effector dict from Nova-Sim state
    end_effector.x:
      enabled: true
      source: end_effector
      field: x
      dtype: float
      mapToKey: x
    end_effector.y:
      enabled: true
      source: end_effector
      field: y
      dtype: float
      mapToKey: y
  # Camera/image observations
  cameras:
    # Front overview camera (labeled "Front Overview"; azimuth 180,
    # elevation -40 — not a true top-down view)
    ai:
      enabled: true
      width: 240
      height: 160
      register: true
      label: "Front Overview"
      lookat: [0.5, 0.0, 0.42]
      distance: 1.4
      azimuth: 180
      elevation: -40
      map_to: image

# Action configuration (order matters)
# Actions can be simple strings (old format) or dicts with extended configuration:
#   name: Action name (required)
#   enabled: Whether to include this action (default: true)
#   mapToKey: Alternative name in the dataset (optional)
#   addObservation: observation.state field name to add to action (optional)
#     Recording: observation value gets added to action (velocity + position = target)
#     Inference: policy outputs target position, converted back to velocity via delta
#     Useful for training on target positions while executing with velocities
actions:
  - name: vx
    enabled: true
    mapToKey: x
    addObservation: x  # vx accumulates into observation x
  - name: vy
    enabled: true
    mapToKey: y
    addObservation: y  # vy accumulates into observation y

# Episode metadata (always included, cannot be disabled)
metadata:
  - reward
  - terminated
  - truncated
  - steps