Add files using upload-large-folder tool
Browse files- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/configs/config_history.yaml +107 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/configs/config_history_ddim.yaml +115 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/configs/config_history_joint.yaml +107 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/configs/config_history_rui.yaml +106 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/configs/config_ms3_history_ddim.yaml +121 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/.circleci/build_count.py +33 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/.circleci/check.sh +13 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/.circleci/config.in.yml +171 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/.circleci/config.yml +688 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/.circleci/regenerate.py +183 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/LICENSE-3RD-PARTY +71 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/__pycache__/run.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/inference_server.py +338 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/inference_server_joint.py +362 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/robot_controller.py +142 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/robot_controller_joint.py +201 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/run.py +103 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/run_inference.py +334 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/run_joint_demo.py +321 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/run_ms3_task_demo.py +253 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/run_shelf_demo.py +286 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/sim_benchmark.py +302 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/mp_demo.py +206 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/plot_sample_data.py +16 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/replay_guidance_test.py +198 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/replay_planning_test.py +264 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/test_clutter_env.py +116 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/test_dataset.py +39 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/test_envs.py +139 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/test_fcm.py +60 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/test_sapien.py +9 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/tools.py +94 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/__init__.py +0 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/__pycache__/__init__.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/__pycache__/rdp_path_simplify.cpython-310.pyc +0 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/angle_utils.py +350 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/chamfer.py +158 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/data_utils.py +538 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/fcm/__init__.py +0 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/fcm/base_utils.py +240 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/fcm/pyramid.py +1662 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/fcm/utils.py +511 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/fcm/vlc.py +838 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/inference_recorder.py +94 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/object_placement_2.py +101 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/observation_wrapper.py +231 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/pointcloud_sdf.py +180 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/rdp_path_simplify.py +137 -0
- project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/socket_utils.py +13 -0
- project/ManiSkill3/src/maniskill2_benchmark/msx_envs/msx_envs/env_config/distractorbox/7_scene_config.yaml +458 -0
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/configs/config_history.yaml
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#root_dir: '/home/ladmin/Documents/maniskill2_benchmark'
|
| 2 |
+
root_dir: '/home/xuan/Code/maniskill2_benchmark'
|
| 3 |
+
hydra:
|
| 4 |
+
run:
|
| 5 |
+
dir: '${root_dir}/logs/${now:%Y-%m-%d}/${now:%H-%M-%S}'
|
| 6 |
+
|
| 7 |
+
# General settings
|
| 8 |
+
trajectory_length: 32
|
| 9 |
+
state_dim: 9
|
| 10 |
+
|
| 11 |
+
# Device and debug settings
|
| 12 |
+
device: 'cuda'
|
| 13 |
+
debug: false
|
| 14 |
+
|
| 15 |
+
# Mandatory settings
|
| 16 |
+
seed: 11
|
| 17 |
+
# results_dir: '${root_dir}/saved_models/${now:%m.%d-%H:%M}-seed${seed}'
|
| 18 |
+
results_dir: '${root_dir}/saved_models/${now:%m.%d-%H.%M}_traj${trajectory_length}'
|
| 19 |
+
batch_size: 256
|
| 20 |
+
|
| 21 |
+
observation_wrapper: 'ObservationWrapperWithHistory'
|
| 22 |
+
|
| 23 |
+
# Dataset
|
| 24 |
+
dataset:
|
| 25 |
+
_target_: 'cfdp.datasets.maniskill2_trajectory.ManiSkill2TrajectoryWithHistory'
|
| 26 |
+
# dataset_file: '/mnt/Dataset/shelf_data.h5'
|
| 27 |
+
dataset_file: '/mnt/Dataset/full_cover.h5'
|
| 28 |
+
obs_keys: ['tcp_pose']
|
| 29 |
+
normalizer: 'LimitsNormalizer'
|
| 30 |
+
pcd: false
|
| 31 |
+
choice: 'interpolate'
|
| 32 |
+
pad_front: false
|
| 33 |
+
trajectory_length: ${trajectory_length}
|
| 34 |
+
history_length: 8
|
| 35 |
+
force_reload: false
|
| 36 |
+
load_count: -1
|
| 37 |
+
device: ${device}
|
| 38 |
+
stride: 2
|
| 39 |
+
verbose: false
|
| 40 |
+
|
| 41 |
+
val_set_size: 0.2
|
| 42 |
+
|
| 43 |
+
model:
|
| 44 |
+
# Diffusion Model
|
| 45 |
+
model_class: 'GaussianDiffusionModel'
|
| 46 |
+
variance_schedule: 'exponential' # 'cosine', 'exponential'
|
| 47 |
+
n_steps: 25
|
| 48 |
+
prediction_mode: "epsilon"
|
| 49 |
+
use_ddim: false
|
| 50 |
+
# Unet
|
| 51 |
+
unet_dim_mults_option: 1
|
| 52 |
+
unet_input_dim: 64
|
| 53 |
+
conditioning_type: 'attention'
|
| 54 |
+
conditioning_embed_dim: 32
|
| 55 |
+
# Conext model
|
| 56 |
+
context_input_dim: ${multiply:10,${state_dim}}
|
| 57 |
+
# Loss
|
| 58 |
+
loss:
|
| 59 |
+
loss_class: 'GaussianDiffusionLoss'
|
| 60 |
+
|
| 61 |
+
# Training parameters
|
| 62 |
+
training:
|
| 63 |
+
lr: 1e-4
|
| 64 |
+
num_steps: 60000
|
| 65 |
+
use_ema: true
|
| 66 |
+
use_amp: false
|
| 67 |
+
steps_til_summary: 2000
|
| 68 |
+
steps_til_ckpt: 2000
|
| 69 |
+
summary_class: 'SummaryTrajectoryGeneration'
|
| 70 |
+
|
| 71 |
+
# Inference settings
|
| 72 |
+
inference:
|
| 73 |
+
env_id: 'ShelfPick-v0'
|
| 74 |
+
obs_mode: 'image'
|
| 75 |
+
control_mode: 'pd_ee_delta_pose_align'
|
| 76 |
+
reward_mode: 'dense'
|
| 77 |
+
use_ddim: true
|
| 78 |
+
robot_noise: 0.5
|
| 79 |
+
evaluate: false
|
| 80 |
+
model_dir: '${root_dir}/saved_models/05.11-15.57_traj${trajectory_length}'
|
| 81 |
+
use_ema: true
|
| 82 |
+
episodes: 100
|
| 83 |
+
replanning_interval: 4
|
| 84 |
+
n_samples: 30
|
| 85 |
+
n_diffusion_steps_without_noise: 0
|
| 86 |
+
index: 20
|
| 87 |
+
timestep: 5
|
| 88 |
+
sample_fn:
|
| 89 |
+
n_guide_steps: 4
|
| 90 |
+
t_start_guide: 5
|
| 91 |
+
noise_std_extra_schedule_fn: 0.5
|
| 92 |
+
validation_set_only: false
|
| 93 |
+
sim_with_history: true
|
| 94 |
+
camera_type: 'world'
|
| 95 |
+
guidance_weight: 0.175
|
| 96 |
+
|
| 97 |
+
test:
|
| 98 |
+
inference_record: false # record obs, trajectory, step_info
|
| 99 |
+
recording_dir: '${root_dir}/inference_recordings/'
|
| 100 |
+
replay_dir: '${root_dir}/inference_recordings/concept_test_guidance'
|
| 101 |
+
# replay_dir: '${root_dir}/inference_recordings/recording_0421_2145'
|
| 102 |
+
# WandB settings
|
| 103 |
+
wandb:
|
| 104 |
+
enabled: true
|
| 105 |
+
mode: 'offline'
|
| 106 |
+
entity: 'xuanz_test'
|
| 107 |
+
project: 'simple test'
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/configs/config_history_ddim.yaml
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#root_dir: '/home/ladmin/Documents/maniskill2_benchmark'
|
| 2 |
+
# root_dir: '/home/xuan/Code/maniskill2_benchmark'
|
| 3 |
+
root_dir: '/home/xuan/Project/ManiSkill3/src'
|
| 4 |
+
|
| 5 |
+
hydra:
|
| 6 |
+
run:
|
| 7 |
+
dir: '${root_dir}/logs/${now:%Y-%m-%d}/${now:%H-%M-%S}'
|
| 8 |
+
|
| 9 |
+
# General settings
|
| 10 |
+
trajectory_length: 32
|
| 11 |
+
state_dim: 9
|
| 12 |
+
|
| 13 |
+
# Device and debug settings
|
| 14 |
+
device: 'cuda'
|
| 15 |
+
debug: false
|
| 16 |
+
|
| 17 |
+
# Mandatory settings
|
| 18 |
+
seed: 11
|
| 19 |
+
# results_dir: '${root_dir}/saved_models/${now:%m.%d-%H:%M}-seed${seed}'
|
| 20 |
+
results_dir: '${root_dir}/saved_models/${now:%m.%d-%H.%M}_traj${trajectory_length}'
|
| 21 |
+
batch_size: 256
|
| 22 |
+
|
| 23 |
+
observation_wrapper: 'ObservationWrapperWithHistory'
|
| 24 |
+
use_ee_control: true
|
| 25 |
+
|
| 26 |
+
# Dataset
|
| 27 |
+
dataset:
|
| 28 |
+
_target_: 'cfdp.datasets.maniskill2_trajectory.ManiSkill2TrajectoryWithHistory'
|
| 29 |
+
# dataset_file: '/mnt/Dataset/shelf_data.h5'
|
| 30 |
+
# dataset_file: '/mnt/Dataset/full_cover.h5'
|
| 31 |
+
dataset_file: '/mnt/Dataset/bowl_120_split.h5'
|
| 32 |
+
obs_keys: ['tcp_pose']
|
| 33 |
+
normalizer: 'LimitsNormalizer'
|
| 34 |
+
pcd: false
|
| 35 |
+
choice: 'interpolate'
|
| 36 |
+
pad_front: false
|
| 37 |
+
trajectory_length: ${trajectory_length}
|
| 38 |
+
history_length: 8
|
| 39 |
+
force_reload: false
|
| 40 |
+
load_count: -1
|
| 41 |
+
device: ${device}
|
| 42 |
+
stride: 2
|
| 43 |
+
verbose: false
|
| 44 |
+
|
| 45 |
+
val_set_size: 0.2
|
| 46 |
+
|
| 47 |
+
model:
|
| 48 |
+
# Diffusion Model
|
| 49 |
+
model_class: 'GaussianDiffusionModel'
|
| 50 |
+
variance_schedule: 'cosine' # 'cosine', 'exponential'
|
| 51 |
+
n_steps: 25
|
| 52 |
+
prediction_mode: "v"
|
| 53 |
+
use_ddim: true
|
| 54 |
+
use_snr_weight: true
|
| 55 |
+
# Unet
|
| 56 |
+
unet_dim_mults_option: 1
|
| 57 |
+
unet_input_dim: 64
|
| 58 |
+
conditioning_type: 'attention'
|
| 59 |
+
conditioning_embed_dim: 32
|
| 60 |
+
# Conext model
|
| 61 |
+
context_input_dim: ${multiply:10,${state_dim}}
|
| 62 |
+
# Loss
|
| 63 |
+
loss:
|
| 64 |
+
loss_class: 'GaussianDiffusionLoss'
|
| 65 |
+
|
| 66 |
+
# Training parameters
|
| 67 |
+
training:
|
| 68 |
+
lr: 1e-4
|
| 69 |
+
num_steps: 100000
|
| 70 |
+
use_ema: true
|
| 71 |
+
use_amp: false
|
| 72 |
+
steps_til_summary: 2000
|
| 73 |
+
steps_til_ckpt: 2000
|
| 74 |
+
summary_class: 'SummaryTrajectoryGeneration'
|
| 75 |
+
|
| 76 |
+
# Inference settings
|
| 77 |
+
inference:
|
| 78 |
+
env_id: 'ShelfPick-v0'
|
| 79 |
+
obs_mode: 'image'
|
| 80 |
+
control_mode: 'pd_ee_delta_pose_align'
|
| 81 |
+
reward_mode: 'dense'
|
| 82 |
+
use_ddim: true
|
| 83 |
+
robot_noise: 0.5
|
| 84 |
+
evaluate: false
|
| 85 |
+
# model_dir: '${root_dir}/saved_models/08.17-15.43_traj${trajectory_length}' #all data
|
| 86 |
+
# model_dir: '${root_dir}/saved_models/07.28-14.55_traj${trajectory_length}' #shelf data
|
| 87 |
+
model_dir: '${root_dir}/saved_models/09.01-17.55_traj${trajectory_length}' #bowl 120 data
|
| 88 |
+
use_ema: true
|
| 89 |
+
episodes: 100
|
| 90 |
+
replanning_interval: 30
|
| 91 |
+
n_samples: 8
|
| 92 |
+
n_diffusion_steps_without_noise: 0
|
| 93 |
+
index: 20
|
| 94 |
+
timestep: 5
|
| 95 |
+
sample_fn:
|
| 96 |
+
n_guide_steps: 2
|
| 97 |
+
t_start_guide: 5
|
| 98 |
+
noise_std_extra_schedule_fn: 0.5
|
| 99 |
+
validation_set_only: false
|
| 100 |
+
sim_with_history: true
|
| 101 |
+
# camera_type: 'hand_camera'
|
| 102 |
+
camera_type: 'world'
|
| 103 |
+
guidance_weight: 1.0
|
| 104 |
+
|
| 105 |
+
test:
|
| 106 |
+
inference_record: false # record obs, trajectory, step_info
|
| 107 |
+
recording_dir: '${root_dir}/inference_recordings/'
|
| 108 |
+
replay_dir: '${root_dir}/inference_recordings/concept_test_guidance'
|
| 109 |
+
# replay_dir: '${root_dir}/inference_recordings/recording_0421_2145'
|
| 110 |
+
# WandB settings
|
| 111 |
+
wandb:
|
| 112 |
+
enabled: true
|
| 113 |
+
mode: 'offline'
|
| 114 |
+
entity: 'xuanz_test'
|
| 115 |
+
project: 'simple test'
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/configs/config_history_joint.yaml
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
root_dir: '/home/ladmin/Documents/maniskill2_benchmark'
|
| 2 |
+
# root_dir: '/home/rui/Documents/maniskill2_benchmark'
|
| 3 |
+
# root_dir: '/home/xuan/Code/maniskill2_benchmark'
|
| 4 |
+
hydra:
|
| 5 |
+
run:
|
| 6 |
+
dir: '${root_dir}/logs/${now:%Y-%m-%d}/${now:%H-%M-%S}'
|
| 7 |
+
|
| 8 |
+
# General settings
|
| 9 |
+
trajectory_length: 32
|
| 10 |
+
state_dim: 9
|
| 11 |
+
|
| 12 |
+
# Device and debug settings
|
| 13 |
+
device: 'cuda'
|
| 14 |
+
debug: true
|
| 15 |
+
|
| 16 |
+
# Mandatory settings
|
| 17 |
+
seed: 11
|
| 18 |
+
# results_dir: '${root_dir}/saved_models/${now:%m.%d-%H:%M}-seed${seed}'
|
| 19 |
+
results_dir: '${root_dir}/saved_models/${now:%m.%d-%H.%M}_traj${trajectory_length}'
|
| 20 |
+
batch_size: 256
|
| 21 |
+
|
| 22 |
+
observation_wrapper: 'ObservationWrapperWithHistory'
|
| 23 |
+
use_ee_control: false
|
| 24 |
+
|
| 25 |
+
# Dataset
|
| 26 |
+
dataset:
|
| 27 |
+
_target_: 'cfdp.datasets.maniskill2_trajectory.ManiSkill2TrajectoryWithHistory'
|
| 28 |
+
dataset_file: '/home/ladmin/20250626_165939.h5'
|
| 29 |
+
obs_keys: ['qpos']
|
| 30 |
+
normalizer: 'LimitsNormalizer'
|
| 31 |
+
pcd: false
|
| 32 |
+
choice: 'interpolate'
|
| 33 |
+
pad_front: false
|
| 34 |
+
trajectory_length: ${trajectory_length}
|
| 35 |
+
history_length: 8
|
| 36 |
+
force_reload: false
|
| 37 |
+
load_count: -1
|
| 38 |
+
device: ${device}
|
| 39 |
+
stride: 2
|
| 40 |
+
verbose: true
|
| 41 |
+
|
| 42 |
+
val_set_size: 0.2
|
| 43 |
+
|
| 44 |
+
model:
|
| 45 |
+
# Diffusion Model
|
| 46 |
+
model_class: 'GaussianDiffusionModel'
|
| 47 |
+
variance_schedule: 'exponential' # 'cosine', 'exponential'
|
| 48 |
+
n_steps: 25
|
| 49 |
+
prediction_mode: "epsilon"
|
| 50 |
+
# Unet
|
| 51 |
+
unet_dim_mults_option: 1
|
| 52 |
+
unet_input_dim: 64
|
| 53 |
+
conditioning_type: 'attention'
|
| 54 |
+
conditioning_embed_dim: 32
|
| 55 |
+
# Conext model
|
| 56 |
+
context_input_dim: ${multiply:10,${state_dim}}
|
| 57 |
+
# Loss
|
| 58 |
+
loss:
|
| 59 |
+
loss_class: 'GaussianDiffusionLoss'
|
| 60 |
+
|
| 61 |
+
# Training parameters
|
| 62 |
+
training:
|
| 63 |
+
lr: 1e-4
|
| 64 |
+
num_steps: 200000
|
| 65 |
+
use_ema: true
|
| 66 |
+
use_amp: false
|
| 67 |
+
steps_til_summary: 2000
|
| 68 |
+
steps_til_ckpt: 2000
|
| 69 |
+
summary_class: 'SummaryTrajectoryGeneration'
|
| 70 |
+
|
| 71 |
+
# Inference settings
|
| 72 |
+
inference:
|
| 73 |
+
env_id: 'ShelfPick-v0'
|
| 74 |
+
obs_mode: 'image'
|
| 75 |
+
control_mode: 'pd_joint_delta_pos'
|
| 76 |
+
reward_mode: 'dense'
|
| 77 |
+
robot_noise: 0.5
|
| 78 |
+
evaluate: false
|
| 79 |
+
model_dir: '/home/ladmin/Documents/maniskill2_benchmark/saved_models/06.27-17.15_traj32'
|
| 80 |
+
use_ema: true
|
| 81 |
+
use_ddim: false
|
| 82 |
+
episodes: 100
|
| 83 |
+
replanning_interval: 4
|
| 84 |
+
n_samples: 1
|
| 85 |
+
n_diffusion_steps_without_noise: 0
|
| 86 |
+
index: 20
|
| 87 |
+
timestep: 5
|
| 88 |
+
sample_fn:
|
| 89 |
+
n_guide_steps: 3
|
| 90 |
+
t_start_guide: 3
|
| 91 |
+
noise_std_extra_schedule_fn: 0.5
|
| 92 |
+
validation_set_only: false
|
| 93 |
+
sim_with_history: true
|
| 94 |
+
camera_type: 'world'
|
| 95 |
+
guidance_weight: 5.5
|
| 96 |
+
|
| 97 |
+
test:
|
| 98 |
+
inference_record: false # record obs, trajectory, step_info
|
| 99 |
+
recording_dir: '${root_dir}/inference_recordings/'
|
| 100 |
+
replay_dir: '${root_dir}/inference_recordings/concept_test_guidance'
|
| 101 |
+
# replay_dir: '${root_dir}/inference_recordings/recording_0421_2145'
|
| 102 |
+
# WandB settings
|
| 103 |
+
wandb:
|
| 104 |
+
enabled: true
|
| 105 |
+
mode: 'offline'
|
| 106 |
+
entity: 'xuanz_test'
|
| 107 |
+
project: 'simple test'
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/configs/config_history_rui.yaml
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
root_dir: '/home/rui/Documents/maniskill2_benchmark'
|
| 2 |
+
# root_dir: '/home/xuan/Code/maniskill2_benchmark'
|
| 3 |
+
hydra:
|
| 4 |
+
run:
|
| 5 |
+
dir: '${root_dir}/logs/${now:%Y-%m-%d}/${now:%H-%M-%S}'
|
| 6 |
+
|
| 7 |
+
# General settings
|
| 8 |
+
trajectory_length: 32
|
| 9 |
+
state_dim: 9
|
| 10 |
+
|
| 11 |
+
# Device and debug settings
|
| 12 |
+
device: 'cuda'
|
| 13 |
+
debug: true
|
| 14 |
+
|
| 15 |
+
# Mandatory settings
|
| 16 |
+
seed: 11
|
| 17 |
+
# results_dir: '${root_dir}/saved_models/${now:%m.%d-%H:%M}-seed${seed}'
|
| 18 |
+
results_dir: '${root_dir}/saved_models/${now:%m.%d-%H.%M}_traj${trajectory_length}'
|
| 19 |
+
batch_size: 256
|
| 20 |
+
|
| 21 |
+
observation_wrapper: 'ObservationWrapperWithHistory'
|
| 22 |
+
|
| 23 |
+
# Dataset
|
| 24 |
+
dataset:
|
| 25 |
+
_target_: 'cfdp.datasets.maniskill2_trajectory.ManiSkill2TrajectoryWithHistory'
|
| 26 |
+
dataset_file: '/home/rui/Documents/maniskill2_benchmark/cfdp/data/shelf_data.h5'
|
| 27 |
+
obs_keys: ['tcp_pose']
|
| 28 |
+
normalizer: 'LimitsNormalizer'
|
| 29 |
+
pcd: false
|
| 30 |
+
choice: 'interpolate'
|
| 31 |
+
pad_front: false
|
| 32 |
+
trajectory_length: ${trajectory_length}
|
| 33 |
+
history_length: 8
|
| 34 |
+
force_reload: false
|
| 35 |
+
load_count: -1
|
| 36 |
+
device: ${device}
|
| 37 |
+
stride: 2
|
| 38 |
+
verbose: false
|
| 39 |
+
|
| 40 |
+
val_set_size: 0.2
|
| 41 |
+
|
| 42 |
+
model:
|
| 43 |
+
# Diffusion Model
|
| 44 |
+
model_class: 'GaussianDiffusionModel'
|
| 45 |
+
variance_schedule: 'exponential' # 'cosine', 'exponential'
|
| 46 |
+
n_steps: 25
|
| 47 |
+
prediction_mode: "epsilon"
|
| 48 |
+
use_ddim: false
|
| 49 |
+
# Unet
|
| 50 |
+
unet_dim_mults_option: 1
|
| 51 |
+
unet_input_dim: 64
|
| 52 |
+
conditioning_type: 'attention'
|
| 53 |
+
conditioning_embed_dim: 32
|
| 54 |
+
# Conext model
|
| 55 |
+
context_input_dim: ${multiply:10,${state_dim}}
|
| 56 |
+
# Loss
|
| 57 |
+
loss:
|
| 58 |
+
loss_class: 'GaussianDiffusionLoss'
|
| 59 |
+
|
| 60 |
+
# Training parameters
|
| 61 |
+
training:
|
| 62 |
+
lr: 1e-4
|
| 63 |
+
num_steps: 40000
|
| 64 |
+
use_ema: true
|
| 65 |
+
use_amp: false
|
| 66 |
+
steps_til_summary: 4000
|
| 67 |
+
steps_til_ckpt: 4000
|
| 68 |
+
summary_class: 'SummaryTrajectoryGeneration'
|
| 69 |
+
|
| 70 |
+
# Inference settings
|
| 71 |
+
inference:
|
| 72 |
+
env_id: 'ShelfPick-v0'
|
| 73 |
+
obs_mode: 'image'
|
| 74 |
+
control_mode: 'pd_ee_delta_pose_align'
|
| 75 |
+
reward_mode: 'dense'
|
| 76 |
+
robot_noise: 0.5
|
| 77 |
+
evaluate: false
|
| 78 |
+
model_dir: '${root_dir}/saved_models/05.11-15.57_traj${trajectory_length}'
|
| 79 |
+
use_ema: true
|
| 80 |
+
episodes: 100
|
| 81 |
+
replanning_interval: 4
|
| 82 |
+
n_samples: 30
|
| 83 |
+
n_diffusion_steps_without_noise: 0
|
| 84 |
+
index: 20
|
| 85 |
+
timestep: 5
|
| 86 |
+
sample_fn:
|
| 87 |
+
n_guide_steps: 4
|
| 88 |
+
t_start_guide: 5
|
| 89 |
+
noise_std_extra_schedule_fn: 0.5
|
| 90 |
+
validation_set_only: false
|
| 91 |
+
sim_with_history: true
|
| 92 |
+
camera_type: 'world'
|
| 93 |
+
guidance_weight: 0.2
|
| 94 |
+
|
| 95 |
+
test:
|
| 96 |
+
inference_record: false # record obs, trajectory, step_info
|
| 97 |
+
recording_dir: '${root_dir}/inference_recordings/'
|
| 98 |
+
# replay_dir: '${root_dir}/inference_recordings/concept_test_guidance'
|
| 99 |
+
# replay_dir: '${root_dir}/inference_recordings/small_box'
|
| 100 |
+
replay_dir: '${root_dir}/inference_recordings/recording_0421_2107'
|
| 101 |
+
# WandB settings
|
| 102 |
+
wandb:
|
| 103 |
+
enabled: true
|
| 104 |
+
mode: 'online'
|
| 105 |
+
entity: 'xuanz_test'
|
| 106 |
+
project: 'simple test'
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/configs/config_ms3_history_ddim.yaml
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#root_dir: '/home/ladmin/Documents/maniskill2_benchmark'
|
| 2 |
+
# root_dir: '/home/xuan/Code/maniskill2_benchmark'
|
| 3 |
+
root_dir: '/home/kyber/charles/project/ManiSkill3/src/maniskill2_benchmark'
|
| 4 |
+
|
| 5 |
+
hydra:
|
| 6 |
+
run:
|
| 7 |
+
dir: '${root_dir}/logs/${now:%Y-%m-%d}/${now:%H-%M-%S}'
|
| 8 |
+
|
| 9 |
+
# General settings
|
| 10 |
+
trajectory_length: 32
|
| 11 |
+
state_dim: 9
|
| 12 |
+
|
| 13 |
+
# Device and debug settings
|
| 14 |
+
device: 'cuda'
|
| 15 |
+
debug: false
|
| 16 |
+
|
| 17 |
+
# Mandatory settings
|
| 18 |
+
seed: 11
|
| 19 |
+
# results_dir: '${root_dir}/saved_models/${now:%m.%d-%H:%M}-seed${seed}'
|
| 20 |
+
results_dir: '${root_dir}/saved_models/${now:%m.%d-%H.%M}_traj${trajectory_length}'
|
| 21 |
+
batch_size: 256
|
| 22 |
+
|
| 23 |
+
observation_wrapper: 'ObservationWrapperWithHistory'
|
| 24 |
+
use_ee_control: true
|
| 25 |
+
|
| 26 |
+
# Dataset
|
| 27 |
+
dataset:
|
| 28 |
+
_target_: 'cfdp.datasets.maniskill2_trajectory.ManiSkill2TrajectoryWithHistory'
|
| 29 |
+
# dataset_file: '/mnt/Dataset/shelf_data.h5'
|
| 30 |
+
# dataset_file: '/mnt/Dataset/full_cover.h5'
|
| 31 |
+
# dataset_file: '/mnt/Dataset/bowl_300_split.h5'
|
| 32 |
+
# dataset_file: '/mnt/Dataset/bowl_reach_500.h5'
|
| 33 |
+
dataset_file: '${root_dir}/data/panda_wristcam/Merged/merged_cup5.rl.4.h5'
|
| 34 |
+
# dataset_file: '${root_dir}/data/panda_wristcam/GraspCup-v1/motionplanning/split_cup5.rl.4.h5'
|
| 35 |
+
obs_keys: ['tcp_pose']
|
| 36 |
+
normalizer: 'LimitsNormalizer'
|
| 37 |
+
pcd: false
|
| 38 |
+
choice: 'interpolate'
|
| 39 |
+
pad_front: false
|
| 40 |
+
trajectory_length: ${trajectory_length}
|
| 41 |
+
history_length: 8
|
| 42 |
+
force_reload: false
|
| 43 |
+
load_count: -1
|
| 44 |
+
device: ${device}
|
| 45 |
+
stride: 2
|
| 46 |
+
verbose: false
|
| 47 |
+
|
| 48 |
+
val_set_size: 0.2
|
| 49 |
+
|
| 50 |
+
model:
|
| 51 |
+
# Diffusion Model
|
| 52 |
+
model_class: 'GaussianDiffusionModel'
|
| 53 |
+
variance_schedule: 'cosine' # 'cosine', 'exponential'
|
| 54 |
+
n_steps: 25
|
| 55 |
+
prediction_mode: "v"
|
| 56 |
+
use_ddim: true
|
| 57 |
+
use_snr_weight: true
|
| 58 |
+
# Unet
|
| 59 |
+
unet_dim_mults_option: 1
|
| 60 |
+
unet_input_dim: 64
|
| 61 |
+
conditioning_type: 'attention'
|
| 62 |
+
conditioning_embed_dim: 32
|
| 63 |
+
# Conext model
|
| 64 |
+
context_input_dim: ${multiply:10,${state_dim}}
|
| 65 |
+
# Loss
|
| 66 |
+
loss:
|
| 67 |
+
loss_class: 'GaussianDiffusionLoss'
|
| 68 |
+
|
| 69 |
+
# Training parameters
|
| 70 |
+
training:
|
| 71 |
+
lr: 1e-4
|
| 72 |
+
num_steps: 90000
|
| 73 |
+
use_ema: true
|
| 74 |
+
use_amp: false
|
| 75 |
+
steps_til_summary: 2000
|
| 76 |
+
steps_til_ckpt: 8000
|
| 77 |
+
summary_class: 'SummaryTrajectoryGeneration'
|
| 78 |
+
|
| 79 |
+
# Inference settings
|
| 80 |
+
inference:
|
| 81 |
+
env_id: 'GraspCup-v1'
|
| 82 |
+
obs_mode: 'rgb+position+segmentation'
|
| 83 |
+
control_mode: 'pd_ee_delta_pose'
|
| 84 |
+
reward_mode: 'dense'
|
| 85 |
+
use_ddim: true
|
| 86 |
+
robot_noise: 0.5
|
| 87 |
+
evaluate: false
|
| 88 |
+
# model_dir: '${root_dir}/saved_models/08.17-15.43_traj${trajectory_length}' #all data
|
| 89 |
+
# model_dir: '${root_dir}/saved_models/07.28-14.55_traj${trajectory_length}' #shelf data
|
| 90 |
+
# model_dir: '${root_dir}/saved_models/bowl_200_traj${trajectory_length}' #bowl 200 data
|
| 91 |
+
# model_dir: '${root_dir}/saved_models/bowl_300_new_traj${trajectory_length}' #bowl 300 data
|
| 92 |
+
# model_dir: '${root_dir}/saved_models/09.16-11.17_traj${trajectory_length}' #bowl reach 500 data
|
| 93 |
+
model_dir: '${root_dir}/saved_models/09.29-12.19_traj${trajectory_length}' #cup 5 data
|
| 94 |
+
use_ema: true
|
| 95 |
+
episodes: 100
|
| 96 |
+
replanning_interval: 30
|
| 97 |
+
n_samples: 8
|
| 98 |
+
n_diffusion_steps_without_noise: 0
|
| 99 |
+
index: 20
|
| 100 |
+
timestep: 5
|
| 101 |
+
sample_fn:
|
| 102 |
+
n_guide_steps: 2
|
| 103 |
+
t_start_guide: 8
|
| 104 |
+
noise_std_extra_schedule_fn: 0.5
|
| 105 |
+
validation_set_only: false
|
| 106 |
+
sim_with_history: true
|
| 107 |
+
# camera_type: 'hand_camera'
|
| 108 |
+
camera_type: 'world'
|
| 109 |
+
guidance_weight: 2.5
|
| 110 |
+
|
| 111 |
+
test:
|
| 112 |
+
inference_record: false # record obs, trajectory, step_info
|
| 113 |
+
recording_dir: '${root_dir}/inference_recordings/'
|
| 114 |
+
replay_dir: '${root_dir}/inference_recordings/concept_test_guidance'
|
| 115 |
+
# replay_dir: '${root_dir}/inference_recordings/recording_0421_2145'
|
| 116 |
+
# WandB settings
|
| 117 |
+
wandb:
|
| 118 |
+
enabled: true
|
| 119 |
+
mode: 'offline'
|
| 120 |
+
entity: 'xuanz_test'
|
| 121 |
+
project: 'simple test'
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/.circleci/build_count.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
# All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD-style license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
|
| 8 |
+
"""
|
| 9 |
+
Print the number of nightly builds
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
from collections import Counter
|
| 13 |
+
|
| 14 |
+
import yaml
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
conf = yaml.safe_load(open("config.yml"))
|
| 18 |
+
jobs = conf["workflows"]["build_and_test"]["jobs"]
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def jobtype(job):
|
| 22 |
+
if isinstance(job, str):
|
| 23 |
+
return job
|
| 24 |
+
if len(job) == 1:
|
| 25 |
+
[name] = job.keys()
|
| 26 |
+
return name
|
| 27 |
+
return "MULTIPLE PARTS"
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
for i, j in Counter(map(jobtype, jobs)).items():
|
| 31 |
+
print(i, j)
|
| 32 |
+
print()
|
| 33 |
+
print(len(jobs))
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/.circleci/check.sh
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash -e
|
| 2 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
# All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD-style license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
|
| 8 |
+
# Run this script before committing config.yml to verify it is valid yaml.
|
| 9 |
+
|
| 10 |
+
python -c 'import yaml; yaml.safe_load(open("config.yml"))' && echo OK - valid yaml
|
| 11 |
+
|
| 12 |
+
msg="circleci not installed so can't check schema"
|
| 13 |
+
command -v circleci > /dev/null && (cd ..; circleci config validate) || echo "$msg"
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/.circleci/config.in.yml
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version: 2.1
|
| 2 |
+
|
| 3 |
+
#examples:
|
| 4 |
+
#https://github.com/facebookresearch/ParlAI/blob/master/.circleci/config.yml
|
| 5 |
+
#https://github.com/facebookresearch/hydra/blob/master/.circleci/config.yml
|
| 6 |
+
#https://github.com/facebookresearch/habitat-api/blob/master/.circleci/config.yml
|
| 7 |
+
|
| 8 |
+
#drive tests with nox or tox or pytest?
|
| 9 |
+
|
| 10 |
+
# -------------------------------------------------------------------------------------
|
| 11 |
+
# environments where we run our jobs
|
| 12 |
+
# -------------------------------------------------------------------------------------
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
setupcuda: &setupcuda
|
| 16 |
+
run:
|
| 17 |
+
name: Setup CUDA
|
| 18 |
+
working_directory: ~/
|
| 19 |
+
command: |
|
| 20 |
+
# download and install nvidia drivers, cuda, etc
|
| 21 |
+
wget --no-verbose --no-clobber -P ~/nvidia-downloads https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.19.01_linux.run
|
| 22 |
+
sudo sh ~/nvidia-downloads/cuda_11.3.1_465.19.01_linux.run --silent
|
| 23 |
+
echo "Done installing CUDA."
|
| 24 |
+
pyenv versions
|
| 25 |
+
nvidia-smi
|
| 26 |
+
pyenv global 3.9.1
|
| 27 |
+
|
| 28 |
+
binary_common: &binary_common
|
| 29 |
+
parameters:
|
| 30 |
+
# Edit these defaults to do a release`
|
| 31 |
+
build_version:
|
| 32 |
+
description: "version number of release binary; by default, build a nightly"
|
| 33 |
+
type: string
|
| 34 |
+
default: ""
|
| 35 |
+
pytorch_version:
|
| 36 |
+
description: "PyTorch version to build against; by default, use a nightly"
|
| 37 |
+
type: string
|
| 38 |
+
default: ""
|
| 39 |
+
# Don't edit these
|
| 40 |
+
python_version:
|
| 41 |
+
description: "Python version to build against (e.g., 3.7)"
|
| 42 |
+
type: string
|
| 43 |
+
cu_version:
|
| 44 |
+
description: "CUDA version to build against, in CU format (e.g., cpu or cu100)"
|
| 45 |
+
type: string
|
| 46 |
+
wheel_docker_image:
|
| 47 |
+
description: "Wheel only: what docker image to use"
|
| 48 |
+
type: string
|
| 49 |
+
default: "pytorch/manylinux-cuda101"
|
| 50 |
+
conda_docker_image:
|
| 51 |
+
description: "what docker image to use for docker"
|
| 52 |
+
type: string
|
| 53 |
+
default: "pytorch/conda-cuda"
|
| 54 |
+
environment:
|
| 55 |
+
PYTHON_VERSION: << parameters.python_version >>
|
| 56 |
+
BUILD_VERSION: << parameters.build_version >>
|
| 57 |
+
PYTORCH_VERSION: << parameters.pytorch_version >>
|
| 58 |
+
CU_VERSION: << parameters.cu_version >>
|
| 59 |
+
TESTRUN_DOCKER_IMAGE: << parameters.conda_docker_image >>
|
| 60 |
+
|
| 61 |
+
jobs:
|
| 62 |
+
main:
|
| 63 |
+
environment:
|
| 64 |
+
CUDA_VERSION: "11.3"
|
| 65 |
+
resource_class: gpu.nvidia.small.multi
|
| 66 |
+
machine:
|
| 67 |
+
image: linux-cuda-11:default
|
| 68 |
+
steps:
|
| 69 |
+
- checkout
|
| 70 |
+
- <<: *setupcuda
|
| 71 |
+
- run: pip3 install --progress-bar off imageio wheel matplotlib 'pillow<7'
|
| 72 |
+
- run: pip3 install --progress-bar off torch==1.10.0+cu113 torchvision==0.11.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
|
| 73 |
+
# - run: conda create -p ~/conda_env python=3.7 numpy
|
| 74 |
+
# - run: conda activate ~/conda_env
|
| 75 |
+
# - run: conda install -c pytorch pytorch torchvision
|
| 76 |
+
|
| 77 |
+
- run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/fvcore'
|
| 78 |
+
- run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/iopath'
|
| 79 |
+
- run:
|
| 80 |
+
name: build
|
| 81 |
+
command: |
|
| 82 |
+
export LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.3/lib64
|
| 83 |
+
python3 setup.py build_ext --inplace
|
| 84 |
+
- run: LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.3/lib64 python -m unittest discover -v -s tests -t .
|
| 85 |
+
- run: python3 setup.py bdist_wheel
|
| 86 |
+
|
| 87 |
+
binary_linux_wheel:
|
| 88 |
+
<<: *binary_common
|
| 89 |
+
docker:
|
| 90 |
+
- image: << parameters.wheel_docker_image >>
|
| 91 |
+
auth:
|
| 92 |
+
username: $DOCKERHUB_USERNAME
|
| 93 |
+
password: $DOCKERHUB_TOKEN
|
| 94 |
+
resource_class: 2xlarge+
|
| 95 |
+
steps:
|
| 96 |
+
- checkout
|
| 97 |
+
- run: MAX_JOBS=15 packaging/build_wheel.sh
|
| 98 |
+
- store_artifacts:
|
| 99 |
+
path: dist
|
| 100 |
+
- persist_to_workspace:
|
| 101 |
+
root: dist
|
| 102 |
+
paths:
|
| 103 |
+
- "*"
|
| 104 |
+
|
| 105 |
+
binary_linux_conda:
|
| 106 |
+
<<: *binary_common
|
| 107 |
+
docker:
|
| 108 |
+
- image: "<< parameters.conda_docker_image >>"
|
| 109 |
+
auth:
|
| 110 |
+
username: $DOCKERHUB_USERNAME
|
| 111 |
+
password: $DOCKERHUB_TOKEN
|
| 112 |
+
resource_class: 2xlarge+
|
| 113 |
+
steps:
|
| 114 |
+
- checkout
|
| 115 |
+
# This is building with cuda but no gpu present,
|
| 116 |
+
# so we aren't running the tests.
|
| 117 |
+
- run:
|
| 118 |
+
name: build
|
| 119 |
+
no_output_timeout: 40m
|
| 120 |
+
command: MAX_JOBS=15 TEST_FLAG=--no-test python3 packaging/build_conda.py
|
| 121 |
+
- store_artifacts:
|
| 122 |
+
path: /opt/conda/conda-bld/linux-64
|
| 123 |
+
- persist_to_workspace:
|
| 124 |
+
root: /opt/conda/conda-bld/linux-64
|
| 125 |
+
paths:
|
| 126 |
+
- "*"
|
| 127 |
+
|
| 128 |
+
binary_linux_conda_cuda:
|
| 129 |
+
<<: *binary_common
|
| 130 |
+
machine:
|
| 131 |
+
image: linux-cuda-11:default
|
| 132 |
+
resource_class: gpu.nvidia.small.multi
|
| 133 |
+
steps:
|
| 134 |
+
- checkout
|
| 135 |
+
|
| 136 |
+
- run:
|
| 137 |
+
name: Pull docker image
|
| 138 |
+
command: |
|
| 139 |
+
nvidia-smi
|
| 140 |
+
set -e
|
| 141 |
+
|
| 142 |
+
{ docker login -u="$DOCKERHUB_USERNAME" -p="$DOCKERHUB_TOKEN" ; } 2> /dev/null
|
| 143 |
+
|
| 144 |
+
echo Pulling docker image $TESTRUN_DOCKER_IMAGE
|
| 145 |
+
docker pull $TESTRUN_DOCKER_IMAGE
|
| 146 |
+
- run:
|
| 147 |
+
name: Build and run tests
|
| 148 |
+
no_output_timeout: 40m
|
| 149 |
+
command: |
|
| 150 |
+
set -e
|
| 151 |
+
|
| 152 |
+
cd ${HOME}/project/
|
| 153 |
+
|
| 154 |
+
export JUST_TESTRUN=1
|
| 155 |
+
VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e CU_VERSION -e JUST_TESTRUN"
|
| 156 |
+
|
| 157 |
+
docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${TESTRUN_DOCKER_IMAGE} python3 ./packaging/build_conda.py
|
| 158 |
+
|
| 159 |
+
workflows:
|
| 160 |
+
version: 2
|
| 161 |
+
build_and_test:
|
| 162 |
+
jobs:
|
| 163 |
+
# - main:
|
| 164 |
+
# context: DOCKERHUB_TOKEN
|
| 165 |
+
{{workflows()}}
|
| 166 |
+
- binary_linux_conda_cuda:
|
| 167 |
+
name: testrun_conda_cuda_py310_cu117_pyt201
|
| 168 |
+
context: DOCKERHUB_TOKEN
|
| 169 |
+
python_version: "3.10"
|
| 170 |
+
pytorch_version: '2.0.1'
|
| 171 |
+
cu_version: "cu117"
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/.circleci/config.yml
ADDED
|
@@ -0,0 +1,688 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version: 2.1
|
| 2 |
+
|
| 3 |
+
#examples:
|
| 4 |
+
#https://github.com/facebookresearch/ParlAI/blob/master/.circleci/config.yml
|
| 5 |
+
#https://github.com/facebookresearch/hydra/blob/master/.circleci/config.yml
|
| 6 |
+
#https://github.com/facebookresearch/habitat-api/blob/master/.circleci/config.yml
|
| 7 |
+
|
| 8 |
+
#drive tests with nox or tox or pytest?
|
| 9 |
+
|
| 10 |
+
# -------------------------------------------------------------------------------------
|
| 11 |
+
# environments where we run our jobs
|
| 12 |
+
# -------------------------------------------------------------------------------------
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
setupcuda: &setupcuda
|
| 16 |
+
run:
|
| 17 |
+
name: Setup CUDA
|
| 18 |
+
working_directory: ~/
|
| 19 |
+
command: |
|
| 20 |
+
# download and install nvidia drivers, cuda, etc
|
| 21 |
+
wget --no-verbose --no-clobber -P ~/nvidia-downloads https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.19.01_linux.run
|
| 22 |
+
sudo sh ~/nvidia-downloads/cuda_11.3.1_465.19.01_linux.run --silent
|
| 23 |
+
echo "Done installing CUDA."
|
| 24 |
+
pyenv versions
|
| 25 |
+
nvidia-smi
|
| 26 |
+
pyenv global 3.9.1
|
| 27 |
+
|
| 28 |
+
binary_common: &binary_common
|
| 29 |
+
parameters:
|
| 30 |
+
# Edit these defaults to do a release`
|
| 31 |
+
build_version:
|
| 32 |
+
description: "version number of release binary; by default, build a nightly"
|
| 33 |
+
type: string
|
| 34 |
+
default: ""
|
| 35 |
+
pytorch_version:
|
| 36 |
+
description: "PyTorch version to build against; by default, use a nightly"
|
| 37 |
+
type: string
|
| 38 |
+
default: ""
|
| 39 |
+
# Don't edit these
|
| 40 |
+
python_version:
|
| 41 |
+
description: "Python version to build against (e.g., 3.7)"
|
| 42 |
+
type: string
|
| 43 |
+
cu_version:
|
| 44 |
+
description: "CUDA version to build against, in CU format (e.g., cpu or cu100)"
|
| 45 |
+
type: string
|
| 46 |
+
wheel_docker_image:
|
| 47 |
+
description: "Wheel only: what docker image to use"
|
| 48 |
+
type: string
|
| 49 |
+
default: "pytorch/manylinux-cuda101"
|
| 50 |
+
conda_docker_image:
|
| 51 |
+
description: "what docker image to use for docker"
|
| 52 |
+
type: string
|
| 53 |
+
default: "pytorch/conda-cuda"
|
| 54 |
+
environment:
|
| 55 |
+
PYTHON_VERSION: << parameters.python_version >>
|
| 56 |
+
BUILD_VERSION: << parameters.build_version >>
|
| 57 |
+
PYTORCH_VERSION: << parameters.pytorch_version >>
|
| 58 |
+
CU_VERSION: << parameters.cu_version >>
|
| 59 |
+
TESTRUN_DOCKER_IMAGE: << parameters.conda_docker_image >>
|
| 60 |
+
|
| 61 |
+
jobs:
|
| 62 |
+
main:
|
| 63 |
+
environment:
|
| 64 |
+
CUDA_VERSION: "11.3"
|
| 65 |
+
resource_class: gpu.nvidia.small.multi
|
| 66 |
+
machine:
|
| 67 |
+
image: linux-cuda-11:default
|
| 68 |
+
steps:
|
| 69 |
+
- checkout
|
| 70 |
+
- <<: *setupcuda
|
| 71 |
+
- run: pip3 install --progress-bar off imageio wheel matplotlib 'pillow<7'
|
| 72 |
+
- run: pip3 install --progress-bar off torch==1.10.0+cu113 torchvision==0.11.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
|
| 73 |
+
# - run: conda create -p ~/conda_env python=3.7 numpy
|
| 74 |
+
# - run: conda activate ~/conda_env
|
| 75 |
+
# - run: conda install -c pytorch pytorch torchvision
|
| 76 |
+
|
| 77 |
+
- run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/fvcore'
|
| 78 |
+
- run: pip3 install --progress-bar off 'git+https://github.com/facebookresearch/iopath'
|
| 79 |
+
- run:
|
| 80 |
+
name: build
|
| 81 |
+
command: |
|
| 82 |
+
export LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.3/lib64
|
| 83 |
+
python3 setup.py build_ext --inplace
|
| 84 |
+
- run: LD_LIBRARY_PATH=$LD_LIBARY_PATH:/usr/local/cuda-11.3/lib64 python -m unittest discover -v -s tests -t .
|
| 85 |
+
- run: python3 setup.py bdist_wheel
|
| 86 |
+
|
| 87 |
+
binary_linux_wheel:
|
| 88 |
+
<<: *binary_common
|
| 89 |
+
docker:
|
| 90 |
+
- image: << parameters.wheel_docker_image >>
|
| 91 |
+
auth:
|
| 92 |
+
username: $DOCKERHUB_USERNAME
|
| 93 |
+
password: $DOCKERHUB_TOKEN
|
| 94 |
+
resource_class: 2xlarge+
|
| 95 |
+
steps:
|
| 96 |
+
- checkout
|
| 97 |
+
- run: MAX_JOBS=15 packaging/build_wheel.sh
|
| 98 |
+
- store_artifacts:
|
| 99 |
+
path: dist
|
| 100 |
+
- persist_to_workspace:
|
| 101 |
+
root: dist
|
| 102 |
+
paths:
|
| 103 |
+
- "*"
|
| 104 |
+
|
| 105 |
+
binary_linux_conda:
|
| 106 |
+
<<: *binary_common
|
| 107 |
+
docker:
|
| 108 |
+
- image: "<< parameters.conda_docker_image >>"
|
| 109 |
+
auth:
|
| 110 |
+
username: $DOCKERHUB_USERNAME
|
| 111 |
+
password: $DOCKERHUB_TOKEN
|
| 112 |
+
resource_class: 2xlarge+
|
| 113 |
+
steps:
|
| 114 |
+
- checkout
|
| 115 |
+
# This is building with cuda but no gpu present,
|
| 116 |
+
# so we aren't running the tests.
|
| 117 |
+
- run:
|
| 118 |
+
name: build
|
| 119 |
+
no_output_timeout: 40m
|
| 120 |
+
command: MAX_JOBS=15 TEST_FLAG=--no-test python3 packaging/build_conda.py
|
| 121 |
+
- store_artifacts:
|
| 122 |
+
path: /opt/conda/conda-bld/linux-64
|
| 123 |
+
- persist_to_workspace:
|
| 124 |
+
root: /opt/conda/conda-bld/linux-64
|
| 125 |
+
paths:
|
| 126 |
+
- "*"
|
| 127 |
+
|
| 128 |
+
binary_linux_conda_cuda:
|
| 129 |
+
<<: *binary_common
|
| 130 |
+
machine:
|
| 131 |
+
image: linux-cuda-11:default
|
| 132 |
+
resource_class: gpu.nvidia.small.multi
|
| 133 |
+
steps:
|
| 134 |
+
- checkout
|
| 135 |
+
|
| 136 |
+
- run:
|
| 137 |
+
name: Pull docker image
|
| 138 |
+
command: |
|
| 139 |
+
nvidia-smi
|
| 140 |
+
set -e
|
| 141 |
+
|
| 142 |
+
{ docker login -u="$DOCKERHUB_USERNAME" -p="$DOCKERHUB_TOKEN" ; } 2> /dev/null
|
| 143 |
+
|
| 144 |
+
echo Pulling docker image $TESTRUN_DOCKER_IMAGE
|
| 145 |
+
docker pull $TESTRUN_DOCKER_IMAGE
|
| 146 |
+
- run:
|
| 147 |
+
name: Build and run tests
|
| 148 |
+
no_output_timeout: 40m
|
| 149 |
+
command: |
|
| 150 |
+
set -e
|
| 151 |
+
|
| 152 |
+
cd ${HOME}/project/
|
| 153 |
+
|
| 154 |
+
export JUST_TESTRUN=1
|
| 155 |
+
VARS_TO_PASS="-e PYTHON_VERSION -e BUILD_VERSION -e PYTORCH_VERSION -e CU_VERSION -e JUST_TESTRUN"
|
| 156 |
+
|
| 157 |
+
docker run --gpus all --ipc=host -v $(pwd):/remote -w /remote ${VARS_TO_PASS} ${TESTRUN_DOCKER_IMAGE} python3 ./packaging/build_conda.py
|
| 158 |
+
|
| 159 |
+
workflows:
|
| 160 |
+
version: 2
|
| 161 |
+
build_and_test:
|
| 162 |
+
jobs:
|
| 163 |
+
# - main:
|
| 164 |
+
# context: DOCKERHUB_TOKEN
|
| 165 |
+
- binary_linux_conda:
|
| 166 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 167 |
+
context: DOCKERHUB_TOKEN
|
| 168 |
+
cu_version: cu118
|
| 169 |
+
name: linux_conda_py38_cu118_pyt210
|
| 170 |
+
python_version: '3.8'
|
| 171 |
+
pytorch_version: 2.1.0
|
| 172 |
+
- binary_linux_conda:
|
| 173 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 174 |
+
context: DOCKERHUB_TOKEN
|
| 175 |
+
cu_version: cu121
|
| 176 |
+
name: linux_conda_py38_cu121_pyt210
|
| 177 |
+
python_version: '3.8'
|
| 178 |
+
pytorch_version: 2.1.0
|
| 179 |
+
- binary_linux_conda:
|
| 180 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 181 |
+
context: DOCKERHUB_TOKEN
|
| 182 |
+
cu_version: cu118
|
| 183 |
+
name: linux_conda_py38_cu118_pyt211
|
| 184 |
+
python_version: '3.8'
|
| 185 |
+
pytorch_version: 2.1.1
|
| 186 |
+
- binary_linux_conda:
|
| 187 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 188 |
+
context: DOCKERHUB_TOKEN
|
| 189 |
+
cu_version: cu121
|
| 190 |
+
name: linux_conda_py38_cu121_pyt211
|
| 191 |
+
python_version: '3.8'
|
| 192 |
+
pytorch_version: 2.1.1
|
| 193 |
+
- binary_linux_conda:
|
| 194 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 195 |
+
context: DOCKERHUB_TOKEN
|
| 196 |
+
cu_version: cu118
|
| 197 |
+
name: linux_conda_py38_cu118_pyt212
|
| 198 |
+
python_version: '3.8'
|
| 199 |
+
pytorch_version: 2.1.2
|
| 200 |
+
- binary_linux_conda:
|
| 201 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 202 |
+
context: DOCKERHUB_TOKEN
|
| 203 |
+
cu_version: cu121
|
| 204 |
+
name: linux_conda_py38_cu121_pyt212
|
| 205 |
+
python_version: '3.8'
|
| 206 |
+
pytorch_version: 2.1.2
|
| 207 |
+
- binary_linux_conda:
|
| 208 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 209 |
+
context: DOCKERHUB_TOKEN
|
| 210 |
+
cu_version: cu118
|
| 211 |
+
name: linux_conda_py38_cu118_pyt220
|
| 212 |
+
python_version: '3.8'
|
| 213 |
+
pytorch_version: 2.2.0
|
| 214 |
+
- binary_linux_conda:
|
| 215 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 216 |
+
context: DOCKERHUB_TOKEN
|
| 217 |
+
cu_version: cu121
|
| 218 |
+
name: linux_conda_py38_cu121_pyt220
|
| 219 |
+
python_version: '3.8'
|
| 220 |
+
pytorch_version: 2.2.0
|
| 221 |
+
- binary_linux_conda:
|
| 222 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 223 |
+
context: DOCKERHUB_TOKEN
|
| 224 |
+
cu_version: cu118
|
| 225 |
+
name: linux_conda_py38_cu118_pyt222
|
| 226 |
+
python_version: '3.8'
|
| 227 |
+
pytorch_version: 2.2.2
|
| 228 |
+
- binary_linux_conda:
|
| 229 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 230 |
+
context: DOCKERHUB_TOKEN
|
| 231 |
+
cu_version: cu121
|
| 232 |
+
name: linux_conda_py38_cu121_pyt222
|
| 233 |
+
python_version: '3.8'
|
| 234 |
+
pytorch_version: 2.2.2
|
| 235 |
+
- binary_linux_conda:
|
| 236 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 237 |
+
context: DOCKERHUB_TOKEN
|
| 238 |
+
cu_version: cu118
|
| 239 |
+
name: linux_conda_py38_cu118_pyt231
|
| 240 |
+
python_version: '3.8'
|
| 241 |
+
pytorch_version: 2.3.1
|
| 242 |
+
- binary_linux_conda:
|
| 243 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 244 |
+
context: DOCKERHUB_TOKEN
|
| 245 |
+
cu_version: cu121
|
| 246 |
+
name: linux_conda_py38_cu121_pyt231
|
| 247 |
+
python_version: '3.8'
|
| 248 |
+
pytorch_version: 2.3.1
|
| 249 |
+
- binary_linux_conda:
|
| 250 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 251 |
+
context: DOCKERHUB_TOKEN
|
| 252 |
+
cu_version: cu118
|
| 253 |
+
name: linux_conda_py38_cu118_pyt240
|
| 254 |
+
python_version: '3.8'
|
| 255 |
+
pytorch_version: 2.4.0
|
| 256 |
+
- binary_linux_conda:
|
| 257 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 258 |
+
context: DOCKERHUB_TOKEN
|
| 259 |
+
cu_version: cu121
|
| 260 |
+
name: linux_conda_py38_cu121_pyt240
|
| 261 |
+
python_version: '3.8'
|
| 262 |
+
pytorch_version: 2.4.0
|
| 263 |
+
- binary_linux_conda:
|
| 264 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 265 |
+
context: DOCKERHUB_TOKEN
|
| 266 |
+
cu_version: cu118
|
| 267 |
+
name: linux_conda_py38_cu118_pyt241
|
| 268 |
+
python_version: '3.8'
|
| 269 |
+
pytorch_version: 2.4.1
|
| 270 |
+
- binary_linux_conda:
|
| 271 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 272 |
+
context: DOCKERHUB_TOKEN
|
| 273 |
+
cu_version: cu121
|
| 274 |
+
name: linux_conda_py38_cu121_pyt241
|
| 275 |
+
python_version: '3.8'
|
| 276 |
+
pytorch_version: 2.4.1
|
| 277 |
+
- binary_linux_conda:
|
| 278 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 279 |
+
context: DOCKERHUB_TOKEN
|
| 280 |
+
cu_version: cu118
|
| 281 |
+
name: linux_conda_py39_cu118_pyt210
|
| 282 |
+
python_version: '3.9'
|
| 283 |
+
pytorch_version: 2.1.0
|
| 284 |
+
- binary_linux_conda:
|
| 285 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 286 |
+
context: DOCKERHUB_TOKEN
|
| 287 |
+
cu_version: cu121
|
| 288 |
+
name: linux_conda_py39_cu121_pyt210
|
| 289 |
+
python_version: '3.9'
|
| 290 |
+
pytorch_version: 2.1.0
|
| 291 |
+
- binary_linux_conda:
|
| 292 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 293 |
+
context: DOCKERHUB_TOKEN
|
| 294 |
+
cu_version: cu118
|
| 295 |
+
name: linux_conda_py39_cu118_pyt211
|
| 296 |
+
python_version: '3.9'
|
| 297 |
+
pytorch_version: 2.1.1
|
| 298 |
+
- binary_linux_conda:
|
| 299 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 300 |
+
context: DOCKERHUB_TOKEN
|
| 301 |
+
cu_version: cu121
|
| 302 |
+
name: linux_conda_py39_cu121_pyt211
|
| 303 |
+
python_version: '3.9'
|
| 304 |
+
pytorch_version: 2.1.1
|
| 305 |
+
- binary_linux_conda:
|
| 306 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 307 |
+
context: DOCKERHUB_TOKEN
|
| 308 |
+
cu_version: cu118
|
| 309 |
+
name: linux_conda_py39_cu118_pyt212
|
| 310 |
+
python_version: '3.9'
|
| 311 |
+
pytorch_version: 2.1.2
|
| 312 |
+
- binary_linux_conda:
|
| 313 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 314 |
+
context: DOCKERHUB_TOKEN
|
| 315 |
+
cu_version: cu121
|
| 316 |
+
name: linux_conda_py39_cu121_pyt212
|
| 317 |
+
python_version: '3.9'
|
| 318 |
+
pytorch_version: 2.1.2
|
| 319 |
+
- binary_linux_conda:
|
| 320 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 321 |
+
context: DOCKERHUB_TOKEN
|
| 322 |
+
cu_version: cu118
|
| 323 |
+
name: linux_conda_py39_cu118_pyt220
|
| 324 |
+
python_version: '3.9'
|
| 325 |
+
pytorch_version: 2.2.0
|
| 326 |
+
- binary_linux_conda:
|
| 327 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 328 |
+
context: DOCKERHUB_TOKEN
|
| 329 |
+
cu_version: cu121
|
| 330 |
+
name: linux_conda_py39_cu121_pyt220
|
| 331 |
+
python_version: '3.9'
|
| 332 |
+
pytorch_version: 2.2.0
|
| 333 |
+
- binary_linux_conda:
|
| 334 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 335 |
+
context: DOCKERHUB_TOKEN
|
| 336 |
+
cu_version: cu118
|
| 337 |
+
name: linux_conda_py39_cu118_pyt222
|
| 338 |
+
python_version: '3.9'
|
| 339 |
+
pytorch_version: 2.2.2
|
| 340 |
+
- binary_linux_conda:
|
| 341 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 342 |
+
context: DOCKERHUB_TOKEN
|
| 343 |
+
cu_version: cu121
|
| 344 |
+
name: linux_conda_py39_cu121_pyt222
|
| 345 |
+
python_version: '3.9'
|
| 346 |
+
pytorch_version: 2.2.2
|
| 347 |
+
- binary_linux_conda:
|
| 348 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 349 |
+
context: DOCKERHUB_TOKEN
|
| 350 |
+
cu_version: cu118
|
| 351 |
+
name: linux_conda_py39_cu118_pyt231
|
| 352 |
+
python_version: '3.9'
|
| 353 |
+
pytorch_version: 2.3.1
|
| 354 |
+
- binary_linux_conda:
|
| 355 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 356 |
+
context: DOCKERHUB_TOKEN
|
| 357 |
+
cu_version: cu121
|
| 358 |
+
name: linux_conda_py39_cu121_pyt231
|
| 359 |
+
python_version: '3.9'
|
| 360 |
+
pytorch_version: 2.3.1
|
| 361 |
+
- binary_linux_conda:
|
| 362 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 363 |
+
context: DOCKERHUB_TOKEN
|
| 364 |
+
cu_version: cu118
|
| 365 |
+
name: linux_conda_py39_cu118_pyt240
|
| 366 |
+
python_version: '3.9'
|
| 367 |
+
pytorch_version: 2.4.0
|
| 368 |
+
- binary_linux_conda:
|
| 369 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 370 |
+
context: DOCKERHUB_TOKEN
|
| 371 |
+
cu_version: cu121
|
| 372 |
+
name: linux_conda_py39_cu121_pyt240
|
| 373 |
+
python_version: '3.9'
|
| 374 |
+
pytorch_version: 2.4.0
|
| 375 |
+
- binary_linux_conda:
|
| 376 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 377 |
+
context: DOCKERHUB_TOKEN
|
| 378 |
+
cu_version: cu118
|
| 379 |
+
name: linux_conda_py39_cu118_pyt241
|
| 380 |
+
python_version: '3.9'
|
| 381 |
+
pytorch_version: 2.4.1
|
| 382 |
+
- binary_linux_conda:
|
| 383 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 384 |
+
context: DOCKERHUB_TOKEN
|
| 385 |
+
cu_version: cu121
|
| 386 |
+
name: linux_conda_py39_cu121_pyt241
|
| 387 |
+
python_version: '3.9'
|
| 388 |
+
pytorch_version: 2.4.1
|
| 389 |
+
- binary_linux_conda:
|
| 390 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 391 |
+
context: DOCKERHUB_TOKEN
|
| 392 |
+
cu_version: cu118
|
| 393 |
+
name: linux_conda_py310_cu118_pyt210
|
| 394 |
+
python_version: '3.10'
|
| 395 |
+
pytorch_version: 2.1.0
|
| 396 |
+
- binary_linux_conda:
|
| 397 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 398 |
+
context: DOCKERHUB_TOKEN
|
| 399 |
+
cu_version: cu121
|
| 400 |
+
name: linux_conda_py310_cu121_pyt210
|
| 401 |
+
python_version: '3.10'
|
| 402 |
+
pytorch_version: 2.1.0
|
| 403 |
+
- binary_linux_conda:
|
| 404 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 405 |
+
context: DOCKERHUB_TOKEN
|
| 406 |
+
cu_version: cu118
|
| 407 |
+
name: linux_conda_py310_cu118_pyt211
|
| 408 |
+
python_version: '3.10'
|
| 409 |
+
pytorch_version: 2.1.1
|
| 410 |
+
- binary_linux_conda:
|
| 411 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 412 |
+
context: DOCKERHUB_TOKEN
|
| 413 |
+
cu_version: cu121
|
| 414 |
+
name: linux_conda_py310_cu121_pyt211
|
| 415 |
+
python_version: '3.10'
|
| 416 |
+
pytorch_version: 2.1.1
|
| 417 |
+
- binary_linux_conda:
|
| 418 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 419 |
+
context: DOCKERHUB_TOKEN
|
| 420 |
+
cu_version: cu118
|
| 421 |
+
name: linux_conda_py310_cu118_pyt212
|
| 422 |
+
python_version: '3.10'
|
| 423 |
+
pytorch_version: 2.1.2
|
| 424 |
+
- binary_linux_conda:
|
| 425 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 426 |
+
context: DOCKERHUB_TOKEN
|
| 427 |
+
cu_version: cu121
|
| 428 |
+
name: linux_conda_py310_cu121_pyt212
|
| 429 |
+
python_version: '3.10'
|
| 430 |
+
pytorch_version: 2.1.2
|
| 431 |
+
- binary_linux_conda:
|
| 432 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 433 |
+
context: DOCKERHUB_TOKEN
|
| 434 |
+
cu_version: cu118
|
| 435 |
+
name: linux_conda_py310_cu118_pyt220
|
| 436 |
+
python_version: '3.10'
|
| 437 |
+
pytorch_version: 2.2.0
|
| 438 |
+
- binary_linux_conda:
|
| 439 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 440 |
+
context: DOCKERHUB_TOKEN
|
| 441 |
+
cu_version: cu121
|
| 442 |
+
name: linux_conda_py310_cu121_pyt220
|
| 443 |
+
python_version: '3.10'
|
| 444 |
+
pytorch_version: 2.2.0
|
| 445 |
+
- binary_linux_conda:
|
| 446 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 447 |
+
context: DOCKERHUB_TOKEN
|
| 448 |
+
cu_version: cu118
|
| 449 |
+
name: linux_conda_py310_cu118_pyt222
|
| 450 |
+
python_version: '3.10'
|
| 451 |
+
pytorch_version: 2.2.2
|
| 452 |
+
- binary_linux_conda:
|
| 453 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 454 |
+
context: DOCKERHUB_TOKEN
|
| 455 |
+
cu_version: cu121
|
| 456 |
+
name: linux_conda_py310_cu121_pyt222
|
| 457 |
+
python_version: '3.10'
|
| 458 |
+
pytorch_version: 2.2.2
|
| 459 |
+
- binary_linux_conda:
|
| 460 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 461 |
+
context: DOCKERHUB_TOKEN
|
| 462 |
+
cu_version: cu118
|
| 463 |
+
name: linux_conda_py310_cu118_pyt231
|
| 464 |
+
python_version: '3.10'
|
| 465 |
+
pytorch_version: 2.3.1
|
| 466 |
+
- binary_linux_conda:
|
| 467 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 468 |
+
context: DOCKERHUB_TOKEN
|
| 469 |
+
cu_version: cu121
|
| 470 |
+
name: linux_conda_py310_cu121_pyt231
|
| 471 |
+
python_version: '3.10'
|
| 472 |
+
pytorch_version: 2.3.1
|
| 473 |
+
- binary_linux_conda:
|
| 474 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 475 |
+
context: DOCKERHUB_TOKEN
|
| 476 |
+
cu_version: cu118
|
| 477 |
+
name: linux_conda_py310_cu118_pyt240
|
| 478 |
+
python_version: '3.10'
|
| 479 |
+
pytorch_version: 2.4.0
|
| 480 |
+
- binary_linux_conda:
|
| 481 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 482 |
+
context: DOCKERHUB_TOKEN
|
| 483 |
+
cu_version: cu121
|
| 484 |
+
name: linux_conda_py310_cu121_pyt240
|
| 485 |
+
python_version: '3.10'
|
| 486 |
+
pytorch_version: 2.4.0
|
| 487 |
+
- binary_linux_conda:
|
| 488 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 489 |
+
context: DOCKERHUB_TOKEN
|
| 490 |
+
cu_version: cu118
|
| 491 |
+
name: linux_conda_py310_cu118_pyt241
|
| 492 |
+
python_version: '3.10'
|
| 493 |
+
pytorch_version: 2.4.1
|
| 494 |
+
- binary_linux_conda:
|
| 495 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 496 |
+
context: DOCKERHUB_TOKEN
|
| 497 |
+
cu_version: cu121
|
| 498 |
+
name: linux_conda_py310_cu121_pyt241
|
| 499 |
+
python_version: '3.10'
|
| 500 |
+
pytorch_version: 2.4.1
|
| 501 |
+
- binary_linux_conda:
|
| 502 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 503 |
+
context: DOCKERHUB_TOKEN
|
| 504 |
+
cu_version: cu118
|
| 505 |
+
name: linux_conda_py311_cu118_pyt210
|
| 506 |
+
python_version: '3.11'
|
| 507 |
+
pytorch_version: 2.1.0
|
| 508 |
+
- binary_linux_conda:
|
| 509 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 510 |
+
context: DOCKERHUB_TOKEN
|
| 511 |
+
cu_version: cu121
|
| 512 |
+
name: linux_conda_py311_cu121_pyt210
|
| 513 |
+
python_version: '3.11'
|
| 514 |
+
pytorch_version: 2.1.0
|
| 515 |
+
- binary_linux_conda:
|
| 516 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 517 |
+
context: DOCKERHUB_TOKEN
|
| 518 |
+
cu_version: cu118
|
| 519 |
+
name: linux_conda_py311_cu118_pyt211
|
| 520 |
+
python_version: '3.11'
|
| 521 |
+
pytorch_version: 2.1.1
|
| 522 |
+
- binary_linux_conda:
|
| 523 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 524 |
+
context: DOCKERHUB_TOKEN
|
| 525 |
+
cu_version: cu121
|
| 526 |
+
name: linux_conda_py311_cu121_pyt211
|
| 527 |
+
python_version: '3.11'
|
| 528 |
+
pytorch_version: 2.1.1
|
| 529 |
+
- binary_linux_conda:
|
| 530 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 531 |
+
context: DOCKERHUB_TOKEN
|
| 532 |
+
cu_version: cu118
|
| 533 |
+
name: linux_conda_py311_cu118_pyt212
|
| 534 |
+
python_version: '3.11'
|
| 535 |
+
pytorch_version: 2.1.2
|
| 536 |
+
- binary_linux_conda:
|
| 537 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 538 |
+
context: DOCKERHUB_TOKEN
|
| 539 |
+
cu_version: cu121
|
| 540 |
+
name: linux_conda_py311_cu121_pyt212
|
| 541 |
+
python_version: '3.11'
|
| 542 |
+
pytorch_version: 2.1.2
|
| 543 |
+
- binary_linux_conda:
|
| 544 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 545 |
+
context: DOCKERHUB_TOKEN
|
| 546 |
+
cu_version: cu118
|
| 547 |
+
name: linux_conda_py311_cu118_pyt220
|
| 548 |
+
python_version: '3.11'
|
| 549 |
+
pytorch_version: 2.2.0
|
| 550 |
+
- binary_linux_conda:
|
| 551 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 552 |
+
context: DOCKERHUB_TOKEN
|
| 553 |
+
cu_version: cu121
|
| 554 |
+
name: linux_conda_py311_cu121_pyt220
|
| 555 |
+
python_version: '3.11'
|
| 556 |
+
pytorch_version: 2.2.0
|
| 557 |
+
- binary_linux_conda:
|
| 558 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 559 |
+
context: DOCKERHUB_TOKEN
|
| 560 |
+
cu_version: cu118
|
| 561 |
+
name: linux_conda_py311_cu118_pyt222
|
| 562 |
+
python_version: '3.11'
|
| 563 |
+
pytorch_version: 2.2.2
|
| 564 |
+
- binary_linux_conda:
|
| 565 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 566 |
+
context: DOCKERHUB_TOKEN
|
| 567 |
+
cu_version: cu121
|
| 568 |
+
name: linux_conda_py311_cu121_pyt222
|
| 569 |
+
python_version: '3.11'
|
| 570 |
+
pytorch_version: 2.2.2
|
| 571 |
+
- binary_linux_conda:
|
| 572 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 573 |
+
context: DOCKERHUB_TOKEN
|
| 574 |
+
cu_version: cu118
|
| 575 |
+
name: linux_conda_py311_cu118_pyt231
|
| 576 |
+
python_version: '3.11'
|
| 577 |
+
pytorch_version: 2.3.1
|
| 578 |
+
- binary_linux_conda:
|
| 579 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 580 |
+
context: DOCKERHUB_TOKEN
|
| 581 |
+
cu_version: cu121
|
| 582 |
+
name: linux_conda_py311_cu121_pyt231
|
| 583 |
+
python_version: '3.11'
|
| 584 |
+
pytorch_version: 2.3.1
|
| 585 |
+
- binary_linux_conda:
|
| 586 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 587 |
+
context: DOCKERHUB_TOKEN
|
| 588 |
+
cu_version: cu118
|
| 589 |
+
name: linux_conda_py311_cu118_pyt240
|
| 590 |
+
python_version: '3.11'
|
| 591 |
+
pytorch_version: 2.4.0
|
| 592 |
+
- binary_linux_conda:
|
| 593 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 594 |
+
context: DOCKERHUB_TOKEN
|
| 595 |
+
cu_version: cu121
|
| 596 |
+
name: linux_conda_py311_cu121_pyt240
|
| 597 |
+
python_version: '3.11'
|
| 598 |
+
pytorch_version: 2.4.0
|
| 599 |
+
- binary_linux_conda:
|
| 600 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 601 |
+
context: DOCKERHUB_TOKEN
|
| 602 |
+
cu_version: cu118
|
| 603 |
+
name: linux_conda_py311_cu118_pyt241
|
| 604 |
+
python_version: '3.11'
|
| 605 |
+
pytorch_version: 2.4.1
|
| 606 |
+
- binary_linux_conda:
|
| 607 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 608 |
+
context: DOCKERHUB_TOKEN
|
| 609 |
+
cu_version: cu121
|
| 610 |
+
name: linux_conda_py311_cu121_pyt241
|
| 611 |
+
python_version: '3.11'
|
| 612 |
+
pytorch_version: 2.4.1
|
| 613 |
+
- binary_linux_conda:
|
| 614 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 615 |
+
context: DOCKERHUB_TOKEN
|
| 616 |
+
cu_version: cu118
|
| 617 |
+
name: linux_conda_py312_cu118_pyt220
|
| 618 |
+
python_version: '3.12'
|
| 619 |
+
pytorch_version: 2.2.0
|
| 620 |
+
- binary_linux_conda:
|
| 621 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 622 |
+
context: DOCKERHUB_TOKEN
|
| 623 |
+
cu_version: cu121
|
| 624 |
+
name: linux_conda_py312_cu121_pyt220
|
| 625 |
+
python_version: '3.12'
|
| 626 |
+
pytorch_version: 2.2.0
|
| 627 |
+
- binary_linux_conda:
|
| 628 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 629 |
+
context: DOCKERHUB_TOKEN
|
| 630 |
+
cu_version: cu118
|
| 631 |
+
name: linux_conda_py312_cu118_pyt222
|
| 632 |
+
python_version: '3.12'
|
| 633 |
+
pytorch_version: 2.2.2
|
| 634 |
+
- binary_linux_conda:
|
| 635 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 636 |
+
context: DOCKERHUB_TOKEN
|
| 637 |
+
cu_version: cu121
|
| 638 |
+
name: linux_conda_py312_cu121_pyt222
|
| 639 |
+
python_version: '3.12'
|
| 640 |
+
pytorch_version: 2.2.2
|
| 641 |
+
- binary_linux_conda:
|
| 642 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 643 |
+
context: DOCKERHUB_TOKEN
|
| 644 |
+
cu_version: cu118
|
| 645 |
+
name: linux_conda_py312_cu118_pyt231
|
| 646 |
+
python_version: '3.12'
|
| 647 |
+
pytorch_version: 2.3.1
|
| 648 |
+
- binary_linux_conda:
|
| 649 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 650 |
+
context: DOCKERHUB_TOKEN
|
| 651 |
+
cu_version: cu121
|
| 652 |
+
name: linux_conda_py312_cu121_pyt231
|
| 653 |
+
python_version: '3.12'
|
| 654 |
+
pytorch_version: 2.3.1
|
| 655 |
+
- binary_linux_conda:
|
| 656 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 657 |
+
context: DOCKERHUB_TOKEN
|
| 658 |
+
cu_version: cu118
|
| 659 |
+
name: linux_conda_py312_cu118_pyt240
|
| 660 |
+
python_version: '3.12'
|
| 661 |
+
pytorch_version: 2.4.0
|
| 662 |
+
- binary_linux_conda:
|
| 663 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 664 |
+
context: DOCKERHUB_TOKEN
|
| 665 |
+
cu_version: cu121
|
| 666 |
+
name: linux_conda_py312_cu121_pyt240
|
| 667 |
+
python_version: '3.12'
|
| 668 |
+
pytorch_version: 2.4.0
|
| 669 |
+
- binary_linux_conda:
|
| 670 |
+
conda_docker_image: pytorch/conda-builder:cuda118
|
| 671 |
+
context: DOCKERHUB_TOKEN
|
| 672 |
+
cu_version: cu118
|
| 673 |
+
name: linux_conda_py312_cu118_pyt241
|
| 674 |
+
python_version: '3.12'
|
| 675 |
+
pytorch_version: 2.4.1
|
| 676 |
+
- binary_linux_conda:
|
| 677 |
+
conda_docker_image: pytorch/conda-builder:cuda121
|
| 678 |
+
context: DOCKERHUB_TOKEN
|
| 679 |
+
cu_version: cu121
|
| 680 |
+
name: linux_conda_py312_cu121_pyt241
|
| 681 |
+
python_version: '3.12'
|
| 682 |
+
pytorch_version: 2.4.1
|
| 683 |
+
- binary_linux_conda_cuda:
|
| 684 |
+
name: testrun_conda_cuda_py310_cu117_pyt201
|
| 685 |
+
context: DOCKERHUB_TOKEN
|
| 686 |
+
python_version: "3.10"
|
| 687 |
+
pytorch_version: '2.0.1'
|
| 688 |
+
cu_version: "cu117"
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/.circleci/regenerate.py
ADDED
|
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 3 |
+
# All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# This source code is licensed under the BSD-style license found in the
|
| 6 |
+
# LICENSE file in the root directory of this source tree.
|
| 7 |
+
|
| 8 |
+
"""
|
| 9 |
+
This script is adapted from the torchvision one.
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import os.path
|
| 13 |
+
|
| 14 |
+
import jinja2
|
| 15 |
+
import yaml
|
| 16 |
+
from packaging import version
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# The CUDA versions which have pytorch conda packages available for linux for each
|
| 20 |
+
# version of pytorch.
|
| 21 |
+
# Maps each supported pytorch release to the CUDA toolkit tags ("cuXYZ") for
# which a linux conda package of that release is published.
CONDA_CUDA_VERSIONS = {
    "2.1.0": ["cu118", "cu121"],
    "2.1.1": ["cu118", "cu121"],
    "2.1.2": ["cu118", "cu121"],
    "2.2.0": ["cu118", "cu121"],
    "2.2.2": ["cu118", "cu121"],
    "2.3.1": ["cu118", "cu121"],
    "2.4.0": ["cu118", "cu121"],
    "2.4.1": ["cu118", "cu121"],
}
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def conda_docker_image_for_cuda(cuda_version):
    """Return the pytorch conda-builder docker image for a tag like ``"cu118"``."""
    # Only 5-character tags of the form "cuXYZ" are recognized.
    if len(cuda_version) != 5:
        raise ValueError("Unknown cuda version")
    version_digits = cuda_version[2:]
    return f"pytorch/conda-builder:cuda{version_digits}"
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def pytorch_versions_for_python(python_version):
    """Return the pytorch versions (keys of CONDA_CUDA_VERSIONS) buildable
    for the given python version.

    Args:
        python_version: python version string, e.g. "3.10".

    Returns:
        List of pytorch version strings supported on that python version.

    Raises:
        ValueError: if ``python_version`` is not one of the known versions.
            (The previous implementation fell through and implicitly returned
            ``None``, which made the caller's ``for`` loop raise TypeError.)
    """
    # Minimum pytorch release that ships packages for each python version;
    # None means every known release is available.
    min_pytorch = {
        "3.8": None,
        "3.9": None,
        "3.10": "1.11.0",
        "3.11": "2.1.0",
        "3.12": "2.2.0",
    }
    if python_version not in min_pytorch:
        raise ValueError(f"Unknown python version: {python_version}")
    minimum = min_pytorch[python_version]
    if minimum is None:
        return list(CONDA_CUDA_VERSIONS)
    return [
        i
        for i in CONDA_CUDA_VERSIONS
        if version.Version(i) >= version.Version(minimum)
    ]
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def workflows(prefix="", filter_branch=None, upload=False, indentation=6):
    """Build every workflow entry and return them as an indented YAML string.

    Iterates all supported (build type, python, pytorch, cuda) combinations
    and collects the build (and optional upload) workflow for each.
    """
    entries = []
    build_types = ["conda"]
    python_versions = ["3.8", "3.9", "3.10", "3.11", "3.12"]
    for btype in build_types:
        for python_version in python_versions:
            for pytorch_version in pytorch_versions_for_python(python_version):
                for cu_version in CONDA_CUDA_VERSIONS[pytorch_version]:
                    entries.extend(
                        workflow_pair(
                            btype=btype,
                            python_version=python_version,
                            pytorch_version=pytorch_version,
                            cu_version=cu_version,
                            prefix=prefix,
                            upload=upload,
                            filter_branch=filter_branch,
                        )
                    )

    return indent(indentation, entries)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def workflow_pair(
    *,
    btype,
    python_version,
    pytorch_version,
    cu_version,
    prefix="",
    upload=False,
    filter_branch,
):
    """Return the build workflow for one configuration, plus its upload
    workflow when ``upload`` is set."""
    py_tag = python_version.replace(".", "")
    pyt_tag = pytorch_version.replace(".", "")
    base_workflow_name = f"{prefix}linux_{btype}_py{py_tag}_{cu_version}_pyt{pyt_tag}"

    pair = [
        generate_base_workflow(
            base_workflow_name=base_workflow_name,
            python_version=python_version,
            pytorch_version=pytorch_version,
            cu_version=cu_version,
            btype=btype,
            filter_branch=filter_branch,
        )
    ]

    if upload:
        pair.append(
            generate_upload_workflow(
                base_workflow_name=base_workflow_name,
                btype=btype,
                cu_version=cu_version,
                filter_branch=filter_branch,
            )
        )

    return pair
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def generate_base_workflow(
    *,
    base_workflow_name,
    python_version,
    cu_version,
    pytorch_version,
    btype,
    filter_branch=None,
):
    """Return one binary-build workflow entry keyed by its CircleCI job type."""
    job = {
        "name": base_workflow_name,
        "python_version": python_version,
        "cu_version": cu_version,
        "pytorch_version": pytorch_version,
        "context": "DOCKERHUB_TOKEN",
    }

    docker_image = conda_docker_image_for_cuda(cu_version)
    if docker_image is not None:
        job["conda_docker_image"] = docker_image

    # Restrict the job to a single branch when requested.
    if filter_branch is not None:
        job["filters"] = {"branches": {"only": filter_branch}}

    return {f"binary_linux_{btype}": job}
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def generate_upload_workflow(*, base_workflow_name, btype, cu_version, filter_branch):
    """Return the upload workflow entry that depends on the given build job."""
    job = {
        "name": f"{base_workflow_name}_upload",
        "context": "org-member",
        "requires": [base_workflow_name],
    }

    # Wheel uploads land in a per-CUDA-version subfolder on the index.
    if btype == "wheel":
        job["subfolder"] = cu_version + "/"

    if filter_branch is not None:
        job["filters"] = {"branches": {"only": filter_branch}}

    return {f"binary_{btype}_upload": job}
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def indent(indentation, data_list):
    """Dump ``data_list`` to YAML, indenting every line but the first by
    ``indentation`` spaces (for splicing into a template)."""
    if not data_list:
        return ""
    dumped = yaml.dump(data_list, default_flow_style=False)
    separator = "\n" + " " * indentation
    return separator.join(dumped.splitlines())
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
if __name__ == "__main__":
    # Render config.in.yml -> config.yml next to this script, injecting the
    # generated workflow list via the `workflows` callable.
    d = os.path.dirname(__file__)
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(d),
        lstrip_blocks=True,
        autoescape=False,
        keep_trailing_newline=True,
    )

    with open(os.path.join(d, "config.yml"), "w") as f:
        f.write(env.get_template("config.in.yml").render(workflows=workflows))
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/LICENSE-3RD-PARTY
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
SRN license ( https://github.com/vsitzmann/scene-representation-networks/ ):
|
| 2 |
+
|
| 3 |
+
MIT License
|
| 4 |
+
|
| 5 |
+
Copyright (c) 2019 Vincent Sitzmann
|
| 6 |
+
|
| 7 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 8 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 9 |
+
in the Software without restriction, including without limitation the rights
|
| 10 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 11 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 12 |
+
furnished to do so, subject to the following conditions:
|
| 13 |
+
|
| 14 |
+
The above copyright notice and this permission notice shall be included in all
|
| 15 |
+
copies or substantial portions of the Software.
|
| 16 |
+
|
| 17 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 18 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 19 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 20 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 21 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 22 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 23 |
+
SOFTWARE.
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
IDR license ( github.com/lioryariv/idr ):
|
| 27 |
+
|
| 28 |
+
MIT License
|
| 29 |
+
|
| 30 |
+
Copyright (c) 2020 Lior Yariv
|
| 31 |
+
|
| 32 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 33 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 34 |
+
in the Software without restriction, including without limitation the rights
|
| 35 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 36 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 37 |
+
furnished to do so, subject to the following conditions:
|
| 38 |
+
|
| 39 |
+
The above copyright notice and this permission notice shall be included in all
|
| 40 |
+
copies or substantial portions of the Software.
|
| 41 |
+
|
| 42 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 43 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 44 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 45 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 46 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 47 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 48 |
+
SOFTWARE.
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
NeRF https://github.com/bmild/nerf/
|
| 52 |
+
|
| 53 |
+
Copyright (c) 2020 bmild
|
| 54 |
+
|
| 55 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 56 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 57 |
+
in the Software without restriction, including without limitation the rights
|
| 58 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 59 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 60 |
+
furnished to do so, subject to the following conditions:
|
| 61 |
+
|
| 62 |
+
The above copyright notice and this permission notice shall be included in all
|
| 63 |
+
copies or substantial portions of the Software.
|
| 64 |
+
|
| 65 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 66 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 67 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 68 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 69 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 70 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 71 |
+
SOFTWARE.
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/__pycache__/run.cpython-310.pyc
ADDED
|
Binary file (3.89 kB). View file
|
|
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/inference_server.py
ADDED
|
@@ -0,0 +1,338 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
warnings.filterwarnings("ignore")
|
| 3 |
+
import os
|
| 4 |
+
import json
|
| 5 |
+
import socket
|
| 6 |
+
import threading
|
| 7 |
+
import time
|
| 8 |
+
import numpy as np
|
| 9 |
+
import torch
|
| 10 |
+
from tqdm import tqdm
|
| 11 |
+
import gymnasium as gym
|
| 12 |
+
import hydra
|
| 13 |
+
from omegaconf import DictConfig, OmegaConf
|
| 14 |
+
OmegaConf.register_new_resolver("multiply", lambda x, y: int(float(x)) * int(float(y)))
|
| 15 |
+
|
| 16 |
+
import mani_skill2.envs
|
| 17 |
+
import cfdp.envs as envs # customized environments
|
| 18 |
+
from cfdp.diffusion_policy.utils.loaders import get_dataset, get_model, freeze_torch_model_params
|
| 19 |
+
from cfdp.diffusion_policy.models import TemporalUnet, UNET_DIM_MULTS, MLPModel
|
| 20 |
+
from cfdp.diffusion_policy.models.guide_managers import GuideManagerPath
|
| 21 |
+
from cfdp.motion_planner.neural_motion_planner import NeuralMotionPlannerPolicy
|
| 22 |
+
from cfdp.utils.socket_utils import arr2base64, base64_to_arr
|
| 23 |
+
from cfdp.utils import observation_wrapper
|
| 24 |
+
from cfdp.envs import ROBOT_ASSETS_DIR, ENVS_DIR
|
| 25 |
+
from cfdp.utils.data_utils import transform_quat_to_ortho6d
|
| 26 |
+
from cfdp.utils.plot_utils import TrajectoryVisualizer
|
| 27 |
+
from cfdp.utils.pointcloud_utils import get_pcd_obs
|
| 28 |
+
|
| 29 |
+
from realsense_wrapper import RealsenseAPI
|
| 30 |
+
from franka_robot_wrapper.camera_calibration import calibrate_external, calibrate_hand
|
| 31 |
+
|
| 32 |
+
# Obstacle definitions forwarded to the simulated environment (empty here).
OBSTACLE_CONFIGS = []
# Loopback: the robot controller and this inference server share one machine.
HOST='127.0.0.1'
# Server instance index; shifts both ports so several instances can coexist.
i = 2
PORT_SEND=5001 + (2*i)  # planned actions are served to the controller here
PORT_RECV=5000 + (2*i)  # robot/goal poses are received from the controller here

# Realsense cameras by name: device serial + calibration file path.
# The camera named "gripper" is calibrated as hand-mounted in
# process_cameras_and_get_point_clouds; all others as externally mounted.
CAMERAS = {
    'cam_front': {
        "id": "317422074275",
        "calib_file": "/home/ladmin/Documents/maniskill2_benchmark/franka_robot_wrapper/317422074275.txt"
    },
    # 'cam_back': {
    #     "id": "317222076109",
    #     "calib_file": "/home/ladmin/Documents/maniskill2_benchmark/franka_robot_wrapper/317222076109.txt"
    # },
    'gripper': {
        "id": "317422074762",
        "calib_file": "/home/ladmin/Documents/maniskill2_benchmark/franka_robot_wrapper/317422074762.txt"
    }
}

# Populated by process_cameras_and_get_point_clouds; read by update_observation.
rs_list = []
calibration_list = []

# Shared observation dict: written by the receive thread, read by the policy.
obs = {'extra': {}}
|
| 57 |
+
|
| 58 |
+
def process_cameras_and_get_point_clouds(cameras_dict):
    """Open the Realsense cameras and load their calibrations.

    Args:
        cameras_dict (dict): maps a camera name to a dict holding an "id"
            (device serial) and a "calib_file" (calibration file path).
            The camera named "gripper" is calibrated as hand-mounted; every
            other camera as externally mounted.

    Returns:
        tuple: ``(rs_list, calibration_list)`` — parallel lists of
        RealsenseAPI handles and calibration results, in the dict's
        iteration order. Both are also stored in the module-level globals
        of the same names so update_observation can use them.
    """
    # The docstring must precede the ``global`` statement to become __doc__;
    # previously it followed it and was a dead string expression.
    global rs_list, calibration_list
    rs_list = [RealsenseAPI(info["id"]) for info in cameras_dict.values()]
    calibration_list = [
        calibrate_hand(info["calib_file"]) if name == "gripper" else calibrate_external(info["calib_file"])
        for name, info in cameras_dict.items()
    ]

    return rs_list, calibration_list
|
| 75 |
+
|
| 76 |
+
def initialize_server(host, port):
    """Create and return a TCP server socket bound to (host, port).

    The socket is left listening with a backlog of one connection.
    """
    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_sock.bind((host, port))
    server_sock.listen(1)
    print(f"[Server] Listening on {host}:{port}")
    return server_sock
|
| 82 |
+
|
| 83 |
+
def send_response(socket_server, response):
    """Accept a single connection, send ``response``, then return.

    The response array is serialized with arr2base64 and written to the
    accepted connection as one JSON payload; the connection is then closed.
    """
    conn, addr = socket_server.accept()
    encoded = arr2base64(response)
    message = json.dumps(encoded).encode()
    with conn:
        conn.sendall(message)
        print("[Server] Response sent", response)
|
| 93 |
+
|
| 94 |
+
def update_observation(conn):
    """Receive one state message from ``conn`` and refresh the global ``obs``.

    The decoded payload is an array whose first 7 entries are taken as the
    TCP pose and the remainder as the goal pose; the obstacle point cloud is
    recomputed from the cameras each time. Empty, malformed, or failing
    messages are logged and skipped.
    """
    global obs
    try:
        raw = conn.recv(4096)
        if not raw:
            print("[Server] Connection closed by client.")
            return
        decoded = json.loads(raw.decode())
        state = base64_to_arr(decoded)
        if state is None:
            return
        extra = obs['extra']
        extra['tcp_pose'] = state[:7]
        extra['goal_pose'] = state[7:]
        extra['obstacle_point_cloud'] = get_pcd_obs(rs_list, calibration_list, extra['tcp_pose'])
    except json.JSONDecodeError:
        print("[Server] Received invalid JSON. Skipping.")
    except Exception as e:
        print("[Server] Error while receiving:", e)
|
| 112 |
+
|
| 113 |
+
def handle_client(stop_event):
    """Accept-loop thread body: receive state updates until ``stop_event`` is set.

    NOTE(review): accept() blocks, so the stop flag is only re-checked after
    each connection is handled — confirm this shutdown latency is acceptable.
    """
    print("Starting server...")
    server_sock = initialize_server(HOST, PORT_RECV)
    while not stop_event.is_set():
        conn, addr = server_sock.accept()
        with conn:
            update_observation(conn)
|
| 122 |
+
|
| 123 |
+
@hydra.main(version_base="1.2", config_path="../configs", config_name="config_history")
|
| 124 |
+
def main(cfg: DictConfig):
|
| 125 |
+
global obs
|
| 126 |
+
########################################################################################################################
|
| 127 |
+
# Prepare the socket server for sending actions
|
| 128 |
+
socket_server_send = initialize_server(HOST, PORT_SEND)
|
| 129 |
+
########################################################################################################################
|
| 130 |
+
# Load dataset with env, robot, task
|
| 131 |
+
train_subset, train_dataloader, val_subset, val_dataloader = get_dataset(
|
| 132 |
+
cfg_dataset=cfg.dataset,
|
| 133 |
+
batch_size=cfg.batch_size,
|
| 134 |
+
val_set_size=cfg.val_set_size,
|
| 135 |
+
results_dir=cfg.results_dir,
|
| 136 |
+
save_indices=False
|
| 137 |
+
)
|
| 138 |
+
#TODO: save / load normalizer. Do not use dataset
|
| 139 |
+
|
| 140 |
+
########################################################################################################################
|
| 141 |
+
# Load prior model
|
| 142 |
+
diffusion_configs = dict(
|
| 143 |
+
variance_schedule=cfg.model.variance_schedule,
|
| 144 |
+
n_diffusion_steps=cfg.model.n_steps,
|
| 145 |
+
predict_epsilon=cfg.model.predict_epsilon,
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
unet_configs = dict(
|
| 149 |
+
state_dim=cfg.state_dim,
|
| 150 |
+
n_support_points=cfg.trajectory_length,
|
| 151 |
+
unet_input_dim=cfg.model.unet_input_dim,
|
| 152 |
+
dim_mults=UNET_DIM_MULTS[cfg.model.unet_dim_mults_option],
|
| 153 |
+
conditioning_type=cfg.model.conditioning_type,
|
| 154 |
+
conditioning_embed_dim = cfg.model.conditioning_embed_dim
|
| 155 |
+
)
|
| 156 |
+
|
| 157 |
+
diffusion_model = get_model(
|
| 158 |
+
model_class=cfg.model.model_class,
|
| 159 |
+
model=TemporalUnet(**unet_configs),
|
| 160 |
+
tensor_args={'device': cfg.device, 'dtype': torch.float32},
|
| 161 |
+
context_model=MLPModel(in_dim=cfg.model.context_input_dim, out_dim=cfg.model.conditioning_embed_dim, input_field='tasks', output_field='condition'),
|
| 162 |
+
**diffusion_configs,
|
| 163 |
+
**unet_configs
|
| 164 |
+
)
|
| 165 |
+
|
| 166 |
+
# load saved policy model
|
| 167 |
+
diffusion_model.load_state_dict(
|
| 168 |
+
torch.load(os.path.join(cfg.inference.model_dir, 'checkpoints',
|
| 169 |
+
'ema_model_current_state_dict.pth' if cfg.inference.use_ema else 'model_current_state_dict.pth'),
|
| 170 |
+
map_location=cfg.device,
|
| 171 |
+
weights_only=True)
|
| 172 |
+
)
|
| 173 |
+
|
| 174 |
+
diffusion_model.eval()
|
| 175 |
+
model = diffusion_model
|
| 176 |
+
|
| 177 |
+
freeze_torch_model_params(model)
|
| 178 |
+
model = torch.compile(model)
|
| 179 |
+
|
| 180 |
+
########################################################################################################################
|
| 181 |
+
env = gym.make('CustomizedPick-v0',
|
| 182 |
+
obs_mode=cfg.inference.obs_mode,
|
| 183 |
+
reward_mode=cfg.inference.reward_mode,
|
| 184 |
+
control_mode=cfg.inference.control_mode,
|
| 185 |
+
enable_shadow=False,
|
| 186 |
+
render_mode="cameras" if cfg.inference.evaluate else "human",
|
| 187 |
+
robot_init_qpos_noise=0.5,
|
| 188 |
+
obstacle_configs=OBSTACLE_CONFIGS,
|
| 189 |
+
object_config_path=os.path.join(ENVS_DIR, "env_config"),
|
| 190 |
+
create_obstacle_point_cloud=True)
|
| 191 |
+
|
| 192 |
+
# Initialize guide manager
|
| 193 |
+
guide = GuideManagerPath(
|
| 194 |
+
dataset = train_subset.dataset,
|
| 195 |
+
clip_grad=True,
|
| 196 |
+
tensor_args={'device': cfg.device, 'dtype': torch.float32},
|
| 197 |
+
)
|
| 198 |
+
# guide = None
|
| 199 |
+
ObservationWrapperClass = getattr(observation_wrapper, cfg.observation_wrapper)
|
| 200 |
+
obs_wrapper = ObservationWrapperClass(train_subset.dataset)
|
| 201 |
+
|
| 202 |
+
## visualizer ##
|
| 203 |
+
visualizer = TrajectoryVisualizer(normalizer=train_subset.dataset.normalizer)
|
| 204 |
+
########################################################################################################################
|
| 205 |
+
# Initialize policy
|
| 206 |
+
policy = NeuralMotionPlannerPolicy(
|
| 207 |
+
action_dim=env.action_space.shape[0],
|
| 208 |
+
model=model,
|
| 209 |
+
dataset=train_subset.dataset,
|
| 210 |
+
trajectory_length=cfg.trajectory_length,
|
| 211 |
+
inference_cfg=cfg.inference,
|
| 212 |
+
observation_wrapper=obs_wrapper,
|
| 213 |
+
guide_manager=guide,
|
| 214 |
+
debug=cfg.debug)
|
| 215 |
+
|
| 216 |
+
########################################################################################################################
|
| 217 |
+
# Run the policy
|
| 218 |
+
done = False
|
| 219 |
+
def create_trajectory_prior(policy, obs, trajectory_ortho6d):
|
| 220 |
+
trajectory_index = policy.get_current_path_index()
|
| 221 |
+
trajectory_prior = trajectory_ortho6d[trajectory_index:]
|
| 222 |
+
current_tcp_pose_ortho6d = transform_quat_to_ortho6d(
|
| 223 |
+
torch.tensor(obs['extra']['tcp_pose'], device=cfg.device).unsqueeze(0)
|
| 224 |
+
).squeeze(0)
|
| 225 |
+
return torch.cat([current_tcp_pose_ortho6d.unsqueeze(0), trajectory_prior], dim=0)
|
| 226 |
+
|
| 227 |
+
step = 0
|
| 228 |
+
replanning_count = 0
|
| 229 |
+
mpd = False
|
| 230 |
+
while not done:
|
| 231 |
+
policy.observation_wrapper.update_history_buffer(obs)
|
| 232 |
+
replanning_interval = 2
|
| 233 |
+
if step % replanning_interval == 0:
|
| 234 |
+
if step == 0: # initial planning
|
| 235 |
+
policy.plan_path_with_history(obs, do_normalize=True)
|
| 236 |
+
elif not mpd: # replanning
|
| 237 |
+
print("1")
|
| 238 |
+
position_diff = np.linalg.norm(obs['extra']['goal_pose'][:3] - obs['extra']['tcp_pose'][:3])
|
| 239 |
+
if position_diff > 0.10:
|
| 240 |
+
print("Replanning count: ", replanning_count, "position_diff: ", position_diff)
|
| 241 |
+
## replanning with prior
|
| 242 |
+
trajectory_prior = create_trajectory_prior(policy, obs, policy.planned_path)
|
| 243 |
+
policy.plan_path_with_history(obs,
|
| 244 |
+
trajectory_prior=trajectory_prior,
|
| 245 |
+
do_normalize=True,
|
| 246 |
+
timestep=cfg.inference.timestep, # denoising step
|
| 247 |
+
choice='interpolate')
|
| 248 |
+
## replanning without prior
|
| 249 |
+
# trajectory_ortho6d = policy.plan_path_with_history(obs, do_normalize=True)
|
| 250 |
+
replanning_count += 1
|
| 251 |
+
# if not cfg.debug:
|
| 252 |
+
# visualizer.plot_trajectory(policy.planned_path, obs, policy.point_cloud)
|
| 253 |
+
|
| 254 |
+
action = policy.follow_path(obs, real_robot=True)
|
| 255 |
+
|
| 256 |
+
# Convert action to numpy array if it's a tensor
|
| 257 |
+
if torch.is_tensor(action):
|
| 258 |
+
action = action.detach().cpu().numpy()
|
| 259 |
+
send_response(socket_server_send, action)
|
| 260 |
+
print("Steps: ", step, "Replanning count: ", replanning_count)
|
| 261 |
+
step += 1
|
| 262 |
+
position_diff = np.linalg.norm(obs['extra']['goal_pose'][:3] - obs['extra']['tcp_pose'][:3])
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
if position_diff < 0.02:
|
| 266 |
+
step = 0
|
| 267 |
+
replanning_count = 0
|
| 268 |
+
# # Initialize policy
|
| 269 |
+
# diffusion_configs = dict(
|
| 270 |
+
# variance_schedule=cfg.model.variance_schedule,
|
| 271 |
+
# n_diffusion_steps=cfg.model.n_steps,
|
| 272 |
+
# predict_epsilon=cfg.model.predict_epsilon,
|
| 273 |
+
# )
|
| 274 |
+
|
| 275 |
+
# unet_configs = dict(
|
| 276 |
+
# state_dim=cfg.state_dim,
|
| 277 |
+
# n_support_points=cfg.trajectory_length,
|
| 278 |
+
# unet_input_dim=cfg.model.unet_input_dim,
|
| 279 |
+
# dim_mults=UNET_DIM_MULTS[cfg.model.unet_dim_mults_option],
|
| 280 |
+
# conditioning_type=cfg.model.conditioning_type,
|
| 281 |
+
# conditioning_embed_dim = cfg.model.conditioning_embed_dim
|
| 282 |
+
# )
|
| 283 |
+
|
| 284 |
+
# diffusion_model = get_model(
|
| 285 |
+
# model_class=cfg.model.model_class,
|
| 286 |
+
# model=TemporalUnet(**unet_configs),
|
| 287 |
+
# tensor_args={'device': cfg.device, 'dtype': torch.float32},
|
| 288 |
+
# context_model=MLPModel(in_dim=cfg.model.context_input_dim, out_dim=cfg.model.conditioning_embed_dim, input_field='tasks', output_field='condition'),
|
| 289 |
+
# **diffusion_configs,
|
| 290 |
+
# **unet_configs
|
| 291 |
+
# )
|
| 292 |
+
|
| 293 |
+
# # load saved policy model
|
| 294 |
+
# diffusion_model.load_state_dict(
|
| 295 |
+
# torch.load(os.path.join(cfg.inference.model_dir, 'checkpoints',
|
| 296 |
+
# 'ema_model_current_state_dict.pth' if cfg.inference.use_ema else 'model_current_state_dict.pth'),
|
| 297 |
+
# map_location=cfg.device,
|
| 298 |
+
# weights_only=True)
|
| 299 |
+
# )
|
| 300 |
+
|
| 301 |
+
# diffusion_model.eval()
|
| 302 |
+
# model = diffusion_model
|
| 303 |
+
|
| 304 |
+
# freeze_torch_model_params(model)
|
| 305 |
+
# model = torch.compile(model)
|
| 306 |
+
# # guide = None
|
| 307 |
+
# ObservationWrapperClass = getattr(observation_wrapper, cfg.observation_wrapper)
|
| 308 |
+
# obs_wrapper = ObservationWrapperClass(train_subset.dataset)
|
| 309 |
+
# policy = NeuralMotionPlannerPolicy(
|
| 310 |
+
# action_dim=env.action_space.shape[0],
|
| 311 |
+
# model=model,
|
| 312 |
+
# dataset=train_subset.dataset,
|
| 313 |
+
# trajectory_length=cfg.trajectory_length,
|
| 314 |
+
# inference_cfg=cfg.inference,
|
| 315 |
+
# observation_wrapper=obs_wrapper,
|
| 316 |
+
# guide_manager=guide,
|
| 317 |
+
# debug=cfg.debug)
|
| 318 |
+
# guide = GuideManagerPath(
|
| 319 |
+
# dataset = train_subset.dataset,
|
| 320 |
+
# clip_grad=True,
|
| 321 |
+
# tensor_args={'device': cfg.device, 'dtype': torch.float32},
|
| 322 |
+
# )
|
| 323 |
+
# step = 0
|
| 324 |
+
# replanning_count = 0
|
| 325 |
+
env.close()
|
| 326 |
+
|
| 327 |
+
if __name__ == "__main__":

    # Open all cameras and load their calibrations before starting.
    process_cameras_and_get_point_clouds(CAMERAS)
    try:
        # Receive robot observations on a background thread while main() runs.
        stop_event = threading.Event()
        thread = threading.Thread(target=handle_client, args=(stop_event,))
        thread.start()
        main()
        # Signal the receiver thread to stop, then wait for it to exit.
        stop_event.set()
        thread.join()
    except KeyboardInterrupt:
        print(f"\nEnded")
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/inference_server_joint.py
ADDED
|
@@ -0,0 +1,362 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
warnings.filterwarnings("ignore")
|
| 3 |
+
|
| 4 |
+
# Standard library imports
|
| 5 |
+
import json
|
| 6 |
+
import os
|
| 7 |
+
import random
|
| 8 |
+
import socket
|
| 9 |
+
import threading
|
| 10 |
+
import time
|
| 11 |
+
|
| 12 |
+
# Third-party imports
|
| 13 |
+
from tqdm import tqdm
|
| 14 |
+
import gymnasium as gym
|
| 15 |
+
import hydra
|
| 16 |
+
import numpy as np
|
| 17 |
+
import torch
|
| 18 |
+
from matplotlib import pyplot as plt
|
| 19 |
+
from omegaconf import DictConfig, OmegaConf
|
| 20 |
+
OmegaConf.register_new_resolver("multiply", lambda x, y: int(float(x)) * int(float(y)))
|
| 21 |
+
|
| 22 |
+
# ManiSkill imports
|
| 23 |
+
import mani_skill2.envs
|
| 24 |
+
from mani_skill2.utils.wrappers import RecordEpisode
|
| 25 |
+
|
| 26 |
+
# CFDP imports
|
| 27 |
+
import cfdp.envs as envs # customized environments
|
| 28 |
+
from cfdp.diffusion_policy.models import TemporalUnet, UNET_DIM_MULTS, MLPModel
|
| 29 |
+
from cfdp.diffusion_policy.models.guide_managers import GuideManagerSTOMP
|
| 30 |
+
from cfdp.diffusion_policy.models.sample_functions import ddpm_sample_fn_stomp
|
| 31 |
+
from cfdp.diffusion_policy.utils.loaders import get_dataset, get_model, freeze_torch_model_params
|
| 32 |
+
from cfdp.diffusion_policy.utils.summary_trajectory import generate_trajectories
|
| 33 |
+
from cfdp.envs import ENVS_DIR, ROBOT_ASSETS_DIR
|
| 34 |
+
from cfdp.motion_planner.motion_controller import EndEffectorController
|
| 35 |
+
from cfdp.motion_planner.motion_planner_policy_base import ClassicalMotionPlannerPolicy
|
| 36 |
+
from cfdp.motion_planner.neural_motion_planner import NeuralMotionPlannerPolicy
|
| 37 |
+
from cfdp.utils import observation_wrapper, inference_recorder
|
| 38 |
+
from cfdp.utils.data_utils import transform_quat_to_ortho6d
|
| 39 |
+
from cfdp.utils.plot_utils import plot_trajectories, TrajectoryVisualizer
|
| 40 |
+
from cfdp.utils.socket_utils import arr2base64, base64_to_arr
|
| 41 |
+
from cfdp.utils.pointcloud_utils import get_pcd_obs
|
| 42 |
+
|
| 43 |
+
# External hardware imports
|
| 44 |
+
from franka_robot_wrapper.camera_calibration import calibrate_external, calibrate_hand
|
| 45 |
+
from realsense_wrapper import RealsenseAPI
|
| 46 |
+
|
| 47 |
+
# 1. Set random seeds for reproducibility
np.random.seed(42)
random.seed(42)

# 2. Socket/Network configuration
# `i` selects a port pair so multiple server/controller pairs can coexist.
# The robot controller script uses the same `i` with SEND/RECV roles swapped.
HOST = '127.0.0.1'
i = 2
PORT_RECV = 5000 + (2 * i)  # observations arrive here from the robot controller
PORT_SEND = 5001 + (2 * i)  # planned actions are served to the controller here
OBSTACLE_CONFIGS = []  # passed to gym.make; empty — obstacles come from camera point clouds


# 3. Camera configuration
# Maps camera name -> RealSense serial number and calibration file.
# The "gripper" camera is calibrated with calibrate_hand; all others with
# calibrate_external (see process_cameras_and_get_point_clouds).
CAMERAS = {
    'cam_front': {
        "id": "317422074275",
        "calib_file": "/home/ladmin/Documents/maniskill2_benchmark/franka_robot_wrapper/317422074275.txt"
    },
    'cam_back': {
        "id": "317222076109",
        "calib_file": "/home/ladmin/Documents/maniskill2_benchmark/franka_robot_wrapper/317222076109.txt"
    },
    'cam_side': {
        "id": "244222074448",
        "calib_file": "/home/ladmin/Documents/maniskill2_benchmark/franka_robot_wrapper/244222074448.txt"
    },
    'gripper': {
        "id": "317422074762",
        "calib_file": "/home/ladmin/Documents/maniskill2_benchmark/franka_robot_wrapper/317422074762.txt"
    }
}

# 4. Camera and calibration lists (populated later)
# Filled by process_cameras_and_get_point_clouds at startup.
rs_list = []
calibration_list = []

# 5. Global observation dictionary
# Written by the receiver thread (update_observation) and read by main().
obs = {'extra': {}, 'agent': {}}
|
| 85 |
+
|
| 86 |
+
def process_cameras_and_get_point_clouds(cameras_dict):
    """Open a RealSense stream and load a calibration for every camera.

    Populates the module-level ``rs_list`` and ``calibration_list`` (in the
    same iteration order as *cameras_dict*) and also returns them. The
    camera named ``"gripper"`` is calibrated via ``calibrate_hand``; all
    other cameras via ``calibrate_external``.

    Args:
        cameras_dict (dict): Maps camera name -> {"id": serial_number,
            "calib_file": path_to_calibration_file}.

    Returns:
        tuple: ``(rs_list, calibration_list)``.
    """
    # NOTE: the original placed the docstring after the `global` statement,
    # making it a dead string expression rather than the function docstring;
    # it also described behavior (printing) the function does not have.
    global rs_list, calibration_list
    rs_list = [RealsenseAPI(info["id"]) for info in cameras_dict.values()]
    calibration_list = [
        calibrate_hand(info["calib_file"]) if name == "gripper" else calibrate_external(info["calib_file"])
        for name, info in cameras_dict.items()
    ]
    # calibration_list = [calibrate_external(info["calib_file"]) for name, info in cameras_dict.items()]

    return rs_list, calibration_list
|
| 103 |
+
|
| 104 |
+
def initialize_server(host, port):
    """Create a TCP server socket bound to (host, port) and start listening.

    Args:
        host: Interface address to bind to.
        port: TCP port number to bind to.

    Returns:
        The listening socket object (backlog of 1).
    """
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    address = (host, port)
    server_socket.bind(address)
    server_socket.listen(1)
    print(f"[Server] Listening on {host}:{port}")
    return server_socket
|
| 110 |
+
|
| 111 |
+
def send_response(socket_server, response):
    """Block for one incoming connection and deliver *response* over it.

    The array is base64-encoded, JSON-serialized, and sent in full; the
    connection is closed afterwards.
    """
    encoded = arr2base64(response)
    conn, _addr = socket_server.accept()
    # print(f"[Server] Connected by {addr}")
    with conn:
        conn.sendall(json.dumps(encoded).encode())
    print("[Server] Response sent", response)
|
| 121 |
+
|
| 122 |
+
def update_observation(conn):
    """Read one observation payload from *conn* and update the global ``obs``.

    The payload is a JSON message holding a base64-encoded array laid out
    as: [0:9] joint positions, [9:18] goal joint positions, [18:25] TCP
    pose, [25:] goal pose (7 values each for the poses — presumably
    position + quaternion; confirm against the controller side). The
    obstacle point cloud is refreshed from the live cameras, not from the
    payload. Errors are logged and swallowed so the server loop keeps
    running.
    """
    global obs
    try:
        data = conn.recv(4096)
        if not data:
            print("[Server] Connection closed by client.")
        else:
            request = json.loads(data.decode())
            payload = base64_to_arr(request)
            if payload is not None: #TODO: Check if this is correct
                obs['agent']['qpos'] = payload[:9]  # 9 joint values (7 arm + 2 gripper — TODO confirm)
                obs['extra']['goal_qpos'] = payload[9:18]
                obs['extra']['tcp_pose'] = payload[18:25]
                obs['extra']['goal_pose'] = payload[25:]
                # Obstacle geometry comes from the RealSense cameras.
                obs['extra']['obstacle_point_cloud'] = get_pcd_obs(rs_list, calibration_list, obs['extra']['tcp_pose'])
                # print("Observation:", obs['extra']['tcp_pose'])
    except json.JSONDecodeError:
        print("[Server] Received invalid JSON. Skipping.")
    except Exception as e:
        print("[Server] Error while receiving:", e)
|
| 142 |
+
|
| 143 |
+
def handle_client(stop_event):
    """Accept observation connections until *stop_event* is set.

    Runs the receive-side server loop: each incoming connection delivers
    one observation payload which is parsed into the global ``obs`` dict
    by ``update_observation``.

    Args:
        stop_event: ``threading.Event`` used to request shutdown.
    """
    print("Starting server...")
    socket_server = initialize_server(HOST, PORT_RECV)
    # A timeout lets the loop re-check stop_event periodically. A plain
    # blocking accept() would keep this thread alive (and the caller's
    # thread.join() hanging) until one more client happened to connect.
    socket_server.settimeout(1.0)
    try:
        while not stop_event.is_set():
            try:
                conn, addr = socket_server.accept()
            except socket.timeout:
                continue
            # print(f"[Server] Connected by {addr}")
            with conn:
                update_observation(conn)
            # print(f"[Server] Connection from {addr} closed.")
    finally:
        # Release the listening port on shutdown (the original never closed it).
        socket_server.close()
|
| 152 |
+
|
| 153 |
+
def plot_STOMP_debug_info(guide_debug_into, visualizer, point_cloud=None,
                          sphere_idx=24, time_step=-1):
    """Plot sphere trajectories and their SDF costs for one denoising step.

    Args:
        guide_debug_into: Sequence of per-step debug records from the STOMP
            guide; each record exposes sphere_poses, sphere_radii, sdf_costs,
            spheres_after_guidance, and sampled_spheres tensors.
        visualizer: TrajectoryVisualizer used to produce the figure.
        point_cloud: Optional obstacle point cloud to overlay.
        sphere_idx: Index of the sphere to highlight in the plot.
        time_step: Which denoising step's record to plot (-1 = last).

    Returns:
        The figure produced by ``visualizer.plot_trajectory_with_cost``.
    """
    sphere_trajectories = guide_debug_into[time_step].sphere_poses.cpu().numpy() # [batch, seq_len, num_spheres, 3]
    # NOTE(review): sphere_radii is extracted but never passed to the visualizer.
    sphere_radii = guide_debug_into[time_step].sphere_radii.cpu().numpy() # [batch, seq_len, num_spheres]
    sdf_costs = guide_debug_into[time_step].sdf_costs.cpu().numpy() # [batch, seq_len, num_spheres]
    spheres_after_guidance = guide_debug_into[time_step].spheres_after_guidance.cpu().numpy() # [batch, seq_len, num_spheres, 3]
    sampled_spheres = guide_debug_into[time_step].sampled_spheres.cpu().numpy() # [batch, seq_len, num_spheres, 3]
    #TODO: plot spheres_after_guidance
    fig = visualizer.plot_trajectory_with_cost(sphere_trajectories,
                                               sdf_costs,
                                               spheres_after_guidance=spheres_after_guidance,
                                               sampled_spheres=sampled_spheres,
                                               point_cloud=point_cloud,
                                               sphere_idx=sphere_idx,
                                               time_step=time_step)
    return fig
|
| 169 |
+
|
| 170 |
+
@hydra.main(version_base="1.2", config_path="../configs", config_name="config_history_joint")
def main(cfg: DictConfig):
    """Run the joint-space neural-motion-planner inference loop.

    Loads the dataset and diffusion model described by *cfg*, builds a
    STOMP-guided policy, then repeatedly (re)plans a joint trajectory from
    the globally shared ``obs`` (filled in by the receiver thread) and
    sends each action to the real-robot controller over a socket.

    Args:
        cfg: Hydra config (config_history_joint.yaml).
    """
    global obs
    ########################################################################################################################
    # Prepare the socket server for sending actions
    socket_server_send = initialize_server(HOST, PORT_SEND)
    ########################################################################################################################
    # Load dataset with env, robot, task
    train_subset, train_dataloader, val_subset, val_dataloader = get_dataset(
        cfg_dataset=cfg.dataset,
        batch_size=cfg.batch_size,
        val_set_size=cfg.val_set_size,
        results_dir=cfg.results_dir,
        save_indices=False
    )

    ########################################################################################################################
    # Load prior model
    diffusion_configs = dict(
        variance_schedule=cfg.model.variance_schedule,
        n_diffusion_steps=cfg.model.n_steps,
        predict_epsilon=cfg.model.predict_epsilon,
    )

    unet_configs = dict(
        state_dim=cfg.state_dim,
        n_support_points=cfg.trajectory_length,
        unet_input_dim=cfg.model.unet_input_dim,
        dim_mults=UNET_DIM_MULTS[cfg.model.unet_dim_mults_option],
        conditioning_type=cfg.model.conditioning_type,
        conditioning_embed_dim = cfg.model.conditioning_embed_dim
    )

    diffusion_model = get_model(
        model_class=cfg.model.model_class,
        model=TemporalUnet(**unet_configs),
        tensor_args={'device': cfg.device, 'dtype': torch.float32},
        context_model=MLPModel(in_dim=cfg.model.context_input_dim,
                               out_dim=cfg.model.conditioning_embed_dim,
                               input_field='tasks',
                               output_field='condition'),
        **diffusion_configs,
        **unet_configs
    )

    # load saved policy model
    diffusion_model.load_state_dict(
        torch.load(os.path.join(cfg.inference.model_dir, 'checkpoints',
                                'ema_model_current_state_dict.pth' if cfg.inference.use_ema else 'model_current_state_dict.pth'),
                   map_location=cfg.device,
                   weights_only=True)
    )
    diffusion_model.eval()
    model = diffusion_model

    # Inference only: freeze the weights, then compile for faster forward passes.
    freeze_torch_model_params(model)
    model = torch.compile(model)

    ########################################################################################################################
    env = gym.make(cfg.inference.env_id,
                   obs_mode=cfg.inference.obs_mode,
                   reward_mode=cfg.inference.reward_mode,
                   control_mode=cfg.inference.control_mode,
                   enable_shadow=False,
                   # render_mode="cameras" if cfg.inference.evaluate else "human",
                   render_mode="human",
                   robot_init_qpos_noise=0.5,
                   obstacle_configs=OBSTACLE_CONFIGS,
                   object_config_path=os.path.join(ENVS_DIR, "env_config","shelf"),
                   create_obstacle_point_cloud=True,
                   is_demo=True)

    # Initialize guide manager
    # guide = None
    guide = GuideManagerSTOMP(
        dataset = train_subset.dataset,
        robot_model = env.agent.robot,
        clip_grad=True,
        tensor_args={'device': cfg.device, 'dtype': torch.float32},
        guidance_weight=cfg.inference.guidance_weight,
    )

    ObservationWrapperClass = getattr(observation_wrapper, cfg.observation_wrapper)
    obs_wrapper = ObservationWrapperClass(train_subset.dataset, camera_type=cfg.inference.camera_type)
    visualizer = TrajectoryVisualizer(normalizer=train_subset.dataset.normalizer)

    policy = NeuralMotionPlannerPolicy(
        action_dim=env.action_space.shape[0],
        model=model,
        dataset=train_subset.dataset,
        trajectory_length=cfg.trajectory_length,
        inference_cfg=cfg.inference,
        observation_wrapper=obs_wrapper,
        use_ee_control=cfg.use_ee_control,
        guide_manager=guide,
        debug=cfg.debug)
    policy.reset()

    ######################### main inference loop #########################
    def create_trajectory_prior(policy, obs, trajectory, replanning_interval):
        # Keep the not-yet-executed tail of the previous plan and prepend
        # the robot's current joint pose as the new trajectory start.
        trajectory_index = policy.get_current_path_index()
        trajectory_prior = trajectory[trajectory_index:]
        # trajectory_prior = trajectory[replanning_interval:]
        current_joint_pose = torch.tensor(obs['agent']['qpos']).to(cfg.device)
        return torch.cat([current_joint_pose.unsqueeze(0), trajectory_prior], dim=0)

    # print("Waiting for 15 seconds")
    # time.sleep(18)
    #### Planning ####
    try:
        step = 0
        done = False
        stop_replan = False
        replanning_count = 0
        replanning_interval = 3

        while not done:
            # Calculate the difference between the current TCP pose and the goal pose
            # obs['extra']['tcp_pose'] and obs['extra']['goal_pose'] are both expected to be arrays of length 7
            current_tcp_pose = np.array(obs['extra']['tcp_pose'])[:3]
            goal_pose_ee = obs['extra']['goal_pose'][:3]
            tcp_pose_diff = np.linalg.norm(current_tcp_pose - goal_pose_ee)
            if tcp_pose_diff < 0.375:
                # Close to the goal: keep following the last plan, stop replanning.
                stop_replan = True
            print("TCP pose difference:", tcp_pose_diff)
            # Update obsqueue
            policy.observation_wrapper.update_history_buffer(obs)

            if step % replanning_interval == 0 and (not stop_replan):
                if step == 0:
                    print("Initial planning")
                    policy.plan_path_with_history(
                        obs,
                        do_normalize=True
                    )
                else:
                    # Replan by warm-starting denoising from the previous plan.
                    replanning_count += 1
                    trajectory_prior = create_trajectory_prior(
                        policy,
                        obs,
                        policy.planned_path,
                        replanning_interval
                    )
                    policy.plan_path_with_history(
                        obs,
                        trajectory_prior=trajectory_prior,
                        do_normalize=True,
                        timestep=cfg.inference.timestep, # denoising step
                        choice='interpolate'
                    )

                # debug plot, sphere positions and sdf cost
                # fig1 = plot_STOMP_debug_info(
                #     guide.debug_state,
                #     visualizer,
                #     point_cloud=policy.point_cloud,
                #     sphere_idx=24,
                #     time_step=3
                # )
                # plt.show()
                # guide.reset_debug_state()

            if tcp_pose_diff < 0.25:
                # Very close: jump straight to the final waypoint of the plan.
                action = policy.planned_path[-1]
            else:
                action = policy.follow_path(obs, real_robot=True)
            if torch.is_tensor(action):
                # NOTE(review): a tensor action signals the terminal
                # planned_path[-1] branch above — confirm follow_path
                # always returns a numpy array.
                done = True
                action = action.detach().cpu().numpy()
            send_response(socket_server_send, action)
            print("Steps: ", step, "Replanning count: ", replanning_count)
            step += 1


    except KeyboardInterrupt:
        print("\nStopping simulation...")

    print("Simulation finished")
    env.close()
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
if __name__ == "__main__":

    # Open all cameras and load their calibrations before anything else.
    process_cameras_and_get_point_clouds(CAMERAS)
    try:
        # Receive observations on a background thread while main() plans.
        stop_event = threading.Event()
        thread = threading.Thread(target=handle_client, args=(stop_event,))
        thread.start()
        main()
        # Ask the receiver thread to stop once planning has finished.
        # NOTE(review): handle_client blocks in accept(), so join() may
        # wait until one more client connects — confirm shutdown behavior.
        stop_event.set()
        thread.join()
    except KeyboardInterrupt:
        print(f"\nEnded")
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/robot_controller.py
ADDED
|
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
sys.path.append('/home/ladmin/Documents/maniskill2_benchmark/')
|
| 3 |
+
import time
|
| 4 |
+
import json
|
| 5 |
+
import socket
|
| 6 |
+
import threading
|
| 7 |
+
|
| 8 |
+
import gym
|
| 9 |
+
import franka_gym
|
| 10 |
+
from franka_gym.utils import *
|
| 11 |
+
from franka_gym.agents import *
|
| 12 |
+
|
| 13 |
+
from cfdp.utils.socket_utils import arr2base64, base64_to_arr
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# Robot-side configuration. NOTE: gym.make/ENV.reset run at import time,
# so importing this module connects to (and resets) the real robot.
CONFIG_FILE_PATH = '/home/ladmin/Documents/maniskill2_benchmark/franka_robot_wrapper/franka_gym/configs/example.yaml'
CONFIGS = configure(CONFIG_FILE_PATH)
CONF_EXP = CONFIGS['experiment']
ENV = gym.make(CONF_EXP['env-name'], config_file=CONFIG_FILE_PATH)
ENV.reset()
# Candidate goal poses ([x, y, z] + 4 orientation values used with
# control_mode='quat'). Successive assignments overwrite each other, so
# only variant 4 is active — comment/uncomment lines to switch goals.
GOAL_POSE = np.array([0.45, 0.40, 0.08, 0.0, -1.0, 0.0, 0.0]) # 1
GOAL_POSE = np.array([0.45, 0.50, 0.04, 0.0, -1.0, 0.0, 0.0]) # 2
GOAL_POSE = np.array([0.465, 0.55, 0.08, 0.0, -1.0, 0.0, 0.0]) # 3
GOAL_POSE = np.array([0.36, 0.58, 0.08, 0.0, -1.0, 0.0, 0.0]) # 4
# Port pair mirrors the inference server: our SEND is its RECV and vice versa.
HOST='127.0.0.1'
i = 2
PORT_SEND=5000 + (2*i)
PORT_RECV=5001 + (2*i)
|
| 29 |
+
|
| 30 |
+
def get_robot_state():
    """Return the current end-effector pose (7 values) concatenated with GOAL_POSE.

    Returns:
        A 14-element array [ee_pose(7), GOAL_POSE(7)], or None if reading
        the environment state fails.
    """
    try:
        full_state = ENV.get_state()
        ee_pose = full_state['ee_states'][:7]
        return np.concatenate([ee_pose, GOAL_POSE])
    except Exception as e:
        print(f"Error getting robot state: {e}")
        return None
|
| 39 |
+
|
| 40 |
+
def send_observation(stop_event, host=HOST, port=PORT_SEND, interval=0.5):
    """Periodically push the current robot state to the inference server.

    Opens one short-lived connection per cycle, sends a single
    newline-delimited JSON payload, then sleeps *interval* seconds,
    until *stop_event* is set or an error occurs.

    Args:
        stop_event: threading.Event that stops the loop when set.
        host: Server address to connect to.
        port: Server port to connect to.
        interval: Seconds to wait between observations.
    """
    while not stop_event.is_set():
        try:
            # `with` guarantees each per-iteration socket is closed. The
            # original closed only the last socket in an outer `finally`
            # (leaking the rest) and raised UnboundLocalError there when
            # the loop body never ran.
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.connect((host, port))

                obs = get_robot_state()  # your function
                payload = arr2base64(obs)  # your encoder
                message = json.dumps(payload) + '\n'  # newline-delimited JSON

                s.sendall(message.encode())
                # print("[Client] Payload sent:", payload)
        except KeyboardInterrupt:
            print("\n[Client] Interrupted by user. Shutting down.")
            break
        except Exception as e:
            print("[Client] Error during send:", e)
            break
        time.sleep(interval)
|
| 62 |
+
|
| 63 |
+
def move_to(location):
    """Servo the end effector to *location* ([x, y, z] + quaternion).

    Repeatedly issues short EE moves until the measured position is within
    1 cm of the target. Blocks until convergence.

    Args:
        location: 7-element target pose (position + quaternion).
    """
    # global gripper_orientation_quat, env
    distance_to_target = 1
    while distance_to_target > 0.01:
        ENV.move_to_ee_target_pose(ee_target_pos=location, control_mode='quat', iter_steps=3)
        state = get_robot_state()
        if state is None:
            # get_robot_state() returns None on a read failure; retry
            # instead of crashing on the slice below (original raised
            # TypeError on None).
            continue
        distance_to_target = np.linalg.norm(state[:3] - location[:3])
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def receive_info():
    """Fetch one action array from the inference server.

    Returns:
        The decoded array sent by the server.
    """
    # `with` closes the socket (the original leaked one socket per call).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((HOST, PORT_RECV))
        # NOTE(review): assumes the entire JSON payload fits in a single
        # 4096-byte recv — confirm the server's message size.
        data = s.recv(4096)
    response = json.loads(data.decode())
    response = base64_to_arr(response)
    print("[Client] Got from server:", response)
    return response
|
| 80 |
+
|
| 81 |
+
if __name__ == "__main__":
    debug = False
    # Pre-recorded 7-DoF joint configurations used as candidate start poses.
    joint1 = [0.14060218312656647, -0.1763630612130337, 0.04729701053311961, -1.2449121625967192, 0.029108238965272905, 1.1071924595832823, 0.9375217847931281]
    joint2 = [0.19482878609080095, -0.1490429622283628, -0.48674890603699533, -1.520728926089772, -0.08023853053649267, 1.4145816124984727, 0.3123271169454759]
    joint3 = [-0.30347477500480513, -0.12699215018749238, -0.0885380081042202, -1.1491600061885097, -0.05358308494422171, 1.0893443137146492, 0.25052567082056587]
    # joint4 = [-0.2534497408239465, 0.14847098046645785, -0.12020468637205217, -2.1994750877179596, -0.024345323815941806, 2.3565164618492127, 0.7671496500118045]
    ENV.move_to_joint_position(joint2)
    if debug:
        # Debug mode: drive straight to the goal without the planner.
        ENV.move_to_ee_target_pose(GOAL_POSE, control_mode = 'quat', iter_steps =200)
    else:
        # Stream observations to the inference server in the background.
        stop_event = threading.Event()
        thread = threading.Thread(target=send_observation, args=(stop_event,))
        thread.start()
        # Part 1: follow planner actions until the EE reaches GOAL_POSE.
        step = 0
        done = False
        while not done:
            print("Observation: ", get_robot_state()[:7])
            action = receive_info()
            move_to(action)
            position_diff = np.linalg.norm(GOAL_POSE[:3] - get_robot_state()[:3])
            print("Step: ", step, "Position diff: ", position_diff)
            if position_diff < 0.02:
                done = True
            step +=1
        # Descend onto the object and grasp it.
        GOAL_POSE[2] -= 0.055
        ENV.move_to_ee_target_pose(GOAL_POSE, control_mode = 'quat', iter_steps = 20)
        ENV.grasp()

        # Part 2: lift the grasped object.
        GOAL_POSE[2] = 0.20 # 4
        move_to(GOAL_POSE)

        # Move Above the drop-off location.
        GOAL_POSE = np.array([-0.13914922, 0.67478615, 0.20, 0.0, 0.66, 0.75, 0.0])
        move_to(GOAL_POSE)

        # Lower and release.
        GOAL_POSE = np.array([-0.13914922, 0.67478615, 0.10, 0.0, 0.66, 0.75, 0.0])
        move_to(GOAL_POSE)
        ENV.open_grasp()

        # Retreat upward, reset the robot, and stop the observation thread.
        GOAL_POSE = np.array([-0.13914922, 0.67478615, 0.20, 0.0, 0.66, 0.75, 0.0])
        move_to(GOAL_POSE)
        ENV.reset()
        stop_event.set()
        # step = 0
        # done = False
        # while not done:
        #     print("Observation: ", get_robot_state()[:7])
        #     action = receive_info()
        #     move_to(action)
        #     position_diff = np.linalg.norm(GOAL_POSE[:3] - get_robot_state()[:3])
        #     print("Step: ", step, "Position diff: ", position_diff)
        #     if position_diff < 0.01:
        #         done = True
        #     # step +=1
        #     GOAL_POSE[2] -= 0.1
        #     ENV.move_to_ee_target_pose(GOAL_POSE, control_mode = 'quat', iter_steps = 20)
        #     ENV.reset()
        #     stop_event.set()
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/robot_controller_joint.py
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
# Hard-coded local checkout paths so franka_gym / cfdp resolve without installation.
sys.path.append('/home/ladmin/Documents/maniskill2_benchmark/')
sys.path.append('/home/ladmin/Documents/maniskill2_benchmark/franka_robot_wrapper/')
sys.path.append('/home/ladmin/Documents/maniskill2_benchmark/franka_robot_wrapper/franka_gym/')
import time
import json
import socket
import threading

import numpy as np
import gym
import franka_gym
from franka_gym.utils import *
from franka_gym.agents import *

from cfdp.utils.socket_utils import arr2base64, base64_to_arr


# Real-robot environment, constructed once at import time.
# NOTE(review): importing this module has heavy side effects (creates the env
# and resets the physical robot) — consider moving this behind a main() guard.
CONFIG_FILE_PATH = '/home/ladmin/Documents/maniskill2_benchmark/franka_robot_wrapper/franka_gym/configs/example.yaml'
CONFIGS = configure(CONFIG_FILE_PATH)  # `configure` comes from franka_gym.utils (star import)
CONF_EXP = CONFIGS['experiment']
ENV = gym.make(CONF_EXP['env-name'], config_file=CONFIG_FILE_PATH)
ENV.reset()

# --- Goal pose candidates recorded from tele-op sessions --------------------
# GOAL_POSE_JOINT is [7 joint angles, 2 finger widths]; GOAL_POSE_EE is a
# 7-dof end-effector pose (position + quaternion — exact quaternion
# convention not visible here, confirm against franka_gym).
# GOAL_POSE = np.array([0.45, 0.40, 0.08, 0.0, -1.0, 0.0, 0.0]) # Set a goal pose for the robot to move to
# GOAL_POSE_JOINT = np.array([1.24, 0.44, 0.09, -2.15, -0.07, 2.64, 1.05, 0.04, 0.04])
# GOAL_POSE_JOINT = np.array([0.654, 1.651, -1.608, -2.116, 0.626, 3.008, 1.775, 0.04, 0.04])
# GOAL_POSE_EE = np.array([0.13, 0.56, 0.09, 0.01, -0.85, -0.52, -0.01])
# GOAL_POSE_JOINT = np.array([1.08, 0.48, -0.06, -2.24, 0.22, 2.66, 0.83, 0.04, 0.04])
# GOAL_POSE_EE = np.array([0.27, 0.45, 0.04, 0.01, -0.92, -0.38, 0.05])
# # The working one for now:
# GOAL_POSE_JOINT = np.array([0.80, 0.49, 0.05, -1.74, -0.06, 2.24, -0.04, 0.04, 0.04])
# GOAL_POSE_EE = np.array([0.44, 0.50, 0.19, 0.01, 0.66, 0.75, 0.01])
# # # The one for the shelf:
# GOAL_POSE_JOINT = np.array([-0.89, 0.17, -0.47, -1.22, 0.06, 1.37, 0.71, 0.04, 0.04])
# GOAL_POSE_EE = np.array([0.16, -0.56, 0.57, -0.00, -0.81, 0.59, 0.01])

# # The one for the shelf:
# GOAL_POSE_JOINT = np.array([0.37, 1.73, -1.19, -1.58, 1.79, 2.92, 0.74, 0.04, 0.04])
# GOAL_POSE_EE = np.array([0.55, -0.48, 0.05, -0.27, -0.70, 0.52, -0.38])

# Lower Shelf, Top Down
GOAL_POSE_JOINT = np.array([-0.74, 1.07, -0.31, -0.98, 0.29, 2.05, 0.70, 0.04, 0.04])
GOAL_POSE_EE = np.array([0.48, -0.63, 0.11, -0.01, -0.89, 0.45, -0.01])

# # Top Shelf
# GOAL_POSE_JOINT = np.array([-1.09, 0.49, 0.05, -0.79, -0.04, 1.31, 0.80, 0.04, 0.04])
# GOAL_POSE_EE = np.array([0.32, -0.58, 0.55, -0.01, -0.86, 0.51, -0.01])

# Another Lower Shelf
# NOTE(review): this second assignment silently overwrites the "Lower Shelf,
# Top Down" goal above — only the pair below is ever used at runtime.
GOAL_POSE_JOINT = np.array([-0.55, 1.25, -0.36, -0.91, 0.40, 2.18, 0.43, 0.04, 0.04])
GOAL_POSE_EE = np.array([0.59, -0.54, 0.03, -0.02, -0.97, 0.26, -0.03])

HOST='127.0.0.1'
i = 2  # experiment index; shifts both ports in lockstep
PORT_SEND=5000 + (2*i)  # observations are pushed to the inference server on this port
PORT_RECV=5001 + (2*i)  # actions are pulled back from the inference server on this port
| 58 |
+
def get_robot_state():
    """Build the combined observation vector for the inference server.

    Layout: [7 joint angles + 2 zero finger pads, joint-space goal (9),
    current EE pose (7), EE-space goal (7)].

    Returns None (after logging) if querying the environment fails.
    """
    try:
        state = ENV.get_state()
        finger_pad = np.array([0, 0])  # placeholder finger widths
        joint_obs = np.concatenate([state['joint_pos'][:7], finger_pad])  # TODO: Change to joint state -- Check if this is correct
        ee_obs = state['ee_states'][:7]
        return np.concatenate([joint_obs, GOAL_POSE_JOINT, ee_obs, GOAL_POSE_EE])
    except Exception as e:
        print(f"Error getting robot state: {e}")
        return None
| 68 |
+
|
| 69 |
+
def send_observation(stop_event, host=HOST, port=PORT_SEND, interval=0.5):
    """Periodically push the robot observation to the inference server.

    Opens a fresh TCP connection per message, sends one newline-delimited
    JSON payload (base64-encoded observation array), then sleeps `interval`
    seconds. Loops until `stop_event` is set, the user interrupts, or a send
    error occurs.

    Fixes over the previous version:
      * every socket is now closed via a context manager (the old code
        opened a new socket per iteration but only closed the last one);
      * no NameError in the old `finally: s.close()` when the loop body
        never ran (stop_event already set on entry).
    """
    while not stop_event.is_set():
        try:
            # `with` guarantees the socket is closed even when sendall fails.
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                s.connect((host, port))

                obs = get_robot_state()            # combined observation vector
                payload = arr2base64(obs)          # base64-encode for JSON transport
                message = json.dumps(payload) + '\n'  # newline-delimited JSON

                s.sendall(message.encode())
            # print("[Client] Payload sent:", payload)
        except KeyboardInterrupt:
            print("\n[Client] Interrupted by user. Shutting down.")
            break
        except Exception as e:
            print("[Client] Error during send:", e)
            break
        time.sleep(interval)
| 91 |
+
|
| 92 |
+
def move_to(location):
    """Drive the arm to the joint configuration given by the first seven
    entries of `location` (remaining entries, e.g. finger widths, are
    ignored)."""
    joint_target = location[:7]
    ENV.move_to_joint_position(joint_target)
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def receive_info():
    """Fetch one action array from the inference server.

    Connects to (HOST, PORT_RECV), reads a single JSON payload (up to
    20000 bytes) and decodes it back into a numpy array.

    Fix: the socket is now closed via a context manager (the previous
    version leaked one socket per call).

    NOTE(review): a single recv() is not guaranteed to return the whole
    message; 20000 bytes is assumed to cover one action payload — confirm
    against the server's message size.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((HOST, PORT_RECV))
        data = s.recv(20000)
    response = json.loads(data.decode())
    response = base64_to_arr(response)
    print("[Client] Got from server:", response)
    return response
| 110 |
+
|
| 111 |
+
if __name__ == "__main__":
    debug = False
    # Robot Frame: X-Axis, Y-Axis, Z-Axis
    # Named joint configurations recorded from tele-op. Some include the two
    # 0.04 finger widths, others are 7-dof only (move_to_joint_position only
    # consumes the first seven entries).
    FRONT_MID_HIGH_0 = [0.079, -0.237, -0.059, -1.550, 0.029, 1.349, 0.721, 0.04, 0.04] # High
    FRONT_MID_HIGH_1 = [0.141, -0.176, 0.047, -1.245, 0.029, 1.107, 0.938, 0.04, 0.04] # Extra High
    FRONT_LEFT_HIGH_0 = [0.195, -0.109, -0.487, -1.521, -0.080, 1.415, 0.312, 0.04, 0.04]
    BACK_RIGHT_LOW_0 = [1.65, 0.33, 0.33, -2.12, -0.09, 2.48, 1.15]
    BACK_RIGHT_HIGH_0 = [1.92, 0.12, 0.01, -1.29, 0.07, 1.52, 1.12]
    TOP_SHELF_0 = [-1.09, 0.49, 0.05, -0.79, -0.04, 1.31, 0.80]
    LOW_SHELF_0 = [-0.87, 1.17, 0.05, -0.95, -0.04, 2.19, 0.75]
    FRONT_LEFT_TABLE_0 = [-0.84, 0.49, 0.20, -2.33, -0.16, 2.77, 0.50]
    FRONT_MID_MID_0 = [0.25, 0.31, -0.09, -1.98, 0.01, 2.25, 0.85]
    SHELF_LOW_0 = np.array([0.37, 1.73, -1.19, -1.58, 1.79, 2.92, 0.74, 0.04, 0.04])

    # Move to the start configuration.
    # ENV.move_to_joint_position(GOAL_POSE_JOINT)
    ENV.move_to_joint_position(BACK_RIGHT_HIGH_0)
    # ENV.grasp()
    grasp = True
    # reset = True
    # if reset:
    #     GRASP_JOINT = [1.82, 0.61, 0.16, -2.10, -0.09, 2.72, 1.23]
    #     ENV.move_to_joint_position(GRASP_JOINT)
    #     ENV.open_grasp()
    #     ENV.move_to_joint_position(BACK_RIGHT_LOW_0)

    if grasp:
        # Pick the object at a fixed tele-oped grasp configuration, then
        # return to the start pose while holding it.
        GRASP_JOINT = [1.82, 0.61, 0.16, -2.10, -0.09, 2.72, 1.23]
        ENV.move_to_joint_position(GRASP_JOINT)
        ENV.grasp()
        # ENV.open_grasp()
        ENV.move_to_joint_position(BACK_RIGHT_HIGH_0)

    if debug:
        pass
    else:
        # Background thread streams observations to the inference server
        # while the main loop below pulls actions back and executes them.
        # NOTE(review): stop_event is never set after the loop, so the
        # sender thread keeps running until the process is killed — confirm
        # whether that is intended.
        stop_event = threading.Event()
        thread = threading.Thread(target=send_observation, args=(stop_event,))
        thread.start()
        # Part 1: follow server actions until the joint-space goal is reached.
        step = 0
        done = False
        while not done:
            print("Observation: ", get_robot_state()[:7])
            action = receive_info()
            move_to(action)
            # Joint-space distance between the goal configuration and the
            # current joint angles (first 7 entries of the observation).
            position_diff = np.linalg.norm(GOAL_POSE_JOINT[:7] - get_robot_state()[:7])
            print("Step: ", step, "Position diff: ", position_diff)
            if position_diff < 0.2:  # 0.2 rad total: "close enough" in joint space
                done = True
            step += 1
        ENV.open_grasp()  # release the object at the goal

        # (Historical pick-and-place continuation in EE space, kept for reference.)
        # # GOAL_POSE[2] -= 0.055
        # # ENV.move_to_ee_target_pose(GOAL_POSE, control_mode = 'quat', iter_steps = 20)
        # # ENV.grasp()

        # # # Part 2
        # # GOAL_POSE[2] = 0.20 # 4
        # # move_to(GOAL_POSE)

        # # # Move Above
        # # GOAL_POSE = np.array([-0.13914922, 0.67478615, 0.20, 0.0, 0.66, 0.75, 0.0])
        # # move_to(GOAL_POSE)

        # # GOAL_POSE = np.array([-0.13914922, 0.67478615, 0.10, 0.0, 0.66, 0.75, 0.0])
        # # move_to(GOAL_POSE)
        # # ENV.open_grasp()

        # # GOAL_POSE = np.array([-0.13914922, 0.67478615, 0.20, 0.0, 0.66, 0.75, 0.0])
        # # move_to(GOAL_POSE)
        # # ENV.reset()
        # # stop_event.set()
        # # step = 0
        # # done = False
        # # while not done:
        # #     print("Observation: ", get_robot_state()[:7])
        # #     action = receive_info()
        # #     move_to(action)
        # #     position_diff = np.linalg.norm(GOAL_POSE[:3] - get_robot_state()[:3])
        # #     print("Step: ", step, "Position diff: ", position_diff)
        # #     if position_diff < 0.01:
        # #         done = True
        # #     # step +=1
        # # GOAL_POSE[2] -= 0.1
        # # ENV.move_to_ee_target_pose(GOAL_POSE, control_mode = 'quat', iter_steps = 20)
        # # ENV.reset()
        # # stop_event.set()
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/run.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A script to train a diffusion / flow-matching (FM) policy via Hydra configs.
import os
import torch
import hydra
import wandb
from omegaconf import DictConfig, OmegaConf
import cfdp
from cfdp.diffusion_policy.utils.loaders import get_dataset, get_loss, get_model, get_summary
from cfdp.diffusion_policy.models import TemporalUnet, UNET_DIM_MULTS, MLPModel
from cfdp.diffusion_policy import trainer

# Allow "${multiply:a,b}" arithmetic inside the Hydra/OmegaConf config files.
OmegaConf.register_new_resolver("multiply", lambda x, y: int(float(x)) * int(float(y)))
| 14 |
+
@hydra.main(version_base="1.2", config_path="../configs", config_name="config_ms3_history_ddim")
def main(cfg: DictConfig):
    """Train a diffusion / flow-matching policy from a Hydra config.

    Builds the dataset and loaders, a TemporalUnet-based diffusion model
    with an MLP context encoder, the loss and summary callables, then runs
    the generic trainer for cfg.training.num_steps optimizer steps.
    """
    # NOTE(review): this hard-coded override disables W&B logging regardless
    # of cfg.wandb.mode — drop this line to honour the config.
    os.environ["WANDB_MODE"] = "disabled"
    print(OmegaConf.to_yaml(cfg))

    train_subset, train_dataloader, val_subset, val_dataloader = get_dataset(
        cfg_dataset=cfg.dataset,
        batch_size=cfg.batch_size,
        val_set_size=cfg.val_set_size,
        results_dir=cfg.results_dir,
        save_indices=False
    )

    print("train data_loader length, ", len(train_dataloader))
    print("val data_loader length, ", len(val_dataloader))

    dataset = train_subset.dataset  # underlying TrajectoryDataset
    print("dataset, ", dataset)

    # Model
    diffusion_configs = dict(
        variance_schedule=cfg.model.variance_schedule,
        n_diffusion_steps=cfg.model.n_steps,
        prediction_mode=cfg.model.prediction_mode,
    )

    unet_configs = dict(
        state_dim=cfg.state_dim,
        n_support_points=cfg.trajectory_length,
        unet_input_dim=cfg.model.unet_input_dim,
        dim_mults=UNET_DIM_MULTS[cfg.model.unet_dim_mults_option],
        conditioning_type=cfg.model.conditioning_type,
        conditioning_embed_dim=cfg.model.conditioning_embed_dim
    )

    model = get_model(
        model_class=cfg.model.model_class,
        model=TemporalUnet(**unet_configs),
        tensor_args={'device': cfg.device, 'dtype': torch.float32},
        context_model=MLPModel(in_dim=cfg.model.context_input_dim, out_dim=cfg.model.conditioning_embed_dim, input_field='tasks', output_field='condition'),
        **diffusion_configs,
        **unet_configs
    )

    # Loss (the same callable is reused for train and validation)
    loss_fn = val_loss_fn = get_loss(
        loss_class=cfg.loss.loss_class
    )
    print("loss_fn, ", loss_fn, " type of loss_fn, ", type(loss_fn))

    # Summary
    summary_fn = get_summary(
        summary_class=cfg.training.summary_class,
        use_ddim=cfg.model.use_ddim
    )

    # WandB
    wandb.init(
        project=cfg.wandb.project,
        entity=cfg.wandb.entity,
        mode=cfg.wandb.mode,
        dir=cfg.results_dir
    )

    print("number of epochs, ", trainer.get_num_epochs(cfg.training.num_steps, cfg.batch_size, len(dataset)))
    # Train
    trainer.train(
        model=model,
        train_dataloader=train_dataloader,
        train_subset=train_subset,
        val_dataloader=val_dataloader,
        val_subset=val_subset,  # fixed: was train_subset (copy-paste bug — validation summaries used training data)
        epochs=trainer.get_num_epochs(cfg.training.num_steps, cfg.batch_size, len(dataset)),
        model_dir=cfg.results_dir,
        summary_fn=summary_fn,
        lr=cfg.training.lr,
        loss_fn=loss_fn,
        val_loss_fn=val_loss_fn,
        steps_til_summary=cfg.training.steps_til_summary,
        steps_til_checkpoint=cfg.training.steps_til_ckpt,
        clip_grad=True,
        use_ema=cfg.training.use_ema,
        use_amp=cfg.training.use_amp,
        debug=cfg.debug,
        tensor_args={'device': cfg.device, 'dtype': torch.float32}
    )

if __name__ == "__main__":
    main()
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/run_inference.py
ADDED
|
@@ -0,0 +1,334 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
import hydra
from omegaconf import DictConfig, OmegaConf
import gymnasium as gym
import mani_skill2.envs
import cfdp.envs as envs  # customized environments (registers env IDs on import)
from tqdm import tqdm
import numpy as np
import torch
from cfdp.diffusion_policy.utils.loaders import get_dataset, get_model, freeze_torch_model_params
from cfdp.diffusion_policy.models import TemporalUnet, UNET_DIM_MULTS, MLPModel
from cfdp.motion_planner.neural_motion_planner import NeuralMotionPlannerPolicy
from cfdp.motion_planner.motion_planner_policy_base import ClassicalMotionPlannerPolicy
from cfdp.utils.plot_utils import plot_trajectories
from cfdp.envs import ROBOT_ASSETS_DIR
from cfdp.utils.data_utils import transform_quat_to_ortho6d
from cfdp.diffusion_policy.models.guide_managers import GuideManagerPath
from cfdp.diffusion_policy.utils.summary_trajectory import generate_trajectories
from cfdp.utils import observation_wrapper, inference_recorder
from cfdp.utils.plot_utils import TrajectoryVisualizer
from cfdp.envs import ENVS_DIR

import random
import time

# Fixed seeds for reproducible episode initialisation.
# NOTE(review): torch is not seeded here, so diffusion sampling itself is
# still stochastic — confirm whether that is intended.
np.random.seed(42)
random.seed(42)

# Allow "${multiply:a,b}" arithmetic inside the Hydra/OmegaConf config files.
OmegaConf.register_new_resolver("multiply", lambda x, y: int(float(x)) * int(float(y)))

# Extra scene obstacles passed to the env; empty by default.
# Example box / sphere / mesh configs kept commented below for reference.
OBSTACLE_CONFIGS = []
# OBSTACLE_CONFIGS = [
#     # {
#     #     'type': 'box',
#     #     'half_size': [0.05, 0.05, 0.05],
#     #     'pose': [0.5, 0, 0.1],  # [x, y, z, qw, qx, qy, qz]
#     #     'color': (1, 0, 0),
#     #     'n_points': 500
#     # },
#     {
#         'type': 'sphere',
#         'radius': 0.12,
#         'pose': [0.5, 0.1, 0.2],
#         'color': (0, 1, 0),
#         'n_points': 500
#     }
#     # {
#     #     'type': 'mesh',
#     #     'mesh_path': 'path/to/mesh.obj',
#     #     'scale': 1.0,
#     #     'pose': [0.4, -0.2, 0.1, 1, 0, 0, 0],
#     #     'color': (0, 0, 1),
#     #     'n_points': 500
#     # }
# ]
| 57 |
+
@hydra.main(version_base="1.2", config_path="../configs", config_name="config_history")
def main(cfg: DictConfig):
    """Closed-loop inference of a trained diffusion policy in simulation.

    Loads the dataset (for its normalizer), rebuilds the diffusion model and
    restores a checkpoint, then runs one of three modes:
      * validation_set_only — sample and plot trajectories, no env stepping;
      * sim_with_history   — history-conditioned rollout with periodic
        replanning from a warm-start trajectory prior;
      * default            — plain plan-and-follow episodes, gated by a
        classical planner's feasibility check.
    """
    # print(OmegaConf.to_yaml(cfg))

    ########################################################################################################################
    # Load dataset with env, robot, task
    train_subset, train_dataloader, val_subset, val_dataloader = get_dataset(
        cfg_dataset=cfg.dataset,
        batch_size=cfg.batch_size,
        val_set_size=cfg.val_set_size,
        results_dir=cfg.results_dir,
        save_indices=False
    )
    #TODO: save / load normalizer. Do not use dataset

    ########################################################################################################################
    # Load prior model
    diffusion_configs = dict(
        variance_schedule=cfg.model.variance_schedule,
        n_diffusion_steps=cfg.model.n_steps,
        prediction_mode=cfg.model.prediction_mode,
    )

    unet_configs = dict(
        state_dim=cfg.state_dim,
        n_support_points=cfg.trajectory_length,
        unet_input_dim=cfg.model.unet_input_dim,
        dim_mults=UNET_DIM_MULTS[cfg.model.unet_dim_mults_option],
        conditioning_type=cfg.model.conditioning_type,
        conditioning_embed_dim = cfg.model.conditioning_embed_dim
    )

    diffusion_model = get_model(
        model_class=cfg.model.model_class,
        model=TemporalUnet(**unet_configs),
        tensor_args={'device': cfg.device, 'dtype': torch.float32},
        context_model=MLPModel(in_dim=cfg.model.context_input_dim, out_dim=cfg.model.conditioning_embed_dim, input_field='tasks', output_field='condition'),
        **diffusion_configs,
        **unet_configs
    )

    # load saved policy model (EMA weights when cfg.inference.use_ema)
    diffusion_model.load_state_dict(
        torch.load(os.path.join(cfg.inference.model_dir, 'checkpoints',
                                'ema_model_current_state_dict.pth' if cfg.inference.use_ema else 'model_current_state_dict.pth'),
                   map_location=cfg.device,
                   weights_only=True)
    )
    diffusion_model.eval()
    model = diffusion_model

    # Inference-only: freeze weights, then compile for faster sampling.
    freeze_torch_model_params(model)
    model = torch.compile(model)

    ########################################################################################################################

    # Create environment
    # env = gym.make('CustomizedPick-v0',
    #                obs_mode=cfg.inference.obs_mode,
    #                reward_mode=cfg.inference.reward_mode,
    #                control_mode=cfg.inference.control_mode,
    #                enable_shadow=False,
    #                render_mode="cameras" if cfg.inference.evaluate else "human",
    #                robot_init_qpos_noise=0.5,
    #                obstacle_configs=OBSTACLE_CONFIGS,
    #                object_config_path=os.path.join(ENVS_DIR, "env_config","smallbox_distractor"),
    #                # object_config_path=os.path.join(ENVS_DIR, "env_config","distractorbox"),
    #                # object_config_path=os.path.join(ENVS_DIR, "env_config","distractor25"),
    #                # object_config_path=os.path.join(ENVS_DIR, "env_config"),
    #                # object_config_path=os.path.join(ENVS_DIR, "concept_env_config"),
    #                create_obstacle_point_cloud=True)

    env = gym.make(cfg.inference.env_id,
                   obs_mode=cfg.inference.obs_mode,
                   reward_mode=cfg.inference.reward_mode,
                   control_mode=cfg.inference.control_mode,
                   enable_shadow=False,
                   render_mode="cameras" if cfg.inference.evaluate else "human",
                   robot_init_qpos_noise=0.5,
                   obstacle_configs=OBSTACLE_CONFIGS,
                   object_config_path=os.path.join(ENVS_DIR, "env_config","shelf"),
                   create_obstacle_point_cloud=True)

    # Initialize guide manager (guidance currently disabled).
    # guide = GuideManagerPath(
    #     dataset = train_subset.dataset,
    #     clip_grad=True,
    #     tensor_args={'device': cfg.device, 'dtype': torch.float32},
    #     guidance_weight=cfg.inference.guidance_weight,
    # )
    guide = None
    # Observation wrapper class is selected by name from the config.
    ObservationWrapperClass = getattr(observation_wrapper, cfg.observation_wrapper)
    obs_wrapper = ObservationWrapperClass(train_subset.dataset, camera_type=cfg.inference.camera_type)

    policy = NeuralMotionPlannerPolicy(
        action_dim=env.action_space.shape[0],
        model=model,
        dataset=train_subset.dataset,
        trajectory_length=cfg.trajectory_length,
        inference_cfg=cfg.inference,
        observation_wrapper=obs_wrapper,
        guide_manager=guide,
        debug=cfg.debug)

    # Initialize a classical policy for reject sampling
    agent = env.unwrapped.agent
    planner_parameters = {
        'action_dim': env.action_space.shape[0],
        'urdf_path': os.path.join(ROBOT_ASSETS_DIR, "panda_v2.urdf"),
        'srdf_path': os.path.join(ROBOT_ASSETS_DIR, "panda_v2.srdf"),
        'agent': agent,
        'control_timestep': env.control_timestep,
        'planner_type': 'rrt'
    }
    classical_policy = ClassicalMotionPlannerPolicy(**planner_parameters)

    ## visualizer ##
    visualizer = TrajectoryVisualizer(normalizer=train_subset.dataset.normalizer)

    ## inference recorder ##
    if cfg.test.inference_record:
        obs_inference_recorder = inference_recorder.InferenceRecorder(save_dir=cfg.test.recording_dir)
    else:
        obs_inference_recorder = None
    ######################### main inference loop #########################
    if cfg.inference.validation_set_only:
        # run inference using validation set only (no env stepping)
        for _ in range(10):
            combined_trajectories = generate_trajectories(model, val_subset.dataset, 10, cfg.trajectory_length)
            plot_trajectories(combined_trajectories)

    elif cfg.inference.sim_with_history:
        def create_trajectory_prior(policy, obs, trajectory_ortho6d):
            # Warm-start prior: remaining planned path, with the current TCP
            # pose (converted to ortho6d) prepended as the first waypoint.
            trajectory_index = policy.get_current_path_index()
            trajectory_prior = trajectory_ortho6d[trajectory_index:]
            current_tcp_pose_ortho6d = transform_quat_to_ortho6d(
                torch.tensor(obs['extra']['tcp_pose'], device=cfg.device).unsqueeze(0)
            ).squeeze(0)
            # Create a copy of the concatenated tensor to ensure we return a new object
            result = torch.cat([current_tcp_pose_ortho6d.unsqueeze(0), trajectory_prior], dim=0).clone()
            return result

        done = False
        truncated = False
        episode_count = 0
        total_episodes = cfg.inference.episodes
        episode_pbar = tqdm(total=total_episodes, desc='Collecting episodes')

        obs, _ = env.reset()  # Obs Dict -- obs['extra']['tcp_pose']
        policy.reset()
        trajectory_prior = None

        # NOTE(review): hard-coded user-specific output path — parameterize
        # via the config before sharing this script.
        timestamp = time.strftime("%m%d_%H%M")
        image_saving_path=os.path.join("/home/xuan/Documents/test_result", timestamp)

        while episode_count < total_episodes:
            try:
                step = 0
                replanning_count = 0
                while not (done or truncated):
                    # Update obsqueue (history buffer consumed by the policy)
                    policy.observation_wrapper.update_history_buffer(obs)

                    if not cfg.debug:
                        env.render()

                    # Consider replanning every `replanning_interval` env steps.
                    replanning_interval = 4
                    new_path = False
                    if step % replanning_interval == 0:

                        if step == 0: # initial planning (full denoising)
                            policy.plan_path_with_history(obs, do_normalize=True)
                            visualizer.plot_trajectory(policy.planned_path, obs, policy.point_cloud)
                            # visualizer.plot_trajectory(policy.planned_path, obs, policy.point_cloud)
                            new_path = True
                        else: # replanning — only while still >10 cm from the goal
                            position_diff = np.linalg.norm(obs['extra']['goal_pose'][:3] - obs['extra']['tcp_pose'][:3])
                            if position_diff > 0.10:
                                print("Replanning count: ", replanning_count, "position_diff: ", position_diff)
                                ## replanning with prior (warm start, few denoising steps)
                                trajectory_prior = create_trajectory_prior(policy, obs, policy.planned_path)
                                denoising_steps = 3
                                policy.plan_path_with_history(obs,
                                                              trajectory_prior=trajectory_prior,
                                                              do_normalize=True,
                                                              timestep=denoising_steps, # denoising step
                                                              choice='interpolate')
                                ## replanning without prior
                                # trajectory_ortho6d = policy.plan_path_with_history(obs, do_normalize=True)
                                new_path = True
                                replanning_count += 1
                    # visualize trajectory with point cloud after replanning
                    # if not cfg.debug:
                    #     visualizer.plot_trajectory(policy.planned_path, obs, policy.point_cloud)
                    if cfg.debug and policy.all_trajs is not None and new_path:
                        plot_trajectories(policy.all_trajs,
                                          save_path=os.path.join(image_saving_path, str(episode_count), "multi_samples"))
                        visualizer.plot_path_with_guidance(policy.model.diffusion_history,
                                                           policy.model.grad_scaled_history,
                                                           point_cloud=policy.point_cloud,
                                                           save_path=os.path.join(image_saving_path, str(episode_count), "guidance"))
                    if cfg.test.inference_record:
                        obs_inference_recorder.record_step(obs,
                                                           trajectory_prior=trajectory_prior,
                                                           all_samples=policy.all_trajs,
                                                           planned_path=policy.planned_path)

                    action = policy.follow_path(obs) #delta end-effector pose
                    obs, reward, done, truncated, info = env.step(action)
                    step += 1

                if cfg.test.inference_record:
                    obs_inference_recorder.save_episode()

                print("Simulation Done", "Total replanning count: ", replanning_count)
                # Full reconfigure so object layout is re-randomized per episode.
                obs, info = env.reset(options={"reconfigure": True})
                policy.reset()
                trajectory_prior = None
                done = False
                truncated = False
                episode_count += 1
                episode_pbar.update(1) # Update progress bar
            except KeyboardInterrupt:
                print("\nStopping simulation...")
                break
        print("Simulation finished")
        episode_pbar.close() # Close progress bar
        env.close()

    else:
        #DDPM sampling and following trajectory in environment#
        done = False
        truncated = False
        episode_count = 0
        total_episodes = cfg.inference.episodes
        # Create progress bar for episodes
        episode_pbar = tqdm(total=total_episodes, desc='Collecting episodes')

        obs, _ = env.reset()

        #### following trajectory ######
        while episode_count < total_episodes:
            print("obs start& goal: ", obs['extra'])
            try:
                # Classical planner acts as a feasibility gate: if it cannot
                # find a path (None), the episode is skipped immediately.
                classical_result = classical_policy.plan_path(obs)
                while not (done or truncated or classical_result is None):
                    action = policy(obs)

                    # if policy.all_trajs is not None:
                    #     plot_trajectories(policy.all_trajs)
                    #     visualizer.plot_trajectory(
                    #         planned_path=policy.planned_path,
                    #         obs=obs,
                    #         point_cloud=policy.point_cloud
                    #     )
                    if action is None:
                        print("Policy failed to generate action")
                        break
                    obs, reward, done, truncated, info = env.step(action)
                    env.render()

                obs, info = env.reset(options={"reconfigure": True})

                policy.reset()
                done = False
                truncated = False
                episode_count += 1
                episode_pbar.update(1) # Update progress bar

            except KeyboardInterrupt:
                print("\nStopping simulation...")
                break

        episode_pbar.close() # Close progress bar
        env.close()

if __name__ == "__main__":
    main()
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/run_joint_demo.py
ADDED
|
@@ -0,0 +1,321 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import hydra
|
| 3 |
+
from omegaconf import DictConfig, OmegaConf
|
| 4 |
+
import gymnasium as gym
|
| 5 |
+
import mani_skill2.envs
|
| 6 |
+
import cfdp.envs as envs # customized environments
|
| 7 |
+
from cfdp.envs import ENVS_DIR
|
| 8 |
+
|
| 9 |
+
from tqdm import tqdm
|
| 10 |
+
import numpy as np
|
| 11 |
+
import torch
|
| 12 |
+
from cfdp.diffusion_policy.utils.loaders import get_dataset, get_model, freeze_torch_model_params
|
| 13 |
+
from cfdp.diffusion_policy.models import TemporalUnet, UNET_DIM_MULTS, MLPModel
|
| 14 |
+
from cfdp.diffusion_policy.models.sample_functions import ddpm_sample_fn_stomp
|
| 15 |
+
from cfdp.motion_planner.neural_motion_planner import NeuralMotionPlannerPolicy
|
| 16 |
+
from cfdp.motion_planner.motion_planner_policy_base import ClassicalMotionPlannerPolicy
|
| 17 |
+
from cfdp.utils.plot_utils import plot_trajectories
|
| 18 |
+
from cfdp.envs import ROBOT_ASSETS_DIR
|
| 19 |
+
from cfdp.utils.data_utils import transform_quat_to_ortho6d
|
| 20 |
+
from cfdp.diffusion_policy.models.guide_managers import GuideManagerSTOMP
|
| 21 |
+
from cfdp.diffusion_policy.utils.summary_trajectory import generate_trajectories
|
| 22 |
+
from cfdp.utils import observation_wrapper, inference_recorder
|
| 23 |
+
from cfdp.utils.plot_utils import TrajectoryVisualizer
|
| 24 |
+
from cfdp.motion_planner.motion_controller import EndEffectorController
|
| 25 |
+
from mani_skill2.utils.wrappers import RecordEpisode
|
| 26 |
+
|
| 27 |
+
import random
|
| 28 |
+
import time
|
| 29 |
+
from matplotlib import pyplot as plt
|
| 30 |
+
|
| 31 |
+
# Fixed seeds so demo runs are reproducible.
np.random.seed(42)
random.seed(42)

# OmegaConf resolver: ${multiply:a,b} -> int(a) * int(b) inside config files.
OmegaConf.register_new_resolver("multiply", lambda x, y: int(float(x)) * int(float(y)))

# Extra obstacles passed to the env; empty means only the scene's own objects.
OBSTACLE_CONFIGS = []
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def plot_STOMP_debug_info(guide_debug_into, visualizer, point_cloud=None,
                          sphere_idx=24, time_step=-1):
    """Plot collision-sphere trajectories and SDF costs recorded by the STOMP guide.

    Args:
        guide_debug_into: Sequence of per-denoising-step debug records.
            (Name kept for backward compatibility; "info" was likely intended.)
            Each record carries torch tensors:
            ``sphere_poses`` [batch, seq_len, num_spheres, 3],
            ``sdf_costs`` [batch, seq_len, num_spheres],
            ``spheres_after_guidance`` / ``sampled_spheres`` [batch, seq_len, num_spheres, 3].
        visualizer: TrajectoryVisualizer used to render the figure.
        point_cloud: Optional obstacle point cloud to overlay.
        sphere_idx: Index of the robot collision sphere to highlight.
        time_step: Which recorded denoising step to plot (-1 = most recent).

    Returns:
        The figure produced by ``visualizer.plot_trajectory_with_cost``.
    """
    print(len(guide_debug_into))
    record = guide_debug_into[time_step]
    sphere_trajectories = record.sphere_poses.cpu().numpy()            # [batch, seq_len, num_spheres, 3]
    sdf_costs = record.sdf_costs.cpu().numpy()                          # [batch, seq_len, num_spheres]
    spheres_after_guidance = record.spheres_after_guidance.cpu().numpy()
    sampled_spheres = record.sampled_spheres.cpu().numpy()
    # NOTE(review): the original also read record.sphere_radii but never used
    # it; the unused load was dropped here.
    fig = visualizer.plot_trajectory_with_cost(sphere_trajectories,
                                               sdf_costs,
                                               spheres_after_guidance=spheres_after_guidance,
                                               sampled_spheres=sampled_spheres,
                                               point_cloud=point_cloud,
                                               sphere_idx=sphere_idx,
                                               time_step=time_step)
    return fig
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
@hydra.main(version_base="1.2", config_path="../configs", config_name="config_history_joint")
def main(cfg: DictConfig):
    """Run the joint-space diffusion-policy demo with history-conditioned replanning.

    Loads the dataset (for its normalizer), restores a trained diffusion prior,
    builds a ManiSkill2 env plus a neural motion-planner policy with STOMP
    guidance, then executes the planned trajectory while periodically
    replanning warm-started from the remaining plan.
    """
    # Load dataset with env, robot, task (also provides the normalizer).
    train_subset, train_dataloader, val_subset, val_dataloader = get_dataset(
        cfg_dataset=cfg.dataset,
        batch_size=cfg.batch_size,
        val_set_size=cfg.val_set_size,
        results_dir=cfg.results_dir,
        save_indices=False
    )
    # TODO: save / load normalizer directly instead of going through the dataset.

    # Diffusion prior: TemporalUnet backbone + MLP task-context encoder.
    diffusion_configs = dict(
        variance_schedule=cfg.model.variance_schedule,
        n_diffusion_steps=cfg.model.n_steps,
        prediction_mode=cfg.model.prediction_mode,
    )
    unet_configs = dict(
        state_dim=cfg.state_dim,
        n_support_points=cfg.trajectory_length,
        unet_input_dim=cfg.model.unet_input_dim,
        dim_mults=UNET_DIM_MULTS[cfg.model.unet_dim_mults_option],
        conditioning_type=cfg.model.conditioning_type,
        conditioning_embed_dim=cfg.model.conditioning_embed_dim
    )
    diffusion_model = get_model(
        model_class=cfg.model.model_class,
        model=TemporalUnet(**unet_configs),
        tensor_args={'device': cfg.device, 'dtype': torch.float32},
        context_model=MLPModel(in_dim=cfg.model.context_input_dim,
                               out_dim=cfg.model.conditioning_embed_dim,
                               input_field='tasks',
                               output_field='condition'),
        **diffusion_configs,
        **unet_configs
    )

    # Restore trained (optionally EMA) weights, then freeze for inference.
    diffusion_model.load_state_dict(
        torch.load(os.path.join(cfg.inference.model_dir, 'checkpoints',
                                'ema_model_current_state_dict.pth' if cfg.inference.use_ema else 'model_current_state_dict.pth'),
                   map_location=cfg.device,
                   weights_only=True)
    )
    diffusion_model.eval()
    model = diffusion_model
    freeze_torch_model_params(model)
    model = torch.compile(model)

    # Simulation environment (human render mode for the interactive demo).
    env = gym.make(cfg.inference.env_id,
                   obs_mode=cfg.inference.obs_mode,
                   reward_mode=cfg.inference.reward_mode,
                   control_mode=cfg.inference.control_mode,
                   enable_shadow=False,
                   render_mode="human",
                   robot_init_qpos_noise=0.5,
                   obstacle_configs=OBSTACLE_CONFIGS,
                   object_config_path=os.path.join(ENVS_DIR, "env_config", "shelf"),
                   create_obstacle_point_cloud=True,
                   is_demo=True)

    # STOMP-style guidance applied during denoising.
    guide = GuideManagerSTOMP(
        dataset=train_subset.dataset,
        robot_model=env.agent.robot,
        clip_grad=True,
        tensor_args={'device': cfg.device, 'dtype': torch.float32},
        guidance_weight=cfg.inference.guidance_weight,
    )

    # Observation pre-processing for the policy.
    ObservationWrapperClass = getattr(observation_wrapper, cfg.observation_wrapper)
    obs_wrapper = ObservationWrapperClass(train_subset.dataset,
                                          camera_type=cfg.inference.camera_type,
                                          use_ee_control=cfg.use_ee_control)

    policy = NeuralMotionPlannerPolicy(
        action_dim=env.action_space.shape[0],
        model=model,
        dataset=train_subset.dataset,
        trajectory_length=cfg.trajectory_length,
        inference_cfg=cfg.inference,
        observation_wrapper=obs_wrapper,
        use_ee_control=cfg.use_ee_control,
        guide_manager=guide,
        debug=cfg.debug)

    # Debug visualizer (denormalizes with the dataset's normalizer).
    visualizer = TrajectoryVisualizer(normalizer=train_subset.dataset.normalizer)

    # Controller to manually move the gripper (kept for interactive use).
    controller = EndEffectorController()

    ######################### main inference loop #########################
    if cfg.inference.sim_with_history:
        if cfg.use_ee_control:
            def create_trajectory_prior(policy, obs, trajectory_ortho6d):
                # Warm start: current TCP pose (ortho6d) + remaining plan.
                trajectory_index = policy.get_current_path_index()
                trajectory_prior = trajectory_ortho6d[trajectory_index:]
                current_tcp_pose_ortho6d = transform_quat_to_ortho6d(
                    torch.tensor(obs['extra']['tcp_pose'], device=cfg.device).unsqueeze(0)
                ).squeeze(0)
                return torch.cat([current_tcp_pose_ortho6d.unsqueeze(0), trajectory_prior], dim=0)
        else:
            def create_trajectory_prior(policy, obs, trajectory):
                # Joint-space variant: current joint configuration + remaining plan.
                trajectory_index = policy.get_current_path_index()
                trajectory_prior = trajectory[trajectory_index:]
                current_joint_pose = torch.tensor(obs['agent']['qpos']).to(cfg.device)
                return torch.cat([current_joint_pose.unsqueeze(0), trajectory_prior], dim=0)

        done = False
        truncated = False
        episode_count = 0
        total_episodes = cfg.inference.episodes
        episode_pbar = tqdm(total=total_episodes, desc='Collecting episodes')

        replanning_interval = cfg.inference.replanning_interval
        print("replanning interval: ", replanning_interval)
        obs, _ = env.reset()  # obs dict -- e.g. obs['extra']['tcp_pose']
        policy.reset()
        trajectory_prior = None

        # NOTE(review): only one episode runs although the progress bar is
        # sized by cfg.inference.episodes -- confirm this is intentional.
        while episode_count < 1:
            try:
                step = 0
                replanning_count = 0
                while not (done or truncated):
                    # Keep the observation history buffer up to date.
                    policy.observation_wrapper.update_history_buffer(obs)
                    env.render()

                    if step % replanning_interval == 0:
                        if step == 0:  # initial planning from scratch
                            policy.plan_path_with_history(obs, do_normalize=True)
                        else:  # replanning, warm-started from the current plan
                            position_diff = np.linalg.norm(obs['extra']['goal_pose'][:3] - obs['extra']['tcp_pose'][:3])
                            # Only replan while still far from the goal.
                            if position_diff > 0.20:
                                print("replanning with prior at step: ", step)
                                trajectory_prior = create_trajectory_prior(policy, obs, policy.planned_path)
                                denoising_steps = 4
                                policy.plan_path_with_history(obs,
                                                              trajectory_prior=trajectory_prior,
                                                              do_normalize=True,
                                                              timestep=denoising_steps,
                                                              choice='interpolate')
                                replanning_count += 1

                                # Debug view: sphere poses and SDF costs at one
                                # denoising step (blocks until the window closes).
                                fig1 = plot_STOMP_debug_info(guide.debug_state,
                                                             visualizer,
                                                             point_cloud=policy.point_cloud,
                                                             sphere_idx=24,
                                                             time_step=4)
                                plt.show()

                    action = policy.follow_path(obs)
                    obs, reward, done, truncated, info = env.step(action)
                    step += 1

                obs, info = env.reset(options={"reconfigure": True})
                policy.reset()
                trajectory_prior = None
                done = False
                truncated = False
                episode_count += 1
                episode_pbar.update(1)

            except KeyboardInterrupt:
                print("\nStopping simulation...")
                break

        print("Simulation finished")
        episode_pbar.close()
        env.close()


if __name__ == "__main__":
    main()
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/run_ms3_task_demo.py
ADDED
|
@@ -0,0 +1,253 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import hydra
|
| 3 |
+
from typing import Any, Callable, Dict, List, Optional
|
| 4 |
+
from omegaconf import DictConfig, OmegaConf
|
| 5 |
+
import gymnasium as gym
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
import random
|
| 10 |
+
import copy,time
|
| 11 |
+
from matplotlib import pyplot as plt
|
| 12 |
+
import sapien.core as sapien
|
| 13 |
+
from mani_skill.utils.wrappers.record import RecordEpisode
|
| 14 |
+
#from mani_skill2.trajectory.merge_trajectory import merge_trajectories
|
| 15 |
+
|
| 16 |
+
from cfdp.diffusion_policy.utils.loaders import get_dataset, get_model, freeze_torch_model_params
|
| 17 |
+
from cfdp.diffusion_policy.models import TemporalUnet, UNET_DIM_MULTS, MLPModel
|
| 18 |
+
from cfdp.motion_planner.neural_motion_planner import NeuralMotionPlannerPolicy
|
| 19 |
+
from cfdp.motion_planner.motion_planner_policy_base import ClassicalMotionPlannerPolicy
|
| 20 |
+
from cfdp.utils.plot_utils import plot_trajectories
|
| 21 |
+
from cfdp.utils.data_utils import transform_quat_to_ortho6d
|
| 22 |
+
from cfdp.diffusion_policy.models.guide_managers import GuideManagerPath, TrajectoryRanker
|
| 23 |
+
from cfdp.diffusion_policy.utils.summary_trajectory import generate_trajectories
|
| 24 |
+
from cfdp.utils import observation_wrapper, inference_recorder
|
| 25 |
+
from cfdp.utils.plot_utils import TrajectoryVisualizer
|
| 26 |
+
from cfdp.motion_planner.motion_controller import EndEffectorController
|
| 27 |
+
from cfdp.motion_planner.adaptive_trigger_wrapper import AdaptiveTrigger
|
| 28 |
+
|
| 29 |
+
import msx_envs
|
| 30 |
+
|
| 31 |
+
# Fixed seeds so demo runs are reproducible.
np.random.seed(42)
random.seed(42)

# OmegaConf resolver: ${multiply:a,b} -> int(a) * int(b) inside config files.
OmegaConf.register_new_resolver("multiply", lambda x, y: int(float(x)) * int(float(y)))
|
| 35 |
+
|
| 36 |
+
def plot_traj_ranking_debug_info(rank_debug_info, visualizer, point_cloud=None,
                                 sphere_idx=24, time_step=-1):
    """Plot collision-sphere trajectories and SDF costs recorded by the ranker.

    Args:
        rank_debug_info: Sequence of per-step debug records; each carries
            torch tensors ``sphere_poses`` [batch, seq_len, num_spheres, 3]
            and ``sdf_costs`` [batch, seq_len, num_spheres].
        visualizer: TrajectoryVisualizer used to render the figure.
        point_cloud: Optional obstacle point cloud to overlay.
        sphere_idx: Index of the robot collision sphere to highlight.
        time_step: Which recorded step to plot (-1 = most recent).

    Returns:
        The figure produced by ``visualizer.plot_trajectory_with_cost``.
    """
    print(len(rank_debug_info))
    record = rank_debug_info[time_step]
    sphere_trajectories = record.sphere_poses.cpu().numpy()  # [batch, seq_len, num_spheres, 3]
    sdf_costs = record.sdf_costs.cpu().numpy()               # [batch, seq_len, num_spheres]
    # NOTE(review): the original also read record.sphere_radii but never used
    # it; the unused load was dropped here.
    fig = visualizer.plot_trajectory_with_cost(sphere_trajectories,
                                               sdf_costs,
                                               spheres_after_guidance=None,
                                               sampled_spheres=None,
                                               point_cloud=point_cloud,
                                               sphere_idx=sphere_idx,
                                               time_step=time_step)
    return fig
|
| 50 |
+
|
| 51 |
+
@hydra.main(version_base="1.2", config_path="../configs", config_name="config_ms3_history_ddim")
def main(cfg: DictConfig):
    """Run the ManiSkill3 task demo driven by the adaptive-trigger planner.

    Loads the dataset (for its normalizer), restores a trained diffusion
    prior, builds a recorded ManiSkill3 env, and lets an ``AdaptiveTrigger``
    wrapper around the neural motion planner decide when to (re)plan and
    when to actuate the gripper while episodes are executed.
    """
    print(OmegaConf.to_yaml(cfg))
    # Load dataset with env, robot, task (also provides the normalizer).
    train_subset, train_dataloader, val_subset, val_dataloader = get_dataset(
        cfg_dataset=cfg.dataset,
        batch_size=cfg.batch_size,
        val_set_size=cfg.val_set_size,
        results_dir=cfg.results_dir,
        save_indices=False
    )

    # Diffusion prior: TemporalUnet backbone + MLP task-context encoder.
    diffusion_configs = dict(
        variance_schedule=cfg.model.variance_schedule,
        n_diffusion_steps=cfg.model.n_steps,
        prediction_mode=cfg.model.prediction_mode,
    )
    unet_configs = dict(
        state_dim=cfg.state_dim,
        n_support_points=cfg.trajectory_length,
        unet_input_dim=cfg.model.unet_input_dim,
        dim_mults=UNET_DIM_MULTS[cfg.model.unet_dim_mults_option],
        conditioning_type=cfg.model.conditioning_type,
        conditioning_embed_dim=cfg.model.conditioning_embed_dim
    )
    diffusion_model = get_model(
        model_class=cfg.model.model_class,
        model=TemporalUnet(**unet_configs),
        tensor_args={'device': cfg.device, 'dtype': torch.float32},
        context_model=MLPModel(in_dim=cfg.model.context_input_dim,
                               out_dim=cfg.model.conditioning_embed_dim,
                               input_field='tasks',
                               output_field='condition'),
        **diffusion_configs,
        **unet_configs
    )

    # Restore trained (optionally EMA) weights, then freeze for inference.
    diffusion_model.load_state_dict(
        torch.load(os.path.join(cfg.inference.model_dir, 'checkpoints',
                                'ema_model_current_state_dict.pth' if cfg.inference.use_ema else 'model_current_state_dict.pth'),
                   map_location=cfg.device,
                   weights_only=True)
    )
    diffusion_model.eval()
    model = diffusion_model
    freeze_torch_model_params(model)
    model = torch.compile(model)

    # Environment, wrapped to record trajectories and videos to ./videos.
    env = gym.make(
        cfg.inference.env_id,
        obs_mode=cfg.inference.obs_mode,
        control_mode=cfg.inference.control_mode,
        render_mode="human",
        reward_mode=cfg.inference.reward_mode,
        enable_shadow=False,
        robot_init_qpos_noise=0.2,
        rand_level=1,
        robot_uids="panda_sphere",
        object_config_dir="base_configs/base_table_shelf.yaml",
        is_inference=True
    )
    env = RecordEpisode(env, output_dir="videos", save_trajectory=True, trajectory_name="trajectory", save_video=True, video_fps=30)

    # Path-following guidance applied during denoising, plus a trajectory
    # ranker that scores candidate plans against the current robot state.
    guide = GuideManagerPath(
        dataset=train_subset.dataset,
        clip_grad=True,
        tensor_args={'device': cfg.device, 'dtype': torch.float32},
        guidance_weight=cfg.inference.guidance_weight,
    )
    rank_fn = TrajectoryRanker(
        dataset=train_subset.dataset,
        robot_model=env.agent.robot,
        get_current_qpos=lambda: env.agent.robot.get_qpos(),
        tensor_args={'device': cfg.device, 'dtype': torch.float32},
    )

    # Observation pre-processing for the policy.
    ObservationWrapperClass = getattr(observation_wrapper, cfg.observation_wrapper)
    obs_wrapper = ObservationWrapperClass(
        dataset=train_subset.dataset,
        camera_type=cfg.inference.camera_type,
        sensor_data_key="sensor_data",
        sensor_param_key="sensor_param"
    )

    policy = NeuralMotionPlannerPolicy(
        action_dim=env.action_space.shape[0],
        model=model,
        dataset=train_subset.dataset,
        trajectory_length=cfg.trajectory_length,
        inference_cfg=cfg.inference,
        observation_wrapper=obs_wrapper,
        guide_manager=guide,
        rank_fn=rank_fn,  # can be None
        debug=cfg.debug)

    # Debug visualizer (denormalizes with the dataset's normalizer).
    visualizer = TrajectoryVisualizer(normalizer=train_subset.dataset.normalizer)

    # Controller used by the adaptive trigger to actuate the gripper.
    controller = EndEffectorController()

    # ###### main inference loop ######
    def create_trajectory_prior(policy, obs, trajectory_ortho6d):
        """Warm start for replanning: current TCP pose + the remaining plan."""
        trajectory_index = policy.get_current_path_index()
        trajectory_prior = trajectory_ortho6d[trajectory_index:]
        current_tcp_pose_ortho6d = transform_quat_to_ortho6d(
            torch.tensor(obs['extra']['tcp_pose'], device=cfg.device).unsqueeze(0)
        ).squeeze(0)
        # Clone so later in-place edits of the plan cannot alias this prior.
        return torch.cat([current_tcp_pose_ortho6d.unsqueeze(0), trajectory_prior], dim=0).clone()

    # Adaptive planner wrapper: decides when to (re)plan and when to grasp.
    adaptive_trigger = AdaptiveTrigger(
        policy=policy,
        controller=controller,
        create_trajectory_prior=create_trajectory_prior,
        replanning_interval=30,
        pos_threshold=0.10,
        denoising_steps=3,
        grip_threshold=0.03,
        debug=cfg.debug
    )

    done = False
    truncated = False
    episode_count = 0
    total_episodes = cfg.inference.episodes
    episode_pbar = tqdm(total=total_episodes, desc='Collecting episodes')

    obs, _ = env.reset()  # obs dict -- e.g. obs['extra']['tcp_pose']
    policy.reset()
    trajectory_prior = None
    robot_executed_path = []  # executed TCP poses, kept for offline inspection

    # NOTE(review): loop bound is hard-coded to 5 episodes while the progress
    # bar is sized by cfg.inference.episodes -- confirm which is intended.
    while episode_count < 5:
        #### Planning ####
        try:
            while not (done or truncated):
                action = adaptive_trigger.step(obs, task="grasp cup")
                print(f"action: {action}")
                obs, reward, done, truncated, info = env.step(action)
                robot_executed_path.append(obs['extra']['tcp_pose'])

                if adaptive_trigger.step_idx % 40 == 0:
                    # Periodic debug view of candidate and simplified paths
                    # (blocks until the plot window is closed).
                    visualizer.plot_trajectory(planned_paths=adaptive_trigger.policy.all_trajs,
                                               obs=obs,
                                               point_cloud=adaptive_trigger.policy.point_cloud,
                                               simplified_paths=adaptive_trigger.policy.planned_path)
                    plt.show()

                env.render()

            # Pause before resetting to see the final state.
            time.sleep(3.0)
            obs, info = env.reset()
            adaptive_trigger.reset()
            trajectory_prior = None
            done = False
            truncated = False
            episode_count += 1
            episode_pbar.update(1)
        except KeyboardInterrupt:
            print("\nStopping simulation...")
            break
    print("Simulation finished")
    episode_pbar.close()
    env.close()


if __name__ == "__main__":
    main()
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/run_shelf_demo.py
ADDED
|
@@ -0,0 +1,286 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import hydra
|
| 3 |
+
from omegaconf import DictConfig, OmegaConf
|
| 4 |
+
import gymnasium as gym
|
| 5 |
+
import mani_skill2.envs
|
| 6 |
+
import cfdp.envs as envs # customized environments
|
| 7 |
+
from tqdm import tqdm
|
| 8 |
+
import numpy as np
|
| 9 |
+
import torch
|
| 10 |
+
from cfdp.diffusion_policy.utils.loaders import get_dataset, get_model, freeze_torch_model_params
|
| 11 |
+
from cfdp.diffusion_policy.models import TemporalUnet, UNET_DIM_MULTS, MLPModel
|
| 12 |
+
from cfdp.motion_planner.neural_motion_planner import NeuralMotionPlannerPolicy
|
| 13 |
+
from cfdp.motion_planner.motion_planner_policy_base import ClassicalMotionPlannerPolicy
|
| 14 |
+
from cfdp.utils.plot_utils import plot_trajectories
|
| 15 |
+
from cfdp.envs import ROBOT_ASSETS_DIR
|
| 16 |
+
from cfdp.utils.data_utils import transform_quat_to_ortho6d
|
| 17 |
+
from cfdp.diffusion_policy.models.guide_managers import GuideManagerPath, TrajectoryRanker
|
| 18 |
+
from cfdp.diffusion_policy.utils.summary_trajectory import generate_trajectories
|
| 19 |
+
from cfdp.utils import observation_wrapper, inference_recorder
|
| 20 |
+
from cfdp.utils.plot_utils import TrajectoryVisualizer
|
| 21 |
+
from cfdp.envs import ENVS_DIR
|
| 22 |
+
from cfdp.motion_planner.motion_controller import EndEffectorController
|
| 23 |
+
from mani_skill2.utils.wrappers import RecordEpisode
|
| 24 |
+
|
| 25 |
+
import random
|
| 26 |
+
import time
|
| 27 |
+
from matplotlib import pyplot as plt
|
| 28 |
+
|
| 29 |
+
# Deterministic seeding so environment resets and sampling repeat across runs.
_SEED = 42
np.random.seed(_SEED)
random.seed(_SEED)

# Hydra/OmegaConf helper: "${multiply:a,b}" resolves to int(a) * int(b)
# (values arrive as strings, hence the float() round-trip).
OmegaConf.register_new_resolver(
    "multiply", lambda lhs, rhs: int(float(lhs)) * int(float(rhs))
)

# Extra obstacles injected into the environment; empty means scene defaults only.
OBSTACLE_CONFIGS = []
|
| 35 |
+
|
| 36 |
+
def plot_traj_ranking_debug_info(rank_debug_info, visualizer, point_cloud=None,
                                 sphere_idx=24, time_step=-1):
    """Plot per-sphere SDF costs along sampled trajectories for one ranking step.

    Args:
        rank_debug_info: sequence of per-timestep debug records; each record
            exposes torch tensors ``sphere_poses`` [batch, seq_len, num_spheres, 3]
            and ``sdf_costs`` [batch, seq_len, num_spheres] (shapes per the
            original inline comments -- confirm against TrajectoryRanker).
        visualizer: TrajectoryVisualizer-like object providing
            ``plot_trajectory_with_cost``.
        point_cloud: optional obstacle point cloud forwarded to the plot.
        sphere_idx: index of the collision sphere to highlight.
        time_step: which recorded step to plot (default: the last one).

    Returns:
        The figure produced by ``visualizer.plot_trajectory_with_cost``.
    """
    record = rank_debug_info[time_step]
    # Move tensors to host memory for plotting.
    sphere_trajectories = record.sphere_poses.cpu().numpy()  # [batch, seq_len, num_spheres, 3]
    sdf_costs = record.sdf_costs.cpu().numpy()               # [batch, seq_len, num_spheres]
    # NOTE: the original also materialised record.sphere_radii and printed
    # len(rank_debug_info); both were unused debug leftovers and were removed.
    return visualizer.plot_trajectory_with_cost(sphere_trajectories,
                                                sdf_costs,
                                                spheres_after_guidance=None,
                                                sampled_spheres=None,
                                                point_cloud=point_cloud,
                                                sphere_idx=sphere_idx,
                                                time_step=time_step)
|
| 50 |
+
|
| 51 |
+
@hydra.main(version_base="1.2", config_path="../configs", config_name="config_history_ddim")
def main(cfg: DictConfig):
    """Interactive shelf-scene demo of the diffusion-based neural motion planner.

    Restores a trained diffusion policy checkpoint, builds the ManiSkill2
    environment plus guidance/ranking helpers, then runs one rendered episode
    with periodic prior-conditioned replanning and a final gripper release.
    All parameters come from the Hydra config tree (config_history_ddim).
    """
    # print(OmegaConf.to_yaml(cfg))

    ########################################################################################################################
    # Load dataset with env, robot, task.
    # The dataset is needed here mainly for its normalizer and metadata.
    train_subset, train_dataloader, val_subset, val_dataloader = get_dataset(
        cfg_dataset=cfg.dataset,
        batch_size=cfg.batch_size,
        val_set_size=cfg.val_set_size,
        results_dir=cfg.results_dir,
        save_indices=False
    )
    #TODO: save / load normalizer. Do not use dataset

    ########################################################################################################################
    # Load prior model
    diffusion_configs = dict(
        variance_schedule=cfg.model.variance_schedule,
        n_diffusion_steps=cfg.model.n_steps,
        prediction_mode=cfg.model.prediction_mode,
    )

    unet_configs = dict(
        state_dim=cfg.state_dim,
        n_support_points=cfg.trajectory_length,
        unet_input_dim=cfg.model.unet_input_dim,
        dim_mults=UNET_DIM_MULTS[cfg.model.unet_dim_mults_option],
        conditioning_type=cfg.model.conditioning_type,
        conditioning_embed_dim = cfg.model.conditioning_embed_dim
    )

    # Diffusion model with a task-conditioning MLP ('tasks' -> 'condition').
    diffusion_model = get_model(
        model_class=cfg.model.model_class,
        model=TemporalUnet(**unet_configs),
        tensor_args={'device': cfg.device, 'dtype': torch.float32},
        context_model=MLPModel(in_dim=cfg.model.context_input_dim,
                               out_dim=cfg.model.conditioning_embed_dim,
                               input_field='tasks',
                               output_field='condition'),
        **diffusion_configs,
        **unet_configs
    )

    # load saved policy model (EMA weights when cfg.inference.use_ema is set)
    diffusion_model.load_state_dict(
        torch.load(os.path.join(cfg.inference.model_dir, 'checkpoints',
                                'ema_model_current_state_dict.pth' if cfg.inference.use_ema else 'model_current_state_dict.pth'),
                   map_location=cfg.device,
                   weights_only=True)
    )
    diffusion_model.eval()
    model = diffusion_model

    freeze_torch_model_params(model)
    model = torch.compile(model)
    collision_lists = []  # NOTE(review): never appended to in this function
    ########################################################################################################################
    # Human-rendered demo environment with the shelf object layout.
    env = gym.make(cfg.inference.env_id,
                   obs_mode=cfg.inference.obs_mode,
                   reward_mode=cfg.inference.reward_mode,
                   control_mode=cfg.inference.control_mode,
                   enable_shadow=False,
                   # render_mode="cameras" if cfg.inference.evaluate else "human",
                   render_mode="human",
                   robot_init_qpos_noise=0.5,
                   obstacle_configs=OBSTACLE_CONFIGS,
                   object_config_path=os.path.join(ENVS_DIR, "env_config","shelf"),
                   create_obstacle_point_cloud=True,
                   is_demo=True)

    # Initialize guide manager
    # guide = None
    guide = GuideManagerPath(
        dataset = train_subset.dataset,
        clip_grad=True,
        tensor_args={'device': cfg.device, 'dtype': torch.float32},
        guidance_weight=cfg.inference.guidance_weight,
    )

    # guide = GuideManagerJointStates(
    #     dataset = train_subset.dataset,
    #     robot_model = env.agent.robot,
    #     clip_grad=True,
    #     tensor_args={'device': cfg.device, 'dtype': torch.float32},
    #     guidance_weight=cfg.inference.guidance_weight,
    # )

    # Ranks candidate trajectories against the live robot state.
    rank_fn = TrajectoryRanker(
        dataset = train_subset.dataset,
        robot_model = env.agent.robot,
        get_current_qpos=lambda: env.agent.robot.get_qpos(),
        tensor_args={'device': cfg.device, 'dtype': torch.float32},
    )

    # Observation wrapper class is selected by name from the config.
    ObservationWrapperClass = getattr(observation_wrapper, cfg.observation_wrapper)
    obs_wrapper = ObservationWrapperClass(train_subset.dataset, camera_type=cfg.inference.camera_type)

    policy = NeuralMotionPlannerPolicy(
        action_dim=env.action_space.shape[0],
        model=model,
        dataset=train_subset.dataset,
        trajectory_length=cfg.trajectory_length,
        inference_cfg=cfg.inference,
        observation_wrapper=obs_wrapper,
        guide_manager=guide,
        rank_fn=rank_fn, #can be None
        debug=cfg.debug)

    ## visualizer ##
    visualizer = TrajectoryVisualizer(normalizer=train_subset.dataset.normalizer)

    ## controller to manually move the gripper ##
    controller = EndEffectorController()

    ######################### main inference loop #########################
    def create_trajectory_prior(policy, obs, trajectory_ortho6d):
        # Warm-start prior for replanning: the untraversed tail of the
        # previous plan, prefixed with the robot's current TCP pose (ortho6d).
        trajectory_index = policy.get_current_path_index()
        trajectory_prior = trajectory_ortho6d[trajectory_index:]
        current_tcp_pose_ortho6d = transform_quat_to_ortho6d(
            torch.tensor(obs['extra']['tcp_pose'], device=cfg.device).unsqueeze(0)
        ).squeeze(0)
        # Create a copy of the concatenated tensor to ensure we return a new object
        result = torch.cat([current_tcp_pose_ortho6d.unsqueeze(0), trajectory_prior], dim=0).clone()
        return result

    # env = RecordEpisode(env, "/home/rui/Documents/maniskill2_benchmark", clean_on_close=False, save_video=True)

    done = False
    truncated = False
    episode_count = 0
    total_episodes = cfg.inference.episodes
    episode_pbar = tqdm(total=total_episodes, desc='Collecting episodes')

    obs, _ = env.reset() # Obs Dict -- obs['extra']['tcp_pose']
    policy.reset()
    trajectory_prior = None

    # Demo runs a single episode even though the progress bar is sized by
    # cfg.inference.episodes.
    while episode_count < 1:

        #### Move down the gripper and grasp the object #####
        # calculate target end-effector pose
        # current_ee_pose = obs['extra']['tcp_pose']
        # target_ee_pose = current_ee_pose + np.array([0, 0, -0.22, 0, 0, 0, 0])
        # # move down the gripper
        # for _ in range(30):
        #     collision_spheres = env.obtain_collision_spheres()
        #     # print("collision_spheres: ", collision_spheres)
        #     current_ee_pose = obs['extra']['tcp_pose']
        #     action = controller.computer_control(current_ee_pose, target_ee_pose)
        #     action = controller.open_gripper(action)
        #     obs, reward, done, truncated, info = env.step(action)
        #     ret = env.render()
        #     # print(type(ret))

        # action = controller.computer_control(current_ee_pose, target_ee_pose)
        # action = controller.close_gripper(action)
        # obs, reward, done, truncated, info = env.step(action)
        # env.render()
        # print("Grasped the object")

        #### Planning ####
        try:
            step = 0
            replanning_count = 0
            while not (done or truncated):
                # Update obsqueue
                policy.observation_wrapper.update_history_buffer(obs)

                if not cfg.debug:
                    env.render()

                replanning_interval = 10  # replan every N control steps
                new_path = False  # NOTE(review): set but never read afterwards
                if step % replanning_interval == 0:

                    if step == 0: # initial planning
                        policy.plan_path_with_history(obs, do_normalize=True)
                        # visualizer.plot_trajectory(policy.all_trajs, obs, policy.point_cloud)
                        # visualizer.plot_trajectory(policy.planned_path, obs, policy.point_cloud)
                        new_path = True
                    else: # replanning
                        # Only replan while the TCP is still far (>10 cm) from the goal.
                        position_diff = np.linalg.norm(obs['extra']['goal_pose'][:3] - obs['extra']['tcp_pose'][:3])
                        if position_diff > 0.10:
                            print("Replanning count: ", replanning_count, "position_diff: ", position_diff)
                            ## replanning with prior
                            trajectory_prior = create_trajectory_prior(policy, obs, policy.planned_path)
                            denoising_steps = 3
                            policy.plan_path_with_history(obs,
                                                          trajectory_prior=trajectory_prior,
                                                          do_normalize=True,
                                                          timestep=denoising_steps, # denoising step
                                                          choice='interpolate')
                            ## replanning without prior
                            # trajectory_ortho6d = policy.plan_path_with_history(obs, do_normalize=True)
                            new_path = True
                            replanning_count += 1

                # fig1 = plot_traj_ranking_debug_info(rank_fn.debug_state,
                #                                     visualizer,
                #                                     point_cloud=policy.point_cloud,
                #                                     sphere_idx=24,
                #                                     time_step=2)
                # visualizer.plot_trajectory(policy.all_trajs, obs, policy.point_cloud)
                # plt.show()

                action = policy.follow_path(obs) #delta end-effector pose
                obs, reward, done, truncated, info = env.step(action)
                step += 1

            print("Open the gripper")
            for _ in range(10):
                action = controller.open_gripper(action)
                obs, reward, done, truncated, info = env.step(action)
                env.render()

            # Add a pause before resetting to see the final state
            time.sleep(3.0)
            print("Simulation Done", "Total replanning count: ", replanning_count)
            # NOTE(review): env.close() immediately followed by env.reset()
            # below looks suspicious (resetting a closed env) -- confirm intended.
            env.close()
            obs, info = env.reset(options={"reconfigure": True})
            policy.reset()
            trajectory_prior = None
            done = False
            truncated = False
            episode_count += 1
            episode_pbar.update(1) # Update progress bar
        except KeyboardInterrupt:
            print("\nStopping simulation...")
            break
    print("Simulation finished")
    episode_pbar.close() # Close progress bar
    env.close()
|
| 284 |
+
|
| 285 |
+
if __name__ == "__main__":
    # Hydra entry point: parses CLI overrides and runs the demo loop.
    main()
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/scripts/sim_benchmark.py
ADDED
|
@@ -0,0 +1,302 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import hydra
|
| 3 |
+
from omegaconf import DictConfig, OmegaConf
|
| 4 |
+
import gymnasium as gym
|
| 5 |
+
import mani_skill2.envs
|
| 6 |
+
import cfdp.envs as envs # customized environments
|
| 7 |
+
from tqdm import tqdm
|
| 8 |
+
import numpy as np
|
| 9 |
+
import torch
|
| 10 |
+
from cfdp.diffusion_policy.utils.loaders import get_dataset, get_model, freeze_torch_model_params
|
| 11 |
+
from cfdp.diffusion_policy.models import TemporalUnet, UNET_DIM_MULTS, MLPModel
|
| 12 |
+
from cfdp.motion_planner.neural_motion_planner import NeuralMotionPlannerPolicy
|
| 13 |
+
from cfdp.motion_planner.motion_planner_policy_base import ClassicalMotionPlannerPolicy
|
| 14 |
+
from cfdp.utils.plot_utils import plot_trajectories
|
| 15 |
+
from cfdp.envs import ROBOT_ASSETS_DIR
|
| 16 |
+
from cfdp.utils.data_utils import transform_quat_to_ortho6d
|
| 17 |
+
from cfdp.diffusion_policy.models.guide_managers import GuideManagerPath, GuideManagerSTOMP, TrajectoryRanker
|
| 18 |
+
from cfdp.diffusion_policy.utils.summary_trajectory import generate_trajectories
|
| 19 |
+
from cfdp.utils import observation_wrapper, inference_recorder
|
| 20 |
+
from cfdp.utils.plot_utils import TrajectoryVisualizer
|
| 21 |
+
from cfdp.envs import ENVS_DIR
|
| 22 |
+
from mani_skill2.utils.wrappers import RecordEpisode
|
| 23 |
+
|
| 24 |
+
import random
|
| 25 |
+
import time
|
| 26 |
+
|
| 27 |
+
# Fix RNG seeds so environment resets and sampling are reproducible.
np.random.seed(42)
random.seed(42)

# backtrace debug
# Dump a Python traceback on hard crashes (e.g. native faults in the simulator).
import faulthandler
faulthandler.enable()

# OmegaConf resolver: "${multiply:a,b}" -> int(a) * int(b) inside config files.
OmegaConf.register_new_resolver("multiply", lambda x, y: int(float(x)) * int(float(y)))

# Additional obstacle definitions for the environment; empty uses scene defaults.
OBSTACLE_CONFIGS = []
|
| 37 |
+
|
| 38 |
+
@hydra.main(version_base="1.2", config_path="../configs", config_name="config_history_ddim")
def main(cfg: DictConfig):
    """Benchmark the diffusion motion planner over many simulated episodes.

    Restores a trained policy checkpoint, rolls out 100 episodes in
    CustomizedPick-v0 with periodic prior-conditioned replanning, and
    accumulates success / collision / task-failure rates (printed after every
    episode and at the end). Requires cfg.inference.sim_with_history; any
    other configuration raises.
    """
    # print(OmegaConf.to_yaml(cfg))

    ########################################################################################################################
    # Load dataset with env, robot, task.
    # The dataset is needed here mainly for its normalizer and metadata.
    train_subset, train_dataloader, val_subset, val_dataloader = get_dataset(
        cfg_dataset=cfg.dataset,
        batch_size=cfg.batch_size,
        val_set_size=cfg.val_set_size,
        results_dir=cfg.results_dir,
        save_indices=False
    )
    #TODO: save / load normalizer. Do not use dataset

    ########################################################################################################################
    # Load prior model
    diffusion_configs = dict(
        variance_schedule=cfg.model.variance_schedule,
        n_diffusion_steps=cfg.model.n_steps,
        prediction_mode=cfg.model.prediction_mode,
    )

    unet_configs = dict(
        state_dim=cfg.state_dim,
        n_support_points=cfg.trajectory_length,
        unet_input_dim=cfg.model.unet_input_dim,
        dim_mults=UNET_DIM_MULTS[cfg.model.unet_dim_mults_option],
        conditioning_type=cfg.model.conditioning_type,
        conditioning_embed_dim = cfg.model.conditioning_embed_dim
    )

    # Diffusion model with a task-conditioning MLP ('tasks' -> 'condition').
    diffusion_model = get_model(
        model_class=cfg.model.model_class,
        model=TemporalUnet(**unet_configs),
        tensor_args={'device': cfg.device, 'dtype': torch.float32},
        context_model=MLPModel(in_dim=cfg.model.context_input_dim, out_dim=cfg.model.conditioning_embed_dim, input_field='tasks', output_field='condition'),
        **diffusion_configs,
        **unet_configs
    )

    # load saved policy model (EMA weights when cfg.inference.use_ema is set)
    diffusion_model.load_state_dict(
        torch.load(os.path.join(cfg.inference.model_dir, 'checkpoints',
                                'ema_model_current_state_dict.pth' if cfg.inference.use_ema else 'model_current_state_dict.pth'),
                   map_location=cfg.device,
                   weights_only=True)
    )
    diffusion_model.eval()
    model = diffusion_model

    freeze_torch_model_params(model)
    model = torch.compile(model)

    ########################################################################################################################

    # Create environment
    # env = gym.make(cfg.inference.env_id,
    #                obs_mode=cfg.inference.obs_mode,
    #                reward_mode=cfg.inference.reward_mode,
    #                control_mode=cfg.inference.control_mode,
    #                enable_shadow=False,
    #                render_mode="cameras" if cfg.inference.evaluate else "human",
    #                robot_init_qpos_noise=0.5,
    #                obstacle_configs=OBSTACLE_CONFIGS)

    # Off-screen ("cameras") rendering benchmark environment with distractors.
    env = gym.make('CustomizedPick-v0',
                   obs_mode=cfg.inference.obs_mode,
                   reward_mode=cfg.inference.reward_mode,
                   control_mode=cfg.inference.control_mode,
                   enable_shadow=False,
                   render_mode="cameras",
                   # render_mode="human",
                   robot_init_qpos_noise=0.5,
                   obstacle_configs=OBSTACLE_CONFIGS,
                   # object_config_path=os.path.join(ENVS_DIR, "env_config","smallbox_distractor"),
                   # object_config_path=os.path.join(ENVS_DIR, "env_config","distractor25"),
                   object_config_path=os.path.join(ENVS_DIR, "env_config","distractorbox"),
                   # object_config_path=os.path.join(ENVS_DIR, "concept_env_config"),
                   create_obstacle_point_cloud=True)


    # Initialize guide manager
    # End-effector control uses the path guide plus a trajectory ranker;
    # joint control uses the STOMP-style guide without ranking.
    if cfg.use_ee_control:
        guide = GuideManagerPath(
            dataset = train_subset.dataset,
            clip_grad=True,
            tensor_args={'device': cfg.device, 'dtype': torch.float32},
            guidance_weight=cfg.inference.guidance_weight,
        )
        rank_fn = TrajectoryRanker(
            dataset = train_subset.dataset,
            robot_model = env.agent.robot,
            get_current_qpos=lambda: env.agent.robot.get_qpos(),
            tensor_args={'device': cfg.device, 'dtype': torch.float32},
        )
        print("use ee guide and rank_fn")
    else:
        guide = GuideManagerSTOMP(
            dataset = train_subset.dataset,
            robot_model = env.agent.robot,
            clip_grad=True,
            tensor_args={'device': cfg.device, 'dtype': torch.float32},
            guidance_weight=cfg.inference.guidance_weight,
        )
        print("use joint guide")
        rank_fn = None

    # guide = None
    ObservationWrapperClass = getattr(observation_wrapper, cfg.observation_wrapper)
    # obs_wrapper = ObservationWrapperClass(train_subset.dataset, camera_type=cfg.inference.camera_type)
    obs_wrapper = ObservationWrapperClass(train_subset.dataset,
                                          camera_type=cfg.inference.camera_type,
                                          use_ee_control=cfg.use_ee_control)

    policy = NeuralMotionPlannerPolicy(
        action_dim=env.action_space.shape[0],
        model=model,
        dataset=train_subset.dataset,
        trajectory_length=cfg.trajectory_length,
        inference_cfg=cfg.inference,
        observation_wrapper=obs_wrapper,
        use_ee_control=cfg.use_ee_control,
        guide_manager=guide,
        rank_fn=rank_fn,
        debug=cfg.debug)

    ## visualizer ##
    visualizer = TrajectoryVisualizer(normalizer=train_subset.dataset.normalizer)

    ## inference recorder ##
    if cfg.test.inference_record:
        obs_inference_recorder = inference_recorder.InferenceRecorder(save_dir=cfg.test.recording_dir)
    else:
        obs_inference_recorder = None

    if cfg.inference.sim_with_history:
        if cfg.use_ee_control:
            def create_trajectory_prior(policy, obs, trajectory_ortho6d):
                # Warm-start prior: untraversed tail of the previous plan,
                # prefixed with the current TCP pose in ortho6d form.
                trajectory_index = policy.get_current_path_index()
                trajectory_prior = trajectory_ortho6d[trajectory_index:]
                current_tcp_pose_ortho6d = transform_quat_to_ortho6d(
                    torch.tensor(obs['extra']['tcp_pose'], device=cfg.device).unsqueeze(0)
                ).squeeze(0)
                return torch.cat([current_tcp_pose_ortho6d.unsqueeze(0), trajectory_prior], dim=0)
        else:
            def create_trajectory_prior(policy, obs, trajectory):
                # Joint-space analogue: prefix the tail with the current qpos.
                trajectory_index = policy.get_current_path_index()
                trajectory_prior = trajectory[trajectory_index:]
                current_joint_pose = torch.tensor(obs['agent']['qpos']).to(cfg.device)
                return torch.cat([current_joint_pose.unsqueeze(0), trajectory_prior], dim=0)

        # env = RecordEpisode(env, "/home/xuan/Code/maniskill2_benchmark", clean_on_close=False, save_video=True)

        done = False
        truncated = False
        episode_count = 0
        total_episodes = 100
        episode_pbar = tqdm(total=total_episodes, desc='Collecting episodes')

        obs, _ = env.reset() # Obs Dict -- obs['extra']['tcp_pose']
        policy.reset()
        trajectory_prior = None

        timestamp = time.strftime("%m%d_%H%M")
        # NOTE(review): image_saving_path is never used below -- dead variable?
        image_saving_path=os.path.join("/home/xuan/Documents/test_result", timestamp)

        success_count = 0 # Done = True, Collided = False
        task_fail_count = 0 # Done = True
        collision_count = 0 # Collided = True
        replanning_interval = cfg.inference.replanning_interval
        print("replanning interval: ", replanning_interval)
        inference_time_list = []
        while episode_count < total_episodes:
            collided = False
            try:
                step = 0
                replanning_count = 0
                while not (done or truncated or collided):
                    # Latch the collision flag so a single contact ends the episode.
                    if collided is False:
                        collided = obs['extra']['collision']
                    # Update obsqueue
                    policy.observation_wrapper.update_history_buffer(obs)

                    # env.render()
                    new_path = False  # NOTE(review): set but never read afterwards
                    if step % replanning_interval == 0:

                        if step == 0: # initial planning
                            # start_time = time.time()
                            policy.plan_path_with_history(obs, do_normalize=True)
                            # inference_time_list.append(time.time() - start_time)
                            new_path = True
                        else: # replanning
                            # Replan only while the TCP is still >15 cm from the goal.
                            position_diff = np.linalg.norm(obs['extra']['goal_pose'][:3] - obs['extra']['tcp_pose'][:3])
                            if position_diff > 0.15:
                                # print("Replanning count: ", replanning_count, "position_diff: ", position_diff)
                                ## replanning with prior
                                print("replanning with prior at step: ", step)
                                trajectory_prior = create_trajectory_prior(policy, obs, policy.planned_path)
                                denoising_steps = 4
                                # start_time = time.time()
                                policy.plan_path_with_history(obs,
                                                              trajectory_prior=trajectory_prior,
                                                              do_normalize=True,
                                                              timestep=denoising_steps, # denoising step
                                                              choice='interpolate')
                                # inference_time_list.append(time.time() - start_time)
                                # # replanning without prior
                                # trajectory_ortho6d = policy.plan_path_with_history(obs, do_normalize=True)
                                replanning_count += 1
                        # Record each planning step (indentation reconstructed
                        # from the diff view -- confirm against the original file).
                        if cfg.test.inference_record:
                            obs_inference_recorder.record_step(obs,
                                                               trajectory_prior=trajectory_prior,
                                                               all_samples=policy.all_trajs,
                                                               planned_path=policy.planned_path)

                    action = policy.follow_path(obs)
                    obs, reward, done, truncated, info = env.step(action)
                    # print("Statistical of inference time: ", np.mean(inference_time_list), "std: ", np.std(inference_time_list))

                    step += 1

                if cfg.test.inference_record:
                    obs_inference_recorder.save_episode()

                print("Simulation Done", "Total replanning count: ", replanning_count)
                print("done: ", done, "truncated: ", truncated, "collided: ", collided)
                # Metrics Loggers
                if done and collided is False:
                    success_count += 1
                elif collided:
                    collision_count += 1
                else:
                    task_fail_count += 1

                obs, info = env.reset(options={"reconfigure": True})
                policy.reset()
                print_task_rate(success_count, collision_count, task_fail_count, episode_count+1)
                trajectory_prior = None
                done = False
                truncated = False
                episode_count += 1
                episode_pbar.update(1) # Update progress bar

            except KeyboardInterrupt:
                print("\nStopping simulation...")
                break
        print("Simulation finished")
        print_task_rate(success_count, collision_count, task_fail_count, total_episodes)
        episode_pbar.close() # Close progress bar
        env.close()
    else:
        raise Exception("Error: Not implemented. You are using the wrong file.")
|
| 292 |
+
|
| 293 |
+
def print_task_rate(success_count, collision_count, task_fail_count, episodes_count):
    """Print success / task-failure / collision rates for the finished episodes.

    Args:
        success_count: episodes that finished ``done`` without a collision.
        collision_count: episodes aborted because the robot collided.
        task_fail_count: episodes that ended neither successful nor collided.
        episodes_count: total number of episodes evaluated so far.
    """
    # Guard against a zero denominator (e.g. if called before any episode
    # completes) instead of raising ZeroDivisionError.
    if episodes_count <= 0:
        print("No episodes completed yet; rates unavailable.")
        return
    success_rate = success_count / episodes_count
    task_fail_rate = task_fail_count / episodes_count
    collision_rate = collision_count / episodes_count
    print(f"Success rate: {success_count} / {episodes_count} = {success_rate:.4f}")
    print(f"Task fail rate: {task_fail_count} / {episodes_count} = {task_fail_rate:.4f}")
    print(f"Collision rate: {collision_count} / {episodes_count} = {collision_rate:.4f}")
|
| 300 |
+
|
| 301 |
+
if __name__ == "__main__":
    # Hydra entry point: parses CLI overrides and runs the benchmark loop.
    main()
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/mp_demo.py
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sapien.core as sapien
|
| 2 |
+
import mplib
|
| 3 |
+
import numpy as np
|
| 4 |
+
from sapien.utils.viewer import Viewer
|
| 5 |
+
from mplib import Pose
|
| 6 |
+
import trimesh
|
| 7 |
+
|
| 8 |
+
class PlanningDemo():
|
| 9 |
+
def __init__(self):
    """Build the SAPIEN demo world: engine/renderer, lit scene with ground and
    table, a Panda arm loaded from URDF, three colored cubes, and an mplib
    motion planner (via setup_planner)."""
    self.engine = sapien.Engine()
    self.renderer = sapien.SapienRenderer()
    self.renderer.set_log_level('warning')
    self.engine.set_renderer(self.renderer)

    # Physics scene: 240 Hz timestep, ground 0.8 m below the robot base.
    scene_config = sapien.SceneConfig()
    self.scene = self.engine.create_scene(scene_config)
    self.scene.set_timestep(1 / 240.0)
    self.scene.add_ground(-0.8)
    physical_material = self.scene.create_physical_material(1, 1, 0.0)
    self.scene.default_physical_material = physical_material

    # Lighting: ambient plus one shadowed directional and three point lights.
    self.scene.set_ambient_light([0.5, 0.5, 0.5])
    self.scene.add_directional_light([0, 1, -1], [0.5, 0.5, 0.5], shadow=True)
    self.scene.add_point_light([1, 2, 2], [1, 1, 1], shadow=True)
    self.scene.add_point_light([1, -2, 2], [1, 1, 1], shadow=True)
    self.scene.add_point_light([-1, 0, 1], [1, 1, 1], shadow=True)

    # Interactive viewer looking at the workspace.
    self.viewer = Viewer(self.renderer)
    self.viewer.set_scene(self.scene)
    self.viewer.set_camera_xyz(x=1.2, y=0.25, z=0.4)
    self.viewer.set_camera_rpy(r=0, p=-0.4, y=2.7)

    # Robot
    # Load URDF
    # NOTE(review): hard-coded absolute path -- breaks on other machines;
    # consider deriving it from an asset-dir constant or env var.
    loader: sapien.URDFLoader = self.scene.create_urdf_loader()
    loader.fix_root_link = True
    self.robot: sapien.Articulation = loader.load("/home/rui/Documents/maniskill2_benchmark/mani_skill2/assets/descriptions/panda_v2_sphere.urdf")
    self.robot.set_root_pose(sapien.Pose([0, 0, 0], [1, 0, 0, 0]))

    # 0.5618216, 0.0823961, 0.0563702, 0.8212124
    # Set initial joint positions (7 arm joints + 2 gripper fingers).
    init_qpos = [0, 0.19634954084936207, 0.0, -2.617993877991494, 0.0, 2.941592653589793, 0.7853981633974483, 0, 0]
    self.robot.set_qpos(init_qpos)

    # PD drives on every active joint so set_drive_target can track the plan.
    self.active_joints = self.robot.get_active_joints()
    for joint in self.active_joints:
        joint.set_drive_property(stiffness=1000, damping=200)

    # table top (kinematic so it never moves under contact)
    builder = self.scene.create_actor_builder()
    builder.add_box_collision(half_size=[0.4, 0.4, 0.025])
    builder.add_box_visual(half_size=[0.4, 0.4, 0.025])
    self.table = builder.build_kinematic(name='table')
    self.table.set_pose(sapien.Pose([0.56, 0, - 0.025]))

    # boxes: three dynamic cubes used as manipulation targets/obstacles.
    builder = self.scene.create_actor_builder()
    builder.add_box_collision(half_size=[0.02, 0.02, 0.06])
    builder.add_box_visual(half_size=[0.02, 0.02, 0.06], color=[1, 0, 0])
    self.red_cube = builder.build(name='red_cube')
    self.red_cube.set_pose(sapien.Pose([0.4, 0.3, 0.06]))

    builder = self.scene.create_actor_builder()
    builder.add_box_collision(half_size=[0.02, 0.02, 0.04])
    builder.add_box_visual(half_size=[0.02, 0.02, 0.04], color=[0, 1, 0])
    self.green_cube = builder.build(name='green_cube')
    self.green_cube.set_pose(sapien.Pose([0.2, -0.3, 0.04]))

    builder = self.scene.create_actor_builder()
    builder.add_box_collision(half_size=[0.08, 0.02, 0.07])
    builder.add_box_visual(half_size=[0.08, 0.02, 0.07], color=[0, 0, 1])
    self.blue_cube = builder.build(name='blue_cube')
    self.blue_cube.set_pose(sapien.Pose([0.65, 0.1, 0.1]))

    self.setup_planner()
|
| 76 |
+
|
| 77 |
+
def setup_planner(self):
    """Create the mplib planner for the loaded Panda articulation.

    Builds the user link/joint name lists from the articulation and
    instantiates a Planner whose move group is the Panda hand.
    """
    links = self.robot.get_links()
    joints = self.robot.get_active_joints()
    self.planner = mplib.Planner(
        urdf="/home/rui/Documents/maniskill2_benchmark/mani_skill2/assets/descriptions/panda_v2_sphere.urdf",
        srdf="/home/rui/Documents/maniskill2_benchmark/mani_skill2/assets/descriptions/panda_v2.srdf",
        user_link_names=[link.get_name() for link in links],
        user_joint_names=[joint.get_name() for joint in joints],
        move_group="panda_hand",
        joint_vel_limits=np.ones(7),
        joint_acc_limits=np.ones(7),
    )
|
| 88 |
+
|
| 89 |
+
def follow_path(self, result):
    """Execute a planned trajectory by driving the seven arm joints.

    `result` is a planner output dict with 'position' and 'velocity'
    arrays of shape (n_steps, n_joints); the scene is rendered every
    4th physics step.
    """
    positions = result['position']
    velocities = result['velocity']
    for step in range(positions.shape[0]):
        # Gravity/Coriolis compensation so the PD drives track accurately.
        compensation = self.robot.compute_passive_force(
            gravity=True,
            coriolis_and_centrifugal=True)
        self.robot.set_qf(compensation)
        for joint, pos, vel in zip(self.active_joints[:7],
                                   positions[step], velocities[step]):
            joint.set_drive_target(pos)
            joint.set_drive_velocity_target(vel)
        self.scene.step()

        if step % 4 == 0:  # render every 4th physics step
            self.scene.update_render()
            self.viewer.render()
|
| 104 |
+
|
| 105 |
+
def open_gripper(self):
    """Open both gripper fingers and simulate for 100 steps to settle."""
    for finger in self.active_joints[-2:]:
        finger.set_drive_target(0.4)
    for tick in range(100):
        # Keep compensating passive forces while the fingers move.
        compensation = self.robot.compute_passive_force(
            gravity=True,
            coriolis_and_centrifugal=True)
        self.robot.set_qf(compensation)
        self.scene.step()
        if tick % 4 == 0:  # render every 4th physics step
            self.scene.update_render()
            self.viewer.render()
|
| 117 |
+
|
| 118 |
+
def close_gripper(self):
    """Close both gripper fingers and simulate for 100 steps to settle."""
    for finger in self.active_joints[-2:]:
        finger.set_drive_target(0)
    for tick in range(100):
        # Keep compensating passive forces while the fingers move.
        compensation = self.robot.compute_passive_force(
            gravity=True,
            coriolis_and_centrifugal=True)
        self.robot.set_qf(compensation)
        self.scene.step()
        if tick % 4 == 0:  # render every 4th physics step
            self.scene.update_render()
            self.viewer.render()
|
| 130 |
+
|
| 131 |
+
def add_point_cloud(self):
|
| 132 |
+
"""We tell the planner about the obstacle through a point cloud"""
|
| 133 |
+
|
| 134 |
+
# add_point_cloud ankor
|
| 135 |
+
# box = trimesh.creation.box([0.1, 0.4, 0.2])
|
| 136 |
+
box = trimesh.creation.box([0.08*2, 0.02*2, 0.07*2])
|
| 137 |
+
points, _ = trimesh.sample.sample_surface(box, 500)
|
| 138 |
+
print(len(points))
|
| 139 |
+
points += [0.65, 0.1, 0.1]
|
| 140 |
+
self.planner.update_point_cloud(points, resolution=0.02)
|
| 141 |
+
|
| 142 |
+
def move_to_pose_with_RRTConnect(self, pose):
    """Plan a sampling-based path to `pose`; return 0 on success, -1 otherwise.

    NOTE: path execution is currently disabled (follow_path is commented out),
    so this only checks plan feasibility.
    """
    print(type(self.planner))  # debug

    plan = self.planner.plan_pose(pose, self.robot.get_qpos(), time_step=1/250)
    if plan['status'] != "Success":
        print(plan['status'])
        return -1
    # self.follow_path(plan)
    return 0
|
| 151 |
+
|
| 152 |
+
def move_to_pose_with_screw(self, pose: Pose):
    """Plan to `pose` with screw motion, falling back to sampling-based planning.

    Args:
        pose: target end-effector pose (mplib Pose).

    Returns:
        0 on success, -1 if both screw and fallback planning fail.
    """
    print("pose in move to pose with screw: ", type(pose))
    print(self.robot.get_qpos())
    # BUG FIX: the original passed the undefined name `mppose` (left over from
    # a commented-out `mppose = Pose()`), raising NameError at runtime.
    # The screw planner must receive the caller-supplied `pose`.
    result = self.planner.plan_screw(pose, self.robot.get_qpos(), time_step=1/250)
    if result['status'] != "Success":
        # Screw planning can fail (e.g. obstacles); retry with plan_pose.
        result = self.planner.plan_pose(pose, self.robot.get_qpos(), time_step=1/250)
        if result['status'] != "Success":
            print(result['status'])
            return -1
    # self.follow_path(result)
    return 0
|
| 164 |
+
|
| 165 |
+
def move_to_pose(self, pose, with_screw):
    """Dispatch to screw-based or RRTConnect planning for `pose`.

    Returns the chosen planner's status code (0 success, -1 failure).
    """
    print("pose in move to pose: ", pose)
    planner_fn = (self.move_to_pose_with_screw
                  if with_screw
                  else self.move_to_pose_with_RRTConnect)
    return planner_fn(pose)
|
| 171 |
+
|
| 172 |
+
def pose_lst_to_mppose(self, pose):
    """Convert a flat [x, y, z, qw, qx, qy, qz] list into an mplib Pose."""
    position = np.array(pose[:3])
    quaternion = np.array(pose[3:7])
    return Pose(position, quaternion)
|
| 176 |
+
|
| 177 |
+
def demo(self, with_screw=True):
    """Visit a hover pose above each cube, opening the gripper at each one.

    Args:
        with_screw: prefer screw-motion planning with an RRT fallback.
    """
    # One target per cube: [x, y, z, qw, qx, qy, qz], gripper pointing down.
    target_poses = [
        [0.4, 0.3, 0.12, 0, 1, 0, 0],
        [0.2, -0.3, 0.08, 0, 1, 0, 0],
        [0.6, 0.1, 0.14, 0, 1, 0, 0],
    ]

    # poses = [[0.014, -0.022, 0.193, 0, 1, 0, 0],
    #          [0.2, -0.3, 0.08, 0, 1, 0, 0],
    #          [0.6, 0.1, 0.14, 0, 1, 0, 0]]
    self.add_point_cloud()

    for target in target_poses:
        pose = self.pose_lst_to_mppose(target)
        self.move_to_pose(pose, with_screw)
        self.open_gripper()
        # The full pick-and-place sequence (descend, close gripper, lift,
        # shift, descend, release, lift) is currently disabled:
        # pose[2] -= 0.12
        # self.move_to_pose(pose, with_screw)
        # self.close_gripper()
        # pose[2] += 0.12
        # self.move_to_pose(pose, with_screw)
        # pose[0] += 0.1
        # self.move_to_pose(pose, with_screw)
        # pose[2] -= 0.12
        # self.move_to_pose(pose, with_screw)
        # self.open_gripper()
        # pose[2] += 0.12
        # self.move_to_pose(pose, with_screw)
|
| 203 |
+
|
| 204 |
+
if __name__ == '__main__':
    # Entry point: build the scene and run the demo with RRTConnect planning
    # (screw-motion planning disabled).
    demo = PlanningDemo()
    demo.demo(with_screw=False)
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/plot_sample_data.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from cfdp.datasets.maniskill2_trajectory import ManiSkill2Trajectory
|
| 2 |
+
from cfdp.utils.plot_utils import plot_trajectories
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
if __name__ == "__main__":
|
| 6 |
+
# dataset_file = '/home/xuan/Dataset/Trajectory/videos/20250331_133545.h5'
|
| 7 |
+
dataset_file ='/mnt/Dataset/bowl_100_split.h5'
|
| 8 |
+
# dataset_file = 'data/panda_wristcam/GraspBowl-v1/motionplanning/bowl_60_split.h5'
|
| 9 |
+
dataset = ManiSkill2Trajectory(dataset_file = dataset_file,
|
| 10 |
+
choice="interpolate",
|
| 11 |
+
trajectory_length=16,
|
| 12 |
+
verbose=False)
|
| 13 |
+
#sample = dataset.get_random_sample(1)
|
| 14 |
+
#print(sample)
|
| 15 |
+
trajectory = dataset.get_random_trajectory(20, normalize=False)
|
| 16 |
+
plot_trajectories(trajectory, "Normalized Trajectory")
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/replay_guidance_test.py
ADDED
|
@@ -0,0 +1,198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import hydra
|
| 3 |
+
from omegaconf import DictConfig, OmegaConf
|
| 4 |
+
import gymnasium as gym
|
| 5 |
+
from tqdm import tqdm
|
| 6 |
+
import numpy as np
|
| 7 |
+
import torch
|
| 8 |
+
from cfdp.diffusion_policy.utils.loaders import get_dataset, get_loss, get_model, get_summary, freeze_torch_model_params
|
| 9 |
+
from cfdp.diffusion_policy.models import TemporalUnet, UNET_DIM_MULTS, MLPModel
|
| 10 |
+
from cfdp.motion_planner.neural_motion_planner import NeuralMotionPlannerPolicy
|
| 11 |
+
from cfdp.motion_planner.motion_planner_policy_base import ClassicalMotionPlannerPolicy
|
| 12 |
+
from cfdp.utils.plot_utils import plot_trajectories
|
| 13 |
+
from cfdp.utils.data_utils import ObsQueue, transform_quat_to_ortho6d, transform_ortho6d_to_quat
|
| 14 |
+
from cfdp.diffusion_policy.models.guide_managers import GuideManagerPath
|
| 15 |
+
from cfdp.diffusion_policy.utils.summary_trajectory import generate_trajectories
|
| 16 |
+
from cfdp.utils import observation_wrapper, inference_recorder
|
| 17 |
+
from cfdp.utils.plot_utils import TrajectoryVisualizer
|
| 18 |
+
|
| 19 |
+
import random
|
| 20 |
+
import time
|
| 21 |
+
import datetime
|
| 22 |
+
import matplotlib.pyplot as plt
|
| 23 |
+
|
| 24 |
+
# Fix RNG seeds so replay/sampling results are reproducible across runs.
np.random.seed(42)
random.seed(42)

# "${multiply:a,b}" resolver used by the Hydra configs; values may arrive as
# numeric strings, hence the float -> int round-trip before multiplying.
OmegaConf.register_new_resolver("multiply", lambda x, y: int(float(x)) * int(float(y)))
|
| 28 |
+
|
| 29 |
+
@hydra.main(version_base="1.2", config_path="../configs", config_name="config_history")
|
| 30 |
+
def main(cfg: DictConfig):
|
| 31 |
+
# print(OmegaConf.to_yaml(cfg))
|
| 32 |
+
|
| 33 |
+
########################################################################################################################
|
| 34 |
+
# Load dataset with env, robot, task
|
| 35 |
+
train_subset, train_dataloader, val_subset, val_dataloader = get_dataset(
|
| 36 |
+
cfg_dataset=cfg.dataset,
|
| 37 |
+
batch_size=cfg.batch_size,
|
| 38 |
+
val_set_size=cfg.val_set_size,
|
| 39 |
+
results_dir=cfg.results_dir,
|
| 40 |
+
save_indices=False
|
| 41 |
+
)
|
| 42 |
+
## Load prior model ##
|
| 43 |
+
diffusion_configs = dict(
|
| 44 |
+
variance_schedule=cfg.model.variance_schedule,
|
| 45 |
+
n_diffusion_steps=cfg.model.n_steps,
|
| 46 |
+
predict_epsilon=cfg.model.predict_epsilon,
|
| 47 |
+
)
|
| 48 |
+
|
| 49 |
+
unet_configs = dict(
|
| 50 |
+
state_dim=cfg.state_dim,
|
| 51 |
+
n_support_points=cfg.trajectory_length,
|
| 52 |
+
unet_input_dim=cfg.model.unet_input_dim,
|
| 53 |
+
dim_mults=UNET_DIM_MULTS[cfg.model.unet_dim_mults_option],
|
| 54 |
+
conditioning_type=cfg.model.conditioning_type,
|
| 55 |
+
conditioning_embed_dim = cfg.model.conditioning_embed_dim
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
diffusion_model = get_model(
|
| 59 |
+
model_class=cfg.model.model_class,
|
| 60 |
+
model=TemporalUnet(**unet_configs),
|
| 61 |
+
tensor_args={'device': cfg.device, 'dtype': torch.float32},
|
| 62 |
+
context_model=MLPModel(in_dim=cfg.model.context_input_dim, out_dim=cfg.model.conditioning_embed_dim, input_field='tasks', output_field='condition'),
|
| 63 |
+
**diffusion_configs,
|
| 64 |
+
**unet_configs
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
# load saved policy model
|
| 68 |
+
diffusion_model.load_state_dict(
|
| 69 |
+
torch.load(os.path.join(cfg.inference.model_dir, 'checkpoints',
|
| 70 |
+
'ema_model_current_state_dict.pth' if cfg.inference.use_ema else 'model_current_state_dict.pth'),
|
| 71 |
+
map_location=cfg.device,
|
| 72 |
+
weights_only=True)
|
| 73 |
+
)
|
| 74 |
+
diffusion_model.eval()
|
| 75 |
+
model = diffusion_model
|
| 76 |
+
|
| 77 |
+
freeze_torch_model_params(model)
|
| 78 |
+
model = torch.compile(model)
|
| 79 |
+
|
| 80 |
+
# Initialize guide manager
|
| 81 |
+
guide1 = GuideManagerPath(
|
| 82 |
+
dataset = train_subset.dataset,
|
| 83 |
+
clip_grad=True,
|
| 84 |
+
tensor_args={'device': cfg.device, 'dtype': torch.float32},
|
| 85 |
+
guidance_weight=0.2
|
| 86 |
+
)
|
| 87 |
+
guide2 = GuideManagerPath(
|
| 88 |
+
dataset = train_subset.dataset,
|
| 89 |
+
clip_grad=True,
|
| 90 |
+
tensor_args={'device': cfg.device, 'dtype': torch.float32},
|
| 91 |
+
guidance_weight=0.5
|
| 92 |
+
)
|
| 93 |
+
guide3 = GuideManagerPath(
|
| 94 |
+
dataset = train_subset.dataset,
|
| 95 |
+
clip_grad=True,
|
| 96 |
+
tensor_args={'device': cfg.device, 'dtype': torch.float32},
|
| 97 |
+
guidance_weight=1.0
|
| 98 |
+
)
|
| 99 |
+
# guide = None
|
| 100 |
+
ObservationWrapperClass = getattr(observation_wrapper, cfg.observation_wrapper)
|
| 101 |
+
obs_wrapper = ObservationWrapperClass(train_subset.dataset, camera_type=cfg.inference.camera_type)
|
| 102 |
+
|
| 103 |
+
policy1 = NeuralMotionPlannerPolicy(
|
| 104 |
+
action_dim=7,
|
| 105 |
+
model=model,
|
| 106 |
+
dataset=train_subset.dataset,
|
| 107 |
+
trajectory_length=cfg.trajectory_length,
|
| 108 |
+
inference_cfg=cfg.inference,
|
| 109 |
+
observation_wrapper=obs_wrapper,
|
| 110 |
+
guide_manager=guide1,
|
| 111 |
+
debug=cfg.debug)
|
| 112 |
+
policy2 = NeuralMotionPlannerPolicy(# without prior
|
| 113 |
+
action_dim=7,
|
| 114 |
+
model=model,
|
| 115 |
+
dataset=train_subset.dataset,
|
| 116 |
+
trajectory_length=cfg.trajectory_length,
|
| 117 |
+
inference_cfg=cfg.inference,
|
| 118 |
+
observation_wrapper=obs_wrapper,
|
| 119 |
+
guide_manager=guide2,
|
| 120 |
+
debug=cfg.debug)
|
| 121 |
+
|
| 122 |
+
policy3 = NeuralMotionPlannerPolicy(# with prior/no guidance
|
| 123 |
+
action_dim=7,
|
| 124 |
+
model=model,
|
| 125 |
+
dataset=train_subset.dataset,
|
| 126 |
+
trajectory_length=cfg.trajectory_length,
|
| 127 |
+
inference_cfg=cfg.inference,
|
| 128 |
+
observation_wrapper=obs_wrapper,
|
| 129 |
+
guide_manager=guide3,
|
| 130 |
+
debug=cfg.debug)
|
| 131 |
+
|
| 132 |
+
policy0 = NeuralMotionPlannerPolicy(# with prior/no guidance
|
| 133 |
+
action_dim=7,
|
| 134 |
+
model=model,
|
| 135 |
+
dataset=train_subset.dataset,
|
| 136 |
+
trajectory_length=cfg.trajectory_length,
|
| 137 |
+
inference_cfg=cfg.inference,
|
| 138 |
+
observation_wrapper=obs_wrapper,
|
| 139 |
+
guide_manager=None,
|
| 140 |
+
debug=cfg.debug)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
## visualizer ##
|
| 144 |
+
visualizer = TrajectoryVisualizer(normalizer=train_subset.dataset.normalizer)
|
| 145 |
+
##
|
| 146 |
+
def create_trajectory_prior(policy, obs, trajectory_ortho6d):
|
| 147 |
+
trajectory_prior = trajectory_ortho6d[0:]
|
| 148 |
+
current_tcp_pose_ortho6d = transform_quat_to_ortho6d(
|
| 149 |
+
torch.tensor(obs['extra']['tcp_pose'], device=cfg.device).unsqueeze(0)
|
| 150 |
+
).squeeze(0)
|
| 151 |
+
return torch.cat([current_tcp_pose_ortho6d.unsqueeze(0), trajectory_prior], dim=0)
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
## load recordings and run inference
|
| 155 |
+
recorder = inference_recorder.ReplayRecorder(replay_dir=cfg.test.replay_dir)
|
| 156 |
+
first_plan = True
|
| 157 |
+
policy1.reset()
|
| 158 |
+
policy2.reset()
|
| 159 |
+
policy3.reset()
|
| 160 |
+
|
| 161 |
+
# Create plots directory if it doesn't exist
|
| 162 |
+
plots_dir = os.path.join(cfg.test.replay_dir, "plots")
|
| 163 |
+
os.makedirs(plots_dir, exist_ok=True)
|
| 164 |
+
figures = []
|
| 165 |
+
|
| 166 |
+
# Timing metrics
|
| 167 |
+
with_prior_times = []
|
| 168 |
+
without_prior_times = []
|
| 169 |
+
|
| 170 |
+
episode_count = 0
|
| 171 |
+
recorder.set_episode(episode_count)
|
| 172 |
+
|
| 173 |
+
# Generate multiple prior using the initial planning
|
| 174 |
+
obs, trajectory_prior, done, planned_path, = recorder.next_step()
|
| 175 |
+
policy0.plan_path_with_history(obs, do_normalize=True) # without prior
|
| 176 |
+
policy1.plan_path_with_history(obs, do_normalize=True) # with prior
|
| 177 |
+
policy2.plan_path_with_history(obs, do_normalize=True) # without prior
|
| 178 |
+
policy3.plan_path_with_history(obs, do_normalize=True) # with prior/no guidance
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
result_to_plot = [policy0.all_trajs[0:1], policy1.all_trajs[0:2], policy2.all_trajs[0:2], policy3.all_trajs[0:3]]
|
| 182 |
+
|
| 183 |
+
title_list = ['Trajectory samples under different guidance level', 'Right View']
|
| 184 |
+
fig = visualizer.plot_trajectories_dual_view(result_to_plot,
|
| 185 |
+
policy0.point_cloud,
|
| 186 |
+
label_list=['no guidance (λ=0)', 'weak guidance (λ=0.2)', 'mid guidance (λ=0.5)', 'strong guidance (λ=1.0)'],
|
| 187 |
+
color_list =['green', 'orange', 'blue', 'deeppink'],
|
| 188 |
+
title=title_list,
|
| 189 |
+
plot_2d=True)
|
| 190 |
+
# plt.show()
|
| 191 |
+
save_path = os.path.join(plots_dir, f"episode_{episode_count}_trajectories_guidance.png")
|
| 192 |
+
fig.savefig(save_path, dpi=300, bbox_inches='tight')
|
| 193 |
+
print(f"Saved trajectory plot to {save_path}")
|
| 194 |
+
# plt.show()
|
| 195 |
+
plt.close(fig)
|
| 196 |
+
|
| 197 |
+
if __name__ == "__main__":
|
| 198 |
+
main()
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/replay_planning_test.py
ADDED
|
@@ -0,0 +1,264 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import hydra
|
| 3 |
+
from omegaconf import DictConfig, OmegaConf
|
| 4 |
+
import gymnasium as gym
|
| 5 |
+
from tqdm import tqdm
|
| 6 |
+
import numpy as np
|
| 7 |
+
import torch
|
| 8 |
+
from cfdp.diffusion_policy.utils.loaders import get_dataset, get_loss, get_model, get_summary, freeze_torch_model_params
|
| 9 |
+
from cfdp.diffusion_policy.models import TemporalUnet, UNET_DIM_MULTS, MLPModel
|
| 10 |
+
from cfdp.motion_planner.neural_motion_planner import NeuralMotionPlannerPolicy
|
| 11 |
+
from cfdp.motion_planner.motion_planner_policy_base import ClassicalMotionPlannerPolicy
|
| 12 |
+
from cfdp.utils.plot_utils import plot_trajectories
|
| 13 |
+
from cfdp.utils.data_utils import ObsQueue, transform_quat_to_ortho6d, transform_ortho6d_to_quat
|
| 14 |
+
from cfdp.diffusion_policy.models.guide_managers import GuideManagerPath
|
| 15 |
+
from cfdp.diffusion_policy.utils.summary_trajectory import generate_trajectories
|
| 16 |
+
from cfdp.utils import observation_wrapper, inference_recorder
|
| 17 |
+
from cfdp.utils.plot_utils import TrajectoryVisualizer
|
| 18 |
+
|
| 19 |
+
import random
|
| 20 |
+
import time
|
| 21 |
+
import datetime
|
| 22 |
+
import matplotlib.pyplot as plt
|
| 23 |
+
|
| 24 |
+
# Fix RNG seeds so replay/sampling results are reproducible across runs.
np.random.seed(42)
random.seed(42)

# "${multiply:a,b}" resolver used by the Hydra configs; values may arrive as
# numeric strings, hence the float -> int round-trip before multiplying.
OmegaConf.register_new_resolver("multiply", lambda x, y: int(float(x)) * int(float(y)))
|
| 28 |
+
|
| 29 |
+
@hydra.main(version_base="1.2", config_path="../configs", config_name="config_history")
|
| 30 |
+
def main(cfg: DictConfig):
|
| 31 |
+
# print(OmegaConf.to_yaml(cfg))
|
| 32 |
+
|
| 33 |
+
########################################################################################################################
|
| 34 |
+
# Load dataset with env, robot, task
|
| 35 |
+
train_subset, train_dataloader, val_subset, val_dataloader = get_dataset(
|
| 36 |
+
cfg_dataset=cfg.dataset,
|
| 37 |
+
batch_size=cfg.batch_size,
|
| 38 |
+
val_set_size=cfg.val_set_size,
|
| 39 |
+
results_dir=cfg.results_dir,
|
| 40 |
+
save_indices=False
|
| 41 |
+
)
|
| 42 |
+
## Load prior model ##
|
| 43 |
+
diffusion_configs = dict(
|
| 44 |
+
variance_schedule=cfg.model.variance_schedule,
|
| 45 |
+
n_diffusion_steps=cfg.model.n_steps,
|
| 46 |
+
predict_epsilon=cfg.model.predict_epsilon,
|
| 47 |
+
)
|
| 48 |
+
|
| 49 |
+
unet_configs = dict(
|
| 50 |
+
state_dim=cfg.state_dim,
|
| 51 |
+
n_support_points=cfg.trajectory_length,
|
| 52 |
+
unet_input_dim=cfg.model.unet_input_dim,
|
| 53 |
+
dim_mults=UNET_DIM_MULTS[cfg.model.unet_dim_mults_option],
|
| 54 |
+
conditioning_type=cfg.model.conditioning_type,
|
| 55 |
+
conditioning_embed_dim = cfg.model.conditioning_embed_dim
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
diffusion_model = get_model(
|
| 59 |
+
model_class=cfg.model.model_class,
|
| 60 |
+
model=TemporalUnet(**unet_configs),
|
| 61 |
+
tensor_args={'device': cfg.device, 'dtype': torch.float32},
|
| 62 |
+
context_model=MLPModel(in_dim=cfg.model.context_input_dim, out_dim=cfg.model.conditioning_embed_dim, input_field='tasks', output_field='condition'),
|
| 63 |
+
**diffusion_configs,
|
| 64 |
+
**unet_configs
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
# load saved policy model
|
| 68 |
+
diffusion_model.load_state_dict(
|
| 69 |
+
torch.load(os.path.join(cfg.inference.model_dir, 'checkpoints',
|
| 70 |
+
'ema_model_current_state_dict.pth' if cfg.inference.use_ema else 'model_current_state_dict.pth'),
|
| 71 |
+
map_location=cfg.device,
|
| 72 |
+
weights_only=True)
|
| 73 |
+
)
|
| 74 |
+
diffusion_model.eval()
|
| 75 |
+
model = diffusion_model
|
| 76 |
+
|
| 77 |
+
freeze_torch_model_params(model)
|
| 78 |
+
model = torch.compile(model)
|
| 79 |
+
|
| 80 |
+
# Initialize guide manager
|
| 81 |
+
guide = GuideManagerPath(
|
| 82 |
+
dataset = train_subset.dataset,
|
| 83 |
+
clip_grad=True,
|
| 84 |
+
tensor_args={'device': cfg.device, 'dtype': torch.float32},
|
| 85 |
+
)
|
| 86 |
+
# guide = None
|
| 87 |
+
ObservationWrapperClass = getattr(observation_wrapper, cfg.observation_wrapper)
|
| 88 |
+
obs_wrapper = ObservationWrapperClass(train_subset.dataset, camera_type=cfg.inference.camera_type)
|
| 89 |
+
|
| 90 |
+
policy = NeuralMotionPlannerPolicy(
|
| 91 |
+
action_dim=7,
|
| 92 |
+
model=model,
|
| 93 |
+
dataset=train_subset.dataset,
|
| 94 |
+
trajectory_length=cfg.trajectory_length,
|
| 95 |
+
inference_cfg=cfg.inference,
|
| 96 |
+
observation_wrapper=obs_wrapper,
|
| 97 |
+
guide_manager=guide,
|
| 98 |
+
debug=cfg.debug)
|
| 99 |
+
policy2 = NeuralMotionPlannerPolicy(# without prior
|
| 100 |
+
action_dim=7,
|
| 101 |
+
model=model,
|
| 102 |
+
dataset=train_subset.dataset,
|
| 103 |
+
trajectory_length=cfg.trajectory_length,
|
| 104 |
+
inference_cfg=cfg.inference,
|
| 105 |
+
observation_wrapper=obs_wrapper,
|
| 106 |
+
guide_manager=guide,
|
| 107 |
+
debug=cfg.debug)
|
| 108 |
+
|
| 109 |
+
policy3 = NeuralMotionPlannerPolicy(# with prior/no guidance
|
| 110 |
+
action_dim=7,
|
| 111 |
+
model=model,
|
| 112 |
+
dataset=train_subset.dataset,
|
| 113 |
+
trajectory_length=cfg.trajectory_length,
|
| 114 |
+
inference_cfg=cfg.inference,
|
| 115 |
+
observation_wrapper=obs_wrapper,
|
| 116 |
+
guide_manager=None,
|
| 117 |
+
debug=cfg.debug)
|
| 118 |
+
|
| 119 |
+
policy4 = NeuralMotionPlannerPolicy(# without prior/no guidance
|
| 120 |
+
action_dim=7,
|
| 121 |
+
model=model,
|
| 122 |
+
dataset=train_subset.dataset,
|
| 123 |
+
trajectory_length=cfg.trajectory_length,
|
| 124 |
+
inference_cfg=cfg.inference,
|
| 125 |
+
observation_wrapper=obs_wrapper,
|
| 126 |
+
guide_manager=None,
|
| 127 |
+
debug=cfg.debug)
|
| 128 |
+
|
| 129 |
+
## visualizer ##
|
| 130 |
+
visualizer = TrajectoryVisualizer(normalizer=train_subset.dataset.normalizer)
|
| 131 |
+
##
|
| 132 |
+
def create_trajectory_prior(policy, obs, trajectory_ortho6d):
|
| 133 |
+
trajectory_index = policy.get_current_path_index()
|
| 134 |
+
trajectory_prior = trajectory_ortho6d[trajectory_index:]
|
| 135 |
+
current_tcp_pose_ortho6d = transform_quat_to_ortho6d(
|
| 136 |
+
torch.tensor(obs['extra']['tcp_pose'], device=cfg.device).unsqueeze(0)
|
| 137 |
+
).squeeze(0)
|
| 138 |
+
return torch.cat([current_tcp_pose_ortho6d.unsqueeze(0), trajectory_prior], dim=0)
|
| 139 |
+
## load recordings and run inference
|
| 140 |
+
recorder = inference_recorder.ReplayRecorder(replay_dir=cfg.test.replay_dir)
|
| 141 |
+
first_plan = True
|
| 142 |
+
policy.reset()
|
| 143 |
+
policy2.reset()
|
| 144 |
+
|
| 145 |
+
# Create plots directory if it doesn't exist
|
| 146 |
+
plots_dir = os.path.join(cfg.test.replay_dir, "plots")
|
| 147 |
+
os.makedirs(plots_dir, exist_ok=True)
|
| 148 |
+
figures = []
|
| 149 |
+
|
| 150 |
+
# Timing metrics
|
| 151 |
+
with_prior_times = []
|
| 152 |
+
without_prior_times = []
|
| 153 |
+
|
| 154 |
+
episode_count = 0
|
| 155 |
+
recorder.set_episode(episode_count)
|
| 156 |
+
|
| 157 |
+
for step in range(200):
|
| 158 |
+
obs, trajectory_prior, done, planned_path, = recorder.next_step() #TODO: select episode
|
| 159 |
+
if done:
|
| 160 |
+
policy.reset()
|
| 161 |
+
policy2.reset()
|
| 162 |
+
policy3.reset()
|
| 163 |
+
policy4.reset()
|
| 164 |
+
print("reset")
|
| 165 |
+
first_plan = True
|
| 166 |
+
episode_count += 1
|
| 167 |
+
|
| 168 |
+
if first_plan:
|
| 169 |
+
policy.plan_path_with_history(obs, do_normalize=True) # with prior
|
| 170 |
+
policy2.plan_path_with_history(obs, do_normalize=True) # without prior
|
| 171 |
+
policy3.plan_path_with_history(obs, do_normalize=True) # with prior/no guidance
|
| 172 |
+
policy4.plan_path_with_history(obs, do_normalize=True) # without prior/no guidance
|
| 173 |
+
first_plan = False
|
| 174 |
+
else:
|
| 175 |
+
# Measure planning time with prior
|
| 176 |
+
# trajectory_prior = create_trajectory_prior(policy, obs, planned_path)
|
| 177 |
+
denoising_steps = 2
|
| 178 |
+
|
| 179 |
+
start_time = time.time()
|
| 180 |
+
|
| 181 |
+
policy.plan_path_with_history(obs,
|
| 182 |
+
trajectory_prior=trajectory_prior,
|
| 183 |
+
do_normalize=True,
|
| 184 |
+
timestep=denoising_steps, # denoising step
|
| 185 |
+
choice='interpolate') #with prior
|
| 186 |
+
end_time = time.time()
|
| 187 |
+
with_prior_time = end_time - start_time
|
| 188 |
+
if step >30:
|
| 189 |
+
with_prior_times.append(with_prior_time)
|
| 190 |
+
|
| 191 |
+
# Measure planning time without prior
|
| 192 |
+
start_time = time.time()
|
| 193 |
+
policy2.plan_path_with_history(obs, do_normalize=True) #without prior
|
| 194 |
+
end_time = time.time()
|
| 195 |
+
without_prior_time = end_time - start_time
|
| 196 |
+
without_prior_times.append(without_prior_time)
|
| 197 |
+
|
| 198 |
+
policy3.plan_path_with_history(obs,
|
| 199 |
+
trajectory_prior=trajectory_prior,
|
| 200 |
+
do_normalize=True,
|
| 201 |
+
timestep=denoising_steps, #
|
| 202 |
+
choice='interpolate') #with prior/no guidance
|
| 203 |
+
policy4.plan_path_with_history(obs, do_normalize=True) #without prior/no guidance
|
| 204 |
+
|
| 205 |
+
# Print timing information every 10 steps
|
| 206 |
+
if len(with_prior_times) > 0 and len(with_prior_times) % 10 == 0:
|
| 207 |
+
avg_with_prior = sum(with_prior_times) / len(with_prior_times)
|
| 208 |
+
avg_without_prior = sum(without_prior_times) / len(without_prior_times)
|
| 209 |
+
print(f"Step {step}, Avg times - With prior: {avg_with_prior:.4f}s, Without prior: {avg_without_prior:.4f}s")
|
| 210 |
+
|
| 211 |
+
# visualizer.plot_trajectory(policy.planned_path, obs, policy.point_cloud)
|
| 212 |
+
# Create a new figure for this step
|
| 213 |
+
title_list = ['Trajectory generated with guidance (λ=0.5)', 'Right View']
|
| 214 |
+
if step%8 == 0:
|
| 215 |
+
if step == 0:
|
| 216 |
+
fig = visualizer.plot_trajectories_dual_view([policy.all_trajs, policy3.all_trajs],
|
| 217 |
+
policy.point_cloud, label_list=['guidance only', 'no prior, no guidanc'], color_list=['orange', 'green'],
|
| 218 |
+
title=title_list)
|
| 219 |
+
else:
|
| 220 |
+
fig = visualizer.plot_trajectories_dual_view([policy.all_trajs, policy2.all_trajs, policy3.all_trajs, policy4.all_trajs],
|
| 221 |
+
policy.point_cloud, label_list=['prior + guidance', 'guidance only', 'prior only', 'no prior, no guidance'],
|
| 222 |
+
color_list =['red', 'orange', 'blue', 'green'],
|
| 223 |
+
title=title_list)
|
| 224 |
+
# plt.show()
|
| 225 |
+
save_path = os.path.join(plots_dir, f"episode_{episode_count}_trajectories_{step}.png")
|
| 226 |
+
fig.savefig(save_path, dpi=300, bbox_inches='tight')
|
| 227 |
+
print(f"Saved trajectory plot to {save_path}")
|
| 228 |
+
# plt.show()
|
| 229 |
+
plt.close(fig)
|
| 230 |
+
|
| 231 |
+
# # Calculate and print final timing statistics
|
| 232 |
+
# if with_prior_times and without_prior_times:
|
| 233 |
+
# avg_with_prior = sum(with_prior_times) / len(with_prior_times)
|
| 234 |
+
# avg_without_prior = sum(without_prior_times) / len(without_prior_times)
|
| 235 |
+
|
| 236 |
+
# min_with_prior = min(with_prior_times)
|
| 237 |
+
# max_with_prior = max(with_prior_times)
|
| 238 |
+
|
| 239 |
+
# min_without_prior = min(without_prior_times)
|
| 240 |
+
# max_without_prior = max(without_prior_times)
|
| 241 |
+
|
| 242 |
+
# std_with_prior = np.std(with_prior_times)
|
| 243 |
+
# std_without_prior = np.std(without_prior_times)
|
| 244 |
+
|
| 245 |
+
# print("\n===== Timing Statistics =====")
|
| 246 |
+
# print(f"Planning with prior: Avg = {avg_with_prior:.4f}s, Std = {std_with_prior:.4f}s, Min = {min_with_prior:.4f}s, Max = {max_with_prior:.4f}s")
|
| 247 |
+
# print(f"Planning without prior: Avg = {avg_without_prior:.4f}s, Std = {std_without_prior:.4f}s, Min = {min_without_prior:.4f}s, Max = {max_without_prior:.4f}s")
|
| 248 |
+
# print(f"Speedup with prior: {avg_without_prior/avg_with_prior:.2f}x")
|
| 249 |
+
|
| 250 |
+
# # Save timing results to a file
|
| 251 |
+
# with open(os.path.join(plots_dir, "timing_results.txt"), "w") as f:
|
| 252 |
+
# f.write("===== Timing Statistics =====\n")
|
| 253 |
+
# f.write(f"Planning with prior: Avg = {avg_with_prior:.4f}s, Min = {min_with_prior:.4f}s, Max = {max_with_prior:.4f}s\n")
|
| 254 |
+
# f.write(f"Planning without prior: Avg = {avg_without_prior:.4f}s, Min = {min_without_prior:.4f}s, Max = {max_without_prior:.4f}s\n")
|
| 255 |
+
# f.write(f"Speedup with prior: {avg_without_prior/avg_with_prior:.2f}x\n")
|
| 256 |
+
|
| 257 |
+
# # Write raw timing data
|
| 258 |
+
# f.write("\n===== Raw Timing Data =====\n")
|
| 259 |
+
# f.write("Step,WithPrior,WithoutPrior\n")
|
| 260 |
+
# for i, (wp, wop) in enumerate(zip(with_prior_times, without_prior_times)):
|
| 261 |
+
# f.write(f"{i},{wp:.6f},{wop:.6f}\n")
|
| 262 |
+
|
| 263 |
+
if __name__ == "__main__":
|
| 264 |
+
main()
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/test_clutter_env.py
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pathlib
|
| 2 |
+
import time
|
| 3 |
+
import gymnasium as gym
|
| 4 |
+
from tqdm import tqdm
|
| 5 |
+
import numpy as np
|
| 6 |
+
import mani_skill2.envs
|
| 7 |
+
from mani_skill2.utils.wrappers.record import RecordEpisode
|
| 8 |
+
import cfdp.envs as envs # absolute import
|
| 9 |
+
import argparse
|
| 10 |
+
import os
|
| 11 |
+
import cv2
|
| 12 |
+
from cfdp.envs import ENVS_DIR
|
| 13 |
+
import yaml
|
| 14 |
+
|
| 15 |
+
def get_args():
|
| 16 |
+
# Add argument parser
|
| 17 |
+
parser = argparse.ArgumentParser(description='Motion Planning Environment Demo')
|
| 18 |
+
parser.add_argument('--env-id', type=str, default='SceneGenerator-v0', help='Environment ID')
|
| 19 |
+
parser.add_argument('--obs-mode', type=str, default='image', help='Observation mode') #'state_dict', 'image'
|
| 20 |
+
parser.add_argument('--control-mode', type=str, default='pd_joint_delta_pos',
|
| 21 |
+
choices=['pd_joint_delta_pos', 'pd_ee_target_delta_pos'],
|
| 22 |
+
help='Control mode')
|
| 23 |
+
parser.add_argument('--reward-mode', type=str, default='dense', help='Reward mode')
|
| 24 |
+
parser.add_argument('--robot-noise', type=float, default=0.2, help='Robot initial position noise')
|
| 25 |
+
parser.add_argument('--planner-type', type=str, default='rrt', help='Motion planner type')
|
| 26 |
+
parser.add_argument('--show-obs', action='store_true', help='Show observation keys')
|
| 27 |
+
parser.add_argument('--episodes', type=int, default=10, help='Number of episodes to run')
|
| 28 |
+
parser.add_argument('--render', action='store_true', help='Enable rendering')
|
| 29 |
+
parser.add_argument('--record', action='store_true', help='Enable video recording')
|
| 30 |
+
parser.add_argument('--record-dir', type=str, default="recordings", help='Directory to store recordings')
|
| 31 |
+
parser.add_argument('--distractor-count', type=int, default=15, help='Number of distractors to add')
|
| 32 |
+
parser.add_argument('--base-object-config-path', type=str, default="scene/base_scene.yaml")
|
| 33 |
+
args = parser.parse_args()
|
| 34 |
+
|
| 35 |
+
return args
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def print_nested_keys(d, prefix=''):
|
| 39 |
+
for key in d.keys():
|
| 40 |
+
print(f"{prefix}{key}")
|
| 41 |
+
if isinstance(d[key], dict):
|
| 42 |
+
print_nested_keys(d[key], prefix=f"{prefix} ")
|
| 43 |
+
|
| 44 |
+
def save_camera_img(img, img_path):
|
| 45 |
+
# cv2.imshow("Camera Image", img)
|
| 46 |
+
# cv2.waitKey(0)
|
| 47 |
+
# Convert from RGB to BGR format for OpenCV
|
| 48 |
+
img_bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
|
| 49 |
+
cv2.imwrite(img_path, img_bgr)
|
| 50 |
+
print(f"Saved image to {img_path}")
|
| 51 |
+
|
| 52 |
+
def main():
|
| 53 |
+
args = get_args()
|
| 54 |
+
|
| 55 |
+
# Create environment
|
| 56 |
+
env = gym.make(args.env_id,
|
| 57 |
+
obs_mode=args.obs_mode,
|
| 58 |
+
reward_mode=args.reward_mode,
|
| 59 |
+
control_mode=args.control_mode,
|
| 60 |
+
enable_shadow=False,
|
| 61 |
+
render_mode="cameras" if args.record else "human",
|
| 62 |
+
robot_init_qpos_noise=args.robot_noise,
|
| 63 |
+
obstacle_configs=[],
|
| 64 |
+
distractor_count=args.distractor_count,
|
| 65 |
+
base_object_config_path=args.base_object_config_path,
|
| 66 |
+
scene_config_serialization_mode='dict',
|
| 67 |
+
minimum_object_aabb_separation=0.01,
|
| 68 |
+
table_scale=[0.7, 0.7]
|
| 69 |
+
)
|
| 70 |
+
|
| 71 |
+
# if args.record:
|
| 72 |
+
# env = RecordEpisode(env, output_dir=args.record_dir, save_trajectory=False, save_video=True)
|
| 73 |
+
# # object_config_path=os.path.join(ENVS_DIR, "object_pick_env.yaml"))
|
| 74 |
+
|
| 75 |
+
options={}
|
| 76 |
+
# Run episodes
|
| 77 |
+
for episode in range(20):
|
| 78 |
+
obs, info = env.reset(options=options)
|
| 79 |
+
if args.show_obs:
|
| 80 |
+
print("Observation structure:")
|
| 81 |
+
print_nested_keys(obs)
|
| 82 |
+
## The 'reconfigure' option causes a segfault...
|
| 83 |
+
## I give up on this for now '\(-.-)/`
|
| 84 |
+
# Just remake the env if you need a different set of objects
|
| 85 |
+
# options={'reconfigure': True}
|
| 86 |
+
print(info)
|
| 87 |
+
scene_config = info['scene_config']
|
| 88 |
+
|
| 89 |
+
if args.record:
|
| 90 |
+
# Create env_config directory and parents if they don't exist
|
| 91 |
+
config_dir = pathlib.Path(ENVS_DIR) / "env_config"
|
| 92 |
+
config_dir.mkdir(parents=True, exist_ok=True)
|
| 93 |
+
|
| 94 |
+
# Save scene config to env_config subdirectory
|
| 95 |
+
with open(config_dir / f"{episode}_scene_config.yaml", 'w') as f:
|
| 96 |
+
yaml.dump(scene_config, f)
|
| 97 |
+
|
| 98 |
+
render_image = env.render_rgb_array()
|
| 99 |
+
save_camera_img(render_image, str(config_dir / f"{episode}_scene_img.png"))
|
| 100 |
+
|
| 101 |
+
time.sleep(0.1)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
# action = env.action_space.sample()
|
| 105 |
+
# env.step(action)
|
| 106 |
+
for i in range(20):
|
| 107 |
+
# action = env.action_space.sample()
|
| 108 |
+
# obs, reward, done, truncated, info = env.step(action)
|
| 109 |
+
if args.render:
|
| 110 |
+
env.render()
|
| 111 |
+
# if done or truncated:
|
| 112 |
+
# break
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
if __name__ == "__main__":
|
| 116 |
+
main()
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/test_dataset.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
from cfdp.diffusion_policy.utils.loaders import get_dataset
|
| 3 |
+
|
| 4 |
+
@pytest.fixture
|
| 5 |
+
def dataset_params():
|
| 6 |
+
return {
|
| 7 |
+
'dataset_class': 'ManiSkill2Trajectory',
|
| 8 |
+
'dataset_file': '/home/xuan/Dataset/Trajectory/videos/20250326_131842.h5',
|
| 9 |
+
'batch_size': 2,
|
| 10 |
+
'val_set_size': 0.2
|
| 11 |
+
}
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def test_get_dataset(dataset_params):
|
| 15 |
+
# Call function
|
| 16 |
+
train_subset, train_dataloader, val_subset, val_dataloader = get_dataset(
|
| 17 |
+
**dataset_params
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
# Check outputs
|
| 21 |
+
assert train_subset is not None
|
| 22 |
+
assert train_dataloader is not None
|
| 23 |
+
assert val_subset is not None
|
| 24 |
+
assert val_dataloader is not None
|
| 25 |
+
|
| 26 |
+
# Check dataset split ratio
|
| 27 |
+
total_size = len(train_subset) + len(val_subset)
|
| 28 |
+
expected_val_size = int(total_size * dataset_params['val_set_size'])
|
| 29 |
+
assert len(val_subset) == expected_val_size
|
| 30 |
+
|
| 31 |
+
# Check batch size
|
| 32 |
+
for batch in train_dataloader:
|
| 33 |
+
assert len(batch) == 4
|
| 34 |
+
break
|
| 35 |
+
|
| 36 |
+
for batch in val_dataloader:
|
| 37 |
+
assert len(batch) == 4
|
| 38 |
+
break
|
| 39 |
+
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/test_envs.py
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gymnasium as gym
|
| 2 |
+
from tqdm import tqdm
|
| 3 |
+
import numpy as np
|
| 4 |
+
import mani_skill2.envs
|
| 5 |
+
import cfdp.envs as envs # absolute import
|
| 6 |
+
import argparse
|
| 7 |
+
import os
|
| 8 |
+
import cv2
|
| 9 |
+
from cfdp.envs import ENVS_DIR
|
| 10 |
+
from cfdp.utils.observation_wrapper import ObservationWrapper
|
| 11 |
+
from cfdp.datasets.maniskill2_trajectory import ManiSkill2Trajectory
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def print_nested_keys(d, prefix=''):
|
| 15 |
+
for key in d.keys():
|
| 16 |
+
print(f"{prefix}{key}")
|
| 17 |
+
if isinstance(d[key], dict):
|
| 18 |
+
print_nested_keys(d[key], prefix=f"{prefix} ")
|
| 19 |
+
|
| 20 |
+
def save_camera_img(img, img_path):
|
| 21 |
+
# cv2.imshow("Camera Image", img)
|
| 22 |
+
# cv2.waitKey(0)
|
| 23 |
+
# Convert from RGB to BGR format for OpenCV
|
| 24 |
+
img_bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
|
| 25 |
+
cv2.imwrite(img_path, img_bgr)
|
| 26 |
+
print(f"Saved image to {img_path}")
|
| 27 |
+
|
| 28 |
+
def main():
|
| 29 |
+
# Add argument parser
|
| 30 |
+
parser = argparse.ArgumentParser(description='Motion Planning Environment Demo')
|
| 31 |
+
# parser.add_argument('--env-id', type=str, default='CustomizedPick-v0', help='Environment ID')
|
| 32 |
+
parser.add_argument('--env-id', type=str, default='ShelfPick-v0', help='Environment ID')
|
| 33 |
+
parser.add_argument('--obs-mode', type=str, default='image', help='Observation mode') #'state_dict', 'image'
|
| 34 |
+
parser.add_argument('--control-mode', type=str, default='pd_ee_delta_pose_align',
|
| 35 |
+
choices=['pd_joint_delta_pos', 'pd_ee_target_delta_pos', 'pd_ee_delta_pose_align'],
|
| 36 |
+
help='Control mode')
|
| 37 |
+
parser.add_argument('--reward-mode', type=str, default='dense', help='Reward mode')
|
| 38 |
+
parser.add_argument('--robot-noise', type=float, default=0.2, help='Robot initial position noise')
|
| 39 |
+
parser.add_argument('--planner-type', type=str, default='rrt', help='Motion planner type')
|
| 40 |
+
parser.add_argument('--show-obs', action='store_true', help='Show observation keys')
|
| 41 |
+
parser.add_argument('--episodes', type=int, default=10, help='Number of episodes to run')
|
| 42 |
+
parser.add_argument('--render', action='store_true', help='Enable rendering')
|
| 43 |
+
parser.add_argument('--record', action='store_true', help='Enable video recording')
|
| 44 |
+
args = parser.parse_args()
|
| 45 |
+
# Create environment
|
| 46 |
+
env = gym.make(args.env_id,
|
| 47 |
+
obs_mode=args.obs_mode,
|
| 48 |
+
reward_mode=args.reward_mode,
|
| 49 |
+
control_mode=args.control_mode,
|
| 50 |
+
enable_shadow=False,
|
| 51 |
+
render_mode="cameras" if args.record else "human",
|
| 52 |
+
robot_init_qpos_noise=args.robot_noise,
|
| 53 |
+
obstacle_configs=[],
|
| 54 |
+
# object_config_path=os.path.join(ENVS_DIR, "env_config", "empty_scene"),
|
| 55 |
+
# object_config_path=os.path.join(ENVS_DIR, "env_config", "smallbox_distractor"),
|
| 56 |
+
object_config_path=os.path.join(ENVS_DIR, "env_config", "shelf"),
|
| 57 |
+
# object_config_path=os.path.join(ENVS_DIR, "env_config", "distractor25"),
|
| 58 |
+
# object_config_path=os.path.join(ENVS_DIR, "env_config", "distractorbox"),
|
| 59 |
+
# object_config_path=os.path.join(ENVS_DIR, "env_config"),
|
| 60 |
+
# object_config_path=os.path.join(ENVS_DIR, "concept_env_config"),
|
| 61 |
+
create_obstacle_point_cloud=True,
|
| 62 |
+
is_demo=True)
|
| 63 |
+
|
| 64 |
+
# env = gym.make('RandomObstacleReach-v0',
|
| 65 |
+
# obs_mode=args.obs_mode,
|
| 66 |
+
# reward_mode=args.reward_mode,
|
| 67 |
+
# control_mode=args.control_mode,
|
| 68 |
+
# enable_shadow=False,
|
| 69 |
+
# render_mode="cameras" if args.record else "human",
|
| 70 |
+
# robot_init_qpos_noise=args.robot_noise,
|
| 71 |
+
# obstacle_configs=[])
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
obstacle_configs = [
|
| 75 |
+
{
|
| 76 |
+
'type': 'sphere',
|
| 77 |
+
'radius': 0.18,
|
| 78 |
+
'pose': [0.6, 0.0, 0.1],
|
| 79 |
+
'color': (0, 1, 0),
|
| 80 |
+
'n_points': 500
|
| 81 |
+
}
|
| 82 |
+
]
|
| 83 |
+
|
| 84 |
+
# env = gym.make('RandomObstacleReach-v0',
|
| 85 |
+
# obs_mode=args.obs_mode,
|
| 86 |
+
# reward_mode=args.reward_mode,
|
| 87 |
+
# control_mode=args.control_mode,
|
| 88 |
+
# enable_shadow=False,
|
| 89 |
+
# render_mode="cameras" if args.record else "human",
|
| 90 |
+
# robot_init_qpos_noise=args.robot_noise,
|
| 91 |
+
# obstacle_configs=obstacle_configs)
|
| 92 |
+
|
| 93 |
+
# # Test observation wrapper
|
| 94 |
+
# dataset_file ="/mnt/Dataset/20250402_131752.h5"
|
| 95 |
+
# dataset = ManiSkill2Trajectory(dataset_file = dataset_file,
|
| 96 |
+
# choice="interpolate",
|
| 97 |
+
# trajectory_length=16,
|
| 98 |
+
# verbose=False)
|
| 99 |
+
# obs_wrapper = ObservationWrapper(dataset)
|
| 100 |
+
|
| 101 |
+
##### --- Run episodes --- ####
|
| 102 |
+
obs, info = env.reset(options={"reconfigure": True})
|
| 103 |
+
if args.show_obs:
|
| 104 |
+
print("Observation structure:")
|
| 105 |
+
print_nested_keys(obs)
|
| 106 |
+
# save_camera_img(obs['image']['base_camera']['Color'],
|
| 107 |
+
# f"/home/xuan/Documents/test_result/scene_images/camera_img_{episode}.png")
|
| 108 |
+
|
| 109 |
+
import pathlib
|
| 110 |
+
save_dir = pathlib.Path("/home/xuan/Documents/test_result/scene_images")
|
| 111 |
+
save_dir.mkdir(parents=True, exist_ok=True)
|
| 112 |
+
import time
|
| 113 |
+
|
| 114 |
+
for episode in range(args.episodes):
|
| 115 |
+
obs, info = env.reset(options={"reconfigure": True})
|
| 116 |
+
# print_nested_keys(obs)
|
| 117 |
+
|
| 118 |
+
# render_image = env.render_rgb_array()
|
| 119 |
+
# timestamp = time.strftime("%H%M%S")
|
| 120 |
+
# save_camera_img(render_image, str(save_dir / f"{timestamp}_scene_img.png"))
|
| 121 |
+
# obs_wrapper.reset()
|
| 122 |
+
|
| 123 |
+
for _ in range(100):
|
| 124 |
+
# action = env.action_space.sample()
|
| 125 |
+
# obs, reward, done, truncated, info = env.step(action)
|
| 126 |
+
|
| 127 |
+
# obs_wrapper.update_observation(obs)
|
| 128 |
+
if args.render:
|
| 129 |
+
env.render()
|
| 130 |
+
# if done or truncated:
|
| 131 |
+
# break
|
| 132 |
+
time.sleep(1)
|
| 133 |
+
# env.reset()
|
| 134 |
+
|
| 135 |
+
env.close()
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
if __name__ == "__main__":
|
| 139 |
+
main()
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/test_fcm.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
from cfdp.utils.fcm.vlc import VLC
|
| 4 |
+
import cv2
|
| 5 |
+
import mani_skill2.envs
|
| 6 |
+
import numpy as np
|
| 7 |
+
from cfdp.envs import ENVS_DIR
|
| 8 |
+
|
| 9 |
+
vlc = VLC(
|
| 10 |
+
num_levels=3, contrast_filt_sigma=1,
|
| 11 |
+
contrast_pool_sigma=3, color_pool_sigma=3,
|
| 12 |
+
w_color=0.220, w_contrast=0.0660, w_orient=0.0269,
|
| 13 |
+
w_xyz=0.100, add_xyz=None
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
# image_dir = os.path.join(ENVS_DIR, "env_config", "smallbox_distractor")
|
| 17 |
+
# image_dir = os.path.join(ENVS_DIR, "env_config", "distractorbox")
|
| 18 |
+
# image_dir = os.path.join(ENVS_DIR, "env_config", "distractor25")
|
| 19 |
+
image_dir = os.path.join(ENVS_DIR, "env_config", "empty_table")
|
| 20 |
+
# Use os.listdir to get files and filter for PNG files
|
| 21 |
+
image_files = [os.path.join(image_dir, f) for f in os.listdir(image_dir) if f.lower().endswith('.png')]
|
| 22 |
+
print("image_files:", image_files)
|
| 23 |
+
|
| 24 |
+
fcm_values = list()
|
| 25 |
+
|
| 26 |
+
for image_file in image_files:
|
| 27 |
+
image = cv2.imread(str(image_file))
|
| 28 |
+
|
| 29 |
+
# crop the table
|
| 30 |
+
height, width = image.shape[:2]
|
| 31 |
+
# Crop vertically from y=200 to y=800, keep x dimension unchanged
|
| 32 |
+
start_y = 480
|
| 33 |
+
end_y = 750
|
| 34 |
+
# Keep full width (no change in x dimension)
|
| 35 |
+
start_x = 130
|
| 36 |
+
end_x = 900
|
| 37 |
+
cropped_image = image[start_y:end_y, start_x:end_x]
|
| 38 |
+
|
| 39 |
+
# Display original and cropped image dimensions
|
| 40 |
+
print(f"Original image dimensions: {width}x{height}")
|
| 41 |
+
print(f"Cropped image dimensions: {cropped_image.shape[1]}x{cropped_image.shape[0]}")
|
| 42 |
+
|
| 43 |
+
# Optionally save the cropped image
|
| 44 |
+
# crop_filename = os.path.splitext(image_file)[0] + "_cropped.png"
|
| 45 |
+
# cv2.imwrite(crop_filename, cv2.cvtColor(cropped_image, cv2.COLOR_RGB2BGR))
|
| 46 |
+
|
| 47 |
+
# Use the original image for FCM processing
|
| 48 |
+
cropped_image = cv2.cvtColor(cropped_image, cv2.COLOR_RGB2BGR) # Convert back to BGR for consistency
|
| 49 |
+
fcm = vlc.get_fcm(cropped_image, p=1)
|
| 50 |
+
fcm_values.append(fcm[0])
|
| 51 |
+
|
| 52 |
+
print("image_file:", image_file)
|
| 53 |
+
print("fcm:",fcm[0])
|
| 54 |
+
# cv2.imshow("fcm", cropped_image)
|
| 55 |
+
# cv2.waitKey(0)
|
| 56 |
+
# Calculate mean and std of all FCM values
|
| 57 |
+
fcm_values_array = np.array(fcm_values)
|
| 58 |
+
fcm_mean = fcm_values_array.mean()
|
| 59 |
+
fcm_std = fcm_values_array.std()
|
| 60 |
+
print(f"FCM mean across all images: {fcm_mean:.4f}, std: {fcm_std:.4f}")
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/test_sapien.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sapien.core as sapien
|
| 2 |
+
# # backtrace debug
|
| 3 |
+
# import faulthandler
|
| 4 |
+
# faulthandler.enable()
|
| 5 |
+
|
| 6 |
+
engine = sapien.Engine()
|
| 7 |
+
renderer = sapien.SapienRenderer()
|
| 8 |
+
engine.set_renderer(renderer)
|
| 9 |
+
scene = engine.create_scene()
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/tests/tools.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import h5py
|
| 2 |
+
from cfdp.datasets.maniskill2_trajectory import ManiSkill2Trajectory
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
def is_valid_quaternion(q):
|
| 6 |
+
"""
|
| 7 |
+
Check if a quaternion is valid. 1: [w, x, y, z]
|
| 8 |
+
"""
|
| 9 |
+
# Check if input is array-like and has 4 components
|
| 10 |
+
try:
|
| 11 |
+
q = np.array(q, dtype=float)
|
| 12 |
+
if q.shape != (4,):
|
| 13 |
+
return False
|
| 14 |
+
except:
|
| 15 |
+
return False
|
| 16 |
+
|
| 17 |
+
# Check if quaternion is normalized (magnitude ≈ 1)
|
| 18 |
+
magnitude = np.sqrt(np.sum(q * q))
|
| 19 |
+
return np.abs(magnitude - 1.0) < 1e-6
|
| 20 |
+
|
| 21 |
+
def quaternion_to_euler(q):
|
| 22 |
+
"""
|
| 23 |
+
Convert quaternion [w, x, y, z] to Euler angles [roll, pitch, yaw] in radians.
|
| 24 |
+
Uses the ZYX convention (yaw, pitch, roll).
|
| 25 |
+
|
| 26 |
+
Args:
|
| 27 |
+
q (array-like): Quaternion in [w, x, y, z] format
|
| 28 |
+
|
| 29 |
+
Returns:
|
| 30 |
+
numpy.ndarray: Array of Euler angles [roll, pitch, yaw] in radians
|
| 31 |
+
"""
|
| 32 |
+
# Extract quaternion components
|
| 33 |
+
w, x, y, z = q
|
| 34 |
+
|
| 35 |
+
# Roll (x-axis rotation)
|
| 36 |
+
sinr_cosp = 2 * (w * x + y * z)
|
| 37 |
+
cosr_cosp = 1 - 2 * (x * x + y * y)
|
| 38 |
+
roll = np.arctan2(sinr_cosp, cosr_cosp)
|
| 39 |
+
|
| 40 |
+
# Pitch (y-axis rotation)
|
| 41 |
+
sinp = 2 * (w * y - z * x)
|
| 42 |
+
pitch = np.arcsin(np.clip(sinp, -1.0, 1.0))
|
| 43 |
+
|
| 44 |
+
# Yaw (z-axis rotation)
|
| 45 |
+
siny_cosp = 2 * (w * z + x * y)
|
| 46 |
+
cosy_cosp = 1 - 2 * (y * y + z * z)
|
| 47 |
+
yaw = np.arctan2(siny_cosp, cosy_cosp)
|
| 48 |
+
|
| 49 |
+
return np.array([roll, pitch, yaw])
|
| 50 |
+
|
| 51 |
+
def print_dataset_sizes(h5_group):
|
| 52 |
+
"""Print the sizes of all datasets in an h5 file/group.
|
| 53 |
+
|
| 54 |
+
Args:
|
| 55 |
+
h5_group: h5py.File or h5py.Group object
|
| 56 |
+
"""
|
| 57 |
+
for name, item in h5_group.items():
|
| 58 |
+
if isinstance(item, h5py.Dataset):
|
| 59 |
+
size_mb = item.size * item.dtype.itemsize / (1024 * 1024)
|
| 60 |
+
print(f"{name}: {size_mb:.2f} MB")
|
| 61 |
+
elif isinstance(item, h5py.Group):
|
| 62 |
+
print(f"Group: {name}")
|
| 63 |
+
print_dataset_sizes(item)
|
| 64 |
+
|
| 65 |
+
def test_dataset_loading(file_name):
|
| 66 |
+
dataset = ManiSkill2Trajectory(file_name, obs_keys = ['tcp_pose', 'base_pose'])
|
| 67 |
+
obs, actions, goal, data = dataset[1]
|
| 68 |
+
print(obs.shape)
|
| 69 |
+
print(actions.shape)
|
| 70 |
+
print(goal.shape)
|
| 71 |
+
|
| 72 |
+
if __name__ == "__main__":
|
| 73 |
+
#########################################################
|
| 74 |
+
# test_dataset_loading("/home/xuan/Dataset/Trajectory/videos/20250324_222521.h5")
|
| 75 |
+
#########################################################
|
| 76 |
+
# with h5py.File("/home/xuan/Dataset/Trajectory/videos/20250324_164747.h5", 'r') as f:
|
| 77 |
+
# print("Dataset sizes in MB:")
|
| 78 |
+
# print_dataset_sizes(f)
|
| 79 |
+
#########################################################
|
| 80 |
+
q = [ 0.005171, 0.9916896, -0.01349258, 0.12783928]
|
| 81 |
+
print(is_valid_quaternion(q))
|
| 82 |
+
|
| 83 |
+
# Convert first quaternion to Euler angles
|
| 84 |
+
q1 = [0.04063901, 0.93449867, 0.2687286, -0.12474412]
|
| 85 |
+
euler1 = quaternion_to_euler(q1)
|
| 86 |
+
print(f"Euler angles for q1 (roll, pitch, yaw) in radians: {euler1}")
|
| 87 |
+
print(f"Euler angles for q1 in degrees: {np.degrees(euler1)}")
|
| 88 |
+
|
| 89 |
+
# Convert second quaternion to Euler angles
|
| 90 |
+
q2 = [-0.00972217, 0.98917925, 0.05200833, -0.07761869]
|
| 91 |
+
euler2 = quaternion_to_euler(q2)
|
| 92 |
+
print(f"Euler angles for q2 (roll, pitch, yaw) in radians: {euler2}")
|
| 93 |
+
print(f"Euler angles for q2 in degrees: {np.degrees(euler2)}")
|
| 94 |
+
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/__init__.py
ADDED
|
File without changes
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (211 Bytes). View file
|
|
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/__pycache__/rdp_path_simplify.cpython-310.pyc
ADDED
|
Binary file (3.93 kB). View file
|
|
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/angle_utils.py
ADDED
|
@@ -0,0 +1,350 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
|
| 5 |
+
def quaternion_to_euler(q):
|
| 6 |
+
"""
|
| 7 |
+
Convert quaternion [w, x, y, z] to Euler angles [roll, pitch, yaw] in radians.
|
| 8 |
+
Uses the ZYX convention (yaw, pitch, roll).
|
| 9 |
+
|
| 10 |
+
Args:
|
| 11 |
+
q (array-like): Quaternion in [w, x, y, z] format
|
| 12 |
+
|
| 13 |
+
Returns:
|
| 14 |
+
numpy.ndarray: Array of Euler angles [roll, pitch, yaw] in radians
|
| 15 |
+
"""
|
| 16 |
+
# Extract quaternion components
|
| 17 |
+
w, x, y, z = q
|
| 18 |
+
|
| 19 |
+
# Roll (x-axis rotation)
|
| 20 |
+
sinr_cosp = 2 * (w * x + y * z)
|
| 21 |
+
cosr_cosp = 1 - 2 * (x * x + y * y)
|
| 22 |
+
roll = np.arctan2(sinr_cosp, cosr_cosp)
|
| 23 |
+
|
| 24 |
+
# Pitch (y-axis rotation)
|
| 25 |
+
sinp = 2 * (w * y - z * x)
|
| 26 |
+
pitch = np.arcsin(np.clip(sinp, -1.0, 1.0))
|
| 27 |
+
|
| 28 |
+
# Yaw (z-axis rotation)
|
| 29 |
+
siny_cosp = 2 * (w * z + x * y)
|
| 30 |
+
cosy_cosp = 1 - 2 * (y * y + z * z)
|
| 31 |
+
yaw = np.arctan2(siny_cosp, cosy_cosp)
|
| 32 |
+
|
| 33 |
+
return np.array([roll, pitch, yaw])
|
| 34 |
+
|
| 35 |
+
def quaternion_multiply(q1, q2):
|
| 36 |
+
"""Helper function to multiply two quaternions."""
|
| 37 |
+
w1, x1, y1, z1 = q1
|
| 38 |
+
w2, x2, y2, z2 = q2
|
| 39 |
+
return np.array([
|
| 40 |
+
w1*w2 - x1*x2 - y1*y2 - z1*z2,
|
| 41 |
+
w1*x2 + x1*w2 + y1*z2 - z1*y2,
|
| 42 |
+
w1*y2 - x1*z2 + y1*w2 + z1*x2,
|
| 43 |
+
w1*z2 + x1*y2 - y1*x2 + z1*w2
|
| 44 |
+
])
|
| 45 |
+
|
| 46 |
+
def quaternion_inverse(q):
|
| 47 |
+
w, x, y, z = q
|
| 48 |
+
norm_sq = w*w + x*x + y*y + z*z
|
| 49 |
+
return np.array([w, -x, -y, -z]) / norm_sq
|
| 50 |
+
|
| 51 |
+
def quaternion_difference(q1, q2):
|
| 52 |
+
"""
|
| 53 |
+
q1 - q2
|
| 54 |
+
"""
|
| 55 |
+
q2_inv = quaternion_inverse(q2)
|
| 56 |
+
return quaternion_multiply(q1, q2_inv)
|
| 57 |
+
|
| 58 |
+
def quat_normalize_t(q): # tensor
|
| 59 |
+
return q / (q.norm(dim=-1, keepdim=True) + 1e-12)
|
| 60 |
+
|
| 61 |
+
def normalize_vector(v, return_mag=False):
|
| 62 |
+
# https://github.com/nickgkan/3d_diffuser_actor/blob/master/diffuser_actor/utils/utils.py
|
| 63 |
+
device = v.device
|
| 64 |
+
batch = v.shape[0]
|
| 65 |
+
v_mag = torch.sqrt(v.pow(2).sum(1))
|
| 66 |
+
v_mag = torch.max(v_mag, torch.autograd.Variable(torch.FloatTensor([1e-8]).to(device)))
|
| 67 |
+
v_mag = v_mag.view(batch, 1).expand(batch, v.shape[1])
|
| 68 |
+
v = v / v_mag
|
| 69 |
+
if return_mag:
|
| 70 |
+
return v, v_mag[:, 0]
|
| 71 |
+
else:
|
| 72 |
+
return v
|
| 73 |
+
|
| 74 |
+
def cross_product(u, v):
|
| 75 |
+
# https://github.com/nickgkan/3d_diffuser_actor/blob/master/diffuser_actor/utils/utils.py
|
| 76 |
+
batch = u.shape[0]
|
| 77 |
+
i = u[:, 1] * v[:, 2] - u[:, 2] * v[:, 1]
|
| 78 |
+
j = u[:, 2] * v[:, 0] - u[:, 0] * v[:, 2]
|
| 79 |
+
k = u[:, 0] * v[:, 1] - u[:, 1] * v[:, 0]
|
| 80 |
+
out = torch.cat((i.view(batch, 1), j.view(batch, 1), k.view(batch, 1)), 1)
|
| 81 |
+
return out # batch*3
|
| 82 |
+
|
| 83 |
+
def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
|
| 84 |
+
# https://github.com/nickgkan/3d_diffuser_actor/blob/master/diffuser_actor/utils/utils.py
|
| 85 |
+
"""
|
| 86 |
+
Returns torch.sqrt(torch.max(0, x))
|
| 87 |
+
but with a zero subgradient where x is 0.
|
| 88 |
+
"""
|
| 89 |
+
ret = torch.zeros_like(x)
|
| 90 |
+
positive_mask = x > 0
|
| 91 |
+
ret[positive_mask] = torch.sqrt(x[positive_mask])
|
| 92 |
+
return ret
|
| 93 |
+
|
| 94 |
+
def compute_rotation_matrix_from_quaternion(quaternion):
|
| 95 |
+
# https://github.com/papagina/RotationContinuity/blob/master/sanity_test/code/tools.py
|
| 96 |
+
batch=quaternion.shape[0]
|
| 97 |
+
|
| 98 |
+
quat = normalize_vector(quaternion).contiguous()
|
| 99 |
+
|
| 100 |
+
qw = quat[...,0].contiguous().view(batch, 1)
|
| 101 |
+
qx = quat[...,1].contiguous().view(batch, 1)
|
| 102 |
+
qy = quat[...,2].contiguous().view(batch, 1)
|
| 103 |
+
qz = quat[...,3].contiguous().view(batch, 1)
|
| 104 |
+
|
| 105 |
+
# Unit quaternion rotation matrices computatation
|
| 106 |
+
xx = qx*qx
|
| 107 |
+
yy = qy*qy
|
| 108 |
+
zz = qz*qz
|
| 109 |
+
xy = qx*qy
|
| 110 |
+
xz = qx*qz
|
| 111 |
+
yz = qy*qz
|
| 112 |
+
xw = qx*qw
|
| 113 |
+
yw = qy*qw
|
| 114 |
+
zw = qz*qw
|
| 115 |
+
|
| 116 |
+
row0 = torch.cat((1-2*yy-2*zz, 2*xy - 2*zw, 2*xz + 2*yw), 1) #batch*3
|
| 117 |
+
row1 = torch.cat((2*xy+ 2*zw, 1-2*xx-2*zz, 2*yz-2*xw ), 1) #batch*3
|
| 118 |
+
row2 = torch.cat((2*xz-2*yw, 2*yz+2*xw, 1-2*xx-2*yy), 1) #batch*3
|
| 119 |
+
|
| 120 |
+
matrix = torch.cat((row0.view(batch, 1, 3), row1.view(batch,1,3), row2.view(batch,1,3)),1) #batch*3*3
|
| 121 |
+
|
| 122 |
+
return matrix
|
| 123 |
+
|
| 124 |
+
def get_ortho6d_from_rotation_matrix(matrix):
|
| 125 |
+
# https://github.com/nickgkan/3d_diffuser_actor/blob/master/diffuser_actor/utils/utils.py
|
| 126 |
+
# The orhto6d represents the first two column vectors a1 and a2 of the
|
| 127 |
+
# rotation matrix: [ | , |, | ]
|
| 128 |
+
# [ a1, a2, a3]
|
| 129 |
+
# [ | , |, | ]
|
| 130 |
+
ortho6d = matrix[:, :, :2].permute(0, 2, 1).flatten(-2)
|
| 131 |
+
return ortho6d
|
| 132 |
+
|
| 133 |
+
def compute_rotation_matrix_from_ortho6d(ortho6d):
|
| 134 |
+
# https://github.com/nickgkan/3d_diffuser_actor/blob/master/diffuser_actor/utils/utils.py
|
| 135 |
+
x_raw = ortho6d[:, 0:3] # batch*3
|
| 136 |
+
y_raw = ortho6d[:, 3:6] # batch*3
|
| 137 |
+
|
| 138 |
+
x = normalize_vector(x_raw) # batch*3
|
| 139 |
+
z = cross_product(x, y_raw) # batch*3
|
| 140 |
+
z = normalize_vector(z) # batch*3
|
| 141 |
+
y = cross_product(z, x) # batch*3
|
| 142 |
+
|
| 143 |
+
x = x.view(-1, 3, 1)
|
| 144 |
+
y = y.view(-1, 3, 1)
|
| 145 |
+
z = z.view(-1, 3, 1)
|
| 146 |
+
matrix = torch.cat((x, y, z), 2) # batch*3*3
|
| 147 |
+
return matrix
|
| 148 |
+
|
| 149 |
+
def compute_quaternion_from_rotation_matrix(matrix: torch.Tensor) -> torch.Tensor:
|
| 150 |
+
# https://github.com/nickgkan/3d_diffuser_actor/blob/master/diffuser_actor/utils/utils.py
|
| 151 |
+
"""
|
| 152 |
+
Convert rotations given as rotation matrices to quaternions.
|
| 153 |
+
|
| 154 |
+
Args:
|
| 155 |
+
matrix: Rotation matrices as tensor of shape (..., 3, 3).
|
| 156 |
+
|
| 157 |
+
Returns:
|
| 158 |
+
quaternions with real part first, as tensor of shape (..., 4).
|
| 159 |
+
"""
|
| 160 |
+
if matrix.size(-1) != 3 or matrix.size(-2) != 3:
|
| 161 |
+
raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
|
| 162 |
+
|
| 163 |
+
batch_dim = matrix.shape[:-2]
|
| 164 |
+
m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind(
|
| 165 |
+
matrix.reshape(batch_dim + (9,)), dim=-1
|
| 166 |
+
)
|
| 167 |
+
|
| 168 |
+
q_abs = _sqrt_positive_part(
|
| 169 |
+
torch.stack(
|
| 170 |
+
[
|
| 171 |
+
1.0 + m00 + m11 + m22,
|
| 172 |
+
1.0 + m00 - m11 - m22,
|
| 173 |
+
1.0 - m00 + m11 - m22,
|
| 174 |
+
1.0 - m00 - m11 + m22,
|
| 175 |
+
],
|
| 176 |
+
dim=-1,
|
| 177 |
+
)
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
# we produce the desired quaternion multiplied by each of r, i, j, k
|
| 181 |
+
quat_by_rijk = torch.stack(
|
| 182 |
+
[
|
| 183 |
+
# pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
|
| 184 |
+
# `int`.
|
| 185 |
+
torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1),
|
| 186 |
+
# pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
|
| 187 |
+
# `int`.
|
| 188 |
+
torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1),
|
| 189 |
+
# pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
|
| 190 |
+
# `int`.
|
| 191 |
+
torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1),
|
| 192 |
+
# pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
|
| 193 |
+
# `int`.
|
| 194 |
+
torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1),
|
| 195 |
+
],
|
| 196 |
+
dim=-2,
|
| 197 |
+
)
|
| 198 |
+
|
| 199 |
+
# We floor here at 0.1 but the exact level is not important; if q_abs is small,
|
| 200 |
+
# the candidate won't be picked.
|
| 201 |
+
flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device)
|
| 202 |
+
quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr))
|
| 203 |
+
|
| 204 |
+
# if not for numerical problems, quat_candidates[i] should be same (up to a sign),
|
| 205 |
+
# forall i; we pick the best-conditioned one (with the largest denominator)
|
| 206 |
+
|
| 207 |
+
return quat_candidates[
|
| 208 |
+
F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :
|
| 209 |
+
].reshape(batch_dim + (4,))
|
| 210 |
+
|
| 211 |
+
def quat2ortho6d(quat):
    """Convert quaternions to the continuous 6D rotation representation.

    Args:
        quat: Tensor of shape (num_obs, 4), quaternions.

    Returns:
        Tensor of shape (num_obs, 6): the first two rows of each rotation
        matrix, flattened (the "ortho6d" encoding).
    """
    rotation = compute_rotation_matrix_from_quaternion(quat)
    return get_ortho6d_from_rotation_matrix(rotation)
|
| 217 |
+
|
| 218 |
+
def ortho6d2quat(ortho6d):
    """Convert the continuous 6D rotation representation back to quaternions.

    Args:
        ortho6d: Tensor of shape (num_obs, 6).

    Returns:
        Tensor of shape (num_obs, 4), quaternions.
    """
    rotation = compute_rotation_matrix_from_ortho6d(ortho6d)
    return compute_quaternion_from_rotation_matrix(rotation)
|
| 222 |
+
|
| 223 |
+
#------------- Continuous Euler Angle ------------#
|
| 224 |
+
def q_normalize(q):
    """Return *q* as a unit-norm float64 array; epsilon guards zero input."""
    arr = np.asarray(q, dtype=np.float64)
    # The 1e-12 term avoids division by zero for a degenerate quaternion.
    return arr / (np.linalg.norm(arr) + 1e-12)
|
| 227 |
+
|
| 228 |
+
def q_conj(q):
    """Conjugate of a (w, x, y, z) quaternion: negate the vector part."""
    w, x, y, z = q[0], q[1], q[2], q[3]
    return np.array([w, -x, -y, -z], dtype=np.float64)
|
| 230 |
+
|
| 231 |
+
def q_mul(a, b):
    """Hamilton product of two (w, x, y, z) quaternions, as a float64 array."""
    aw, ax, ay, az = a
    bw, bx, by, bz = b
    w = aw * bw - ax * bx - ay * by - az * bz
    x = aw * bx + ax * bw + ay * bz - az * by
    y = aw * by - ax * bz + ay * bw + az * bx
    z = aw * bz + ax * by - ay * bx + az * bw
    return np.array([w, x, y, z], dtype=np.float64)
|
| 239 |
+
|
| 240 |
+
def same_hemisphere(q_ref, q):
    """Return q, sign-flipped if needed so dot(q_ref, q) >= 0 (avoids frame-to-frame sign flips)."""
    if np.dot(q_ref, q) < 0.0:
        return -q
    return q
|
| 243 |
+
|
| 244 |
+
def quat_to_rotvec(q):
    """
    Quaternion (wxyz) -> rotation vector (axis * angle).

    The angle lies in [0, pi]; near-zero rotations return the zero vector.
    """
    unit = q_normalize(q)
    scalar = unit[0]
    vec = unit[1:]
    vec_norm = np.linalg.norm(vec)
    # angle = 2*atan2(||v||, w); clamping w at 0 selects the shortest arc.
    theta = 2.0 * np.arctan2(vec_norm, max(scalar, 0.0))
    if theta < 1e-8 or vec_norm < 1e-12:
        return np.zeros(3, dtype=np.float64)
    return (theta / vec_norm) * vec
|
| 258 |
+
|
| 259 |
+
def so3_delta_error(
    q_curr, q_tgt, *, quat_order="wxyz", same_hemi=True,
    max_step_deg=5.0, out="euler"
):
    """
    WORLD-frame rotation error taking q_curr to q_tgt, as a 3D vector.

    Args:
        q_curr, q_tgt: quaternion arrays in the given quat_order.
        quat_order: 'wxyz' (default) or 'xyzw'.
        same_hemi: flip q_tgt into q_curr's hemisphere to avoid sign flips.
        max_step_deg: clamp the per-step rotation angle in degrees
            (None disables clamping).
        out: 'rotvec' returns the rotation vector (axis*angle);
            'euler' returns the same vector, used as a small-angle
            XYZ delta-euler approximation.
    """
    current = np.asarray(q_curr, dtype=np.float64)
    target = np.asarray(q_tgt, dtype=np.float64)

    if quat_order == "xyzw":
        # Reorder to wxyz internally.
        current = np.array([current[3], current[0], current[1], current[2]], dtype=np.float64)
        target = np.array([target[3], target[0], target[1], target[2]], dtype=np.float64)
    elif quat_order != "wxyz":
        raise ValueError("quat_order must be 'wxyz' or 'xyzw'.")

    current = q_normalize(current)
    target = q_normalize(target)
    if same_hemi:
        target = same_hemisphere(current, target)

    # Relative rotation (shortest arc): delta = target * conj(current),
    # then log-map it to a rotation vector.
    rotvec = quat_to_rotvec(q_mul(target, q_conj(current)))

    # Optionally limit the step magnitude.
    if max_step_deg is not None:
        limit = np.deg2rad(max_step_deg)
        magnitude = np.linalg.norm(rotvec)
        if magnitude > limit and magnitude > 1e-12:
            rotvec = rotvec * (limit / magnitude)

    if out == "rotvec":
        return rotvec
    if out == "euler":
        # Small-angle approximation: the rotvec doubles as [dRx, dRy, dRz].
        return rotvec.copy()
    raise ValueError("out must be 'euler' or 'rotvec'.")
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
## Batch of code that can be useful if used with transforms3d quat2mat and mat2quat. Otherwise, do not use. Not efficient
|
| 311 |
+
# def normalize_vector(v, return_mag=False):
|
| 312 |
+
# device = v.device
|
| 313 |
+
# v_mag = torch.sqrt(v.pow(2).sum(0))
|
| 314 |
+
# v_mag = torch.max(v_mag, torch.autograd.Variable(torch.FloatTensor([1e-8]).to(device)))
|
| 315 |
+
# v_mag = v_mag.expand(v.shape[0])
|
| 316 |
+
# v = v / v_mag
|
| 317 |
+
# if return_mag:
|
| 318 |
+
# return v, v_mag
|
| 319 |
+
# else:
|
| 320 |
+
# return v
|
| 321 |
+
|
| 322 |
+
# def cross_product(u, v):
|
| 323 |
+
# i = u[1] * v[2] - u[2] * v[1]
|
| 324 |
+
# j = u[2] * v[0] - u[0] * v[2]
|
| 325 |
+
# k = u[0] * v[1] - u[1] * v[0]
|
| 326 |
+
# out = torch.stack([i, j, k])
|
| 327 |
+
# return out # 3
|
| 328 |
+
|
| 329 |
+
# def get_ortho6d_from_rotation_matrix(matrix):
|
| 330 |
+
# # The orhto6d represents the first two column vectors a1 and a2 of the
|
| 331 |
+
# # rotation matrix: [ | , |, | ]
|
| 332 |
+
# # [ a1, a2, a3]
|
| 333 |
+
# # [ | , |, | ]
|
| 334 |
+
# ortho6d = matrix[:, :2].permute(1, 0).flatten(-2)
|
| 335 |
+
# return ortho6d
|
| 336 |
+
|
| 337 |
+
# def compute_rotation_matrix_from_ortho6d(ortho6d):
|
| 338 |
+
# x_raw = ortho6d[0:3] # 3
|
| 339 |
+
# y_raw = ortho6d[3:6] # 3
|
| 340 |
+
|
| 341 |
+
# x = normalize_vector(x_raw) # 3
|
| 342 |
+
# z = cross_product(x, y_raw) # 3
|
| 343 |
+
# z = normalize_vector(z) # 3
|
| 344 |
+
# y = cross_product(z, x) # 3
|
| 345 |
+
|
| 346 |
+
# x = x.view(3, 1)
|
| 347 |
+
# y = y.view(3, 1)
|
| 348 |
+
# z = z.view(3, 1)
|
| 349 |
+
# matrix = torch.cat((x, y, z), 1) # 3x3
|
| 350 |
+
# return matrix
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/chamfer.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
|
| 2 |
+
|
| 3 |
+
from typing import Union
|
| 4 |
+
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
from pytorch3d.ops.knn import knn_gather, knn_points
|
| 8 |
+
from pytorch3d.structures.pointclouds import Pointclouds
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def _validate_chamfer_reduction_inputs(
|
| 12 |
+
batch_reduction: Union[str, None], point_reduction: str
|
| 13 |
+
):
|
| 14 |
+
"""Check the requested reductions are valid.
|
| 15 |
+
Args:
|
| 16 |
+
batch_reduction: Reduction operation to apply for the loss across the
|
| 17 |
+
batch, can be one of ["mean", "sum"] or None.
|
| 18 |
+
point_reduction: Reduction operation to apply for the loss across the
|
| 19 |
+
points, can be one of ["mean", "sum"].
|
| 20 |
+
"""
|
| 21 |
+
if batch_reduction is not None and batch_reduction not in ["mean", "sum"]:
|
| 22 |
+
raise ValueError('batch_reduction must be one of ["mean", "sum"] or None')
|
| 23 |
+
if point_reduction not in ["mean", "sum"]:
|
| 24 |
+
raise ValueError('point_reduction must be one of ["mean", "sum"]')
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def _handle_pointcloud_input(
    points: Union[torch.Tensor, Pointclouds],
    lengths: Union[torch.Tensor, None],
    normals: Union[torch.Tensor, None],
):
    """
    If points is an instance of Pointclouds, retrieve the padded points tensor
    along with the number of points per batch and the padded normals.
    Otherwise, return the input points (and normals) with the number of points per cloud
    set to the size of the second dimension of `points`.

    Returns:
        (X, lengths, normals): padded points (N, P, D), per-cloud point counts
        (N,), and padded normals (N, P, 3) or None.
    """
    if isinstance(points, Pointclouds):
        X = points.points_padded()
        lengths = points.num_points_per_cloud()
        normals = points.normals_padded()  # either a tensor or None
    elif torch.is_tensor(points):
        if points.ndim != 3:
            raise ValueError("Expected points to be of shape (N, P, D)")
        X = points
        if lengths is not None and (
            lengths.ndim != 1 or lengths.shape[0] != X.shape[0]
        ):
            raise ValueError("Expected lengths to be of shape (N,)")
        if lengths is None:
            # No explicit lengths: treat every cloud as fully populated.
            lengths = torch.full(
                (X.shape[0],), X.shape[1], dtype=torch.int64, device=points.device
            )
        if normals is not None and normals.ndim != 3:
            # BUGFIX: message previously read "(N, P, 3" with a missing paren.
            raise ValueError("Expected normals to be of shape (N, P, 3)")
    else:
        raise ValueError(
            "The input pointclouds should be either "
            + "Pointclouds objects or torch.Tensor of shape "
            + "(minibatch, num_points, 3)."
        )
    return X, lengths, normals
|
| 63 |
+
|
| 64 |
+
class ChamferDistance(torch.nn.Module):
    # One-directional (x -> y) Chamfer distance module, pruned from the
    # pytorch3d implementation: the y -> x direction and the normals loss
    # are commented out, and no point/batch reduction is applied.
    def forward(
        self,
        x,
        y,
        x_lengths=None,
        y_lengths=None,
        x_normals=None,
        y_normals=None,
        weights=None,
        batch_reduction: Union[str, None] = "mean",
        point_reduction: str = "mean",
    ):
        """
        One-directional Chamfer distance from pointclouds x to pointclouds y.
        Args:
            x: FloatTensor of shape (N, P1, D) or a Pointclouds object representing
                a batch of point clouds with at most P1 points in each batch element,
                batch size N and feature dimension D.
            y: FloatTensor of shape (N, P2, D) or a Pointclouds object representing
                a batch of point clouds with at most P2 points in each batch element,
                batch size N and feature dimension D.
            x_lengths: Optional LongTensor of shape (N,) giving the number of points in each
                cloud in x.
            y_lengths: Optional LongTensor of shape (N,) giving the number of points in each
                cloud in y.
            x_normals: Optional FloatTensor of shape (N, P1, D).
            y_normals: Optional FloatTensor of shape (N, P2, D).
            weights: Optional FloatTensor of shape (N,) giving weights for
                batch elements for reduction operation.
            batch_reduction: Reduction operation to apply for the loss across the
                batch, can be one of ["mean", "sum"] or None. NOTE: validated but
                not applied in this pruned variant (except on the zero-weight
                early-return path).
            point_reduction: Reduction operation to apply for the loss across the
                points, can be one of ["mean", "sum"]. NOTE: validated but not
                applied in this pruned variant.
        Returns:
            2-element tuple containing
            - **cham_x**: FloatTensor of shape (N, P1) with the squared distance
                from each point in x to its nearest neighbour in y (zeroed for
                padded points, scaled by `weights` when given).
            - **idx**: LongTensor of shape (N, P1) with the index into y of each
                point's nearest neighbour.
        """
        _validate_chamfer_reduction_inputs(batch_reduction, point_reduction)

        x, x_lengths, x_normals = _handle_pointcloud_input(x, x_lengths, x_normals)
        y, y_lengths, y_normals = _handle_pointcloud_input(y, y_lengths, y_normals)

        # NOTE(review): computed but unused since the normals loss was pruned.
        return_normals = x_normals is not None and y_normals is not None

        N, P1, D = x.shape
        P2 = y.shape[1]

        # Check if inputs are heterogeneous and create a lengths mask.
        is_x_heterogeneous = (x_lengths != P1).any()
        # is_y_heterogeneous = (y_lengths != P2).any()
        # True at padded (invalid) point positions.
        x_mask = (
            torch.arange(P1, device=x.device)[None] >= x_lengths[:, None]
        )  # shape [N, P1]
        # y_mask = (
        #     torch.arange(P2, device=y.device)[None] >= y_lengths[:, None]
        # )  # shape [N, P2]

        if y.shape[0] != N or y.shape[2] != D:
            raise ValueError("y does not have the correct shape.")
        if weights is not None:
            if weights.size(0) != N:
                raise ValueError("weights must be of shape (N,).")
            if not (weights >= 0).all():
                raise ValueError("weights cannot be negative.")
            if weights.sum() == 0.0:
                # All-zero weights: return zero losses that still carry the
                # autograd graph of x (hence the `* 0.0` trick).
                weights = weights.view(N, 1)
                if batch_reduction in ["mean", "sum"]:
                    return (
                        (x.sum((1, 2)) * weights).sum() * 0.0,
                        (x.sum((1, 2)) * weights).sum() * 0.0,
                    )
                return ((x.sum((1, 2)) * weights) * 0.0, (x.sum((1, 2)) * weights) * 0.0)

        # Nearest neighbour in y for every point of x (K=1).
        x_nn = knn_points(x, y, lengths1=x_lengths, lengths2=y_lengths, K=1)
        # y_nn = knn_points(y, x, lengths1=y_lengths, lengths2=x_lengths, K=1)

        cham_x = x_nn.dists[..., 0]  # (N, P1)
        # cham_y = y_nn.dists[..., 0]  # (N, P2)

        if is_x_heterogeneous:
            # Zero out contributions from padded points.
            cham_x[x_mask] = 0.0
        # if is_y_heterogeneous:
        #     cham_y[y_mask] = 0.0

        if weights is not None:
            cham_x *= weights.view(N, 1)
            # cham_y *= weights.view(N, 1)

        return cham_x, x_nn.idx[...,-1]
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/data_utils.py
ADDED
|
@@ -0,0 +1,538 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import h5py
|
| 2 |
+
import os
|
| 3 |
+
import pickle as pkl
|
| 4 |
+
# from mani_skill2.utils.io_utils import load_json
|
| 5 |
+
import numpy as np
|
| 6 |
+
import re
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn.functional as Functional
|
| 9 |
+
|
| 10 |
+
from cfdp.utils.pointcloud_utils import convertRGBD2PCD
|
| 11 |
+
from cfdp.utils.angle_utils import quat2ortho6d, ortho6d2quat
|
| 12 |
+
|
| 13 |
+
def pad_tensor(tensor, target_length, pad_front=False):
    """Pad a tensor along dim 0 to target_length by repeating an edge element.

    Args:
        tensor (torch.Tensor): Input tensor to pad.
        target_length (int): Desired length after padding.
        pad_front (bool): If True, repeat the first element at the front;
            otherwise repeat the last element at the back.

    Returns:
        torch.Tensor: Tensor of length target_length (the input itself when it
        is already long enough).
    """
    n = tensor.shape[0]
    if n >= target_length:
        # Nothing to do; return the original tensor unchanged.
        return tensor

    extra = target_length - n
    repeat_dims = [1] * (tensor.dim() - 1)
    if pad_front:
        filler = tensor[0].unsqueeze(0).repeat(extra, *repeat_dims)
        return torch.cat([filler, tensor], dim=0)
    filler = tensor[-1].unsqueeze(0).repeat(extra, *repeat_dims)
    return torch.cat([tensor, filler], dim=0)
|
| 40 |
+
|
| 41 |
+
def interpolate_points(points, num_interpolated_points, resample=False):
    """Resample a trajectory (or batch of trajectories) to a fixed point count.

    Args:
        points (torch.Tensor): (L, D) single trajectory or (N, L, D) batch.
        num_interpolated_points (int): Number of output points per trajectory.
        resample (bool): If True, use arc-length resampling
            (resample_trajectory_torch_interp); otherwise linear interpolation
            in index space via F.interpolate.

    Returns:
        torch.Tensor: (num_interpolated_points, D) for 2D input, or
        (N, num_interpolated_points, D) for 3D input.
    """
    squeeze_back = False
    if points.dim() == 2:
        # Treat a single trajectory as a batch of one.
        points = points.unsqueeze(0)
        squeeze_back = True
    elif points.dim() != 3:
        raise ValueError(f"points.shape = {points.shape} is not supported. Only accept 2D or 3D tensor.")

    if resample:
        resampled = resample_trajectory_torch_interp(points, num_interpolated_points)
    else:
        # F.interpolate expects (N, C, L), so swap the point and feature dims.
        resampled = Functional.interpolate(
            points.transpose(-2, -1),
            size=num_interpolated_points,
            mode='linear', align_corners=True,
        ).transpose(-2, -1)

    return resampled[0] if squeeze_back else resampled
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def resample_trajectory_torch_interp(points, num_points):
    """
    Arc-length resample each trajectory in a batch to num_points samples.

    Points are spaced evenly along each trajectory's cumulative Euclidean arc
    length and filled in by linear interpolation between the two bracketing
    input points.

    Args:
        points (torch.Tensor): (N, L, D) batch of trajectories.
        num_points (int): Number of output samples per trajectory.

    Returns:
        torch.Tensor: (N, num_points, D) resampled trajectories.
    """
    device = points.device
    batch, length, dim = points.shape

    # Per-segment lengths, then cumulative arc length (starting at 0).
    seg_lengths = torch.norm(points[:, 1:] - points[:, :-1], dim=2)  # (N, L-1)
    cum_lengths = torch.cat(
        [torch.zeros((batch, 1), device=device), torch.cumsum(seg_lengths, dim=1)],
        dim=1,
    )  # (N, L)

    totals = cum_lengths[:, -1].unsqueeze(1)  # (N, 1)
    # Evenly spaced target arc-length positions in [0, total] per batch item.
    targets = (
        torch.linspace(0, 1, num_points, device=device).unsqueeze(0).repeat(batch, 1)
        * totals
    )  # (N, num_points)

    resampled = torch.zeros((batch, num_points, dim), device=device)

    for b in range(batch):
        # Normalise to [0, 1]; zero-length trajectories fall back to a
        # uniform parameterisation to avoid dividing by zero.
        if totals[b, 0] > 0:
            knots = cum_lengths[b] / totals[b, 0]
            samples = targets[b] / totals[b, 0]
        else:
            knots = torch.linspace(0, 1, length, device=device)
            samples = torch.linspace(0, 1, num_points, device=device)

        for i in range(num_points):
            s = samples[i]
            # Index of the segment containing s (clamped to valid range).
            lo = torch.clamp(torch.searchsorted(knots, s) - 1, 0, length - 2)
            k0 = knots[lo]
            k1 = knots[lo + 1]
            # Interpolation fraction within the segment (0 on degenerate ones).
            frac = (s - k0) / (k1 - k0) if k1 > k0 else 0.0
            for d in range(dim):
                p0 = points[b, lo, d]
                p1 = points[b, lo + 1, d]
                resampled[b, i, d] = p0 + frac * (p1 - p0)

    return resampled
|
| 131 |
+
|
| 132 |
+
def get_flattened_obs(data, obs_keys):
    """Flatten the selected observation streams into one (T-1, D) tensor.

    The last timestep of every stream is dropped; 'tcp_pose' streams have
    their quaternion part converted to the ortho6d representation.
    """
    data_dict = get_data_dict(data)
    pieces = []
    for key in obs_keys:
        stream = torch.tensor(data_dict[key][:-1], dtype=torch.float32)
        if key == 'tcp_pose':
            stream = transform_quat_to_ortho6d(stream)
        pieces.append(stream)
    # Concatenate the per-key streams feature-wise.
    return torch.cat(pieces, dim=1)
|
| 138 |
+
|
| 139 |
+
def transform_quat_to_ortho6d(obs_tensor):
    """Convert (xyz, quaternion) rows into (xyz, ortho6d) rows.

    Args:
        obs_tensor: Tensor of shape (N, 7) or (7,) — first 3 entries are the
            position, the remaining 4 the quaternion.

    Returns:
        Tensor of shape (N, 9): position followed by the 6D rotation encoding.
    """
    if obs_tensor.dim() == 1:
        # Promote a single observation to a batch of one.
        obs_tensor = obs_tensor.unsqueeze(0)

    position = obs_tensor[:, :3]
    rotation6d = quat2ortho6d(obs_tensor[:, 3:])
    return torch.cat([position, rotation6d], dim=1)
|
| 148 |
+
|
| 149 |
+
def transform_ortho6d_to_quat(obs_tensor):
    """Convert (xyz, ortho6d) rows back into (xyz, quaternion) rows.

    Args:
        obs_tensor: Tensor of shape (N, 9) or (9,) — first 3 entries are the
            position, the remaining 6 the ortho6d rotation.

    Returns:
        Tensor of shape (N, 7): position followed by the quaternion.
    """
    if obs_tensor.dim() == 1:
        # Promote a single observation to a batch of one.
        obs_tensor = obs_tensor.unsqueeze(0)

    position = obs_tensor[:, :3]
    quaternion = ortho6d2quat(obs_tensor[:, 3:])
    return torch.cat([position, quaternion], dim=1)
|
| 158 |
+
|
| 159 |
+
def load_raw_data(dataset_file, pcd, force_reload, load_count):
    """Load demonstration trajectories from an h5 dataset file.

    NOTE: the pickle-cache fast path is currently disabled, so
    `force_reload` has no effect and loading always goes through the h5 file.

    Args:
        dataset_file: Path to the .h5 dataset.
        pcd: Whether to convert RGB-D observations to point clouds.
        force_reload: Unused while the cache path is disabled.
        load_count: Number of trajectories to load (-1 for all).
    """
    return load_from_h5(dataset_file, pcd, load_count)
|
| 170 |
+
|
| 171 |
+
def load_from_pickle(pkl_file):
    """Deserialize and return preprocessed data from a pickle file."""
    with open(pkl_file, "rb") as handle:
        return pkl.load(handle)
|
| 175 |
+
|
| 176 |
+
def load_from_h5(dataset_file, pcd, load_count):
    """Load and process up to `load_count` trajectories from an h5 file.

    Trajectory groups are expected to be named `traj_<i>` and are loaded in
    natural (numeric) order. When `pcd` is True, RGB-D observations are
    converted to point clouds.

    Args:
        dataset_file: Path to the .h5 dataset.
        pcd: Whether to run convertRGBD2PCD on each trajectory.
        load_count: Number of trajectories to load; -1 (or more than
            available) loads all of them.

    Returns:
        list[dict]: One nested dict per trajectory.
    """
    data = h5py.File(dataset_file, "r")

    def natural_key(name):
        # Split into digit/non-digit runs so traj_2 sorts before traj_10.
        return [int(part) if part.isdigit() else part for part in re.split(r"(\d+)", name)]

    traj_keys = sorted(
        (k for k in data.keys() if k.startswith("traj_")), key=natural_key
    )

    if load_count == -1 or load_count > len(traj_keys):
        load_count = len(traj_keys)

    trajectories = []
    for key in traj_keys[:load_count]:
        trajectory = hdf5_to_dict(data[key])
        if pcd:
            trajectory = convertRGBD2PCD(trajectory)
        trajectories.append(trajectory)

    return trajectories
|
| 223 |
+
|
| 224 |
+
def hdf5_to_dict(hdf5_group):
    """
    Convert an HDF5 group to a nested dictionary.

    Datasets are materialised via `item[()]` (numpy arrays or scalars);
    sub-groups are converted recursively. Items that are neither datasets nor
    groups are silently skipped.

    Args:
        hdf5_group (h5py.Group): The HDF5 group to convert.

    Returns:
        dict: A dictionary representation of the HDF5 group.
    """
    result = {}
    for key, item in hdf5_group.items():
        if isinstance(item, h5py.Group):
            result[key] = hdf5_to_dict(item)
        elif isinstance(item, h5py.Dataset):
            result[key] = item[()]
    return result
|
| 246 |
+
|
| 247 |
+
class ObsQueue():
    """Fixed-capacity sliding window of observations.

    The queue always exposes exactly `size` elements once the first
    observation arrives (the first observation seeds the whole window).
    """

    def __init__(self, size: int, obs=None):
        # size: number of observations retained in the window.
        self.size = size
        # Pre-fill with copies of the seed observation when one is given;
        # otherwise start empty and seed on the first append().
        self.queue = [obs] * size if obs is not None else []

    def append(self, obs):
        if not self.queue:
            # First observation fills the entire window.
            self.queue = [obs] * self.size
        else:
            self.queue.append(obs)
            # Drop the oldest entries beyond the window size.
            self.queue = self.queue[-self.size:]

    def get_values_as_tensor(self):
        """Stack the current window into a tensor of shape (size, ...)."""
        return torch.stack(self.queue[-self.size:])

    def __repr__(self):
        return f"Queue({self.queue})"

    def __shape__(self):
        # NOTE: not a real dunder protocol; kept for backward compatibility.
        return torch.stack(self.queue[-self.size:]).shape
|
| 274 |
+
|
| 275 |
+
def create_trajectory_with_history(data, history_length, trajectory_length, stride = 5):
    """Build (history, future-trajectory) pairs from a sequence of observations.

    For each start index (stepping by `stride`), the pair consists of the
    last `history_length` observations seen so far, and the remaining future
    observations interpolated (or, when too few remain, edge-padded) to
    `trajectory_length` points.

    Args:
        data (torch.Tensor): (T, D) sequence of observations.
        history_length (int): Number of past observations per sample.
        trajectory_length (int): Length of each future trajectory.
        stride (int): Step between consecutive start indices.

    Returns:
        tuple[torch.Tensor, torch.Tensor]:
            histories of shape (S, history_length, D) and futures of shape
            (S, trajectory_length, D).
    """
    history = ObsQueue(history_length, data[0])
    histories = []
    futures = []
    for start in range(1, data.shape[0] - history_length, stride):
        histories.append(history.get_values_as_tensor())
        remaining = data[start:]
        if remaining.shape[0] >= trajectory_length:
            # Enough future data: interpolate down/up to the target length.
            futures.append(interpolate_points(remaining, trajectory_length))
        else:
            # Too little future data: repeat the final observation.
            futures.append(pad_tensor(remaining, trajectory_length, pad_front=False))
        history.append(data[start])
    return torch.stack(histories), torch.stack(futures)
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
def get_data_dict(data):
    """Extract the standard observation fields from a nested trajectory dict.

    Missing fields come back as None, so callers can probe for availability.

    Args:
        data (dict): Trajectory dict as produced by hdf5_to_dict.

    Returns:
        dict: Flat dict with keys qpos, qvel, base_pose, tcp_pose, goal_pose,
        success and env_states.
    """
    def print_nested_keys(d, prefix=''):
        # Debug helper: pretty-print the nested key structure (currently unused).
        for key, value in d.items():
            if isinstance(value, dict):
                print(f"{prefix}- {key}/")
                print_nested_keys(value, prefix + '  ')
            else:
                print(f"{prefix}- {key}")

    obs = data.get('obs', {})
    agent = obs.get('agent', {})
    extra = obs.get('extra', {})
    return {
        'qpos': agent.get('qpos', None),
        'qvel': agent.get('qvel', None),
        'base_pose': agent.get('base_pose', None),
        'tcp_pose': extra.get('tcp_pose', None),
        'goal_pose': extra.get('goal_pose', None),
        'success': data.get('success', None),
        'env_states': data.get('env_states', None),
    }
|
| 330 |
+
|
| 331 |
+
def get_flattened_obs_with_history(data, obs_keys, history_length, trajectory_length, stride = 5):
    """Build (observation, trajectory) training tensors with history context.

    For each key in ``obs_keys`` the per-step values are converted to a
    float32 tensor (with quaternion poses mapped to ortho6d) and passed to
    ``create_trajectory_with_history``; results are concatenated along dim 1.

    Parameters
    ----------
    data : Mapping
        Raw episode record (see ``get_data_dict`` for the expected layout).
    obs_keys : Iterable[str]
        Fields to process; currently only ``'tcp_pose'`` and ``'qpos'``.
    history_length, trajectory_length, stride : int
        Windowing parameters forwarded to ``create_trajectory_with_history``.

    Raises
    ------
    NotImplementedError
        For any key other than ``'tcp_pose'`` or ``'qpos'``.
    """
    data_dict = get_data_dict(data)

    obs_tensors = []
    trajectory_tensors = []
    for k in obs_keys:
        # The last step is dropped so observation/action lengths line up.
        if k == 'tcp_pose':
            tensor = transform_quat_to_ortho6d(torch.tensor(data_dict[k][:-1], dtype=torch.float32))
        elif k == 'qpos':
            tensor = torch.tensor(data_dict[k][:-1], dtype=torch.float32)
        else:
            raise NotImplementedError("Non-tcp_pose keys not yet implemented")
        # Windowing was duplicated in both branches before; do it once here.
        obs, trajectory = create_trajectory_with_history(tensor, history_length, trajectory_length, stride)
        obs_tensors.append(obs)
        trajectory_tensors.append(trajectory)
    return torch.cat(obs_tensors, dim = 1), torch.cat(trajectory_tensors, dim = 1)
|
| 360 |
+
|
| 361 |
+
def create_sample_indices(
    end_idx: int,
    sequence_length: int,
    pad_before: int = 0,
    pad_after: int = 0,
):
    """Enumerate sliding-window index tuples over one episode.

    Each row of the returned array is
    ``[buffer_start_idx, buffer_end_idx, sample_start_idx, sample_end_idx]``:
    the slice of the source buffer to copy and where it lands inside a
    fixed-length sample of ``sequence_length``. ``pad_before``/``pad_after``
    allow windows to hang off either end of the episode.
    """
    episode_length = end_idx
    windows = []
    # Window starts may be negative (front padding) or extend past the
    # episode end (back padding); range is inclusive of the last start.
    for start in range(-pad_before, episode_length - sequence_length + pad_after + 1):
        clipped_start = max(start, 0)
        clipped_end = min(start + sequence_length, episode_length)
        # Offsets of the real data inside the fixed-length sample.
        sample_start = clipped_start - start
        sample_end = sequence_length - ((start + sequence_length) - clipped_end)
        windows.append([clipped_start, clipped_end, sample_start, sample_end])
    return np.array(windows)
|
| 387 |
+
|
| 388 |
+
def sample_sequence(
    train_data,
    sequence_length,
    buffer_start_idx,
    buffer_end_idx,
    sample_start_idx,
    sample_end_idx,
):
    """Slice each array in ``train_data`` and edge-pad to ``sequence_length``.

    Parameters
    ----------
    train_data : dict[str, np.ndarray]
        Source arrays, all indexed along axis 0.
    sequence_length : int
        Length of every output sequence.
    buffer_start_idx, buffer_end_idx : int
        Slice of the source array to extract.
    sample_start_idx, sample_end_idx : int
        Where the extracted slice sits inside the output; positions before
        ``sample_start_idx`` are filled with the first value, positions at or
        after ``sample_end_idx`` with the last value.

    Returns
    -------
    dict[str, np.ndarray]
        Same keys as ``train_data``, each of length ``sequence_length``.
    """
    needs_padding = sample_start_idx > 0 or sample_end_idx < sequence_length
    padded = {}
    for key, source in train_data.items():
        chunk = source[buffer_start_idx:buffer_end_idx]
        if not needs_padding:
            # Window fits entirely inside the buffer; return the raw slice.
            padded[key] = chunk
            continue
        out = np.zeros((sequence_length,) + source.shape[1:], dtype=source.dtype)
        if sample_start_idx > 0:
            # Front padding: repeat the first real value.
            out[:sample_start_idx] = chunk[0]
        if sample_end_idx < sequence_length:
            # Back padding: repeat the last real value.
            out[sample_end_idx:] = chunk[-1]
        out[sample_start_idx:sample_end_idx] = chunk
        padded[key] = out
    return padded
|
| 427 |
+
|
| 428 |
+
def get_chunked_data(
    data,
    pred_horizon: int = 16,
    obs_horizon: int = 2,
    action_horizon: int = 8,
    pad_before: int = None,
    pad_after: int = None,
):
    """Chunk an episode into fixed-length (obs, action) training windows.

    Observations are the tcp poses (terminal step dropped, quaternions mapped
    to ortho6d); actions come straight from ``data['actions']``. Windows of
    ``pred_horizon`` are produced via ``create_sample_indices`` /
    ``sample_sequence`` with edge padding.

    Returns
    -------
    (torch.Tensor, torch.Tensor)
        Stacked observation and action windows, one entry per index window.
    """
    # Diffusion Policy defaults: pad so the first observation window and the
    # last action window are still usable. (The previous code used a stray
    # annotated assignment `pad_after: int = (...)` here.)
    if pad_before is None:
        pad_before = obs_horizon - 1
    if pad_after is None:
        pad_after = action_horizon - 1

    train_data = {}
    # Drop the terminal observation so obs/action lengths line up.
    train_data['obs'] = data['obs']['extra']['tcp_pose'][:-1]
    train_data['actions'] = data['actions']
    data_len = len(train_data['obs'])
    indices = create_sample_indices(data_len, pred_horizon, pad_before, pad_after)

    obs = []
    actions = []
    # Extract every window and convert to tensors.
    for buffer_start_idx, buffer_end_idx, sample_start_idx, sample_end_idx in indices:
        # Get normalized data using these indices.
        nsample = sample_sequence(
            train_data=train_data,
            sequence_length=pred_horizon,
            buffer_start_idx=buffer_start_idx,
            buffer_end_idx=buffer_end_idx,
            sample_start_idx=sample_start_idx,
            sample_end_idx=sample_end_idx,
        )
        obs.append(transform_quat_to_ortho6d(torch.tensor(nsample["obs"], dtype=torch.float32)))
        actions.append(torch.tensor(nsample["actions"], dtype=torch.float32))

    return torch.stack(obs), torch.stack(actions)
|
| 474 |
+
|
| 475 |
+
class ObservationData:
    """A structured class to handle different types of observation data.

    Collects proprioceptive ("agent") and task-level ("extra") observation
    fields from a raw nested dict and exposes tensor / hard-condition views.
    """
    def __init__(self, data=None):
        # Proprioceptive state; values stay None until populated.
        self.agent_data = {
            'qpos': None, # Joint positions
            'qvel': None, # Joint velocities
            'base_pose': None, # Base pose
        }
        # Task-level state read from obs['extra'].
        self.extra_data = {
            'tcp_pose': None, # End effector pose
            'goal_pose': None, # Goal pose
        }
        if data is not None:
            self.update_from_dict(data)

    def update_from_dict(self, data):
        """Update observation data from a dictionary.

        Only keys already declared in ``agent_data`` / ``extra_data`` are
        copied from ``data['obs']``; unknown keys are ignored.
        """
        if 'obs' in data:
            if 'agent' in data['obs']:
                for key in self.agent_data.keys():
                    if key in data['obs']['agent']:
                        self.agent_data[key] = data['obs']['agent'][key]
            if 'extra' in data['obs']:
                for key in self.extra_data.keys():
                    if key in data['obs']['extra']:
                        self.extra_data[key] = data['obs']['extra'][key]

    def to_tensor(self, device='cpu'):
        """Convert all populated fields to float32 tensors on ``device``.

        Returns a flat dict (agent and extra keys merged); unset fields are
        omitted rather than mapped to None.
        """
        tensor_data = {}
        for key, value in self.agent_data.items():
            if value is not None:
                tensor_data[key] = torch.tensor(value, dtype=torch.float32).to(device)
        for key, value in self.extra_data.items():
            if value is not None:
                tensor_data[key] = torch.tensor(value, dtype=torch.float32).to(device)
        return tensor_data

    def get_hard_conditions(self, device='cpu', do_normalize=True, normalizer=None):
        """Get hard conditions for both joint and end effector states.

        Returns a dict mapping state name -> {0: start value, -1: goal/end
        value}, i.e. boundary constraints for trajectory generation.
        Normalization is applied only when both ``do_normalize`` is true and
        a ``normalizer`` is supplied.
        """
        hard_conds = {}
        tensor_data = self.to_tensor(device)

        # Handle end effector pose: requires both tcp_pose and goal_pose.
        if tensor_data.get('tcp_pose') is not None and tensor_data.get('goal_pose') is not None:
            # assumes pose tensors are quaternion-based (converted to ortho6d
            # by the module-level helper) — TODO confirm layout
            initial_state = transform_quat_to_ortho6d(tensor_data['tcp_pose'])
            goal_state = transform_quat_to_ortho6d(tensor_data['goal_pose'])

            if do_normalize and normalizer is not None:
                # Normalize start and goal together so both use the same stats.
                temp_tensor = torch.cat([initial_state, goal_state], dim=0)
                normalized_state = normalizer.normalize(temp_tensor, 'tcp_pose')
                hard_conds['tcp_pose'] = {0: normalized_state[0], -1: normalized_state[-1]}
            else:
                # NOTE(review): unlike the qpos branch below, the unnormalized
                # branch stores the full tensors rather than their first/last
                # rows — confirm this asymmetry is intended.
                hard_conds['tcp_pose'] = {0: initial_state, -1: goal_state}

        # Handle joint positions
        if tensor_data.get('qpos') is not None:
            if do_normalize and normalizer is not None:
                normalized_state = normalizer.normalize(tensor_data['qpos'], 'qpos')
                hard_conds['qpos'] = {0: normalized_state[0], -1: normalized_state[-1]}
            else:
                hard_conds['qpos'] = {0: tensor_data['qpos'][0], -1: tensor_data['qpos'][-1]}

        return hard_conds
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/fcm/__init__.py
ADDED
|
File without changes
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/fcm/base_utils.py
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
import pickle
|
| 4 |
+
import os
|
| 5 |
+
def normalize_image(img, min_val=None, new_range=(0,255), to_uint8=True):
    """Linearly rescale ``img`` into ``new_range``.

    For 2-D input the whole array is rescaled; for 3-D input each channel is
    rescaled independently (reduction over H and W only).

    Parameters
    ----------
    img : np.ndarray
        2-D (H, W) or 3-D (H, W, C) image.
    min_val : scalar or None
        Lower bound to rescale from. If None, the per-channel minimum of the
        image is used.
    new_range : tuple
        (low, high) of the output range.
    to_uint8 : bool
        Cast the result to uint8 when True.
    """
    axis = None if img.ndim == 2 else (0, 1)
    if min_val is None:
        old_min = np.min(img, axis=axis)
    else:
        # BUG FIX: previously the supplied min_val was ignored and an array of
        # zeros was used unconditionally (only coincidentally correct for the
        # existing min_val=0 call site); honour the caller's minimum instead.
        old_min = np.full(img.shape[-1], min_val)
    new_image = ((img - old_min) * (new_range[1]-(new_range[0])) /
                 (np.max(img, axis=axis) - old_min)) + new_range[0]
    if to_uint8:
        new_image = new_image.astype(np.uint8)
    return new_image
|
| 16 |
+
|
| 17 |
+
def visualize_images(img_path):
    """Load a pickled 7-channel image and display RGB/depth/X/Y panes.

    The pickled payload is reshaped to rows of 640 pixels with 7 channels
    (labelled RGBXYZS below): channels 0-2 RGB, 3-4 XY, 5 depth. Blocks on
    ``cv2.waitKey`` until a key is pressed.

    Returns
    -------
    (rgb_image, depth_img, xy_image) as prepared for display.
    """
    # RGBXYZS
    with open(img_path, 'rb') as f:
        sample = pickle.load(f)
    multi_img = np.array(sample).reshape((-1, 640, 7))
    rgb_image = cv2.cvtColor(multi_img[..., :3].astype(np.uint8), cv2.COLOR_RGB2BGR)
    # Depth is normalized from a fixed floor of 0 so empty space stays dark.
    depth_img = normalize_image(multi_img[..., 5], 0)
    xy_image = normalize_image(multi_img[..., 3:5])

    # Top row: RGB | depth; bottom row: X | Y (grayscale promoted to 3 channels).
    rgb_depth = cv2.hconcat((rgb_image,cv2.cvtColor(depth_img, cv2.COLOR_GRAY2RGB)))
    xy_image = cv2.hconcat((cv2.cvtColor(xy_image[..., 0], cv2.COLOR_GRAY2RGB),
                            cv2.cvtColor(xy_image[..., 1], cv2.COLOR_GRAY2RGB)))
    final_image = cv2.vconcat((rgb_depth, xy_image))

    cv2.imshow('RGB-Depth-X-Y', final_image)
    cv2.waitKey(0)
    return rgb_image, depth_img, xy_image
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def get_image_pkl(img_path):
    """Load a pickled image buffer and reshape it to (N, 640, 7)."""
    with open(img_path, 'rb') as fh:
        payload = pickle.load(fh)
    return np.array(payload).reshape((-1, 640, 7))
|
| 40 |
+
|
| 41 |
+
def sort_images(res_dict, key):
    """Return indices that order the entries of ``res_dict`` by ``entry[key]``."""
    return np.argsort([entry[key] for entry in res_dict])
|
| 46 |
+
|
| 47 |
+
def sort_images_real(res_dict, key):
    # NOTE(review): this definition is shadowed by a later `sort_images_real`
    # in the same module and is therefore dead code. Also, `key` is unused and
    # `res_dict[k]['scores'][k]` indexes the scores by the outer dict key `k`,
    # not by `key` — looks like a bug; confirm before reviving this version.
    key_vals = []
    for k in sorted(res_dict):
        key_vals.append(res_dict[k]['scores'][k])
    return np.argsort(key_vals)
|
| 52 |
+
|
| 53 |
+
def sort_images_target(list_dict, key):
    """Return indices ordering the entries (iterated by sorted dict key) on ``scores[key]``."""
    score_values = [list_dict[cnt]['scores'][key] for cnt in sorted(list_dict)]
    return np.argsort(score_values)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def sort_images_real(list_dict, key):
    """Return indices that order the image dicts by their ``key`` value."""
    return np.argsort([img_dic[key] for img_dic in list_dict])
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def rename_files(folder_path ="samples_v2/camera_debug", count=None):
    """Prefix every file in ``folder_path`` with an incrementing counter.

    Parameters
    ----------
    folder_path : str
        Directory whose files are renamed in place (destructive).
    count : int or None
        Starting counter value; defaults to 1 when None.
    """
    # BUG FIX: the previous `count = count or 1` silently turned an explicit
    # count=0 into 1 (falsy-zero bug); only default when None was passed.
    if count is None:
        count = 1
    for f in os.listdir(folder_path):
        img_name = os.path.join(folder_path, f)
        os.rename(img_name, os.path.join(folder_path, f"{count}_{f}"))
        count += 1
|
| 76 |
+
|
| 77 |
+
def file_rename_real1():
    """One-off migration: rename 'tall' view files in ``real_samples/d1``.

    Files whose path contains 'tall' and whose name does not already start
    with a numeric index are renamed with 'tall' -> 'side' and their
    underscore-separated fields reordered to ``<field2>_<field1>_<last>``.
    NOTE(review): hard-coded path and destructive ``os.rename`` — run once.
    """
    root_folder = "real_samples/d1"
    files = os.listdir(root_folder)
    for f in files:
        img_name = os.path.join(root_folder, f)
        # Skip files already migrated (they begin with a numeric index).
        if 'tall' in img_name and not f.split('_')[0].isdigit():
            newimg = f.replace('tall', 'side').split('_')
            new_img_name = '_'.join([newimg[2], newimg[1], newimg[-1]])
            new_img_name = os.path.join(root_folder, new_img_name)
            print(img_name, new_img_name)
            os.rename(img_name,new_img_name )
|
| 91 |
+
|
| 92 |
+
def file_rename_real2():
    """One-off migration: regroup files in ``real_samples/ds1_removal_clean``.

    Files named ``<idx>_..._<view>.<ext>`` are renamed to
    ``sc<task>_<n>_removal_<view>.pkl`` where the task id advances every 5
    renames. NOTE(review): hard-coded path and destructive ``os.rename`` —
    run once only.
    """
    root_folder = "real_samples/ds1_removal_clean"
    files = os.listdir(root_folder)
    files_dict = {}
    for f in files:
        img_name = os.path.join(root_folder, f)
        # Group files by leading numeric index; within a group, key by the
        # view name (token between the final '_' and the extension).
        img_idx = int(f.split('_')[0])
        if img_idx not in files_dict:
            files_dict[img_idx] = {}
        files_dict[img_idx][f.split('_')[-1].split('.')[0]] = img_name
    sf_dic = {}  # unused accumulator, kept as-is
    tcnt = 0
    fcnt = 0
    for i, k in enumerate(sorted(files_dict.keys())):

        for key, fname in files_dict[k].items():
            task_id = tcnt//5
            new_fn = os.path.join(os.path.dirname(fname), f"sc{task_id}_{fcnt}_removal_{key}.pkl")
            os.rename(fname,new_fn)

            tcnt += 1
            fcnt += 1
            # NOTE(review): fcnt wraps back to 0 every 5 renames while tcnt
            # keeps counting — confirm this matches the intended on-disk
            # numbering scheme.
            if fcnt % 5 == 0 and fcnt != 0:
                fcnt = 0
            print(new_fn)
|
| 127 |
+
|
| 128 |
+
def file_rename_real3():
    """One-off migration: rename shift samples in ``real_samples/ds2_shift_clean``.

    Files with a numeric third field below 33 that are not already prefixed
    with 'sc' are renamed to ``sc4_shift_<idx-29>_<last_field>``.
    NOTE(review): hard-coded path, destructive ``os.rename``, and the magic
    offsets 33/29 encode a specific capture session — run once only.
    """
    root_folder = "real_samples/ds2_shift_clean"
    files = os.listdir(root_folder)
    for f in files:
        img_name = os.path.join(root_folder, f)
        idx = int(f.split('_')[2])
        if idx < 33 and 'sc' not in f:
            new_file = os.path.join(root_folder, f"sc4_shift_{idx-29}_{f.split('_')[-1]}")
            os.rename(img_name, new_file)
|
| 147 |
+
|
| 148 |
+
def test_images():
    """Dump the RGB channels of every pickled sample in ds2_shift as PNGs.

    NOTE(review): reads from a hard-coded input folder and writes PNGs to
    ``output/test`` — a manual inspection script, not a unit test.
    """
    root_folder = "real_samples/ds2_shift"
    save_root_folder = f"output/test"
    os.makedirs(save_root_folder, exist_ok=True)

    files = os.listdir(root_folder)
    for f in files:
        img_name = os.path.join(root_folder, f)
        with open(img_name, 'rb') as fs:
            image = pickle.load(fs)
        # Drop any singleton batch dimension the capture pipeline added.
        image = np.squeeze(image)

        # for writing the scores on the image
        im = cv2.cvtColor(image[..., :3].astype(np.uint8), cv2.COLOR_RGB2BGR)

        cv2.imwrite(os.path.join(save_root_folder, os.path.basename(img_name).split('.')[0]+".png"), im)
|
| 164 |
+
|
| 165 |
+
def test_images_tile():
    """Tile side/top/wrist RGB views of each sample into one PNG.

    For every pickle in the 'side' folder, loads the matching 'top' (named
    'tall' on disk) and 'wrist' files from sibling folders, converts each to
    BGR, and writes the three views horizontally concatenated to
    ``output/test``. NOTE(review): hard-coded paths; manual inspection script.
    """
    root_folder = "real_samples/ds2_shift_clean/side"
    save_root_folder = f"output/test"
    os.makedirs(save_root_folder, exist_ok=True)

    files = os.listdir(root_folder)
    for f in files:
        img_name = os.path.join(root_folder, f)
        with open(img_name, 'rb') as fs:
            side_image = pickle.load(fs)
        side_image = np.squeeze(side_image)
        # Top view lives in a sibling folder and uses 'tall' in the filename.
        img_name = os.path.join(root_folder.replace('side', 'top'), f.replace('side', 'tall'))
        with open(img_name, 'rb') as fs:
            top_image = pickle.load(fs)
        top_image = np.squeeze(top_image)
        img_name = os.path.join(root_folder.replace('side', 'wrist'), f.replace('side', 'wrist'))
        with open(img_name, 'rb') as fs:
            wrist_image = pickle.load(fs)
        wrist_image = np.squeeze(wrist_image)

        # for writing the scores on the image
        side_image = cv2.cvtColor(side_image[..., :3].astype(np.uint8), cv2.COLOR_RGB2BGR)
        top_image = cv2.cvtColor(top_image[..., :3].astype(np.uint8), cv2.COLOR_RGB2BGR)
        wrist_image = cv2.cvtColor(wrist_image[..., :3].astype(np.uint8), cv2.COLOR_RGB2BGR)

        fimg = cv2.hconcat([side_image, top_image, wrist_image])
        cv2.imwrite(os.path.join(save_root_folder, os.path.basename(img_name).split('.')[0]+".png"), fimg)
|
| 192 |
+
|
| 193 |
+
def get_real_image(img_name):
    """Unpickle an image file and drop any singleton dimensions."""
    with open(img_name, 'rb') as fh:
        raw = pickle.load(fh)
    return np.squeeze(raw)
|
| 199 |
+
|
| 200 |
+
def get_dict_variation(res_dict):
    """Re-key ``{count: {variation: {...}}}`` as ``{variation: {count: {...}}}``.

    Each inner payload is reduced to its 'side' image (stored under 'im')
    and its 'scores' entry.
    """
    flipped = {}
    for cnt, variations in res_dict.items():
        for v, payload in variations.items():
            flipped.setdefault(v, {})[cnt] = {
                'im': payload['side'],
                'scores': payload['scores'],
            }
    return flipped
|
| 209 |
+
|
| 210 |
+
def parse_image_names(root_folder):
    """Index capture folders as ``{task: {count: {variation: {'side','top'}}}}``.

    Folder names are assumed underscore-delimited with the task at field 5,
    the count at field 2 and the variation as the last field — TODO confirm
    against the actual capture naming scheme.
    """
    fls = os.listdir(root_folder)
    f_dict = {}
    side = None
    top = None
    for f in fls:
        fl = f.split('_')
        # Strip quotes and any '-suffix' from the task token.
        task = fl[5].replace('\'', '').split('-')[0]
        count = int(fl[2])
        variation = int(fl[-1])
        # NOTE(review): `side`/`top` are initialized once outside the loop, so
        # a folder missing one view silently inherits the previous folder's
        # path — likely unintended; confirm.
        for _f in os.listdir(os.path.join(root_folder, f)):
            if 'reset' not in _f and 'pkl' in _f:
                if 'top' in _f:
                    top = os.path.join(root_folder, f, _f)
                else:
                    side = os.path.join(root_folder, f, _f)
        if task not in f_dict:
            f_dict[task] = {}
        if count not in f_dict[task]:
            f_dict[task][count] = {}
        if variation not in f_dict[task][count]:
            f_dict[task][count][variation] = {'side': side, 'top': top}
    return f_dict
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
def write_stats(heading, stats_list, file_name):
    """Write ``heading`` (newline-terminated) then each stats string verbatim.

    Note: entries in ``stats_list`` are written as-is — include trailing
    newlines in them if line separation is wanted.
    """
    with open(file_name, 'wt') as out:
        out.write(f"{heading}\n")
        out.writelines(stats_list)
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/fcm/pyramid.py
ADDED
|
@@ -0,0 +1,1662 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from operator import mul
|
| 2 |
+
from scipy import signal
|
| 3 |
+
import functools
|
| 4 |
+
import numpy as np
|
| 5 |
+
import warnings
|
| 6 |
+
|
| 7 |
+
from scipy.signal import convolve
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def steer_to_harmonics_mtx(harmonics, angles=None, even_phase=True):
    '''Compute a steering matrix

    This maps a directional basis set onto the angular Fourier harmonics.

    Parameters
    ----------
    harmonics: `array_like`
        array specifying the angular harmonics contained in the steerable basis/filters.
    angles: `array_like` or None
        vector specifying the angular position of each filter (in radians). If None, defaults to
        `pi * np.arange(numh) / numh`, where `numh = harmonics.size + np.count_nonzero(harmonics)`
    even_phase : `bool`
        specifies whether the harmonics are cosine or sine phase aligned about those positions.

    Returns
    -------
    imtx : `np.array`
        This matrix is suitable for passing to the function `steer`.

    '''
    # Accept any array_like (the previous implementation read `.size` /
    # iterated directly, so a plain list or tuple raised AttributeError
    # even though the docstring promised `array_like`).
    harmonics = np.asarray(harmonics)

    # each non-zero harmonic contributes a cos and a sin column, a zero
    # harmonic contributes a single constant column
    numh = harmonics.size + np.count_nonzero(harmonics)
    if angles is None:
        angles = np.pi * np.arange(numh) / numh
    else:
        angles = np.asarray(angles)

    # Compute inverse matrix, which maps to Fourier components onto
    # steerable basis
    imtx = np.zeros((angles.size, numh))
    col = 0
    for h in harmonics:
        args = h * angles
        if h == 0:
            imtx[:, col] = np.ones(angles.shape)
            col += 1
        elif even_phase:
            imtx[:, col] = np.cos(args)
            imtx[:, col + 1] = np.sin(args)
            col += 2
        else:  # odd phase
            imtx[:, col] = np.sin(args)
            imtx[:, col + 1] = -1.0 * np.cos(args)
            col += 2

    # warn (rather than fail) when the angles/harmonics combination is
    # degenerate -- pinv below still produces a least-squares solution
    r = np.linalg.matrix_rank(imtx)
    if r < np.min(imtx.shape):
        warnings.warn("Matrix is not full rank")

    return np.linalg.pinv(imtx)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def steer(basis, angle, harmonics=None, steermtx=None, return_weights=False, even_phase=True):
    '''Steer BASIS to the specified ANGLE.

    Parameters
    ----------
    basis : `array_like`
        array whose columns are vectorized rotated copies of a steerable function, or the responses
        of a set of steerable filters.
    angle : `array_like` or `int`
        scalar or column vector the size of the basis. specifies the angle(s) (in radians) to
        steer to
    harmonics : `list` or None
        a list of harmonic numbers indicating the angular harmonic content of the basis. if None
        (default), N even or odd low frequencies, as for derivative filters
    steermtx : `array_like` or None.
        matrix which maps the filters onto Fourier series components (ordered [cos0 cos1 sin1 cos2
        sin2 ... sinN]). See steer_to_harmonics_mtx function for more details. If None (default),
        assumes cosine phase harmonic components, and filter positions at 2pi*n/N.
    return_weights : `bool`
        whether to return the weights or not.
    even_phase : `bool`
        specifies whether the harmonics are cosine or sine phase aligned about those positions.

    Returns
    -------
    res : `np.array`
        the resteered basis
    steervect : `np.array`
        the weights used to resteer the basis. only returned if `return_weights` is True
    '''

    # number of basis columns; determines how many harmonic terms we need
    num = basis.shape[1]

    # normalize a scalar angle to a 1-element array; otherwise ANGLE must be
    # a column vector with one entry per basis row
    if isinstance(angle, (int, float)):
        angle = np.array([angle])
    else:
        if angle.shape[0] != basis.shape[0] or angle.shape[1] != 1:
            raise Exception("""ANGLE must be a scalar, or a column vector
the size of the basis elements""")

    # If HARMONICS is not specified, assume derivatives.
    if harmonics is None:
        harmonics = np.arange(1 - (num % 2), num, 2)

    if len(harmonics.shape) == 1 or harmonics.shape[0] == 1:
        # reshape to column matrix
        harmonics = harmonics.reshape(harmonics.shape[0], 1)
    elif harmonics.shape[0] != 1 and harmonics.shape[1] != 1:
        raise Exception('input parameter HARMONICS must be 1D!')

    # each non-zero harmonic needs a cos and a sin column; a zero harmonic
    # needs only one (constant) column -- total must match the basis width
    if 2 * harmonics.shape[0] - (harmonics == 0).sum() != num:
        raise Exception('harmonics list is incompatible with basis size!')

    # If STEERMTX not passed, assume evenly distributed cosine-phase filters:
    if steermtx is None:
        steermtx = steer_to_harmonics_mtx(harmonics, np.pi * np.arange(num) / num,
                                          even_phase=even_phase)

    steervect = np.zeros((angle.shape[0], num))
    # angular arguments, one column per non-zero harmonic
    arg = angle * harmonics[np.nonzero(harmonics)[0]].T
    if all(harmonics):
        # no DC term: cos/sin alternate across all columns
        steervect[:, range(0, num, 2)] = np.cos(arg)
        steervect[:, range(1, num, 2)] = np.sin(arg)
    else:
        # first column is the DC (zero-harmonic) term
        # NOTE(review): np.ones((n, 1)) only broadcasts into this 1-D column
        # slice when n == 1 -- confirm the multi-angle path is exercised
        steervect[:, 0] = np.ones((arg.shape[0], 1))
        steervect[:, range(1, num, 2)] = np.cos(arg)
        steervect[:, range(2, num, 2)] = np.sin(arg)

    # map the Fourier-domain weights onto weights for the actual basis
    steervect = np.dot(steervect, steermtx)

    if steervect.shape[0] > 1:
        # one angle per row: weight each response then reduce across rows
        tmp = np.dot(basis, steervect)
        res = sum(tmp).T
    else:
        res = np.dot(basis, steervect.T)

    if return_weights:
        return res, np.array(steervect).reshape(num)
    else:
        return res
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def convert_pyr_coeffs_to_pyr(pyr_coeffs):
    """this function takes a 'new pyramid' and returns the coefficients as a list

    this is to enable backwards compatibility

    Parameters
    ----------
    pyr_coeffs : `dict`
        The `pyr_coeffs` attribute of a `pyramid`. Not modified by this call.

    Returns
    -------
    coeffs : `list`
        list of `np.array`, which contains the pyramid coefficients in each band, in order from
        bottom of the pyramid to top (going through the orientations in order)
    highpass : `np.array` or None
        either the residual highpass from the pyramid or, if that doesn't exist, None
    lowpass : `np.array` or None
        either the residual lowpass from the pyramid or, if that doesn't exist, None

    """
    # Work on a shallow copy: the previous implementation popped the residual
    # keys out of the caller's dict, silently corrupting the pyramid's own
    # `pyr_coeffs` attribute as a side effect.
    coeff_dict = dict(pyr_coeffs)
    highpass = coeff_dict.pop('residual_highpass', None)
    lowpass = coeff_dict.pop('residual_lowpass', None)
    # remaining keys are (scale, orientation) tuples; sorting them yields
    # bottom-to-top, orientation-major order
    coeffs = [band for _, band in sorted(coeff_dict.items(), key=lambda kv: kv[0])]
    return coeffs, highpass, lowpass
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def max_pyr_height(imsz, filtsz):
    '''Compute maximum pyramid height for given image and filter sizes.

    Specifically, this computes the number of corrDn operations that can be sequentially performed
    when subsampling by a factor of 2.

    Parameters
    ----------
    imsz : `tuple` or `int`
        the size of the image (should be 2-tuple if image is 2d, `int` if it's 1d)
    filtsz : `tuple` or `int`
        the size of the filter (should be 2-tuple if image is 2d, `int` if it's 1d)

    Returns
    -------
    max_pyr_height : `int`
        The maximum height of the pyramid
    '''
    # both arguments must be ints, or both must be tuples
    assert (isinstance(imsz, int) and isinstance(filtsz, int)) or (
        isinstance(imsz, tuple) and isinstance(filtsz, tuple))

    # a 1D image given as a (possibly degenerate) tuple collapses to the
    # integer case
    if isinstance(imsz, tuple) and (len(imsz) == 1 or 1 in imsz):
        imsz = functools.reduce(mul, imsz)
        filtsz = functools.reduce(mul, filtsz)

    # count halvings iteratively instead of recursing
    height = 0
    if isinstance(imsz, int):
        while imsz >= filtsz:
            height += 1
            imsz //= 2
        return height

    # 2D case: stop once the smaller image dimension can no longer hold the
    # larger filter dimension
    while min(imsz) >= max(filtsz):
        height += 1
        imsz = (imsz[0] // 2, imsz[1] // 2)
    return height
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def parse_filter(filt, normalize=True):
    """Resolve a filter name or array-like into a column-shaped filter.

    Used during pyramid construction.

    Parameters
    ----------
    filt : `str` or `array_like`.
        Name of the filter, as accepted by `named_filter`, or array to use as a filter. See that
        function for acceptable names.
    normalize : `bool`
        If True (default), rescale the filter so its entries sum to 1.

    Returns
    -------
    filt : `array` or `dict`
        If `filt` was one of the steerable pyramids, then this will be a dictionary
        containing the various steerable pyramid filters. Else, it will be an array containing
        the specified filter.

    See also
    --------
    named_filter : function that converts `filter_name` str into an array or dict of arrays.
    """
    if isinstance(filt, str):
        filt = named_filter(filt)
    elif isinstance(filt, (np.ndarray, list, tuple)):
        filt = np.array(filt)
        # force column orientation: 1-D vectors and single-row 2-D arrays
        # both become (N, 1)
        if filt.ndim == 1 or (filt.ndim == 2 and filt.shape[0] == 1):
            filt = filt.reshape(-1, 1)

    # TODO expand normalization options
    # NOTE(review): presumably the steerable-pyramid names skip this branch
    # upstream, since a dict has no .sum() -- confirm against callers
    if normalize:
        filt = filt / filt.sum()

    return filt
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
def binomial_filter(order_plus_one):
    '''returns a vector of binomial coefficients of order (order_plus_one-1)'''
    if order_plus_one < 2:
        raise Exception("Error: order_plus_one argument must be at least 2")

    # repeatedly convolve the two-tap [0.5, 0.5] kernel with itself; after
    # k convolutions the taps are the (scaled) binomial coefficients
    two_tap = np.array([[0.5], [0.5]])
    result = two_tap
    for _ in range(order_plus_one - 2):
        result = convolve(two_tap, result)
    return result
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
def named_filter(name):
    '''Some standard 1D filter kernels.

    These are returned as column vectors (shape [N, 1]) and scaled such that their L2-norm is 1.0 (except for 'binomN')

    * `'binomN'` - binomial coefficient filter of order N-1
    * `'haar'` - Haar wavelet
    * `'qmf8'`, `'qmf12'`, `'qmf16'` - Symmetric Quadrature Mirror Filters [1]_
    * `'daub2'`, `'daub3'`, `'daub4'` - Daubechies wavelet [2]_
    * `'qmf5'`, `'qmf9'`, `'qmf13'` - Symmetric Quadrature Mirror Filters [3]_, [4]_
    * `'spN_filters'` - steerable pyramid filters of order N (N must be one of {0, 1, 3, 5}) [5]_,
      [6]_

    References
    ----------
    .. [1] J D Johnston, "A filter family designed for use in quadrature mirror filter banks",
       Proc. ICASSP, pp 291-294, 1980.
    .. [2] I Daubechies, "Orthonormal bases of compactly supported wavelets", Commun. Pure Appl.
       Math, vol. 42, pp 909-996, 1988.
    .. [3] E P Simoncelli, "Orthogonal sub-band image transforms", PhD Thesis, MIT Dept. of Elec.
       Eng. and Comp. Sci. May 1988. Also available as: MIT Media Laboratory Vision and Modeling
       Technical Report #100.
    .. [4] E P Simoncelli and E H Adelson, "Subband image coding", Subband Transforms, chapter 4,
       ed. John W Woods, Kluwer Academic Publishers, Norwell, MA, 1990, pp 143--192.
    .. [5] E P Simoncelli and W T Freeman, "The Steerable Pyramid: A Flexible Architecture for
       Multi-Scale Derivative Computation," Second Int'l Conf on Image Processing, Washington, DC,
       Oct 1995.
    .. [6] A Karasaridis and E P Simoncelli, "A Filter Design Technique for Steerable Pyramid
       Image Transforms", ICASSP, Atlanta, GA, May 1996.

    '''

    # 'binomN': the trailing digits select the length; sqrt(2) rescales the
    # unit-sum binomial taps so the kernel's L2-norm is 1
    if name.startswith("binom"):
        kernel = np.sqrt(2) * binomial_filter(int(name[5:]))

    # steerable pyramid names return a dict of kernels, not a single column
    elif name.startswith('sp'):
        kernel = steerable_filters(name)

    elif name == "qmf5":
        kernel = np.array([[-0.076103], [0.3535534], [0.8593118], [0.3535534], [-0.076103]])
    elif name == "qmf9":
        kernel = np.array([[0.02807382], [-0.060944743], [-0.073386624], [0.41472545], [0.7973934],
                           [0.41472545], [-0.073386624], [-0.060944743], [0.02807382]])
    elif name == "qmf13":
        kernel = np.array([[-0.014556438], [0.021651438], [0.039045125], [-0.09800052],
                           [-0.057827797], [0.42995453], [0.7737113], [0.42995453], [-0.057827797],
                           [-0.09800052], [0.039045125], [0.021651438], [-0.014556438]])
    # the qmf8/12/16 tables are stored unit-sum and rescaled by sqrt(2)
    elif name == "qmf8":
        kernel = np.sqrt(2) * np.array([[0.00938715], [-0.07065183], [0.06942827], [0.4899808],
                                        [0.4899808], [0.06942827], [-0.07065183], [0.00938715]])
    elif name == "qmf12":
        kernel = np.array([[-0.003809699], [0.01885659], [-0.002710326], [-0.08469594],
                           [0.08846992], [0.4843894], [0.4843894], [0.08846992],
                           [-0.08469594], [-0.002710326], [0.01885659], [-0.003809699]])
        kernel *= np.sqrt(2)
    elif name == "qmf16":
        kernel = np.array([[0.001050167], [-0.005054526], [-0.002589756], [0.0276414],
                           [-0.009666376], [-0.09039223], [0.09779817], [0.4810284], [0.4810284],
                           [0.09779817], [-0.09039223], [-0.009666376], [0.0276414],
                           [-0.002589756], [-0.005054526], [0.001050167]])
        kernel *= np.sqrt(2)
    elif name == "haar":
        kernel = np.array([[1], [1]]) / np.sqrt(2)
    elif name == "daub2":
        kernel = np.array([[0.482962913145], [0.836516303738], [0.224143868042],
                           [-0.129409522551]])
    elif name == "daub3":
        kernel = np.array([[0.332670552950], [0.806891509311], [0.459877502118], [-0.135011020010],
                           [-0.085441273882], [0.035226291882]])
    elif name == "daub4":
        kernel = np.array([[0.230377813309], [0.714846570553], [0.630880767930],
                           [-0.027983769417], [-0.187034811719], [0.030841381836],
                           [0.032883011667], [-0.010597401785]])
    elif name == "gauss5":  # for backward-compatibility
        kernel = np.sqrt(2) * np.array([[0.0625], [0.25], [0.375], [0.25], [0.0625]])
    elif name == "gauss3":  # for backward-compatibility
        kernel = np.sqrt(2) * np.array([[0.25], [0.5], [0.25]])
    else:
        raise Exception("Error: Unknown filter name: %s" % (name))

    return kernel
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
def steerable_filters(filter_name):
    '''Steerable pyramid filters.

    Transform described in [1]_, filter kernel design described in [2]_.

    Only the four built-in sets ('sp0_filters', 'sp1_filters', 'sp3_filters',
    'sp5_filters') are supported; filter files are not.

    References
    ----------
    .. [1] E P Simoncelli and W T Freeman, "The Steerable Pyramid: A Flexible Architecture for
       Multi-Scale Derivative Computation," Second Int'l Conf on Image Processing, Washington, DC,
       Oct 1995.
    .. [2] A Karasaridis and E P Simoncelli, "A Filter Design Technique for Steerable Pyramid
       Image Transforms", ICASSP, Atlanta, GA, May 1996.
    '''
    # validate up front, then dispatch
    supported = ('sp0_filters', 'sp1_filters', 'sp3_filters', 'sp5_filters')
    if filter_name not in supported:
        raise Exception("filter parameters value %s not supported" % (filter_name))
    if filter_name == 'sp0_filters':
        return _sp0_filters()
    if filter_name == 'sp1_filters':
        return _sp1_filters()
    if filter_name == 'sp3_filters':
        return _sp3_filters()
    return _sp5_filters()
|
| 367 |
+
|
| 368 |
+
|
| 369 |
+
def _sp0_filters():
    """Return the filter set for the order-0 (single band) steerable pyramid.

    All kernels are hard-coded constant tables.

    Returns
    -------
    filters : `dict`
        keys: 'harmonics' (angular harmonics present, here [0]), 'lo0filt'
        (7x7 initial lowpass), 'lofilt' (13x13 recursive lowpass), 'mtx'
        (steering matrix, trivial 1x1), 'hi0filt' (9x9 initial highpass),
        'bfilts' (band filter taps, reshaped to one column per band).
    """
    filters = {}
    filters['harmonics'] = np.array([0])
    # 7x7 lowpass applied once to the input image
    filters['lo0filt'] = (
        np.array([[-4.514000e-04, -1.137100e-04, -3.725800e-04, -3.743860e-03,
                   -3.725800e-04, -1.137100e-04, -4.514000e-04],
                  [-1.137100e-04, -6.119520e-03, -1.344160e-02, -7.563200e-03,
                   -1.344160e-02, -6.119520e-03, -1.137100e-04],
                  [-3.725800e-04, -1.344160e-02, 6.441488e-02, 1.524935e-01,
                   6.441488e-02, -1.344160e-02, -3.725800e-04],
                  [-3.743860e-03, -7.563200e-03, 1.524935e-01, 3.153017e-01,
                   1.524935e-01, -7.563200e-03, -3.743860e-03],
                  [-3.725800e-04, -1.344160e-02, 6.441488e-02, 1.524935e-01,
                   6.441488e-02, -1.344160e-02, -3.725800e-04],
                  [-1.137100e-04, -6.119520e-03, -1.344160e-02, -7.563200e-03,
                   -1.344160e-02, -6.119520e-03, -1.137100e-04],
                  [-4.514000e-04, -1.137100e-04, -3.725800e-04, -3.743860e-03,
                   -3.725800e-04, -1.137100e-04, -4.514000e-04]]))
    # 13x13 lowpass used at every recursive pyramid level
    filters['lofilt'] = (
        np.array([[-2.257000e-04, -8.064400e-04, -5.686000e-05, 8.741400e-04,
                   -1.862800e-04, -1.031640e-03, -1.871920e-03, -1.031640e-03,
                   -1.862800e-04, 8.741400e-04, -5.686000e-05, -8.064400e-04,
                   -2.257000e-04],
                  [-8.064400e-04, 1.417620e-03, -1.903800e-04, -2.449060e-03,
                   -4.596420e-03, -7.006740e-03, -6.948900e-03, -7.006740e-03,
                   -4.596420e-03, -2.449060e-03, -1.903800e-04, 1.417620e-03,
                   -8.064400e-04],
                  [-5.686000e-05, -1.903800e-04, -3.059760e-03, -6.401000e-03,
                   -6.720800e-03, -5.236180e-03, -3.781600e-03, -5.236180e-03,
                   -6.720800e-03, -6.401000e-03, -3.059760e-03, -1.903800e-04,
                   -5.686000e-05],
                  [8.741400e-04, -2.449060e-03, -6.401000e-03, -5.260020e-03,
                   3.938620e-03, 1.722078e-02, 2.449600e-02, 1.722078e-02,
                   3.938620e-03, -5.260020e-03, -6.401000e-03, -2.449060e-03,
                   8.741400e-04],
                  [-1.862800e-04, -4.596420e-03, -6.720800e-03, 3.938620e-03,
                   3.220744e-02, 6.306262e-02, 7.624674e-02, 6.306262e-02,
                   3.220744e-02, 3.938620e-03, -6.720800e-03, -4.596420e-03,
                   -1.862800e-04],
                  [-1.031640e-03, -7.006740e-03, -5.236180e-03, 1.722078e-02,
                   6.306262e-02, 1.116388e-01, 1.348999e-01, 1.116388e-01,
                   6.306262e-02, 1.722078e-02, -5.236180e-03, -7.006740e-03,
                   -1.031640e-03],
                  [-1.871920e-03, -6.948900e-03, -3.781600e-03, 2.449600e-02,
                   7.624674e-02, 1.348999e-01, 1.576508e-01, 1.348999e-01,
                   7.624674e-02, 2.449600e-02, -3.781600e-03, -6.948900e-03,
                   -1.871920e-03],
                  [-1.031640e-03, -7.006740e-03, -5.236180e-03, 1.722078e-02,
                   6.306262e-02, 1.116388e-01, 1.348999e-01, 1.116388e-01,
                   6.306262e-02, 1.722078e-02, -5.236180e-03, -7.006740e-03,
                   -1.031640e-03],
                  [-1.862800e-04, -4.596420e-03, -6.720800e-03, 3.938620e-03,
                   3.220744e-02, 6.306262e-02, 7.624674e-02, 6.306262e-02,
                   3.220744e-02, 3.938620e-03, -6.720800e-03, -4.596420e-03,
                   -1.862800e-04],
                  [8.741400e-04, -2.449060e-03, -6.401000e-03, -5.260020e-03,
                   3.938620e-03, 1.722078e-02, 2.449600e-02, 1.722078e-02,
                   3.938620e-03, -5.260020e-03, -6.401000e-03, -2.449060e-03,
                   8.741400e-04],
                  [-5.686000e-05, -1.903800e-04, -3.059760e-03, -6.401000e-03,
                   -6.720800e-03, -5.236180e-03, -3.781600e-03, -5.236180e-03,
                   -6.720800e-03, -6.401000e-03, -3.059760e-03, -1.903800e-04,
                   -5.686000e-05],
                  [-8.064400e-04, 1.417620e-03, -1.903800e-04, -2.449060e-03,
                   -4.596420e-03, -7.006740e-03, -6.948900e-03, -7.006740e-03,
                   -4.596420e-03, -2.449060e-03, -1.903800e-04, 1.417620e-03,
                   -8.064400e-04],
                  [-2.257000e-04, -8.064400e-04, -5.686000e-05, 8.741400e-04,
                   -1.862800e-04, -1.031640e-03, -1.871920e-03, -1.031640e-03,
                   -1.862800e-04, 8.741400e-04, -5.686000e-05, -8.064400e-04,
                   -2.257000e-04]]))
    # only one band, so the steering matrix is the 1x1 identity
    filters['mtx'] = np.array([1.000000])
    # 9x9 highpass applied once to the input image
    filters['hi0filt'] = (
        np.array([[5.997200e-04, -6.068000e-05, -3.324900e-04, -3.325600e-04,
                   -2.406600e-04, -3.325600e-04, -3.324900e-04, -6.068000e-05,
                   5.997200e-04],
                  [-6.068000e-05, 1.263100e-04, 4.927100e-04, 1.459700e-04,
                   -3.732100e-04, 1.459700e-04, 4.927100e-04, 1.263100e-04,
                   -6.068000e-05],
                  [-3.324900e-04, 4.927100e-04, -1.616650e-03, -1.437358e-02,
                   -2.420138e-02, -1.437358e-02, -1.616650e-03, 4.927100e-04,
                   -3.324900e-04],
                  [-3.325600e-04, 1.459700e-04, -1.437358e-02, -6.300923e-02,
                   -9.623594e-02, -6.300923e-02, -1.437358e-02, 1.459700e-04,
                   -3.325600e-04],
                  [-2.406600e-04, -3.732100e-04, -2.420138e-02, -9.623594e-02,
                   8.554893e-01, -9.623594e-02, -2.420138e-02, -3.732100e-04,
                   -2.406600e-04],
                  [-3.325600e-04, 1.459700e-04, -1.437358e-02, -6.300923e-02,
                   -9.623594e-02, -6.300923e-02, -1.437358e-02, 1.459700e-04,
                   -3.325600e-04],
                  [-3.324900e-04, 4.927100e-04, -1.616650e-03, -1.437358e-02,
                   -2.420138e-02, -1.437358e-02, -1.616650e-03, 4.927100e-04,
                   -3.324900e-04],
                  [-6.068000e-05, 1.263100e-04, 4.927100e-04, 1.459700e-04,
                   -3.732100e-04, 1.459700e-04, 4.927100e-04, 1.263100e-04,
                   -6.068000e-05],
                  [5.997200e-04, -6.068000e-05, -3.324900e-04, -3.325600e-04,
                   -2.406600e-04, -3.325600e-04, -3.324900e-04, -6.068000e-05,
                   5.997200e-04]]))
    # the single band filter, stored flat (81 taps)
    filters['bfilts'] = (
        np.array([-9.066000e-05, -1.738640e-03, -4.942500e-03, -7.889390e-03,
                  -1.009473e-02, -7.889390e-03, -4.942500e-03, -1.738640e-03,
                  -9.066000e-05, -1.738640e-03, -4.625150e-03, -7.272540e-03,
                  -7.623410e-03, -9.091950e-03, -7.623410e-03, -7.272540e-03,
                  -4.625150e-03, -1.738640e-03, -4.942500e-03, -7.272540e-03,
                  -2.129540e-02, -2.435662e-02, -3.487008e-02, -2.435662e-02,
                  -2.129540e-02, -7.272540e-03, -4.942500e-03, -7.889390e-03,
                  -7.623410e-03, -2.435662e-02, -1.730466e-02, -3.158605e-02,
                  -1.730466e-02, -2.435662e-02, -7.623410e-03, -7.889390e-03,
                  -1.009473e-02, -9.091950e-03, -3.487008e-02, -3.158605e-02,
                  9.464195e-01, -3.158605e-02, -3.487008e-02, -9.091950e-03,
                  -1.009473e-02, -7.889390e-03, -7.623410e-03, -2.435662e-02,
                  -1.730466e-02, -3.158605e-02, -1.730466e-02, -2.435662e-02,
                  -7.623410e-03, -7.889390e-03, -4.942500e-03, -7.272540e-03,
                  -2.129540e-02, -2.435662e-02, -3.487008e-02, -2.435662e-02,
                  -2.129540e-02, -7.272540e-03, -4.942500e-03, -1.738640e-03,
                  -4.625150e-03, -7.272540e-03, -7.623410e-03, -9.091950e-03,
                  -7.623410e-03, -7.272540e-03, -4.625150e-03, -1.738640e-03,
                  -9.066000e-05, -1.738640e-03, -4.942500e-03, -7.889390e-03,
                  -1.009473e-02, -7.889390e-03, -4.942500e-03, -1.738640e-03,
                  -9.066000e-05]))
    # reshape so each band occupies one column
    filters['bfilts'] = filters['bfilts'].reshape(len(filters['bfilts']), 1)
    return filters
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
def _sp1_filters():
    """Return the filter set for the order-1 (two band) steerable pyramid.

    All kernels are hard-coded constant tables.

    Returns
    -------
    filters : `dict`
        keys: 'harmonics' (angular harmonics present, here [1]), 'mtx' (2x2
        steering matrix), 'lo0filt' (9x9 initial lowpass), 'lofilt' (17x17
        recursive lowpass), 'hi0filt' (9x9 initial highpass), 'bfilts'
        (81x2 array, one column of taps per oriented band, sign-flipped
        at the end).
    """
    filters = {}
    filters['harmonics'] = np.array([1])
    filters['mtx'] = np.eye(2)
    # 9x9 lowpass applied once to the input image
    filters['lo0filt'] = (
        np.array([[-8.701000e-05, -1.354280e-03, -1.601260e-03, -5.033700e-04,
                   2.524010e-03, -5.033700e-04, -1.601260e-03, -1.354280e-03,
                   -8.701000e-05],
                  [-1.354280e-03, 2.921580e-03, 7.522720e-03, 8.224420e-03,
                   1.107620e-03, 8.224420e-03, 7.522720e-03, 2.921580e-03,
                   -1.354280e-03],
                  [-1.601260e-03, 7.522720e-03, -7.061290e-03, -3.769487e-02,
                   -3.297137e-02, -3.769487e-02, -7.061290e-03, 7.522720e-03,
                   -1.601260e-03],
                  [-5.033700e-04, 8.224420e-03, -3.769487e-02, 4.381320e-02,
                   1.811603e-01, 4.381320e-02, -3.769487e-02, 8.224420e-03,
                   -5.033700e-04],
                  [2.524010e-03, 1.107620e-03, -3.297137e-02, 1.811603e-01,
                   4.376250e-01, 1.811603e-01, -3.297137e-02, 1.107620e-03,
                   2.524010e-03],
                  [-5.033700e-04, 8.224420e-03, -3.769487e-02, 4.381320e-02,
                   1.811603e-01, 4.381320e-02, -3.769487e-02, 8.224420e-03,
                   -5.033700e-04],
                  [-1.601260e-03, 7.522720e-03, -7.061290e-03, -3.769487e-02,
                   -3.297137e-02, -3.769487e-02, -7.061290e-03, 7.522720e-03,
                   -1.601260e-03],
                  [-1.354280e-03, 2.921580e-03, 7.522720e-03, 8.224420e-03,
                   1.107620e-03, 8.224420e-03, 7.522720e-03, 2.921580e-03,
                   -1.354280e-03],
                  [-8.701000e-05, -1.354280e-03, -1.601260e-03, -5.033700e-04,
                   2.524010e-03, -5.033700e-04, -1.601260e-03, -1.354280e-03,
                   -8.701000e-05]]))
    # 17x17 lowpass used at every recursive pyramid level
    filters['lofilt'] = (
        np.array([[-4.350000e-05, 1.207800e-04, -6.771400e-04, -1.243400e-04,
                   -8.006400e-04, -1.597040e-03, -2.516800e-04, -4.202000e-04,
                   1.262000e-03, -4.202000e-04, -2.516800e-04, -1.597040e-03,
                   -8.006400e-04, -1.243400e-04, -6.771400e-04, 1.207800e-04,
                   -4.350000e-05],
                  [1.207800e-04, 4.460600e-04, -5.814600e-04, 5.621600e-04,
                   -1.368800e-04, 2.325540e-03, 2.889860e-03, 4.287280e-03,
                   5.589400e-03, 4.287280e-03, 2.889860e-03, 2.325540e-03,
                   -1.368800e-04, 5.621600e-04, -5.814600e-04, 4.460600e-04,
                   1.207800e-04],
                  [-6.771400e-04, -5.814600e-04, 1.460780e-03, 2.160540e-03,
                   3.761360e-03, 3.080980e-03, 4.112200e-03, 2.221220e-03,
                   5.538200e-04, 2.221220e-03, 4.112200e-03, 3.080980e-03,
                   3.761360e-03, 2.160540e-03, 1.460780e-03, -5.814600e-04,
                   -6.771400e-04],
                  [-1.243400e-04, 5.621600e-04, 2.160540e-03, 3.175780e-03,
                   3.184680e-03, -1.777480e-03, -7.431700e-03, -9.056920e-03,
                   -9.637220e-03, -9.056920e-03, -7.431700e-03, -1.777480e-03,
                   3.184680e-03, 3.175780e-03, 2.160540e-03, 5.621600e-04,
                   -1.243400e-04],
                  [-8.006400e-04, -1.368800e-04, 3.761360e-03, 3.184680e-03,
                   -3.530640e-03, -1.260420e-02, -1.884744e-02, -1.750818e-02,
                   -1.648568e-02, -1.750818e-02, -1.884744e-02, -1.260420e-02,
                   -3.530640e-03, 3.184680e-03, 3.761360e-03, -1.368800e-04,
                   -8.006400e-04],
                  [-1.597040e-03, 2.325540e-03, 3.080980e-03, -1.777480e-03,
                   -1.260420e-02, -2.022938e-02, -1.109170e-02, 3.955660e-03,
                   1.438512e-02, 3.955660e-03, -1.109170e-02, -2.022938e-02,
                   -1.260420e-02, -1.777480e-03, 3.080980e-03, 2.325540e-03,
                   -1.597040e-03],
                  [-2.516800e-04, 2.889860e-03, 4.112200e-03, -7.431700e-03,
                   -1.884744e-02, -1.109170e-02, 2.190660e-02, 6.806584e-02,
                   9.058014e-02, 6.806584e-02, 2.190660e-02, -1.109170e-02,
                   -1.884744e-02, -7.431700e-03, 4.112200e-03, 2.889860e-03,
                   -2.516800e-04],
                  [-4.202000e-04, 4.287280e-03, 2.221220e-03, -9.056920e-03,
                   -1.750818e-02, 3.955660e-03, 6.806584e-02, 1.445500e-01,
                   1.773651e-01, 1.445500e-01, 6.806584e-02, 3.955660e-03,
                   -1.750818e-02, -9.056920e-03, 2.221220e-03, 4.287280e-03,
                   -4.202000e-04],
                  [1.262000e-03, 5.589400e-03, 5.538200e-04, -9.637220e-03,
                   -1.648568e-02, 1.438512e-02, 9.058014e-02, 1.773651e-01,
                   2.120374e-01, 1.773651e-01, 9.058014e-02, 1.438512e-02,
                   -1.648568e-02, -9.637220e-03, 5.538200e-04, 5.589400e-03,
                   1.262000e-03],
                  [-4.202000e-04, 4.287280e-03, 2.221220e-03, -9.056920e-03,
                   -1.750818e-02, 3.955660e-03, 6.806584e-02, 1.445500e-01,
                   1.773651e-01, 1.445500e-01, 6.806584e-02, 3.955660e-03,
                   -1.750818e-02, -9.056920e-03, 2.221220e-03, 4.287280e-03,
                   -4.202000e-04],
                  [-2.516800e-04, 2.889860e-03, 4.112200e-03, -7.431700e-03,
                   -1.884744e-02, -1.109170e-02, 2.190660e-02, 6.806584e-02,
                   9.058014e-02, 6.806584e-02, 2.190660e-02, -1.109170e-02,
                   -1.884744e-02, -7.431700e-03, 4.112200e-03, 2.889860e-03,
                   -2.516800e-04],
                  [-1.597040e-03, 2.325540e-03, 3.080980e-03, -1.777480e-03,
                   -1.260420e-02, -2.022938e-02, -1.109170e-02, 3.955660e-03,
                   1.438512e-02, 3.955660e-03, -1.109170e-02, -2.022938e-02,
                   -1.260420e-02, -1.777480e-03, 3.080980e-03, 2.325540e-03,
                   -1.597040e-03],
                  [-8.006400e-04, -1.368800e-04, 3.761360e-03, 3.184680e-03,
                   -3.530640e-03, -1.260420e-02, -1.884744e-02, -1.750818e-02,
                   -1.648568e-02, -1.750818e-02, -1.884744e-02, -1.260420e-02,
                   -3.530640e-03, 3.184680e-03, 3.761360e-03, -1.368800e-04,
                   -8.006400e-04],
                  [-1.243400e-04, 5.621600e-04, 2.160540e-03, 3.175780e-03,
                   3.184680e-03, -1.777480e-03, -7.431700e-03, -9.056920e-03,
                   -9.637220e-03, -9.056920e-03, -7.431700e-03, -1.777480e-03,
                   3.184680e-03, 3.175780e-03, 2.160540e-03, 5.621600e-04,
                   -1.243400e-04],
                  [-6.771400e-04, -5.814600e-04, 1.460780e-03, 2.160540e-03,
                   3.761360e-03, 3.080980e-03, 4.112200e-03, 2.221220e-03,
                   5.538200e-04, 2.221220e-03, 4.112200e-03, 3.080980e-03,
                   3.761360e-03, 2.160540e-03, 1.460780e-03, -5.814600e-04,
                   -6.771400e-04],
                  [1.207800e-04, 4.460600e-04, -5.814600e-04, 5.621600e-04,
                   -1.368800e-04, 2.325540e-03, 2.889860e-03, 4.287280e-03,
                   5.589400e-03, 4.287280e-03, 2.889860e-03, 2.325540e-03,
                   -1.368800e-04, 5.621600e-04, -5.814600e-04, 4.460600e-04,
                   1.207800e-04],
                  [-4.350000e-05, 1.207800e-04, -6.771400e-04, -1.243400e-04,
                   -8.006400e-04, -1.597040e-03, -2.516800e-04, -4.202000e-04,
                   1.262000e-03, -4.202000e-04, -2.516800e-04, -1.597040e-03,
                   -8.006400e-04, -1.243400e-04, -6.771400e-04, 1.207800e-04,
                   -4.350000e-05]]))
    # 9x9 highpass applied once to the input image
    filters['hi0filt'] = (
        np.array([[-9.570000e-04, -2.424100e-04, -1.424720e-03, -8.742600e-04,
                   -1.166810e-03, -8.742600e-04, -1.424720e-03, -2.424100e-04,
                   -9.570000e-04],
                  [-2.424100e-04, -4.317530e-03, 8.998600e-04, 9.156420e-03,
                   1.098012e-02, 9.156420e-03, 8.998600e-04, -4.317530e-03,
                   -2.424100e-04],
                  [-1.424720e-03, 8.998600e-04, 1.706347e-02, 1.094866e-02,
                   -5.897780e-03, 1.094866e-02, 1.706347e-02, 8.998600e-04,
                   -1.424720e-03],
                  [-8.742600e-04, 9.156420e-03, 1.094866e-02, -7.841370e-02,
                   -1.562827e-01, -7.841370e-02, 1.094866e-02, 9.156420e-03,
                   -8.742600e-04],
                  [-1.166810e-03, 1.098012e-02, -5.897780e-03, -1.562827e-01,
                   7.282593e-01, -1.562827e-01, -5.897780e-03, 1.098012e-02,
                   -1.166810e-03],
                  [-8.742600e-04, 9.156420e-03, 1.094866e-02, -7.841370e-02,
                   -1.562827e-01, -7.841370e-02, 1.094866e-02, 9.156420e-03,
                   -8.742600e-04],
                  [-1.424720e-03, 8.998600e-04, 1.706347e-02, 1.094866e-02,
                   -5.897780e-03, 1.094866e-02, 1.706347e-02, 8.998600e-04,
                   -1.424720e-03],
                  [-2.424100e-04, -4.317530e-03, 8.998600e-04, 9.156420e-03,
                   1.098012e-02, 9.156420e-03, 8.998600e-04, -4.317530e-03,
                   -2.424100e-04],
                  [-9.570000e-04, -2.424100e-04, -1.424720e-03, -8.742600e-04,
                   -1.166810e-03, -8.742600e-04, -1.424720e-03, -2.424100e-04,
                   -9.570000e-04]]))
    # two oriented band filters, stored as flat 81-tap rows then transposed
    # so that each band is a column
    filters['bfilts'] = (
        np.array([[6.125880e-03, -8.052600e-03, -2.103714e-02, -1.536890e-02,
                   -1.851466e-02, -1.536890e-02, -2.103714e-02, -8.052600e-03,
                   6.125880e-03, -1.287416e-02, -9.611520e-03, 1.023569e-02,
                   6.009450e-03, 1.872620e-03, 6.009450e-03, 1.023569e-02,
                   -9.611520e-03, -1.287416e-02, -5.641530e-03, 4.168400e-03,
                   -2.382180e-02, -5.375324e-02, -2.076086e-02, -5.375324e-02,
                   -2.382180e-02, 4.168400e-03, -5.641530e-03, -8.957260e-03,
                   -1.751170e-03, -1.836909e-02, 1.265655e-01, 2.996168e-01,
                   1.265655e-01, -1.836909e-02, -1.751170e-03, -8.957260e-03,
                   0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
                   0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
                   0.000000e+00, 8.957260e-03, 1.751170e-03, 1.836909e-02,
                   -1.265655e-01, -2.996168e-01, -1.265655e-01, 1.836909e-02,
                   1.751170e-03, 8.957260e-03, 5.641530e-03, -4.168400e-03,
                   2.382180e-02, 5.375324e-02, 2.076086e-02, 5.375324e-02,
                   2.382180e-02, -4.168400e-03, 5.641530e-03, 1.287416e-02,
                   9.611520e-03, -1.023569e-02, -6.009450e-03, -1.872620e-03,
                   -6.009450e-03, -1.023569e-02, 9.611520e-03, 1.287416e-02,
                   -6.125880e-03, 8.052600e-03, 2.103714e-02, 1.536890e-02,
                   1.851466e-02, 1.536890e-02, 2.103714e-02, 8.052600e-03,
                   -6.125880e-03],
                  [-6.125880e-03, 1.287416e-02, 5.641530e-03, 8.957260e-03,
                   0.000000e+00, -8.957260e-03, -5.641530e-03, -1.287416e-02,
                   6.125880e-03, 8.052600e-03, 9.611520e-03, -4.168400e-03,
                   1.751170e-03, 0.000000e+00, -1.751170e-03, 4.168400e-03,
                   -9.611520e-03, -8.052600e-03, 2.103714e-02, -1.023569e-02,
                   2.382180e-02, 1.836909e-02, 0.000000e+00, -1.836909e-02,
                   -2.382180e-02, 1.023569e-02, -2.103714e-02, 1.536890e-02,
                   -6.009450e-03, 5.375324e-02, -1.265655e-01, 0.000000e+00,
                   1.265655e-01, -5.375324e-02, 6.009450e-03, -1.536890e-02,
                   1.851466e-02, -1.872620e-03, 2.076086e-02, -2.996168e-01,
                   0.000000e+00, 2.996168e-01, -2.076086e-02, 1.872620e-03,
                   -1.851466e-02, 1.536890e-02, -6.009450e-03, 5.375324e-02,
                   -1.265655e-01, 0.000000e+00, 1.265655e-01, -5.375324e-02,
                   6.009450e-03, -1.536890e-02, 2.103714e-02, -1.023569e-02,
                   2.382180e-02, 1.836909e-02, 0.000000e+00, -1.836909e-02,
                   -2.382180e-02, 1.023569e-02, -2.103714e-02, 8.052600e-03,
                   9.611520e-03, -4.168400e-03, 1.751170e-03, 0.000000e+00,
                   -1.751170e-03, 4.168400e-03, -9.611520e-03, -8.052600e-03,
                   -6.125880e-03, 1.287416e-02, 5.641530e-03, 8.957260e-03,
                   0.000000e+00, -8.957260e-03, -5.641530e-03, -1.287416e-02,
                   6.125880e-03]]).T)
    # flip the sign of every band-filter tap
    filters['bfilts'] = np.negative(filters['bfilts'])
    return filters
|
| 686 |
+
|
| 687 |
+
|
| 688 |
+
def _sp3_filters():
    """Return the 4-orientation (order-3) steerable pyramid filter set.

    All kernels are hard-coded numeric tables; do not edit the values.

    Returns
    -------
    filters : `dict`
        Dictionary with keys:

        * ``'harmonics'``: harmonics present in this filter set.
        * ``'mtx'``: 4x4 matrix used to steer the 4 oriented bands.
        * ``'hi0filt'``: 15x15 initial high-pass kernel.
        * ``'lo0filt'``: 9x9 initial low-pass kernel.
        * ``'lofilt'``: 17x17 low-pass kernel used between scales.
        * ``'bfilts'``: oriented band-pass kernels; each column is one
          flattened 9x9 kernel (note the trailing ``.T``).
    """
    filters = {}
    filters['harmonics'] = np.array([1, 3])
    filters['mtx'] = (
        np.array([[0.5000, 0.3536, 0, -0.3536],
                  [-0.0000, 0.3536, 0.5000, 0.3536],
                  [0.5000, -0.3536, 0, 0.3536],
                  [-0.0000, 0.3536, -0.5000, 0.3536]]))
    # 15x15 high-pass filter applied once at the top level of the pyramid.
    filters['hi0filt'] = (
        np.array([[-4.0483998600E-4, -6.2596000498E-4, -3.7829999201E-5, 8.8387000142E-4, 1.5450799838E-3,
                   1.9235999789E-3, 2.0687500946E-3, 2.0898699295E-3, 2.0687500946E-3, 1.9235999789E-3,
                   1.5450799838E-3, 8.8387000142E-4, -3.7829999201E-5, -6.2596000498E-4, -4.0483998600E-4],
                  [-6.2596000498E-4, -3.2734998967E-4, 7.7435001731E-4, 1.5874400269E-3, 2.1750701126E-3,
                   2.5626500137E-3, 2.2892199922E-3, 1.9755100366E-3, 2.2892199922E-3, 2.5626500137E-3,
                   2.1750701126E-3, 1.5874400269E-3, 7.7435001731E-4, -3.2734998967E-4, -6.2596000498E-4],
                  [-3.7829999201E-5, 7.7435001731E-4, 1.1793200392E-3, 1.4050999889E-3, 2.2253401112E-3,
                   2.1145299543E-3, 3.3578000148E-4, -8.3368999185E-4, 3.3578000148E-4, 2.1145299543E-3,
                   2.2253401112E-3, 1.4050999889E-3, 1.1793200392E-3, 7.7435001731E-4, -3.7829999201E-5],
                  [8.8387000142E-4, 1.5874400269E-3, 1.4050999889E-3, 1.2960999738E-3, -4.9274001503E-4,
                   -3.1295299996E-3, -4.5751798898E-3, -5.1014497876E-3, -4.5751798898E-3, -3.1295299996E-3,
                   -4.9274001503E-4, 1.2960999738E-3, 1.4050999889E-3, 1.5874400269E-3, 8.8387000142E-4],
                  [1.5450799838E-3, 2.1750701126E-3, 2.2253401112E-3, -4.9274001503E-4, -6.3222697936E-3,
                   -2.7556000277E-3, 5.3632198833E-3, 7.3032598011E-3, 5.3632198833E-3, -2.7556000277E-3,
                   -6.3222697936E-3, -4.9274001503E-4, 2.2253401112E-3, 2.1750701126E-3, 1.5450799838E-3],
                  [1.9235999789E-3, 2.5626500137E-3, 2.1145299543E-3, -3.1295299996E-3, -2.7556000277E-3,
                   1.3962360099E-2, 7.8046298586E-3, -9.3812197447E-3, 7.8046298586E-3, 1.3962360099E-2,
                   -2.7556000277E-3, -3.1295299996E-3, 2.1145299543E-3, 2.5626500137E-3, 1.9235999789E-3],
                  [2.0687500946E-3, 2.2892199922E-3, 3.3578000148E-4, -4.5751798898E-3, 5.3632198833E-3,
                   7.8046298586E-3, -7.9501636326E-2, -0.1554141641, -7.9501636326E-2, 7.8046298586E-3,
                   5.3632198833E-3, -4.5751798898E-3, 3.3578000148E-4, 2.2892199922E-3, 2.0687500946E-3],
                  [2.0898699295E-3, 1.9755100366E-3, -8.3368999185E-4, -5.1014497876E-3, 7.3032598011E-3,
                   -9.3812197447E-3, -0.1554141641, 0.7303866148, -0.1554141641, -9.3812197447E-3,
                   7.3032598011E-3, -5.1014497876E-3, -8.3368999185E-4, 1.9755100366E-3, 2.0898699295E-3],
                  [2.0687500946E-3, 2.2892199922E-3, 3.3578000148E-4, -4.5751798898E-3, 5.3632198833E-3,
                   7.8046298586E-3, -7.9501636326E-2, -0.1554141641, -7.9501636326E-2, 7.8046298586E-3,
                   5.3632198833E-3, -4.5751798898E-3, 3.3578000148E-4, 2.2892199922E-3, 2.0687500946E-3],
                  [1.9235999789E-3, 2.5626500137E-3, 2.1145299543E-3, -3.1295299996E-3, -2.7556000277E-3,
                   1.3962360099E-2, 7.8046298586E-3, -9.3812197447E-3, 7.8046298586E-3, 1.3962360099E-2,
                   -2.7556000277E-3, -3.1295299996E-3, 2.1145299543E-3, 2.5626500137E-3, 1.9235999789E-3],
                  [1.5450799838E-3, 2.1750701126E-3, 2.2253401112E-3, -4.9274001503E-4, -6.3222697936E-3,
                   -2.7556000277E-3, 5.3632198833E-3, 7.3032598011E-3, 5.3632198833E-3, -2.7556000277E-3,
                   -6.3222697936E-3, -4.9274001503E-4, 2.2253401112E-3, 2.1750701126E-3, 1.5450799838E-3],
                  [8.8387000142E-4, 1.5874400269E-3, 1.4050999889E-3, 1.2960999738E-3, -4.9274001503E-4,
                   -3.1295299996E-3, -4.5751798898E-3, -5.1014497876E-3, -4.5751798898E-3, -3.1295299996E-3,
                   -4.9274001503E-4, 1.2960999738E-3, 1.4050999889E-3, 1.5874400269E-3, 8.8387000142E-4],
                  [-3.7829999201E-5, 7.7435001731E-4, 1.1793200392E-3, 1.4050999889E-3, 2.2253401112E-3,
                   2.1145299543E-3, 3.3578000148E-4, -8.3368999185E-4, 3.3578000148E-4, 2.1145299543E-3,
                   2.2253401112E-3, 1.4050999889E-3, 1.1793200392E-3, 7.7435001731E-4, -3.7829999201E-5],
                  [-6.2596000498E-4, -3.2734998967E-4, 7.7435001731E-4, 1.5874400269E-3, 2.1750701126E-3,
                   2.5626500137E-3, 2.2892199922E-3, 1.9755100366E-3, 2.2892199922E-3, 2.5626500137E-3,
                   2.1750701126E-3, 1.5874400269E-3, 7.7435001731E-4, -3.2734998967E-4, -6.2596000498E-4],
                  [-4.0483998600E-4, -6.2596000498E-4, -3.7829999201E-5, 8.8387000142E-4, 1.5450799838E-3,
                   1.9235999789E-3, 2.0687500946E-3, 2.0898699295E-3, 2.0687500946E-3, 1.9235999789E-3,
                   1.5450799838E-3, 8.8387000142E-4, -3.7829999201E-5, -6.2596000498E-4, -4.0483998600E-4]]))
    # 9x9 low-pass filter applied once at the top level of the pyramid.
    filters['lo0filt'] = (
        np.array([[-8.7009997515E-5, -1.3542800443E-3, -1.6012600390E-3, -5.0337001448E-4, 2.5240099058E-3,
                   -5.0337001448E-4, -1.6012600390E-3, -1.3542800443E-3, -8.7009997515E-5],
                  [-1.3542800443E-3, 2.9215801042E-3, 7.5227199122E-3, 8.2244202495E-3, 1.1076199589E-3,
                   8.2244202495E-3, 7.5227199122E-3, 2.9215801042E-3, -1.3542800443E-3],
                  [-1.6012600390E-3, 7.5227199122E-3, -7.0612900890E-3, -3.7694871426E-2, -3.2971370965E-2,
                   -3.7694871426E-2, -7.0612900890E-3, 7.5227199122E-3, -1.6012600390E-3],
                  [-5.0337001448E-4, 8.2244202495E-3, -3.7694871426E-2, 4.3813198805E-2, 0.1811603010,
                   4.3813198805E-2, -3.7694871426E-2, 8.2244202495E-3, -5.0337001448E-4],
                  [2.5240099058E-3, 1.1076199589E-3, -3.2971370965E-2, 0.1811603010, 0.4376249909,
                   0.1811603010, -3.2971370965E-2, 1.1076199589E-3, 2.5240099058E-3],
                  [-5.0337001448E-4, 8.2244202495E-3, -3.7694871426E-2, 4.3813198805E-2, 0.1811603010,
                   4.3813198805E-2, -3.7694871426E-2, 8.2244202495E-3, -5.0337001448E-4],
                  [-1.6012600390E-3, 7.5227199122E-3, -7.0612900890E-3, -3.7694871426E-2, -3.2971370965E-2,
                   -3.7694871426E-2, -7.0612900890E-3, 7.5227199122E-3, -1.6012600390E-3],
                  [-1.3542800443E-3, 2.9215801042E-3, 7.5227199122E-3, 8.2244202495E-3, 1.1076199589E-3,
                   8.2244202495E-3, 7.5227199122E-3, 2.9215801042E-3, -1.3542800443E-3],
                  [-8.7009997515E-5, -1.3542800443E-3, -1.6012600390E-3, -5.0337001448E-4, 2.5240099058E-3,
                   -5.0337001448E-4, -1.6012600390E-3, -1.3542800443E-3, -8.7009997515E-5]]))
    # 17x17 low-pass filter used for downsampling between pyramid scales.
    filters['lofilt'] = (
        np.array([[-4.3500000174E-5, 1.2078000145E-4, -6.7714002216E-4, -1.2434000382E-4, -8.0063997302E-4, -1.5970399836E-3,
                   -2.5168000138E-4, -4.2019999819E-4, 1.2619999470E-3, -4.2019999819E-4, -2.5168000138E-4, -1.5970399836E-3,
                   -8.0063997302E-4, -1.2434000382E-4, -6.7714002216E-4, 1.2078000145E-4, -4.3500000174E-5],
                  [1.2078000145E-4, 4.4606000301E-4, -5.8146001538E-4, 5.6215998484E-4, -1.3688000035E-4, 2.3255399428E-3,
                   2.8898599558E-3, 4.2872801423E-3, 5.5893999524E-3, 4.2872801423E-3, 2.8898599558E-3, 2.3255399428E-3,
                   -1.3688000035E-4, 5.6215998484E-4, -5.8146001538E-4, 4.4606000301E-4, 1.2078000145E-4],
                  [-6.7714002216E-4, -5.8146001538E-4, 1.4607800404E-3, 2.1605400834E-3, 3.7613599561E-3, 3.0809799209E-3,
                   4.1121998802E-3, 2.2212199401E-3, 5.5381999118E-4, 2.2212199401E-3, 4.1121998802E-3, 3.0809799209E-3,
                   3.7613599561E-3, 2.1605400834E-3, 1.4607800404E-3, -5.8146001538E-4, -6.7714002216E-4],
                  [-1.2434000382E-4, 5.6215998484E-4, 2.1605400834E-3, 3.1757799443E-3, 3.1846798956E-3, -1.7774800071E-3,
                   -7.4316998944E-3, -9.0569201857E-3, -9.6372198313E-3, -9.0569201857E-3, -7.4316998944E-3, -1.7774800071E-3,
                   3.1846798956E-3, 3.1757799443E-3, 2.1605400834E-3, 5.6215998484E-4, -1.2434000382E-4],
                  [-8.0063997302E-4, -1.3688000035E-4, 3.7613599561E-3, 3.1846798956E-3, -3.5306399222E-3, -1.2604200281E-2,
                   -1.8847439438E-2, -1.7508180812E-2, -1.6485679895E-2, -1.7508180812E-2, -1.8847439438E-2, -1.2604200281E-2,
                   -3.5306399222E-3, 3.1846798956E-3, 3.7613599561E-3, -1.3688000035E-4, -8.0063997302E-4],
                  [-1.5970399836E-3, 2.3255399428E-3, 3.0809799209E-3, -1.7774800071E-3, -1.2604200281E-2, -2.0229380578E-2,
                   -1.1091699824E-2, 3.9556599222E-3, 1.4385120012E-2, 3.9556599222E-3, -1.1091699824E-2, -2.0229380578E-2,
                   -1.2604200281E-2, -1.7774800071E-3, 3.0809799209E-3, 2.3255399428E-3, -1.5970399836E-3],
                  [-2.5168000138E-4, 2.8898599558E-3, 4.1121998802E-3, -7.4316998944E-3, -1.8847439438E-2, -1.1091699824E-2,
                   2.1906599402E-2, 6.8065837026E-2, 9.0580143034E-2, 6.8065837026E-2, 2.1906599402E-2, -1.1091699824E-2,
                   -1.8847439438E-2, -7.4316998944E-3, 4.1121998802E-3, 2.8898599558E-3, -2.5168000138E-4],
                  [-4.2019999819E-4, 4.2872801423E-3, 2.2212199401E-3, -9.0569201857E-3, -1.7508180812E-2, 3.9556599222E-3,
                   6.8065837026E-2, 0.1445499808, 0.1773651242, 0.1445499808, 6.8065837026E-2, 3.9556599222E-3,
                   -1.7508180812E-2, -9.0569201857E-3, 2.2212199401E-3, 4.2872801423E-3, -4.2019999819E-4],
                  [1.2619999470E-3, 5.5893999524E-3, 5.5381999118E-4, -9.6372198313E-3, -1.6485679895E-2, 1.4385120012E-2,
                   9.0580143034E-2, 0.1773651242, 0.2120374441, 0.1773651242, 9.0580143034E-2, 1.4385120012E-2,
                   -1.6485679895E-2, -9.6372198313E-3, 5.5381999118E-4, 5.5893999524E-3, 1.2619999470E-3],
                  [-4.2019999819E-4, 4.2872801423E-3, 2.2212199401E-3, -9.0569201857E-3, -1.7508180812E-2, 3.9556599222E-3,
                   6.8065837026E-2, 0.1445499808, 0.1773651242, 0.1445499808, 6.8065837026E-2, 3.9556599222E-3,
                   -1.7508180812E-2, -9.0569201857E-3, 2.2212199401E-3, 4.2872801423E-3, -4.2019999819E-4],
                  [-2.5168000138E-4, 2.8898599558E-3, 4.1121998802E-3, -7.4316998944E-3, -1.8847439438E-2, -1.1091699824E-2,
                   2.1906599402E-2, 6.8065837026E-2, 9.0580143034E-2, 6.8065837026E-2, 2.1906599402E-2, -1.1091699824E-2,
                   -1.8847439438E-2, -7.4316998944E-3, 4.1121998802E-3, 2.8898599558E-3, -2.5168000138E-4],
                  [-1.5970399836E-3, 2.3255399428E-3, 3.0809799209E-3, -1.7774800071E-3, -1.2604200281E-2, -2.0229380578E-2,
                   -1.1091699824E-2, 3.9556599222E-3, 1.4385120012E-2, 3.9556599222E-3, -1.1091699824E-2, -2.0229380578E-2,
                   -1.2604200281E-2, -1.7774800071E-3, 3.0809799209E-3, 2.3255399428E-3, -1.5970399836E-3],
                  [-8.0063997302E-4, -1.3688000035E-4, 3.7613599561E-3, 3.1846798956E-3, -3.5306399222E-3, -1.2604200281E-2,
                   -1.8847439438E-2, -1.7508180812E-2, -1.6485679895E-2, -1.7508180812E-2, -1.8847439438E-2, -1.2604200281E-2,
                   -3.5306399222E-3, 3.1846798956E-3, 3.7613599561E-3, -1.3688000035E-4, -8.0063997302E-4],
                  [-1.2434000382E-4, 5.6215998484E-4, 2.1605400834E-3, 3.1757799443E-3, 3.1846798956E-3, -1.7774800071E-3,
                   -7.4316998944E-3, -9.0569201857E-3, -9.6372198313E-3, -9.0569201857E-3, -7.4316998944E-3, -1.7774800071E-3,
                   3.1846798956E-3, 3.1757799443E-3, 2.1605400834E-3, 5.6215998484E-4, -1.2434000382E-4],
                  [-6.7714002216E-4, -5.8146001538E-4, 1.4607800404E-3, 2.1605400834E-3, 3.7613599561E-3, 3.0809799209E-3,
                   4.1121998802E-3, 2.2212199401E-3, 5.5381999118E-4, 2.2212199401E-3, 4.1121998802E-3, 3.0809799209E-3,
                   3.7613599561E-3, 2.1605400834E-3, 1.4607800404E-3, -5.8146001538E-4, -6.7714002216E-4],
                  [1.2078000145E-4, 4.4606000301E-4, -5.8146001538E-4, 5.6215998484E-4, -1.3688000035E-4, 2.3255399428E-3,
                   2.8898599558E-3, 4.2872801423E-3, 5.5893999524E-3, 4.2872801423E-3, 2.8898599558E-3, 2.3255399428E-3,
                   -1.3688000035E-4, 5.6215998484E-4, -5.8146001538E-4, 4.4606000301E-4, 1.2078000145E-4],
                  [-4.3500000174E-5, 1.2078000145E-4, -6.7714002216E-4, -1.2434000382E-4, -8.0063997302E-4, -1.5970399836E-3,
                   -2.5168000138E-4, -4.2019999819E-4, 1.2619999470E-3, -4.2019999819E-4, -2.5168000138E-4, -1.5970399836E-3,
                   -8.0063997302E-4, -1.2434000382E-4, -6.7714002216E-4, 1.2078000145E-4, -4.3500000174E-5]]))
    # Oriented band-pass kernels: each of the 4 rows below is one flattened
    # 9x9 kernel (written here as 9 lines of 9 values); the final transpose
    # puts one kernel per column.
    filters['bfilts'] = (
        np.array([[-8.1125000725E-4, 4.4451598078E-3, 1.2316980399E-2, 1.3955879956E-2, 1.4179450460E-2, 1.3955879956E-2, 1.2316980399E-2, 4.4451598078E-3, -8.1125000725E-4,
                   3.9103501476E-3, 4.4565401040E-3, -5.8724298142E-3, -2.8760801069E-3, 8.5267601535E-3, -2.8760801069E-3, -5.8724298142E-3, 4.4565401040E-3, 3.9103501476E-3,
                   1.3462699717E-3, -3.7740699481E-3, 8.2581602037E-3, 3.9442278445E-2, 5.3605638444E-2, 3.9442278445E-2, 8.2581602037E-3, -3.7740699481E-3, 1.3462699717E-3,
                   7.4700999539E-4, -3.6522001028E-4, -2.2522680461E-2, -0.1105690673, -0.1768419296, -0.1105690673, -2.2522680461E-2, -3.6522001028E-4, 7.4700999539E-4,
                   0.0000000000, 0.0000000000, 0.0000000000, 0.0000000000, 0.0000000000, 0.0000000000, 0.0000000000, 0.0000000000, 0.0000000000,
                   -7.4700999539E-4, 3.6522001028E-4, 2.2522680461E-2, 0.1105690673, 0.1768419296, 0.1105690673, 2.2522680461E-2, 3.6522001028E-4, -7.4700999539E-4,
                   -1.3462699717E-3, 3.7740699481E-3, -8.2581602037E-3, -3.9442278445E-2, -5.3605638444E-2, -3.9442278445E-2, -8.2581602037E-3, 3.7740699481E-3, -1.3462699717E-3,
                   -3.9103501476E-3, -4.4565401040E-3, 5.8724298142E-3, 2.8760801069E-3, -8.5267601535E-3, 2.8760801069E-3, 5.8724298142E-3, -4.4565401040E-3, -3.9103501476E-3,
                   8.1125000725E-4, -4.4451598078E-3, -1.2316980399E-2, -1.3955879956E-2, -1.4179450460E-2, -1.3955879956E-2, -1.2316980399E-2, -4.4451598078E-3, 8.1125000725E-4],
                  [0.0000000000, -8.2846998703E-4, -5.7109999034E-5, 4.0110000555E-5, 4.6670897864E-3, 8.0871898681E-3, 1.4807609841E-2, 8.6204400286E-3, -3.1221499667E-3,
                   8.2846998703E-4, 0.0000000000, -9.7479997203E-4, -6.9718998857E-3, -2.0865600090E-3, 2.3298799060E-3, -4.4814897701E-3, 1.4917500317E-2, 8.6204400286E-3,
                   5.7109999034E-5, 9.7479997203E-4, 0.0000000000, -1.2145539746E-2, -2.4427289143E-2, 5.0797060132E-2, 3.2785870135E-2, -4.4814897701E-3, 1.4807609841E-2,
                   -4.0110000555E-5, 6.9718998857E-3, 1.2145539746E-2, 0.0000000000, -0.1510555595, -8.2495503128E-2, 5.0797060132E-2, 2.3298799060E-3, 8.0871898681E-3,
                   -4.6670897864E-3, 2.0865600090E-3, 2.4427289143E-2, 0.1510555595, 0.0000000000, -0.1510555595, -2.4427289143E-2, -2.0865600090E-3, 4.6670897864E-3,
                   -8.0871898681E-3, -2.3298799060E-3, -5.0797060132E-2, 8.2495503128E-2, 0.1510555595, 0.0000000000, -1.2145539746E-2, -6.9718998857E-3, 4.0110000555E-5,
                   -1.4807609841E-2, 4.4814897701E-3, -3.2785870135E-2, -5.0797060132E-2, 2.4427289143E-2, 1.2145539746E-2, 0.0000000000, -9.7479997203E-4, -5.7109999034E-5,
                   -8.6204400286E-3, -1.4917500317E-2, 4.4814897701E-3, -2.3298799060E-3, 2.0865600090E-3, 6.9718998857E-3, 9.7479997203E-4, 0.0000000000, -8.2846998703E-4,
                   3.1221499667E-3, -8.6204400286E-3, -1.4807609841E-2, -8.0871898681E-3, -4.6670897864E-3, -4.0110000555E-5, 5.7109999034E-5, 8.2846998703E-4, 0.0000000000],
                  [8.1125000725E-4, -3.9103501476E-3, -1.3462699717E-3, -7.4700999539E-4, 0.0000000000, 7.4700999539E-4, 1.3462699717E-3, 3.9103501476E-3, -8.1125000725E-4,
                   -4.4451598078E-3, -4.4565401040E-3, 3.7740699481E-3, 3.6522001028E-4, 0.0000000000, -3.6522001028E-4, -3.7740699481E-3, 4.4565401040E-3, 4.4451598078E-3,
                   -1.2316980399E-2, 5.8724298142E-3, -8.2581602037E-3, 2.2522680461E-2, 0.0000000000, -2.2522680461E-2, 8.2581602037E-3, -5.8724298142E-3, 1.2316980399E-2,
                   -1.3955879956E-2, 2.8760801069E-3, -3.9442278445E-2, 0.1105690673, 0.0000000000, -0.1105690673, 3.9442278445E-2, -2.8760801069E-3, 1.3955879956E-2,
                   -1.4179450460E-2, -8.5267601535E-3, -5.3605638444E-2, 0.1768419296, 0.0000000000, -0.1768419296, 5.3605638444E-2, 8.5267601535E-3, 1.4179450460E-2,
                   -1.3955879956E-2, 2.8760801069E-3, -3.9442278445E-2, 0.1105690673, 0.0000000000, -0.1105690673, 3.9442278445E-2, -2.8760801069E-3, 1.3955879956E-2,
                   -1.2316980399E-2, 5.8724298142E-3, -8.2581602037E-3, 2.2522680461E-2, 0.0000000000, -2.2522680461E-2, 8.2581602037E-3, -5.8724298142E-3, 1.2316980399E-2,
                   -4.4451598078E-3, -4.4565401040E-3, 3.7740699481E-3, 3.6522001028E-4, 0.0000000000, -3.6522001028E-4, -3.7740699481E-3, 4.4565401040E-3, 4.4451598078E-3,
                   8.1125000725E-4, -3.9103501476E-3, -1.3462699717E-3, -7.4700999539E-4, 0.0000000000, 7.4700999539E-4, 1.3462699717E-3, 3.9103501476E-3, -8.1125000725E-4],
                  [3.1221499667E-3, -8.6204400286E-3, -1.4807609841E-2, -8.0871898681E-3, -4.6670897864E-3, -4.0110000555E-5, 5.7109999034E-5, 8.2846998703E-4, 0.0000000000,
                   -8.6204400286E-3, -1.4917500317E-2, 4.4814897701E-3, -2.3298799060E-3, 2.0865600090E-3, 6.9718998857E-3, 9.7479997203E-4, -0.0000000000, -8.2846998703E-4,
                   -1.4807609841E-2, 4.4814897701E-3, -3.2785870135E-2, -5.0797060132E-2, 2.4427289143E-2, 1.2145539746E-2, 0.0000000000, -9.7479997203E-4, -5.7109999034E-5,
                   -8.0871898681E-3, -2.3298799060E-3, -5.0797060132E-2, 8.2495503128E-2, 0.1510555595, -0.0000000000, -1.2145539746E-2, -6.9718998857E-3, 4.0110000555E-5,
                   -4.6670897864E-3, 2.0865600090E-3, 2.4427289143E-2, 0.1510555595, 0.0000000000, -0.1510555595, -2.4427289143E-2, -2.0865600090E-3, 4.6670897864E-3,
                   -4.0110000555E-5, 6.9718998857E-3, 1.2145539746E-2, 0.0000000000, -0.1510555595, -8.2495503128E-2, 5.0797060132E-2, 2.3298799060E-3, 8.0871898681E-3,
                   5.7109999034E-5, 9.7479997203E-4, -0.0000000000, -1.2145539746E-2, -2.4427289143E-2, 5.0797060132E-2, 3.2785870135E-2, -4.4814897701E-3, 1.4807609841E-2,
                   8.2846998703E-4, -0.0000000000, -9.7479997203E-4, -6.9718998857E-3, -2.0865600090E-3, 2.3298799060E-3, -4.4814897701E-3, 1.4917500317E-2, 8.6204400286E-3,
                   0.0000000000, -8.2846998703E-4, -5.7109999034E-5, 4.0110000555E-5, 4.6670897864E-3, 8.0871898681E-3, 1.4807609841E-2, 8.6204400286E-3, -3.1221499667E-3]]).T)
    return filters
|
| 1013 |
+
|
| 1014 |
+
|
| 1015 |
+
def _sp5_filters():
    """Return the 6-orientation (order-5) steerable pyramid filter set.

    All kernels are hard-coded numeric tables; do not edit the values.

    Returns
    -------
    filters : `dict`
        Dictionary with keys:

        * ``'harmonics'``: harmonics present in this filter set.
        * ``'mtx'``: 6x6 matrix used to steer the 6 oriented bands.
        * ``'hi0filt'``: 9x9 initial high-pass kernel.
        * ``'lo0filt'``: 5x5 initial low-pass kernel.
        * ``'lofilt'``: 9x9 low-pass kernel (scaled by 2) used between scales.
        * ``'bfilts'``: oriented band-pass kernels; each column is one
          flattened 7x7 kernel (note the trailing ``.T``).
    """
    filters = {}
    filters['harmonics'] = np.array([1, 3, 5])
    filters['mtx'] = (
        np.array([[0.3333, 0.2887, 0.1667, 0.0000, -0.1667, -0.2887],
                  [0.0000, 0.1667, 0.2887, 0.3333, 0.2887, 0.1667],
                  [0.3333, -0.0000, -0.3333, -0.0000, 0.3333, -0.0000],
                  [0.0000, 0.3333, 0.0000, -0.3333, 0.0000, 0.3333],
                  [0.3333, -0.2887, 0.1667, -0.0000, -0.1667, 0.2887],
                  [-0.0000, 0.1667, -0.2887, 0.3333, -0.2887, 0.1667]]))
    # 9x9 high-pass filter applied once at the top level of the pyramid.
    filters['hi0filt'] = (
        np.array([[-0.00033429, -0.00113093, -0.00171484, -0.00133542, -0.00080639,
                   -0.00133542, -0.00171484, -0.00113093, -0.00033429],
                  [-0.00113093, -0.00350017, -0.00243812, 0.00631653, 0.01261227,
                   0.00631653, -0.00243812, -0.00350017, -0.00113093],
                  [-0.00171484, -0.00243812, -0.00290081, -0.00673482, -0.00981051,
                   -0.00673482, -0.00290081, -0.00243812, -0.00171484],
                  [-0.00133542, 0.00631653, -0.00673482, -0.07027679, -0.11435863,
                   -0.07027679, -0.00673482, 0.00631653, -0.00133542],
                  [-0.00080639, 0.01261227, -0.00981051, -0.11435863, 0.81380200,
                   -0.11435863, -0.00981051, 0.01261227, -0.00080639],
                  [-0.00133542, 0.00631653, -0.00673482, -0.07027679, -0.11435863,
                   -0.07027679, -0.00673482, 0.00631653, -0.00133542],
                  [-0.00171484, -0.00243812, -0.00290081, -0.00673482, -0.00981051,
                   -0.00673482, -0.00290081, -0.00243812, -0.00171484],
                  [-0.00113093, -0.00350017, -0.00243812, 0.00631653, 0.01261227,
                   0.00631653, -0.00243812, -0.00350017, -0.00113093],
                  [-0.00033429, -0.00113093, -0.00171484, -0.00133542, -0.00080639,
                   -0.00133542, -0.00171484, -0.00113093, -0.00033429]]))
    # 5x5 low-pass filter applied once at the top level of the pyramid.
    filters['lo0filt'] = (
        np.array([[0.00341614, -0.01551246, -0.03848215, -0.01551246,
                   0.00341614],
                  [-0.01551246, 0.05586982, 0.15925570, 0.05586982,
                   -0.01551246],
                  [-0.03848215, 0.15925570, 0.40304148, 0.15925570,
                   -0.03848215],
                  [-0.01551246, 0.05586982, 0.15925570, 0.05586982,
                   -0.01551246],
                  [0.00341614, -0.01551246, -0.03848215, -0.01551246,
                   0.00341614]]))
    # 9x9 low-pass filter (scaled by 2) used for downsampling between scales.
    filters['lofilt'] = (
        2 * np.array([[0.00085404, -0.00244917, -0.00387812, -0.00944432,
                       -0.00962054, -0.00944432, -0.00387812, -0.00244917,
                       0.00085404],
                      [-0.00244917, -0.00523281, -0.00661117, 0.00410600,
                       0.01002988, 0.00410600, -0.00661117, -0.00523281,
                       -0.00244917],
                      [-0.00387812, -0.00661117, 0.01396746, 0.03277038,
                       0.03981393, 0.03277038, 0.01396746, -0.00661117,
                       -0.00387812],
                      [-0.00944432, 0.00410600, 0.03277038, 0.06426333,
                       0.08169618, 0.06426333, 0.03277038, 0.00410600,
                       -0.00944432],
                      [-0.00962054, 0.01002988, 0.03981393, 0.08169618,
                       0.10096540, 0.08169618, 0.03981393, 0.01002988,
                       -0.00962054],
                      [-0.00944432, 0.00410600, 0.03277038, 0.06426333,
                       0.08169618, 0.06426333, 0.03277038, 0.00410600,
                       -0.00944432],
                      [-0.00387812, -0.00661117, 0.01396746, 0.03277038,
                       0.03981393, 0.03277038, 0.01396746, -0.00661117,
                       -0.00387812],
                      [-0.00244917, -0.00523281, -0.00661117, 0.00410600,
                       0.01002988, 0.00410600, -0.00661117, -0.00523281,
                       -0.00244917],
                      [0.00085404, -0.00244917, -0.00387812, -0.00944432,
                       -0.00962054, -0.00944432, -0.00387812, -0.00244917,
                       0.00085404]]))
    # Oriented band-pass kernels: each of the 6 rows below is one flattened
    # 7x7 kernel (written here as 7 lines of 7 values); the final transpose
    # puts one kernel per column.
    filters['bfilts'] = (
        np.array([[0.00277643, 0.00496194, 0.01026699, 0.01455399, 0.01026699, 0.00496194, 0.00277643,
                   -0.00986904, -0.00893064, 0.01189859, 0.02755155, 0.01189859, -0.00893064, -0.00986904,
                   -0.01021852, -0.03075356, -0.08226445, -0.11732297, -0.08226445, -0.03075356, -0.01021852,
                   0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000,
                   0.01021852, 0.03075356, 0.08226445, 0.11732297, 0.08226445, 0.03075356, 0.01021852,
                   0.00986904, 0.00893064, -0.01189859, -0.02755155, -0.01189859, 0.00893064, 0.00986904,
                   -0.00277643, -0.00496194, -0.01026699, -0.01455399, -0.01026699, -0.00496194, -0.00277643],
                  [-0.00343249, -0.00640815, -0.00073141, 0.01124321, 0.00182078, 0.00285723, 0.01166982,
                   -0.00358461, -0.01977507, -0.04084211, -0.00228219, 0.03930573, 0.01161195, 0.00128000,
                   0.01047717, 0.01486305, -0.04819057, -0.12227230, -0.05394139, 0.00853965, -0.00459034,
                   0.00790407, 0.04435647, 0.09454202, -0.00000000, -0.09454202, -0.04435647, -0.00790407,
                   0.00459034, -0.00853965, 0.05394139, 0.12227230, 0.04819057, -0.01486305, -0.01047717,
                   -0.00128000, -0.01161195, -0.03930573, 0.00228219, 0.04084211, 0.01977507, 0.00358461,
                   -0.01166982, -0.00285723, -0.00182078, -0.01124321, 0.00073141, 0.00640815, 0.00343249],
                  [0.00343249, 0.00358461, -0.01047717, -0.00790407, -0.00459034, 0.00128000, 0.01166982,
                   0.00640815, 0.01977507, -0.01486305, -0.04435647, 0.00853965, 0.01161195, 0.00285723,
                   0.00073141, 0.04084211, 0.04819057, -0.09454202, -0.05394139, 0.03930573, 0.00182078,
                   -0.01124321, 0.00228219, 0.12227230, -0.00000000, -0.12227230, -0.00228219, 0.01124321,
                   -0.00182078, -0.03930573, 0.05394139, 0.09454202, -0.04819057, -0.04084211, -0.00073141,
                   -0.00285723, -0.01161195, -0.00853965, 0.04435647, 0.01486305, -0.01977507, -0.00640815,
                   -0.01166982, -0.00128000, 0.00459034, 0.00790407, 0.01047717, -0.00358461, -0.00343249],
                  [-0.00277643, 0.00986904, 0.01021852, -0.00000000, -0.01021852, -0.00986904, 0.00277643,
                   -0.00496194, 0.00893064, 0.03075356, -0.00000000, -0.03075356, -0.00893064, 0.00496194,
                   -0.01026699, -0.01189859, 0.08226445, -0.00000000, -0.08226445, 0.01189859, 0.01026699,
                   -0.01455399, -0.02755155, 0.11732297, -0.00000000, -0.11732297, 0.02755155, 0.01455399,
                   -0.01026699, -0.01189859, 0.08226445, -0.00000000, -0.08226445, 0.01189859, 0.01026699,
                   -0.00496194, 0.00893064, 0.03075356, -0.00000000, -0.03075356, -0.00893064, 0.00496194,
                   -0.00277643, 0.00986904, 0.01021852, -0.00000000, -0.01021852, -0.00986904, 0.00277643],
                  [-0.01166982, -0.00128000, 0.00459034, 0.00790407, 0.01047717, -0.00358461, -0.00343249,
                   -0.00285723, -0.01161195, -0.00853965, 0.04435647, 0.01486305, -0.01977507, -0.00640815,
                   -0.00182078, -0.03930573, 0.05394139, 0.09454202, -0.04819057, -0.04084211, -0.00073141,
                   -0.01124321, 0.00228219, 0.12227230, -0.00000000, -0.12227230, -0.00228219, 0.01124321,
                   0.00073141, 0.04084211, 0.04819057, -0.09454202, -0.05394139, 0.03930573, 0.00182078,
                   0.00640815, 0.01977507, -0.01486305, -0.04435647, 0.00853965, 0.01161195, 0.00285723,
                   0.00343249, 0.00358461, -0.01047717, -0.00790407, -0.00459034, 0.00128000, 0.01166982],
                  [-0.01166982, -0.00285723, -0.00182078, -0.01124321, 0.00073141, 0.00640815, 0.00343249,
                   -0.00128000, -0.01161195, -0.03930573, 0.00228219, 0.04084211, 0.01977507, 0.00358461,
                   0.00459034, -0.00853965, 0.05394139, 0.12227230, 0.04819057, -0.01486305, -0.01047717,
                   0.00790407, 0.04435647, 0.09454202, -0.00000000, -0.09454202, -0.04435647, -0.00790407,
                   0.01047717, 0.01486305, -0.04819057, -0.12227230, -0.05394139, 0.00853965, -0.00459034,
                   -0.00358461, -0.01977507, -0.04084211, -0.00228219, 0.03930573, 0.01161195, 0.00128000,
                   -0.00343249, -0.00640815, -0.00073141, 0.01124321, 0.00182078, 0.00285723, 0.01166982]]).T)
    return filters
|
| 1170 |
+
|
| 1171 |
+
|
| 1172 |
+
class Pyramid:
|
| 1173 |
+
"""Base class for multiscale pyramids
|
| 1174 |
+
|
| 1175 |
+
You should not instantiate this base class, it is instead inherited by the other classes found
|
| 1176 |
+
in this module.
|
| 1177 |
+
|
| 1178 |
+
Parameters
|
| 1179 |
+
----------
|
| 1180 |
+
image : `array_like`
|
| 1181 |
+
        1d or 2d image upon which to construct the pyramid.
|
| 1182 |
+
edge_type : {'circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend', 'dont-compute'}
|
| 1183 |
+
Specifies how to handle edges. Options are:
|
| 1184 |
+
|
| 1185 |
+
* `'circular'` - circular convolution
|
| 1186 |
+
* `'reflect1'` - reflect about the edge pixels
|
| 1187 |
+
* `'reflect2'` - reflect, doubling the edge pixels
|
| 1188 |
+
* `'repeat'` - repeat the edge pixels
|
| 1189 |
+
* `'zero'` - assume values of zero outside image boundary
|
| 1190 |
+
* `'extend'` - reflect and invert
|
| 1191 |
+
        * `'dont-compute'` - zero output when filter overhangs input boundaries.
|
| 1192 |
+
|
| 1193 |
+
Attributes
|
| 1194 |
+
----------
|
| 1195 |
+
image : `array_like`
|
| 1196 |
+
The input image used to construct the pyramid.
|
| 1197 |
+
image_size : `tuple`
|
| 1198 |
+
The size of the input image.
|
| 1199 |
+
pyr_type : `str` or `None`
|
| 1200 |
+
Human-readable string specifying the type of pyramid. For base class, is None.
|
| 1201 |
+
edge_type : `str`
|
| 1202 |
+
Specifies how edges were handled.
|
| 1203 |
+
pyr_coeffs : `dict`
|
| 1204 |
+
Dictionary containing the coefficients of the pyramid. Keys are `(level, band)` tuples and
|
| 1205 |
+
values are 1d or 2d numpy arrays (same number of dimensions as the input image)
|
| 1206 |
+
pyr_size : `dict`
|
| 1207 |
+
Dictionary containing the sizes of the pyramid coefficients. Keys are `(level, band)`
|
| 1208 |
+
tuples and values are tuples.
|
| 1209 |
+
is_complex : `bool`
|
| 1210 |
+
Whether the coefficients are complex- or real-valued. Only `SteerablePyramidFreq` can have
|
| 1211 |
+
a value of True, all others must be False.
|
| 1212 |
+
"""
|
| 1213 |
+
|
| 1214 |
+
def __init__(self, image, edge_type):
|
| 1215 |
+
|
| 1216 |
+
self.image = np.array(image).astype(np.float)
|
| 1217 |
+
if self.image.ndim == 1:
|
| 1218 |
+
self.image = self.image.reshape(-1, 1)
|
| 1219 |
+
assert self.image.ndim == 2, "Error: Input signal must be 1D or 2D."
|
| 1220 |
+
|
| 1221 |
+
self.image_size = self.image.shape
|
| 1222 |
+
if not hasattr(self, 'pyr_type'):
|
| 1223 |
+
self.pyr_type = None
|
| 1224 |
+
self.edge_type = edge_type
|
| 1225 |
+
self.pyr_coeffs = {}
|
| 1226 |
+
self.pyr_size = {}
|
| 1227 |
+
self.is_complex = False
|
| 1228 |
+
|
| 1229 |
+
def _set_num_scales(self, filter_name, height, extra_height=0):
    """Figure out the number of scales (height) of the pyramid.

    The user should not call this directly. This is called during
    construction of a pyramid, and is based on the size of the filters
    (thus, should be called after instantiating the filters) and the input
    image, as well as the `extra_height` parameter (which corresponds to
    the residuals, which the Gaussian pyramid contains and others do not).

    This sets `self.num_scales` directly instead of returning something,
    so be careful.

    Parameters
    ----------
    filter_name : `str`
        Name of the filter in the `filters` dict that determines the
        height of the pyramid.
    height : `'auto'` or `int`
        During construction, user can specify the number of scales
        (height) of the pyramid. The pyramid will have this number of
        scales unless that's greater than the maximum possible height.
    extra_height : `int`, optional
        The automatically calculated maximum number of scales is based on
        the size of the input image and filter size. The Gaussian pyramid
        also contains the final residuals and so we need to add one more
        to this number.

    Returns
    -------
    None

    Raises
    ------
    ValueError
        If `height` exceeds the maximum possible number of scales.
    """
    # the Gaussian and Laplacian pyramids can go one higher than the value
    # returned here, so we use the extra_height argument to allow for that
    max_ht = max_pyr_height(self.image.shape, self.filters[filter_name].shape) + extra_height
    if height == 'auto':
        self.num_scales = max_ht
    elif height > max_ht:
        # ValueError is more precise than the bare Exception previously
        # raised, and remains backward-compatible with callers catching
        # Exception
        raise ValueError("Cannot build pyramid higher than %d levels." % (max_ht))
    else:
        self.num_scales = int(height)
def _recon_levels_check(self, levels):
    """Check whether levels arg is valid for reconstruction and return valid version

    When reconstructing the input image (i.e., when calling `recon_pyr()`), the user specifies
    which levels to include. This makes sure those levels are valid and gets them in the form
    we expect for the rest of the reconstruction. If the user passes `'all'`, this constructs
    the appropriate list (based on the values of `self.pyr_coeffs`).

    Parameters
    ----------
    levels : `list`, `int`, or {`'all'`, `'residual_highpass'`, or `'residual_lowpass'`}
        If `list` should contain some subset of integers from `0` to `self.num_scales-1`
        (inclusive) and `'residual_highpass'` and `'residual_lowpass'` (if appropriate for the
        pyramid). If `'all'`, returned value will contain all valid levels. Otherwise, must be
        one of the valid levels.

    Returns
    -------
    levels : `list`
        List containing the valid levels for reconstruction, smallest scale first, with
        `'residual_highpass'` (if requested and present) at the front and
        `'residual_lowpass'` (if requested and present) at the back.

    """
    if isinstance(levels, str) and levels == 'all':
        levels = ['residual_highpass'] + list(range(self.num_scales)) + ['residual_lowpass']
    else:
        if not hasattr(levels, '__iter__') or isinstance(levels, str):
            # then it's a single int or string
            levels = [levels]
        # numeric entries may arrive as ints or as digit strings; both are
        # normalized to ints here (the residual names are filtered out)
        levs_nums = np.array([int(i) for i in levels if isinstance(i, int) or i.isdigit()])
        assert (levs_nums >= 0).all(), "Level numbers must be non-negative."
        assert (levs_nums < self.num_scales).all(), "Level numbers must be in the range [0, %d]" % (
            self.num_scales - 1)
        levs_tmp = list(np.sort(levs_nums))  # we want smallest first
        # residuals keep fixed positions: highpass first, lowpass last
        if 'residual_highpass' in levels:
            levs_tmp = ['residual_highpass'] + levs_tmp
        if 'residual_lowpass' in levels:
            levs_tmp = levs_tmp + ['residual_lowpass']
        levels = levs_tmp
    # not all pyramids have residual highpass / lowpass, but it's easier to construct the list
    # including them, then remove them if necessary.
    # (pop(-1)/pop(0) are safe because the membership checks above guarantee
    # the residual sits exactly at that end of the list)
    if 'residual_lowpass' not in self.pyr_coeffs.keys() and 'residual_lowpass' in levels:
        levels.pop(-1)
    if 'residual_highpass' not in self.pyr_coeffs.keys() and 'residual_highpass' in levels:
        levels.pop(0)
    return levels
def _recon_bands_check(self, bands):
|
| 1313 |
+
"""Check whether bands arg is valid for reconstruction and return valid version
|
| 1314 |
+
|
| 1315 |
+
When reconstructing the input image (i.e., when calling `recon_pyr()`), the user specifies
|
| 1316 |
+
which orientations to include. This makes sure those orientations are valid and gets them
|
| 1317 |
+
in the form we expect for the rest of the reconstruction. If the user passes `'all'`, this
|
| 1318 |
+
constructs the appropriate list (based on the values of `self.pyr_coeffs`).
|
| 1319 |
+
|
| 1320 |
+
Parameters
|
| 1321 |
+
----------
|
| 1322 |
+
bands : `list`, `int`, or `'all'`.
|
| 1323 |
+
If list, should contain some subset of integers from `0` to `self.num_orientations-1`.
|
| 1324 |
+
If `'all'`, returned value will contain all valid orientations. Otherwise, must be one
|
| 1325 |
+
of the valid orientations.
|
| 1326 |
+
|
| 1327 |
+
Returns
|
| 1328 |
+
-------
|
| 1329 |
+
bands: `list`
|
| 1330 |
+
List containing the valid orientations for reconstruction.
|
| 1331 |
+
"""
|
| 1332 |
+
if isinstance(bands, str) and bands == "all":
|
| 1333 |
+
bands = np.arange(self.num_orientations)
|
| 1334 |
+
else:
|
| 1335 |
+
bands = np.array(bands, ndmin=1)
|
| 1336 |
+
assert (bands >= 0).all(), "Error: band numbers must be larger than 0."
|
| 1337 |
+
assert (bands < self.num_orientations).all(), "Error: band numbers must be in the range [0, %d]" % (
|
| 1338 |
+
self.num_orientations - 1)
|
| 1339 |
+
return bands
|
| 1340 |
+
|
| 1341 |
+
def _recon_keys(self, levels, bands, max_orientations=None):
|
| 1342 |
+
"""Make a list of all the relevant keys from `pyr_coeffs` to use in pyramid reconstruction
|
| 1343 |
+
|
| 1344 |
+
When reconstructing the input image (i.e., when calling `recon_pyr()`), the user specifies
|
| 1345 |
+
some subset of the pyramid coefficients to include in the reconstruction. This function
|
| 1346 |
+
takes in those specifications, checks that they're valid, and returns a list of tuples
|
| 1347 |
+
that are keys into the `pyr_coeffs` dictionary.
|
| 1348 |
+
|
| 1349 |
+
Parameters
|
| 1350 |
+
----------
|
| 1351 |
+
levels : `list`, `int`, or {`'all'`, `'residual_highpass'`, `'residual_lowpass'`}
|
| 1352 |
+
If `list` should contain some subset of integers from `0` to `self.num_scales-1`
|
| 1353 |
+
(inclusive) and `'residual_highpass'` and `'residual_lowpass'` (if appropriate for the
|
| 1354 |
+
pyramid). If `'all'`, returned value will contain all valid levels. Otherwise, must be
|
| 1355 |
+
one of the valid levels.
|
| 1356 |
+
bands : `list`, `int`, or `'all'`.
|
| 1357 |
+
If list, should contain some subset of integers from `0` to `self.num_orientations-1`.
|
| 1358 |
+
If `'all'`, returned value will contain all valid orientations. Otherwise, must be one
|
| 1359 |
+
of the valid orientations.
|
| 1360 |
+
max_orientations: `None` or `int`.
|
| 1361 |
+
The maximum number of orientations we allow in the reconstruction. when we determine
|
| 1362 |
+
which ints are allowed for bands, we ignore all those greater than max_orientations.
|
| 1363 |
+
|
| 1364 |
+
Returns
|
| 1365 |
+
-------
|
| 1366 |
+
recon_keys : `list`
|
| 1367 |
+
List of `tuples`, all of which are keys in `pyr_coeffs`. These are the coefficients to
|
| 1368 |
+
include in the reconstruction of the image.
|
| 1369 |
+
|
| 1370 |
+
"""
|
| 1371 |
+
levels = self._recon_levels_check(levels)
|
| 1372 |
+
bands = self._recon_bands_check(bands)
|
| 1373 |
+
if max_orientations is not None:
|
| 1374 |
+
for i in bands:
|
| 1375 |
+
if i >= max_orientations:
|
| 1376 |
+
warnings.warn(("You wanted band %d in the reconstruction but max_orientation"
|
| 1377 |
+
" is %d, so we're ignoring that band" % (i, max_orientations)))
|
| 1378 |
+
bands = [i for i in bands if i < max_orientations]
|
| 1379 |
+
recon_keys = []
|
| 1380 |
+
for level in levels:
|
| 1381 |
+
# residual highpass and lowpass
|
| 1382 |
+
if isinstance(level, str):
|
| 1383 |
+
recon_keys.append(level)
|
| 1384 |
+
# else we have to get each of the (specified) bands at
|
| 1385 |
+
# that level
|
| 1386 |
+
else:
|
| 1387 |
+
recon_keys.extend([(level, band) for band in bands])
|
| 1388 |
+
return recon_keys
|
| 1389 |
+
|
| 1390 |
+
|
| 1391 |
+
class SteerablePyramidBase(Pyramid):
    """Base class for steerable pyramids.

    Should not be instantiated directly; it exists so that both
    SteerablePyramidFreq and SteerablePyramidSpace can inherit the
    `steer_coeffs` method.
    """

    def __init__(self, image, edge_type):
        super().__init__(image=image, edge_type=edge_type)

    def steer_coeffs(self, angles, even_phase=True):
        """Steer pyramid coefficients to the specified angles.

        This allows you to have filters that have the Gaussian derivative
        order specified in construction, but arbitrary angles or number of
        orientations.

        Parameters
        ----------
        angles : `list`
            List of angles (in radians) to steer the pyramid coefficients to.
        even_phase : `bool`
            Specifies whether the harmonics are cosine or sine phase aligned
            about those positions.

        Returns
        -------
        resteered_coeffs : `dict`
            Dictionary of re-steered pyramid coefficients. Will have the same
            number of scales as the original pyramid (though it will not
            contain the residual highpass or lowpass). Like `self.pyr_coeffs`,
            keys are 2-tuples of ints indexing the scale and orientation, but
            now we're indexing `angles` instead of `self.num_orientations`.
        resteering_weights : `dict`
            Dictionary of weights used to re-steer the pyramid coefficients.
            Will have the same keys as `resteered_coeffs`.
        """
        steered = {}
        weights = {}
        for scale in range(self.num_scales):
            band_shape = self.pyr_coeffs[(scale, 0)].shape
            # columns of `basis` are the flattened orientation bands at this scale
            basis = np.vstack([self.pyr_coeffs[(scale, ori)].flatten()
                               for ori in range(self.num_orientations)]).T
            for idx, angle in enumerate(angles):
                res, steervect = steer(basis, angle, return_weights=True,
                                       even_phase=even_phase)
                steered[(scale, idx)] = res.reshape(band_shape)
                weights[(scale, idx)] = steervect

        return steered, weights
class SteerablePyramidSpace(SteerablePyramidBase):
    """Steerable pyramid (using spatial convolutions)

    Notes
    -----
    Transform described in [1]_, filter kernel design described in [2]_.

    Parameters
    ----------
    image : `array_like`
        2d image upon which to construct to the pyramid.
    height : 'auto' or `int`.
        The height of the pyramid. If 'auto', will automatically determine based on the size of
        `image`.
    order : {0, 1, 3, 5}.
        The Gaussian derivative order used for the steerable filters. If you want a different
        value, see SteerablePyramidFreq. Note that to achieve steerability the minimum number
        of orientation is `order` + 1, and is used here. To get more orientations at the same
        order, use the method `steer_coeffs`
    edge_type : {'circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend', 'dont-compute'}
        Specifies how to handle edges. Options are:

        * `'circular'` - circular convolution
        * `'reflect1'` - reflect about the edge pixels
        * `'reflect2'` - reflect, doubling the edge pixels
        * `'repeat'` - repeat the edge pixels
        * `'zero'` - assume values of zero outside image boundary
        * `'extend'` - reflect and invert
        * `'dont-compute'` - zero output when filter overhangs input boundaries.

    Attributes
    ----------
    image : `array_like`
        The input image used to construct the pyramid.
    image_size : `tuple`
        The size of the input image.
    pyr_type : `str` or `None`
        Human-readable string specifying the type of pyramid. For base class, is None.
    edge_type : `str`
        Specifies how edges were handled.
    pyr_coeffs : `dict`
        Dictionary containing the coefficients of the pyramid. Keys are `(level, band)` tuples and
        values are 1d or 2d numpy arrays (same number of dimensions as the input image)
    pyr_size : `dict`
        Dictionary containing the sizes of the pyramid coefficients. Keys are `(level, band)`
        tuples and values are tuples.
    is_complex : `bool`
        Whether the coefficients are complex- or real-valued. Only `SteerablePyramidFreq` can have
        a value of True, all others must be False.

    References
    ----------
    .. [1] E P Simoncelli and W T Freeman, "The Steerable Pyramid: A Flexible Architecture for
       Multi-Scale Derivative Computation," Second Int'l Conf on Image Processing, Washington, DC,
       Oct 1995.
    .. [2] A Karasaridis and E P Simoncelli, "A Filter Design Technique for Steerable Pyramid
       Image Transforms", ICASSP, Atlanta, GA, May 1996.
    """

    def __init__(self, image, height='auto', order=1, edge_type='symm'):
        super().__init__(image=image, edge_type=edge_type)

        self.order = order
        # steerability requires at least order+1 orientations
        self.num_orientations = self.order + 1
        self.filters = parse_filter("sp{:d}_filters".format(self.num_orientations - 1), normalize=False)
        self.pyr_type = 'SteerableSpace'
        self._set_num_scales('lofilt', height)

        # residual highpass: highpass-filter the input once, no subsampling
        hi0 = corrDn(image=self.image, filt=self.filters['hi0filt'], edge_type=self.edge_type)

        self.pyr_coeffs['residual_highpass'] = hi0
        self.pyr_size['residual_highpass'] = hi0.shape

        lo = corrDn(image=self.image, filt=self.filters['lo0filt'], edge_type=self.edge_type)
        for i in range(self.num_scales):
            # assume square filters -- start of buildSpyrLevs
            # (bfilts stores each band filter as a flattened column)
            bfiltsz = int(np.floor(np.sqrt(self.filters['bfilts'].shape[0])))

            for b in range(self.num_orientations):
                filt = self.filters['bfilts'][:, b].reshape(bfiltsz, bfiltsz).T
                band = corrDn(image=lo, filt=filt, edge_type=self.edge_type)
                self.pyr_coeffs[(i, b)] = np.array(band)
                self.pyr_size[(i, b)] = band.shape

            # lowpass and downsample by 2 before the next scale
            lo = corrDn(image=lo, filt=self.filters['lofilt'], edge_type=self.edge_type, step=(2, 2))

        self.pyr_coeffs['residual_lowpass'] = lo
        self.pyr_size['residual_lowpass'] = lo.shape

    def recon_pyr(self, order=None, edge_type=None, levels='all', bands='all'):
        """Reconstruct the image, optionally using subset of pyramid coefficients.

        Parameters
        ----------
        order : {None, 0, 1, 3, 5}.
            the Gaussian derivative order you want to use for the steerable pyramid filters used to
            reconstruct the pyramid. If None, uses the same order as that used to construct the
            pyramid.
        edge_type : {None, 'circular', 'reflect1', 'reflect2', 'repeat', 'zero', 'extend',
                     'dont-compute'}
            Specifies how to handle edges. Options are:

            * None (default) - use `self.edge_type`, the edge_type used to construct the pyramid
            * `'circular'` - circular convolution
            * `'reflect1'` - reflect about the edge pixels
            * `'reflect2'` - reflect, doubling the edge pixels
            * `'repeat'` - repeat the edge pixels
            * `'zero'` - assume values of zero outside image boundary
            * `'extend'` - reflect and invert
            * `'dont-compute'` - zero output when filter overhangs input boundaries.
        levels : `list`, `int`, or {`'all'`, `'residual_highpass'`}
            If `list` should contain some subset of integers from `0` to `self.num_scales-1`
            (inclusive) and `'residual_lowpass'`. If `'all'`, returned value will contain all
            valid levels. Otherwise, must be one of the valid levels.
        bands : `list`, `int`, or `'all'`.
            If list, should contain some subset of integers from `0` to `self.num_orientations-1`.
            If `'all'`, returned value will contain all valid orientations. Otherwise, must be one
            of the valid orientations.

        Returns
        -------
        recon : `np.array`
            The reconstructed image.
        """

        if order is None:
            filters = self.filters
            recon_keys = self._recon_keys(levels, bands)
        else:
            # a different order means a different filter set, with order+1
            # orientations capping which bands can be used
            filters = parse_filter("sp{:d}_filters".format(order), normalize=False)
            recon_keys = self._recon_keys(levels, bands, order + 1)

        # assume square filters -- start of buildSpyrLevs
        bfiltsz = int(np.floor(np.sqrt(filters['bfilts'].shape[0])))

        if edge_type is None:
            edges = self.edge_type
        else:
            edges = edge_type

        # initialize reconstruction
        if 'residual_lowpass' in recon_keys:
            recon = self.pyr_coeffs['residual_lowpass']
        else:
            recon = np.zeros_like(self.pyr_coeffs['residual_lowpass'])

        for lev in reversed(range(self.num_scales)):
            # we need to upConv once per level, in order to up-sample
            # the image back to the right shape.
            recon = upConv(image=recon, filt=filters['lofilt'], edge_type=edges,
                           step=(2, 2), start=(0, 0), stop=self.pyr_size[(lev, 0)])
            # I think the most effective way to do this is to just
            # check every possible sub-band and then only add in the
            # ones we want (given that we have to loop through the
            # levels above in order to up-sample)
            for band in reversed(range(self.num_orientations)):
                if (lev, band) in recon_keys:
                    filt = filters['bfilts'][:, band].reshape(bfiltsz, bfiltsz, order='F')
                    recon += upConv(image=self.pyr_coeffs[(lev, band)], filt=filt, edge_type=edges,
                                    stop=self.pyr_size[(lev, band)])

        # apply lo0filt
        recon = upConv(image=recon, filt=filters['lo0filt'], edge_type=edges, stop=recon.shape)

        if 'residual_highpass' in recon_keys:
            recon += upConv(image=self.pyr_coeffs['residual_highpass'], filt=filters['hi0filt'],
                            edge_type=edges, start=(0, 0), step=(1, 1), stop=recon.shape)

        return recon
def corrDn(image, filt, edge_type='symm', step=(1, 1), start=(0, 0), stop=None):
    """Correlate `image` with `filt`, then subsample the result.

    Parameters
    ----------
    image : `np.array`
        The image to be filtered.
    filt : `np.array`
        The filter to be used.
    edge_type : str {fill, wrap, symm}, optional
        Boundary handling, passed straight to `scipy.signal.correlate2d`.
    step : tuple of ints
        The step size used to sample the correlated image.
    start : `tuple`
        2-tuple which specifies the start of the window over which we
        perform the convolution.
    stop : `tuple` or None
        2-tuple bounding the sampled window; defaults to the full image
        shape when None.

    Returns
    -------
    `np.array`
        The correlated and subsampled image.
    """
    stop = image.shape if stop is None else stop
    correlated = signal.correlate2d(image, filt, mode='same', boundary=edge_type)
    rows = slice(start[0], stop[0], step[0])
    cols = slice(start[1], stop[1], step[1])
    return correlated[rows, cols]
def upConv(image, filt, edge_type='symm', step=(1, 1), start=(0, 0), stop=None):
    """Up-convolution of image with filter.

    Up sample by inserting 0s and then apply the filter.

    Parameters
    ----------
    image : `np.array`
        The image to be filtered.
    filt : `np.array`
        The filter to be used.
    edge_type : str {fill, wrap, symm}, optional
        fill
            pad input arrays with fillvalue.
        wrap
            circular boundary conditions.
        symm
            symmetrical boundary conditions.
    step : tuple of ints
        The step size used to upsample the image.
    start : `tuple`
        2-tuple which specifies the start of the image over which we perform
        the convolution.
    stop : `tuple` or None
        2-tuple giving the (pre-upsampling) output extent; defaults to the
        input image shape.

    Returns
    -------
    `np.array`
        The upsampled, filtered image of shape
        ``(stop[0] * step[0], stop[1] * step[1])``.
    """
    if stop is None:
        stop = image.shape
    output = np.zeros((int(stop[0] * step[0]), int(stop[1] * step[1])))
    # Bug fix: the column stop index previously used step[0] instead of
    # step[1], which produced a broadcast error (or wrong placement)
    # whenever the row and column step sizes differed.
    output[start[0]:stop[0] * step[0]:step[0], start[1]:stop[1] * step[1]:step[1]] = image
    filt_output = signal.correlate2d(output, filt, mode='same', boundary=edge_type)
    return filt_output
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/fcm/utils.py
ADDED
|
@@ -0,0 +1,511 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
from scipy import signal
|
| 4 |
+
from scipy import ndimage
|
| 5 |
+
from PIL import Image
|
| 6 |
+
from skimage import transform
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def RGB2Lab(im):
    """
    Converts RGB color space to CIELab color space

    Parameters
    ----------
    im : an input RGB image (values in 0-255; assumed sRGB)

    Returns
    -------
    im_Lab : the output Lab image

    """

    im = np.float32(im) / 255  # get r,g,b value in the range of [0,1]

    # the figure from graybar.m and the information from the website
    # http://www.cinenet.net/~spitzak/conversion/whysrgb.html, we can conclude
    # that our RGB system is sRGB

    # if RGB system is sRGB: undo the sRGB gamma (linearize)
    mask = im >= 0.04045
    im[mask] = ((im[mask] + 0.055) / 1.055) ** 2.4
    im[~mask] = im[~mask] / 12.92

    # Observer. = 2°, Illuminant = D65 (sRGB -> XYZ matrix)
    matrix = np.array([[0.412453, 0.357580, 0.180423],
                       [0.212671, 0.715160, 0.072169],
                       [0.019334, 0.119193, 0.950227]])

    c_im = np.dot(im, matrix.T)
    # NOTE(review): XYZ here is on a [0,1] scale, but the D65 white point
    # divisors below (95.047/100.000/108.833) are on a 0-100 scale. As a
    # result every value falls below 0.008856 and only the linear branch of
    # the Lab transfer function is ever taken — confirm this matches the
    # original MATLAB code this was ported from (divisors of
    # 0.95047/1.0/1.08883 would be the standard choice).
    c_im[:, :, 0] = c_im[:, :, 0] / 95.047
    c_im[:, :, 1] = c_im[:, :, 1] / 100.000
    c_im[:, :, 2] = c_im[:, :, 2] / 108.833

    mask = c_im >= 0.008856
    c_im[mask] = c_im[mask] ** (1 / 3)
    c_im[~mask] = 7.787 * c_im[~mask] + 16 / 116

    im_Lab = np.zeros_like(c_im)

    # L from Y; a from X-Y; b from Y-Z (standard CIELab combination)
    im_Lab[:, :, 0] = (116 * c_im[:, :, 1]) - 16
    im_Lab[:, :, 1] = 500 * (c_im[:, :, 0] - c_im[:, :, 1])
    im_Lab[:, :, 2] = 200 * (c_im[:, :, 1] - c_im[:, :, 2])

    return im_Lab
def normlize(arr):
    """
    Normalizes the array input linearly from (min, max) -> (0, 255).

    Parameters
    ----------
    arr : `np.array`
        Input array of any numeric dtype.

    Returns
    -------
    `np.array` of dtype uint8
        The rescaled array; a constant input maps to all zeros.
    """
    arr = np.asarray(arr)
    lo = arr.min()
    span = arr.max() - lo
    # guard against a constant input, which previously caused a divide by
    # zero (NaNs cast to uint8)
    if span == 0:
        return np.zeros_like(arr, dtype='uint8')
    return ((arr - lo) * (1 / span * 255)).astype('uint8')
def conv2(x, y, mode=None):
    """2-D convolution of `x` with kernel `y`.

    For mode='same' both inputs (and the result) are rotated 180 degrees
    around the convolution — presumably to reproduce MATLAB conv2's
    centering for even-sized kernels, which differs from scipy's
    (TODO confirm against the MATLAB original). Any other mode falls
    through to scipy's default 'full' convolution.
    """
    if mode == 'same':
        return np.rot90(signal.convolve2d(np.rot90(x, 2), np.rot90(y, 2), mode=mode), 2)
    return signal.convolve2d(x, y)
def RRoverlapconv(kernel, in_):
    """Filter `in_` with `kernel`, counting only the part that overlaps the image.

    Near the borders only part of the kernel overlaps the image; the output
    is rescaled there so that the weights which overlap the image sum to the
    same total as the full filter kernel.
    """
    # filter with the original kernel
    filtered = conv2(in_, kernel, mode='same')

    # convolving an all-ones image of the same size gives, at each pixel,
    # the sum of the kernel weights that overlap the image there
    overlapsum = conv2(np.ones_like(in_), kernel, 'same')

    # rescale each pixel by the relative overlap of the filter with the image
    return np.sum(kernel) * filtered / overlapsum
def RRgaussfilter1D(halfsupport, sigma, center=0):
    """Create a normalized 1-D Gaussian filter kernel.

    Centered at `center` (default 0), sampled over the integer range
    -halfsupport..halfsupport inclusive, with standard deviation `sigma`.

    Parameters
    ----------
    halfsupport : int
        Half-width of the kernel support.
    sigma : float
        Standard deviation of the Gaussian.
    center : float, optional
        Center of the Gaussian.

    Returns
    -------
    `np.array` of shape (1, 2 * halfsupport + 1)
        Row-vector kernel whose entries sum to 1.
    """
    # vectorized replacement for the previous per-element Python loop
    t = np.arange(-halfsupport, halfsupport + 1)
    kernel = np.exp(-(t - center) ** 2 / (2 * sigma ** 2))
    kernel = kernel / kernel.sum()

    return kernel.reshape(1, kernel.shape[0])
def DoG1filter(a, sigma):
    """
    Creates the two 1-D Gaussians of a separable Difference-of-Gaussian filter.

    Parameters
    ----------
    a : int
        half-support of the filter.
    sigma : float
        standard deviation; the inner/outer Gaussians use 0.71 * sigma and
        1.14 * sigma respectively.

    Returns
    -------
    (gi, go) : pair of `np.array`, each of shape (1, 2 * a + 1)
        Normalized inner and outer row-vector kernels.

    Notes
    -----
    2-D DoG filters can be constructed by combining 2 1-D DoG filters
    separably, in x and y directions.

    References
    ----------
    Jitendra Malik and Pietro Perona. Preattentive texture discrimination
    with early vision mechanisms. Journal of Optical Society of America A,
    7(5), May 1990, 923-932.

    Zhenlan Jin
    """
    sigi = 0.71 * sigma
    sigo = 1.14 * sigma

    t = np.arange(-a, a + 1)

    # The previous implementation built Python lists and only obtained
    # ndarrays through the accidental `list / np.float64` broadcasting;
    # construct the arrays explicitly and vectorize instead.
    gi = np.exp(-t ** 2 / (2 * sigi ** 2))
    gi = gi / gi.sum()
    go = np.exp(-t ** 2 / (2 * sigo ** 2))
    go = go / go.sum()

    return gi.reshape(1, gi.shape[0]), go.reshape(1, go.shape[0])
def addborder(im, xbdr, ybdr, arg):
    """
    Return `im` with a border added on all sides.

    imnew = addborder(im,xborder,yborder,arg) Make image w/added border.
    imnew = addborder(im,5,5,128)      Add 5 wide border of val 128.
    imnew = addborder(im,5,5,'even')   Even reflection.
    imnew = addborder(im,5,5,'odd')    Odd reflection.
    imnew = addborder(im,5,5,'wrap')   Wraparound.

    """
    ysize, xsize = im.shape

    # check thickness
    if (xbdr > xsize) or (ybdr > ysize):
        raise ValueError('borders must be thinner than image')

    # a numeric arg means a constant border filled with that value
    if isinstance(arg, (int, float)):
        return cv2.copyMakeBorder(im, ybdr, ybdr, xbdr, xbdr, cv2.BORDER_CONSTANT, value=arg)

    # otherwise dispatch on the named reflection style
    styles = {
        'even': cv2.BORDER_REFLECT,
        'odd': cv2.BORDER_REFLECT_101,
        'wrap': cv2.BORDER_WRAP,
    }
    if arg not in styles:
        raise ValueError('unknown border style')
    return cv2.copyMakeBorder(im, ybdr, ybdr, xbdr, xbdr, styles[arg])
def filt2(kernel, im1, reflect_style='odd'):
    """
    Filter *im1* with *kernel*, padding the edges by reflection first.

    Improved version of MATLAB's filter2 that includes reflection at the
    borders. Default style is 'odd'; 'even', 'wrap', or a numeric fill
    value are also accepted (anything addborder understands).

    Examples
    --------
    im2 = filt2(kern,image)         apply kernel with odd reflection (default).
    im2 = filt2(kern,image,'even')  Use even reflection.
    im2 = filt2(kern,image,128)     Fill with 128's.

    Ruth Rosenholtz
    """
    ky, kx = kernel.shape
    iy, ix = im1.shape

    # Pad by one kernel extent per side, convolve, then crop the pad back off
    # so the output has the same shape as the input.
    padded = addborder(im1, kx, ky, reflect_style)
    filtered = conv2(padded, kernel, 'same')
    return filtered[ky:ky + iy, kx:kx + ix]
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def RRcontrast1channel(pyr, DoG_sigma=2):
    """
    Filter a Gaussian pyramid *pyr* with a 1-channel contrast feature detector.

    Each level is filtered separably with the inner and outer Gaussians of a
    Difference-of-Gaussian (DoG1) filter; the rectified difference is the
    contrast map for that level.

    Parameters
    ----------
    pyr : Gaussian pyramid, indexed as pyr[(level, 0)]
    DoG_sigma : std of the center-surround (DoG) filter. Default = 2.
        Refer to DoG1filter.

    Code by Ruth Rosenholtz and Zhenlan Jin
    modified by Yuanzhen Li, Sep 2004
    """
    # Separable difference-of-gaussian filter pair (see DoG1filter).
    inner_kern, outer_kern = DoG1filter(round(DoG_sigma * 3), DoG_sigma)

    contrast = []
    for level in range(len(pyr)):
        img = pyr[(level, 0)]
        # Separable filtering: rows first, then columns.
        center = filt2(inner_kern.T, filt2(inner_kern, img))
        surround = filt2(outer_kern.T, filt2(outer_kern, img))
        contrast.append(abs(center - surround))  # rectified DoG response

    return contrast
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
def reduce(image0, kernel=None):
    """
    Reduce: one downsampling step for Gaussian/Laplacian pyramids,
    using 1-D separable kernels.

    Examples
    --------
    imnew = reduce(im0)        Reduce w/default kernel: [.05 .25 .4 .25 .05]
    imnew = reduce(im0, kern)  Reduce with kern; sums to unity.

    Ruth Rosenholtz
    """
    if kernel is None:
        # Default 5-tap binomial-like kernel (sums to one).
        kernel = np.array([[0.05, 0.25, 0.4, 0.25, 0.05]])

    # Filter horizontally (filt2 is filter2 with reflection), keep even columns.
    half_width = filt2(kernel, image0)[:, ::2]
    # Filter vertically, keep even rows.
    return filt2(kernel.T, half_width)[::2, :]
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
def RRoverlapconvexpand(in_, kernel=None):
    """
    Expand an image to double size by zero-insertion and interpolation.

    Examples
    --------
    out = RRoverlapconvexpand(in_)         expand to double size,
    out = RRoverlapconvexpand(in, kernel)  specify 1-D kernel with unity sum.
    """
    if kernel is None:
        # Default 5-tap kernel (sums to one before the doubling below).
        kernel = np.array([[0.05, 0.25, 0.4, 0.25, 0.05]])

    ysize, xsize = in_.shape
    # Double the kernel so it sums to 2, compensating for the inserted zeros.
    kern2 = kernel * 2

    # Double the width: place input samples in even columns, interpolate.
    wide = np.zeros([ysize, 2 * xsize])
    wide[:, ::2] = in_
    wide = RRoverlapconv(kern2, wide)

    # Double the height: place rows in even positions, interpolate.
    out = np.zeros([2 * ysize, 2 * xsize])
    out[::2, :] = wide
    out = RRoverlapconv(kern2.T, out)

    return out
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
def HV(in_):
    """Return the horizontal-minus-vertical opponent image (H - V)."""
    horizontal, vertical = in_[0], in_[1]
    return horizontal - vertical
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
def DD(in_):
    """Return the right-minus-left diagonal opponent image (R - L)."""
    left_diag, right_diag = in_[2], in_[3]
    return right_diag - left_diag
|
| 321 |
+
|
| 322 |
+
|
| 323 |
+
def sumorients(in_):
    """Sum the four orientation-energy images into a single image."""
    total = in_[0]
    for band in in_[1:4]:
        total = total + band
    return total
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
def poolnew(in_, sigma=None):
    """
    Pool each of 4 equal-size orientation images with a Gaussian.

    *in_* is a sequence of four images (H, V, L, R). Each is expanded to
    double size and reduced back, which is equivalent to Gaussian pooling;
    with an explicit *sigma* a matching 1-D kernel is used for both steps.

    :param in_: sequence of 4 images
    :param sigma: pooling std; None uses the default expand/reduce kernels
    :return: tuple of the 4 pooled images, same order as the input
    """
    if sigma is None:
        pooled = [reduce(RRoverlapconvexpand(band)) for band in in_[:4]]
    else:
        kern = RRgaussfilter1D(round(2 * sigma), sigma)
        pooled = [reduce(RRoverlapconvexpand(band, kern), kern)
                  for band in in_[:4]]
    return tuple(pooled)
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
def imrotate_cv(image, angle, method='bicubic', bbox='crop'):
    """
    Rotate *image* by *angle* degrees about its center using OpenCV.

    :param image: 2-D (or HxWxC) image array
    :param angle: rotation angle in DEGREES (counter-clockwise)
    :param method: interpolation: 'nearest', 'bilinear', 'bicubic'/'cubic'
    :param bbox: output extent; only 'crop' (keep input size) is implemented
    :return: rotated image, same size as the input

    Fix: the original ignored *method* entirely and always used
    cv2.INTER_LINEAR, contradicting its own 'bicubic' default.
    """
    interp_flags = {'nearest': cv2.INTER_NEAREST,
                    'bilinear': cv2.INTER_LINEAR,
                    'bicubic': cv2.INTER_CUBIC,
                    'cubic': cv2.INTER_CUBIC}
    flags = interp_flags.get(method, cv2.INTER_LINEAR)

    image_center = tuple(np.array(image.shape[1::-1]) / 2)
    rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
    # NOTE(review): bbox='loose' (expanded canvas) is not implemented here;
    # the output is always cropped to the input size, as in the original.
    return cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=flags)
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
def imrotate_skimage(im, angle, method='bicubic', bbox='crop'):
    """
    Rotate an image with skimage. Basically a wrapper to deal with the fact
    that skimage expects floating point images within [-1.0, 1.0]: the image
    is shifted/scaled into [0, 1], rotated, and mapped back.

    :param im: 2-D image array
    :param angle: rotation angle in DEGREES
    :param method: unused (skimage order=3, i.e. bicubic, is always applied)
    :param bbox: 'crop' keeps the input size, 'loose' expands the canvas
    :return: rotated image in the original value range

    Fix: a constant image made `imrange` zero and the normalization divided
    by zero; constant images are now rotated without rescaling.
    """
    func_bbox = {'loose': True, 'crop': False}

    immin = np.min(im)
    imrange = np.max(im) - immin
    if imrange == 0:
        # Constant image: nothing to rescale. Rotate the zero-shifted image
        # (all zeros) and shift back; 'loose' fill pixels end up at immin.
        rotated = transform.rotate(im - immin, angle, order=3,
                                   resize=func_bbox[bbox])
        return rotated + immin

    # Normalize into [0, 1] for skimage, rotate, then restore the range.
    scaled = (im - immin) / imrange
    rotated = transform.rotate(scaled, angle, order=3, resize=func_bbox[bbox])
    return rotated * imrange + immin
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
def imrotate_pil(im, angle, method='nearest', bbox='crop'):
    """
    Rotate an image with the PIL package.

    :param im: 2-D image array
    :param angle: rotation angle in DEGREES
    :param method: 'nearest', 'bilinear', 'bicubic' or 'cubic'
    :param bbox: 'loose' expands the canvas, 'crop' keeps the input size
    :return: rotated image as a numpy array
    """
    # PIL resample codes: 0=NEAREST, 2=BILINEAR, 3=BICUBIC.
    resample_codes = {'nearest': 0, 'bilinear': 2, 'bicubic': 3, 'cubic': 3}
    expand_flags = {'loose': True, 'crop': False}
    rotated = Image.fromarray(im).rotate(angle,
                                         expand=expand_flags[bbox],
                                         resample=resample_codes[method])
    return np.array(rotated)
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
def imrotate_scipy(im, angle, method='bicubic', bbox='crop'):
    """
    Rotate an image with scipy.ndimage.

    Cubic spline interpolation (order=3) is always used and the output keeps
    the input shape; *method* and *bbox* are accepted only for interface
    compatibility with the other imrotate_* helpers.

    :param im: image array
    :param angle: rotation angle in DEGREES
    :return: rotated image, same shape as the input
    """
    rotated = ndimage.rotate(im, angle, order=3, reshape=False)
    return rotated
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
def orient_filtnew(pyr, sigma=16 / 14):
    """
    ORIENT_FILTNEW Filters "pyr" (in principle, one level of the Gaussian
    pyramid generated by gausspyr) with 2nd derivative filters in 4
    directions (0, 90, +45, -45 degrees).

    Each 2nd-derivative operator is built as a difference of three offset
    Gaussians: -G(+sigma) + 2*G(0) - G(-sigma).

    Parameters
    ----------
    pyr : 2-D array, one pyramid level
    sigma : float, scale of the oriented filters (default 16/14)

    Returns
    -------
    hvdd : the 4 output images together in a tuple, in the order
        horizontal, vertical, up-left, and down-right.
    """

    halfsupport = round(3 * sigma)
    # halfsupport was 10, for default sigma. We need a halfsupport of about
    # 2*sigma for a single Gaussian. Here we have three, one at -sigma, one at
    # sigma, so we should need a halfsupport of about 3*sigma.

    sigy = sigma
    sigx = sigma  # Was sigx = 3*sigma.

    # Horizontal operator H: three separable 2-D Gaussians with the y-center
    # offset by +sigma, 0, -sigma; each is normalized to unit sum first.
    gx = RRgaussfilter1D(halfsupport, sigx)
    gy = RRgaussfilter1D(halfsupport, sigy, sigma)   # center offset +sigma
    Ga = conv2(gx, gy.T)
    Ga = Ga / sum(sum(Ga))
    gy = RRgaussfilter1D(halfsupport, sigy)          # centered Gaussian
    Gb = conv2(gx, gy.T)
    Gb = Gb / sum(sum(Gb))
    gy = RRgaussfilter1D(halfsupport, sigy, -sigma)  # center offset -sigma
    Gc = conv2(gx, gy.T)
    Gc = Gc / sum(sum(Gc))
    H = -Ga + 2 * Gb - Gc
    V = H.T  # the vertical operator is the transpose of the horizontal one

    # Diagonal operators: rotate each component Gaussian by +/-45 degrees and
    # renormalize (rotation does not exactly preserve the sum), then difference.
    GGa = imrotate_skimage(Ga, 45, 'bicubic', 'crop')
    GGa = GGa / sum(sum(GGa))
    GGb = imrotate_skimage(Gb, 45, 'bicubic', 'crop')
    GGb = GGb / sum(sum(GGb))
    GGc = imrotate_skimage(Gc, 45, 'bicubic', 'crop')
    GGc = GGc / sum(sum(GGc))
    R = -GGa + 2 * GGb - GGc
    GGa = imrotate_skimage(Ga, -45, 'bicubic', 'crop')
    GGa = GGa / sum(sum(GGa))
    GGb = imrotate_skimage(Gb, -45, 'bicubic', 'crop')
    GGb = GGb / sum(sum(GGb))
    GGc = imrotate_skimage(Gc, -45, 'bicubic', 'crop')
    GGc = GGc / sum(sum(GGc))
    L = -GGa + 2 * GGb - GGc

    # Apply the four operators with reflecting-boundary filtering.
    hout = filt2(H, pyr)
    vout = filt2(V, pyr)
    lout = filt2(L, pyr)
    rout = filt2(R, pyr)

    hvdd = hout, vout, lout, rout

    return hvdd
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
def histc(x, bins):
    """
    MATLAB ``histc`` equivalent: count values of *x* per bin-edge interval.

    res[i] counts values with bins[i] <= x < bins[i+1]; the last entry also
    counts values >= bins[-1].

    :param x: 1-D array of samples
    :param bins: 1-D array of monotonically increasing bin edges
    :return: array of counts, same shape as *bins*

    Fix: values below bins[0] (np.digitize returns 0 for them) used to be
    counted in the *last* bin via the res[0 - 1] wraparound; they are now
    ignored, matching MATLAB's histc.
    """
    # Index of the bin each value belongs to (0 means below the first edge).
    bin_idx = np.digitize(x, bins)
    res = np.zeros(bins.shape)
    for i in bin_idx:
        if i > 0:  # skip out-of-range values below bins[0]
            res[i - 1] += 1
    return res
|
| 489 |
+
|
| 490 |
+
|
| 491 |
+
def entropy(x, nbins=None):
    """
    Compute the entropy (in nats) of signal *x* under uniform binning.

    :param x: 1-D array of samples
    :param nbins: number of bins; defaults to ceil(sqrt(len(x)))
    :return: entropy value (0 for degenerate inputs)

    Fix: the degenerate-bin guard now applies whether *nbins* was passed in
    or derived. The original only short-circuited an explicit nbins == 1,
    so a one-sample input derived nbins = 1 and crashed
    np.histogram(x, bins=0).
    """
    nsamples = x.shape[0]

    if nbins is None:
        nbins = int(np.ceil(np.sqrt(nsamples)))
    if nbins <= 1:
        # 0 or 1 bins: the distribution is a point mass, entropy is zero.
        return 0

    # Uniform bin edges over the data range.
    edges = np.histogram(x, bins=nbins - 1)[1]
    ref_hist = histc(x, edges)
    ref_hist = ref_hist / float(np.sum(ref_hist))
    # Drop empty bins so the 0*log(0) terms (limit 0) don't produce NaNs.
    ref_hist = ref_hist[np.nonzero(ref_hist)]
    ref_ent = -np.sum(ref_hist * np.log(ref_hist))

    return ref_ent
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/fcm/vlc.py
ADDED
|
@@ -0,0 +1,838 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import copy
|
| 2 |
+
import os
|
| 3 |
+
import cv2
|
| 4 |
+
import pickle
|
| 5 |
+
import numpy as np
|
| 6 |
+
# import pyrtools as pt
|
| 7 |
+
from PIL import Image
|
| 8 |
+
from .utils import RRgaussfilter1D, RGB2Lab, conv2
|
| 9 |
+
from .utils import normlize, RRoverlapconv, RRcontrast1channel
|
| 10 |
+
from .utils import orient_filtnew, poolnew, HV
|
| 11 |
+
from .utils import DD, sumorients, entropy
|
| 12 |
+
from .pyramid import upConv
|
| 13 |
+
from .base_utils import normalize_image, visualize_images, get_image_pkl
|
| 14 |
+
from .base_utils import sort_images, rename_files, get_real_image
|
| 15 |
+
from .base_utils import sort_images_target, get_dict_variation, parse_image_names
|
| 16 |
+
from .base_utils import write_stats, sort_images_real
|
| 17 |
+
|
| 18 |
+
class VLC:
|
| 19 |
+
"""
|
| 20 |
+
Class for computing Feature Congestion Map.
|
| 21 |
+
ref: Ruth Rosenholtz, Yuanzhen Li, and Lisa Nakano. "Measuring Visual Clutter".
|
| 22 |
+
Journal of Vision, 7(2), 2007.
|
| 23 |
+
"""
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
Class of two measures of visual clutter (Feature Congestion and Subband Entropy)
|
| 27 |
+
|
| 28 |
+
Parameters
|
| 29 |
+
----------
|
| 30 |
+
inputImage : gives the input. It can be one of the following 2 things: 1. an RGB image; 2. a string, i.e., file name of an RGB image.
|
| 31 |
+
numlevels : the number of levels.
|
| 32 |
+
contrast_filt_sigma : the sigma (standard deviation) of the center-surround DoG1 filter used for computing the contrast
|
| 33 |
+
contrast_pool_sigma : the sigma (standard deviation) of this Gaussian window for contrast clutter. Default = 3*filt_sigma.
|
| 34 |
+
color_pool_sigma : the sigma (standard deviation) of this Gaussian window for color clutter, Defaults to 3.
|
| 35 |
+
|
| 36 |
+
Methods
|
| 37 |
+
-------
|
| 38 |
+
getClutter_FC: computes Feature Congestion clutter, outputs both a scalar (clutter of the whole image) and a map (local clutter).
|
| 39 |
+
getClutter_SE: computes Subband Entropy clutter, outputs only a scalar.
|
| 40 |
+
colorClutter: computes clutter maps indicating local variability in color
|
| 41 |
+
contrastClutter: computes clutter maps indicating local variability in contrast
|
| 42 |
+
orientationClutter: computes clutter maps indicating local variability in orientation
|
| 43 |
+
|
| 44 |
+
(Please see individual routines for more info about parameters and outputs.)
|
| 45 |
+
|
| 46 |
+
References
|
| 47 |
+
----------
|
| 48 |
+
Ruth Rosenholtz, Yuanzhen Li, and Lisa Nakano. "Measuring Visual Clutter".
|
| 49 |
+
Journal of Vision, 7(2), 2007. http://www.journalofvision.com/7/2/
|
| 50 |
+
Ruth Rosenholtz, Yuanzhen Li, and Lisa Nakano, March 2007.
|
| 51 |
+
|
| 52 |
+
Ruth Rosenholtz, Yuanzhen Li, Jonathan Mansfield, and Zhenlan Jin. "Feature Congestion: A Measure of Display Clutter".
|
| 53 |
+
CHI '05: Proc. of the SIGCHI conference on Human factors in computing systems. May 2005. 761-770.
|
| 54 |
+
|
| 55 |
+
"""
|
| 56 |
+
|
| 57 |
+
    def __init__(self, add_xyz=None, num_levels=3, contrast_filt_sigma=1,
                 contrast_pool_sigma=None, color_pool_sigma=3, xyz_sigma=1,
                 w_color=0.2088, w_contrast=0.0660, w_orient=0.0269,
                 w_xyz=0.1):
        """
        :param add_xyz: whether to add xyz channels. options: None, 'all', 'x' or 'y' or 'z'
        :param num_levels: number of scales
        :param contrast_filt_sigma: sigma for gaussian filter for contrast
        :param contrast_pool_sigma: gaussian window size for contrast; defaults to 3 * contrast_filt_sigma
        :param color_pool_sigma: gaussian window for color
        :param xyz_sigma: gaussian window for xyz
        :param w_color: mixing weight of color clutter
        :param w_contrast: mixing weight of contrast clutter
        :param w_orient: mixing weight of orientation clutter
        :param w_xyz: mixing weight of xyz clutter
        """
        self.add_xyz = add_xyz
        self.num_levels = num_levels
        self.contrast_filt_sigma = contrast_filt_sigma
        # Default pooling window scales with the contrast filter size.
        self.contrast_pool_sigma = 3 * contrast_filt_sigma if contrast_pool_sigma is None else contrast_pool_sigma
        self.color_pool_sigma = color_pool_sigma
        self.xyz_sigma = xyz_sigma

        # orient_pool_sigma is the sigma (standard deviation) of this Gaussian window, and here is hard-wired to 7/2.
        self.orient_pool_sigma = 7 / 2
        self.w_color = w_color
        self.w_contrast = w_contrast
        self.w_orient = w_orient
        self.w_xyz = w_xyz
        # Lab-channel pyramids, populated later (presumably by a setup method
        # outside this view — TODO confirm).
        self.L_pyr, self.a_pyr, self.b_pyr = None, None, None
        # xyz channel buffers, also populated later.
        self.x, self.y, self.z, self.xyz = None, None, None, None
|
| 89 |
+
def collapse(self, clutter_levels):
|
| 90 |
+
"""
|
| 91 |
+
Collapses multple scale clutter maps into a unified one
|
| 92 |
+
:param clutter_levels: number of scales of clutter maps
|
| 93 |
+
:return:
|
| 94 |
+
clutter_map: a unified clutter map
|
| 95 |
+
"""
|
| 96 |
+
|
| 97 |
+
kernel_1d = np.array([[0.05, 0.25, 0.4, 0.25, 0.05]])
|
| 98 |
+
kernel_2d = conv2(kernel_1d, kernel_1d.T)
|
| 99 |
+
|
| 100 |
+
clutter_map = clutter_levels[0].copy()
|
| 101 |
+
for scale in range(1, len(clutter_levels)):
|
| 102 |
+
clutter_here = clutter_levels[scale]
|
| 103 |
+
|
| 104 |
+
for kk in range(scale, 0, -1):
|
| 105 |
+
# clutter_here = upConv(image=clutter_here, filt=kernel_2d,
|
| 106 |
+
# edge_type='reflect1',
|
| 107 |
+
# step=[2, 2],
|
| 108 |
+
# start=[0, 0])
|
| 109 |
+
clutter_here = upConv(image=clutter_here,
|
| 110 |
+
filt=kernel_2d,
|
| 111 |
+
edge_type='symm',
|
| 112 |
+
step=[2, 2],
|
| 113 |
+
start=[0, 0])
|
| 114 |
+
|
| 115 |
+
common_sz = (min(clutter_map.shape[0], clutter_here.shape[0]),
|
| 116 |
+
min(clutter_map.shape[1], clutter_here.shape[1]))
|
| 117 |
+
for i in range(0, common_sz[0]):
|
| 118 |
+
for j in range(0, common_sz[1]):
|
| 119 |
+
clutter_map[i][j] = max(clutter_map[i][j], clutter_here[i][j])
|
| 120 |
+
|
| 121 |
+
return clutter_map
|
| 122 |
+
|
| 123 |
+
    def get_orient_opponent_energy(self, num_levels, L_pyr):
        """
        Compute oriented opponent energy at each pyramid level.

        :param num_levels: number of scales
        :param L_pyr: luminance pyramid, indexed as L_pyr[(scale, 0)]
        :return:
            out: per-scale (hv, dd) pairs of normalized opponent-energy
                images — hv from the H/V pair, dd from the two diagonals
        """
        hvdd = [0] * num_levels
        hv = [0] * num_levels
        dd = [0] * num_levels
        out = [0] * num_levels
        total = [0] * num_levels

        noise = 1.0  # Was 1.5
        filterScale = 16 / 14 * 1.75
        poolScale = 1.75
        # These probably seem like arbitrary numbers, but it's just trying to get
        # three very different feature extraction methods to operate at basically
        # the same scales.

        for scale in range(0, num_levels):
            # Check this is the right order for Landy/Bergen. RRR
            hvdd[scale] = orient_filtnew(L_pyr[(scale, 0)], filterScale)
            # filt with 4 oriented filters 0, 45, 90, 135. Was sigma = 16/14, orient_filtnew,
            # then 16/14*1.75 to match contrast and other scales.
            # Eventually make this sigma a variable that's passed to this routine.
            # hvdd[scale] is the 4 output images concatenated together,
            # in the order horizontal, vertical, up-left, and down-right.

            hvdd[scale] = [x ** 2 for x in hvdd[scale]]  # local energy
            hvdd[scale] = poolnew(hvdd[scale],
                                  poolScale)  # Pools with a gaussian filter. Was effectively sigma=1, then 1.75 to match 1.75 above.
            # RRR Should look at these results and see if this is the right amount of
            # pooling for the new filters. It was right for the Landy-Bergen
            # filters.
            hv[scale] = HV(hvdd[scale])  # get the difference image between horizontal and vertical: H-V (0-90)
            dd[scale] = DD(hvdd[scale])  # get the difference image between right and left: R-L (45-135)
            # Normalize by the total response at this scale, assuming the total
            # response is high enough. If it's too low, we'll never see this
            # orientation. I'm not sure what to do here -- set it to zeros and
            # it's like that's the orientation. Maybe output the total response
            # and decide what to do later. RRR
            total[scale] = sumorients(hvdd[scale]) + noise  # add noise based upon sum orients at visibility threshold
            hv[scale] = hv[scale] / total[scale]  # normalize the hv and dd image
            dd[scale] = dd[scale] / total[scale]
            # out is the 2 output images concatenated together, in the order of hv, dd
            out[scale] = hv[scale], dd[scale]

        return out
|
| 172 |
+
|
| 173 |
+
    def compute_orientation_clutter(self):
        """
        Compute the orientation clutter maps.

        Orientation clutter is measured as the local variability of the
        two-vector [mean cos-like, mean sin-like] derived from oriented
        opponent energy: the 4th root of the determinant of its covariance
        matrix at each pyramid level. Reads self.L_pyr and self.num_levels.

        :return:
            orient_clt_lvls: list of orientation clutter maps at multiple
                scales (one per pyramid level)
        """

        noise = 0.001  # Was eps, but that gave too much orientation noise in the saliency maps. Then changed to 0.000001
        poolScale = 7 / 2

        numlevels = len(self.L_pyr)
        Dc = [0] * numlevels  # mean "cos 2 theta" at distractor scale
        Ds = [0] * numlevels  # mean "sin 2 theta" at distractor scale

        # Get approximations to cos(2theta) and sin(2theta) from oriented opponent
        # energy, at each of the numlevels of the pyramid
        angles = self.get_orient_opponent_energy(self.num_levels, self.L_pyr)

        # Compute the two-vector [meancos, meansin] at each scale, as well as the
        # things we need to compute the mean and covariance of this two-vector at
        # the larger, distractor scale.
        bigG = RRgaussfilter1D(round(8 * poolScale), 4 * poolScale)
        # NOTE(review): maxbigG is computed but never used below.
        maxbigG = max(bigG) ** 2

        covMx = {}
        orient_clt_lvls = [0] * numlevels

        for i in range(0, numlevels):
            cmx = angles[i][0]  # cos-like component at this scale
            smx = angles[i][1]  # sin-like component at this scale

            # Pool to get means at distractor scale. In pooling, don't pool over the target
            # region (implement this by pooling with a big Gaussian, then
            # subtracting the pooling over the target region computed above. Note,
            # however, that we first need to scale the target region pooling so
            # that its peak is the same height as this much broader Gaussian used
            # to pool over the distractor region.
            Dc[i] = RRoverlapconv(bigG, cmx)
            Dc[i] = RRoverlapconv(bigG.T, Dc[i])
            Ds[i] = RRoverlapconv(bigG, smx)
            Ds[i] = RRoverlapconv(bigG.T, Ds[i])

            # Covariance matrix elements. Compare with computations in
            # RRStatisticalSaliency. I tried to match computeColorClutter, but I
            # don't remember the meaning of some of the terms I removed. XXX
            # cov(X,Y) = E(XY) - E(X)E(Y); 'noise' regularizes the variances.
            covMx[(i, 0, 0)] = RRoverlapconv(bigG, cmx ** 2)
            covMx[(i, 0, 0)] = RRoverlapconv(bigG.T, covMx[(i, 0, 0)]) - Dc[i] ** 2 + noise
            covMx[(i, 0, 1)] = RRoverlapconv(bigG, cmx * smx)
            covMx[(i, 0, 1)] = RRoverlapconv(bigG.T, covMx[(i, 0, 1)]) - Dc[i] * Ds[i]
            covMx[(i, 1, 1)] = RRoverlapconv(bigG, smx ** 2)
            covMx[(i, 1, 1)] = RRoverlapconv(bigG.T, covMx[(i, 1, 1)]) - Ds[i] ** 2 + noise

            # Get determinant of covariance matrix, which is the volume of the
            # covariance ellipse
            detIm = covMx[(i, 0, 0)] * covMx[(i, 1, 1)] - covMx[(i, 0, 1)] ** 2
            # Take the square root considering variance is squared, and the square
            # root again, since this is the area and the contrast measure is a "length"
            orient_clt_lvls[i] = detIm ** (1 / 4)

        return orient_clt_lvls
|
| 233 |
+
|
| 234 |
+
def get_contrast_clutter(self):
|
| 235 |
+
"""
|
| 236 |
+
Computes the contrast clutter map(s) of an image.
|
| 237 |
+
:return:
|
| 238 |
+
contrast_clt_lvls: list of contrast clutter maps as multiple scales (specified by numLevels)
|
| 239 |
+
contrast_clt_map: an array of same size as input image computed by taking the max of clutter maps at different
|
| 240 |
+
scales (i.e. combined map)
|
| 241 |
+
"""
|
| 242 |
+
|
| 243 |
+
# Compute "contrast-energy" by filtering the luminance
|
| 244 |
+
# channel L by a center-surround filter and squaring (or taking the absolute
|
| 245 |
+
# values of) the filter outputs. The center-surround filter is a DoG1 filter
|
| 246 |
+
# with std 'contrast_filt_sigma'.
|
| 247 |
+
contrast = RRcontrast1channel(self.L_pyr, self.contrast_filt_sigma)
|
| 248 |
+
|
| 249 |
+
# Get a Gaussian filter for computing the variance of contrast
|
| 250 |
+
# Since we used a Gaussian pyramid to find contrast features, these filters
|
| 251 |
+
# have the same size regardless of the scale of processing.
|
| 252 |
+
bigG = RRgaussfilter1D(round(self.contrast_pool_sigma * 2), self.contrast_pool_sigma)
|
| 253 |
+
contrast_clt_lvls = self.compute_variance(bigG, contrast)
|
| 254 |
+
contrast_clt_map = self.collapse(contrast_clt_lvls)
|
| 255 |
+
return contrast_clt_lvls, contrast_clt_map
|
| 256 |
+
|
| 257 |
+
    def compute_multi_ch_covar(self, bigG, pyr_1, pyr_2, pyr_3,
                               delta1=0., delta2=0., delta3=0.):
        """
        Compute per-scale clutter from the covariance of a 3-channel feature.

        For each scale, the 3x3 covariance matrix of the three channels is
        estimated by Gaussian filtering (cov(X,Y) = E(XY) - E(X)E(Y)); the
        clutter value is derived from its determinant.

        :param bigG: 1-D Gaussian kernel (row vector), applied separably
        :param pyr_1: first channel pyramid, indexed as pyr_1[(scale, 0)]
        :param pyr_2: second channel pyramid, same indexing
        :param pyr_3: third channel pyramid, same indexing
        :param delta1: Delta value added to channel-1 variance (for Color)
        :param delta2: Delta value added to channel-2 variance (for Color)
        :param delta3: Delta value added to channel-3 variance (for Color)
        :return:
            clutter_lvls: list with one clutter map per scale
        """
        covMx = {}
        clutter_lvls = [0] * self.num_levels
        D1 = [0] * self.num_levels
        D2 = [0] * self.num_levels
        D3 = [0] * self.num_levels
        for i in range(0, self.num_levels):
            # get E(X) by filtering X with a 1-D Gaussian window separably in x and y directions:
            D1[i] = RRoverlapconv(bigG, pyr_1[(i, 0)])
            D1[i] = RRoverlapconv(bigG.T, D1[i])  # E(L)
            D2[i] = RRoverlapconv(bigG, pyr_2[(i, 0)])
            D2[i] = RRoverlapconv(bigG.T, D2[i])  # E(a)
            D3[i] = RRoverlapconv(bigG, pyr_3[(i, 0)])
            D3[i] = RRoverlapconv(bigG.T, D3[i])  # E(b)

            # Covariance matrix
            # covMx(L,a,b) = | cov(L,L)  cov(L,a)  cov(L,b) |
            #                | cov(a,L)  cov(a,a)  cov(a,b) |
            #                | cov(b,L)  cov(b,a)  cov(b,b) |
            # where cov(X,Y) = E(XY) - E(X)E(Y)
            # and if X is the same as Y, then it's the variance var(X) =
            # E(X.^2)-E(X).^2
            # and as cov(X,Y) = cov(Y,X), covMx is symmetric, so only the
            # upper triangle is computed below.
            # covariance matrix elements:
            covMx[(i, 0, 0)] = RRoverlapconv(bigG, pyr_1[(i, 0)] ** 2)
            covMx[(i, 0, 0)] = RRoverlapconv(bigG.T, covMx[(i, 0, 0)]) - D1[i] ** 2 + delta1  # cov(L,L) + deltaL2
            covMx[(i, 0, 1)] = RRoverlapconv(bigG, pyr_1[(i, 0)] * pyr_2[(i, 0)])
            covMx[(i, 0, 1)] = RRoverlapconv(bigG.T, covMx[(i, 0, 1)]) - D1[i] * D2[i]  # cov(L,a)
            covMx[(i, 0, 2)] = RRoverlapconv(bigG, pyr_1[(i, 0)] * pyr_3[(i, 0)])
            covMx[(i, 0, 2)] = RRoverlapconv(bigG.T, covMx[(i, 0, 2)]) - D1[i] * D3[i]  # cov(L,b)
            covMx[(i, 1, 1)] = RRoverlapconv(bigG, pyr_2[(i, 0)] ** 2)
            covMx[(i, 1, 1)] = RRoverlapconv(bigG.T, covMx[(i, 1, 1)]) - D2[i] ** 2 + delta2  # cov(a,a) + deltaa2
            covMx[(i, 1, 2)] = RRoverlapconv(bigG, pyr_2[(i, 0)] * pyr_3[(i, 0)])
            covMx[(i, 1, 2)] = RRoverlapconv(bigG.T, covMx[(i, 1, 2)]) - D2[i] * D3[i]  # cov(a,b)
            covMx[(i, 2, 2)] = RRoverlapconv(bigG, pyr_3[(i, 0)] ** 2)
            covMx[(i, 2, 2)] = RRoverlapconv(bigG.T, covMx[(i, 2, 2)]) - D3[i] ** 2 + delta3  # cov(b,b) + deltab2

            # Get the determinant of covariance matrix
            # which is the "volume" of the covariance ellipsoid
            # (cofactor expansion along the first row, using symmetry).
            detIm = covMx[(i, 0, 0)] * (covMx[(i, 1, 1)] * covMx[(i, 2, 2)] - covMx[(i, 1, 2)] * covMx[(i, 1, 2)]) - \
                    covMx[(i, 0, 1)] * (covMx[(i, 0, 1)] * covMx[(i, 2, 2)] - covMx[(i, 1, 2)] * covMx[(i, 0, 2)]) + \
                    covMx[(i, 0, 2)] * (covMx[(i, 0, 1)] * covMx[(i, 1, 2)] - covMx[(i, 1, 1)] * covMx[(i, 0, 2)])

            # take the square root considering variance is squared, and the cube
            # root, since this is the volume and the contrast measure is a "length"
            # Clamp small negative determinants from numerical error first.
            detIm[detIm < 0] = 0
            clutter_lvls[i] = np.sqrt(detIm) ** (1 / 3)
        return clutter_lvls
|
| 315 |
+
|
| 316 |
+
def compute_variance(self, bigG, pyr):
|
| 317 |
+
# initiate clutter_map and clutter_levels:
|
| 318 |
+
m, n = len(pyr), 1
|
| 319 |
+
clutter_lvls = [0] * m
|
| 320 |
+
for scale in range(0, m):
|
| 321 |
+
for channel in range(0, n):
|
| 322 |
+
# var(X) = E(X.^2) - E(X).^2
|
| 323 |
+
# get E(X) by filtering X with a 1-D Gaussian window separably in x and y directions
|
| 324 |
+
meanD = RRoverlapconv(bigG, pyr[scale])
|
| 325 |
+
meanD = RRoverlapconv(bigG.T, meanD)
|
| 326 |
+
# get E(X.^2) by filtering X.^2 with a 1-D Gaussian window separably in x and y directions
|
| 327 |
+
meanD2 = RRoverlapconv(bigG, pyr[scale] ** 2)
|
| 328 |
+
meanD2 = RRoverlapconv(bigG.T, meanD2)
|
| 329 |
+
|
| 330 |
+
# get variance by var(X) = E(X.^2) - E(X).^2
|
| 331 |
+
stddevD = np.sqrt(abs(meanD2 - meanD ** 2))
|
| 332 |
+
clutter_lvls[scale] = stddevD
|
| 333 |
+
return clutter_lvls
|
| 334 |
+
|
| 335 |
+
def get_color_clutter(self):
|
| 336 |
+
"""
|
| 337 |
+
Computes color clutter maps for a given image
|
| 338 |
+
Color clutter is computed as the "volume" of a color distribution
|
| 339 |
+
ellipsoid, which is the determinant of covariance matrix. Covariance
|
| 340 |
+
matrix can be computed efficiently through linear filtering. More
|
| 341 |
+
specifically, cov(X,Y) = E(XY)-E(X)E(Y), where E (expectation value)
|
| 342 |
+
can be approximated by filtering with a Gaussian window.
|
| 343 |
+
|
| 344 |
+
:return:
|
| 345 |
+
color_clt_lvls: list of color clutter maps as multiple scales (specified by numLevels)
|
| 346 |
+
color_clt_map: an array of same size as input image computed by taking the max of clutter maps at different
|
| 347 |
+
scales (i.e. combined map)
|
| 348 |
+
"""
|
| 349 |
+
# Compute clutter
|
| 350 |
+
# sensitivities to the L,a,and b channels are different, therefore we use
|
| 351 |
+
# deltaL2, deltaa2, and deltab2 to "scale" the L,a,b axes when computing
|
| 352 |
+
# the covariance matrix. Eventually these numbers should be vary according
|
| 353 |
+
# to the spatial scales, mimicing our visual system's sensitivity function
|
| 354 |
+
deltaL2 = 0.0007 ** 2
|
| 355 |
+
deltaa2 = 0.1 ** 2
|
| 356 |
+
deltab2 = 0.05 ** 2
|
| 357 |
+
|
| 358 |
+
# Get a Gaussian filter for computing the covariance
|
| 359 |
+
bigG = RRgaussfilter1D(round(2 * self.color_pool_sigma), self.color_pool_sigma)
|
| 360 |
+
color_clt_lvls = self.compute_multi_ch_covar(bigG, self.L_pyr, self.a_pyr, self.b_pyr,
|
| 361 |
+
deltaL2, deltaa2, deltab2)
|
| 362 |
+
|
| 363 |
+
color_clt_map = self.collapse(color_clt_lvls)
|
| 364 |
+
|
| 365 |
+
return color_clt_lvls, color_clt_map
|
| 366 |
+
|
| 367 |
+
def get_xyz_clutter(self):
|
| 368 |
+
"""
|
| 369 |
+
Computes the color clutter maps
|
| 370 |
+
:return:
|
| 371 |
+
color_clt_lvls: ist of color clutter maps as multiple scales (specified by numLevels)
|
| 372 |
+
"""
|
| 373 |
+
# Get a Gaussian filter for computing the covariance
|
| 374 |
+
bigG = RRgaussfilter1D(round(2 * self.xyz_sigma), self.xyz_sigma)
|
| 375 |
+
# xyz_clt_lvls = self.compute_multi_ch_covar(bigG, self.x_pyr,self.y_pyr,self.z_pyr)
|
| 376 |
+
if self.add_xyz == 'all':
|
| 377 |
+
xyz_clt_lvls, xyz_clt_map = [], []
|
| 378 |
+
for pyr in [self.x_pyr, self.y_pyr, self.z_pyr]:
|
| 379 |
+
clt_lvls = self.compute_variance(bigG, pyr)
|
| 380 |
+
clt_map = self.collapse(clt_lvls)
|
| 381 |
+
xyz_clt_lvls.append(clt_lvls)
|
| 382 |
+
xyz_clt_map.append(clt_map)
|
| 383 |
+
else:
|
| 384 |
+
xyz_clt_lvls = self.compute_variance(bigG, self.xyz_pyr)
|
| 385 |
+
xyz_clt_map = self.collapse(xyz_clt_lvls)
|
| 386 |
+
|
| 387 |
+
return xyz_clt_lvls, xyz_clt_map
|
| 388 |
+
|
| 389 |
+
def get_orientation_clutter(self):
|
| 390 |
+
"""
|
| 391 |
+
Computes the orientation clutter map(s) of an image.
|
| 392 |
+
:return:
|
| 393 |
+
orient_clt_lvls: list of orientation clutter maps as multiple scales (specified by numLevels)
|
| 394 |
+
orient_clt_map: an array of same size as input image computed by taking the max of clutter maps at different
|
| 395 |
+
scales (i.e. combined map)
|
| 396 |
+
"""
|
| 397 |
+
# Compute clutter
|
| 398 |
+
orient_clt_lvls = self.compute_orientation_clutter()
|
| 399 |
+
orient_clt_map = self.collapse(orient_clt_lvls)
|
| 400 |
+
|
| 401 |
+
return orient_clt_lvls, orient_clt_map
|
| 402 |
+
|
| 403 |
+
def get_clutter(self, image):
|
| 404 |
+
"""
|
| 405 |
+
Computes Feature Congestion clutter map(s) of an image.
|
| 406 |
+
:param image: Path to or the image of interest
|
| 407 |
+
:return: Clutter maps for each given feature
|
| 408 |
+
"""
|
| 409 |
+
self.get_image(image)
|
| 410 |
+
# compute the color clutter
|
| 411 |
+
color_clt_lvls, color_clt_map = self.get_color_clutter()
|
| 412 |
+
# compute the contrast clutter
|
| 413 |
+
contrast_clt_lvls, contrast_clt_map = self.get_contrast_clutter()
|
| 414 |
+
# compute the orientation clutter
|
| 415 |
+
orient_clt_lvls, orientation_clt_map = self.get_orientation_clutter()
|
| 416 |
+
|
| 417 |
+
xyz_clutter = None
|
| 418 |
+
if self.add_xyz is not None:
|
| 419 |
+
xyz_clt_lvls, xyz_clt_map = self.get_xyz_clutter()
|
| 420 |
+
xyz_clutter = [xyz_clt_lvls, xyz_clt_map]
|
| 421 |
+
|
| 422 |
+
# output them in list structures
|
| 423 |
+
color_clutter = [color_clt_lvls, color_clt_map]
|
| 424 |
+
contrast_clutter = [contrast_clt_lvls, contrast_clt_map]
|
| 425 |
+
orientation_clutter = [orient_clt_lvls, orientation_clt_map]
|
| 426 |
+
|
| 427 |
+
return color_clutter, contrast_clutter, orientation_clutter, xyz_clutter
|
| 428 |
+
|
| 429 |
+
    def get_image(self, image):
        """
        Loads the image (when given a path) and converts it to the CIELab
        color space, then builds per-channel Gaussian pyramids used by the
        clutter computations.

        Side effects: sets self.L_pyr / self.a_pyr / self.b_pyr and, when
        self.add_xyz is enabled, the xyz pyramid attribute(s).

        :param image: path to an image, or an array; when add_xyz is used the
            array carries xyz channels after the first three (indices 3..5)
            — TODO confirm against callers
        :return: None
        """

        if isinstance(image, str):
            image = cv2.imread(image)
            # cv2 loads BGR; work in RGB from here on.
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        image_rgb = image[..., :3]
        # image_rgb = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2RGB)

        # we first convert it into the perceptually-based CIELab color space.
        Lab = RGB2Lab(image_rgb)

        # luminance(L) and the chrominance(a,b) channels
        l, a, b = cv2.split(Lab.astype(np.float32))
        l_pyr_cof, a_pyr_cof, b_pyr_cof = {}, {}, {}

        if self.add_xyz is not None:
            if self.add_xyz == 'all':
                # All three coordinate channels must be present.
                assert image.shape[-1] >= 6
                x_pyr_cof, y_pyr_cof, z_pyr_cof = {}, {}, {}
                x, y, z = image[..., 3], image[..., 4], image[..., 5]
            else:
                xyz_pyr_cof = {}
                # NOTE(review): when image.ndim == 4 the channel index is
                # fixed to 3 regardless of which axis was requested — confirm
                # this is intended for batched inputs.
                idx = 3 if image.ndim == 4 else {'x': 3, 'y': 4, 'z': 5}[self.add_xyz]
                xyz = image[...,idx]

        # Get Gaussian pyramids (one for each of L,a,b); each level halves the
        # previous one via pyrDown.
        for i in range(0, self.num_levels):
            l_pyr_cof[(i, 0)] = cv2.pyrDown(l)
            a_pyr_cof[(i, 0)] = cv2.pyrDown(a)
            b_pyr_cof[(i, 0)] = cv2.pyrDown(b)
            # Deep-copy so the next pyrDown input is decoupled from the stored
            # pyramid level.
            l = copy.deepcopy(l_pyr_cof[(i, 0)])
            a = copy.deepcopy(a_pyr_cof[(i, 0)])
            b = copy.deepcopy(b_pyr_cof[(i, 0)])
            if self.add_xyz == 'all':
                # key = (i,0)
                # xyz pyramids use a plain integer key (unlike the Lab
                # pyramids, which use (i, 0) tuples).
                key = i
                x_pyr_cof[key] = cv2.pyrDown(x)
                y_pyr_cof[key] = cv2.pyrDown(y)
                z_pyr_cof[key] = cv2.pyrDown(z)
                x = copy.deepcopy(x_pyr_cof[key])
                y = copy.deepcopy(y_pyr_cof[key])
                z = copy.deepcopy(z_pyr_cof[key])
            elif self.add_xyz in ['x', 'y', 'z']:
                key = i
                xyz_pyr_cof[key] = cv2.pyrDown(xyz)
                xyz = copy.deepcopy(xyz_pyr_cof[key])

        self.L_pyr = l_pyr_cof
        self.a_pyr = a_pyr_cof
        self.b_pyr = b_pyr_cof
        if self.add_xyz == 'all':
            self.x_pyr = x_pyr_cof
            self.y_pyr = y_pyr_cof
            self.z_pyr = z_pyr_cof
        elif self.add_xyz is not None:
            self.xyz_pyr = xyz_pyr_cof
|
| 491 |
+
|
| 492 |
+
def get_fcm(self, image,
|
| 493 |
+
p=1, save_maps=False,
|
| 494 |
+
img_name="sample.png"):
|
| 495 |
+
"""
|
| 496 |
+
Computes the Feature Congestion Measure (FCM) for visual clutter based on local variability of color, orientation,
|
| 497 |
+
and contrast
|
| 498 |
+
:param image: Path to or the image of interest
|
| 499 |
+
:param p: order of Minkowski distance (\\sigma (x-y)**p)**(1/p)
|
| 500 |
+
:param save_maps: whether to save the maps
|
| 501 |
+
:param img_name: path_to the image
|
| 502 |
+
:return:
|
| 503 |
+
fcm_scalar: the value of metric
|
| 504 |
+
fcm_map: clutter map which gives the information about local clutter information
|
| 505 |
+
"""
|
| 506 |
+
color_clt, contrast_clt, orient_clt, xyz_clt = self.get_clutter(image)
|
| 507 |
+
fcm_map = (color_clt[1] / self.w_color + contrast_clt[1] / self.w_contrast +
|
| 508 |
+
orient_clt[1] / self.w_orient)
|
| 509 |
+
if self.add_xyz is not None:
|
| 510 |
+
xyz_clt_val = None
|
| 511 |
+
if isinstance(xyz_clt[1],list):
|
| 512 |
+
for idx, cl in enumerate(xyz_clt[1]):
|
| 513 |
+
if idx == 0:
|
| 514 |
+
xyz_clt_val = cl/self.w_xyz
|
| 515 |
+
else:
|
| 516 |
+
xyz_clt_val += cl/self.w_xyz
|
| 517 |
+
else:
|
| 518 |
+
xyz_clt_val = xyz_clt[1]/self.w_xyz
|
| 519 |
+
fcm_map += xyz_clt_val
|
| 520 |
+
|
| 521 |
+
fcm_scalar = np.mean(fcm_map ** p) ** (1 / p) # element wise
|
| 522 |
+
|
| 523 |
+
if save_maps:
|
| 524 |
+
save_root_fl = f"output/{os.path.dirname(img_name)}_maps"
|
| 525 |
+
os.makedirs(save_root_fl, exist_ok=True)
|
| 526 |
+
for k, v in {'color_map': color_clt[1], 'contrast_map': contrast_clt[1],
|
| 527 |
+
'orient_map': orient_clt[1]}.items():
|
| 528 |
+
pil_image = Image.fromarray(normlize(v))
|
| 529 |
+
# save collapsed clutter map(s)
|
| 530 |
+
pil_image.save(os.path.join(save_root_fl, f"{os.path.basename(img_name).split('.')[0]}_{k}.png"))
|
| 531 |
+
|
| 532 |
+
return fcm_scalar, fcm_map
|
| 533 |
+
|
| 534 |
+
|
| 535 |
+
|
| 536 |
+
def test_multiview(out_put_file='data_bases/test.pkl'):
    """Scores side/top renders of every task variation with FCM (plain rgb and
    rgb+xyz variants) and pickles the augmented dictionary to
    ``out_put_file``."""
    root_folder = "sim_data_v2"
    save_root_folder = f"output/{root_folder}"
    os.makedirs(save_root_folder, exist_ok=True)

    files_dic = parse_image_names(root_folder)

    # Plain (rgb-only) scorer and an xyz-augmented scorer.
    clt = VLC(num_levels=3, contrast_filt_sigma=1,
              contrast_pool_sigma=3, color_pool_sigma=3,
              w_color=0.220, w_contrast=0.0660, w_orient=0.0269,
              add_xyz=None)
    clt_xyz = VLC(num_levels=3, contrast_filt_sigma=1,
                  contrast_pool_sigma=3, color_pool_sigma=3,
                  w_color=0.220, w_contrast=0.0660, w_orient=0.0269,
                  w_xyz=0.100, add_xyz='all')

    for task in files_dic:
        for cnt in files_dic[task]:
            for variation in files_dic[task][cnt]:
                entry = files_dic[task][cnt][variation]
                side_img = get_image_pkl(entry['side'])
                top_img = get_image_pkl(entry['top'])

                # Score each view with both scorers (xyz scorer consumes the
                # extra coordinate channels).
                side_fc, _ = clt.get_fcm(side_img[..., :3], p=1, save_maps=False,
                                         img_name=entry['side'])
                side_fc_xyz, _ = clt_xyz.get_fcm(side_img, p=1, save_maps=False,
                                                 img_name=entry['side'])
                top_fc, _ = clt.get_fcm(top_img[..., :3], p=1, save_maps=False,
                                        img_name=entry['side'])
                top_fc_xyz, _ = clt_xyz.get_fcm(top_img, p=1, save_maps=False,
                                                img_name=entry['side'])

                entry['scores'] = {
                    'fc_s': side_fc, 'fc_t': top_fc,
                    'fc_avg': np.mean([side_fc, top_fc]),
                    'fcx_s': side_fc_xyz, 'fcx_t': top_fc_xyz,
                    'fcx_avg': np.mean([side_fc_xyz, top_fc_xyz])}
                print(entry['side'],
                      side_fc, top_fc, np.mean([side_fc, top_fc]),
                      side_fc_xyz, top_fc_xyz, np.mean([side_fc_xyz, top_fc_xyz]))

    os.makedirs(os.path.dirname(out_put_file), exist_ok=True)
    with open(out_put_file, 'wb') as f:
        pickle.dump(files_dic, f, pickle.HIGHEST_PROTOCOL)
    return files_dic, out_put_file
|
| 584 |
+
|
| 585 |
+
def test_sorting_xyz(root_folder ="sim_data",
                     db_root= "data_bases/test.pkl",
                     gen_images=True):
    """Sorts each task variation's images by every score key, optionally
    renders annotated 5-wide comparison grids, and writes a per-image CSV of
    the scores.

    :param root_folder: dataset folder name used to build output paths
    :param db_root: pickle produced by test_multiview (per-image scores)
    :param gen_images: when True, write the concatenated comparison images
    """
    save_root_folder = f"output/{root_folder}_multi_combined"
    os.makedirs(save_root_folder, exist_ok=True)
    with open(db_root, 'rb') as f:
        res_dict = pickle.load(f)
    stats = []
    for task in res_dict:
        # Score keys are taken from the first entry of the first count.
        skeys = res_dict[task][1][0]['scores'].keys()
        var_dict = get_dict_variation(res_dict[task])
        for v in var_dict:
            # Stats are appended only for the first sort_key pass per
            # variation (the underlying scores are the same for every key).
            log_stat = False
            for sort_key in skeys:
                idxs = sort_images_target(var_dict[v], sort_key)
                fimg = None
                h_image = []
                for i, idx in enumerate(idxs):
                    # idx is 0-based; entries are keyed from 1.
                    image_dic = var_dict[v][idx+1]
                    text1 = (f"fcm_s: {image_dic['scores']['fc_s']:.2}, fcm_t: {image_dic['scores']['fc_t']:.2}, "
                             f"fcm_avg: {image_dic['scores']['fc_avg']:.3}")
                    text2 = (f"fcmx_s: {image_dic['scores']['fcx_s']:.2}, fcmx_t: {image_dic['scores']['fcx_t']:.2}, "
                             f"fcmx_avg: {image_dic['scores']['fcx_avg']:.3}")
                    if not log_stat:
                        stats.append(f"{task}, {v}, {idx+1}, {image_dic['scores']['fc_s']:.4}, {image_dic['scores']['fc_t']:.4},"
                                     f"{image_dic['scores']['fc_avg']:.4}, {image_dic['scores']['fcx_s']:.4},"
                                     f"{image_dic['scores']['fcx_t']:.4},{image_dic['scores']['fcx_avg']:.4}\n")
                    if gen_images:
                        im = get_image_pkl(image_dic['im'])
                        im = cv2.cvtColor(im[..., :3].astype(np.uint8), cv2.COLOR_RGB2BGR)
                        im = cv2.resize(im, (320, 240), im)

                        # Annotate with plain-FCM (magenta) and xyz-FCM
                        # (orange) scores plus the 1-based image index.
                        im = cv2.putText(im, text1,
                                         (0, im.shape[0] - 30),
                                         cv2.FONT_HERSHEY_SIMPLEX,
                                         0.5, (255, 0, 255), 1)
                        im = cv2.putText(im, text2,
                                         (0, im.shape[0] - 10),
                                         cv2.FONT_HERSHEY_SIMPLEX,
                                         0.5, (255, 165, 0), 1)
                        im = cv2.putText(im, f"{idx+1}",
                                         (0, 30),
                                         cv2.FONT_HERSHEY_SIMPLEX,
                                         1, (255, 165, 0), 2)
                    if gen_images:
                        # Build 5-image-wide rows: at i == 5 flush the first
                        # row, then start a new horizontal strip.
                        if i == 5:
                            h_image.append(fimg)
                        if i == 0 or i == 5:
                            fimg = im
                        else:
                            fimg = cv2.hconcat([fimg, im])
                if gen_images:
                    # Flush the last row and stack the rows vertically.
                    h_image.append(fimg)
                    fimg = cv2.vconcat(h_image)
                    img_name = os.path.join(root_folder, f"{task}_{v}_{sort_key}") + '.png'
                    cv2.imwrite(os.path.join(save_root_folder, os.path.basename(img_name)), fimg)
                log_stat = True
    write_stats(heading="task, variation, count, fcm_s, fcm_t, fcm_avg, fcmx_s, fcmx_t, fcmx_avg",
                stats_list=stats, file_name= os.path.join(save_root_folder, 'stats_sim_v2.csv'))
|
| 644 |
+
|
| 645 |
+
def test_multiview_real(root_folder="real_samples",
                        db_root="data_bases/real_db.pkl"):
    """Scores paired side/tall real captures with FCM and pickles the results,
    grouped by scene id (first token of the file name), to ``db_root``.

    :return: the resolved root folder that was scanned
    """
    subset = "removal_clean"
    camera = 'side'
    root_folder = f"{root_folder}/{subset}/{camera}"
    save_root_folder = f"output/{root_folder}_multi"
    os.makedirs(save_root_folder, exist_ok=True)

    clt = VLC(num_levels=3, contrast_filt_sigma=1,
              contrast_pool_sigma=3, color_pool_sigma=3,
              w_color=0.220, w_contrast=0.0660, w_orient=0.0269,
              add_xyz=None)

    sc_dict = {}
    for f in os.listdir(root_folder):
        # Only score pickled frames; skip reset frames.
        if 'pkl' not in f or 'reset' in f:
            continue
        side_fname = os.path.join(root_folder, f)
        side_img = get_real_image(side_fname)
        # The matching top view lives in the sibling 'tall' camera folder.
        top_img = get_real_image(side_fname.replace('side', 'tall'))

        side_img = cv2.cvtColor(side_img[..., :3].astype(np.uint8), cv2.COLOR_RGB2BGR)
        top_img = cv2.cvtColor(top_img[..., :3].astype(np.uint8), cv2.COLOR_RGB2BGR)

        side_fc, _ = clt.get_fcm(side_img, p=1, save_maps=False, img_name=f)
        top_fc, _ = clt.get_fcm(top_img, p=1, save_maps=False, img_name=f)

        image_dic = {'im': side_fname, 'fcm_s': side_fc, 'fcm_t': top_fc,
                     'fcm_avg': np.mean([side_fc, top_fc])}
        # Group scores by scene id (prefix before the first underscore).
        sc_dict.setdefault(f.split('_')[0], []).append(image_dic)
        print(image_dic['fcm_s'], image_dic['fcm_t'], image_dic['fcm_avg'])

    with open(db_root, 'wb') as f:
        pickle.dump(sc_dict, f, pickle.HIGHEST_PROTOCOL)
    return root_folder
|
| 687 |
+
|
| 688 |
+
def test_sorting_real(root_folder="real_samples",
                      db_root= "data_bases/real_db.pkl",
                      skeys=('fcm_s', 'fcm_t', 'fcm_avg'),
                      gen_images= True):
    """Sorts each scene's real captures by every score key, optionally renders
    annotated comparison strips (one row per sort key), and writes a CSV of
    the scores.

    :param root_folder: dataset folder name used to build output paths
    :param db_root: pickle produced by test_multiview_real
    :param skeys: score keys to sort by, one output row each
    :param gen_images: when True, write the concatenated comparison images
    """
    save_root_folder = f"output/{root_folder}_multi_combined"
    os.makedirs(save_root_folder, exist_ok=True)
    with open(db_root, 'rb') as f:
        sc_dict = pickle.load(f)
    stats = []
    for scid, sc_list in sc_dict.items():
        h_image = []
        # Stats are appended only on the first sort_key pass per scene.
        log_stat = False
        for sort_key in skeys:
            idxs = sort_images_real(sc_list, sort_key)
            fimg = None
            for i, idx in enumerate(idxs):
                image_dic = sc_list[idx]
                # for writing the scores on the image
                text1 = (f"fcm_s: {image_dic['fcm_s']:.2}, fcm_t: {image_dic['fcm_t']:.2}, "
                         f"fcm_avg: {image_dic['fcm_avg']:.3}")
                img_id = os.path.basename(image_dic['im']).split('_')[1]
                if not log_stat:
                    stats.append(f"{scid}, {img_id}, {image_dic['fcm_s']:.4}, {image_dic['fcm_t']:.4},"
                                 f"{image_dic['fcm_avg']:.4}\n")

                if gen_images:
                    # Pickled frames are decoded via get_real_image; anything
                    # else is read as a plain image file.
                    im = get_real_image(image_dic['im'])[..., :3] if 'pkl' in image_dic['im'] else \
                        cv2.imread(image_dic['im'])
                    im = cv2.cvtColor(im.astype(np.uint8), cv2.COLOR_RGB2BGR)
                    im = cv2.resize(im, (320, 240), im)
                    im = cv2.putText(im, img_id,
                                     (0, 30),
                                     cv2.FONT_HERSHEY_SIMPLEX,
                                     1, (255, 100, 0), 2)
                    im = cv2.putText(im, text1,
                                     (0, im.shape[0] - 10),
                                     cv2.FONT_HERSHEY_SIMPLEX,
                                     0.5, (255, 0, 255), 1)
                    # Extend the horizontal strip for this sort key.
                    fimg = im if i == 0 else cv2.hconcat([fimg, im])

            if gen_images:
                h_image.append(fimg)
            log_stat = True

        if gen_images:
            # One row per sort key, stacked vertically.
            fimg = cv2.vconcat(h_image)
            img_name = os.path.join(root_folder, f"{scid}_{'-'.join(skeys)}") + '.png'
            cv2.imwrite(os.path.join(save_root_folder, os.path.basename(img_name)), fimg)
    write_stats(heading="scid, imgid, fcm_s, fcm_t, fcm_avg",
                stats_list=stats, file_name= os.path.join(save_root_folder, 'stats.csv'))
|
| 738 |
+
|
| 739 |
+
# Dataset root used by the (currently commented-out) maintenance snippets
# at the bottom of this file.
root = "camera_pointcloud_recolor_2_v4"
# files = os.listdir(root)
| 741 |
+
|
| 742 |
+
def test_multiview_target(out_put_file='data_bases/recolor.pkl'):
    """Compares FCM(xyz) scores of each task with and without the target
    object ('t' vs 'wot' captures) and writes an annotated side-by-side image
    per task.

    :param out_put_file: NOTE(review) — accepted but unused in the current
        body (the pickle-dump code at the bottom is commented out); confirm
        whether it should be re-enabled.
    """
    root_folder = "camera_pointcloud_recolor_2_v4"
    save_root_folder = f"output/{root_folder}"
    os.makedirs(save_root_folder, exist_ok=True)

    # files_dic = parse_image_names(root_folder)

    clt_xyz = VLC(num_levels=3, contrast_filt_sigma=1,
                  contrast_pool_sigma=3, color_pool_sigma=3,
                  w_color=0.220, w_contrast=0.0660, w_orient=0.0269,
                  w_xyz=0.100, add_xyz='all')

    # Index the pickled captures: per task, 'wot' (without target) vs 't'
    # (with target), each holding a top and a side view.
    files_dic = {}
    for task in os.listdir(root_folder):
        files_dic[task] = {}
        for f in os.listdir((os.path.join(root_folder, task))):
            if 'pkl' not in f:
                continue
            key = None
            if '_wo_' in f:
                key = 'wot'
            else:
                key = 't'
            if key not in files_dic[task]:
                files_dic[task][key] = {'top': None, 'side': None}

            if 'top' in f:
                files_dic[task][key]['top'] = os.path.join(root_folder, task, f)
            else:
                files_dic[task][key]['side'] = os.path.join(root_folder, task, f)

    # idx = 0
    for task in files_dic:
        # Scores with the target present.
        side_img = get_image_pkl(files_dic[task]['t']['side'])
        top_img = get_image_pkl(files_dic[task]['t']['top'])
        side_fc, _ = clt_xyz.get_fcm(side_img, p=1, save_maps=False,
                                     img_name=files_dic[task]['t']['side'])
        top_fc, _ = clt_xyz.get_fcm(top_img, p=1, save_maps=False,
                                    img_name=files_dic[task]['t']['side'])

        # Scores without the target.
        side_img_wot = get_image_pkl(files_dic[task]['wot']['side'])
        top_img_wot = get_image_pkl(files_dic[task]['wot']['top'])
        side_fc_wot, _ = clt_xyz.get_fcm(side_img_wot, p=1, save_maps=False,
                                         img_name=files_dic[task]['t']['side'])
        top_fc_wot, _ = clt_xyz.get_fcm(top_img_wot, p=1, save_maps=False,
                                        img_name=files_dic[task]['t']['side'])
        sc_wot = np.mean([side_fc_wot, top_fc_wot])
        sc_t = np.mean([side_fc, top_fc])
        print(f"wot: {sc_wot}, t: {sc_t}, delta: {sc_t - sc_wot}")
        # NOTE(review): dirname(save_root_folder) creates the parent
        # ("output"), not save_root_folder itself; the latter was already
        # created above, so this line is likely redundant.
        os.makedirs(os.path.dirname(save_root_folder), exist_ok=True)
        # RGB -> BGR channel swap for cv2.imwrite.
        side_img_t = side_img[...,:3][...,[2,1,0]].astype(np.uint8)
        side_img_wot = side_img_wot[...,:3][...,[2,1,0]].astype(np.uint8)
        simg = cv2.hconcat([side_img_wot, side_img_t])
        im = cv2.putText(simg, f"wot: {sc_wot}, t: {sc_t}, delta: {sc_t - sc_wot}",
                         (0, simg.shape[0] - 10),
                         cv2.FONT_HERSHEY_SIMPLEX,
                         1, (255, 0, 0), 1)
        cv2.imwrite(os.path.join(save_root_folder, f"{task}.png"),im)
|
| 815 |
+
|
| 816 |
+
# test_multiview(out_put_file='data_bases/sim_sample_v2.pkl')
|
| 817 |
+
# test_sorting_xyz(root_folder="sim_data_v2",
|
| 818 |
+
# db_root= "data_bases/sim_sample_v2.pkl", gen_images=False)
|
| 819 |
+
# test_multiview_real()
|
| 820 |
+
# test_sorting_real(gen_images=True)
|
| 821 |
+
# test_rgb_real()
|
| 822 |
+
# test_xyz()
|
| 823 |
+
# test_depth()
|
| 824 |
+
# test_average()
|
| 825 |
+
# test_sorting()
|
| 826 |
+
|
| 827 |
+
# test_multiview_target()
|
| 828 |
+
|
| 829 |
+
# for i, fold in enumerate(files):
|
| 830 |
+
# os.rename(os.path.join(root, fold), os.path.join(root, f"{i}"))
|
| 831 |
+
# for fold in files:
|
| 832 |
+
# fold_files = os.listdir(os.path.join(root, fold))
|
| 833 |
+
# for fi in fold_files:
|
| 834 |
+
# if 'pkl' not in fi:
|
| 835 |
+
# continue
|
| 836 |
+
# img = side_img = get_image_pkl(os.path.join(root, fold, fi))
|
| 837 |
+
# cv2.imshow("dool", img[...,:3][...,[2,1,0]] .astype(np.uint8))
|
| 838 |
+
# cv2.waitKey(0)
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/inference_recorder.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import time
|
| 2 |
+
import os
|
| 3 |
+
import pickle
|
| 4 |
+
|
| 5 |
+
class InferenceRecorder:
    """Accumulates per-step inference data and dumps one pickle per episode
    into a timestamped directory under ``save_dir``."""

    def __init__(self, save_dir):
        """
        :param save_dir: directory under which ``recording_<MMDD_HHMM>`` is
            created for this session's episode pickles
        """
        self.save_dir = save_dir
        self.timestamp = time.strftime("%m%d_%H%M")
        self.recording_dir = os.path.join(save_dir, f"recording_{self.timestamp}")
        os.makedirs(self.recording_dir, exist_ok=True)
        self.current_episode = 0
        # Fix: current_step was reset in save_episode() but never initialized
        # here, so it did not exist before the first save.
        self.current_step = 0
        self.episode_data = []

    def record_step(self, obs,
                    trajectory_prior,
                    all_samples=None,
                    planned_path=None):
        """Append one step of inference data to the current episode buffer.

        :param obs: environment observation for this step
        :param trajectory_prior: prior fed to the policy
        :param all_samples: unnormalized result from policy(obs, trajectory_prior)
        :param planned_path: unnormalized result from policy(obs, trajectory_prior)
        """
        data = {
            'observation': obs,
            'trajectory_prior': trajectory_prior,
            'all_samples': all_samples,
            'planned_path': planned_path,
            'episode': self.current_episode,
        }
        self.episode_data.append(data)

    def save_episode(self):
        """Pickle the buffered episode to ``episode_<n>.pkl`` and reset the
        buffers for the next episode."""
        episode_file = os.path.join(self.recording_dir, f"episode_{self.current_episode}.pkl")
        with open(episode_file, 'wb') as f:
            pickle.dump(self.episode_data, f)

        print(f"Saved episode {self.current_episode} data to {episode_file}")

        # Reset for next episode
        self.episode_data = []
        self.current_step = 0
        self.current_episode += 1
|
| 42 |
+
|
| 43 |
+
class ReplayRecorder:
    """Replays per-episode pickle recordings produced by InferenceRecorder."""

    def __init__(self, replay_dir):
        """
        :param replay_dir: directory containing ``episode_<n>.pkl`` files
        """
        self.replay_dir = replay_dir
        self.episode_files = sorted(name for name in os.listdir(replay_dir)
                                    if name.endswith('.pkl'))
        self.total_episodes = len(self.episode_files)
        print(f"Found {self.total_episodes} episode recordings in {replay_dir}")
        self.current_episode = 0
        self._load_episode(self.current_episode)

    def _load_episode(self, episode_id):
        """Load episode ``episode_id`` into memory and rewind the step cursor."""
        path = os.path.join(self.replay_dir, f"episode_{episode_id}.pkl")
        with open(path, 'rb') as fh:
            self.current_episode_data = pickle.load(fh)
        self.current_step = 0

    def set_episode(self, episode_id):
        """Jump to a specific episode and rewind to its first step."""
        self.current_episode = episode_id
        self._load_episode(self.current_episode)

    def next_step(self):
        """Return (obs, trajectory_prior, done, planned_path) for the next
        recorded step, rolling over to the following episode when the current
        one is exhausted; returns four Nones once every episode is consumed."""
        if self.current_step >= len(self.current_episode_data):
            print(f"Episode {self.current_episode} finished")
            self.current_episode += 1
            if self.current_episode >= self.total_episodes:
                print("All episodes finished")
                return None, None, None, None
            self.current_step = 0
            self._load_episode(self.current_episode)

        step_record = self.current_episode_data[self.current_step]
        # done flags the final step of the episode.
        done = self.current_step == len(self.current_episode_data) - 1
        self.current_step += 1
        return (step_record['observation'],
                step_record['trajectory_prior'],
                done,
                step_record['planned_path'])
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
if __name__ == "__main__":
    # Smoke test: replay a recorded session, printing each step's tcp_pose
    # shape and done flag.
    # NOTE(review): hardcoded local path — adjust before running elsewhere.
    # NOTE(review): next_step returns (None, None, None, None) once all
    # episodes are consumed, at which point obs['extra'] would raise — the
    # fixed 175-iteration count presumably matches the recording length.
    recorder = ReplayRecorder(replay_dir="/home/xuan/Code/maniskill2_benchmark/inference_recordings/recording_0414_1723")
    for _ in range(175):
        obs, _, done, _ = recorder.next_step()
        print(obs['extra']['tcp_pose'].shape)
        print(done)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/object_placement_2.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Updated API calls
|
| 2 |
+
"""Extracted object placement algorithm."""
|
| 3 |
+
|
| 4 |
+
from dataclasses import dataclass
|
| 5 |
+
from typing import List, Tuple
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
import sapien.core as sapien
|
| 9 |
+
import torch
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@dataclass(frozen=True)
class ObjectBoundingBox:
    """Axis-aligned bounding box in the object's local frame.

    NOTE(review): the fields are annotated as tuples, but callers in this
    module do vector arithmetic on them (e.g. ``bbox.max - bbox.min``), which
    suggests numpy arrays are actually stored — confirm at the call sites.
    """
    min: tuple[float, float, float]  # (x, y, z) lower corner
    max: tuple[float, float, float]  # (x, y, z) upper corner
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
@dataclass(frozen=True)
class PlacementBounds:
    """Rectangular (x, y) region in which objects may be placed."""
    low: tuple[float, float]   # (x, y) minimum corner
    high: tuple[float, float]  # (x, y) maximum corner
    size: tuple[int, ...]      # sample shape forwarded to rng.uniform

    def random_in(self, rng: np.random.RandomState):
        """Draw a uniform random point (or batch, per ``size``) inside the
        bounds."""
        return rng.uniform(self.low, self.high, size=self.size)
|
| 26 |
+
|
| 27 |
+
def is_overlapping(bb1_min, bb1_max, bb2_min, bb2_max, pos1, pos2, padding=0.05, check_z=False):
    """Axis-aligned overlap test between two translated bounding boxes.

    Each box is shifted to its world position and inflated by ``padding`` on
    every side before checking for separation per axis.

    :param bb1_min: local-frame AABB lower corner of the first object
    :param bb1_max: local-frame AABB upper corner of the first object
    :param bb2_min: local-frame AABB lower corner of the second object
    :param bb2_max: local-frame AABB upper corner of the second object
    :param pos1: world position of the first object
    :param pos2: world position of the second object
    :param padding: symmetric inflation applied to both boxes
    :param check_z: when True, the z axis must also overlap
    :return: True if the (padded) boxes intersect on the tested axes
    """
    # Translate bounding boxes to world coordinates and inflate by padding.
    box1_lo = np.add(bb1_min, pos1) - padding
    box1_hi = np.add(bb1_max, pos1) + padding
    box2_lo = np.add(bb2_min, pos2) - padding
    box2_hi = np.add(bb2_max, pos2) + padding

    # Separating-axis test per dimension: the boxes overlap on an axis unless
    # one lies strictly beyond the other.
    def _axis_overlaps(axis):
        return not (box1_hi[axis] < box2_lo[axis] or box2_hi[axis] < box1_lo[axis])

    overlap_xy = _axis_overlaps(0) and _axis_overlaps(1)
    if check_z:
        # Restructured from the original dual-return layout so the z-aware
        # result is unambiguously tied to the check_z branch.
        return overlap_xy and _axis_overlaps(2)
    return overlap_xy
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def find_valid_position_2d(
    placement_bounds: PlacementBounds,
    actor: sapien.Entity,
    actor_local_bbox: ObjectBoundingBox,
    placed_objects: List[Tuple[sapien.Entity, ObjectBoundingBox]],
    rng: np.random.RandomState,
    retries: int,
    minimum_aabb_separation: float = 0.005
) -> tuple[float, float]:
    """Rejection-sample a collision-free (x, y) position for `actor`.

    Candidates are drawn so the actor's xy footprint stays inside
    `placement_bounds`, and re-drawn while the padded AABB overlaps any
    already-placed object's AABB, up to ``1 + retries`` attempts.
    The bounding box serves as a cached collision proxy.

    Args:
        placement_bounds: (x, y) region on the surface to sample from.
        actor: Entity being placed (not read here; kept for interface compatibility).
        actor_local_bbox: Actor's local-frame AABB used for the overlap check.
        placed_objects: Previously placed entities with their local AABBs.
        rng: Random source for candidate positions.
        retries: Extra attempts allowed after the first draw.
        minimum_aabb_separation: Required clearance between boxes; applied as
            half-padding on each AABB.

    Returns:
        The sampled (x, y) array, or None if every attempt collided.
    """
    attempt = 0

    # Coerce corners to arrays so the tuple type declared on ObjectBoundingBox
    # works; the original `max - min` raised TypeError for plain tuples.
    bbox_min = np.asarray(actor_local_bbox.min, dtype=float)
    bbox_max = np.asarray(actor_local_bbox.max, dtype=float)
    half_obj_xy_dim = 0.5 * (bbox_max - bbox_min)[:2]

    assert (
        np.linalg.norm(half_obj_xy_dim)
        < np.linalg.norm(np.subtract(placement_bounds.high, placement_bounds.low))
    ), "Surface is much smaller than object"

    # Shrink the sampling region so the whole footprint stays on the surface.
    low_bounds = np.asarray(placement_bounds.low, dtype=float) + half_obj_xy_dim
    high_bounds = np.asarray(placement_bounds.high, dtype=float) - half_obj_xy_dim
    size = placement_bounds.size

    while attempt < 1 + retries:
        xy = rng.uniform(low_bounds, high_bounds, size=size)

        overlap_found = False
        for obj, bbox in placed_objects:
            # Convert pose to numpy safely (may arrive as a torch tensor).
            obj_pos = obj.pose.p
            if isinstance(obj_pos, torch.Tensor):
                obj_pos = obj_pos.detach().cpu().numpy().reshape(-1)
            else:
                obj_pos = np.array(obj_pos, dtype=float).reshape(-1)

            if is_overlapping(
                bbox.min,
                bbox.max,
                actor_local_bbox.min,
                actor_local_bbox.max,
                np.pad(xy, (0, 1)),  # add z=0
                obj_pos,
                padding=0.5 * minimum_aabb_separation,
                check_z=False
            ):
                overlap_found = True
                break

        if not overlap_found:
            return xy
        attempt += 1

    return None
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def level_to_surface(surface_height: float, bbox: ObjectBoundingBox):
    """Return the z height placing an object (with local `bbox`) on a surface.

    Lifts by half the below-origin extent of the box plus a 5 cm margin.
    """
    lift = 0.5 * -bbox.min[2]
    return surface_height + lift + 0.05
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/observation_wrapper.py
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# wrap observation for inference usage
|
| 2 |
+
import numpy as np
|
| 3 |
+
import torch
|
| 4 |
+
from cfdp.utils.pointcloud_utils import convertRGBD2PCD
|
| 5 |
+
from cfdp.utils.plot_utils import visualize_rgbd_and_pointcloud
|
| 6 |
+
from cfdp.utils.data_utils import ObsQueue, transform_quat_to_ortho6d, transform_ortho6d_to_quat
|
| 7 |
+
|
| 8 |
+
class ObservationWrapper:
    """Turns raw environment observations into the processed form consumed downstream.

    Produces a dict with 'hard_conds', 'context' and 'point_cloud' entries by
    delegating normalization to `dataset` and by converting/clipping the
    camera point cloud.
    """

    def __init__(self, dataset, camera_type="world", sensor_data_key="image", sensor_param_key="camera_param"):
        """
        Args:
            dataset: Provides `get_hard_conds`, `get_context` and a `device` attribute.
            camera_type: Which camera's data to use ('world', 'hand_camera', ...).
            sensor_data_key: Key under `obs` holding per-camera image data.
            sensor_param_key: Key under `obs` holding per-camera parameters.
        """
        self.dataset = dataset
        self._processed_obs = {'hard_conds': None, 'context': None, 'environment': None}
        self.device = dataset.device
        self.camera_type = camera_type
        self.sensor_data_key = sensor_data_key
        self.sensor_param_key = sensor_param_key

    def _transform_positions_to_world(self, obs, camera_type="hand_camera"):
        """
        Transform position coordinates from camera frame to world frame for one camera.

        Args:
            obs (dict): Observation dictionary containing camera parameters and images
            camera_type (str): Camera whose 'position' image is transformed

        Returns:
            dict: {camera_type: (H*W, 3) array of world-frame points}
        """
        point_cloud = {}
        # Get position data and reshape to (N, 3)
        pos = obs[self.sensor_data_key][camera_type]['position'][:,:,:3]
        if isinstance(pos, torch.Tensor):
            pos = pos.cpu().numpy()
        pos = pos.reshape(-1, 3)

        # Get camera to world transformation matrix (OpenGL convention key).
        cam2world = obs[self.sensor_param_key][camera_type]['cam2world_gl']

        # Convert to homogeneous coordinates by adding 1 as fourth coordinate
        pos_homog = np.ones((pos.shape[0], 4))
        pos_homog[:, :3] = pos

        # Transform to world frame
        pos_world = (cam2world @ pos_homog.T).T  # Matrix multiplication and transpose
        point_cloud[camera_type] = pos_world[:, :3]

        return point_cloud

    #TODO: optimize calculation speed
    def clip_point_cloud_with_boxes(self, point_cloud, world_range,
                                    tcp_pose, goal_pose,
                                    box_size=0.1, original_shapes=None):
        """
        Efficiently clip point cloud using world range and bounding boxes around TCP and goal poses.

        Points are kept when they lie inside `world_range` AND outside the cubes
        centered on the TCP and goal positions.

        Args:
            point_cloud: Dictionary of point clouds for each camera
            world_range: Global clipping range, dict with 'x'/'y'/'z' (min, max) pairs
            tcp_pose: TCP pose [x, y, z, ...]
            goal_pose: Goal pose [x, y, z, ...]
            box_size: Edge length of the exclusion box around poses (default: 0.1 meters)
            original_shapes: Original image (H, W); when given, per-pixel masks are returned

        Returns:
            tuple: (clipped point clouds per camera, per-camera masks or None)
        """
        clipped_point_cloud = {}
        valid_masks = {}

        half_size = box_size / 2

        for camera, points in point_cloud.items():
            # World range mask - compute all axes at once
            world_mins = np.array([world_range['x'][0], world_range['y'][0], world_range['z'][0]])
            world_maxs = np.array([world_range['x'][1], world_range['y'][1], world_range['z'][1]])

            if isinstance(points, torch.Tensor):
                points = points.cpu().numpy()

            world_mask = np.all((points[:, :3] >= world_mins) & (points[:, :3] <= world_maxs), axis=1)

            # TCP and goal box masks - compute all axes at once
            tcp_mins = tcp_pose[:3] - half_size
            tcp_maxs = tcp_pose[:3] + half_size
            goal_mins = goal_pose[:3] - half_size
            goal_maxs = goal_pose[:3] + half_size

            # Pose corners may arrive as torch tensors; move them to numpy.
            if isinstance(tcp_mins, torch.Tensor):
                tcp_mins = tcp_mins.cpu().numpy()
            if isinstance(tcp_maxs, torch.Tensor):
                tcp_maxs = tcp_maxs.cpu().numpy()
            if isinstance(goal_mins, torch.Tensor):
                goal_mins = goal_mins.cpu().numpy()
            if isinstance(goal_maxs, torch.Tensor):
                goal_maxs = goal_maxs.cpu().numpy()

            # Invert the TCP and goal masks to exclude points within these boxes
            tcp_mask = ~np.all((points[:, :3] >= tcp_mins) & (points[:, :3] <= tcp_maxs), axis=1)
            goal_mask = ~np.all((points[:, :3] >= goal_mins) & (points[:, :3] <= goal_maxs), axis=1)

            # Keep points within world bounds AND outside both TCP and goal boxes.
            final_mask = world_mask & tcp_mask & goal_mask

            clipped_point_cloud[camera] = points[final_mask]
            if original_shapes is not None:
                # Per-pixel boolean mask for cropping the matching RGB image.
                valid_masks[camera] = final_mask.reshape(original_shapes[0], original_shapes[1])
            else:
                valid_masks[camera] = None
        return clipped_point_cloud, valid_masks

    def crop_rgb_with_mask(self, rgb_image, mask):
        """Select the RGB pixels flagged True in the boolean `mask`."""
        return rgb_image[mask]

    def _process_point_cloud(self, obs, point_cloud, camera_type):
        """
        Process point cloud by clipping with world range and bounding boxes.

        Args:
            obs: Observation dictionary
            point_cloud: Dictionary of point clouds for each camera
            camera_type: Type of camera ('world', 'base_camera', etc.)

        Returns:
            tuple: Processed point cloud and cropped colors (if applicable)
        """
        tcp_pose = obs['extra']['tcp_pose']
        goal_pose = obs['extra']['goal_pose']
        # Hard-coded workspace bounds (meters) used for all clipping.
        clip_range = {
            'x': (-0.25, 1.5),
            'y': (-1.0, 1.0),
            'z': (-0.01, 0.8)
        }

        cropped_colors = None

        if camera_type == 'world':
            # World cloud has no per-pixel layout, so no mask is requested.
            point_cloud, valid_masks = self.clip_point_cloud_with_boxes(point_cloud,
                                                                        clip_range,
                                                                        tcp_pose,
                                                                        goal_pose,
                                                                        box_size=0.08,
                                                                        original_shapes=None)
        else:
            point_cloud, valid_masks = self.clip_point_cloud_with_boxes(point_cloud,
                                                                        clip_range,
                                                                        tcp_pose,
                                                                        goal_pose,
                                                                        box_size=0.08,
                                                                        original_shapes=obs[self.sensor_data_key][camera_type]['rgb'].shape[:2])
            # Crop RGB images according to valid masks
            if valid_masks is not None:
                cropped_colors = {}
                for camera in point_cloud.keys():
                    rgb_image = obs[self.sensor_data_key][camera]['rgb'][:,:,:3]
                    cropped_colors[camera] = self.crop_rgb_with_mask(rgb_image, valid_masks[camera])

        return point_cloud, cropped_colors

    def update_observation(self, obs):
        """Process a raw observation and refresh the cached processed dict."""
        hard_conds = self.dataset.get_hard_conds(obs, do_normalize=True)
        context = self.dataset.get_context(obs, do_normalize=True)
        # Prefer a precomputed obstacle cloud when available for the world camera.
        if 'obstacle_point_cloud' in obs['extra'] and self.camera_type == 'world':
            point_cloud = {'world': obs['extra']['obstacle_point_cloud']}
        else:
            point_cloud = self._transform_positions_to_world(obs, camera_type=self.camera_type)

        # NOTE(review): `point_cloud` is always a dict here, so this guard never
        # takes the else branch; the history subclass tests
        # point_cloud[self.camera_type] instead — confirm which is intended.
        if point_cloud is not None:
            point_cloud, _ = self._process_point_cloud(obs, point_cloud, self.camera_type)
            self._processed_obs['point_cloud'] = point_cloud[self.camera_type]
        else:
            self._processed_obs['point_cloud'] = None

        self._processed_obs['hard_conds'] = hard_conds
        self._processed_obs['context'] = context
        return self._processed_obs

    def get_processed_obs(self):
        """Return the most recently processed observation dict."""
        return self._processed_obs

    def reset(self):
        """Clear the cached processed observation (e.g. at episode start)."""
        self._processed_obs = {'hard_conds': None, 'context': None, 'environment': None}
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
class ObservationWrapperWithHistory(ObservationWrapper):
    """ObservationWrapper variant that also keeps a fixed-length robot-state history.

    Each update pushes either the end-effector pose (converted to pos+ortho6d)
    or the joint positions into an ObsQueue, which is forwarded to the
    dataset's context builder.
    """
    def __init__(self, dataset, camera_type="world", use_ee_control=True, sensor_data_key="image", sensor_param_key="camera_param"):
        """
        Args:
            dataset: Must additionally expose `history_length` and accept an
                `obsqueue` keyword in `get_context`.
            use_ee_control: True to record TCP poses, False to record joint positions.
        """
        super().__init__(dataset, camera_type, sensor_data_key=sensor_data_key, sensor_param_key=sensor_param_key)
        # Number of past robot states the dataset expects as context.
        self.history_length = dataset.history_length
        self.history_buffer = []
        # Fixed-size FIFO of recent robot states.
        self.obsqueue = ObsQueue(size=self.history_length, obs=None)
        self.use_ee_control = use_ee_control

    def update_history_buffer(self, obs):
        """Push the current robot state (EE pose or joint positions) into the queue."""
        if self.use_ee_control:
            # TCP pose arrives as position + quaternion; convert the rotation
            # to the continuous ortho6d representation before queueing.
            self.obsqueue.append(transform_quat_to_ortho6d(torch.tensor(obs['extra']['tcp_pose'], dtype=torch.float32).to(self.device)))
        else:
            self.obsqueue.append(torch.tensor(obs['agent']['qpos'], dtype=torch.float32).to(self.device))

    def update_observation(self, obs):
        """Like the base class, but records history and passes it to get_context."""
        self.update_history_buffer(obs)
        hard_conds = self.dataset.get_hard_conds(obs, do_normalize=True)
        context = self.dataset.get_context(obs, obsqueue=self.obsqueue, do_normalize=True)
        # Prefer a precomputed obstacle cloud when available for the world camera.
        if 'obstacle_point_cloud' in obs['extra'] and self.camera_type == 'world':
            point_cloud = {'world': obs['extra']['obstacle_point_cloud']}
        else:
            point_cloud = self._transform_positions_to_world(obs, camera_type=self.camera_type)

        if point_cloud[self.camera_type] is not None:
            point_cloud, _ = self._process_point_cloud(obs, point_cloud, self.camera_type)
            self._processed_obs['point_cloud'] = point_cloud[self.camera_type]
        else:
            self._processed_obs['point_cloud'] = None

        self._processed_obs['hard_conds'] = hard_conds
        self._processed_obs['context'] = context
        return self._processed_obs
|
| 231 |
+
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/pointcloud_sdf.py
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# General imports
|
| 2 |
+
import logging
|
| 3 |
+
import numpy as np
|
| 4 |
+
import random
|
| 5 |
+
import time
|
| 6 |
+
from typing import Union
|
| 7 |
+
|
| 8 |
+
# NN imports
|
| 9 |
+
import torch
|
| 10 |
+
import torch.nn as nn
|
| 11 |
+
import torch.nn.functional as F
|
| 12 |
+
from cfdp.utils.chamfer import ChamferDistance
|
| 13 |
+
|
| 14 |
+
# Set seeds for reproducibility
|
| 15 |
+
def set_seed(seed=42):
    """
    Seed Python's `random`, NumPy, and PyTorch (CPU + all CUDA devices) so runs
    are reproducible. Also forces cuDNN into deterministic, non-benchmarking mode.

    :param seed: The seed value shared by every generator (default: 42)
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class PointCloud_CSDF(nn.Module):
    """Brute-force configuration-space SDF approximation for a point cloud.

    Distances are obtained from squared Chamfer nearest-neighbor queries,
    square-rooted, and shrunk by `sphere_radius` so each query point is
    treated as a small sphere (0 == contact).
    """
    def __init__(
        self,
        pcd: Union[np.ndarray, None],
        sphere_radius: float = 0.05,
        max_distance: float = None,
        device: str = 'cuda'
    ):

        """
        Brute force estimation of SDF for a point cloud.

        :param pcd: N x 3 numpy array describing the point cloud (may be None
            and supplied later via update_pcd)
        :param sphere_radius: Radius of sphere enclosing each point in the point cloud
        :param max_distance: Maximum distance beyond which we don't need gradients
        :param device: Device to load the point cloud
        """

        super().__init__()

        self._device = device
        self._sphere_radius = sphere_radius
        self._max_distance = max_distance

        if pcd is not None:
            self.pcd = torch.from_numpy(pcd).float().to(self._device)
        else:
            self.pcd = None

        self.chamfer_distance = ChamferDistance()

    def update_pcd(self, pcd: np.ndarray):

        """
        Function for updating the internal point cloud.

        :param pcd: N x 3 numpy array describing the point cloud
        """

        self.pcd = torch.from_numpy(pcd).float().to(self._device)

    def compute_distances(self, x: torch.Tensor):

        """
        Function for computing the distances of passed points to the internally saved point cloud.

        :param x: batch_size x num_points x 3 query points
        :returns: batch_size x num_points distance values
        """

        # Save shape features of input (batch_size x num_points x 3)
        batch_size = x.shape[0]
        num_points = x.shape[1]

        # Compute distance (ChamferDistance returns the squared distance between
        # the point clouds); take sqrt and subtract the sphere radius so that a
        # value of 0 means the query sphere touches the cloud.
        dist_x_to_pcd, _ = self.chamfer_distance(x.reshape(-1, 3).unsqueeze(0), self.pcd.unsqueeze(0))
        dist_x_to_pcd = torch.sqrt(dist_x_to_pcd.reshape((batch_size, num_points))) - self._sphere_radius

        return dist_x_to_pcd


    def forward(self, x: torch.Tensor):
        """
        Function for returning C-SDF values of passed points to the internally saved point cloud.
        With clipping at max_distance to prevent gradients beyond that distance.

        :param x: batch_size x num_points x 3 query points
        :returns: batch_size x num_points C-SDF values
        """
        # Compute distances
        dist_x_to_pcd = self.compute_distances(x)

        if self._max_distance is not None:
            # Clip the distances to max_distance
            # This creates a flat region in the SDF beyond max_distance
            clipped_distances = torch.minimum(dist_x_to_pcd, torch.tensor(self._max_distance, device=self._device))
        else:
            clipped_distances = dist_x_to_pcd

        return clipped_distances
|
| 111 |
+
|
| 112 |
+
def running_avg(tensor, window_size):
    """
    Computes a trailing running average along the sequence dimension.

    The sequence is left-padded with `window_size - 1` reflected samples so the
    output has the same length as the input, then convolved with a uniform
    averaging kernel.

    :param tensor: Input tensor of shape [batch_size, seq_len, dim]
    :param window_size: Size of the sliding window (must be <= seq_len for reflect padding)
    :return: Tensor with same shape as input, containing running averages
    """
    batch_size, seq_len, dim = tensor.shape

    # Uniform averaging kernel shared by every (batch, dim) channel; follow
    # the input dtype so non-float32 inputs work too.
    kernel = torch.ones(1, 1, window_size, device=tensor.device, dtype=tensor.dtype) / window_size

    # Fold batch and feature dims into the conv batch so a single conv1d call
    # replaces the original per-(batch, dim) Python double loop:
    # [B, S, D] -> [B*D, 1, S].
    channels = tensor.permute(0, 2, 1).reshape(batch_size * dim, 1, seq_len)
    padded = F.pad(channels, (window_size - 1, 0), mode='reflect')
    convolved = F.conv1d(padded, kernel)

    # Restore the original [B, S, D] layout.
    return convolved.reshape(batch_size, dim, seq_len).permute(0, 2, 1)
|
| 140 |
+
|
| 141 |
+
if __name__ == "__main__":
    # Smoke test: build an SDF over a random point cloud, query it, backprop,
    # and inspect the sliding-window-averaged gradients. Requires CUDA.
    # Set seed for reproducibility
    set_seed(42)

    DEVICE = "cuda"
    CONTROL_POINTS = 8


    # pcd = np.load('/home/vasileiosv/scene_pcd.npy')
    pcd = np.random.rand(1000,3)

    model = PointCloud_CSDF(sphere_radius=0.01, max_distance=0.2, pcd=pcd, device=DEVICE)
    model.eval()
    model.to(DEVICE)

    # Query points need grad so SDF values can be backpropagated to them.
    points = torch.rand((2, CONTROL_POINTS, 3), device=DEVICE, requires_grad=True)
    since = time.time()
    model.update_pcd(pcd)

    sdf_values = model(points)
    print(f'Total time to compute the SDF value: {time.time()-since}')

    # Backprop the summed SDF to obtain per-query-point gradients.
    sdf_values.sum().backward()
    sdf_gradient = points.grad

    print("sdf_gradient: ", sdf_gradient)
    # Smooth the gradients with a window-2 running average along the sequence.
    sliding_window_avg_sdf_gradient = running_avg(sdf_gradient, 2)
    print("--------------------------------------------------------------")
    print("sliding window avg sdf gradient: ", sliding_window_avg_sdf_gradient)
    print("sliding_window_avg_sdf_gradient shape: ", sliding_window_avg_sdf_gradient.shape)
|
| 180 |
+
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/rdp_path_simplify.py
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from cfdp.utils.angle_utils import quaternion_difference, quaternion_to_euler, quat_normalize_t
|
| 3 |
+
from cfdp.utils.data_utils import transform_ortho6d_to_quat
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def quat_geodesic_angle_t(q1, q2):
    """Geodesic angle between quaternion batches.

    q1, q2: (..., 4) tensors -> angle tensor of shape (...), in radians.
    Sign-insensitive: q and -q are treated as the same rotation.
    """
    unit_a = quat_normalize_t(q1)
    unit_b = quat_normalize_t(q2)
    cos_half = (unit_a * unit_b).sum(-1).abs()
    cos_half = torch.clamp(cos_half, -1.0, 1.0)
    return 2.0 * torch.arccos(cos_half)
|
| 13 |
+
|
| 14 |
+
def quat_slerp_t(q0, q1, t: float):
    """Spherical linear interpolation between two quaternions.

    q0, q1: (4,) tensors; t: float in [0, 1].
    Follows the shorter arc (flips q1 when the dot product is negative) and
    falls back to normalized linear interpolation when the quaternions are
    nearly parallel, where slerp is numerically unstable.
    """
    q0 = quat_normalize_t(q0)
    q1 = quat_normalize_t(q1)
    dot = torch.dot(q0, q1)
    # Negative dot: interpolate along the shorter great-circle arc instead.
    if dot < 0.0:
        q1 = -q1
        dot = -dot
    dot = torch.clamp(dot, -1.0, 1.0)
    # Nearly parallel: sin(theta) ~ 0 would blow up below; use nlerp.
    if dot > 0.9995:
        q = q0 + t*(q1 - q0)
        return quat_normalize_t(q)
    # Standard slerp weights from the angle between the quaternions.
    th0 = torch.arccos(dot)
    sin_th0 = torch.sin(th0)
    th = th0 * t
    s0 = torch.sin(th0 - th) / sin_th0
    s1 = torch.sin(th) / sin_th0
    return s0*q0 + s1*q1
|
| 32 |
+
|
| 33 |
+
def point_segment_distance_t(p, a, b):
    """Distance from point `p` to segment [a, b].

    Returns (dist, t) where t in [0, 1] is the normalized parameter of the
    closest point along the segment (0 at `a`, 1 at `b`).
    """
    offset = p - a
    seg = b - a
    seg_len_sq = torch.dot(seg, seg)
    # Degenerate segment (a == b): closest point is `a` itself.
    if seg_len_sq < 1e-18:
        return torch.norm(offset), 0.0
    t = torch.clamp(torch.dot(offset, seg) / seg_len_sq, 0.0, 1.0)
    closest = a + t * seg
    return torch.norm(p - closest), float(t)
|
| 43 |
+
|
| 44 |
+
def find_max_pos_deviation_t(pos, i, j):
    """Largest perpendicular deviation of pos[i+1..j-1] from segment pos[i]-pos[j].

    Returns (max_deviation, index_of_max); (-1.0, -1) when the range has no
    interior points.
    """
    start, end = pos[i], pos[j]
    best_dist, best_idx = -1.0, -1
    for idx in range(i + 1, j):
        dist, _ = point_segment_distance_t(pos[idx], start, end)
        if dist > best_dist:
            best_dist, best_idx = dist, idx
    return best_dist, best_idx
|
| 53 |
+
|
| 54 |
+
def find_max_ori_deviation_t(quat, i, j):
    """Largest angular deviation of quat[i+1..j-1] from the slerp between quat[i] and quat[j].

    Returns (max_angle_rad, index_of_max); (-1.0, -1) when the range has no
    interior points.
    """
    q_start, q_end = quat[i], quat[j]
    span = j - i
    best_ang, best_idx = -1.0, -1
    for idx in range(i + 1, j):
        frac = (idx - i) / span
        expected = quat_slerp_t(q_start, q_end, frac)
        deviation = float(quat_geodesic_angle_t(quat[idx].unsqueeze(0), expected.unsqueeze(0)))
        if deviation > best_ang:
            best_ang, best_idx = deviation, idx
    return best_ang, best_idx
|
| 66 |
+
|
| 67 |
+
def rdp_indices_with_ori_t(pos, quat, eps_pos=0.02, eps_ang_rad=0.17):
    """
    Ramer-Douglas-Peucker simplification honoring both position and orientation.

    pos: (N,3) torch tensor
    quat: (N,4) torch tensor
    eps_pos: positional tolerance (same units as pos)
    eps_ang_rad: angular tolerance in radians
    returns: indices (M,) torch.long of the waypoints to keep
    """
    N = pos.shape[0]
    if N <= 2:
        return torch.arange(N, device=pos.device)

    # First and last waypoints are always preserved.
    keep = torch.zeros(N, dtype=torch.bool, device=pos.device)
    keep[0] = True; keep[-1] = True
    # Explicit stack of (i, j) segment ranges replaces the usual RDP recursion.
    stack = [(0, N-1)]

    while stack:
        i, j = stack.pop()
        max_pos, k_pos = find_max_pos_deviation_t(pos, i, j)
        max_ang, k_ang = find_max_ori_deviation_t(quat, i, j)

        pos_ok = (max_pos <= eps_pos)
        ang_ok = (max_ang <= eps_ang_rad)

        # Both deviations within tolerance: the segment collapses to its endpoints.
        if pos_ok and ang_ok:
            continue

        # Split at whichever deviation violates its tolerance; when both do,
        # split where the violation is relatively larger.
        if not pos_ok and ang_ok:
            mid = k_pos
        elif pos_ok and not ang_ok:
            mid = k_ang
        else:
            pos_ratio = max_pos / (eps_pos + 1e-12)
            ang_ratio = max_ang / (eps_ang_rad + 1e-12)
            mid = k_pos if pos_ratio >= ang_ratio else k_ang

        keep[mid] = True
        if mid - i >= 1: stack.append((i, mid))
        if j - mid >= 1: stack.append((mid, j))

    return torch.nonzero(keep, as_tuple=False).squeeze(-1)
|
| 106 |
+
|
| 107 |
+
def simplify_waypoints_preserve_format_t(waypoints: torch.Tensor,
                                         eps_pos: float = 0.02,
                                         eps_ang_deg: float | None = 10.0):
    """
    Simplify a waypoint path with RDP while preserving the input format.

    waypoints: (N,3)/(N,7)/(N,9) torch.Tensor
      - 3: pos
      - 7: pos + quat(wxyz)
      - 9: pos + ortho6d
    eps_pos: positional tolerance; eps_ang_deg: angular tolerance in degrees,
    or None to ignore orientation entirely.
    returns: (keep_idx, simplified_waypoints)
    """
    assert waypoints.ndim == 2 and waypoints.shape[1] in (3,7,9), "Expect (N,3)/(N,7)/(N,9)"
    device, dtype = waypoints.device, waypoints.dtype
    D = waypoints.shape[1]
    pos = waypoints[:, :3]

    # position only
    # NOTE(review): rdp_indices_pos_only_t is not defined in this module —
    # this branch raises NameError unless it exists elsewhere; verify.
    if (D == 3) or (eps_ang_deg is None):
        idx = rdp_indices_pos_only_t(pos, eps_pos)
        return idx, waypoints.index_select(0, idx)

    # orientation guarding: take quat (7D directly, 9D first convert from 6D)
    if D == 7:
        quat = waypoints[:, 3:7]
    else:  # D == 9
        # quat = ortho6d_to_quat_t(waypoints[:, 3:9])
        # NOTE(review): the full 9-D waypoints are passed (not just cols 3:9);
        # presumably transform_ortho6d_to_quat expects pos+ortho6d — confirm.
        quat_waypoints = transform_ortho6d_to_quat(waypoints)
        quat = quat_waypoints[:, 3:7]

    # Convert the degree tolerance on the caller's device/dtype, then run
    # the combined position+orientation RDP.
    eps_ang_rad = torch.deg2rad(torch.tensor(eps_ang_deg, device=device, dtype=dtype))
    idx = rdp_indices_with_ori_t(pos, quat, eps_pos=eps_pos, eps_ang_rad=float(eps_ang_rad))
    return idx, waypoints.index_select(0, idx)
|
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/utils/socket_utils.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import base64
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
def arr2base64(arr):
    """Serialize a NumPy array into a JSON-friendly dict (base64 payload + metadata)."""
    payload = base64.b64encode(arr.tobytes()).decode('ascii')
    return {'data': payload, 'dtype': str(arr.dtype), 'shape': arr.shape}
|
| 8 |
+
|
| 9 |
+
def base64_to_arr(arr_dict):
    """Inverse of arr2base64: rebuild a NumPy array from its encoded dict form."""
    raw = base64.b64decode(arr_dict['data'])
    flat = np.frombuffer(raw, np.dtype(arr_dict['dtype']))
    return flat.reshape(arr_dict['shape'])
|
project/ManiSkill3/src/maniskill2_benchmark/msx_envs/msx_envs/env_config/distractorbox/7_scene_config.yaml
ADDED
|
@@ -0,0 +1,458 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
objects:
|
| 2 |
+
- collision:
|
| 3 |
+
path: object/table/desktable.obj
|
| 4 |
+
name: table
|
| 5 |
+
pose:
|
| 6 |
+
position:
|
| 7 |
+
- 0.4000000059604645
|
| 8 |
+
- 0.0
|
| 9 |
+
- -0.75
|
| 10 |
+
quaternion:
|
| 11 |
+
- 0.0
|
| 12 |
+
- 0.0
|
| 13 |
+
- 0.7071068286895752
|
| 14 |
+
- 0.7071068286895752
|
| 15 |
+
scale:
|
| 16 |
+
- 2.0
|
| 17 |
+
- 1.0
|
| 18 |
+
- 1.5
|
| 19 |
+
visual:
|
| 20 |
+
path: object/table/desktable.glb
|
| 21 |
+
- collision:
|
| 22 |
+
path: object/box_01/box_01.gltf
|
| 23 |
+
name: box_01
|
| 24 |
+
pose:
|
| 25 |
+
position:
|
| 26 |
+
- 0.5411531925201416
|
| 27 |
+
- -0.33478599786758423
|
| 28 |
+
- 0.19764788448810577
|
| 29 |
+
quaternion:
|
| 30 |
+
- 0.9953973889350891
|
| 31 |
+
- 1.1641532182693481e-09
|
| 32 |
+
- 2.3010215954855084e-10
|
| 33 |
+
- -0.09583356231451035
|
| 34 |
+
scale:
|
| 35 |
+
- 0.43156745678016356
|
| 36 |
+
- 0.4938321481829596
|
| 37 |
+
- 1.0544241940054395
|
| 38 |
+
visual:
|
| 39 |
+
path: object/box_01/box_01.gltf
|
| 40 |
+
- collision:
|
| 41 |
+
path: object/box_01/box_01.gltf
|
| 42 |
+
name: box_02
|
| 43 |
+
pose:
|
| 44 |
+
position:
|
| 45 |
+
- 0.18066096305847168
|
| 46 |
+
- -0.3856646418571472
|
| 47 |
+
- 0.12996400892734528
|
| 48 |
+
quaternion:
|
| 49 |
+
- 0.9750638008117676
|
| 50 |
+
- 1.862645149230957e-09
|
| 51 |
+
- 3.5033735912293196e-09
|
| 52 |
+
- -0.22192484140396118
|
| 53 |
+
scale:
|
| 54 |
+
- 0.40738405359814406
|
| 55 |
+
- 0.5761527279656022
|
| 56 |
+
- 0.7179754719214234
|
| 57 |
+
visual:
|
| 58 |
+
path: object/box_01/box_01.gltf
|
| 59 |
+
- collision:
|
| 60 |
+
path: object/051_large_clamp/collision.obj
|
| 61 |
+
name: large_clamp
|
| 62 |
+
pose:
|
| 63 |
+
position:
|
| 64 |
+
- 0.3960020840167999
|
| 65 |
+
- 0.4104568660259247
|
| 66 |
+
- 0.0048619951121509075
|
| 67 |
+
quaternion:
|
| 68 |
+
- 0.7836728692054749
|
| 69 |
+
- 0.0002483418211340904
|
| 70 |
+
- -0.00019835252896882594
|
| 71 |
+
- -0.6211735606193542
|
| 72 |
+
scale: 1.0
|
| 73 |
+
visual:
|
| 74 |
+
path: object/051_large_clamp/textured.obj
|
| 75 |
+
- collision:
|
| 76 |
+
path: object/032_knife/collision.obj
|
| 77 |
+
name: knife
|
| 78 |
+
pose:
|
| 79 |
+
position:
|
| 80 |
+
- 0.16431424021720886
|
| 81 |
+
- -0.26314669847488403
|
| 82 |
+
- 0.2801703214645386
|
| 83 |
+
quaternion:
|
| 84 |
+
- 0.9511080384254456
|
| 85 |
+
- -0.05688799172639847
|
| 86 |
+
- 0.021953586488962173
|
| 87 |
+
- -0.3027794361114502
|
| 88 |
+
scale: 1.0
|
| 89 |
+
visual:
|
| 90 |
+
path: object/032_knife/textured.obj
|
| 91 |
+
- collision:
|
| 92 |
+
path: object/038_padlock/collision.obj
|
| 93 |
+
name: padlock
|
| 94 |
+
pose:
|
| 95 |
+
position:
|
| 96 |
+
- 0.6913907527923584
|
| 97 |
+
- -0.47387224435806274
|
| 98 |
+
- 0.00037308820174075663
|
| 99 |
+
quaternion:
|
| 100 |
+
- 0.7253404259681702
|
| 101 |
+
- 4.12575900554657e-06
|
| 102 |
+
- 4.9620866775512695e-06
|
| 103 |
+
- -0.6883903741836548
|
| 104 |
+
scale: 1.0
|
| 105 |
+
visual:
|
| 106 |
+
path: object/038_padlock/textured.obj
|
| 107 |
+
- collision:
|
| 108 |
+
path: object/005_tomato_soup_can/collision.obj
|
| 109 |
+
name: tomato_soup_can
|
| 110 |
+
pose:
|
| 111 |
+
position:
|
| 112 |
+
- 0.4213438630104065
|
| 113 |
+
- -0.10455051809549332
|
| 114 |
+
- 0.036737337708473206
|
| 115 |
+
quaternion:
|
| 116 |
+
- 0.9973571300506592
|
| 117 |
+
- 0.004662784282118082
|
| 118 |
+
- -0.0008021022658795118
|
| 119 |
+
- -0.07250283658504486
|
| 120 |
+
scale: 1.0
|
| 121 |
+
visual:
|
| 122 |
+
path: object/005_tomato_soup_can/textured.obj
|
| 123 |
+
- collision:
|
| 124 |
+
path: object/057_racquetball/collision.obj
|
| 125 |
+
name: racquetball
|
| 126 |
+
pose:
|
| 127 |
+
position:
|
| 128 |
+
- 0.6549382209777832
|
| 129 |
+
- -0.7179558873176575
|
| 130 |
+
- 0.013666164129972458
|
| 131 |
+
quaternion:
|
| 132 |
+
- -0.944601833820343
|
| 133 |
+
- -0.2577575147151947
|
| 134 |
+
- 0.007072235457599163
|
| 135 |
+
- -0.203073188662529
|
| 136 |
+
scale: 1.0
|
| 137 |
+
visual:
|
| 138 |
+
path: object/057_racquetball/textured.obj
|
| 139 |
+
- collision:
|
| 140 |
+
path: object/054_softball/collision.obj
|
| 141 |
+
name: softball
|
| 142 |
+
pose:
|
| 143 |
+
position:
|
| 144 |
+
- 0.3998604118824005
|
| 145 |
+
- 0.16705569624900818
|
| 146 |
+
- 0.03259090706706047
|
| 147 |
+
quaternion:
|
| 148 |
+
- 0.9982295632362366
|
| 149 |
+
- 0.009225627407431602
|
| 150 |
+
- -0.023404540494084358
|
| 151 |
+
- 0.05389898642897606
|
| 152 |
+
scale: 1.0
|
| 153 |
+
visual:
|
| 154 |
+
path: object/054_softball/textured.obj
|
| 155 |
+
- collision:
|
| 156 |
+
path: object/009_gelatin_box/collision.obj
|
| 157 |
+
name: gelatin_box
|
| 158 |
+
pose:
|
| 159 |
+
position:
|
| 160 |
+
- 0.08669579029083252
|
| 161 |
+
- 0.3514019250869751
|
| 162 |
+
- 0.0009558660676702857
|
| 163 |
+
quaternion:
|
| 164 |
+
- 0.746235728263855
|
| 165 |
+
- 3.0816299840807915e-06
|
| 166 |
+
- 9.892610250972211e-07
|
| 167 |
+
- -0.6656818389892578
|
| 168 |
+
scale: 1.0
|
| 169 |
+
visual:
|
| 170 |
+
path: object/009_gelatin_box/textured.obj
|
| 171 |
+
- collision:
|
| 172 |
+
path: object/011_banana/collision.obj
|
| 173 |
+
name: banana
|
| 174 |
+
pose:
|
| 175 |
+
position:
|
| 176 |
+
- 0.11127632856369019
|
| 177 |
+
- 0.18324284255504608
|
| 178 |
+
- 0.004434763453900814
|
| 179 |
+
quaternion:
|
| 180 |
+
- 0.9793081879615784
|
| 181 |
+
- -0.0016427943482995033
|
| 182 |
+
- -0.0029420959763228893
|
| 183 |
+
- -0.20234665274620056
|
| 184 |
+
scale: 1.0
|
| 185 |
+
visual:
|
| 186 |
+
path: object/011_banana/textured.obj
|
| 187 |
+
- collision:
|
| 188 |
+
path: object/043_phillips_screwdriver/collision.obj
|
| 189 |
+
name: phillips_screwdriver
|
| 190 |
+
pose:
|
| 191 |
+
position:
|
| 192 |
+
- 0.651358425617218
|
| 193 |
+
- 0.06117602437734604
|
| 194 |
+
- -2.2212043404579163e-06
|
| 195 |
+
quaternion:
|
| 196 |
+
- 0.8662685751914978
|
| 197 |
+
- -0.02353747934103012
|
| 198 |
+
- -0.08970711380243301
|
| 199 |
+
- 0.49089452624320984
|
| 200 |
+
scale: 1.0
|
| 201 |
+
visual:
|
| 202 |
+
path: object/043_phillips_screwdriver/textured.obj
|
| 203 |
+
- collision:
|
| 204 |
+
path: object/073-a_lego_duplo/collision.obj
|
| 205 |
+
name: lego_duplo
|
| 206 |
+
pose:
|
| 207 |
+
position:
|
| 208 |
+
- 0.07054553925991058
|
| 209 |
+
- 0.45550820231437683
|
| 210 |
+
- -0.0019614154007285833
|
| 211 |
+
quaternion:
|
| 212 |
+
- 0.7769989967346191
|
| 213 |
+
- -5.024252459406853e-06
|
| 214 |
+
- -1.7005615518428385e-05
|
| 215 |
+
- 0.6295019388198853
|
| 216 |
+
scale: 1.0
|
| 217 |
+
visual:
|
| 218 |
+
path: object/073-a_lego_duplo/textured.obj
|
| 219 |
+
- collision:
|
| 220 |
+
path: object/006_mustard_bottle/collision.obj
|
| 221 |
+
name: mustard_bottle
|
| 222 |
+
pose:
|
| 223 |
+
position:
|
| 224 |
+
- 0.6522485017776489
|
| 225 |
+
- -0.02355482429265976
|
| 226 |
+
- 0.017643030732870102
|
| 227 |
+
quaternion:
|
| 228 |
+
- 0.31925877928733826
|
| 229 |
+
- 0.4426552951335907
|
| 230 |
+
- 0.5555307865142822
|
| 231 |
+
- 0.6273083686828613
|
| 232 |
+
scale: 1.0
|
| 233 |
+
visual:
|
| 234 |
+
path: object/006_mustard_bottle/textured.obj
|
| 235 |
+
- collision:
|
| 236 |
+
path: object/002_master_chef_can/collision.obj
|
| 237 |
+
name: master_chef_can
|
| 238 |
+
pose:
|
| 239 |
+
position:
|
| 240 |
+
- 0.39390599727630615
|
| 241 |
+
- 0.034177470952272415
|
| 242 |
+
- 0.05646386370062828
|
| 243 |
+
quaternion:
|
| 244 |
+
- 0.8733154535293579
|
| 245 |
+
- 3.2882962841540575e-06
|
| 246 |
+
- -4.950270522385836e-07
|
| 247 |
+
- -0.48715531826019287
|
| 248 |
+
scale: 1.0
|
| 249 |
+
visual:
|
| 250 |
+
path: object/002_master_chef_can/textured.obj
|
| 251 |
+
- collision:
|
| 252 |
+
path: object/036_wood_block/collision.obj
|
| 253 |
+
name: wood_block
|
| 254 |
+
pose:
|
| 255 |
+
position:
|
| 256 |
+
- 0.16562879085540771
|
| 257 |
+
- -0.14416122436523438
|
| 258 |
+
- 0.043825723230838776
|
| 259 |
+
quaternion:
|
| 260 |
+
- -0.010184310376644135
|
| 261 |
+
- 0.6387211680412292
|
| 262 |
+
- -0.3020576536655426
|
| 263 |
+
- -0.7075966596603394
|
| 264 |
+
scale: 1.0
|
| 265 |
+
visual:
|
| 266 |
+
path: object/036_wood_block/textured.obj
|
| 267 |
+
- collision:
|
| 268 |
+
path: object/012_strawberry/collision.obj
|
| 269 |
+
name: strawberry
|
| 270 |
+
pose:
|
| 271 |
+
position:
|
| 272 |
+
- 0.2075018435716629
|
| 273 |
+
- -0.059122636914253235
|
| 274 |
+
- 0.008252094499766827
|
| 275 |
+
quaternion:
|
| 276 |
+
- 0.9944765567779541
|
| 277 |
+
- -0.06852412968873978
|
| 278 |
+
- -0.0287138931453228
|
| 279 |
+
- 0.07413822412490845
|
| 280 |
+
scale: 1.0
|
| 281 |
+
visual:
|
| 282 |
+
path: object/012_strawberry/textured.obj
|
| 283 |
+
- collision:
|
| 284 |
+
path: object/072-c_toy_airplane/collision.obj
|
| 285 |
+
name: toy_airplane
|
| 286 |
+
pose:
|
| 287 |
+
position:
|
| 288 |
+
- 0.24646730720996857
|
| 289 |
+
- 0.24238541722297668
|
| 290 |
+
- 0.019568663090467453
|
| 291 |
+
quaternion:
|
| 292 |
+
- 0.935096800327301
|
| 293 |
+
- 0.002827427815645933
|
| 294 |
+
- 0.00012870205682702363
|
| 295 |
+
- -0.3543815016746521
|
| 296 |
+
scale: 1.0
|
| 297 |
+
visual:
|
| 298 |
+
path: object/072-c_toy_airplane/textured.obj
|
| 299 |
+
- collision:
|
| 300 |
+
path: object/015_peach/collision.obj
|
| 301 |
+
name: peach
|
| 302 |
+
pose:
|
| 303 |
+
position:
|
| 304 |
+
- 0.601452112197876
|
| 305 |
+
- 0.3438147306442261
|
| 306 |
+
- 0.01431876327842474
|
| 307 |
+
quaternion:
|
| 308 |
+
- 0.8866758942604065
|
| 309 |
+
- -0.0986284390091896
|
| 310 |
+
- -0.22087711095809937
|
| 311 |
+
- -0.3940708637237549
|
| 312 |
+
scale: 1.0
|
| 313 |
+
visual:
|
| 314 |
+
path: object/015_peach/textured.obj
|
| 315 |
+
- collision:
|
| 316 |
+
path: object/014_lemon/collision.obj
|
| 317 |
+
name: lemon
|
| 318 |
+
pose:
|
| 319 |
+
position:
|
| 320 |
+
- 0.5213211178779602
|
| 321 |
+
- -0.4761643707752228
|
| 322 |
+
- 0.012237854301929474
|
| 323 |
+
quaternion:
|
| 324 |
+
- 0.9941476583480835
|
| 325 |
+
- 6.818398833274841e-05
|
| 326 |
+
- 3.129243850708008e-07
|
| 327 |
+
- 0.10803081095218658
|
| 328 |
+
scale: 1.0
|
| 329 |
+
visual:
|
| 330 |
+
path: object/014_lemon/textured.obj
|
| 331 |
+
- collision:
|
| 332 |
+
path: object/055_baseball/collision.obj
|
| 333 |
+
name: baseball
|
| 334 |
+
pose:
|
| 335 |
+
position:
|
| 336 |
+
- 0.10353413969278336
|
| 337 |
+
- -0.024039605632424355
|
| 338 |
+
- 0.02265048772096634
|
| 339 |
+
quaternion:
|
| 340 |
+
- 0.1044173389673233
|
| 341 |
+
- 0.8803550004959106
|
| 342 |
+
- 0.06693390011787415
|
| 343 |
+
- -0.45781248807907104
|
| 344 |
+
scale: 1.0
|
| 345 |
+
visual:
|
| 346 |
+
path: object/055_baseball/textured.obj
|
| 347 |
+
- collision:
|
| 348 |
+
path: object/026_sponge/collision.obj
|
| 349 |
+
name: sponge
|
| 350 |
+
pose:
|
| 351 |
+
position:
|
| 352 |
+
- 0.28831154108047485
|
| 353 |
+
- 0.2725689113140106
|
| 354 |
+
- 0.0306338332593441
|
| 355 |
+
quaternion:
|
| 356 |
+
- 0.689015805721283
|
| 357 |
+
- -0.31184300780296326
|
| 358 |
+
- 0.3014889359474182
|
| 359 |
+
- -0.5806166529655457
|
| 360 |
+
scale: 1.0
|
| 361 |
+
visual:
|
| 362 |
+
path: object/026_sponge/textured.obj
|
| 363 |
+
- collision:
|
| 364 |
+
path: object/007_tuna_fish_can/collision.obj
|
| 365 |
+
name: tuna_fish_can
|
| 366 |
+
pose:
|
| 367 |
+
position:
|
| 368 |
+
- 0.4987218379974365
|
| 369 |
+
- -0.22164365649223328
|
| 370 |
+
- 0.4239289164543152
|
| 371 |
+
quaternion:
|
| 372 |
+
- 0.9792429208755493
|
| 373 |
+
- -0.004228629171848297
|
| 374 |
+
- 0.008018635213375092
|
| 375 |
+
- 0.20248772203922272
|
| 376 |
+
scale: 1.0
|
| 377 |
+
visual:
|
| 378 |
+
path: object/007_tuna_fish_can/textured.obj
|
| 379 |
+
- collision:
|
| 380 |
+
path: object/017_orange/collision.obj
|
| 381 |
+
name: orange
|
| 382 |
+
pose:
|
| 383 |
+
position:
|
| 384 |
+
- 0.16483265161514282
|
| 385 |
+
- -0.4286644160747528
|
| 386 |
+
- 0.021655144169926643
|
| 387 |
+
quaternion:
|
| 388 |
+
- 0.8321624994277954
|
| 389 |
+
- 0.04870299994945526
|
| 390 |
+
- -0.002386651933193207
|
| 391 |
+
- -0.5523840188980103
|
| 392 |
+
scale: 1.0
|
| 393 |
+
visual:
|
| 394 |
+
path: object/017_orange/textured.obj
|
| 395 |
+
- collision:
|
| 396 |
+
path: object/033_spatula/collision.obj
|
| 397 |
+
name: spatula
|
| 398 |
+
pose:
|
| 399 |
+
position:
|
| 400 |
+
- 0.2773701250553131
|
| 401 |
+
- -0.03216280788183212
|
| 402 |
+
- 0.010732786729931831
|
| 403 |
+
quaternion:
|
| 404 |
+
- 0.7247846722602844
|
| 405 |
+
- 0.5543031692504883
|
| 406 |
+
- -0.3050392270088196
|
| 407 |
+
- -0.2727387845516205
|
| 408 |
+
scale: 1.0
|
| 409 |
+
visual:
|
| 410 |
+
path: object/033_spatula/textured.obj
|
| 411 |
+
- collision:
|
| 412 |
+
path: object/077_rubiks_cube/collision.obj
|
| 413 |
+
name: rubiks_cube
|
| 414 |
+
pose:
|
| 415 |
+
position:
|
| 416 |
+
- 0.3757307827472687
|
| 417 |
+
- -0.42385706305503845
|
| 418 |
+
- 0.014687028713524342
|
| 419 |
+
quaternion:
|
| 420 |
+
- 0.9600054025650024
|
| 421 |
+
- -0.002521239221096039
|
| 422 |
+
- 0.0010814964771270752
|
| 423 |
+
- 0.2799682915210724
|
| 424 |
+
scale: 1.0
|
| 425 |
+
visual:
|
| 426 |
+
path: object/077_rubiks_cube/textured.obj
|
| 427 |
+
- collision:
|
| 428 |
+
path: object/004_sugar_box/collision.obj
|
| 429 |
+
name: sugar_box
|
| 430 |
+
pose:
|
| 431 |
+
position:
|
| 432 |
+
- 0.5863490700721741
|
| 433 |
+
- 0.41124847531318665
|
| 434 |
+
- 0.07367444783449173
|
| 435 |
+
quaternion:
|
| 436 |
+
- 0.8212317824363708
|
| 437 |
+
- -0.010641193017363548
|
| 438 |
+
- -0.006307687144726515
|
| 439 |
+
- -0.5704607963562012
|
| 440 |
+
scale: 1.0
|
| 441 |
+
visual:
|
| 442 |
+
path: object/004_sugar_box/textured.obj
|
| 443 |
+
- collision:
|
| 444 |
+
path: object/003_cracker_box/collision.obj
|
| 445 |
+
name: cracker_box
|
| 446 |
+
pose:
|
| 447 |
+
position:
|
| 448 |
+
- 0.5719598531723022
|
| 449 |
+
- -0.38586848974227905
|
| 450 |
+
- 0.06775876134634018
|
| 451 |
+
quaternion:
|
| 452 |
+
- 0.4747459888458252
|
| 453 |
+
- 0.47796034812927246
|
| 454 |
+
- -0.5211132764816284
|
| 455 |
+
- -0.5240334272384644
|
| 456 |
+
scale: 1.0
|
| 457 |
+
visual:
|
| 458 |
+
path: object/003_cracker_box/textured.obj
|