{
"alg_class": {
"$class": "rlkit.torch.torch_rl_algorithm.OfflineTorchBatchRLAlgorithm"
},
"algorithm": "behavior-cloning",
"algorithm_kwargs": {
"batch_size": 512,
"max_path_length": 1000,
"num_epochs": 0,
"num_eval_steps_per_epoch": 1000,
"num_trains_per_train_loop": 1000,
"start_epoch": -1000
},
"d4rl": true,
"env_id": "halfcheetah-medium-expert-v2",
"normalize_env": true,
"path_loader_kwargs": {},
"pipeline": {
"$class": "rlkit.launchers.pipeline.<Pipeline offline_sac_experiment>:\noffline_init,\ncreate_eval_env,\ncreate_dataset_next_actions,\noptionally_normalize_dataset,\ncreate_policy,\ncreate_trainer,\ncreate_eval_policy,\ncreate_eval_path_collector,\ncreate_replay_buffer,\ncreate_algorithm,\nload_demos,\ntrain"
},
"policy_class": {
"$class": "rlkit.policies.gaussian_policy.TanhGaussianPolicy"
},
"policy_kwargs": {
"hidden_sizes": [
256,
256,
256
]
},
"qf_class": {
"$class": "rlkit.torch.networks.mlp.ConcatMlp"
},
"qf_kwargs": {
"hidden_sizes": [
1024,
1024
]
},
"replay_buffer_class": {
"$class": "rlkit.data_management.env_replay_buffer.EnvReplayBuffer"
},
"replay_buffer_size": 2000000,
"rollout_fn": {
"$function": "rlkit.samplers.rollout_functions.rollout"
},
"seed": 0,
"snapshot_gap": 100,
"snapshot_mode": "gap_and_last",
"trainer_class": {
"$class": "rlkit.torch.torch_rl_algorithm.TorchTrainer"
},
"trainer_cls": {
"$class": "rlkit.torch.algorithms.bc.BCTrainer"
},
"trainer_kwargs": {
"discount": 0.99,
"policy_lr": 0.0001,
"qf_lr": 0.0001,
"reward_scale": 1,
"soft_target_tau": 0.005,
"target_update_period": 1
},
"version": "normalized-tinkywinky-256-256-256"
}