{
  "alg_class": {
    "$class": "rlkit.torch.torch_rl_algorithm.OfflineTorchBatchRLAlgorithm"
  },
  "algorithm": "sarsa-iqn",
  "algorithm_kwargs": {
    "batch_size": 256,
    "max_path_length": 0,
    "num_epochs": 0,
    "num_eval_steps_per_epoch": 0,
    "num_trains_per_train_loop": 1000,
    "start_epoch": -400
  },
  "d4rl": true,
  "env_id": "halfcheetah-medium-expert-v2",
  "normalize_env": true,
  "path_loader_kwargs": {},
  "pipeline": {
    "$class": "rlkit.launchers.pipeline.<Pipeline SarsaIQNPipeline>:\noffline_init,\ncreate_eval_env,\ncreate_dataset_next_actions,\noptionally_normalize_dataset,\ncreate_q_iqn,\ncreate_policy,\ncreate_trainer,\ncreate_eval_policy,\ncreate_eval_path_collector,\ncreate_replay_buffer,\ncreate_algorithm,\nload_demos,\ntrain"
  },
  "policy_class": {
    "$class": "rlkit.policies.gaussian_policy.TanhGaussianPolicy"
  },
  "policy_kwargs": {
    "hidden_sizes": [
      1024,
      1024
    ]
  },
  "qf_class": {
    "$class": "rlkit.torch.networks.mlp.QuantileMlp"
  },
  "qf_kwargs": {
    "embedding_size": 64,
    "hidden_sizes": [
      256,
      256,
      256
    ],
    "num_quantiles": 8
  },
  "replay_buffer_class": {
    "$class": "rlkit.data_management.env_replay_buffer.EnvReplayBufferNextAction"
  },
  "replay_buffer_size": 2000000,
  "rollout_fn": {
    "$function": "rlkit.samplers.rollout_functions.rollout"
  },
  "seed": 0,
  "snapshot_gap": 50,
  "snapshot_mode": "gap_and_last",
  "trainer_class": {
    "$class": "rlkit.torch.torch_rl_algorithm.TorchTrainer"
  },
  "trainer_cls": {
    "$class": "rlkit.torch.algorithms.sarsa_iqn.SarsaIQNTrainer"
  },
  "trainer_kwargs": {
    "discount": 0.99,
    "num_quantiles": 8,
    "policy_lr": 0.0003,
    "qf_lr": 0.0003,
    "reward_scale": 1,
    "soft_target_tau": 0.005,
    "target_update_period": 1
  },
  "version": "normalize-env"
}