{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.35128381848335266,
"min": 0.35128381848335266,
"max": 1.4836589097976685,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10768.95703125,
"min": 10768.95703125,
"max": 45008.27734375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989993.0,
"min": 29952.0,
"max": 989993.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989993.0,
"min": 29952.0,
"max": 989993.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6462169289588928,
"min": -0.1151743233203888,
"max": 0.7126260995864868,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 187.4029083251953,
"min": -27.296314239501953,
"max": 203.81106567382812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.016567405313253403,
"min": -0.00963687151670456,
"max": 0.4577394425868988,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.8045477867126465,
"min": -2.5441341400146484,
"max": 108.48424530029297,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07472720345896153,
"min": 0.06562921970259954,
"max": 0.07472720345896153,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.120908051884423,
"min": 0.5084574545421517,
"max": 1.120908051884423,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01609852208025081,
"min": 0.00027856066728807936,
"max": 0.01609852208025081,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24147783120376215,
"min": 0.0033427280074569523,
"max": 0.24147783120376215,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.422237525953336e-06,
"min": 7.422237525953336e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011133356288930004,
"min": 0.00011133356288930004,
"max": 0.0033294831901722996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024740466666667,
"min": 0.1024740466666667,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5371107000000004,
"min": 1.3886848,
"max": 2.4851503,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002571572620000001,
"min": 0.0002571572620000001,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003857358930000001,
"min": 0.003857358930000001,
"max": 0.11099178722999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012633202597498894,
"min": 0.012633202597498894,
"max": 0.49036550521850586,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18949803709983826,
"min": 0.17762303352355957,
"max": 3.432558536529541,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 276.70535714285717,
"min": 275.9026548672566,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30991.0,
"min": 15984.0,
"max": 32777.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.687569621950388,
"min": -1.0000000521540642,
"max": 1.6983155839213537,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 189.00779765844345,
"min": -30.751801691949368,
"max": 190.46859860420227,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.687569621950388,
"min": -1.0000000521540642,
"max": 1.6983155839213537,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 189.00779765844345,
"min": -30.751801691949368,
"max": 190.46859860420227,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.036321166881992085,
"min": 0.036321166881992085,
"max": 10.38529742974788,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.067970690783113,
"min": 3.8536868135561235,
"max": 166.16475887596607,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1765682720",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1765684848"
},
"total": 2127.8084158829997,
"count": 1,
"self": 0.43535983099991427,
"children": {
"run_training.setup": {
"total": 0.025693969000030847,
"count": 1,
"self": 0.025693969000030847
},
"TrainerController.start_learning": {
"total": 2127.347362083,
"count": 1,
"self": 1.1476008659551553,
"children": {
"TrainerController._reset_env": {
"total": 3.5488113110000086,
"count": 1,
"self": 3.5488113110000086
},
"TrainerController.advance": {
"total": 2122.565538001045,
"count": 64144,
"self": 1.2117880389796483,
"children": {
"env_step": {
"total": 1487.7057640450207,
"count": 64144,
"self": 1352.5656859240412,
"children": {
"SubprocessEnvManager._take_step": {
"total": 134.44874492299903,
"count": 64144,
"self": 4.202563125010897,
"children": {
"TorchPolicy.evaluate": {
"total": 130.24618179798813,
"count": 62550,
"self": 130.24618179798813
}
}
},
"workers": {
"total": 0.6913331979806117,
"count": 64144,
"self": 0.0,
"children": {
"worker_root": {
"total": 2120.9118624309617,
"count": 64144,
"is_parallel": true,
"self": 874.12321091393,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0050684430000274006,
"count": 1,
"is_parallel": true,
"self": 0.00377103299990722,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012974100001201805,
"count": 8,
"is_parallel": true,
"self": 0.0012974100001201805
}
}
},
"UnityEnvironment.step": {
"total": 0.04805200700002388,
"count": 1,
"is_parallel": true,
"self": 0.0005683339999222881,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004504659999611249,
"count": 1,
"is_parallel": true,
"self": 0.0004504659999611249
},
"communicator.exchange": {
"total": 0.045382165000091845,
"count": 1,
"is_parallel": true,
"self": 0.045382165000091845
},
"steps_from_proto": {
"total": 0.0016510420000486192,
"count": 1,
"is_parallel": true,
"self": 0.00035021600024265354,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013008259998059657,
"count": 8,
"is_parallel": true,
"self": 0.0013008259998059657
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1246.7886515170317,
"count": 64143,
"is_parallel": true,
"self": 32.53600549603516,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.654276645014647,
"count": 64143,
"is_parallel": true,
"self": 21.654276645014647
},
"communicator.exchange": {
"total": 1093.378968577972,
"count": 64143,
"is_parallel": true,
"self": 1093.378968577972
},
"steps_from_proto": {
"total": 99.21940079800959,
"count": 64143,
"is_parallel": true,
"self": 20.25721121507752,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.96218958293207,
"count": 513144,
"is_parallel": true,
"self": 78.96218958293207
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 633.6479859170443,
"count": 64144,
"self": 2.281439316063711,
"children": {
"process_trajectory": {
"total": 119.04855562697924,
"count": 64144,
"self": 118.83492418197932,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21363144499991904,
"count": 2,
"self": 0.21363144499991904
}
}
},
"_update_policy": {
"total": 512.3179909740013,
"count": 451,
"self": 285.1750611249987,
"children": {
"TorchPPOOptimizer.update": {
"total": 227.1429298490026,
"count": 22806,
"self": 227.1429298490026
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.800000952964183e-07,
"count": 1,
"self": 9.800000952964183e-07
},
"TrainerController._save_models": {
"total": 0.08541092499990555,
"count": 1,
"self": 0.0010028239998973731,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08440810100000817,
"count": 1,
"self": 0.08440810100000817
}
}
}
}
}
}
}