{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.27556145191192627,
"min": 0.26695194840431213,
"max": 1.3941131830215454,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8302.115234375,
"min": 7953.0322265625,
"max": 42291.81640625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989958.0,
"min": 29952.0,
"max": 989958.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989958.0,
"min": 29952.0,
"max": 989958.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.637802004814148,
"min": -0.10546290874481201,
"max": 0.6883416175842285,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 181.77357482910156,
"min": -25.522024154663086,
"max": 198.93072509765625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.013792258687317371,
"min": -0.027831459417939186,
"max": 0.2697961926460266,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.9307937622070312,
"min": -7.653651237487793,
"max": 63.94169616699219,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06789965870917686,
"min": 0.06419532771923557,
"max": 0.07318611259403292,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.950595221928476,
"min": 0.4978240721627546,
"max": 1.0546276776003651,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015187001611117181,
"min": 0.0013215781021859048,
"max": 0.017018990710355515,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21261802255564055,
"min": 0.013215781021859049,
"max": 0.2504981152088993,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.483061791392858e-06,
"min": 7.483061791392858e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001047628650795,
"min": 0.0001047628650795,
"max": 0.0036322480892507,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249432142857143,
"min": 0.10249432142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4349205,
"min": 1.3886848,
"max": 2.6107492999999993,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002591827107142857,
"min": 0.0002591827107142857,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036285579500000002,
"min": 0.0036285579500000002,
"max": 0.12109385507,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01018906943500042,
"min": 0.01018906943500042,
"max": 0.3457257151603699,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.14264696836471558,
"min": 0.14264696836471558,
"max": 2.4200799465179443,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 290.6380952380952,
"min": 265.55555555555554,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30517.0,
"min": 15984.0,
"max": 33770.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.669823063752399,
"min": -1.0000000521540642,
"max": 1.7159184955612377,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 173.6615986302495,
"min": -30.21900163590908,
"max": 187.51799749583006,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.669823063752399,
"min": -1.0000000521540642,
"max": 1.7159184955612377,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 173.6615986302495,
"min": -30.21900163590908,
"max": 187.51799749583006,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.031166161715261227,
"min": 0.029188046224278338,
"max": 6.458563609048724,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2412808183871675,
"min": 3.1523089922220606,
"max": 103.33701774477959,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1701693356",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1701695679"
},
"total": 2323.387765455,
"count": 1,
"self": 0.49491358400018726,
"children": {
"run_training.setup": {
"total": 0.08818975799999862,
"count": 1,
"self": 0.08818975799999862
},
"TrainerController.start_learning": {
"total": 2322.8046621129997,
"count": 1,
"self": 1.3247596239766608,
"children": {
"TrainerController._reset_env": {
"total": 4.584955154999989,
"count": 1,
"self": 4.584955154999989
},
"TrainerController.advance": {
"total": 2316.8159202090233,
"count": 64321,
"self": 1.446254879006574,
"children": {
"env_step": {
"total": 1671.4985068330034,
"count": 64321,
"self": 1537.3414384419373,
"children": {
"SubprocessEnvManager._take_step": {
"total": 133.30170170201092,
"count": 64321,
"self": 4.730092713001682,
"children": {
"TorchPolicy.evaluate": {
"total": 128.57160898900923,
"count": 62579,
"self": 128.57160898900923
}
}
},
"workers": {
"total": 0.8553666890551312,
"count": 64321,
"self": 0.0,
"children": {
"worker_root": {
"total": 2318.292344265036,
"count": 64321,
"is_parallel": true,
"self": 899.5020173450746,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006787363999990248,
"count": 1,
"is_parallel": true,
"self": 0.004819732000100885,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001967631999889363,
"count": 8,
"is_parallel": true,
"self": 0.001967631999889363
}
}
},
"UnityEnvironment.step": {
"total": 0.0527699599999778,
"count": 1,
"is_parallel": true,
"self": 0.0006082500000275104,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005854839999983596,
"count": 1,
"is_parallel": true,
"self": 0.0005854839999983596
},
"communicator.exchange": {
"total": 0.04992519999996148,
"count": 1,
"is_parallel": true,
"self": 0.04992519999996148
},
"steps_from_proto": {
"total": 0.001651025999990452,
"count": 1,
"is_parallel": true,
"self": 0.0003381109999054388,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013129150000850132,
"count": 8,
"is_parallel": true,
"self": 0.0013129150000850132
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1418.7903269199612,
"count": 64320,
"is_parallel": true,
"self": 34.656530345925376,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.839583564989766,
"count": 64320,
"is_parallel": true,
"self": 23.839583564989766
},
"communicator.exchange": {
"total": 1262.8207402320352,
"count": 64320,
"is_parallel": true,
"self": 1262.8207402320352
},
"steps_from_proto": {
"total": 97.47347277701095,
"count": 64320,
"is_parallel": true,
"self": 19.107457206935067,
"children": {
"_process_rank_one_or_two_observation": {
"total": 78.36601557007589,
"count": 514560,
"is_parallel": true,
"self": 78.36601557007589
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 643.8711584970131,
"count": 64321,
"self": 2.7216486680499656,
"children": {
"process_trajectory": {
"total": 129.66686075195798,
"count": 64321,
"self": 129.44409639595756,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22276435600042532,
"count": 2,
"self": 0.22276435600042532
}
}
},
"_update_policy": {
"total": 511.4826490770052,
"count": 455,
"self": 304.2638235180018,
"children": {
"TorchPPOOptimizer.update": {
"total": 207.2188255590034,
"count": 22818,
"self": 207.2188255590034
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0670000847312622e-06,
"count": 1,
"self": 1.0670000847312622e-06
},
"TrainerController._save_models": {
"total": 0.07902605799972662,
"count": 1,
"self": 0.0012285600000723207,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0777974979996543,
"count": 1,
"self": 0.0777974979996543
}
}
}
}
}
}
}