{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.672910749912262,
"min": 0.616730809211731,
"max": 1.4508994817733765,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 20273.455078125,
"min": 18472.3203125,
"max": 44014.48828125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989987.0,
"min": 29952.0,
"max": 989987.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989987.0,
"min": 29952.0,
"max": 989987.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.10608898848295212,
"min": -0.11718977242708206,
"max": 0.1571715921163559,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 26.310070037841797,
"min": -28.35992431640625,
"max": 39.450069427490234,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.045009151101112366,
"min": -0.045009151101112366,
"max": 0.37586790323257446,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -11.162269592285156,
"min": -11.162269592285156,
"max": 89.08069610595703,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07146338403464242,
"min": 0.06506722529314171,
"max": 0.07281231384518187,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0719507605196363,
"min": 0.48680259599412284,
"max": 1.0719507605196363,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.007467172420810028,
"min": 0.00030027468442815966,
"max": 0.012326708114456759,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.11200758631215041,
"min": 0.003603296213137916,
"max": 0.12927786844314443,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.466857511080003e-06,
"min": 7.466857511080003e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011200286266620005,
"min": 0.00011200286266620005,
"max": 0.0032564720145094,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248892000000001,
"min": 0.10248892000000001,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373338,
"min": 1.3691136000000002,
"max": 2.4012773,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025864310800000013,
"min": 0.00025864310800000013,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003879646620000002,
"min": 0.003879646620000002,
"max": 0.10857051094,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01378475409001112,
"min": 0.01378475409001112,
"max": 0.5093545317649841,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.20677131414413452,
"min": 0.19732972979545593,
"max": 3.565481662750244,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 775.7777777777778,
"min": 685.3636363636364,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27928.0,
"min": 15984.0,
"max": 32535.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.2795332939260536,
"min": -1.0000000521540642,
"max": 0.6325863267887722,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 10.063198581337929,
"min": -32.000001668930054,
"max": 27.83379837870598,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.2795332939260536,
"min": -1.0000000521540642,
"max": 0.6325863267887722,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 10.063198581337929,
"min": -32.000001668930054,
"max": 27.83379837870598,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.10934112376869759,
"min": 0.10086687872436331,
"max": 10.808986278250813,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.9362804556731135,
"min": 3.9362804556731135,
"max": 172.94378045201302,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684687578",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684689398"
},
"total": 1820.4243316379998,
"count": 1,
"self": 0.3728432879997854,
"children": {
"run_training.setup": {
"total": 0.04471152600035566,
"count": 1,
"self": 0.04471152600035566
},
"TrainerController.start_learning": {
"total": 1820.0067768239996,
"count": 1,
"self": 1.7925489049684984,
"children": {
"TrainerController._reset_env": {
"total": 0.8317693110002438,
"count": 1,
"self": 0.8317693110002438
},
"TrainerController.advance": {
"total": 1817.2865952210313,
"count": 63199,
"self": 1.697485334963858,
"children": {
"env_step": {
"total": 1146.9654740730366,
"count": 63199,
"self": 1052.0796794200733,
"children": {
"SubprocessEnvManager._take_step": {
"total": 93.80180647793804,
"count": 63199,
"self": 5.2984733749590305,
"children": {
"TorchPolicy.evaluate": {
"total": 88.50333310297901,
"count": 62561,
"self": 88.50333310297901
}
}
},
"workers": {
"total": 1.083988175025297,
"count": 63199,
"self": 0.0,
"children": {
"worker_root": {
"total": 1817.090095374011,
"count": 63199,
"is_parallel": true,
"self": 877.0513256671402,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019190610000805464,
"count": 1,
"is_parallel": true,
"self": 0.000591429001360666,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013276319987198804,
"count": 8,
"is_parallel": true,
"self": 0.0013276319987198804
}
}
},
"UnityEnvironment.step": {
"total": 0.04031272300017008,
"count": 1,
"is_parallel": true,
"self": 0.00038763000065955566,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00031544099965685746,
"count": 1,
"is_parallel": true,
"self": 0.00031544099965685746
},
"communicator.exchange": {
"total": 0.03832684400003927,
"count": 1,
"is_parallel": true,
"self": 0.03832684400003927
},
"steps_from_proto": {
"total": 0.0012828079998143949,
"count": 1,
"is_parallel": true,
"self": 0.0002744330004134099,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001008374999400985,
"count": 8,
"is_parallel": true,
"self": 0.001008374999400985
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 940.0387697068709,
"count": 63198,
"is_parallel": true,
"self": 23.774064885807093,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 15.930998650875154,
"count": 63198,
"is_parallel": true,
"self": 15.930998650875154
},
"communicator.exchange": {
"total": 831.672887262016,
"count": 63198,
"is_parallel": true,
"self": 831.672887262016
},
"steps_from_proto": {
"total": 68.66081890817259,
"count": 63198,
"is_parallel": true,
"self": 14.91194012800861,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.74887878016398,
"count": 505584,
"is_parallel": true,
"self": 53.74887878016398
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 668.6236358130309,
"count": 63199,
"self": 3.090019102070073,
"children": {
"process_trajectory": {
"total": 100.1443645399695,
"count": 63199,
"self": 99.9334961989698,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21086834099969565,
"count": 2,
"self": 0.21086834099969565
}
}
},
"_update_policy": {
"total": 565.3892521709913,
"count": 442,
"self": 323.4010258780281,
"children": {
"TorchPPOOptimizer.update": {
"total": 241.9882262929632,
"count": 22773,
"self": 241.9882262929632
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4170000213198364e-06,
"count": 1,
"self": 1.4170000213198364e-06
},
"TrainerController._save_models": {
"total": 0.09586196999953245,
"count": 1,
"self": 0.0018641520000528544,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0939978179994796,
"count": 1,
"self": 0.0939978179994796
}
}
}
}
}
}
}